"""Tokenization classes for BERTweet"""

import html
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple

import regex

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}
vocab_filemerges_filec                     t                      }| d         }| dd         D ]}|                    ||f           |}t          |          }|S )z
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    r      N)setadd)wordpairs	prev_charchars       n/var/www/html/ai-engine/env/lib/python3.11/site-packages/transformers/models/bertweet/tokenization_bertweet.py	get_pairsr   &   s[     EEEQIQRR  		9d#$$$		JJEL    c            
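

# For illustration (hypothetical symbols, not part of the original file):
# get_pairs(("l", "o", "w", "e", "r</w>")) returns
# {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r</w>")}, i.e. every adjacent
# symbol bigram that bpe() below considers for merging.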
           e Zd ZdZeZ	 	 	 	 	 	 	 	 d fd	Z	 dd
ee         de	ee                  dee         fdZ
	 d d
ee         de	ee                  dedee         f fdZ	 dd
ee         de	ee                  dee         fdZed             Zd Zd Zd Zd Zd Zd Zd Zd Zddede	e         dee         fdZd Z xZS )!BertweetTokenizera	  
    Constructs a BERTweet tokenizer, using Byte-Pair-Encoding.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        normalization (`bool`, *optional*, defaults to `False`):
            Whether or not to apply a normalization preprocess.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
    F<s></s><unk><pad><mask>c                    	 ddl m} || _        n1# t          $ r$ t                              d           d | _        Y nw xY w|| _        || _        i | _        d| j        t          |          <   d| j        t          |	          <   d| j        t          |          <   d| j        t          |          <   | 
                    |           d | j                                        D             | _        t          |d	          5 }|                                                    d
          d d         }d d d            n# 1 swxY w Y   d |D             }t!          t#          |t%          t'          |                                        | _        i | _        || _        t/                      | _        ddd| _         t5                      j        d|||||||	|
d| d S )Nr   )demojizezsemoji is not installed, thus not converting emoticons or emojis into text. Install emoji: pip3 install emoji==0.6.0r      r   c                     i | ]\  }}||	S  r"   ).0kvs      r   
<dictcomp>z.BertweetTokenizer.__init__.<locals>.<dictcomp>   s    >>>A1>>>r   utf-8encoding
c                 `    g | ]+}t          |                                d d                   ,S )Nr+   )tuplesplit)r#   merges     r   
<listcomp>z.BertweetTokenizer.__init__.<locals>.<listcomp>   s1    @@@%crc*++@@@r   'z...)u   ’u   …)normalization	bos_token	eos_token	sep_token	cls_token	unk_token	pad_token
mask_tokenr"   )emojir   	demojizerImportErrorloggerwarningr
   r   encoderstradd_from_fileitemsdecoderopenreadr.   dictziprangelen	bpe_rankscacher2   TweetTokenizertweetPreprocessorspecial_punctssuper__init__)selfr
   r   r2   r3   r4   r5   r6   r7   r8   r9   kwargsr   merges_handlemerges	__class__s                  r   rP   zBertweetTokenizer.__init__k   sB   		"&&&&&&%DNN 	" 	" 	"NN(   "DNNN	" %&'(S^^$'(S^^$'(S^^$'(S^^$:&&&>>););)=)=>>>+000 	;M"''))//55crc:F	; 	; 	; 	; 	; 	; 	; 	; 	; 	; 	; 	; 	; 	; 	;@@@@@c&%F*<*<==>>
*!/!1!1&)%88 
	
'!
	
 
	
 
	
 
	
 
	
 
	
 
	
s    +??0D==EENtoken_ids_0token_ids_1returnc                 p    || j         g|z   | j        gz   S | j         g}| j        g}||z   |z   |z   |z   |z   S )a  
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A BERTweet sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
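
    # Worked example: with the default vocab bootstrap in __init__ above
    # ("<s>" -> 0, "</s>" -> 2) and hypothetical content ids 5, 6, 7:
    #     build_inputs_with_special_tokens([5, 6])      -> [0, 5, 6, 2]
    #     build_inputs_with_special_tokens([5, 6], [7]) -> [0, 5, 6, 2, 2, 7, 2]
    # which is the "<s> A </s></s> B </s>" layout described above.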
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
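
    # For the same hypothetical pair of sequences, the mask flags only the
    # added special tokens:
    #     get_special_tokens_mask([5, 6], [7]) -> [1, 0, 0, 1, 1, 0, 1]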
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. BERTweet does
        not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
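
    # And because BERTweet does not use token type ids,
    #     create_token_type_ids_from_sequences([5, 6], [7])
    # returns [0, 0, 0, 0, 0, 0, 0], one zero per position including the
    # special tokens.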
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
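
    # A worked illustration of bpe() (the merge ranks are hypothetical, not
    # from a real bpe.codes file): for the token "low", the initial symbol
    # tuple is ("l", "o", "w</w>"). If self.bpe_ranks ranks ("l", "o") first
    # and ("lo", "w</w>") second, the loop merges to ("lo", "w</w>") and then
    # ("low</w>",), and returns "low". A token whose final tuple stays split,
    # e.g. ("lo", "wer</w>"), comes back "@@ "-joined as "lo@@ wer".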
9d++ 	B 	BETXXe__%:%:3%?%? @ @AAAAr   c                      j         D ]#}|                    | j         |                   }$ j                            |          }d                     fd|D                       }|                    dd                              dd                              dd                              dd	                              d
d          }|                    dd                              dd                              dd                              dd                              dd                              dd          }|                    dd                              dd                              dd                              dd          }d                    |                                          S ) z'
        Normalize a raw Tweet
        r   c                 :    g | ]}                     |          S r"   )normalizeToken)r#   r|   rQ   s     r   r0   z4BertweetTokenizer.normalizeTweet.<locals>.<listcomp>;  s'    MMMUd11%88MMMr   zcannot zcan not zn't z n't zn 't zca n'tzcan'tzai n'tzain'tz'm z 'm z're z 're z's z 's z'll z 'll z'd z 'd z've z 've z p . m .z  p.m.z p . m z p.m z a . m .z a.m.z a . m z a.m )rN   replacerM   tokenizer{   r.   )rQ   tweetpuncttokens	normTweets   `    r   r   z BertweetTokenizer.normalizeTweet3  s    ( 	E 	EEMM%)<U)CDDEE'0077HHMMMMfMMMNN	 i44WVW%%WWg&&WXw''WXw'' 	 eV,,WVW%%WUF##WVW%%WUF##WVW%% 	 j(33WY((WZ))WY((	 	 xx	))***r   c                 B   |                                 }|                    d          rdS |                    d          s|                    d          rdS t          |          dk    r4|| j        v r| j        |         S | j        |                     |          S |S |S )z-
        Normalize tokens in a Tweet
        @z@USERhttpwwwHTTPURLr   )lower
startswithrI   rN   r;   )rQ   r|   lowercased_tokens      r   r   z BertweetTokenizer.normalizeTokenU  s     !;;==C   	7((00 
	4D4O4OPU4V4V 
	9ZZ1__+++*511~)~~e,,,Lr   c                 r    | j                             || j                             | j                            S )z0Converts a token (str) in an id using the vocab.)r?   rn   r7   )rQ   r|   s     r   _convert_token_to_idz&BertweetTokenizer._convert_token_to_idh  s,    |t|'7'7'G'GHHHr   c                 B    | j                             || j                  S )z=Converts an index (integer) in a token (str) using the vocab.)rC   rn   r7   )rQ   rw   s     r   _convert_id_to_tokenz&BertweetTokenizer._convert_id_to_tokenl  s    |t~666r   c                 |    d                     |                              dd                                          }|S )z:Converts a sequence of tokens (string) in a single string.r   rs    )r{   r   strip)rQ   r   
out_strings      r   convert_tokens_to_stringz*BertweetTokenizer.convert_tokens_to_stringp  s5    XXf%%--eR88>>@@
r   save_directoryfilename_prefixc                    t           j                            |          s t                              d| d           d S t           j                            ||r|dz   ndt          d         z             }t           j                            ||r|dz   ndt          d         z             }t           j                            | j                  t           j                            |          k    r:t           j        	                    | j                  rt          | j        |           nzt           j        	                    | j                  sVt          |d          5 }| j                                        }|                    |           d d d            n# 1 swxY w Y   t           j                            | j                  t           j                            |          k    rt          | j        |           ||fS )NzVocabulary path (z) should be a directory-r   r
   r   wb)ospathisdirr=   errorr{   VOCAB_FILES_NAMESabspathr
   isfiler   rD   sp_modelserialized_model_protowriter   )rQ   r   r   out_vocab_fileout_merge_fileficontent_spiece_models          r   save_vocabularyz!BertweetTokenizer.save_vocabularyu  s   w}}^,, 	LLT^TTTUUUFoM_s222QbcoQpp
 
 oM_s222QbcpQqq
 
 7??4?++rw~/N/NNNSUSZSaSabfbqSrSrNT_n555500 	/nd++ /r'+}'K'K'M'M$-.../ / / / / / / / / / / / / / / 7??4+,,0O0OOOT%~666~--s   /FFFc                    t          |t                    rs	 t          |dd          5 }|                     |           ddd           n# 1 swxY w Y   n0# t          $ r}|d}~wt
          $ r t          d| d          w xY wdS |                                }|D ]f}|                                }|	                    d          }|dk    rt          d	          |d|         }t          | j                  | j        |<   gdS )
zi
        Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
        rr'   r(   NzIncorrect encoding detected in z, please rebuild the datasetr   r+   z5Incorrect dictionary format, expected '<token> <cnt>')
isinstancer@   rD   rA   FileNotFoundErrorUnicodeError	Exception	readlinesr   rfindry   rI   r?   )	rQ   ffdfnfelineslineTmplineidxr   s	            r   rA   zBertweetTokenizer.add_from_file  sq    a 	c!S7333 +r&&r***+ + + + + + + + + + + + + + +$   
 c c c a! a a abbbcF 	3 	3G==??D**S//Cbyy !XYYY:D!$T\!2!2DL	3 	3s9   A AA AA AA 
B!A##!B)Fr   r   r   r   r   r   r   re   )NF)__name__


"""
Twitter-aware tokenizer, designed to be flexible and easy to adapt to new domains and tasks. The basic logic is this:

1. The tuple REGEXPS defines a list of regular expression strings.

2. The REGEXPS strings are put, in order, into a compiled regular expression object called WORD_RE.

3. The tokenization is done by WORD_RE.findall(s), where s is the user-supplied string, inside the tokenize() method
   of the class TweetTokenizer.

4. When instantiating TweetTokenizer objects, there is a single option: preserve_case. By default, it is set to True.
   If it is set to False, then the tokenizer will downcase everything except for emoticons.
"""


######################################################################
# The following strings are components in the regular expression
# that is used for tokenizing. It's important that the phone number
# pattern appears first in the final regex (since it can contain
# whitespace). It also could matter that tags come after emoticons,
# due to the possibility of having text like
#
#     <:| and some text >:)
#
# Most importantly, the final element should always be last, since it
# does a last ditch whitespace-based tokenization of whatever is left.

# This particular element is used in a couple ways, so we define it
# with a name:
EMOTICONS = r"""
    (?:
      [<>]?
      [:;=8]                     # eyes
      [\-o\*\']?                 # optional nose
      [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
      |
      [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
      [\-o\*\']?                 # optional nose
      [:;=8]                     # eyes
      [<>]?
      |
      <3                         # heart
    )"""

# URL pattern due to John Gruber, modified by Tom Winzig. See
# https://gist.github.com/winzig/8894715

URLS = r"""			# Capture 1: entire matched URL
  (?:
  https?:				# URL protocol and colon
    (?:
      /{1,3}				# 1-3 slashes
      |					#   or
      [a-z0-9%]				# Single letter or digit or '%'
                                       # (Trying not to match e.g. "URI::Escape")
    )
    |					#   or
                                       # looks like domain name followed by a slash:
    [a-z0-9.\-]+[.]
    (?:[a-z]{2,13})
    /
  )
  (?:					# One or more:
    [^\s()<>{}\[\]]+			# Run of non-space, non-()<>{}[]
    |					#   or
    \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
    |
    \([^\s]+?\)				# balanced parens, non-recursive: (...)
  )+
  (?:					# End with:
    \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
    |
    \([^\s]+?\)				# balanced parens, non-recursive: (...)
    |					#   or
    [^\s`!()\[\]{};:'".,<>?«»“”‘’]	# not a space or one of these punct chars
  )
  |					# OR, the following to match naked domains:
  (?:
    (?<!@)			        # not preceded by a @, avoid matching foo@_gmail.com_
    [a-z0-9]+
    (?:[.\-][a-z0-9]+)*
    [.]
    (?:[a-z]{2,13})
    \b
    /?
    (?!@)			        # not succeeded by a @,
                            # avoid matching "foo.na" in "foo.na@example.com"
  )
  )
"""

# The components of the tokenizer:
REGEXPS = (
    URLS,
    # Phone numbers:
    r"""
    (?:
      (?:            # (international)
        \+?[01]
        [ *\-.\)]*
      )?
      (?:            # (area code)
        [\(]?
        \d{3}
        [ *\-.\)]*
      )?
      \d{3}          # exchange
      [ *\-.\)]*
      \d{4}          # base
    )""",
    # ASCII Emoticons
    EMOTICONS,
    # HTML tags:
    r"""<[^>\s]+>""",
    # ASCII Arrows
    r"""[\-]+>|<[\-]+""",
    # Twitter username:
    r"""(?:@[\w_]+)""",
    # Twitter hashtags:
    r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)""",
    # email addresses
    r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]""",
    # Remaining word types:
    r"""
    (?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_]) # Words with apostrophes or dashes.
    |
    (?:[+\-]?\d+[,/.:-]\d+[+\-]?)  # Numbers, including fractions, decimals.
    |
    (?:[\w_]+)                     # Words without apostrophes or dashes.
    |
    (?:\.(?:\s*\.){1,})            # Ellipsis dots.
    |
    (?:\S)                         # Everything else that isn't whitespace.
    z(%s)|z([^a-zA-Z0-9])\1{3,}z&(#?(x?))([^&;\s]+);strictc                 d    |d}t          | t                    r|                     ||          S | S )Nr'   )r   bytesdecode)r   r)   errorss      r   _str_to_unicoder   \  s8    $ -{{8V,,,Kr   r"   Tr'   c                 d    fd}t                               |t          | |                    S )u  
    Remove entities from text by converting them to their corresponding unicode character.

    Args:
        text:
            A unicode string or a byte string encoded in the given *encoding* (which defaults to 'utf-8').
        keep (list):
            List of entity names which should not be replaced. This supports both numeric entities (`&#nnnn;` and
            `&#hhhh;`) and named entities (such as `&nbsp;` or `&gt;`).
        remove_illegal (bool):
            If `True`, entities that can't be converted are removed. Otherwise, entities that can't be converted are
            kept "as is".

    Returns: A unicode string with the entities removed.

    See https://github.com/scrapy/w3lib/blob/master/w3lib/html.py

    Examples:

    ```python
    >>> from nltk.tokenize.casual import _replace_html_entities

    >>> _replace_html_entities(b"Price: &pound;100")
    'Price: \xa3100'

    >>> print(_replace_html_entities(b"Price: &pound;100"))
    Price: £100
    ```"""

    def _convert_entity(match):
        entity_body = match.group(3)
        if match.group(1):
            try:
                if match.group(2):
                    number = int(entity_body, 16)
                else:
                    number = int(entity_body, 10)
                # Numeric character references in the 80-9F range are typically
                # interpreted by browsers as the characters mapped by Windows-1252.
                if 0x80 <= number <= 0x9F:
                    return bytes((number,)).decode("cp1252")
            except ValueError:
                number = None
        else:
            if entity_body in keep:
                return match.group(0)
            else:
                number = html.entities.name2codepoint.get(entity_body)

        if number is not None:
            try:
                return chr(number)
            except (ValueError, OverflowError):
                pass

        return "" if remove_illegal else match.group(0)

    return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding))


######################################################################


class TweetTokenizer:
    r"""
    Examples:

    ```python
    >>> # Tokenizer for tweets.
    >>> from nltk.tokenize import TweetTokenizer

    >>> tknzr = TweetTokenizer()
    >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
    >>> tknzr.tokenize(s0)
    ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--']

    >>> # Examples using *strip_handles* and *reduce_len parameters*:
    >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
    >>> s1 = "@remy: This is waaaaayyyy too much for you!!!!!!"
    >>> tknzr.tokenize(s1)
    [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!']
    ```"""

    def __init__(self, preserve_case=True, reduce_len=False, strip_handles=False):
        self.preserve_case = preserve_case
        self.reduce_len = reduce_len
        self.strip_handles = strip_handles

    def tokenize(self, text):
        """
        Args:
            text: str

        Returns: list(str) A tokenized list of strings; concatenating this list returns the original string if
        `preserve_case=False`
        \1\1\1c                 n    g | ]2}t                               |          r|n|                                3S r"   )EMOTICON_REsearchr   )r#   xs     r   r0   z+TweetTokenizer.tokenize.<locals>.<listcomp>  s7    NNN1+,,Q//>QQQWWYYNNNr   )
r   r   remove_handlesr   reduce_lengtheningHANG_REr   WORD_REr   r   )rQ   r   	safe_textr   s       r   r   zTweetTokenizer.tokenize  s     &d++ 	(!$''D? 	,%d++DKK	400		**! 	ONNNNNEr   NTFF)r   r   r   r   rP   r   r"   r   r   rL   rL     sA         &+ + + +
    r   rL   c                 V    t          j        d          }|                    d|           S )za
    Replace repeated character sequences of length 3 or greater with sequences of length 3.
    """
    pattern = regex.compile(r"(.)\1{2,}")
    return pattern.sub(r"\1\1\1", text)


def remove_handles(text):
    """
    Remove Twitter username handles from text.
    """
    pattern = regex.compile(
        r"(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){20}(?!@))|(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){1,19})(?![A-Za-z0-9_]*@)"
    )
    # Substitute handles with ' ' to ensure that text on either side of removed handles are tokenized correctly
    return pattern.sub(" ", text)


######################################################################
# Tokenization Function
######################################################################


def casual_tokenize(text, preserve_case=True, reduce_len=False, strip_handles=False):
    """
    Convenience function for wrapping the tokenizer.
    """
    return TweetTokenizer(preserve_case=preserve_case, reduce_len=reduce_len, strip_handles=strip_handles).tokenize(
        text
    )