
from re import finditer
from xml.sax.saxutils import escape, unescape


def string_span_tokenize(s, sep):
    r"""
    Return the offsets of the tokens in *s*, as a sequence of ``(start, end)``
    tuples, by splitting the string at each occurrence of *sep*.

        >>> from nltk.tokenize.util import string_span_tokenize
        >>> s = '''Good muffins cost $3.88\nin New York.  Please buy me
        ... two of them.\n\nThanks.'''
        >>> list(string_span_tokenize(s, " ")) # doctest: +NORMALIZE_WHITESPACE
        [(0, 4), (5, 12), (13, 17), (18, 26), (27, 30), (31, 36), (37, 37),
        (38, 44), (45, 48), (49, 55), (56, 58), (59, 73)]

    :param s: the string to be tokenized
    :type s: str
    :param sep: the token separator
    :type sep: str
    :rtype: iter(tuple(int, int))
    """
    if len(sep) == 0:
        raise ValueError("Token delimiter must not be empty")
    left = 0
    while True:
        try:
            right = s.index(sep, left)
            if right != 0:
                yield left, right
        except ValueError:
            # No further separator: emit the trailing span (if non-empty) and stop.
            if left != len(s):
                yield left, len(s)
            break

        left = right + len(sep)
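
# Example (illustration): the offsets can be mapped back to the token strings
# by slicing the original string.
#
#     s = "Good muffins cost $3.88"
#     [s[start:end] for start, end in string_span_tokenize(s, " ")]
#     # -> ['Good', 'muffins', 'cost', '$3.88']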


def regexp_span_tokenize(s, regexp):
    r"""
    Return the offsets of the tokens in *s*, as a sequence of ``(start, end)``
    tuples, by splitting the string at each successive match of *regexp*.

        >>> from nltk.tokenize.util import regexp_span_tokenize
        >>> s = '''Good muffins cost $3.88\nin New York.  Please buy me
        ... two of them.\n\nThanks.'''
        >>> list(regexp_span_tokenize(s, r'\s')) # doctest: +NORMALIZE_WHITESPACE
        [(0, 4), (5, 12), (13, 17), (18, 23), (24, 26), (27, 30), (31, 36),
        (38, 44), (45, 48), (49, 51), (52, 55), (56, 58), (59, 64), (66, 73)]

    :param s: the string to be tokenized
    :type s: str
    :param regexp: regular expression that matches token separators (must not be empty)
    :type regexp: str
    :rtype: iter(tuple(int, int))
    """
    left = 0
    for m in finditer(regexp, s):
        right, next = m.span()
        if right != left:
            yield left, right
        left = next
    # Always emit the final span, from the last separator to the end of s.
    yield left, len(s)
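
# Note: unlike string_span_tokenize, the final span from the last separator to
# the end of the string is always yielded, so input that ends with a separator
# produces a trailing empty span, e.g. (illustration):
#
#     list(regexp_span_tokenize("a b ", r"\s"))
#     # -> [(0, 1), (2, 3), (4, 4)]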


def spans_to_relative(spans):
    r"""
    Return a sequence of relative spans, given a sequence of spans.

        >>> from nltk.tokenize import WhitespaceTokenizer
        >>> from nltk.tokenize.util import spans_to_relative
        >>> s = '''Good muffins cost $3.88\nin New York.  Please buy me
        ... two of them.\n\nThanks.'''
        >>> list(spans_to_relative(WhitespaceTokenizer().span_tokenize(s))) # doctest: +NORMALIZE_WHITESPACE
        [(0, 4), (1, 7), (1, 4), (1, 5), (1, 2), (1, 3), (1, 5), (2, 6),
        (1, 3), (1, 2), (1, 3), (1, 2), (1, 5), (2, 7)]

    :param spans: a sequence of (start, end) offsets of the tokens
    :type spans: iter(tuple(int, int))
    :rtype: iter(tuple(int, int))
    """
    prev = 0
    for left, right in spans:
        # Each relative span is (gap since the previous token's end, token length).
        yield left - prev, right - left
        prev = right
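
# Illustration (hypothetical helper, not part of this module): the relative
# spans above can be turned back into absolute offsets by accumulating them.
#
#     def relative_to_spans(rel_spans):
#         end = 0
#         for gap, length in rel_spans:
#             start = end + gap
#             end = start + length
#             yield start, end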
d	Zeeeeee	e
egZd
S )CJKCharsa^  
    An object that enumerates the code points of the CJK characters as listed on
    https://en.wikipedia.org/wiki/Basic_Multilingual_Plane#Basic_Multilingual_Plane

    This is a Python port of the CJK code point enumerations of Moses tokenizer:
    https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/detokenizer.perl#L309
    """

    # Hangul Jamo (U+1100..U+11FF)
    Hangul_Jamo = (4352, 4607)

    # CJK Radicals Supplement (U+2E80) through Yi Radicals (U+A4CF)
    CJK_Radicals = (11904, 42191)

    # Phags-pa (U+A840..U+A87F)
    Phags_Pa = (43072, 43135)

    # Hangul Syllables (U+AC00..U+D7AF)
    Hangul_Syllables = (44032, 55215)

    # CJK Compatibility Ideographs (U+F900..U+FAFF)
    CJK_Compatibility_Ideographs = (63744, 64255)

    # CJK Compatibility Forms (U+FE30..U+FE4F)
    CJK_Compatibility_Forms = (65072, 65103)

    # Halfwidth forms of Katakana and Hangul (U+FF65..U+FFDC)
    Katakana_Hangul_Halfwidth = (65381, 65500)

    # Supplementary Ideographic Plane (U+20000..U+2FFFF)
    Supplementary_Ideographic_Plane = (131072, 196607)

    ranges = [
        Hangul_Jamo,
        CJK_Radicals,
        Phags_Pa,
        Hangul_Syllables,
        CJK_Compatibility_Ideographs,
        CJK_Compatibility_Forms,
        Katakana_Hangul_Halfwidth,
        Supplementary_Ideographic_Plane,
    ]


def is_cjk(character):
    """
    Python port of Moses' code to check for a CJK character.

    >>> CJKChars().ranges
    [(4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215), (63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)]
    >>> is_cjk(u'㏾')
    True
    >>> is_cjk(u'﹟')
    False

    :param character: The character that needs to be checked.
    :type character: char
    :return: bool
    """
    return any(
        [
            start <= ord(character) <= end
            for start, end in [
                (4352, 4607),
                (11904, 42191),
                (43072, 43135),
                (44032, 55215),
                (63744, 64255),
                (65072, 65103),
                (65381, 65500),
                (131072, 196607),
            ]  # These are the code point ranges listed in CJKChars.ranges above.
        ]
    )


def xml_escape(text):
    """
    This function transforms the input text into an "escaped" version suitable
    for well-formed XML formatting.

    Note that the default xml.sax.saxutils.escape() function doesn't escape
    some characters that Moses does, so we have to manually add them to the
    entities dictionary.

        >>> input_str = ''')| & < > ' " ] ['''
        >>> expected_output =  ''')| &amp; &lt; &gt; ' " ] ['''
        >>> escape(input_str) == expected_output
        True
        >>> xml_escape(input_str)
        ')&#124; &amp; &lt; &gt; &apos; &quot; &#93; &#91;'

    :param text: The text that needs to be escaped.
    :type text: str
    :rtype: str
    """
    return escape(
        text,
        entities={
            r"'": r"&apos;",
            r'"': r"&quot;",
            r"|": r"&#124;",
            r"[": r"&#91;",
            r"]": r"&#93;",
        },
    )
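
# Note: xml_unescape below applies the inverse entity mapping, so a round trip
# xml_unescape(xml_escape(text)) should return the original text unchanged.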


def xml_unescape(text):
    """
    This function transforms the "escaped" version suitable
    for well-formed XML formatting into a human-readable string.

    Note that the default xml.sax.saxutils.unescape() function doesn't unescape
    some characters that Moses does, so we have to manually add them to the
    entities dictionary.

        >>> from xml.sax.saxutils import unescape
        >>> s = ')&#124; &amp; &lt; &gt; &apos; &quot; &#93; &#91;'
        >>> expected = ''')| & < > ' " ] ['''
        >>> xml_unescape(s) == expected
        True

    :param text: The text that needs to be unescaped.
    :type text: str
    :rtype: str
    """
    return unescape(
        text,
        entities={
            r"&apos;": r"'",
            r"&quot;": r'"',
            r"&#124;": r"|",
            r"&#91;": r"[",
            r"&#93;": r"]",
        },
    )


def align_tokens(tokens, sentence):
    """
    This function attempts to find the offsets of the tokens in *s*, as a sequence
    of ``(start, end)`` tuples, given the tokens and also the source string.

        >>> from nltk.tokenize import TreebankWordTokenizer
        >>> from nltk.tokenize.util import align_tokens
        >>> s = str("The plane, bound for St Petersburg, crashed in Egypt's "
        ... "Sinai desert just 23 minutes after take-off from Sharm el-Sheikh "
        ... "on Saturday.")
        >>> tokens = TreebankWordTokenizer().tokenize(s)
        >>> expected = [(0, 3), (4, 9), (9, 10), (11, 16), (17, 20), (21, 23),
        ... (24, 34), (34, 35), (36, 43), (44, 46), (47, 52), (52, 54),
        ... (55, 60), (61, 67), (68, 72), (73, 75), (76, 83), (84, 89),
        ... (90, 98), (99, 103), (104, 109), (110, 119), (120, 122),
        ... (123, 131), (131, 132)]
        >>> output = list(align_tokens(tokens, s))
        >>> len(tokens) == len(expected) == len(output)  # Check that the numbers of tokens and spans match.
        True
        >>> expected == list(align_tokens(tokens, s))  # Check that the output is as expected.
        True
        >>> tokens == [s[start:end] for start, end in output]  # Check that the slices of the string correspond to the tokens.
        True

    :param tokens: The list of strings that are the result of tokenization
    :type tokens: list(str)
    :param sentence: The original string
    :type sentence: str
    :rtype: list(tuple(int,int))
    """
    point = 0
    offsets = []
    for token in tokens:
        try:
            start = sentence.index(token, point)
        except ValueError as e:
            raise ValueError(f'substring "{token}" not found in "{sentence}"') from e
        point = start + len(token)
        offsets.append((start, point))
    return offsets