from dataclasses import dataclass, field, replace
from typing import (
    TYPE_CHECKING,
    Dict,
    Iterable,
    List,
    Optional,
    Sequence,
    Tuple,
    Union,
)

import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.distributions import Categorical

from .audio import CHUNK_LENGTH
from .tokenizer import Tokenizer, get_tokenizer
from .utils import compression_ratio

if TYPE_CHECKING:
    from .model import Whisper


@torch.no_grad()
def detect_language(
    model: "Whisper", mel: Tensor, tokenizer: Tokenizer = None
) -> Tuple[Tensor, List[dict]]:
    """
    Detect the spoken language in the audio, and return them as a list of strings, along with the ids
    of the most probable language tokens and the probability distribution over all language tokens.
    This is performed outside the main decode loop in order to not interfere with kv-caching.

    Returns
    -------
    language_tokens : Tensor, shape = (n_audio,)
        ids of the most probable language tokens, which appear after the startoftranscript token.
    language_probs : List[Dict[str, float]], length = n_audio
        list of dictionaries containing the probability distribution over all languages.
    """
    if tokenizer is None:
        tokenizer = get_tokenizer(
            model.is_multilingual, num_languages=model.num_languages
        )
    if (
        tokenizer.language is None
        or tokenizer.language_token not in tokenizer.sot_sequence
    ):
        raise ValueError(
            "This model doesn't have language tokens so it can't perform lang id"
        )

    single = mel.ndim == 2
    if single:
        mel = mel.unsqueeze(0)

    # skip the encoder forward pass if already-encoded audio features were given
    if mel.shape[-2:] != (model.dims.n_audio_ctx, model.dims.n_audio_state):
        mel = model.encoder(mel)

    # forward pass using a single token, startoftranscript
    n_audio = mel.shape[0]
    x = torch.tensor([[tokenizer.sot]] * n_audio).to(mel.device)  # [n_audio, 1]
    logits = model.logits(x, mel)[:, 0]

    # collect detected languages; suppress all non-language tokens
    mask = torch.ones(logits.shape[-1], dtype=torch.bool)
    mask[list(tokenizer.all_language_tokens)] = False
    logits[:, mask] = -np.inf
    language_tokens = logits.argmax(dim=-1)
    language_token_probs = logits.softmax(dim=-1).cpu()
    language_probs = [
        {
            c: language_token_probs[i, j].item()
            for j, c in zip(tokenizer.all_language_tokens, tokenizer.all_language_codes)
        }
        for i in range(n_audio)
    ]

    if single:
        language_tokens = language_tokens[0]
        language_probs = language_probs[0]

    return language_tokens, language_probs


@dataclass(frozen=True)
class DecodingOptions:
    # whether to perform X->X "transcribe" or X->English "translate"
    task: str = "transcribe"

    # language that the audio is in; uses detected language if None
    language: Optional[str] = None

    # sampling-related options
    temperature: float = 0.0
    sample_len: Optional[int] = None  # maximum number of tokens to sample
    best_of: Optional[int] = None  # number of independent samples to collect, if t > 0
    beam_size: Optional[int] = None  # number of beams in beam search, if t == 0
    patience: Optional[float] = None  # patience in beam search

    # "alpha" in Google NMT, or None for length normalization, when ranking generations
    length_penalty: Optional[float] = None

    # text or tokens to feed as the prompt (previous context) or the prefix (current context)
    prompt: Optional[Union[str, List[int]]] = None
    prefix: Optional[Union[str, List[int]]] = None

    # list of token ids (or a comma-separated string of token ids) to suppress;
    # "-1" will suppress a set of symbols as defined in `tokenizer.non_speech_tokens()`
    suppress_tokens: Optional[Union[str, Iterable[int]]] = "-1"
    suppress_blank: bool = True  # this will suppress blank outputs

    # timestamp sampling options
    without_timestamps: bool = False  # use <|notimestamps|> to sample text tokens only
    max_initial_timestamp: Optional[float] = 1.0

    # implementation details
    fp16: bool = True  # use fp16 for most of the calculation


@dataclass(frozen=True)
class DecodingResult:
    audio_features: Tensor
    language: str
    language_probs: Optional[Dict[str, float]] = None
    tokens: List[int] = field(default_factory=list)
    text: str = ""
    avg_logprob: float = np.nan
    no_speech_prob: float = np.nan
    temperature: float = np.nan
    compression_ratio: float = np.nan


class Inference:
    def logits(self, tokens: Tensor, audio_features: Tensor) -> Tensor:
        """Perform a forward pass on the decoder and return per-token logits"""
        raise NotImplementedError

    def rearrange_kv_cache(self, source_indices) -> None:
        """Update the key-value cache according to the updated beams"""
        raise NotImplementedError

    def cleanup_caching(self) -> None:
        """Clean up any resources or hooks after decoding is finished"""


class PyTorchInference(Inference):
    def __init__(self, model: "Whisper", initial_token_length: int):
        self.model: "Whisper" = model
        self.initial_token_length = initial_token_length
        self.kv_cache = {}
        self.hooks = []

        key_modules = [block.attn.key for block in self.model.decoder.blocks]
        value_modules = [block.attn.value for block in self.model.decoder.blocks]
        self.kv_modules = key_modules + value_modules

    def logits(self, tokens: Tensor, audio_features: Tensor) -> Tensor:
        if not self.kv_cache:
            self.kv_cache, self.hooks = self.model.install_kv_cache_hooks()

        if tokens.shape[-1] > self.initial_token_length:
            # only need to use the last token except in the first forward pass
            tokens = tokens[:, -1:]

        return self.model.decoder(tokens, audio_features, kv_cache=self.kv_cache)

    def cleanup_caching(self):
        for hook in self.hooks:
            hook.remove()

        self.kv_cache = {}
        self.hooks = []

    def rearrange_kv_cache(self, source_indices):
        if source_indices != list(range(len(source_indices))):
            for module in self.kv_modules:
                # update the key/value cache to contain the selected sequences
                self.kv_cache[module] = self.kv_cache[module][source_indices].detach()


class SequenceRanker:
    def rank(
        self, tokens: List[List[Tensor]], sum_logprobs: List[List[float]]
    ) -> List[int]:
        """
        Given a list of groups of samples and their cumulative log probabilities,
        return the indices of the samples in each group to select as the final result
        """
        raise NotImplementedError


class MaximumLikelihoodRanker(SequenceRanker):
    """
    Select the sample with the highest log probabilities, penalized using either
    a simple length normalization or Google NMT paper's length penalty
    """

    def __init__(self, length_penalty: Optional[float]):
        self.length_penalty = length_penalty

    def rank(self, tokens: List[List[Tensor]], sum_logprobs: List[List[float]]):
        def scores(logprobs, lengths):
            result = []
            for logprob, length in zip(logprobs, lengths):
                if self.length_penalty is None:
                    penalty = length
                else:
                    # from the Google NMT paper
                    penalty = ((5 + length) / 6) ** self.length_penalty
                result.append(logprob / penalty)
            return result

        # get the sequence with the highest score
        lengths = [[len(t) for t in s] for s in tokens]
        return [np.argmax(scores(p, l)) for p, l in zip(sum_logprobs, lengths)]


class TokenDecoder:
    def reset(self):
        """Initialize any stateful variables for decoding a new sequence"""

    def update(
        self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor
    ) -> Tuple[Tensor, bool]:
        """Specify how to select the next token, based on the current trace and logits

        Parameters
        ----------
        tokens : Tensor, shape = (n_batch, current_sequence_length)
            all tokens in the context so far, including the prefix and sot_sequence tokens

        logits : Tensor, shape = (n_batch, vocab_size)
            per-token logits of the probability distribution at the current step

        sum_logprobs : Tensor, shape = (n_batch)
            cumulative log probabilities for each sequence

        Returns
        -------
        tokens : Tensor, shape = (n_batch, current_sequence_length + 1)
            the tokens, appended with the selected next token

        completed : bool
            True if all sequences have reached the end of text

        """
        raise NotImplementedError

    def finalize(
        self, tokens: Tensor, sum_logprobs: Tensor
    ) -> Tuple[Sequence[Sequence[Tensor]], List[List[float]]]:
        """Finalize search and return the final candidate sequences

        Parameters
        ----------
        tokens : Tensor, shape = (n_audio, n_group, current_sequence_length)
            all tokens in the context so far, including the prefix and sot_sequence

        sum_logprobs : Tensor, shape = (n_audio, n_group)
            cumulative log probabilities for each sequence

        Returns
        -------
        tokens : Sequence[Sequence[Tensor]], length = n_audio
            sequence of Tensors containing candidate token sequences, for each audio input

        sum_logprobs : List[List[float]], length = n_audio
            sequence of cumulative log probabilities corresponding to the above

        """
        raise NotImplementedError


class GreedyDecoder(TokenDecoder):
    def __init__(self, temperature: float, eot: int):
        self.temperature = temperature
        self.eot = eot

    def update(
        self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor
    ) -> Tuple[Tensor, bool]:
        if self.temperature == 0:
            next_tokens = logits.argmax(dim=-1)
        else:
            next_tokens = Categorical(logits=logits / self.temperature).sample()

        logprobs = F.log_softmax(logits.float(), dim=-1)
        current_logprobs = logprobs[torch.arange(logprobs.shape[0]), next_tokens]
        sum_logprobs += current_logprobs * (tokens[:, -1] != self.eot)

        next_tokens[tokens[:, -1] == self.eot] = self.eot
        tokens = torch.cat([tokens, next_tokens[:, None]], dim=-1)

        completed = (tokens[:, -1] == self.eot).all()
        return tokens, completed

    def finalize(self, tokens: Tensor, sum_logprobs: Tensor):
        # make sure each sequence has at least one EOT token at the end
        tokens = F.pad(tokens, (0, 1), value=self.eot)
        return tokens, sum_logprobs.tolist()


class BeamSearchDecoder(TokenDecoder):
    def __init__(
        self,
        beam_size: int,
        eot: int,
        inference: Inference,
        patience: Optional[float] = None,
    ):
        self.beam_size = beam_size
        self.eot = eot
        self.inference = inference
        self.patience = patience or 1.0
        self.max_candidates: int = round(beam_size * self.patience)
        self.finished_sequences = None

        assert (
            self.max_candidates > 0
        ), f"Invalid beam size ({beam_size}) or patience ({patience})"

    def reset(self):
        self.finished_sequences = None

    def update(
        self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor
    ) -> Tuple[Tensor, bool]:
        if tokens.shape[0] % self.beam_size != 0:
            raise ValueError(f"{tokens.shape}[0] % {self.beam_size} != 0")

        n_audio = tokens.shape[0] // self.beam_size
        if self.finished_sequences is None:  # for the first update
            self.finished_sequences = [{} for _ in range(n_audio)]

        logprobs = F.log_softmax(logits.float(), dim=-1)
        next_tokens, source_indices, finished_sequences = [], [], []
        for i in range(n_audio):
            scores, sources, finished = {}, {}, {}

            # STEP 1: calculate the cumulative log probabilities for possible candidates
            for j in range(self.beam_size):
                idx = i * self.beam_size + j
                prefix = tokens[idx].tolist()
                for logprob, token in zip(*logprobs[idx].topk(self.beam_size + 1)):
                    new_logprob = (sum_logprobs[idx] + logprob).item()
                    sequence = tuple(prefix + [token.item()])
                    scores[sequence] = new_logprob
                    sources[sequence] = idx

            # STEP 2: rank the candidates and keep the top beam_size sequences for each audio
            saved = 0
            for sequence in sorted(scores, key=scores.get, reverse=True):
                if sequence[-1] == self.eot:
                    finished[sequence] = scores[sequence]
                else:
                    sum_logprobs[len(next_tokens)] = scores[sequence]
                    next_tokens.append(sequence)
                    source_indices.append(sources[sequence])

                    saved += 1
                    if saved == self.beam_size:
                        break

            finished_sequences.append(finished)

        tokens = torch.tensor(next_tokens, device=tokens.device)
        self.inference.rearrange_kv_cache(source_indices)

        # add newly finished sequences to self.finished_sequences
        assert len(self.finished_sequences) == len(finished_sequences)
        for previously_finished, newly_finished in zip(
            self.finished_sequences, finished_sequences
        ):
            for seq in sorted(newly_finished, key=newly_finished.get, reverse=True):
                if len(previously_finished) >= self.max_candidates:
                    break  # the candidate list is full
                previously_finished[seq] = newly_finished[seq]

        # mark as completed if all audio has enough number of samples
        completed = all(
            len(sequences) >= self.max_candidates
            for sequences in self.finished_sequences
        )
        return tokens, completed

    def finalize(self, preceding_tokens: Tensor, sum_logprobs: Tensor):
        # collect all finished sequences, including patience, and add unfinished ones if not enough
        sum_logprobs = sum_logprobs.cpu()
        for i, sequences in enumerate(self.finished_sequences):
            if len(sequences) < self.beam_size:  # when not enough sequences are finished
                for j in list(np.argsort(sum_logprobs[i]))[::-1]:
                    sequence = preceding_tokens[i, j].tolist() + [self.eot]
                    sequences[tuple(sequence)] = sum_logprobs[i][j].item()
                    if len(sequences) >= self.beam_size:
                        break

        tokens: List[List[Tensor]] = [
            [torch.tensor(seq) for seq in sequences.keys()]
            for sequences in self.finished_sequences
        ]
        sum_logprobs: List[List[float]] = [
            list(sequences.values()) for sequences in self.finished_sequences
        ]
        return tokens, sum_logprobs


class LogitFilter:
    def apply(self, logits: Tensor, tokens: Tensor) -> None:
        """Apply any filtering or masking to logits in-place

        Parameters
        ----------
        logits : Tensor, shape = (n_batch, vocab_size)
            per-token logits of the probability distribution at the current step

        tokens : Tensor, shape = (n_batch, current_sequence_length)
            all tokens in the context so far, including the prefix and sot_sequence tokens

        """
        raise NotImplementedError


class SuppressBlank(LogitFilter):
    def __init__(self, tokenizer: Tokenizer, sample_begin: int):
        self.tokenizer = tokenizer
        self.sample_begin = sample_begin

    def apply(self, logits: Tensor, tokens: Tensor):
        if tokens.shape[1] == self.sample_begin:
            logits[:, self.tokenizer.encode(" ") + [self.tokenizer.eot]] = -np.inf


class SuppressTokens(LogitFilter):
    def __init__(self, suppress_tokens: Sequence[int]):
        self.suppress_tokens = list(suppress_tokens)

    def apply(self, logits: Tensor, tokens: Tensor):
        logits[:, self.suppress_tokens] = -np.inf


class ApplyTimestampRules(LogitFilter):
    def __init__(
        self,
        tokenizer: Tokenizer,
        sample_begin: int,
        max_initial_timestamp_index: Optional[int],
    ):
        self.tokenizer = tokenizer
        self.sample_begin = sample_begin
        self.max_initial_timestamp_index = max_initial_timestamp_index

    def apply(self, logits: Tensor, tokens: Tensor):
        # suppress <|notimestamps|> which is handled by without_timestamps
        if self.tokenizer.no_timestamps is not None:
            logits[:, self.tokenizer.no_timestamps] = -np.inf

        # timestamps have to appear in pairs, except directly before EOT; mask logits accordingly
        for k in range(tokens.shape[0]):
            sampled_tokens = tokens[k, self.sample_begin :]
            seq = [t for t in sampled_tokens.tolist()]
            last_was_timestamp = (
                len(seq) >= 1 and seq[-1] >= self.tokenizer.timestamp_begin
            )
            penultimate_was_timestamp = (
                len(seq) < 2 or seq[-2] >= self.tokenizer.timestamp_begin
            )

            if last_was_timestamp:
                if penultimate_was_timestamp:  # has to be non-timestamp
                    logits[k, self.tokenizer.timestamp_begin :] = -np.inf
                else:  # cannot be normal text tokens
                    logits[k, : self.tokenizer.eot] = -np.inf

            timestamps = sampled_tokens[
                sampled_tokens.ge(self.tokenizer.timestamp_begin)
            ]
            if timestamps.numel() > 0:
                # timestamps shouldn't decrease; forbid timestamp tokens smaller than the last,
                # and force each segment to have a nonzero length, to prevent infinite looping
                if last_was_timestamp and not penultimate_was_timestamp:
                    timestamp_last = timestamps[-1]
                else:
                    timestamp_last = timestamps[-1] + 1
                logits[k, self.tokenizer.timestamp_begin : timestamp_last] = -np.inf

        if tokens.shape[1] == self.sample_begin:
            # suppress generating non-timestamp tokens at the beginning
            logits[:, : self.tokenizer.timestamp_begin] = -np.inf

            # apply the `max_initial_timestamp` option
            if self.max_initial_timestamp_index is not None:
                last_allowed = (
                    self.tokenizer.timestamp_begin + self.max_initial_timestamp_index
                )
                logits[:, last_allowed + 1 :] = -np.inf

        # if the sum of probability over timestamps is above any other token, sample a timestamp
        logprobs = F.log_softmax(logits.float(), dim=-1)
        for k in range(tokens.shape[0]):
            timestamp_logprob = logprobs[k, self.tokenizer.timestamp_begin :].logsumexp(
                dim=-1
            )
            max_text_token_logprob = logprobs[k, : self.tokenizer.timestamp_begin].max()
            if timestamp_logprob > max_text_token_logprob:
                logits[k, : self.tokenizer.timestamp_begin] = -np.inf


class DecodingTask:
    inference: Inference
    sequence_ranker: SequenceRanker
    decoder: TokenDecoder
    logit_filters: List[LogitFilter]

    def __init__(self, model: "Whisper", options: DecodingOptions):
        self.model = model

        language = options.language or "en"
        tokenizer = get_tokenizer(
            model.is_multilingual,
            num_languages=model.num_languages,
            language=language,
            task=options.task,
        )
        self.tokenizer: Tokenizer = tokenizer
        self.options: DecodingOptions = self._verify_options(options)

        self.n_group: int = options.beam_size or options.best_of or 1
        self.n_ctx: int = model.dims.n_text_ctx
        self.sample_len: int = options.sample_len or model.dims.n_text_ctx // 2

        self.sot_sequence: Tuple[int] = tokenizer.sot_sequence
        if self.options.without_timestamps:
            self.sot_sequence = tokenizer.sot_sequence_including_notimestamps

        self.initial_tokens: Tuple[int] = self._get_initial_tokens()
        self.sample_begin: int = len(self.initial_tokens)
        self.sot_index: int = self.initial_tokens.index(tokenizer.sot)

        # inference: implements the forward pass through the decoder, including kv caching
        self.inference = PyTorchInference(model, len(self.initial_tokens))

        # sequence ranker: implements how to rank a group of sampled sequences
        self.sequence_ranker = MaximumLikelihoodRanker(options.length_penalty)

        # decoder: implements how to select the next tokens, given the autoregressive distribution
        if options.beam_size is not None:
            self.decoder = BeamSearchDecoder(
                options.beam_size, tokenizer.eot, self.inference, options.patience
            )
        else:
            self.decoder = GreedyDecoder(options.temperature, tokenizer.eot)

        # logit filters: applies various rules to suppress or penalize certain tokens
        self.logit_filters = []
        if self.options.suppress_blank:
            self.logit_filters.append(SuppressBlank(self.tokenizer, self.sample_begin))
        if self.options.suppress_tokens:
            self.logit_filters.append(SuppressTokens(self._get_suppress_tokens()))
        if not options.without_timestamps:
            precision = CHUNK_LENGTH / model.dims.n_audio_ctx  # usually 0.02 seconds
            max_initial_timestamp_index = None
            if options.max_initial_timestamp:
                max_initial_timestamp_index = round(
                    self.options.max_initial_timestamp / precision
                )
            self.logit_filters.append(
                ApplyTimestampRules(
                    tokenizer, self.sample_begin, max_initial_timestamp_index
                )
            )

    def _verify_options(self, options: DecodingOptions) -> DecodingOptions:
        if options.beam_size is not None and options.best_of is not None:
            raise ValueError("beam_size and best_of can't be given together")
        if options.temperature == 0:
            if options.best_of is not None:
                raise ValueError("best_of with greedy sampling (T=0) is not compatible")
        if options.patience is not None and options.beam_size is None:
            raise ValueError("patience requires beam_size to be given")
        if options.length_penalty is not None and not (
            0 <= options.length_penalty <= 1
        ):
            raise ValueError("length_penalty (alpha) should be a value between 0 and 1")

        return options

    def _get_initial_tokens(self) -> Tuple[int]:
        tokens = list(self.sot_sequence)

        if prefix := self.options.prefix:
            prefix_tokens = (
                self.tokenizer.encode(" " + prefix.strip())
                if isinstance(prefix, str)
                else prefix
            )
            if self.sample_len is not None:
                max_prefix_len = self.n_ctx // 2 - self.sample_len
                prefix_tokens = prefix_tokens[-max_prefix_len:]
            tokens = tokens + prefix_tokens

        if prompt := self.options.prompt:
            prompt_tokens = (
                self.tokenizer.encode(" " + prompt.strip())
                if isinstance(prompt, str)
                else prompt
            )
            tokens = (
                [self.tokenizer.sot_prev]
                + prompt_tokens[-(self.n_ctx // 2 - 1) :]
                + tokens
            )

        return tuple(tokens)

    def _get_suppress_tokens(self) -> Tuple[int]:
        suppress_tokens = self.options.suppress_tokens

        if isinstance(suppress_tokens, str):
            suppress_tokens = [int(t) for t in suppress_tokens.split(",")]

        if -1 in suppress_tokens:
            suppress_tokens = [t for t in suppress_tokens if t >= 0]
            suppress_tokens.extend(self.tokenizer.non_speech_tokens)
        elif suppress_tokens is None or len(suppress_tokens) == 0:
            suppress_tokens = []  # interpret empty string as an empty list
        else:
            assert isinstance(suppress_tokens, list), "suppress_tokens must be a list"

        suppress_tokens.extend(
            [
                self.tokenizer.transcribe,
                self.tokenizer.translate,
                self.tokenizer.sot,
                self.tokenizer.sot_prev,
                self.tokenizer.sot_lm,
            ]
        )
        if self.tokenizer.no_speech is not None:
            # no-speech probability is collected separately
            suppress_tokens.append(self.tokenizer.no_speech)

        return tuple(sorted(set(suppress_tokens)))

    def _get_audio_features(self, mel: Tensor):
        if self.options.fp16:
            mel = mel.half()

        if mel.shape[-2:] == (
            self.model.dims.n_audio_ctx,
            self.model.dims.n_audio_state,
        ):
            # encoded audio features are given; skip audio encoding
            audio_features = mel
        else:
            audio_features = self.model.encoder(mel)

        if audio_features.dtype != (
            torch.float16 if self.options.fp16 else torch.float32
        ):
            raise TypeError(
                f"audio_features has an incorrect dtype: {audio_features.dtype}"
            )

        return audio_features

    def _detect_language(self, audio_features: Tensor, tokens: Tensor):
        languages = [self.options.language] * audio_features.shape[0]
        lang_probs = None

        if self.options.language is None or self.options.task == "lang_id":
            lang_tokens, lang_probs = self.model.detect_language(
                audio_features, self.tokenizer
            )
            languages = [max(probs, key=probs.get) for probs in lang_probs]
            if self.options.language is None:
                tokens[:, self.sot_index + 1] = lang_tokens  # write language tokens

        return languages, lang_probs

    def _main_loop(self, audio_features: Tensor, tokens: Tensor):
        n_batch = tokens.shape[0]
        sum_logprobs: Tensor = torch.zeros(n_batch, device=audio_features.device)
        no_speech_probs = [np.nan] * n_batch

        try:
            for i in range(self.sample_len):
                logits = self.inference.logits(tokens, audio_features)

                if i == 0 and self.tokenizer.no_speech is not None:
                    # save the no_speech probabilities from the logits at the sot position
                    probs_at_sot = logits[:, self.sot_index].float().softmax(dim=-1)
                    no_speech_probs = probs_at_sot[:, self.tokenizer.no_speech].tolist()

                # now we need to consider the logits at the last token only
                logits = logits[:, -1]

                # apply the logit filters, e.g. for suppressing or penalizing certain tokens
                for logit_filter in self.logit_filters:
                    logit_filter.apply(logits, tokens)

                # expand the tokens tensor with the selected next tokens
                tokens, completed = self.decoder.update(tokens, logits, sum_logprobs)

                if completed or tokens.shape[-1] > self.n_ctx:
                    break
        finally:
            self.inference.cleanup_caching()

        return tokens, sum_logprobs, no_speech_probs

    @torch.no_grad()
    def run(self, mel: Tensor) -> List[DecodingResult]:
        self.decoder.reset()
        tokenizer: Tokenizer = self.tokenizer
        n_audio: int = mel.shape[0]

        audio_features: Tensor = self._get_audio_features(mel)  # encoder forward pass
        tokens: Tensor = torch.tensor([self.initial_tokens]).repeat(n_audio, 1)

        # detect language if requested, overwriting the language token
        languages, language_probs = self._detect_language(audio_features, tokens)
        if self.options.task == "lang_id":
            return [
                DecodingResult(
                    audio_features=features, language=language, language_probs=probs
                )
                for features, language, probs in zip(
                    audio_features, languages, language_probs
                )
            ]

        # repeat text tensors by the group size, for beam search or best-of-n sampling
        tokens = tokens.repeat_interleave(self.n_group, dim=0).to(audio_features.device)

        # call the main sampling loop
        tokens, sum_logprobs, no_speech_probs = self._main_loop(audio_features, tokens)

        # reshape the tensors to have (n_audio, n_group) as the first two dimensions
        audio_features = audio_features[:: self.n_group]
        no_speech_probs = no_speech_probs[:: self.n_group]
        assert audio_features.shape[0] == len(no_speech_probs) == n_audio

        tokens = tokens.reshape(n_audio, self.n_group, -1)
        sum_logprobs = sum_logprobs.reshape(n_audio, self.n_group)

        # get the final candidates for each group, and slice between the first sampled token and EOT
        tokens, sum_logprobs = self.decoder.finalize(tokens, sum_logprobs)
        tokens: List[List[Tensor]] = [
            [t[self.sample_begin : (t == tokenizer.eot).nonzero()[0, 0]] for t in s]
            for s in tokens
        ]

        # select the top-ranked sample in each group
        selected = self.sequence_ranker.rank(tokens, sum_logprobs)
        tokens: List[List[int]] = [t[i].tolist() for i, t in zip(selected, tokens)]
        texts: List[str] = [tokenizer.decode(t).strip() for t in tokens]

        sum_logprobs: List[float] = [lp[i] for i, lp in zip(selected, sum_logprobs)]
        avg_logprobs: List[float] = [
            lp / (len(t) + 1) for t, lp in zip(tokens, sum_logprobs)
        ]

        fields = (
            texts,
            languages,
            tokens,
            audio_features,
            avg_logprobs,
            no_speech_probs,
        )
        if len(set(map(len, fields))) != 1:
            raise RuntimeError(f"inconsistent result lengths: {list(map(len, fields))}")

        return [
            DecodingResult(
                audio_features=features,
                language=language,
                tokens=tokens,
                text=text,
                avg_logprob=avg_logprob,
                no_speech_prob=no_speech_prob,
                temperature=self.options.temperature,
                compression_ratio=compression_ratio(text),
            )
            for text, language, tokens, features, avg_logprob, no_speech_prob in zip(
                *fields
            )
        ]


@torch.no_grad()
def decode(
    model: "Whisper",
    mel: Tensor,
    options: DecodingOptions = DecodingOptions(),
    **kwargs,
) -> Union[DecodingResult, List[DecodingResult]]:
    """
    Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s).

    Parameters
    ----------
    model: Whisper
        the Whisper model instance

    mel: torch.Tensor, shape = (80, 3000) or (*, 80, 3000)
        A tensor containing the Mel spectrogram(s)

    options: DecodingOptions
        A dataclass that contains all necessary options for decoding 30-second segments

    Returns
    -------
    result: Union[DecodingResult, List[DecodingResult]]
        The result(s) of decoding contained in `DecodingResult` dataclass instance(s)
    """
    if single := mel.ndim == 2:
        mel = mel.unsqueeze(0)

    if kwargs:
        options = replace(options, **kwargs)

    result = DecodingTask(model, options).run(mel)

    return result[0] if single else result
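

# ---------------------------------------------------------------------------
# Usage sketch (not part of the upstream module): this block shows how the
# `DecodingOptions`, `decode()`, and language-detection pieces defined above
# are typically driven. It assumes the installed `whisper` package and a local
# audio file named "audio.mp3", both of which are illustrative assumptions.
# Run with `python -m whisper.decoding` if desired.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import whisper  # the package re-exports load_model, decode, etc.

    model = whisper.load_model("base")

    # load ~30 seconds of audio and compute its log-Mel spectrogram
    audio = whisper.pad_or_trim(whisper.load_audio("audio.mp3"))
    mel = whisper.log_mel_spectrogram(audio).to(model.device)

    # language identification (uses detect_language defined above)
    _, probs = model.detect_language(mel)
    print(f"detected language: {max(probs, key=probs.get)}")

    # greedy decoding; fp16=False keeps the computation in float32, e.g. on CPU
    result = decode(model, mel, DecodingOptions(temperature=0.0, fp16=False))
    print(result.text)

    # beam-search decoding with 5 beams (beam_size and best_of are mutually exclusive)
    result = decode(model, mel, DecodingOptions(beam_size=5, fp16=False))
    print(result.text)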