import math
from types import MethodType
from typing import Any, Dict, List, Optional, Tuple, Union

from .state import PartialState
from .utils import (
    calculate_maximum_sizes,
    convert_bytes,
    copy_tensor_to_devices,
    ignorant_find_batch_size,
    infer_auto_device_map,
    is_pippy_available,
    pad_input_tensors,
    send_to_device,
)


def generate_device_map(model, num_processes: int = 1, no_split_module_classes=None, max_memory: dict = None):
    """
    Calculates the device map for `model` with an offset for PiPPy
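
    Example:

    ```python
    # An illustrative sketch (not from the library docs); the module names in
    # the resulting map depend entirely on the model's architecture.
    device_map = generate_device_map(model, num_processes=2)
    # Maps module names to process ranks, e.g.:
    # {"embed": 0, "layers.0": 0, "layers.1": 1, "head": 1}
    ```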
    """
    if num_processes == 1:
        return infer_auto_device_map(model, no_split_module_classes=no_split_module_classes, clean_result=False)
    if max_memory is None:
        model_size, shared = calculate_maximum_sizes(model)

        # Evenly divide the model (plus its largest shared tensor) across processes
        memory = (model_size + shared[0]) / num_processes
        memory = convert_bytes(memory)
        value, ending = memory.split(" ")

        # Add a 10% buffer to deal with potential extra shared memory instances
        memory = math.ceil(float(value)) * 1.1
        memory = f"{memory} {ending}"
        max_memory = {i: memory for i in range(num_processes)}
    device_map = infer_auto_device_map(
        model,
        max_memory=max_memory,
        no_split_module_classes=no_split_module_classes,
        clean_result=False,
    )
    return device_map


def find_pippy_batch_size(args, kwargs):
    """
    Extracts the batch size from the first tensor found in `args` or `kwargs`, returning `None` if none is found.
    """
    found_batch_size = None
    if args is not None:
        for arg in args:
            found_batch_size = ignorant_find_batch_size(arg)
            if found_batch_size is not None:
                break
    if kwargs is not None and found_batch_size is None:
        for kwarg in kwargs.values():
            found_batch_size = ignorant_find_batch_size(kwarg)
            if found_batch_size is not None:
                break
    return found_batch_size


def build_pipeline(model, split_points, args, kwargs, num_chunks):
    """
    Attaches the split points to the model based on `split_points` and generates a `PipelineStage`. Requires passing
    in the `args` and `kwargs` the model expects, placed on the CPU.

    Users can pass in a custom `num_chunks` as an optional hyper-parameter. By default it will use
    `AcceleratorState.num_processes`.
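
    Example:

    ```python
    # An illustrative sketch; the layer name is hypothetical. One split point
    # yields two stages, and each rank executes the stage matching its index.
    schedule = build_pipeline(
        model,
        split_points=["transformer.h.12"],
        args=(example_input,),
        kwargs={},
        num_chunks=2,
    )
    ```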
    """
    # Imported here to avoid a hard dependency at module import time
    from torch.distributed.pipelining import ScheduleGPipe, SplitPoint, pipeline

    state = PartialState()
    # Annotate each split point so the tracer knows where to cut the graph
    split_spec = {split_point: SplitPoint.BEGINNING for split_point in split_points}
    pipe = pipeline(
        model,
        mb_args=args,
        mb_kwargs=kwargs,
        split_spec=split_spec,
    )
    stage = pipe.build_stage(state.local_process_index, device=state.device)
    schedule = ScheduleGPipe(stage, num_chunks)

    return schedule


def pippy_forward(forward, num_chunks, gather_output, *args, **kwargs):
    state = PartialState()
    output = None

    if state.num_processes == 1:
        output = forward(*args, **kwargs)
    elif state.is_local_main_process:
        found_batch_size = find_pippy_batch_size(args, kwargs)
        if found_batch_size is None:
            raise ValueError("Could not find batch size from args or kwargs")
        if found_batch_size != num_chunks:
            args = pad_input_tensors(args, found_batch_size, num_chunks)
            kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks)
        forward(*args, **kwargs)
    elif state.is_last_process:
        output = forward()
    else:
        forward()
    if gather_output:
        # Each node gets a copy of the full output, which lives only on the last GPU
        output = copy_tensor_to_devices(output)
    return output


def prepare_pippy(
    model,
    split_points: Optional[Union[str, List[str]]] = "auto",
    no_split_module_classes: Optional[List[str]] = None,
    example_args: Optional[Tuple[Any]] = (),
    example_kwargs: Optional[Dict[str, Any]] = None,
    num_chunks: Optional[int] = None,
    gather_output: Optional[bool] = False,
):
    """
    Wraps `model` for pipeline parallel inference.

    Args:
        model (`torch.nn.Module`):
            A model we want to split for pipeline-parallel inference
        split_points (`str` or `List[str]`, defaults to 'auto'):
            How to generate the split points and chunk the model across each GPU. 'auto' will find the best balanced
            split given any model. Should be a list of layer names in the model to split by otherwise.
        no_split_module_classes (`List[str]`):
            A list of class names for layers we don't want to be split.
        example_args (tuple of model inputs):
            The expected inputs for the model that uses order-based inputs for a *single process*. Recommended to use
            this format if possible.
        example_kwargs (dict of model inputs):
            The expected inputs for the model that uses dictionary-based inputs for a *single process*. This is a
            *highly* limiting structure that requires the same keys be present at *all* inference calls. Not
            recommended unless the prior condition is true for all cases.
        num_chunks (`int`, defaults to the number of available GPUs):
            The number of different stages the Pipeline will have. By default it will assign one chunk per GPU, but
            this can be tuned. In general one should have `num_chunks >= num_gpus`.
        gather_output (`bool`, defaults to `False`):
            If `True`, the output from the last GPU (which holds the true outputs) is sent across to all GPUs.
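
    Example:

    ```python
    # A minimal sketch, assuming a script launched with one process per GPU
    # (e.g. `accelerate launch --num_processes 2 script.py`) and a pre-tokenized
    # `input_ids` batch; both names here are placeholders.
    model = prepare_pippy(model, example_args=(input_ids,))
    with torch.no_grad():
        output = model(input_ids)
    # `output` is `None` on all ranks except the last, unless `gather_output=True`
    ```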
    """
    if not is_pippy_available():
        raise ImportError("Using `torch.distributed.pipelining` requires PyTorch 2.4.0 or later.")
    state = PartialState()
    example_args = send_to_device(example_args, "cpu")
    example_kwargs = send_to_device(example_kwargs, "cpu")
    if num_chunks is None:
        num_chunks = state.num_processes
    if split_points == "auto":
        device_map = generate_device_map(model, num_chunks, no_split_module_classes=no_split_module_classes)
        split_points = []
        # The first module assigned to each rank > 0 marks a stage boundary
        for i in range(1, num_chunks):
            split_points.append(next(k for k, v in device_map.items() if v == i))
    model.hf_split_points = split_points
    stage = build_pipeline(model, split_points, example_args, example_kwargs, num_chunks)
    model._original_forward = model.forward
    model._original_call = model.__call__
    model.pippy_stage = stage
    model.hf_split_points = split_points

    def forward(*args, **kwargs):
        return pippy_forward(stage.step, num_chunks, gather_output, *args, **kwargs)

    # Wrap `forward` like a decorator so it can be popped when doing `extract_model_from_parallel`
    model_forward = MethodType(forward, model)
    forward.__wrapped__ = model_forward
    model.forward = forward
    return model