import os
import sys

# Point the Hugging Face cache at a writable location before transformers is imported,
# so the setting is picked up when the library resolves its cache directory.
os.environ['TRANSFORMERS_CACHE'] = '/var/www/.cache/huggingface/hub/'
# Do not hardcode access tokens in source. Provide the gated-model token via the
# environment instead, e.g. `export HF_TOKEN=...` before running this script.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the tokenizer and model. Llama 3.2 checkpoints are not compatible with the
# legacy LlamaTokenizer/LlamaForCausalLM classes; the Auto classes resolve the
# correct architecture from the checkpoint config.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-11B-Vision-Instruct")
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.2-11B-Vision-Instruct",
    torch_dtype=torch.bfloat16,  # halves memory use for an 11B-parameter model
)
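# For reference: image+text inference with this checkpoint would use the multimodal
# (Mllama) classes instead (a minimal sketch, not exercised by this text-only script):
#
#   from transformers import MllamaForConditionalGeneration, AutoProcessor
#   processor = AutoProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision-Instruct")
#   vision_model = MllamaForConditionalGeneration.from_pretrained(
#       "meta-llama/Llama-3.2-11B-Vision-Instruct", torch_dtype=torch.bfloat16
#   )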

# Move the model to a GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Prepare text input from the command line
if len(sys.argv) < 2:
    sys.exit('Usage: python <script>.py "<prompt text>"')
text_input = sys.argv[1]
inputs = tokenizer(text_input, return_tensors="pt").to(device)
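# Note: for an Instruct checkpoint the chat template is normally applied so the prompt
# matches the model's training format (a sketch; the raw prompt above also works):
#
#   messages = [{"role": "user", "content": text_input}]
#   inputs = tokenizer.apply_chat_template(
#       messages, add_generation_prompt=True, return_dict=True, return_tensors="pt"
#   ).to(device)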

# Generate the output (max_length caps prompt + generated tokens combined;
# use max_new_tokens instead to bound only the generated continuation)
with torch.no_grad():
    outputs = model.generate(**inputs, max_length=5000, num_return_sequences=1)

# Decode only the newly generated tokens (generate() echoes the prompt at the start)
response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
print(response)
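
# Example invocation (the script filename below is an assumption, not given here):
#   export HF_TOKEN=<your-access-token>
#   python generate.py "Summarize the plot of Hamlet in two sentences."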
