from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "meta-llama/Llama-3.2-11B-Vision-Instruct"  # replace with the LLAMA2 variant you want
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype="auto")
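
# Optional: if GPU memory is tight, a 4-bit quantized load is a common alternative.
# This is a sketch assuming the bitsandbytes package is installed; the load above
# is fine if you have enough VRAM.
# from transformers import BitsAndBytesConfig
# model = AutoModelForCausalLM.from_pretrained(
#     model_name,
#     device_map="auto",
#     quantization_config=BitsAndBytesConfig(load_in_4bit=True),
# )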


import requests
from bs4 import BeautifulSoup

def fetch_webpage_text(url):
    # A browser-like User-Agent and a timeout make the request more robust;
    # some sites block the default requests agent or hang indefinitely.
    response = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=30)
    response.raise_for_status()  # fail fast on HTTP errors
    soup = BeautifulSoup(response.text, 'html.parser')

    # Extract visible text from paragraph and heading tags
    texts = [element.get_text(strip=True) for element in soup.find_all(['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6'])]
    return "\n".join(texts)

url = "https://www.aveda.co.uk/product/20192/87258/hair-care/all-hair-care/shampowder-dry-shampoo?size=56_g"
webpage_text = fetch_webpage_text(url)

# Prepare the input prompt (e.g., summarize or analyze the content)
prompt = f"can you share the ingredients details of following webpage content:\n\n{webpage_text}"

# Tokenize and generate a response
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)  # move inputs to the model's device
outputs = model.generate(**inputs, max_new_tokens=512)  # max_new_tokens caps the reply length regardless of prompt size

# Decode and print the model’s output
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
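
# Chat-tuned Llama 2 checkpoints expect the [INST] prompt format. A minimal
# sketch using the tokenizer's built-in chat template (available in recent
# transformers releases) instead of the raw string prompt above:
# messages = [{"role": "user", "content": prompt}]
# chat_inputs = tokenizer.apply_chat_template(
#     messages, add_generation_prompt=True, return_tensors="pt"
# ).to(model.device)
# chat_outputs = model.generate(chat_inputs, max_new_tokens=512)
# print(tokenizer.decode(chat_outputs[0], skip_special_tokens=True))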
