import requests
from bs4 import BeautifulSoup
from transformers import AutoModelForCausalLM, AutoTokenizer

# Scrape the page and extract its visible text
url = 'https://feedspark.com'
response = requests.get(url, timeout=30)
response.raise_for_status()  # fail early on HTTP errors
soup = BeautifulSoup(response.content, 'html.parser')
text = soup.get_text(separator=' ', strip=True)

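# Note: meta-llama checkpoints on the Hugging Face Hub are gated; you may need
# to accept the model license and authenticate (e.g. `huggingface-cli login`)
# before the weights can be downloaded.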
# Load a text-only Llama 2 chat model (a Vision-Instruct checkpoint cannot be
# loaded with AutoModelForCausalLM)
model_name = "meta-llama/Llama-2-7b-chat-hf"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto")

# Build the prompt, truncating the scraped text so it fits in the model's
# 4096-token context window with room left for the generated summary
input_text = f"Summarize the following content:\n{text}"
inputs = tokenizer(input_text, return_tensors='pt', truncation=True, max_length=3584)

# Generate the summary; max_new_tokens bounds only the generated tokens,
# unlike max_length, which also counts the (long) input prompt
outputs = model.generate(**inputs, max_new_tokens=512)
# Decode only the newly generated tokens, skipping the echoed prompt
summary = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)

print(summary)
