llama2-demo / app.py
learningai's picture
Update app.py
a8c8525
raw
history blame
356 Bytes
"""Minimal Llama-2 text-generation demo using the transformers pipeline."""
import os

import torch
from huggingface_hub import login
# Use a pipeline as a high-level helper
from transformers import pipeline

# SECURITY: the original script hard-coded a live Hugging Face token here.
# That credential is now leaked in the repo history and must be revoked and
# rotated on the Hub. Read the token from the environment instead.
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)

# NOTE(review): Llama-2 is a gated model — the authenticated account must
# have accepted the license on the Hub or this download will fail.
pipe = pipeline("text-generation", model="meta-llama/Llama-2-7b-chat-hf")

print("Providing input to the pipeline.....")
response = pipe("Hello, how are you?")
print(response)