Spaces:
Runtime error
Runtime error
acecalisto3
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -32,16 +32,17 @@ import mysql.connector
|
|
32 |
from mysql.connector import errorcode, pooling
|
33 |
from dotenv import load_dotenv
|
34 |
from huggingface_hub import login
|
|
|
35 |
|
36 |
model_name = "openlm-research/open_llama_3b_v2"
|
37 |
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, legacy=False)
|
38 |
-
model =
|
39 |
|
40 |
# Determine the maximum supported length for the model
|
41 |
-
max_supported_length =
|
42 |
|
43 |
openllama_pipeline = pipeline(
|
44 |
-
"
|
45 |
model=model,
|
46 |
tokenizer=tokenizer,
|
47 |
truncation=True,
|
@@ -49,19 +50,6 @@ openllama_pipeline = pipeline(
|
|
49 |
# ... other parameters
|
50 |
)
|
51 |
|
52 |
-
model_name = "openlm-research/open_llama_3b_v2" # Or another OpenLlama variant
|
53 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, legacy=False)
|
54 |
-
model = OpenLlamaForCausalLM.from_pretrained(model_name)
|
55 |
-
|
56 |
-
openllama_pipeline = pipeline(
|
57 |
-
"GenerationMixin",
|
58 |
-
model=model,
|
59 |
-
tokenizer=tokenizer,
|
60 |
-
truncation=True,
|
61 |
-
max_length=max_tokens, # Assuming max_tokens is your max_length variable
|
62 |
-
# ... other parameters
|
63 |
-
)
|
64 |
-
|
65 |
nlp = AutoTokenizer.from_pretrained("bert-base-uncased")
|
66 |
|
67 |
HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
|
|
|
32 |
from mysql.connector import errorcode, pooling
|
33 |
from dotenv import load_dotenv
|
34 |
from huggingface_hub import login
|
35 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
36 |
|
37 |
model_name = "openlm-research/open_llama_3b_v2"
|
38 |
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, legacy=False)
|
39 |
+
model = AutoModelForCausalLM.from_pretrained(model_name) # Use AutoModelForCausalLM
|
40 |
|
41 |
# Determine the maximum supported length for the model
|
42 |
+
max_supported_length = 2048 # You might need to adjust this
|
43 |
|
44 |
openllama_pipeline = pipeline(
|
45 |
+
"text-generation", # Use "text-generation"
|
46 |
model=model,
|
47 |
tokenizer=tokenizer,
|
48 |
truncation=True,
|
|
|
50 |
# ... other parameters
|
51 |
)
|
52 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
53 |
nlp = AutoTokenizer.from_pretrained("bert-base-uncased")
|
54 |
|
55 |
HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
|