Update app.py
app.py CHANGED
@@ -9,17 +9,13 @@ from sentence_transformers import SentenceTransformer
 # Retrieve the token from environment variables
 huggingface_token = os.getenv('LLAMA_ACCES_TOKEN')
 
-# Assuming necessary initializations and model loading here
-# Retrieve the token from environment variables
-huggingface_token = os.getenv('LLAMA_ACCES_TOKEN')
-
 # Use the token with from_pretrained
 #tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
 #model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
 
 # Load the tokenizer and model
-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModelForCausalLM.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
 
 # Assuming BERTopic and other necessary components are initialized here
 # Initialize your BERTopic model
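The added lines repair the previously truncated from_pretrained calls (the old code ended mid-string) by pointing both loads at the public openai-community/gpt2 checkpoint, which needs no access token. A minimal sketch of exercising the loaded pair, assuming a stock transformers install; the prompt string is illustrative only:

# Hedged usage sketch (not from the Space itself): exercise the GPT-2
# tokenizer/model pair that this commit switches to.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

inputs = tokenizer("Summarize this topic:", return_tensors="pt")
# GPT-2 ships without a pad token; reusing EOS avoids a generate() warning.
outputs = model.generate(
    **inputs,
    max_new_tokens=30,
    do_sample=True,
    pad_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))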
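The trailing context comments reference a BERTopic model initialized elsewhere in app.py, which this hunk does not show. A hedged sketch of what that initialization typically looks like, reusing the SentenceTransformer import from the hunk header; the all-MiniLM-L6-v2 checkpoint and the docs placeholder are assumptions, not code from this Space:

# Hedged sketch of the BERTopic setup the comments allude to.
from bertopic import BERTopic
from sentence_transformers import SentenceTransformer

embedding_model = SentenceTransformer("all-MiniLM-L6-v2")  # assumed checkpoint
topic_model = BERTopic(embedding_model=embedding_model)

# Fitting requires a real corpus of documents:
# topics, probs = topic_model.fit_transform(docs)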