Spaces · Runtime error
Commit · 68ee998
Parent(s): 477b22c
remove llama
app.py CHANGED
@@ -4,8 +4,8 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.3")
 vicuna_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.3")
 
-llama_model = AutoModelForCausalLM.from_pretrained("luodian/llama-7b-hf")
-llama_tokenizer = AutoTokenizer.from_pretrained("luodian/llama-7b-hf")
+# llama_model = AutoModelForCausalLM.from_pretrained("luodian/llama-7b-hf")
+# llama_tokenizer = AutoTokenizer.from_pretrained("luodian/llama-7b-hf")
 
 # Define the function for generating responses
 def generate_response(model, tokenizer, prompt):
@@ -17,9 +17,9 @@ def generate_response(model, tokenizer, prompt):
 # Define the Gradio interface
 def chatbot_interface(prompt):
     vicuna_response = generate_response(vicuna_model, vicuna_tokenizer, prompt)
-    llama_response = generate_response(llama_model, llama_tokenizer, prompt)
+    # llama_response = generate_response(llama_model, llama_tokenizer, prompt)
 
-    return {"Vicuna-7B": vicuna_response
+    return {"Vicuna-7B": vicuna_response}
 
 iface = gr.Interface(fn=chatbot_interface,
                      inputs="text",
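After this commit the Space serves Vicuna-7B only, and the added closing brace lets the return statement's dict literal parse (the removed line is displayed without it). For context, a minimal sketch of the resulting app.py follows; the body of generate_response (file lines 11-16) and the gr.Interface output component are hidden by the diff, so those parts are assumptions based on standard transformers and Gradio usage, not the Space's actual code.

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.3")
vicuna_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.3")

# llama_model = AutoModelForCausalLM.from_pretrained("luodian/llama-7b-hf")
# llama_tokenizer = AutoTokenizer.from_pretrained("luodian/llama-7b-hf")

# Define the function for generating responses.
# NOTE: this body is an assumption; the diff hides file lines 11-16.
def generate_response(model, tokenizer, prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=128)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Define the Gradio interface
def chatbot_interface(prompt):
    vicuna_response = generate_response(vicuna_model, vicuna_tokenizer, prompt)
    # llama_response = generate_response(llama_model, llama_tokenizer, prompt)
    return {"Vicuna-7B": vicuna_response}

iface = gr.Interface(fn=chatbot_interface,
                     inputs="text",
                     outputs="json")  # output component is an assumption

iface.launch()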