nvidia llm mistral
- app.py +17 -14
- requirements.txt +2 -0
app.py
CHANGED
@@ -1,21 +1,24 @@
 from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
+from langchain_nvidia_ai_endpoints import ChatNVIDIA
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.prompts import ChatPromptTemplate
 import gradio as gr
+import os

-model = AutoModelForCausalLM.from_pretrained(model_id)
-model.to_bettertransformer()
+os.environ["NVIDIA_API_KEY"] = "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"
+
+prompt = ChatPromptTemplate.from_messages([("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")])
+
+llm = ChatNVIDIA(model="mixtral_8x7b")
+chain = prompt | llm | StrOutputParser()

-def gpt(prompt, top_k, penalty_alpha):
-    return pipe(prompt, top_k=top_k, penalty_alpha=penalty_alpha)[0]["generated_text"]
-).
+# Stream the reply to Gradio: each yield is the full response so far.
+def chat(prompt, history):
+    response = ""
+    for chunk in chain.stream({"input": prompt}):
+        response += chunk  # StrOutputParser yields plain string deltas
+        yield response
+
+demo = gr.ChatInterface(chat).queue()
+
+demo.launch()
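The added lines hardcode the NVIDIA API key directly in app.py. A minimal alternative sketch, assuming the key is instead stored as a Hugging Face Space secret (or exported in the shell) under the name NVIDIA_API_KEY, which ChatNVIDIA reads from the environment:

    import os
    from langchain_nvidia_ai_endpoints import ChatNVIDIA

    # Fail fast if the secret is missing instead of embedding the key in the repo.
    if "NVIDIA_API_KEY" not in os.environ:
        raise RuntimeError("NVIDIA_API_KEY is not set; add it as a Space secret")

    llm = ChatNVIDIA(model="mixtral_8x7b")  # picks the key up from the environment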
requirements.txt
CHANGED
@@ -2,3 +2,5 @@ transformers
 gradio
 torch
 optimum
+langchain
+langchain-nvidia-ai-endpoints
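With the two new dependencies installed, the LangChain pieces of app.py can be smoke-tested outside Gradio. A short sketch reusing the prompt, model name, and chain from the commit (it assumes NVIDIA_API_KEY is set in the environment):

    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_nvidia_ai_endpoints import ChatNVIDIA

    prompt = ChatPromptTemplate.from_messages(
        [("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")]
    )
    chain = prompt | ChatNVIDIA(model="mixtral_8x7b") | StrOutputParser()

    # invoke() returns the whole reply as one string; stream() yields string
    # deltas, which is what the chat() handler in app.py consumes.
    print(chain.invoke({"input": "Introduce yourself in one sentence."}))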