Upload app.py with huggingface_hub
app.py CHANGED
@@ -1,8 +1,5 @@
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 from langchain_core.prompts import ChatPromptTemplate
-from langchain.prompts import PromptTemplate
-from langchain_core.output_parsers import StrOutputParser
-from langchain.memory import ConversationSummaryMemory
 from langchain_huggingface import HuggingFacePipeline
 from langchain_core.runnables import RunnableSequence
 import gradio as gr
@@ -17,28 +14,26 @@ generator = pipeline(
     "text-generation",
     model=model,
     tokenizer=tokenizer,
-    max_new_tokens=
+    max_new_tokens=200,
     do_sample=True,
     temperature=0.7
 )

-
-
 # LangChain wrapper
 llm = HuggingFacePipeline(pipeline=generator)

 # Prompt template
 prompt = ChatPromptTemplate.from_messages([
-
+    ("system", "You are a helpful assistant. Please respond to the user queries."),
+    ("user", "Question: {question}")
 ])

 # Runnable sequence instead of LLMChain
-chain = prompt | llm
-
+chain = prompt | llm

 # Gradio interface
-def generate_answer(
-    result = chain.invoke({"
+def generate_answer(question):
+    result = chain.invoke({"question": question})
     return result

-gr.Interface(fn=generate_answer, inputs="text", outputs="text", title="Gemma 2B
+gr.Interface(fn=generate_answer, inputs="text", outputs="text", title="Gemma 2B Chat").launch()
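For context, a minimal sketch of the complete app.py this commit produces. The diff hides the model-loading block (new lines 6-13), so the checkpoint name "google/gemma-2b-it" below is an assumption inferred from the "Gemma 2B Chat" title, not taken from the diff; every other line comes from the visible hunks.

from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain_core.prompts import ChatPromptTemplate
from langchain_huggingface import HuggingFacePipeline
from langchain_core.runnables import RunnableSequence  # imported by the commit but unused: `prompt | llm` already builds a RunnableSequence
import gradio as gr

# Assumed model setup (hidden in the diff). Note the Gemma checkpoints on the
# Hub are gated, so loading them requires an accepted license / access token.
model_id = "google/gemma-2b-it"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Hugging Face text-generation pipeline; the commit pins max_new_tokens to 200.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=200,
    do_sample=True,
    temperature=0.7
)

# LangChain wrapper around the raw pipeline
llm = HuggingFacePipeline(pipeline=generator)

# Prompt template
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant. Please respond to the user queries."),
    ("user", "Question: {question}")
])

# LCEL composition: the | operator replaces the removed LLMChain-style setup
chain = prompt | llm

# Gradio interface
def generate_answer(question):
    result = chain.invoke({"question": question})
    return result

gr.Interface(fn=generate_answer, inputs="text", outputs="text", title="Gemma 2B Chat").launch()

Since the commit also drops ConversationSummaryMemory, the resulting app is stateless: each Gradio call runs the prompt-to-LLM sequence fresh, with no chat history carried between questions.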