Update app.py
app.py CHANGED
@@ -1,50 +1,58 @@
-import asyncio
-
-import chainlit as cl
-
-from chain import Chain
 import os
 
-from
+from langchain.llms.huggingface_hub import HuggingFaceHub
+from langchain.prompts import ChatPromptTemplate
+from langchain.schema import StrOutputParser
+from langchain.schema.runnable import Runnable
+from langchain.schema.runnable.config import RunnableConfig
+
+from chainlit.playground.config import add_llm_provider
+from chainlit.playground.providers.langchain import LangchainGenericProvider
 import chainlit as cl
 
+# Instantiate the LLM
+llm = HuggingFaceHub(
+    model_kwargs={"max_length": 500},
+    repo_id="google/flan-t5-xxl",
+    huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
+)
+
+# Add the LLM provider
+add_llm_provider(
+    LangchainGenericProvider(
+        # It is important that the id of the provider matches the _llm_type
+        id=llm._llm_type,
+        # The name is not important. It will be displayed in the UI.
+        name="HuggingFaceHub",
+        # This should always be a Langchain llm instance (correctly configured)
+        llm=llm,
+        # If the LLM works with messages, set this to True
+        is_chat=False,
+    )
+)
 
-@cl.oauth_callback
-def oauth_callback(
-    provider_id: str,
-    token: str,
-    raw_user_data: Dict[str, str],
-    default_user: cl.User,
-) -> Optional[cl.User]:
-    return default_user
 
 @cl.on_chat_start
-async def
-
-
-
-
-
-
+async def on_chat_start():
+    prompt = ChatPromptTemplate.from_messages(
+        [
+            ("human", "{question}"),
+        ]
+    )
+    runnable = prompt | llm | StrOutputParser()
+    cl.user_session.set("runnable", runnable)
 
 
 @cl.on_message
-async def on_message(message:
-
-
-
-
-
-
-
-
-
-
-
-
-    await chain.text("Alright, here we go:")
-    coroutines = []
-    for i in range(num):
-        coroutines.append(chain.text_stream("1 2 3 4 5", delay=1, name=f"Counter {i + 1}"))
-    await asyncio.gather(*coroutines)
-    await chain.text_stream("Okay, I'm done counting now.", final=True)
+async def on_message(message: cl.Message):
+    runnable = cl.user_session.get("runnable")  # type: Runnable
+
+    msg = cl.Message(content="")
+
+    async for chunk in runnable.astream(
+        {"question": message.content},
+        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
+    ):
+        await msg.stream_token(chunk)
+
+    await msg.send()
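In short, this commit drops the old Chain-based counter demo and makes app.py build a LangChain pipeline around a HuggingFaceHub LLM, streaming its output through Chainlit. The same prompt | llm | StrOutputParser() pipeline can be exercised outside the Chainlit handlers; below is a minimal sketch, assuming HUGGINGFACEHUB_API_TOKEN is set in the environment and the langchain package is installed (the example question is only illustrative, not part of the commit):

import os

from langchain.llms.huggingface_hub import HuggingFaceHub
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser

# Same LLM configuration as in app.py above
llm = HuggingFaceHub(
    model_kwargs={"max_length": 500},
    repo_id="google/flan-t5-xxl",
    huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
)

# Same runnable that on_chat_start stores in the user session
prompt = ChatPromptTemplate.from_messages([("human", "{question}")])
runnable = prompt | llm | StrOutputParser()

# One-off blocking call; the app itself streams token by token via astream()
print(runnable.invoke({"question": "What is the capital of France?"}))

The Space itself serves app.py through the Chainlit server (typically chainlit run app.py), which is what wires the @cl.on_chat_start and @cl.on_message handlers to the chat UI.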