Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -21,26 +21,24 @@ qa_relevance = Feedback(feedback_openai.relevance, name="Answer Relevance").on_i
|
|
21 |
# Create a Tru object
|
22 |
tru = Tru()
|
23 |
|
24 |
-
# Initialize the HuggingFacePipeline for local LLM
|
25 |
-
local_llm = HuggingFacePipeline.from_model_id(
|
26 |
-
model_id="Tonic/stablemed",
|
27 |
-
task="text-generation",
|
28 |
-
model_kwargs={"temperature": 0.2, "top_p": 0.95, "max_length": 256}
|
29 |
-
)
|
30 |
-
|
31 |
# Set the window memory to go back 4 turns
|
32 |
window_memory = ConversationBufferWindowMemory(k=4)
|
33 |
|
34 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
35 |
conversation = ConversationChain(
|
36 |
-
llm=
|
37 |
verbose=True,
|
38 |
memory=window_memory
|
39 |
)
|
40 |
|
41 |
-
# Update the conversation prompt template
|
42 |
conversation.prompt.template = '''The following is a consult between a clinical consultant and a public health and medical expert. The AI is an expert on medicine and public health and gives recommendations specific to location and conditions. If the AI does not know the answer to a question, it truthfully says it does not know.
|
43 |
-
|
44 |
Current conversation:
|
45 |
{history}
|
46 |
Human: {input}
|
@@ -49,17 +47,13 @@ AI:'''
|
|
49 |
# Wrap the conversation with TruChain to instrument it
|
50 |
tc_conversation = tru.Chain(conversation, app_id='Trulens-StableMed', feedbacks=[qa_relevance])
|
51 |
|
52 |
-
# Initialize Gradio Client
|
53 |
-
client = Client("https://tonic-stablemed-chat.hf.space/")
|
54 |
-
|
55 |
# Make a prediction using the wrapped conversation
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
api_name="/predict"
|
60 |
-
)
|
61 |
|
62 |
# Print the result
|
63 |
print(result)
|
64 |
|
|
|
65 |
tru.run_dashboard()
|
|
|
21 |
# Create a Tru object — the TruLens workspace that owns the record
# database and the evaluation dashboard started later via tru.run_dashboard().
tru = Tru()

# Set the window memory to go back 4 turns: only the last k=4 exchanges
# are injected into the {history} slot of the conversation prompt.
window_memory = ConversationBufferWindowMemory(k=4)
|
26 |
|
27 |
+
# Define a custom function to interact with the Gradio client
def gradio_client_interaction(prompt):
    """Send *prompt* to the hosted StableMed Space and return its reply.

    The same string is passed as both positional inputs of the Space's
    ``/predict`` endpoint, mirroring the original call.
    """
    # Reuse one Client across calls: constructing gradio_client.Client
    # re-fetches the Space configuration over HTTP every time, which is
    # wasteful when this runs once per conversation turn.
    if not hasattr(gradio_client_interaction, "_client"):
        gradio_client_interaction._client = Client("https://tonic-stablemed-chat.hf.space/")
    result = gradio_client_interaction._client.predict(prompt, prompt, api_name="/predict")
    # NOTE(review): Client.predict normally returns the output value
    # directly (or a tuple for multiple outputs), not a {'data': [...]}
    # payload — confirm this indexing against the Space's actual response.
    return result['data'][0]  # Assuming the response format
|
32 |
+
|
33 |
+
# Create the ConversationChain with the custom function
# NOTE(review): ConversationChain validates `llm` as a LangChain LLM/Runnable;
# passing a bare Python function here will most likely fail pydantic
# validation at construction time — it would need to be wrapped in a custom
# LLM subclass. Confirm against the installed langchain version.
conversation = ConversationChain(
    llm=gradio_client_interaction,
    verbose=True,  # log the fully-rendered prompt on each turn
    memory=window_memory
)
|
39 |
|
40 |
+
# Update the conversation prompt template
|
41 |
conversation.prompt.template = '''The following is a consult between a clinical consultant and a public health and medical expert. The AI is an expert on medicine and public health and gives recommendations specific to location and conditions. If the AI does not know the answer to a question, it truthfully says it does not know.
|
|
|
42 |
Current conversation:
|
43 |
{history}
|
44 |
Human: {input}
|
|
|
47 |
# Wrap the conversation with TruChain to instrument it: every call is
# recorded under app_id 'Trulens-StableMed' and scored by the
# qa_relevance feedback function defined earlier in the file.
tc_conversation = tru.Chain(conversation, app_id='Trulens-StableMed', feedbacks=[qa_relevance])
|
49 |
|
|
|
|
|
|
|
50 |
# Make a prediction using the wrapped conversation
user_input = "Howdy!"
system_prompt = "Howdy!"
# NOTE(review): TruChain does not expose a `generate` method in most
# trulens_eval versions — instrumented calls are usually made by invoking
# the wrapped chain directly (e.g. tc_conversation(user_input)) or via
# with_record(). Confirm this API against the installed trulens_eval.
result = tc_conversation.generate(user_input, system_prompt=system_prompt)
|
|
|
|
|
54 |
|
55 |
# Print the result of the instrumented call
print(result)

# Run the TruLens dashboard (starts a local web UI over the records
# accumulated in the Tru workspace; blocks/serves until stopped)
tru.run_dashboard()
|