Spaces:
Sleeping
Sleeping
Create appy.py
Browse files
appy.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Standard library.
import os

# Third-party: remote Gradio Space client, LangChain, TruLens evaluation.
from gradio_client import Client
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.llms import HuggingFacePipeline
from trulens_eval.feedback import Feedback
from trulens_eval.feedback import OpenAI as Feedback_OpenAI
from trulens_eval.schema import Select
from trulens_eval.tru import Tru

# Removed `from langchain import tru` — langchain exposes no `tru` name, so
# that line raised ImportError at startup; the Tru session object is provided
# by trulens_eval (imported above). Also removed the duplicate
# `from langchain import HuggingFacePipeline`, which was immediately shadowed
# by the canonical `langchain.llms` import kept here.
# Credentials are supplied through the process environment; each name
# resolves to None when the corresponding variable is unset.
openai_api_key = os.getenv("OPENAI_API_KEY")
huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
|
17 |
+
|
18 |
+
# Define a feedback function for query-statement relevance using OpenAI
|
19 |
+
feedback_openai = Feedback_OpenAI()
|
20 |
+
qa_relevance = Feedback(feedback_openai.relevance, name="Answer Relevance").on_input_output()
|
21 |
+
|
22 |
+
# Create a Tru object
|
23 |
+
tru = Tru()
|
# Sampling configuration for the locally hosted Alpaca model.
_generation_kwargs = {
    "temperature": 0.6,
    "top_p": 0.95,
    "max_length": 256,
}

# Load "chavinlo/alpaca-native" as a local text-generation pipeline
# (downloads/loads the model weights on first use).
local_llm = HuggingFacePipeline.from_model_id(
    model_id="chavinlo/alpaca-native",
    task="text-generation",
    model_kwargs=_generation_kwargs,
)
# Keep only the four most recent conversational turns in context.
window_memory = ConversationBufferWindowMemory(k=4)

# Verbose conversation chain over the local LLM, backed by the
# sliding-window memory above.
conversation = ConversationChain(
    memory=window_memory,
    llm=local_llm,
    verbose=True,
)
# Re-prime the chain's default prompt as a gardening expert by mutating the
# prompt template in place. The {history} and {input} placeholders must be
# preserved exactly — ConversationChain fills them on every call.
conversation.prompt.template = '''The following is a friendly conversation between a human and an AI gardening expert. The AI is an expert on gardening and gives recommendations specific to location and conditions. If the AI does not know the answer to a question, it truthfully says it does not know.

Current conversation:
{history}
Human: {input}
AI:'''
49 |
+
|
50 |
+
# Wrap the conversation with TruChain to instrument it
|
51 |
+
tc_conversation = tru.Chain(conversation, app_id='GardeningAIwithMemory_v1', feedbacks=[qa_relevance])
|
52 |
+
|
53 |
+
# Initialize Gradio Client
|
54 |
+
client = Client("https://tonic-stablemed-chat.hf.space/")
|
55 |
+
|
56 |
+
# Make a prediction using the wrapped conversation
|
57 |
+
result = client.predict(
|
58 |
+
"Howdy!", # str in 'user_input' Textbox component
|
59 |
+
"Howdy!", # str in 'system_prompt' Textbox component
|
60 |
+
api_name="/predict"
|
61 |
+
)
|
62 |
+
|
63 |
+
# Print the result
|
64 |
+
print(result)
|
65 |
+
|
66 |
+
tru.run_dashboard()
|