Spaces: Runtime error
Commit 9407713 · 1 Parent(s): 32f98c6
Update app.py
app.py CHANGED
@@ -64,9 +64,9 @@ def audio_tag(
 
 
 def formatted_message(audio_class):
-    if cached_audio_class != audio_class:
-
-
+    #if cached_audio_class != audio_class:
+    cached_audio_class = audio_class
+    prefix = f"""You are going to act as a magical tool that allows for humans to communicate with non-human entities like
 rocks, crackling fire, trees, animals, and the wind. In order to do this, we're going to provide you the human's text input for the conversation.
 The goal is for you to embody that non-human entity and converse with the human.
 
@@ -78,27 +78,27 @@ Tree: Hello human, I am a tree
 
 Let's begin:
 Non-human Entity: {formatted_classname}"""
-
+    suffix = f'''{{history}}
 Human Input: {{human_input}}
 {formatted_classname}:'''
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    suffix = f'''Source: {audio_class}
+Length of Audio in Seconds: {audio_length}
+Human Input: {userText}
+{audio_class} Response:'''
+    template = prefix + suffix
+
+    prompt = PromptTemplate(
+        input_variables=["history", "human_input"],
+        template=template
+    )
+
+    chatgpt_chain = LLMChain(
+        llm=OpenAI(temperature=.5, openai_api_key=session_token),
+        prompt=prompt,
+        verbose=True,
+        memory=ConversationalBufferWindowMemory(k=2, ai=audio_class),
+    )
 
     output = chatgpt_chain.predict(human_input=message)
 
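After this commit, formatted_message builds its prompt by concatenating a prefix and a suffix and hands the result to PromptTemplate and LLMChain exactly as shown in the hunk above. Note that the second suffix assignment (the Source / Length of Audio block) immediately overwrites the first, so the history and human_input placeholders declared in input_variables no longer appear in the final template, which LangChain's template validation may reject. The following is a minimal, self-contained sketch of just the prompt assembly; the values at the top are hypothetical placeholders for data that app.py derives elsewhere (the audio tagger's predicted class, the clip length, the user's message), and the prefix omits the example exchange that sits in the unchanged lines between the two hunks.

# Minimal sketch of the prompt assembly introduced by this commit.
# The four values below are hypothetical placeholders for data that app.py
# computes elsewhere (audio tagger output, clip length, user input).
audio_class = "fire"
formatted_classname = "Fire"
audio_length = 5
userText = "Hello, who are you?"

prefix = f"""You are going to act as a magical tool that allows for humans to communicate with non-human entities like
rocks, crackling fire, trees, animals, and the wind. In order to do this, we're going to provide you the human's text input for the conversation.
The goal is for you to embody that non-human entity and converse with the human.

Let's begin:
Non-human Entity: {formatted_classname}"""

# First suffix keeps {history} and {human_input} as literal placeholders for LangChain...
suffix = f'''{{history}}
Human Input: {{human_input}}
{formatted_classname}:'''

# ...but the very next assignment replaces it with a fully interpolated string,
# so the final template contains no {history} or {human_input} placeholders.
suffix = f'''Source: {audio_class}
Length of Audio in Seconds: {audio_length}
Human Input: {userText}
{audio_class} Response:'''

template = prefix + suffix
print(template)

The PromptTemplate and LLMChain construction is left out of the sketch because it depends on the LangChain version the Space pins; whether ConversationalBufferWindowMemory(k=2, ai=audio_class) is accepted as written in the diff is version-dependent.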