Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
@@ -42,29 +42,6 @@ if torch.cuda.is_available():
|
|
42 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
43 |
tokenizer.pad_token = tokenizer.eos_token
|
44 |
|
45 |
-
def save_chat_history(chat_history):
    """Append *chat_history* to ``chat_history.json`` under a fresh UUID.

    Each saved entry has the shape ``{"id": <uuid4 str>, "chat_history": ...}``
    and is appended to the JSON list stored in the file (the file is created
    if it does not exist).

    Returns:
        str: the generated conversation id.
    """
    file_path = 'chat_history.json'
    conversation_id = str(uuid.uuid4())
    conversation_entry = {
        "id": conversation_id,
        "chat_history": chat_history,
    }

    # Load existing conversations. Start from an empty list when the file is
    # missing, unreadable, or holds invalid JSON — a corrupt history file
    # should not make every subsequent save crash.
    data = []
    if os.path.exists(file_path):
        try:
            # Explicit UTF-8: JSON on disk must not depend on the locale.
            with open(file_path, 'r', encoding='utf-8') as file:
                data = json.load(file)
        except (json.JSONDecodeError, OSError):
            data = []

    data.append(conversation_entry)

    with open(file_path, 'w', encoding='utf-8') as file:
        json.dump(data, file, indent=4)

    return conversation_id
|
68 |
def make_prompt(entry):
    """Wrap *entry* in the Human/Assistant prompt template fed to the model."""
    prefix = "### Human: YOUR INSTRUCTION HERE,ONLY TELL A STORY: "
    suffix = " ### Assistant:"
    return prefix + str(entry) + suffix
|
70 |
|
@@ -111,11 +88,6 @@ def generate(
|
|
111 |
for text in streamer:
|
112 |
outputs.append(text)
|
113 |
yield "".join(outputs)
|
114 |
-
# final_story = "".join(outputs) # The complete story
|
115 |
-
# conversation_id = save_chat_history(chat_history + [(message, final_story)])
|
116 |
-
|
117 |
-
yield f"Conversation ID: {conversation_id}"
|
118 |
-
|
119 |
|
120 |
chat_interface = gr.ChatInterface(
|
121 |
fn=generate,
|
|
|
42 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
43 |
tokenizer.pad_token = tokenizer.eos_token
|
44 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
45 |
def make_prompt(entry):
|
46 |
return f"### Human: YOUR INSTRUCTION HERE,ONLY TELL A STORY: {entry} ### Assistant:"
|
47 |
|
|
|
88 |
for text in streamer:
|
89 |
outputs.append(text)
|
90 |
yield "".join(outputs)
|
|
|
|
|
|
|
|
|
|
|
91 |
|
92 |
chat_interface = gr.ChatInterface(
|
93 |
fn=generate,
|