Disable data saving

This commit comments out the save_inputs_and_outputs helper, the do_save flag threaded through generate() and retry_last_answer(), the "Store data" consent checkbox, and the matching do_save entries in the event handlers' inputs lists.
app.py CHANGED
@@ -29,24 +29,24 @@ def randomize_seed_generator():
     return seed
 
 
-def save_inputs_and_outputs(now, inputs, outputs, generate_kwargs, model):
-    buffer = StringIO()
-    timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
-    file_name = f"prompts_{timestamp}.jsonl"
-    data = {"model": model, "inputs": inputs, "outputs": outputs, "generate_kwargs": generate_kwargs}
-    pd.DataFrame([data]).to_json(buffer, orient="records", lines=True)
+# def save_inputs_and_outputs(now, inputs, outputs, generate_kwargs, model):
+#     buffer = StringIO()
+#     timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
+#     file_name = f"prompts_{timestamp}.jsonl"
+#     data = {"model": model, "inputs": inputs, "outputs": outputs, "generate_kwargs": generate_kwargs}
+#     pd.DataFrame([data]).to_json(buffer, orient="records", lines=True)
 
-    # Push to Hub
-    upload_file(
-        path_in_repo=f"{now.date()}/{now.hour}/{file_name}",
-        path_or_fileobj=buffer.getvalue().encode(),
-        repo_id=DIALOGUES_DATASET,
-        token=HF_TOKEN,
-        repo_type="dataset",
-    )
+#     # Push to Hub
+#     upload_file(
+#         path_in_repo=f"{now.date()}/{now.hour}/{file_name}",
+#         path_or_fileobj=buffer.getvalue().encode(),
+#         repo_id=DIALOGUES_DATASET,
+#         token=HF_TOKEN,
+#         repo_type="dataset",
+#     )
 
-    # Clean and rerun
-    buffer.close()
+#     # Clean and rerun
+#     buffer.close()
 
 
 def get_total_inputs(inputs, chatbot, preprompt, user_name, assistant_name, sep):
@@ -94,7 +94,7 @@ def generate(
     top_p,
     max_new_tokens,
     repetition_penalty,
-    do_save=True,
+    # do_save=True,
 ):
     client = Client(
         model2endpoint[model_name],
@@ -177,14 +177,14 @@ def generate(
 
     yield chat, history, user_message, ""
 
-    if HF_TOKEN and do_save:
-        try:
-            now = datetime.datetime.now()
-            current_time = now.strftime("%Y-%m-%d %H:%M:%S")
-            print(f"[{current_time}] Pushing prompt and completion to the Hub")
-            save_inputs_and_outputs(now, prompt, output, generate_kwargs, model_name)
-        except Exception as e:
-            print(e)
+    # if HF_TOKEN and do_save:
+    #     try:
+    #         now = datetime.datetime.now()
+    #         current_time = now.strftime("%Y-%m-%d %H:%M:%S")
+    #         print(f"[{current_time}] Pushing prompt and completion to the Hub")
+    #         save_inputs_and_outputs(now, prompt, output, generate_kwargs, model_name)
+    #     except Exception as e:
+    #         print(e)
 
     return chat, history, user_message, ""
 
@@ -231,7 +231,7 @@ def retry_last_answer(
     top_p,
     max_new_tokens,
     repetition_penalty,
-    do_save,
+    # do_save,
 ):
     if chat and history:
         # Removing the previous conversation from chat
@@ -255,7 +255,7 @@ def retry_last_answer(
         top_p,
         max_new_tokens,
        repetition_penalty,
-        do_save,
+        # do_save,
     )
 
 
@@ -294,12 +294,12 @@ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
         """
     )
 
-    with gr.Row():
-        do_save = gr.Checkbox(
-            value=True,
-            label="Store data",
-            info="You agree to the storage of your prompt and generated text for research and development purposes:",
-        )
+    # with gr.Row():
+    #     do_save = gr.Checkbox(
+    #         value=True,
+    #         label="Store data",
+    #         info="You agree to the storage of your prompt and generated text for research and development purposes:",
+    #     )
 
     with gr.Row():
         selected_model = gr.Radio(choices=model_names, value=model_names[1], label="Select a model")
@@ -406,7 +406,7 @@ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
             top_p,
             max_new_tokens,
             repetition_penalty,
-            do_save,
+            # do_save,
         ],
         outputs=[chatbot, history, last_user_message, user_message],
     )
@@ -425,7 +425,7 @@ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
             top_p,
             max_new_tokens,
             repetition_penalty,
-            do_save,
+            # do_save,
         ],
         outputs=[chatbot, history, last_user_message, user_message],
    )
@@ -443,7 +443,7 @@ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
             top_p,
             max_new_tokens,
             repetition_penalty,
-            do_save,
+            # do_save,
         ],
         outputs=[chatbot, history, last_user_message, user_message],
     )
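For reference, the path being disabled implemented a simple per-prompt logging pattern: build one JSONL record in memory with pandas, then push it to a Hub dataset repo with huggingface_hub.upload_file, sharding files by date and hour. Below is a runnable sketch of that pattern reconstructed from the commented-out code above; the DIALOGUES_DATASET repo id and HF_TOKEN values are placeholders for the Space's real config, not taken from this commit.

import datetime
from io import StringIO

import pandas as pd
from huggingface_hub import upload_file

DIALOGUES_DATASET = "your-org/your-dialogues-dataset"  # placeholder repo id
HF_TOKEN = "hf_..."  # placeholder; the Space reads this from its secrets

def save_inputs_and_outputs(now, inputs, outputs, generate_kwargs, model):
    # Serialize a single prompt/completion record as one JSONL row in memory.
    buffer = StringIO()
    timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
    file_name = f"prompts_{timestamp}.jsonl"
    data = {"model": model, "inputs": inputs, "outputs": outputs, "generate_kwargs": generate_kwargs}
    pd.DataFrame([data]).to_json(buffer, orient="records", lines=True)

    # Push the row to the dataset repo, sharded by date and hour.
    upload_file(
        path_in_repo=f"{now.date()}/{now.hour}/{file_name}",
        path_or_fileobj=buffer.getvalue().encode(),
        repo_id=DIALOGUES_DATASET,
        token=HF_TOKEN,
        repo_type="dataset",
    )
    buffer.close()

Each call creates one small commit in the dataset repo, which is why the original generate() wrapped it in a broad try/except: a failed upload should log the error but never break text generation.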
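On the UI side, the commit removes the consent checkbox entirely rather than defaulting it off. The following is a minimal sketch of the wiring that existed before, assuming the usual Gradio pattern visible in app.py of passing the checkbox component through each event's inputs list; the echo handler below is a simplified stand-in for the real streaming generate().

import gradio as gr

def generate(user_message, do_save=True):
    # Stand-in for the real handler, which streams from a text-generation
    # endpoint; do_save arrives as the checkbox's current value.
    if do_save:
        print("would push prompt and completion to the Hub")
    return f"echo: {user_message}"

with gr.Blocks(analytics_enabled=False) as demo:
    with gr.Row():
        do_save = gr.Checkbox(
            value=True,
            label="Store data",
            info="You agree to the storage of your prompt and generated text for research and development purposes:",
        )
    user_message = gr.Textbox(label="Prompt")
    output = gr.Textbox(label="Completion")
    user_message.submit(generate, inputs=[user_message, do_save], outputs=[output])

demo.launch()

This also explains why the do_save entries in the inputs lists had to be commented out in the same commit: Gradio maps each event's inputs positionally onto the handler's parameters, so removing the parameter while leaving the list entry (or vice versa) would raise an argument-count error.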