Spaces:
Sleeping
Sleeping
Oscar Wang
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -31,7 +31,7 @@ def generate_and_save_data():
|
|
31 |
}
|
32 |
],
|
33 |
temperature=1,
|
34 |
-
max_tokens=
|
35 |
top_p=1,
|
36 |
stream=True,
|
37 |
stop=None,
|
@@ -40,8 +40,10 @@ def generate_and_save_data():
|
|
40 |
prompt = ""
|
41 |
prompt_tokens = 0
|
42 |
for chunk in completion:
|
43 |
-
|
44 |
-
|
|
|
|
|
45 |
|
46 |
# Use the generated prompt to query the model again
|
47 |
second_completion = client.chat.completions.create(
|
@@ -53,7 +55,7 @@ def generate_and_save_data():
|
|
53 |
}
|
54 |
],
|
55 |
temperature=1,
|
56 |
-
max_tokens=
|
57 |
top_p=1,
|
58 |
stream=True,
|
59 |
stop=None,
|
@@ -62,8 +64,10 @@ def generate_and_save_data():
|
|
62 |
response = ""
|
63 |
response_tokens = 0
|
64 |
for chunk in second_completion:
|
65 |
-
|
66 |
-
|
|
|
|
|
67 |
|
68 |
# Update the combined token count
|
69 |
combined_tokens += (prompt_tokens + response_tokens)
|
|
|
31 |
}
|
32 |
],
|
33 |
temperature=1,
|
34 |
+
max_tokens=8000,
|
35 |
top_p=1,
|
36 |
stream=True,
|
37 |
stop=None,
|
|
|
40 |
prompt = ""
|
41 |
prompt_tokens = 0
|
42 |
for chunk in completion:
|
43 |
+
content = chunk.choices[0].delta.content
|
44 |
+
if content is not None:
|
45 |
+
prompt += content
|
46 |
+
prompt_tokens += len(content.split()) # Assuming tokens are words for simplicity
|
47 |
|
48 |
# Use the generated prompt to query the model again
|
49 |
second_completion = client.chat.completions.create(
|
|
|
55 |
}
|
56 |
],
|
57 |
temperature=1,
|
58 |
+
max_tokens=1024,
|
59 |
top_p=1,
|
60 |
stream=True,
|
61 |
stop=None,
|
|
|
64 |
response = ""
|
65 |
response_tokens = 0
|
66 |
for chunk in second_completion:
|
67 |
+
content = chunk.choices[0].delta.content
|
68 |
+
if content is not None:
|
69 |
+
response += content
|
70 |
+
response_tokens += len(content.split()) # Assuming tokens are words for simplicity
|
71 |
|
72 |
# Update the combined token count
|
73 |
combined_tokens += (prompt_tokens + response_tokens)
|