Oscar Wang committed on
Commit
8d8e72d
·
verified ·
1 Parent(s): 50f4808

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -6
app.py CHANGED
@@ -31,7 +31,7 @@ def generate_and_save_data():
31
  }
32
  ],
33
  temperature=1,
34
- max_tokens=1024,
35
  top_p=1,
36
  stream=True,
37
  stop=None,
@@ -40,8 +40,10 @@ def generate_and_save_data():
40
  prompt = ""
41
  prompt_tokens = 0
42
  for chunk in completion:
43
- prompt += chunk.choices[0].delta.content or ""
44
- prompt_tokens += len(chunk.choices[0].delta.content.split()) # Assuming tokens are words for simplicity
 
 
45
 
46
  # Use the generated prompt to query the model again
47
  second_completion = client.chat.completions.create(
@@ -53,7 +55,7 @@ def generate_and_save_data():
53
  }
54
  ],
55
  temperature=1,
56
- max_tokens=8000,
57
  top_p=1,
58
  stream=True,
59
  stop=None,
@@ -62,8 +64,10 @@ def generate_and_save_data():
62
  response = ""
63
  response_tokens = 0
64
  for chunk in second_completion:
65
- response += chunk.choices[0].delta.content or ""
66
- response_tokens += len(chunk.choices[0].delta.content.split()) # Assuming tokens are words for simplicity
 
 
67
 
68
  # Update the combined token count
69
  combined_tokens += (prompt_tokens + response_tokens)
 
31
  }
32
  ],
33
  temperature=1,
34
+ max_tokens=8000,
35
  top_p=1,
36
  stream=True,
37
  stop=None,
 
40
  prompt = ""
41
  prompt_tokens = 0
42
  for chunk in completion:
43
+ content = chunk.choices[0].delta.content
44
+ if content is not None:
45
+ prompt += content
46
+ prompt_tokens += len(content.split()) # Assuming tokens are words for simplicity
47
 
48
  # Use the generated prompt to query the model again
49
  second_completion = client.chat.completions.create(
 
55
  }
56
  ],
57
  temperature=1,
58
+ max_tokens=1024,
59
  top_p=1,
60
  stream=True,
61
  stop=None,
 
64
  response = ""
65
  response_tokens = 0
66
  for chunk in second_completion:
67
+ content = chunk.choices[0].delta.content
68
+ if content is not None:
69
+ response += content
70
+ response_tokens += len(content.split()) # Assuming tokens are words for simplicity
71
 
72
  # Update the combined token count
73
  combined_tokens += (prompt_tokens + response_tokens)