sarim committed
Commit 41dd208 · 1 Parent(s): 9489fce

decrease chunk size

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -33,7 +33,7 @@ model = GroqModel('llama-3.1-70b-versatile', api_key = api_key)
 
 
 
 
-def split_into_token_chunks(text: str, max_tokens: int = 2500) -> list:
+def split_into_token_chunks(text: str, max_tokens: int = 1500) -> list:
     """
     Splits a long string into chunks of a specified maximum number of tokens (words).
 
@@ -82,8 +82,8 @@ async def ppt_content(data):
     listOfString = split_into_token_chunks("".join(data))
     print(len(listOfString))
     message_history: list[ModelMessage] = []
-    for i, chunk in enumerate(listOfString):
-        print(f"Chunk {i}:\n{chunk}\n")
+    # for i, chunk in enumerate(listOfString):
+    #     print(f"Chunk {i}:\n{chunk}\n")
 
     for x in listOfString:
         result = agent.run_sync(user_prompt = f"Create me a powerpoint presentation {x}",message_history = message_history)
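For reference, a minimal sketch of what split_into_token_chunks likely looks like, inferred only from the signature and docstring visible in this diff (the function body is not shown here, so the whitespace-based word splitting below is an assumption):

# Hypothetical reconstruction; only the signature and docstring are
# confirmed by the diff above. "Tokens" are approximated as
# whitespace-separated words, per the docstring.
def split_into_token_chunks(text: str, max_tokens: int = 1500) -> list:
    """
    Splits a long string into chunks of a specified maximum number of tokens (words).
    """
    words = text.split()
    return [
        " ".join(words[i:i + max_tokens])
        for i in range(0, len(words), max_tokens)
    ]

Under this reading, lowering max_tokens from 2500 to 1500 simply yields more, smaller chunks, so each agent.run_sync call in ppt_content receives a shorter user_prompt while sharing the same message_history.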