prateekbh committed
Commit 2b76edc
1 Parent(s): c434d8b

Update app.py

Files changed (1):
  1. app.py +12 -3
app.py CHANGED
@@ -38,12 +38,21 @@ class StopOnTokens(StoppingCriteria):
                 return True
         return False
 
+def format_prompt(self, message, history):
+    prompt = ""
+    if history:
+        for user_prompt, bot_response in history:
+            prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
+            prompt += f"<start_of_turn>model{bot_response}"
+    prompt += f"<start_of_turn>user{message}<end_of_turn><start_of_turn>modelo"
+    return prompt
+
 def getProductDetails(history, image):
     product_description=getImageDescription(image)
     clients = InferenceClient("google/gemma-7b")
     rand_val = random.randint(1, 1111111111111111)
     if not history:
-        history = [[{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "hello"}]]
+        history = []
     generate_kwargs = dict(
         temperature=0.67,
         max_new_tokens=1024,
@@ -54,9 +63,9 @@ def getProductDetails(history, image):
     )
     system_prompt="you're a helpful e-commerce marketting assitant"
     prompt="Write me a poem"
-    formatted_prompt = self.format_prompt(f"{system_prompt}, {prompt}", history)
+    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=stream_output, details=True, return_full_text=False)
-    # output = ""
+    output = ""
 
     # for response in stream:
     #     output += response.token.text
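
For context, a minimal sketch (not part of this commit) of how the new format_prompt helper and the streamed text_generation call could be wired together with huggingface_hub's InferenceClient. It drops the stray self parameter, unifies the clients/client naming from the diff, assumes stream_output is True, reads the trailing "modelo" as a typo for "model" (matching the history turns it appends), and fills in the commented-out token-accumulation loop.

# Sketch only, under the assumptions stated above.
import random
from huggingface_hub import InferenceClient

def format_prompt(message, history):
    # Gemma-style turn markers, as in the committed helper (without the stray `self`).
    prompt = ""
    if history:
        for user_prompt, bot_response in history:
            prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
            prompt += f"<start_of_turn>model{bot_response}"
    prompt += f"<start_of_turn>user{message}<end_of_turn><start_of_turn>model"
    return prompt

client = InferenceClient("google/gemma-7b")
generate_kwargs = dict(
    temperature=0.67,
    max_new_tokens=1024,
    seed=random.randint(1, 1111111111111111),  # assumed purpose of rand_val in the diff
)

system_prompt = "you're a helpful e-commerce marketing assistant"
prompt = "Write me a poem"
formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history=[])

# Accumulate streamed tokens, as the commented-out loop in the diff suggests.
output = ""
for response in client.text_generation(
    formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
):
    output += response.token.text
print(output)

Note that the committed code still assigns clients = InferenceClient("google/gemma-7b") while calling client.text_generation(...), so one of the two names would need to change for the function to run as written.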