DarkAngel committed
Commit cceb174 · verified · 1 Parent(s): a4d01da

Update app.py

Files changed (1): app.py (+6 -8)
app.py CHANGED
@@ -22,14 +22,16 @@ def generate_response(shloka, transliteration):
             "content": f"Shloka: {shloka} Transliteration: {transliteration}"
         }
     ]
+
     inputs = tokenizer.apply_chat_template(
         input_message,
         tokenize=True,
-        add_generation_prompt=True, # Enable for generation
+        add_generation_prompt=True,
         return_tensors="pt"
-    ).to("cuda") # Assuming the model is running on GPU
+    ).to("cpu")
+
+    model = model.to("cpu")
 
-    # Generate response
     text_streamer = TextStreamer(tokenizer, skip_prompt=True)
     generated_tokens = model.generate(
         input_ids=inputs,
@@ -42,8 +44,7 @@ def generate_response(shloka, transliteration):
 
     raw_response = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
 
-    # Format the response
-    # Assuming raw_response contains English Meaning, Hindi Meaning, and Word Meaning in sequence
+
     try:
         sections = raw_response.split("Hindi Meaning:")
         english_meaning = sections[0].strip()
@@ -58,12 +59,10 @@ def generate_response(shloka, transliteration):
             f"Word Meaning:\n{word_meaning}"
         )
     except IndexError:
-        # In case the response format is not as expected
         formatted_response = raw_response
 
     return formatted_response
 
-# Gradio interface
 interface = gr.Interface(
     fn=generate_response,
     inputs=[
@@ -75,7 +74,6 @@ interface = gr.Interface(
     description="Input a Shloka with its transliteration, and this model will provide meanings in English and Hindi along with word meanings."
 )
 
-# Launch the interface
 if __name__ == "__main__":
     interface.launch()
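For orientation, here is a minimal sketch of the CPU generation path that these hunks converge on. It is an illustrative reconstruction, not the actual app.py: the checkpoint id, the "user" chat role, and the max_new_tokens value are assumptions not taken from this diff, and the real app loads its own model, tokenizer, and Gradio interface elsewhere in the file.

# Minimal sketch of the post-commit generation path; assumed names are marked in comments.
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

MODEL_ID = "example-org/shloka-model"  # hypothetical id; app.py loads its own checkpoint
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

def generate_response(shloka, transliteration):
    # Build a single-turn chat message from the two text inputs.
    input_message = [
        {
            "role": "user",  # assumed role; the diff does not show this field
            "content": f"Shloka: {shloka} Transliteration: {transliteration}"
        }
    ]

    # Tokenize with the model's chat template and keep the tensor on CPU,
    # matching the .to("cpu") change in this commit.
    inputs = tokenizer.apply_chat_template(
        input_message,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt"
    ).to("cpu")

    model.to("cpu")  # mirrors the added "model = model.to("cpu")" line

    # Stream tokens as they are generated, then decode the full sequence.
    text_streamer = TextStreamer(tokenizer, skip_prompt=True)
    generated_tokens = model.generate(
        input_ids=inputs,
        streamer=text_streamer,
        max_new_tokens=256,  # assumed; the hunk cuts off before the remaining generate() kwargs
    )
    return tokenizer.decode(generated_tokens[0], skip_special_tokens=True)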