Sreekan committed
Commit 23474ee · verified · 1 Parent(s): 9c379a0

Update app.py

Files changed (1)
app.py +7 -6
app.py CHANGED
@@ -1,4 +1,4 @@
-'''
+
 import gradio as gr
 from huggingface_hub import InferenceClient
 
@@ -64,7 +64,7 @@ demo = gr.ChatInterface(
 if __name__ == "__main__":
     demo.launch()
 
-
+'''
 
 import gradio as gr
 from langchain.chains import LLMChain
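With the opening ''' moved from line 1 down to line 67, the first block (the gr.ChatInterface demo) becomes the only live code; everything from line 68 onward now sits inside the string literal. A minimal sketch of the ChatInterface-plus-InferenceClient pattern that block follows; the respond() signature and the model name are illustrative assumptions, not taken from this diff:

import gradio as gr
from huggingface_hub import InferenceClient

# Assumed model; the model actually used in app.py is not visible in this hunk.
client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")

def respond(message, history):
    # ChatInterface passes the new message plus the running history;
    # only the new message is forwarded here, for brevity.
    out = client.chat_completion(
        [{"role": "user", "content": message}], max_tokens=256
    )
    return out.choices[0].message.content

demo = gr.ChatInterface(fn=respond)

if __name__ == "__main__":
    demo.launch()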
@@ -329,7 +329,9 @@ try:
     print(f"[TRACE] Workflow Result: {result}")  # Final workflow result
 except Exception as e:
     print(f"[ERROR] Workflow execution failed: {e}")
-'''
+
+
+
 from typing import Dict
 from pydantic import BaseModel
 from langgraph.graph import StateGraph, END
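The imports in this hunk (pydantic's BaseModel plus langgraph's StateGraph and END) point at a typed-state graph. A minimal sketch of that pattern, assuming a hypothetical AppointmentState schema and a single greet node (neither name comes from app.py):

from pydantic import BaseModel
from langgraph.graph import StateGraph, END

class AppointmentState(BaseModel):  # hypothetical state schema
    visitor_name: str = ""
    messages: list = []

def greet(state: AppointmentState) -> dict:
    # Nodes return partial state updates as plain dicts
    return {"messages": [f"Hello {state.visitor_name}"]}

graph = StateGraph(AppointmentState)
graph.add_node("greet", greet)
graph.set_entry_point("greet")
graph.add_edge("greet", END)
compiled_graph = graph.compile()

result = compiled_graph.invoke({"visitor_name": "Asha"})
print(result["messages"][0])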
@@ -343,7 +345,7 @@ import gradio as gr
 #print(torch.cuda.is_available())  # Check if a GPU is available
 #a = torch.Tensor([1]).cuda()  # Should succeed without errors
 #print(a)
-'''
+
 # Define HuggingFace LLM
 def create_llm():
     model_name = "Qwen/Qwen2.5-7B-Instruct-1M"
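Only the first line of create_llm() is visible here, pinning the model to Qwen/Qwen2.5-7B-Instruct-1M. One plausible shape for the rest of the function, reusing the InferenceClient already imported at the top of app.py (the body below is an assumption, not the actual implementation):

from huggingface_hub import InferenceClient

def create_llm():
    model_name = "Qwen/Qwen2.5-7B-Instruct-1M"
    client = InferenceClient(model=model_name)

    def generate(prompt: str) -> str:
        # Wrap the prompt as a single-turn chat and return the reply text
        out = client.chat_completion(
            [{"role": "user", "content": prompt}], max_tokens=512
        )
        return out.choices[0].message.content

    return generate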
@@ -447,7 +449,6 @@ def gradio_interface(visitor_name, visitor_mobile, doctor_name, department_name)
     # Execute workflow
     result = compiled_graph.invoke(state.dict())
     return result["messages"][0]
-'''
 
 iface = gr.Interface(
     fn=gradio_interface,
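gradio_interface() takes four fields and returns result["messages"][0], so the gr.Interface call truncated in this hunk plausibly wires up like the sketch below; the labels and output component are assumptions:

import gradio as gr

iface = gr.Interface(
    fn=gradio_interface,  # defined earlier in app.py
    inputs=[
        gr.Textbox(label="Visitor Name"),
        gr.Textbox(label="Visitor Mobile"),
        gr.Textbox(label="Doctor Name"),
        gr.Textbox(label="Department Name"),
    ],
    outputs=gr.Textbox(label="Result"),
)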
@@ -463,5 +464,5 @@ iface = gr.Interface(
 if __name__ == "__main__":
     iface.launch()
 
-
+'''
 