Pijush2023 committed on
Commit
7a077d7
·
verified ·
1 Parent(s): f0bef0b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -28
app.py CHANGED
@@ -1,32 +1,13 @@
1
  import gradio as gr
2
  import os
3
- import logging
4
- from langchain_core.prompts import ChatPromptTemplate
5
- from langchain_core.output_parsers import StrOutputParser
6
- from langchain_openai import ChatOpenAI
7
- from langchain_community.graphs import Neo4jGraph
8
- from typing import List, Tuple
9
- from pydantic import BaseModel, Field
10
- from langchain_core.messages import AIMessage, HumanMessage
11
- from langchain_core.runnables import (
12
- RunnableBranch,
13
- RunnableLambda,
14
- RunnablePassthrough,
15
- RunnableParallel,
16
- )
17
- from langchain_core.prompts.prompt import PromptTemplate
18
  import requests
19
  import tempfile
20
- from langchain.memory import ConversationBufferWindowMemory
21
- import time
22
- import logging
23
- from langchain.chains import ConversationChain
24
  import torch
25
- import torchaudio
26
- from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
27
  import numpy as np
28
- import threading
29
-
 
 
30
 
31
  # Setup Neo4j
32
  graph = Neo4jGraph(
@@ -35,10 +16,9 @@ graph = Neo4jGraph(
35
  password="Z10duoPkKCtENuOukw3eIlvl0xJWKtrVSr-_hGX1LQ4"
36
  )
37
 
38
- # Define a prompt template for generating responses
39
- template = """I am a guide for Birmingham, Alabama. I can provide recommendations and insights about the city, including events and activities.
40
- Ask your question directly, and I'll provide a precise and quick,short and crisp response in a conversational way without any Greet.
41
- {context}
42
 
43
  Data:
44
  {context}
@@ -71,6 +51,11 @@ def get_response(question):
71
  context = retrieve_from_neo4j(question)
72
  prompt = qa_prompt.format_prompt(context=context, question=question)
73
  response = chat_model(prompt.to_string())
 
 
 
 
 
74
  return response
75
  except Exception as e:
76
  return f"Error: {str(e)}"
@@ -141,4 +126,4 @@ with gr.Blocks() as demo:
141
  )
142
 
143
  # Launch the Gradio interface
144
- demo.launch(show_error=True, share=True)
 
1
  import gradio as gr
2
  import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  import requests
4
  import tempfile
 
 
 
 
5
  import torch
 
 
6
  import numpy as np
7
+ from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
8
+ from langchain_core.prompts import ChatPromptTemplate
9
+ from langchain_openai import ChatOpenAI
10
+ from langchain_community.graphs import Neo4jGraph
11
 
12
  # Setup Neo4j
13
  graph = Neo4jGraph(
 
16
  password="Z10duoPkKCtENuOukw3eIlvl0xJWKtrVSr-_hGX1LQ4"
17
  )
18
 
19
+ # Define a concise prompt template for generating responses
20
+ template = """I am a guide for Birmingham, Alabama. I will provide a precise and short response based solely on the provided data.
21
+ Do not include any additional commentary or context.
 
22
 
23
  Data:
24
  {context}
 
51
  context = retrieve_from_neo4j(question)
52
  prompt = qa_prompt.format_prompt(context=context, question=question)
53
  response = chat_model(prompt.to_string())
54
+
55
+ # Filter extraneous content, keeping only the answer part
56
+ if "Answer:" in response:
57
+ response = response.split("Answer:")[-1].strip() # Extract the part after "Answer:" and strip extra spaces
58
+
59
  return response
60
  except Exception as e:
61
  return f"Error: {str(e)}"
 
126
  )
127
 
128
  # Launch the Gradio interface
129
+ demo.launch(show_error=True, share=True)