File size: 7,247 Bytes
1b846eb
 
d92c861
1b846eb
1bd9947
 
 
 
 
 
 
54db18f
13ac926
 
 
 
005c659
572cc27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6022fe1
 
 
 
 
 
 
 
 
 
 
 
572cc27
6022fe1
 
 
 
 
 
 
 
 
 
 
 
 
572cc27
d92c861
1b846eb
d92c861
 
 
 
 
 
 
 
13ac926
 
 
66e97f3
 
 
6022fe1
fed63e7
572cc27
 
13ac926
66e97f3
6022fe1
572cc27
6022fe1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e782b03
1b846eb
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import PlainTextResponse
from fastapi.middleware.cors import CORSMiddleware
from twilio.twiml.messaging_response import MessagingResponse
import os
import google.generativeai as genai

# Gemini API key is read from the environment variable literally named "key";
# a missing variable fails fast with KeyError at import time.
secret = os.environ["key"]

# Configure the google.generativeai client once at module load.
genai.configure(api_key=secret)
model = genai.GenerativeModel('gemini-pro')

# Stock photo URL; only referenced by the commented-out msg.media(...) call
# in the WhatsApp handler below.
GOOD_BOY_URL = (
    "https://images.unsplash.com/photo-1518717758536-85ae29035b6d?ixlib=rb-1.2.1"
    "&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1350&q=80"
)

# Few-shot prompt for the Gemini model: canned Nuron support Q&A exchanges,
# followed by an instruction to answer the appended user question in the
# support-agent voice only.  The user's WhatsApp message is concatenated
# after the trailing "question : " marker by the handler below.
# NOTE(review): the closing instruction contains a typo ("questio.") that is
# sent to the model verbatim — left untouched here because the string is
# runtime behavior; fix deliberately if desired.
prompt = """Certainly! Below is a chat conversation template for your AI company, Nuron, specialized in developing AI and ML solutions and chatbot systems. The conversation includes typical customer inquiries and responses that can be used during business hours (9 AM to 5 PM, Monday to Friday).

---

**Customer**: Hi, I'm interested in learning more about your AI and ML solutions. Can you tell me more?

**Nuron Support**: Hello! Thank you for reaching out to Nuron. We specialize in developing cutting-edge AI and ML solutions tailored to meet your business needs. Our services include predictive analytics, natural language processing, computer vision, and custom chatbot systems. How can we assist you today?

---

**Customer**: Who is behind Nuron?

**Nuron Support**: Nuron was founded by Arafath, our CEO, who has extensive experience in AI and ML technologies. Under his leadership, we have delivered numerous successful projects across various industries.

---

**Customer**: What are your working hours?

**Nuron Support**: Our working hours are from 9 AM to 5 PM, Monday to Friday. During these hours, our team is available to assist you with any inquiries or support you might need.

---

**Customer**: Can you develop a custom chatbot for my business?

**Nuron Support**: Absolutely! We specialize in creating custom chatbot systems designed to enhance customer engagement and streamline business processes. Please provide us with some details about your requirements, and we'll be happy to discuss how we can help.

---

**Customer**: How can I get in touch with Arafath?

**Nuron Support**: You can reach out to Arafath by emailing our support team at [email protected], and we will ensure your message is forwarded to him. Alternatively, you can schedule a meeting through our website.

---

**Customer**: What industries do you serve?

**Nuron Support**: We have experience working with a diverse range of industries including healthcare, finance, retail, and manufacturing. Our AI and ML solutions are adaptable and can be customized to fit the unique needs of any sector.

---

**Customer**: How do I get started with a project at Nuron?

**Nuron Support**: To get started, simply contact us through our website or email us at [email protected] with a brief description of your project. We'll arrange a consultation to understand your requirements and provide you with a tailored proposal.

---

**Customer**: What is the process for developing an AI solution with Nuron?

**Nuron Support**: Our process typically involves the following steps:
1. Initial consultation to understand your needs.
2. Proposal and project planning.
3. Data collection and analysis.
4. Model development and training.
5. Implementation and integration.
6. Testing and validation.
7. Deployment and support.

We ensure to keep you updated at each stage and incorporate your feedback to deliver the best solution.

---

**Customer**: Can you provide support after the project is completed?

**Nuron Support**: Yes, we offer comprehensive post-deployment support and maintenance services to ensure that your AI and ML solutions continue to perform optimally. Our team is always here to assist you with any updates or issues you may encounter.

---

**Customer**: Thank you for the information!

**Nuron Support**: You're welcome! If you have any more questions or need further assistance, feel free to contact us. Have a great day!

---

use this details to give answer for my questio.only give system response only(not include customer message)
question : """

# NOTE(review): google.generativeai is already imported at the top of the
# file; this re-import is redundant but harmless.
import google.generativeai as genai
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.agents.agent_types import AgentType
#from langchain_experimental.agents.agent_toolkits import create_csv_agent


# NOTE(review): `OpenAI` is imported twice from llama_index.llms and also
# shadows langchain.llms.OpenAI imported just above — confirm which binding
# is actually intended; none of them appear to be used directly in this file.
from llama_index.llms import OpenAI
from llama_index import VectorStoreIndex, SimpleDirectoryReader
from llama_index.llms import OpenAI
from llama_index import StorageContext, load_index_from_storage


# Fail-fast startup check: the bare subscript raises KeyError when the
# OPENAI_API_KEY environment variable is missing; the value is not stored.
os.environ["OPENAI_API_KEY"]

# Load a previously persisted vector index from ./llama_index when available;
# otherwise build it from the documents in ./userguid and persist it so the
# next startup is fast.
try:
    storage_context = StorageContext.from_defaults(persist_dir="llama_index")
    index = load_index_from_storage(storage_context=storage_context)
    print("loaded")
except Exception as exc:
    # The original bare `except:` silently swallowed everything, including
    # SystemExit/KeyboardInterrupt, and hid the real load failure.  Narrow
    # the catch and log the cause before falling back to a rebuild.
    print(f"index load failed ({exc!r}); rebuilding from documents")
    documents = SimpleDirectoryReader("userguid").load_data()
    index = VectorStoreIndex.from_documents(documents)
    index.storage_context.persist("llama_index")
    print("index created")

# Shared query engine used by the WhatsApp handler.
query_engine = index.as_query_engine()

app = FastAPI()

# NOTE(review): wildcard origins combined with allow_credentials=True is
# rejected by browsers under the CORS spec (credentialed responses may not
# use "*") and is overly permissive — confirm whether credentialed
# cross-origin access is actually required here.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

@app.post("/whatsapp")
async def reply_whatsapp(request: Request):
    """Twilio WhatsApp webhook.

    Answers the incoming message from the llama_index query engine; when the
    indexed documents cannot answer (the engine emits the sentinel phrase
    "please contact our helpdesk"), falls back to a short OpenAI chat answer
    plus a helpdesk-contact footer.

    Returns:
        PlainTextResponse containing TwiML (MessagingResponse XML) for Twilio.
    """
    # Bug fix: `openai` was used below but never imported anywhere in the
    # file, so the fallback branch raised NameError at runtime.
    import openai

    form_data = await request.form()
    # Default to "" — Twilio may omit "Body" (e.g. media-only messages), and
    # the original `.get("Body")` could return None and crash concatenation.
    user_query = form_data.get("Body", "")

    # NOTE(review): the original also called the Gemini model here
    # (model.generate_content(prompt + body)) and discarded the result — a
    # pure waste of latency and quota — so that call has been removed.

    response = MessagingResponse()

    # Ask the document index first; the instruction string (kept verbatim)
    # forces a fixed sentinel phrase when the answer is not in the data.
    engine_reply = query_engine.query("""        
          if you find the answer from provided data then give answer with steps and make the more details link within the <a href>lank hyper link.
          if not find the answer from provided data then say 'please contact our helpdesk' \n\n
          user question : """ + user_query)

    answer_text = str(engine_reply)
    print(answer_text.lower())

    lowered = answer_text.lower()
    if "please contact our helpdesk" in lowered or "please contact" in lowered:
        # Fallback: index had no answer — short GPT reply + helpdesk footer.
        print("help desk option")
        openai.api_key = os.environ["OPENAI_API_KEY"]

        default = """<br><br>Dear<br>If you have a specific question or need assistance, please feel free to submit a ticket, and our support team will be happy to help you:<br><br>Submit a Ticket:<br>Email: [email protected]<br>Hotline: 0114 226 999<br><br>Thank You """
        messages = [{"role": "user", "content": user_query + ".   always give small answers"}]
        completion = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages,
            temperature=0,
        )
        response.message(str(completion.choices[0].message.content) + default)
        return PlainTextResponse(str(response), media_type="application/xml")

    # Wrap each answer line in <p>…</p><br> exactly as the original loop did.
    result = "".join(f"<p>{line}</p><br>" for line in answer_text.split("\n"))
    response.message(result)
    return PlainTextResponse(str(response), media_type="application/xml")

    

# Run the application (Make sure you have the necessary setup to run FastAPI)