JaniShubh commited on
Commit
aa93fe7
·
1 Parent(s): 35c7ff1

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +30 -0
  2. langchain.ipynb +539 -0
  3. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from langchain.llms import OpenAI
import os

from dotenv import load_dotenv

load_dotenv()

import streamlit as st


def get_openAI_response(question):
    """Return the OpenAI completion for *question*.

    The API key is read from the OPEN_API_KEY environment variable
    (loaded from .env by load_dotenv above).

    NOTE(review): "text-davinci-003" has been retired by OpenAI — confirm a
    supported replacement (e.g. "gpt-3.5-turbo-instruct") before deploying.
    """
    llm = OpenAI(
        openai_api_key=os.getenv("OPEN_API_KEY"),
        model_name="text-davinci-003",
        temperature=0.5,
    )
    return llm(question)


st.set_page_config(page_title="QnA demo")
st.header("langchain app")

# Renamed from `input` to avoid shadowing the Python builtin.
question = st.text_input("Input : ", key="input")

submit = st.button("ask the question")

# Only hit the (paid) OpenAI API after the user presses the button and has
# typed something. The original called the API on every Streamlit rerun,
# before the button was pressed and even with an empty question.
if submit and question:
    st.subheader("AI answer : ")
    st.write(get_openAI_response(question))
langchain.ipynb ADDED
@@ -0,0 +1,539 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 5,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "from langchain.llms import OpenAI"
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "code",
14
+ "execution_count": 51,
15
+ "metadata": {},
16
+ "outputs": [],
17
+ "source": [
18
+ "import os\n",
19
+ "os.environ['OPEN_API_KEY'] = \"sk-REDACTED\"  # SECURITY: live key was committed here - revoke it and load from .env instead"
20
+ ]
21
+ },
22
+ {
23
+ "cell_type": "code",
24
+ "execution_count": null,
25
+ "metadata": {},
26
+ "outputs": [],
27
+ "source": []
28
+ },
29
+ {
30
+ "cell_type": "code",
31
+ "execution_count": 10,
32
+ "metadata": {},
33
+ "outputs": [],
34
+ "source": [
35
+ "llm=OpenAI (openai_api_key=os.environ[\"OPEN_API_KEY\"], temperature=0.6)"
36
+ ]
37
+ },
38
+ {
39
+ "cell_type": "code",
40
+ "execution_count": 9,
41
+ "metadata": {},
42
+ "outputs": [
43
+ {
44
+ "name": "stdout",
45
+ "output_type": "stream",
46
+ "text": [
47
+ "\n",
48
+ "\n",
49
+ "Delhi is the capital of India.\n"
50
+ ]
51
+ }
52
+ ],
53
+ "source": [
54
+ "text = \"capital of India\"\n",
55
+ "\n",
56
+ "print(llm.predict(text))"
57
+ ]
58
+ },
59
+ {
60
+ "cell_type": "code",
61
+ "execution_count": 11,
62
+ "metadata": {},
63
+ "outputs": [],
64
+ "source": [
65
+ "os.environ['HUGGINGFACEHUB_API_TOKEN']=\"hf_REDACTED\"  # SECURITY: live token was committed here - revoke it and load from .env instead"
66
+ ]
67
+ },
68
+ {
69
+ "cell_type": "code",
70
+ "execution_count": 12,
71
+ "metadata": {},
72
+ "outputs": [],
73
+ "source": [
74
+ "from langchain import HuggingFaceHub"
75
+ ]
76
+ },
77
+ {
78
+ "cell_type": "code",
79
+ "execution_count": 13,
80
+ "metadata": {},
81
+ "outputs": [
82
+ {
83
+ "name": "stderr",
84
+ "output_type": "stream",
85
+ "text": [
86
+ "/Users/jani/Desktop/DS/LLM/Learning/env/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
87
+ " from .autonotebook import tqdm as notebook_tqdm\n",
88
+ "/Users/jani/Desktop/DS/LLM/Learning/env/lib/python3.11/site-packages/huggingface_hub/utils/_deprecation.py:127: FutureWarning: '__init__' (from 'huggingface_hub.inference_api') is deprecated and will be removed from version '1.0'. `InferenceApi` client is deprecated in favor of the more feature-complete `InferenceClient`. Check out this guide to learn how to convert your script to use it: https://huggingface.co/docs/huggingface_hub/guides/inference#legacy-inferenceapi-client.\n",
89
+ " warnings.warn(warning_message, FutureWarning)\n"
90
+ ]
91
+ }
92
+ ],
93
+ "source": [
94
+ "hf_llm = HuggingFaceHub(repo_id=\"google/flan-t5-large\",model_kwargs={\"temperature\" :0, \"max_length\" : 64})"
95
+ ]
96
+ },
97
+ {
98
+ "cell_type": "code",
99
+ "execution_count": 14,
100
+ "metadata": {},
101
+ "outputs": [
102
+ {
103
+ "name": "stdout",
104
+ "output_type": "stream",
105
+ "text": [
106
+ "Delhi\n"
107
+ ]
108
+ }
109
+ ],
110
+ "source": [
111
+ "output = hf_llm.predict(\"what is capital of bharat\")\n",
112
+ "print(output)"
113
+ ]
114
+ },
115
+ {
116
+ "cell_type": "markdown",
117
+ "metadata": {},
118
+ "source": [
119
+ "### simple sequential chain"
120
+ ]
121
+ },
122
+ {
123
+ "cell_type": "code",
124
+ "execution_count": 15,
125
+ "metadata": {},
126
+ "outputs": [],
127
+ "source": [
128
+ "from langchain.prompts import PromptTemplate"
129
+ ]
130
+ },
131
+ {
132
+ "cell_type": "code",
133
+ "execution_count": 4,
134
+ "metadata": {},
135
+ "outputs": [
136
+ {
137
+ "data": {
138
+ "text/plain": [
139
+ "'what is the captial of India'"
140
+ ]
141
+ },
142
+ "execution_count": 4,
143
+ "metadata": {},
144
+ "output_type": "execute_result"
145
+ }
146
+ ],
147
+ "source": [
148
+ "prompt_template = PromptTemplate(input_variables=[\"country\"] , \n",
149
+ "template = \"what is the captial of {country}\")\n",
150
+ "\n",
151
+ "prompt_template.format(country=\"India\")"
152
+ ]
153
+ },
154
+ {
155
+ "cell_type": "code",
156
+ "execution_count": 16,
157
+ "metadata": {},
158
+ "outputs": [],
159
+ "source": [
160
+ "from langchain.chains import LLMChain"
161
+ ]
162
+ },
163
+ {
164
+ "cell_type": "code",
165
+ "execution_count": 24,
166
+ "metadata": {},
167
+ "outputs": [
168
+ {
169
+ "data": {
170
+ "text/plain": [
171
+ "'\\n\\nThe capital of India is New Delhi.'"
172
+ ]
173
+ },
174
+ "execution_count": 24,
175
+ "metadata": {},
176
+ "output_type": "execute_result"
177
+ }
178
+ ],
179
+ "source": [
180
+ "chain = LLMChain(llm=llm,prompt=prompt_template)\n",
181
+ "chain.run(\"India\")"
182
+ ]
183
+ },
184
+ {
185
+ "cell_type": "code",
186
+ "execution_count": 26,
187
+ "metadata": {},
188
+ "outputs": [],
189
+ "source": [
190
+ "captial_template = PromptTemplate(input_variables=['country'],\n",
191
+ "template=\"what is capital of {country}\")\n",
192
+ "\n",
193
+ "captial_chain = LLMChain(llm=llm,prompt=captial_template)"
194
+ ]
195
+ },
196
+ {
197
+ "cell_type": "code",
198
+ "execution_count": 30,
199
+ "metadata": {},
200
+ "outputs": [],
201
+ "source": [
202
+ "famous_template = PromptTemplate(input_variables=['captial'],\n",
203
+ "template=\"tell me most famous food of {captial}\")\n",
204
+ "\n",
205
+ "famous_chain=LLMChain(llm=llm,prompt=famous_template)"
206
+ ]
207
+ },
208
+ {
209
+ "cell_type": "code",
210
+ "execution_count": 31,
211
+ "metadata": {},
212
+ "outputs": [],
213
+ "source": [
214
+ "from langchain.chains import SimpleSequentialChain"
215
+ ]
216
+ },
217
+ {
218
+ "cell_type": "code",
219
+ "execution_count": 34,
220
+ "metadata": {},
221
+ "outputs": [
222
+ {
223
+ "data": {
224
+ "text/plain": [
225
+ "'\\n\\nThe most famous food of Canada is poutine. Poutine is a dish consisting of french fries, gravy, and cheese curds. It is a popular comfort food and can be found in most Canadian restaurants.'"
226
+ ]
227
+ },
228
+ "execution_count": 34,
229
+ "metadata": {},
230
+ "output_type": "execute_result"
231
+ }
232
+ ],
233
+ "source": [
234
+ "chain = SimpleSequentialChain(chains=[captial_chain,famous_chain])\n",
235
+ "chain.run(\"Canada\")"
236
+ ]
237
+ },
238
+ {
239
+ "cell_type": "markdown",
240
+ "metadata": {},
241
+ "source": [
242
+ "### sequential chain"
243
+ ]
244
+ },
245
+ {
246
+ "cell_type": "code",
247
+ "execution_count": 36,
248
+ "metadata": {},
249
+ "outputs": [],
250
+ "source": [
251
+ "captial_template = PromptTemplate(input_variables=['country'],\n",
252
+ "template=\"what is capital of {country}\")\n",
253
+ "\n",
254
+ "captial_chain = LLMChain(llm=llm,prompt=captial_template,output_key='captial')"
255
+ ]
256
+ },
257
+ {
258
+ "cell_type": "code",
259
+ "execution_count": 37,
260
+ "metadata": {},
261
+ "outputs": [],
262
+ "source": [
263
+ "famous_template = PromptTemplate(input_variables=['captial'],\n",
264
+ "template=\"tell me most famous food of {captial}\")\n",
265
+ "\n",
266
+ "famous_chain=LLMChain(llm=llm,prompt=famous_template,output_key='food')"
267
+ ]
268
+ },
269
+ {
270
+ "cell_type": "code",
271
+ "execution_count": 38,
272
+ "metadata": {},
273
+ "outputs": [],
274
+ "source": [
275
+ "from langchain.chains import SequentialChain"
276
+ ]
277
+ },
278
+ {
279
+ "cell_type": "code",
280
+ "execution_count": 40,
281
+ "metadata": {},
282
+ "outputs": [],
283
+ "source": [
284
+ "chain = SequentialChain(chains=[captial_chain,famous_chain],\n",
285
+ "input_variables=['country'],\n",
286
+ "output_variables=['captial',\"food\"])"
287
+ ]
288
+ },
289
+ {
290
+ "cell_type": "code",
291
+ "execution_count": 43,
292
+ "metadata": {},
293
+ "outputs": [
294
+ {
295
+ "data": {
296
+ "text/plain": [
297
+ "{'country': 'thailand',\n",
298
+ " 'captial': '\\n\\nBangkok is the capital of Thailand.',\n",
299
+ " 'food': ' The most famous foods from Bangkok are Pad Thai, Tom Yum Goong, Som Tam (Green Papaya Salad), Kaeng Khiao Wan (Green Curry), Khao Pad (Fried Rice), and Gai Pad Met Mamuang (Chicken with Cashew Nuts).'}"
300
+ ]
301
+ },
302
+ "execution_count": 43,
303
+ "metadata": {},
304
+ "output_type": "execute_result"
305
+ }
306
+ ],
307
+ "source": [
308
+ "chain({'country':\"thailand\"})"
309
+ ]
310
+ },
311
+ {
312
+ "cell_type": "markdown",
313
+ "metadata": {},
314
+ "source": [
315
+ "### chatopenAI"
316
+ ]
317
+ },
318
+ {
319
+ "cell_type": "code",
320
+ "execution_count": 44,
321
+ "metadata": {},
322
+ "outputs": [],
323
+ "source": [
324
+ "from langchain.chat_models import ChatOpenAI"
325
+ ]
326
+ },
327
+ {
328
+ "cell_type": "code",
329
+ "execution_count": 45,
330
+ "metadata": {},
331
+ "outputs": [],
332
+ "source": [
333
+ "from langchain.schema import HumanMessage,SystemMessage,AIMessage"
334
+ ]
335
+ },
336
+ {
337
+ "cell_type": "code",
338
+ "execution_count": 54,
339
+ "metadata": {},
340
+ "outputs": [],
341
+ "source": [
342
+ "chat_llm=ChatOpenAI(openai_api_key=os.environ[\"OPEN_API_KEY\"], temperature=0.6,model='gpt-3.5-turbo')"
343
+ ]
344
+ },
345
+ {
346
+ "cell_type": "code",
347
+ "execution_count": 55,
348
+ "metadata": {},
349
+ "outputs": [
350
+ {
351
+ "data": {
352
+ "text/plain": [
353
+ "ChatOpenAI(client=<openai.resources.chat.completions.Completions object at 0x13fa68d90>, async_client=<openai.resources.chat.completions.AsyncCompletions object at 0x168d46c90>, temperature=0.6, openai_api_key='sk-REDACTED', openai_proxy='')"
354
+ ]
355
+ },
356
+ "execution_count": 55,
357
+ "metadata": {},
358
+ "output_type": "execute_result"
359
+ }
360
+ ],
361
+ "source": [
362
+ "chat_llm"
363
+ ]
364
+ },
365
+ {
366
+ "cell_type": "code",
367
+ "execution_count": 57,
368
+ "metadata": {},
369
+ "outputs": [
370
+ {
371
+ "data": {
372
+ "text/plain": [
373
+ "AIMessage(content='Why did the peanut go to the police station?\\n\\nBecause it was a-salted!')"
374
+ ]
375
+ },
376
+ "execution_count": 57,
377
+ "metadata": {},
378
+ "output_type": "execute_result"
379
+ }
380
+ ],
381
+ "source": [
382
+ "chat_llm([\n",
383
+ " SystemMessage(content=\"you are a comdeian\"),\n",
384
+ " HumanMessage(content=\"tell me a joke about nut\")\n",
385
+ "])"
386
+ ]
387
+ },
388
+ {
389
+ "cell_type": "markdown",
390
+ "metadata": {},
391
+ "source": [
392
+ "### Prompt template + LLM + Output parser"
393
+ ]
394
+ },
395
+ {
396
+ "cell_type": "code",
397
+ "execution_count": 58,
398
+ "metadata": {},
399
+ "outputs": [],
400
+ "source": [
401
+ "from langchain.chat_models import ChatOpenAI\n",
402
+ "from langchain.prompts.chat import ChatPromptTemplate\n",
403
+ "from langchain.schema import BaseOutputParser"
404
+ ]
405
+ },
406
+ {
407
+ "cell_type": "code",
408
+ "execution_count": 60,
409
+ "metadata": {},
410
+ "outputs": [],
411
+ "source": [
412
+ "class csv_output(BaseOutputParser):\n",
413
+ " def parse(self, text: str):\n",
414
+ " return text.strip().split(\",\")"
415
+ ]
416
+ },
417
+ {
418
+ "cell_type": "code",
419
+ "execution_count": 77,
420
+ "metadata": {},
421
+ "outputs": [],
422
+ "source": [
423
+ "template = \"tell me 5 words that consists of ___ word answer in a list just 5 words nothing else\"\n",
424
+ "human_template = \"{text}\"\n",
425
+ "\n",
426
+ "chat_prompt = ChatPromptTemplate.from_messages([\n",
427
+ " (\"system\",template),\n",
428
+ " (\"human\",human_template)\n",
429
+ "])"
430
+ ]
431
+ },
432
+ {
433
+ "cell_type": "code",
434
+ "execution_count": 74,
435
+ "metadata": {},
436
+ "outputs": [],
437
+ "source": [
438
+ "chain = chat_prompt | chat_llm | csv_output()"
439
+ ]
440
+ },
441
+ {
442
+ "cell_type": "code",
443
+ "execution_count": 78,
444
+ "metadata": {},
445
+ "outputs": [
446
+ {
447
+ "data": {
448
+ "text/plain": [
449
+ "['1. Adore\\n2. Respond\\n3. Responsibility\\n4. Soda\\n5. Modest']"
450
+ ]
451
+ },
452
+ "execution_count": 78,
453
+ "metadata": {},
454
+ "output_type": "execute_result"
455
+ }
456
+ ],
457
+ "source": [
458
+ "chain.invoke({\"text\" : \"do\"})"
459
+ ]
460
+ },
461
+ {
462
+ "cell_type": "code",
463
+ "execution_count": null,
464
+ "metadata": {},
465
+ "outputs": [],
466
+ "source": []
467
+ },
468
+ {
469
+ "cell_type": "code",
470
+ "execution_count": null,
471
+ "metadata": {},
472
+ "outputs": [],
473
+ "source": []
474
+ },
475
+ {
476
+ "cell_type": "code",
477
+ "execution_count": null,
478
+ "metadata": {},
479
+ "outputs": [],
480
+ "source": []
481
+ },
482
+ {
483
+ "cell_type": "code",
484
+ "execution_count": null,
485
+ "metadata": {},
486
+ "outputs": [],
487
+ "source": []
488
+ },
489
+ {
490
+ "cell_type": "code",
491
+ "execution_count": null,
492
+ "metadata": {},
493
+ "outputs": [],
494
+ "source": []
495
+ },
496
+ {
497
+ "cell_type": "code",
498
+ "execution_count": null,
499
+ "metadata": {},
500
+ "outputs": [],
501
+ "source": []
502
+ },
503
+ {
504
+ "cell_type": "code",
505
+ "execution_count": null,
506
+ "metadata": {},
507
+ "outputs": [],
508
+ "source": []
509
+ },
510
+ {
511
+ "cell_type": "code",
512
+ "execution_count": null,
513
+ "metadata": {},
514
+ "outputs": [],
515
+ "source": []
516
+ }
517
+ ],
518
+ "metadata": {
519
+ "kernelspec": {
520
+ "display_name": "env",
521
+ "language": "python",
522
+ "name": "python3"
523
+ },
524
+ "language_info": {
525
+ "codemirror_mode": {
526
+ "name": "ipython",
527
+ "version": 3
528
+ },
529
+ "file_extension": ".py",
530
+ "mimetype": "text/x-python",
531
+ "name": "python",
532
+ "nbconvert_exporter": "python",
533
+ "pygments_lexer": "ipython3",
534
+ "version": "3.11.6"
535
+ }
536
+ },
537
+ "nbformat": 4,
538
+ "nbformat_minor": 2
539
+ }
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ langchain
2
+ openai
3
+ huggingface_hub
4
+ python-dotenv
5
+ streamlit