deepakaitcs commited on
Commit
99709e3
1 Parent(s): 24c1914
Files changed (2) hide show
  1. app.py +19 -22
  2. requirements.txt +4 -0
app.py CHANGED
@@ -1,29 +1,26 @@
1
- # import openai
2
- from langchain_openai.llms import OpenAI
3
- from dotenv import load_dotenv
4
- import os
5
-
6
- from langchain_core.prompts import PromptTemplate
7
- from langchain.chains import LLMChain
8
-
9
 
10
- # os.environ["OPENAPI_KEY"]='sk-***REDACTED***'  # SECURITY: a live-looking OpenAI key was committed here — revoke it and load secrets from .env only
11
  load_dotenv()
12
- api_key = os.environ['OPEN_API_KEY'];
13
- llm = OpenAI(openai_api_key=api_key)
14
-
15
- mytemplate = """Question: {question}
16
- Answer: Let's think step by step."""
17
-
18
- # prompt = PromptTemplate.from_template(mytemplate)
19
 
20
- # mychain = LLMChain(llm=llm, prompt=prompt)
21
 
22
- # question ="who is fastest man on earth?"
23
- # print(mychain.run(question))
 
 
 
24
 
25
- result= llm.generate(['two hit movies of bollywood ', 'two flop movies of bollywood'])
 
26
 
27
- print(result.llm_output)
28
- print(result.generations[1][0].text)
29
 
 
 
 
 
 
1
## Difference between OpenAI and Hugging Face

# Load secrets (OPENAI_API_KEY, HUGGINGFACEHUB_API_TOKEN, GOOGLE_API_KEY)
# from a local .env file before any client is constructed.
from dotenv import load_dotenv

load_dotenv()

# --- OpenAI completion model via LangChain -------------------------------
# OpenAI() reads OPENAI_API_KEY from the environment populated above.
from langchain_openai import OpenAI

llm = OpenAI()
result = llm.invoke("What is capital of India")
print(result)

# --- Hugging Face Hub model via LangChain --------------------------------
## Use Hugging Face to load a Google model using LangChain
# FIX: `from langchain import HuggingFaceHub` is deprecated — the class now
# lives in langchain_community.llms.
from langchain_community.llms import HuggingFaceHub

llm2 = HuggingFaceHub(
    repo_id="google/flan-t5-large",
    model_kwargs={"temperature": 0, "max_length": 180},
)

# FIX: calling the LLM object directly (llm2("...")) uses the deprecated
# __call__ API; .invoke() is the supported Runnable entry point.
results2 = llm2.invoke("what is the capital of Sri lanka")
print(results2)

# --- Google Generative AI chat model via LangChain -----------------------
## Use Google GenAI chat models using LangChain
from langchain_google_genai import ChatGoogleGenerativeAI

chat = ChatGoogleGenerativeAI(model="gemini-pro")
# Chat models return a message object; the text lives on .content.
results3 = chat.invoke("What is the capital of Pakistan")
print(results3.content)
requirements.txt CHANGED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ langchain
2
+ openai
3
+ streamlit
4
+ langchain_google_genai
5
+ langchain_openai
6
+ python-dotenv
7
+ huggingface_hub