File size: 1,475 Bytes
b324113
 
 
a871520
 
 
 
1570310
 
 
a871520
0252582
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b324113
 
 
 
0252582
b324113
 
a871520
0252582
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import WebBaseLoader
from langchain.chains.summarize import load_summarize_chain
from bs4 import BeautifulSoup
import os
from dotenv import load_dotenv
load_dotenv()
from langchain import HuggingFaceHub
import requests
import sys

#OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')

def _require_env(name):
    """Return the value of environment variable *name*, failing fast if unset.

    ``os.environ.get`` silently yields ``None`` for a missing variable, which
    only surfaces later as a confusing error deep inside ``HuggingFaceHub``;
    raising here makes the misconfiguration obvious immediately.

    Raises:
        RuntimeError: if the variable is missing or empty.
    """
    value = os.environ.get(name)
    if not value:
        raise RuntimeError(f"Required environment variable '{name}' is not set")
    return value

hf_token = _require_env('HUGGINGFACEHUB_API_TOKEN')  # HF Inference API token
#starchat_repo_id = os.environ.get('starchat_repo_id')
repo_id = _require_env('repo_id')  # model repo to call (Llama2 per comments below)
#port = os.getenv('port')

# Remote LLM backed by the HuggingFace Hub inference API; generation
# parameters are tuned per-model (Llama2 vs StarChat variants kept as
# commented alternatives).
llm = HuggingFaceHub(repo_id=repo_id,  #for Llama2
                     #repo_id=starchat_repo_id,     #for StarChat
                     huggingfacehub_api_token=hf_token,
                     model_kwargs={#"min_length":512,  #for StarChat
                                   "min_length":1024,  #for Llama2
                                   "max_new_tokens":3072, "do_sample":True,  #for StarChat
                                   #"max_new_tokens":5632, "do_sample":True,  #for Llama2
                                   "temperature":0.1,
                                   "top_k":50,
                                   "top_p":0.95, "eos_token_id":49155})

# Build a single-prompt ("stuff") summarization chain, then fetch the target
# page and print the model's summary of its contents.
summarize_chain = load_summarize_chain(llm, chain_type="stuff")

#llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo-16k")
page_docs = WebBaseLoader("https://www.usinoip.com/").load()

print(summarize_chain.run(page_docs))