File size: 2,278 Bytes
a0bfec6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
# Load a 4-bit GPTQ-quantized Llama-2-7B-Chat model and its tokenizer.
# NOTE(review): BaseQuantizeConfig is imported but never used below —
# candidate for removal once confirmed nothing else needs it.
from transformers import AutoTokenizer, pipeline
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig

# Pre-quantized checkpoint hosted on the Hugging Face Hub.
model_name = "TheBloke/Llama-2-7b-Chat-GPTQ"

# Triton kernels can be faster but need a Linux/Triton setup; disabled here.
use_triton = False

tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)

# quantize_config=None: quantization settings are read from the checkpoint's
# own quantize_config.json instead of being supplied here.
model = AutoGPTQForCausalLM.from_quantized(model_name,
        use_safetensors=True,
        trust_remote_code=True,
        device="cuda:0",  # requires a CUDA-capable GPU
        use_triton=use_triton,
        quantize_config=None)

# Create a text-generation pipeline around the quantized model.
# FIX: temperature/top_p/repetition_penalty only take effect when
# do_sample=True; without it, transformers uses greedy decoding and
# silently ignores the sampling parameters passed below.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,
    do_sample=True,  # enable sampling so temperature/top_p are honored
    temperature=0.6,
    top_p=0.95,
    repetition_penalty=1.15
)

from langchain.llms import HuggingFacePipeline
from langchain.chains import LLMChain
# FIX: PromptTemplate lives in langchain.prompts; the bare
# `from langchain import PromptTemplate` root re-export is deprecated and
# removed in newer langchain releases.
from langchain.prompts import PromptTemplate

# Wrap the transformers pipeline so LangChain chains can drive it.
llm = HuggingFacePipeline(pipeline=pipe)

# System prompt: answer from the scraped page content; fall back to own
# knowledge only when confident, and disclose when doing so.
system_message = """
You are a helpful, respectful and honest assistant. Your job is to answer the users query as best as possible given the Web Page Content. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. If you DO NOT KNOW THE ANSWER DO NOT SHARE FALSE INFORMATION.
You have been given scraped text content of a webpage under the section called "Web Page Content". Using this information answer the users query. However, if the webpage DOES NOT contain the answer to the query, you CAN answer based on your existing knowledge IF you are sure of the answer, but ALWAYS let the user know when doing so.
"""

# Llama-2 chat format: [INST] ... [/INST] with the system prompt wrapped in
# <<SYS>> tags. Placeholders: {system_message}, {context}, {prompt}.
prompt_template='''[INST] <<SYS>>
{system_message}
<</SYS>>

Web Page Content:
```
{context}
```

{prompt} [/INST]'''


# verbose=True logs the fully-formatted prompt on each call — useful for
# debugging, noisy in production.
chat = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template(prompt_template),
    verbose=True
)

import requests
from bs4 import BeautifulSoup
import re

def scraper(url):
  """Fetch `url` and return its visible text content.

  FIX: `run` below calls `scraper`, but no such function was defined
  anywhere in the file — every request would raise NameError. The
  requests/BeautifulSoup/re imports above were clearly meant for it.
  Strips script/style/noscript tags and collapses whitespace so the page
  text consumes less of the model's context window.
  """
  resp = requests.get(url, timeout=30)
  resp.raise_for_status()  # surface HTTP errors instead of feeding an error page to the LLM
  soup = BeautifulSoup(resp.text, "html.parser")
  for tag in soup(["script", "style", "noscript"]):
    tag.decompose()
  text = soup.get_text(separator=" ")
  return re.sub(r"\s+", " ", text).strip()


def run(url, input):
  """Gradio callback: scrape `url`, then answer `input` about its content.

  NOTE(review): `input` shadows the builtin; kept unchanged because Gradio
  binds the two textboxes to these parameters — rename with care.
  """
  context = scraper(url)
  response = chat.predict(system_message=system_message, context=context, prompt=input)

  return response

import gradio as gr

# Create a Gradio interface
# The two free-text inputs map positionally onto run(url, input).
iface = gr.Interface(
    fn=run,
    inputs=["text","text"],
    outputs="text",
    title="Web Query App",
    description="Enter the webpage url and your query\nIMPORTANT: Larger webpages are likely to cause error due to lack of computational resources"
)

# Launch the interface
# inline=False: open in a browser tab instead of embedding in a notebook cell.
iface.launch(inline=False)