zhtet committed on
Commit 5bf0c23 · 1 Parent(s): 4335fe0

Update models/llamaCustom.py

Files changed (1)
  1. models/llamaCustom.py +1 -25
models/llamaCustom.py CHANGED
@@ -22,7 +22,7 @@ from llama_index import (
 from llama_index.llms import CompletionResponse, CustomLLM, LLMMetadata
 
 # from langchain.llms.base import LLM
-from llama_index.prompts import Prompt
+# from llama_index.prompts import Prompt
 from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, pipeline
 
 # from utils.customLLM import CustomLLM
@@ -46,30 +46,6 @@ prompt_helper = PromptHelper(
     chunk_overlap_ratio=CHUNK_OVERLAP_RATIO,
 )
 
-text_qa_template_str = (
-    "Context information is below.\n"
-    "---------------------\n"
-    "{context_str}\n"
-    "---------------------\n"
-    "Using both the context information and also using your own knowledge, "
-    "answer the question: {query_str}\n"
-    "If the question is relevant, you can answer by providing the name of the chapter, the article and the title to the answer. In addition, you can add the page number of the document when you found the answer.\n"
-    "If the context isn't helpful, you can also answer the question on your own.\n"
-)
-text_qa_template = Prompt(text_qa_template_str)
-
-refine_template_str = (
-    "The original question is as follows: {query_str}\n"
-    "We have provided an existing answer: {existing_answer}\n"
-    "We have the opportunity to refine the existing answer "
-    "(only if needed) with some more context below.\n"
-    "------------\n"
-    "{context_msg}\n"
-    "------------\n"
-    "Using both the new context and your own knowledege, update or repeat the existing answer.\n"
-)
-refine_template = Prompt(refine_template_str)
-
 
 @st.cache_resource
 def load_model(mode_name: str):
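For context on what this commit drops: the removed text_qa_template and refine_template were llama_index Prompt objects wrapping format strings with {context_str} / {query_str} placeholders, and the Prompt import is now commented out. Below is a minimal sketch of how such templates are typically consumed, assuming the pre-0.9 llama_index API this file already imports; the one-document index and the question are hypothetical stand-ins, and the default service context (an OpenAI API key in the environment) is assumed.

# Sketch only, not part of this commit.
from llama_index import Document, VectorStoreIndex
from llama_index.prompts import Prompt

# Hypothetical one-document index, only to make the sketch self-contained.
index = VectorStoreIndex.from_documents(
    [Document(text="Chapter 1, Article 2: The board meets quarterly.")]
)

# Same shape as the removed template: a format string whose placeholders
# the query engine fills in at answer time.
text_qa_template = Prompt(
    "Context information is below.\n"
    "---------------------\n"
    "{context_str}\n"
    "---------------------\n"
    "Answer the question: {query_str}\n"
)

# as_query_engine forwards text_qa_template (and, analogously, refine_template)
# to the response synthesizer, overriding the library's default prompts.
query_engine = index.as_query_engine(text_qa_template=text_qa_template)
print(query_engine.query("How often does the board meet?"))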