kurugai committed
Commit 5031331 · verified · Parent: ab6e796

Delete test.py

Files changed (1)
1. test.py +0 -46
test.py DELETED
@@ -1,46 +0,0 @@
- from langchain.prompts import PromptTemplate
- from langchain.chains import LLMChain
- from langchain_community.llms import LlamaCpp
- import gradio as gr
-
- # ChatML-style prompt template. The Korean system line asks the model
- # to give an appropriate answer to the user's question.
- template = """
- <s>사용자 질문에 알맞은 답변을 해주세요.
-
- {history}
- <|im_start|>user
- {input}
- <|im_end|>
- <|im_start|>assistant
- """
-
- # Make sure the model and LoRA paths are correct for your system!
- llm = LlamaCpp(
-     model_path="models/10.7B/EEVE_ggml-model-Q5_K_M.gguf",
-     lora_path="loras/240417_275dw_2ep_new_prompt/ggml-adapter-model.bin",
-     n_ctx=4096,  # context window large enough for max_tokens
-     temperature=0.1,
-     max_tokens=4096,
-     repeat_penalty=1.17,  # llama.cpp's name for repetition_penalty
-     top_k=40,
-     top_p=0.1,
-     stop=["<|im_end|>"],
- )
-
- # Build the prompt and chain once instead of on every call
- prompt = PromptTemplate(input_variables=["input", "history"], template=template)
- chain = LLMChain(prompt=prompt, llm=llm)
-
- def predict(input, history):
-     # Gradio passes history as a list of [user, assistant] pairs;
-     # render it into the ChatML format the template expects
-     history_text = "\n".join(
-         f"<|im_start|>user\n{user}\n<|im_end|>\n<|im_start|>assistant\n{assistant}\n<|im_end|>"
-         for user, assistant in history
-     )
-     # LLMChain.invoke takes a single dict of input variables and returns
-     # a dict; the generated string is under the "text" key
-     return chain.invoke({"input": input, "history": history_text})["text"]
-
- gr.ChatInterface(predict).queue().launch(share=True)