黄腾 aopstudio committed
Commit de35307 · 1 Parent(s): 6f0c45a

fix jina module not found bug (#1779)


### What problem does this PR solve?

Fixes a "jina module not found" error: `rag/llm/chat_model.py` imported `rag.svr.jina_server` at module level, so importing the module failed whenever jina was not installed, even if the Jina-backed `LocalLLM` was never used. The import is now deferred into the two methods that actually need it.

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)

Co-authored-by: Zhedong Cen <[email protected]>

Files changed (1)
  1. rag/llm/chat_model.py +2 -1
rag/llm/chat_model.py CHANGED

@@ -28,7 +28,6 @@ import os
 import json
 import requests
 import asyncio
-from rag.svr.jina_server import Prompt,Generation
 
 class Base(ABC):
     def __init__(self, key, model_name, base_url):
@@ -413,6 +412,7 @@ class LocalLLM(Base):
         self.client = Client(port=12345, protocol="grpc", asyncio=True)
 
     def _prepare_prompt(self, system, history, gen_conf):
+        from rag.svr.jina_server import Prompt,Generation
         if system:
             history.insert(0, {"role": "system", "content": system})
         if "max_tokens" in gen_conf:
@@ -420,6 +420,7 @@ class LocalLLM(Base):
         return Prompt(message=history, gen_conf=gen_conf)
 
     def _stream_response(self, endpoint, prompt):
+        from rag.svr.jina_server import Prompt,Generation
         answer = ""
         try:
             res = self.client.stream_doc(
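
For context, this is the standard deferred (lazy) import pattern: `rag.svr.jina_server` — and the jina dependency it pulls in — is now resolved only when `_prepare_prompt` or `_stream_response` actually runs, rather than whenever `chat_model.py` is imported. A minimal sketch of the pattern, where `optional_pkg`, `Thing`, and `process` are illustrative placeholders, not names from this repo:

```python
# Sketch of the deferred (lazy) import pattern used by this commit.
# `optional_pkg` and `Thing` are hypothetical placeholder names.

def process(data):
    # The import executes only when this function is called, so users who
    # never reach this code path do not need `optional_pkg` installed.
    # A module-level `from optional_pkg import Thing` would instead raise
    # ModuleNotFoundError the moment this file is imported.
    from optional_pkg import Thing
    return Thing().run(data)

try:
    process([1, 2, 3])
except ModuleNotFoundError:
    # The error now surfaces at call time, not at import time.
    print("optional_pkg not installed; the rest of the module still loads")
```

The trade-off is modest: Python caches modules in `sys.modules`, so repeated calls pay only a cheap lookup after the first import, and a missing dependency is reported only when the code path is exercised.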