import os
import logging
from typing import Generator

import anthropic

logger = logging.getLogger(__name__)


class ChatProcessor:

    def __init__(self):
        self.client = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
        # Conversation history stored as Messages API message dicts
        # ({"role": ..., "content": ...}) so it can be sent with each request.
        self.conversation_history = []

    def process_chat_input(self, message: str, lang_code: str) -> Generator[str, None, None]:
        """
        Process the chat input and yield the response in chunks.
        """
        try:
            self.conversation_history.append({"role": "user", "content": message})

            # Claude 3 models are served through the Messages API, so stream
            # the reply with messages.stream rather than the legacy
            # completions endpoint.
            with self.client.messages.stream(
                model="claude-3-opus-20240229",
                max_tokens=300,
                temperature=0.7,
                messages=self.conversation_history,
            ) as stream:
                full_response = ""
                for text in stream.text_stream:
                    yield text
                    full_response += text

            self.conversation_history.append({"role": "assistant", "content": full_response})

        except Exception as e:
            logger.error(f"Error in process_chat_input: {str(e)}")
            yield f"Error: {str(e)}"