import gradio as gr
from huggingface_hub import InferenceClient
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

"""
For more information on `huggingface_hub` Inference API support, please check the docs:
https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    messages = [{"role": "system", "content": system_message}]

    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    response = ""

    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # guard: the final stream chunk can carry a None delta
            response += token
        yield response


demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

# Load the pretrained language model used by the experiment loop.
# Note: the bare id "gpt-neo-2.7B" is not a valid Hub repo; the full id is needed.
model_name = "EleutherAI/gpt-neo-2.7B"  # can be swapped for GPT-J or any other causal LM
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


# Automated experiment loop
def experiment_loop(initial_question, max_cycles=10):
    prompt = f"{initial_question}"
    effectiveness = 100  # initialize the effectiveness percentage
    communication = "Initializing experiment."
    response_log = []

    for cycle in range(max_cycles):
        # Generate the model's response (max_length counts prompt tokens too)
        inputs = tokenizer(prompt, return_tensors="pt").input_ids
        outputs = model.generate(inputs, max_length=200)
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Split the response into an affirmation and a new question
        affirmation = extract_affirmation(response)
        new_question = extract_question(response)

        # Update the effectiveness score (example growth rule, capped at 100
        # since it is described as a percentage)
        effectiveness = min(100, effectiveness + 10 * cycle)

        # Communication shown to the user
        communication = f"Cycle {cycle + 1}: Affirmation: '{affirmation}' | New Question: '{new_question}'"

        # Record the current cycle in the log
        response_log.append((affirmation, new_question, effectiveness, communication))

        # Check whether the model decided to stop ("Descanso" = rest)
        if "Descanso" in response:
            return generate_final_output(response_log)

        # Update the prompt with the new affirmation and question
        prompt = f"{affirmation} {new_question}"

    # Maximum number of cycles reached without the model stopping
    return generate_final_output(response_log)


# Helper functions to extract affirmations/questions and build the final output
def extract_affirmation(response):
    # Take the first sentence as the affirmation
    return response.split('.')[0]


def extract_question(response):
    # Take the last complete question; fall back to an empty string when the
    # response contains no '?' at all (avoids an IndexError)
    parts = response.split('?')
    if len(parts) < 2:
        return ""
    return parts[-2].strip() + "?"


def generate_final_output(log):
    final_affirmation = log[-1][0]
    final_question = log[-1][1]
    final_communication = (
        f"Experiment completed. Final Affirmation: '{final_affirmation}' "
        f"| Final Question: '{final_question}'"
    )
    return final_communication
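# A minimal, model-free sanity check for the extraction helpers. This function
# is not part of the original script: the name `_demo_parsing` and the sample
# text are invented here purely for illustration, so the helpers' behaviour can
# be verified without loading the 2.7B model.
def _demo_parsing():
    sample = "It persists. Where does it go? What remains?"
    assert extract_affirmation(sample) == "It persists"
    assert extract_question(sample) == "What remains?"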
if __name__ == "__main__":
    # Run the experiment once, then start the Gradio app
    initial_question = "What happens in the space between a response and its recreation?"
    result = experiment_loop(initial_question)
    print(result)

    demo.launch()
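# Sketch of driving `respond` outside the UI (illustrative only: it assumes
# Inference API access and reuses the default slider values from above).
# `respond` is a generator that yields the accumulated reply after each
# streamed token, so the last yielded value is the full reply:
#
#   reply = ""
#   for reply in respond("Hello!", [], "You are a friendly Chatbot.", 64, 0.7, 0.95):
#       pass
#   print(reply)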