# --- Imports ---
import gradio as gr
import spaces
from transformers import pipeline

# --- Load Model ---
pipe = pipeline(model="InstaDeepAI/ChatNT", trust_remote_code=True)

# --- Logs ---
log_file = "logs.txt"


def log_message(message: str):
    with open(log_file, "a") as log:
        log.write(f"{message}\n")


# --- Utilities ---
def read_dna_sequence(dna_text, fasta_file):
    """
    Returns:
        dna_sequence: str
        warning: str (empty if none)
        error: str (empty if none)
    """
    dna_sequence = ""
    warning = ""
    error = ""

    # Pasted text
    if dna_text and dna_text.strip():
        dna_sequence = dna_text.strip().replace("\n", "")

    # Uploaded FASTA overrides pasted text
    if fasta_file is not None:
        if dna_sequence:
            warning = "Warning: Both pasted DNA and FASTA file provided. Using file only."
        try:
            with open(fasta_file.name, "r") as f:
                content = f.read()
            if not content.startswith(">"):
                error = "Invalid FASTA: must start with '>' header line."
                return "", warning, error
            # Concatenate all non-header lines into a single sequence
            sequence = ""
            for line in content.splitlines():
                if not line or line.startswith(">"):
                    continue
                sequence += line.strip()
            dna_sequence = sequence
        except Exception:
            error = "Could not read the FASTA file."

    if dna_sequence and not dna_sequence.isupper():
        dna_sequence = dna_sequence.upper()
        warning += "\nNote: DNA sequence was converted to uppercase."

    return dna_sequence, warning.strip(), error


def validate_inputs(dna_sequence, custom_question):
    """
    Returns:
        valid: bool
        error: str
    """
    # The question references the sequence through the <DNA> placeholder token
    placeholders = custom_question.count("<DNA>")
    if not custom_question.strip():
        return False, "Please provide a question."
    if dna_sequence and placeholders == 0:
        log_message("Error: DNA sequence provided but no <DNA> token.")
        return False, "Your question must contain the <DNA> token if you provide a DNA sequence."
    if not dna_sequence and placeholders == 1:
        log_message("Error: <DNA> token but no sequence.")
        return False, "You must provide a DNA sequence if you use the <DNA> token."
    if placeholders > 1:
        return False, "Only one <DNA> token is allowed."
    return True, ""


# --- Main Inference ---
@spaces.GPU
def run_chatnt(dna_text, fasta_file, custom_question):
    feedback_msgs = []

    dna_sequence, warning, fasta_error = read_dna_sequence(dna_text, fasta_file)
    if fasta_error:
        return "", fasta_error

    is_valid, validation_error = validate_inputs(dna_sequence, custom_question)
    if not is_valid:
        return "", validation_error

    final_prompt = custom_question
    inputs = {
        "english_sequence": final_prompt,
        "dna_sequences": [dna_sequence] if dna_sequence else [],
    }
    result = pipe(inputs=inputs)

    if warning:
        feedback_msgs.append(warning)

    return result, "\n".join(feedback_msgs)


# --- Gradio Interface ---
css = """
.gradio-container { font-family: sans-serif; }
.gr-button { color: white; border-color: black; background: black; }
footer { display: none !important; }
"""

example_dna = "ATGCATGCATGCATGC"
example_question = "Does this sequence <DNA> contain a donor splice site?"

with gr.Blocks(css=css) as demo:
    gr.Markdown("# 🧬 ChatNT: A Multimodal Conversational Agent for DNA, RNA and Protein Tasks")
    gr.Markdown(
        "[ChatNT](https://www.nature.com/articles/s42256-025-01047-1) is the first multimodal "
        "conversational agent designed with a deep understanding of biological sequences "
        "(DNA, RNA, proteins). It enables users, even those with no coding background, to "
        "interact with biological data through natural language, and it generalizes across "
        "multiple biological tasks and modalities.\n"
        "This Hugging Face Space is powered by [ZeroGPU](https://huggingface.co/docs/hub/en/spaces-zerogpu), "
        "which is free but **limited to 5 minutes per day per user**.\n"
    )
    gr.Image("https://media.springernature.com/w440/springer-static/cover-hires/journal/42256/7/6")

    with gr.Row():
        with gr.Column(scale=1):
            dna_text = gr.Textbox(
                label="Paste your DNA sequence",
                placeholder="ATGCATGC...",
                lines=4,
            )
            fasta_file = gr.File(
                label="Or upload your FASTA file",
                file_types=[".fasta", ".fa", ".txt"],
                height=50,
            )
            custom_question = gr.Textbox(
                label="English Question",
                placeholder="Does this sequence <DNA> contain a donor splice site?",
            )
            use_example = gr.Button("Use Example")
            submit_btn = gr.Button("Run Query", variant="primary")
        with gr.Column(scale=1):
            output = gr.Textbox(
                label="Model Answer",
                lines=12,
            )
            error_box = gr.Textbox(
                label="Execution Feedback",
                lines=4,
            )

    submit_btn.click(
        run_chatnt,
        inputs=[dna_text, fasta_file, custom_question],
        outputs=[output, error_box],
    )
    use_example.click(
        lambda: (example_dna, None, example_question),
        inputs=[],
        outputs=[dna_text, fasta_file, custom_question],
    )

    gr.Markdown(
        """
You must use **exactly one `<DNA>` token** if you want the model to see your sequence.
It is also possible to use the model without any DNA sequence (in this case, the `<DNA>` token must not be present in the question).
You can either paste a sequence or upload a FASTA file.

---

### ✅ Good queries
- "Does this sequence `<DNA>` contain a donor splice site?"
- "Is it possible for you to identify whether there's a substantial presence of H3 histone protein occupancy in the nucleotide sequence `<DNA>` in yeast?"
- "Determine the degradation rate of the mouse RNA sequence `<DNA>` within the -5 to 5 range."

### ❌ What will not work properly
- "What is the length of this sequence `<DNA>`?"

For more examples, you can refer to the [training dataset](https://huggingface.co/datasets/InstaDeepAI/ChatNT_training_data).
        """
    )

    gr.Markdown(
        """
### 📚 Citation
If you use **ChatNT**, please cite:

```bibtex
@article{deAlmeida2025,
  title   = {A multimodal conversational agent for DNA, RNA and protein tasks},
  author  = {de Almeida, Bernardo P. and Richard, Guillaume and Dalla-Torre, Hugo and Blum, Christopher and Hexemer, Lorenz and Pandey, Priyanka and Laurent, Stefan and Rajesh, Chandana and Lopez, Marie and Laterre, Alexandre and Lang, Maren and Şahin, Uğur and Beguir, Karim and Pierrot, Thomas},
  journal = {Nature Machine Intelligence},
  year    = {2025},
  volume  = {7},
  number  = {6},
  pages   = {928--941},
  doi     = {10.1038/s42256-025-01047-1},
  url     = {https://doi.org/10.1038/s42256-025-01047-1},
  issn    = {2522-5839}
}
```
        """,
        show_copy_button=True,
    )

if __name__ == "__main__":
    demo.queue()
    demo.launch(debug=True, show_error=True)
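
# --- Standalone usage (sketch) ---
# A minimal sketch of querying the pipeline directly, outside the Gradio UI,
# reusing the input schema from run_chatnt above. The question and sequence
# below are illustrative placeholders only; uncomment to try it locally.
#
# answer = pipe(inputs={
#     "english_sequence": "Does this sequence <DNA> contain a donor splice site?",
#     "dna_sequences": ["ATGCATGCATGCATGC"],
# })
# print(answer)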