# Hugging Face Hub page residue (kept for provenance, commented out so the file parses):
# materials-ai-app / app/models.py
# mgbam's picture
# Add application file
# 4fe5752
# app/models.py
# Model loading and pipeline construction for materials-science NER.
from transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline
import torch
# Load a domain-specific model (example: MatSciBERT for materials text)
# NOTE(review): everything below runs at import time — the first import of
# this module downloads (if not cached) and loads the full model. Consider
# lazy initialization if startup latency matters.
MATS_BERT_MODEL = "m3rg-iitd/matscibert" # adjust model name as needed
tokenizer = AutoTokenizer.from_pretrained(MATS_BERT_MODEL)
model = AutoModelForTokenClassification.from_pretrained(MATS_BERT_MODEL)
# Create a pipeline for token classification (NER, relation extraction)
# aggregation_strategy="simple" merges word-piece tokens into whole-word
# spans, so results expose an "entity_group" key rather than per-token tags.
ner_pipeline = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
def extract_entities(text: str) -> list[dict]:
    """
    Extract named entities from *text* using the domain-specific BERT model.

    Args:
        text: Raw input text to analyze.

    Returns:
        A list of dicts, one per detected entity, with keys:
        ``entity`` (aggregated entity-group label), ``word`` (matched
        surface text), ``score`` (confidence as a builtin float),
        ``start`` and ``end`` (character offsets into *text*).
    """
    results = ner_pipeline(text)
    # float(r["score"]) — the pipeline yields numpy.float32, which is not
    # JSON-serializable; cast to a builtin float for downstream consumers.
    # start/end were promised by the original output description but were
    # missing from the emitted dicts; include them.
    return [
        {
            "entity": r["entity_group"],
            "word": r["word"],
            "score": float(r["score"]),
            "start": r["start"],
            "end": r["end"],
        }
        for r in results
    ]
def answer_question(query: str):
    """
    Return a placeholder answer for *query*.

    This is a demonstration stub: a production implementation would pair
    a retrieval step with a domain-specific question-answering model
    rather than echoing the query back.
    """
    # Placeholder — replace with real retrieval + Q&A logic.
    simulated = f"Simulated answer for query: '{query}'"
    return simulated
# Model loading and inference functions