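# Minimal Balochi-to-English translation demo: flattens a multilingual JSON corpus,
# builds word-level vocabularies, and trains a simple RNN that predicts the first
# English token of each Balochi sentence.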
import json
import re
from collections import defaultdict
import torch
import torch.nn as nn
import torch.optim as optim
def flatten_json_data(json_file_path):
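    """Flatten the nested JSON corpus into a list of parallel-sentence records.

    Assumes (based on the keys used below) that each top-level entry may contain
    "conversation_samples" and/or "technical_terminology" sections whose items hold
    aligned sentence lists under 'balochi', 'english', 'urdu', and 'persian'.
    """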
with open(json_file_path, 'r', encoding='utf-8') as f:
data = json.load(f)
flattened_data = []
for dataset in data:
for section in ["conversation_samples", "technical_terminology"]:
if section in dataset:
for category, samples in dataset[section].items():
for item in samples:
if "balochi" in item and "english" in item and "urdu" in item and "persian" in item: # Check if all keys are present
for balochi_sentence, english_sentence, urdu_sentence, persian_sentence in zip(item['balochi'], item['english'], item['urdu'], item['persian']):
flattened_data.append({
"context": item.get('context', category),
"balochi": balochi_sentence,
"english": english_sentence,
"urdu": urdu_sentence,
"persian": persian_sentence
})
return flattened_data
def create_vocab(tokenized_sentences):
vocab = defaultdict(lambda: len(vocab))
vocab['<PAD>'] = 0
vocab['<UNK>'] = 1 # Add unknown token
for sentence in tokenized_sentences:
for word in sentence:
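            # Merely looking up a new word triggers the defaultdict factory,
            # which assigns it the next unused integer id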
vocab[word]
return dict(vocab)
def tokenize_text(text):
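    # Match word runs (Latin or Arabic script), Arabic-range character sequences,
    # common Urdu/Arabic punctuation (؟ ، ۔ ؛ ...), or any other non-space character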
return re.findall(r"[\w']+(?:ء)?|[\u0600-\u06FF]+|[؟،۔٬؛٪٫٬]+|\S", text)
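# Single-layer RNN model: embeds the padded source sentence, runs it through one RNN,
# and maps only the final time step's output to a distribution over the English
# vocabulary, so the model produces exactly one English token per input sentence.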
class SimpleRNNTranslator(nn.Module):
def __init__(self, input_size, output_size, hidden_size):
super(SimpleRNNTranslator, self).__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.rnn = nn.RNN(hidden_size, hidden_size, batch_first=True)
self.fc = nn.Linear(hidden_size, output_size)
def forward(self, x):
embedded = self.embedding(x)
output, hidden = self.rnn(embedded)
output = self.fc(output[:, -1, :]) # Use the last output in the sequence
return output
def train_model(flattened_data, num_epochs=500, hidden_size=256, learning_rate=0.0005):
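    """Train a SimpleRNNTranslator on the flattened data.

    Returns the trained model, both vocabularies, and the maximum Balochi
    sentence length (needed to pad inputs at inference time).
    """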
# Tokenize sentences
balochi_sentences = [tokenize_text(entry['balochi']) for entry in flattened_data]
english_sentences = [tokenize_text(entry['english']) for entry in flattened_data]
# Create vocabularies
balochi_vocab = create_vocab(balochi_sentences)
english_vocab = create_vocab(english_sentences)
input_size = len(balochi_vocab)
output_size = len(english_vocab)
# Prepare data
max_balochi_len = max(len(sentence) for sentence in balochi_sentences)
max_english_len = max(len(sentence) for sentence in english_sentences)
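    # Encode each sentence as token ids and right-pad with <PAD> (id 0) to a fixed length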
def encode_sentences(sentences, vocab, max_len):
encoded = [
[vocab.get(word, vocab['<UNK>']) for word in sentence] + [0] * (max_len - len(sentence))
for sentence in sentences
]
return torch.LongTensor(encoded)
X = encode_sentences(balochi_sentences, balochi_vocab, max_balochi_len)
Y = encode_sentences(english_sentences, english_vocab, max_english_len)
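    # X: [num_samples, max_balochi_len] source ids; Y: [num_samples, max_english_len] target ids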
# Initialize model, loss, and optimizer
model = SimpleRNNTranslator(input_size, output_size, hidden_size)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Training loop
for epoch in range(num_epochs):
model.train()
optimizer.zero_grad()
outputs = model(X) # Shape: [batch_size, output_size]
        # CrossEntropyLoss expects class indices; only the first word of each target
        # sentence is used, so the model is trained as a single-token predictor
        loss = criterion(outputs, Y[:, 0])
loss.backward()
optimizer.step()
print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}")
return model, balochi_vocab, english_vocab, max_balochi_len
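# Greedy inference: encode and pad the Balochi input, run the trained model once,
# and map the argmax prediction back to an English word via the reversed vocabulary.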
def translate(model, balochi_sentence, balochi_vocab, english_vocab, max_balochi_len):
tokenized_sentence = tokenize_text(balochi_sentence)
    encoded_sentence = [balochi_vocab.get(word, balochi_vocab['<UNK>']) for word in tokenized_sentence]  # <UNK> for OOV words
    # Truncate to the training length, then pad with <PAD> (id 0)
    encoded_sentence = encoded_sentence[:max_balochi_len]
    padded_sentence = encoded_sentence + [0] * (max_balochi_len - len(encoded_sentence))
input_tensor = torch.LongTensor([padded_sentence])
output = model(input_tensor)
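    # The model outputs one distribution over the English vocabulary, so the "translation" is a single word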
predicted_indices = torch.argmax(output, dim=1)
reverse_english_vocab = {idx: word for word, idx in english_vocab.items()}
predicted_words = [reverse_english_vocab.get(idx.item(), '<UNK>') for idx in predicted_indices] # Use <UNK> for OOV indices
return " ".join(predicted_words)
# Example usage
json_file_path = "mergedv1.json" # Replace with your JSON file path
flattened_data = flatten_json_data(json_file_path)
# Check if data was loaded
if not flattened_data:
print("Error: No data loaded from JSON. Check file path and format.")
else:
model, balochi_vocab, english_vocab, max_balochi_len = train_model(flattened_data)
balochi_input = "دنیا ءِ ات، پُر راز ءِ ات، ہر کسی ءِ دل ءِ موج ءِ ات"
translated_sentence = translate(model, balochi_input, balochi_vocab, english_vocab, max_balochi_len)
print(f"Balochi Input: {balochi_input}")
print(f"Translated Output: {translated_sentence}")