{
  "source": "joonvena/mvj",
  "score": 2
}
#### File: management/commands/batchrun_execute_job_run.py
```python
import argparse
from typing import Any
from django.core.management.base import BaseCommand
from ...job_running import execute_job_run
from ...models import JobRun
class Command(BaseCommand):
help = "Job Run Executor"
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
parser.add_argument("job_run_id", type=int)
def handle(self, *args: Any, **options: Any) -> None:
job_run_id: int = options.get("job_run_id") # type: ignore
job_run = JobRun.objects.get(pk=job_run_id)
execute_job_run(job_run)
```
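The command takes a single positional `job_run_id` and delegates to `execute_job_run`. A minimal usage sketch, assuming a configured Django environment; the id value is a hypothetical example:

```python
# Minimal sketch (not part of the original repo): the same command can be
# driven programmatically with Django's call_command helper.
from django.core.management import call_command

call_command("batchrun_execute_job_run", 42)  # 42 is a hypothetical JobRun pk
```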
#### File: mvj/leasing/api_functions.py
```python
from decimal import Decimal
from django.utils.dateparse import parse_date
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.views import APIView
from leasing.utils import calculate_increase_with_360_day_calendar
class CalculateIncreaseWith360DayCalendar(APIView):
permission_classes = (permissions.IsAuthenticated,)
def post(self, request, *args, **kwargs):
return self.process_request(request, request.data)
def get(self, request, format=None):
return self.process_request(request, request.query_params)
def process_request(self, request, data):
start_date = parse_date(data.get("start_date"))
end_date = parse_date(data.get("end_date"))
percentage = Decimal(data.get("percentage"))
amount = Decimal(data.get("amount"))
result_dict = {}
result_dict["result"] = calculate_increase_with_360_day_calendar(
start_date, end_date, percentage, amount
)
return Response(result_dict)
```
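A hedged sketch of exercising the view with DRF's `APIRequestFactory`, mirroring the `admin_user` fixture style used in the tests elsewhere in this repo; the URL and parameter values are illustrative only:

```python
# Illustrative test sketch; parameter values are examples, and admin_user
# is assumed to come from pytest-django's fixture as in the other tests.
from rest_framework.test import APIRequestFactory, force_authenticate

def test_calculate_increase_view(admin_user):
    factory = APIRequestFactory()
    view = CalculateIncreaseWith360DayCalendar.as_view()
    request = factory.get("/", {
        "start_date": "2020-08-03",
        "end_date": "2020-10-15",
        "percentage": "3",
        "amount": "150000",
    })
    # The view requires IsAuthenticated, so authenticate the request first.
    force_authenticate(request, user=admin_user)
    response = view(request)
    assert "result" in response.data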
#### File: leasing/migrations/0027_change_rent_base_fields_required.py
```python
import enumfields.fields
from django.db import migrations, models
import leasing.enums
def forwards_func(apps, schema_editor):
ContractRent = apps.get_model("leasing", "ContractRent") # noqa: N806
contract_rents_with_missing_data_qs = ContractRent.objects.exclude(
base_amount__isnull=False
) | ContractRent.objects.exclude(base_amount_period__isnull=False)
for contract_rent in contract_rents_with_missing_data_qs:
if not contract_rent.base_amount:
contract_rent.base_amount = contract_rent.amount
if not contract_rent.base_amount_period:
contract_rent.base_amount_period = contract_rent.period
contract_rent.save()
class Migration(migrations.Migration):
dependencies = [
("leasing", "0026_land_use_agreement_estate_remove_unique"),
]
operations = [
migrations.RunPython(forwards_func, migrations.RunPython.noop),
migrations.AlterField(
model_name="contractrent",
name="base_amount",
field=models.DecimalField(
decimal_places=2, max_digits=10, verbose_name="Base amount"
),
),
migrations.AlterField(
model_name="contractrent",
name="base_amount_period",
field=enumfields.fields.EnumField(
enum=leasing.enums.PeriodType,
max_length=30,
verbose_name="Base amount period",
),
),
]
```
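The `exclude(...) | exclude(...)` union in `forwards_func` selects contract rents where either field is NULL; a sketch of the same query written with `Q` objects, which may read more directly (same `ContractRent` handle as inside `forwards_func`):

```python
# Equivalent selection with Q objects: rows missing base_amount or
# base_amount_period. Sketch only; behavior matches the union above.
from django.db.models import Q

contract_rents_with_missing_data_qs = ContractRent.objects.filter(
    Q(base_amount__isnull=True) | Q(base_amount_period__isnull=True)
)
```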
#### File: leasing/models/land_use_agreement.py
```python
from decimal import ROUND_HALF_UP, Decimal
from fractions import Fraction
from django.contrib.gis.db import models
from django.db import connection, transaction
from django.db.models import Max, Sum
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from enumfields import EnumField
from sequences import get_next_value
from leasing.enums import (
InfillDevelopmentCompensationState,
InvoiceState,
InvoiceType,
LandUseAgreementAttachmentType,
LandUseAgreementLitigantContactType,
LandUseContractType,
)
from leasing.models.contact import Contact
from leasing.models.decision import DecisionMaker
from leasing.models.land_area import AbstractAddress, Plot
from leasing.models.lease import District, Municipality
from leasing.utils import calculate_increase_with_360_day_calendar
from users.models import User
from .mixins import NameModel, TimeStampedSafeDeleteModel
class LandUseAgreementType(NameModel):
"""
In Finnish: Tyyppi
"""
identifier = models.CharField(verbose_name=_("Identifier"), max_length=255)
class LandUseAgreementStatus(NameModel):
"""
In Finnish: Olotila
"""
class LandUseAgreementDefinition(NameModel):
"""
In Finnish: Määritelmä
"""
class LandUseAgreementIdentifier(TimeStampedSafeDeleteModel):
"""
In Finnish: Maankäyttösopimustunnus
"""
# In Finnish: Tunnus
identifier = models.CharField(
verbose_name=_("Identifier"), max_length=255, blank=True, null=True
)
# In Finnish: Tyyppi
type = models.ForeignKey(
LandUseAgreementType,
verbose_name=_("Land use agreement type"),
related_name="+",
on_delete=models.PROTECT,
)
# In Finnish: Kaupunki
municipality = models.ForeignKey(
Municipality,
verbose_name=_("Municipality"),
related_name="+",
on_delete=models.PROTECT,
)
# In Finnish: Kaupunginosa
district = models.ForeignKey(
District, verbose_name=_("District"), related_name="+", on_delete=models.PROTECT
)
# In Finnish: Juokseva numero
sequence = models.PositiveIntegerField(verbose_name=_("Sequence number"))
class Meta:
verbose_name = pgettext_lazy("Model name", "Land use agreement identifier")
verbose_name_plural = pgettext_lazy(
"Model name", "Land use agreement identifiers"
)
unique_together = ("type", "municipality", "district", "sequence")
def save(self, *args, **kwargs):
self.identifier = str(self)
super(LandUseAgreementIdentifier, self).save(*args, **kwargs)
def __str__(self):
"""Returns the land use agreement identifier as a string
The Land use agreement identifier is constructed out of the type identifier, municipality,
district, and sequence, in that order. For example, the identifier
for a land use agreement (MA) in Helsinki (1), Vallila (22), and sequence
number 1 would be MA122-1.
"""
return "{}{}{:02}-{}".format(
self.type.identifier,
self.municipality.identifier,
int(self.district.identifier),
self.sequence,
)
class LandUseAgreement(TimeStampedSafeDeleteModel):
"""
In Finnish: Maankäyttösopimus
"""
# In Finnish: Tunnus
identifier = models.OneToOneField(
LandUseAgreementIdentifier,
verbose_name=_("Land use agreement identifier"),
null=True,
blank=True,
on_delete=models.PROTECT,
)
# In Finnish: Kaupunki
municipality = models.ForeignKey(
Municipality,
verbose_name=_("Municipality"),
related_name="+",
on_delete=models.PROTECT,
)
# In Finnish: Kaupunginosa
district = models.ForeignKey(
District, verbose_name=_("District"), related_name="+", on_delete=models.PROTECT
)
# In Finnish: Määritelmä
definition = models.ForeignKey(
LandUseAgreementDefinition,
verbose_name=_("Land use agreement definition"),
related_name="+",
on_delete=models.PROTECT,
blank=True,
null=True,
)
# In Finnish: Olotila
status = models.ForeignKey(
LandUseAgreementStatus,
verbose_name=_("Land use agreement status"),
related_name="+",
on_delete=models.PROTECT,
)
# In Finnish: Tyyppi
type = models.ForeignKey(
LandUseAgreementType,
verbose_name=_("Land use agreement type"),
related_name="+",
on_delete=models.PROTECT,
)
# In Finnish: Valmistelija
preparer = models.ForeignKey(
User,
verbose_name=_("Preparer"),
related_name="+",
null=True,
blank=True,
on_delete=models.PROTECT,
)
# In Finnish: Sopimuksen tyyppi
land_use_contract_type = EnumField(
LandUseContractType,
verbose_name=_("Contract type"),
null=True,
blank=True,
max_length=30,
)
# In Finnish: Arvioitu toteutumisvuosi
estimated_completion_year = models.PositiveSmallIntegerField(
verbose_name=_("Estimated completion year"), null=True, blank=True
)
# In Finnish: Arvioitu esittelyvuosi
estimated_introduction_year = models.PositiveSmallIntegerField(
verbose_name=_("Estimated introduction year"), null=True, blank=True
)
# In Finnish: Hankealue
project_area = models.CharField(
verbose_name=_("Project area"), null=True, blank=True, max_length=255
)
# In Finnish: Asemakaavan diaarinumero
plan_reference_number = models.CharField(
verbose_name=_("Plan reference number"), null=True, blank=True, max_length=255
)
# In Finnish: Asemakaavan nro.
plan_number = models.CharField(
verbose_name=_("Plan number"), max_length=255, null=True, blank=True
)
# In Finnish: Päättäjä
plan_acceptor = models.ForeignKey(
DecisionMaker,
verbose_name=_("Plan acceptor"),
related_name="land_use_agreements",
null=True,
blank=True,
on_delete=models.PROTECT,
)
# In Finnish: Asemakaavan lainvoimaisuuspvm
plan_lawfulness_date = models.DateField(
verbose_name=_("Plan lawfulness date"), null=True, blank=True
)
# In Finnish: Kiinteistöt
plots = models.ManyToManyField(Plot)
# In Finnish: Asemakaavan käsittelyvaihe
state = EnumField(
InfillDevelopmentCompensationState,
verbose_name=_("State"),
null=True,
blank=True,
max_length=30,
)
class Meta:
verbose_name = pgettext_lazy("Model name", "Land use agreement")
verbose_name_plural = pgettext_lazy("Model name", "Land use agreements")
def __str__(self):
return "Land use agreement #{}".format(self.id)
@transaction.atomic
def create_identifier(self):
if self.identifier_id:
return
if not self.type or not self.municipality or not self.district:
return
# lock LandUseAgreementIdentifier table to prevent a (theoretically) possible
# race condition when increasing the sequence
with connection.cursor() as cursor:
cursor.execute("LOCK TABLE %s" % self._meta.db_table)
max_sequence = LandUseAgreementIdentifier.objects.filter(
type=self.type, municipality=self.municipality, district=self.district
).aggregate(Max("sequence"))["sequence__max"]
if not max_sequence:
max_sequence = 0
identifier = LandUseAgreementIdentifier.objects.create(
type=self.type,
municipality=self.municipality,
district=self.district,
sequence=max_sequence + 1,
)
self.identifier = identifier
def update_compensations(self, compensations_data):
unit_price_data = (
compensations_data.pop("unit_prices_used_in_calculation")
if "unit_prices_used_in_calculation" in compensations_data
else None
)
if hasattr(self, "compensations"):
for attr_name, value in compensations_data.items():
setattr(self.compensations, attr_name, value)
self.compensations.save()
else:
self.compensations = LandUseAgreementCompensations.objects.create(
land_use_agreement=self, **compensations_data
)
if unit_price_data:
self.update_compensations_unit_prices(unit_price_data)
def update_compensations_unit_prices(self, unit_price_data):
unit_price_ids = []
for item in unit_price_data:
match_data = {
"id": item.pop("id") if "id" in item else None,
"compensations": self.compensations,
}
obj, _ = LandUseAgreementCompensationsUnitPrice.objects.update_or_create(
defaults=item, **match_data
)
unit_price_ids.append(obj.id)
# remove unit prices that are not in the data
self.compensations.unit_prices_used_in_calculation.exclude(
id__in=unit_price_ids
).delete()
def save(self, *args, **kwargs):
self.create_identifier()
super().save(*args, **kwargs)
def get_attachment_file_upload_to(instance, filename):
return "/".join(
[
"land_use_agreement_attachments",
str(instance.land_use_agreement.id),
filename,
]
)
class LandUseAgreementAttachment(TimeStampedSafeDeleteModel):
"""
In Finnish: Liitetiedosto
"""
land_use_agreement = models.ForeignKey(
LandUseAgreement, related_name="attachments", on_delete=models.PROTECT
)
# In Finnish: Tyyppi
type = EnumField(
LandUseAgreementAttachmentType, verbose_name=_("Type"), max_length=30
)
# In Finnish: Tiedosto
file = models.FileField(
upload_to=get_attachment_file_upload_to, blank=False, null=False
)
# In Finnish: Lataaja
uploader = models.ForeignKey(
User, verbose_name=_("Uploader"), related_name="+", on_delete=models.PROTECT
)
# In Finnish: Latausaika
uploaded_at = models.DateTimeField(
auto_now_add=True, verbose_name=_("Time uploaded")
)
class Meta:
verbose_name = pgettext_lazy("Model name", "Land use agreement attachment")
verbose_name_plural = pgettext_lazy(
"Model name", "Land use agreement attachments"
)
class LandUseAgreementCompensations(NameModel):
"""
In Finnish: Maankäyttökorvaus
"""
# In Finnish: Maankäyttösopimus
land_use_agreement = models.OneToOneField(
LandUseAgreement,
related_name="compensations",
verbose_name=_("Land use agreement"),
null=True,
on_delete=models.CASCADE,
)
# In Finnish: Rahakorvaus
cash_compensation = models.DecimalField(
verbose_name=_("Cash compensation"),
decimal_places=2,
max_digits=12,
blank=True,
null=True,
)
# In Finnish: Maakorvaus
land_compensation = models.DecimalField(
verbose_name=_("Land compensation"),
decimal_places=2,
max_digits=12,
blank=True,
null=True,
)
# In Finnish: Muu korvaus
other_compensation = models.DecimalField(
verbose_name=_("Other compensation"),
decimal_places=2,
max_digits=12,
blank=True,
null=True,
)
# In Finnish: Ensimmäisen maksuerän korotus
first_installment_increase = models.DecimalField(
verbose_name=_("First installment increase"),
decimal_places=2,
max_digits=12,
blank=True,
null=True,
)
# In Finnish: Katualueen hankinta-arvo
street_acquisition_value = models.DecimalField(
verbose_name=_("Street acquisition value"),
decimal_places=2,
max_digits=12,
blank=True,
null=True,
)
# In Finnish: Katualueen pinta-ala
street_area = models.DecimalField(
decimal_places=2, max_digits=12, blank=True, null=True
)
# In Finnish: Puistoalueen hankinta-arvo
park_acquisition_value = models.DecimalField(
verbose_name=_("Park acquisition value"),
decimal_places=2,
max_digits=12,
blank=True,
null=True,
)
# In Finnish: Puistoalueen pinta-ala
park_area = models.DecimalField(
decimal_places=2, max_digits=12, blank=True, null=True
)
# In Finnish: Muun alueen hankinta-arvo
other_acquisition_value = models.DecimalField(
verbose_name=_("Other acquisition value"),
decimal_places=2,
max_digits=12,
blank=True,
null=True,
)
# In Finnish: Muun alueen pinta-ala
other_area = models.DecimalField(
decimal_places=2, max_digits=12, blank=True, null=True
)
class LandUseAgreementConditionFormOfManagement(NameModel):
"""
In Finnish: Maankäyttösopimuksen ehdon hallintamuoto
"""
class LandUseAgreementCompensationsUnitPrice(NameModel):
"""
In Finnish: Maankäyttökorvauslaskelmassa käytetty yksikköhinta
"""
# In Finnish: Maankäyttökorvaukset
compensations = models.ForeignKey(
LandUseAgreementCompensations,
related_name="unit_prices_used_in_calculation",
on_delete=models.CASCADE,
)
# In Finnish: Kaavayksikön käyttötarkoitus
usage = models.CharField(verbose_name=_("Usage"), blank=True, max_length=255)
# In Finnish: Hallintamuoto
management = models.ForeignKey(
LandUseAgreementConditionFormOfManagement,
verbose_name=_("Management"),
related_name="+",
on_delete=models.PROTECT,
)
# In Finnish: Suojeltu
protected = models.CharField(
verbose_name=_("Protected"), blank=True, max_length=255
)
# In Finnish: Pinta-ala
area = models.DecimalField(
verbose_name=_("Area"), decimal_places=2, max_digits=12, blank=True, null=True,
)
# In Finnish: Yksikköhinta €
unit_value = models.DecimalField(
verbose_name=_("Unit value"),
decimal_places=2,
max_digits=12,
blank=True,
null=True,
)
# In Finnish: Alennus %
discount = models.DecimalField(
verbose_name=_("Discount"),
decimal_places=2,
max_digits=12,
blank=True,
null=True,
)
# In Finnish: Käytetty hinta
used_price = models.DecimalField(
verbose_name=_("Used price"),
decimal_places=2,
max_digits=12,
blank=True,
null=True,
)
class LandUseAgreementEstate(NameModel):
"""
In Finnish: Kohde
"""
land_use_agreement = models.ForeignKey(
LandUseAgreement,
verbose_name=_("Land use agreement"),
related_name="estate_ids",
null=True,
blank=True,
on_delete=models.CASCADE,
)
estate_id = models.CharField(verbose_name=_("Estate id"), max_length=50)
class LandUseAgreementDecisionType(NameModel):
"""
In Finnish: Maankäyttösopimuspäätöksen tyyppi
"""
class Meta(NameModel.Meta):
verbose_name = pgettext_lazy("Model name", "Land use agreement decision type")
verbose_name_plural = pgettext_lazy(
"Model name", "Land use agreement decision types"
)
class LandUseAgreementDecision(TimeStampedSafeDeleteModel):
"""
In Finnish: Maankäyttösopimuspäätös
"""
land_use_agreement = models.ForeignKey(
LandUseAgreement,
verbose_name=_("Land use agreement"),
related_name="decisions",
on_delete=models.PROTECT,
)
# In Finnish: Diaarinumero
reference_number = models.CharField(
verbose_name=_("Reference number"), null=True, blank=True, max_length=255
)
# In Finnish: Päättäjä
decision_maker = models.ForeignKey(
DecisionMaker,
verbose_name=_("Decision maker"),
related_name="+",
null=True,
blank=True,
on_delete=models.PROTECT,
)
# In Finnish: Päätöspäivämäärä
decision_date = models.DateField(
verbose_name=_("Decision date"), null=True, blank=True
)
# In Finnish: Pykälä
section = models.CharField(
verbose_name=_("Section"), null=True, blank=True, max_length=255
)
# In Finnish: Maankäyttösopimuspäätöksen tyyppi
type = models.ForeignKey(
LandUseAgreementDecisionType,
verbose_name=_("Type"),
related_name="+",
null=True,
blank=True,
on_delete=models.PROTECT,
)
# In Finnish: Selite
description = models.TextField(verbose_name=_("Description"), null=True, blank=True)
class Meta:
verbose_name = pgettext_lazy("Model name", "Land use agreement decision")
verbose_name_plural = pgettext_lazy(
"Model name", "Land use agreement decisions"
)
class LandUseAgreementDecisionConditionType(NameModel):
"""
In Finnish: Maankäyttösopimuspäätöksen ehtotyyppi
"""
class Meta(NameModel.Meta):
verbose_name = pgettext_lazy(
"Model name", "Land use agreement decision condition type"
)
verbose_name_plural = pgettext_lazy(
"Model name", "Land use agreement decision condition types"
)
class LandUseAgreementDecisionCondition(TimeStampedSafeDeleteModel):
"""
In Finnish: Maankäyttösopimuspäätöksen ehto
"""
# In Finnish: Päätös
decision = models.ForeignKey(
LandUseAgreementDecision,
verbose_name=_("Decision"),
related_name="conditions",
on_delete=models.PROTECT,
)
# In Finnish: Ehtotyyppi
type = models.ForeignKey(
LandUseAgreementDecisionConditionType,
verbose_name=_("Type"),
related_name="+",
null=True,
blank=True,
on_delete=models.PROTECT,
)
# In Finnish: Valvontapäivämäärä
supervision_date = models.DateField(
verbose_name=_("Supervision date"), null=True, blank=True
)
# In Finnish: Valvottu päivämäärä
supervised_date = models.DateField(
verbose_name=_("Supervised date"), null=True, blank=True
)
# In Finnish: Selite
description = models.TextField(verbose_name=_("Description"), null=True, blank=True)
recursive_get_related_skip_relations = ["decision"]
class Meta:
verbose_name = pgettext_lazy(
"Model name", "Land use agreement decision condition"
)
verbose_name_plural = pgettext_lazy(
"Model name", "Land use agreement decision conditions"
)
class LandUseAgreementAddress(AbstractAddress):
land_use_agreement = models.ForeignKey(
LandUseAgreement,
verbose_name=_("Land use agreement"),
related_name="addresses",
on_delete=models.CASCADE,
)
# In Finnish: Ensisijainen osoite
is_primary = models.BooleanField(verbose_name=_("Is primary?"), default=False)
class Meta:
verbose_name = pgettext_lazy("Model name", "Land use agreement address")
verbose_name_plural = pgettext_lazy(
"Model name", "Land use agreement addresses"
)
class LandUseAgreementCondition(TimeStampedSafeDeleteModel):
"""
In Finnish: Maankäyttösopimuksen ehto
"""
land_use_agreement = models.ForeignKey(
LandUseAgreement,
verbose_name=_("Land use agreement"),
related_name="conditions",
on_delete=models.PROTECT,
)
# In Finnish: Maankäyttösopimuksen ehdon tyyppi
form_of_management = models.ForeignKey(
LandUseAgreementConditionFormOfManagement,
verbose_name=_("Form of management"),
related_name="+",
on_delete=models.PROTECT,
)
# In Finnish: Velvoite k-m2
obligated_area = models.PositiveIntegerField(
verbose_name=_("Obligated area (f-m2)")
)
# In Finnish: Toteutunut k-m2
actualized_area = models.PositiveIntegerField(
verbose_name=_("Actualized area (f-m2)")
)
# In Finnish: Subventio
subvention_amount = models.PositiveIntegerField(verbose_name=_("Subvention amount"))
# In Finnish: Korvaus %
compensation_pc = models.PositiveSmallIntegerField(
verbose_name=_("Compensation percent")
)
# In Finnish: Valvottava pvm
supervision_date = models.DateField(verbose_name=_("Supervision date"))
# In Finnish: Valvottu pvm
supervised_date = models.DateField(verbose_name=_("Supervised date"))
class LandUseAgreementLitigant(TimeStampedSafeDeleteModel):
"""
In Finnish: Osapuoli
"""
land_use_agreement = models.ForeignKey(
LandUseAgreement,
verbose_name=_("Land use agreement"),
related_name="litigants",
on_delete=models.CASCADE,
)
# In Finnish: Viite
reference = models.CharField(
verbose_name=_("Section"), null=True, blank=True, max_length=255
)
contacts = models.ManyToManyField(
Contact,
through="leasing.LandUseAgreementLitigantContact",
related_name="litigants",
)
recursive_get_related_skip_relations = ["land_use_agreement", "contacts"]
class Meta:
verbose_name = pgettext_lazy("Model name", "Land use agreement litigant")
verbose_name_plural = pgettext_lazy(
"Model name", "Land use agreement litigants"
)
class LandUseAgreementLitigantContact(TimeStampedSafeDeleteModel):
land_use_agreement_litigant = models.ForeignKey(
LandUseAgreementLitigant,
verbose_name=_("Land use agreement litigant"),
on_delete=models.PROTECT,
)
# In Finnish: Asiakas
contact = models.ForeignKey(
Contact, verbose_name=_("Contact"), on_delete=models.PROTECT,
)
# In Finnish: Kontaktin tyyppi
type = EnumField(
LandUseAgreementLitigantContactType,
verbose_name=_("Contact type"),
max_length=30,
)
# In Finnish: Alkupäivämäärä
start_date = models.DateField(verbose_name=_("Start date"), null=True, blank=True)
# In Finnish: Loppupäivämäärä
end_date = models.DateField(verbose_name=_("End date"), null=True, blank=True)
recursive_get_related_skip_relations = ["land_use_agreement_litigant"]
class Meta:
verbose_name = pgettext_lazy("Model name", "Land use agreement litigant")
verbose_name_plural = pgettext_lazy(
"Model name", "Land use agreement litigants"
)
def __str__(self):
return "LandUseAgreementLitigantContact id: {} contact: {} period: {} - {}".format(
self.id, self.contact, self.start_date, self.end_date
)
class LandUseAgreementReceivableType(models.Model):
"""
In Finnish: Saamislaji
"""
name = models.CharField(verbose_name=_("Name"), max_length=255)
sap_material_code = models.CharField(
verbose_name=_("SAP material code"), blank=True, max_length=255
)
sap_order_item_number = models.CharField(
verbose_name=_("SAP order item number"), blank=True, max_length=255
)
is_active = models.BooleanField(verbose_name=_("Is active?"), default=True)
class Meta:
verbose_name = pgettext_lazy("Model name", "Receivable type")
verbose_name_plural = pgettext_lazy("Model name", "Receivable types")
def __str__(self):
return self.name
class LandUseAgreementInvoiceSet(models.Model):
land_use_agreement = models.ForeignKey(
"leasing.LandUseAgreement",
verbose_name=_("Land use agreement"),
related_name="invoicesets",
on_delete=models.PROTECT,
)
recursive_get_related_skip_relations = ["land_use_agreement"]
class Meta:
verbose_name = pgettext_lazy("Model name", "Invoice set")
verbose_name_plural = pgettext_lazy("Model name", "Invoice set")
def create_credit_invoiceset(self, receivable_type=None, notes=""):
all_invoices = self.invoices.filter(type=InvoiceType.CHARGE)
if not all_invoices:
raise RuntimeError(
'No refundable invoices found (no invoices with the type "{}" found)'.format(
InvoiceType.CHARGE.value
)
)
credit_invoiceset = LandUseAgreementInvoiceSet.objects.create(
land_use_agreement=self.land_use_agreement
)
for invoice in all_invoices:
credit_invoice = invoice.create_credit_invoice(
receivable_type=receivable_type, notes=notes
)
if credit_invoice:
credit_invoiceset.invoices.add(credit_invoice)
return credit_invoiceset
def create_credit_invoiceset_for_amount(
self, amount=None, receivable_type=None, notes=""
):
if amount and not receivable_type:
raise RuntimeError("receivable_type is required if amount is provided.")
all_invoices = self.invoices.filter(type=InvoiceType.CHARGE)
if not all_invoices:
raise RuntimeError(
'No refundable invoices found (no invoices with the type "{}" found)'.format(
InvoiceType.CHARGE.value
)
)
shares = {}
all_shares = Fraction()
total_row_count = LandUseAgreementInvoiceRow.objects.filter(
invoice__in=all_invoices, receivable_type=receivable_type
).count()
        # Note: rows on this model reference litigants, not tenants
        has_litigants = (
            LandUseAgreementInvoiceRow.objects.filter(
                invoice__in=all_invoices,
                receivable_type=receivable_type,
                litigant__isnull=False,
            ).count()
            == total_row_count
        )
total_row_amount = LandUseAgreementInvoiceRow.objects.filter(
invoice__in=all_invoices, receivable_type=receivable_type
).aggregate(total_row_amount=Sum("amount"))["total_row_amount"]
if amount > total_row_amount:
raise RuntimeError(
'Credit amount "{}" is more that total row amount "{}"!'.format(
amount, total_row_amount
)
)
for invoice in all_invoices:
            if has_litigants:
shares[invoice] = invoice.get_fraction_for_receivable_type(
receivable_type
)
else:
shares[invoice] = Fraction(
invoice.rows.filter(receivable_type=receivable_type).count(),
total_row_count,
)
all_shares += shares[invoice]
if all_shares != 1:
raise RuntimeError("Shares together do not equal 1/1")
        # LandUseAgreementInvoiceSet has no lease or billing period fields;
        # link the credit set to the same land use agreement instead
        credit_invoiceset = LandUseAgreementInvoiceSet.objects.create(
            land_use_agreement=self.land_use_agreement
        )
total_credited_amount = Decimal(0)
for i, (invoice, fraction) in enumerate(shares.items()):
invoice_credit_amount = Decimal(
amount * Decimal(fraction.numerator / fraction.denominator)
).quantize(Decimal(".01"), rounding=ROUND_HALF_UP)
total_credited_amount += invoice_credit_amount
# If this is the last share, check if we need to round
if i == len(shares) - 1 and total_credited_amount.compare(
amount
) != Decimal("0"):
invoice_credit_amount += amount - total_credited_amount
credit_invoice = invoice.create_credit_invoice(
amount=invoice_credit_amount,
receivable_type=receivable_type,
notes=notes,
)
credit_invoiceset.invoices.add(credit_invoice)
return credit_invoiceset
class LandUseAgreementInvoice(TimeStampedSafeDeleteModel):
"""
In Finnish: Lasku
"""
invoiceset = models.ForeignKey(
LandUseAgreementInvoiceSet,
verbose_name=_("Invoice set"),
related_name="invoices",
null=True,
blank=True,
on_delete=models.PROTECT,
)
land_use_agreement = models.ForeignKey(
LandUseAgreement,
verbose_name=_("Land use agreement"),
related_name="invoices",
on_delete=models.PROTECT,
)
# In Finnish: Laskutettu määrä
billed_amount = models.DecimalField(
verbose_name=_("Billed amount"), max_digits=10, decimal_places=2, default=0
)
# In Finnish: Hyvitetty lasku
credited_invoice = models.ForeignKey(
"self",
verbose_name=_("Credited invoice"),
related_name="credit_invoices",
null=True,
blank=True,
on_delete=models.PROTECT,
)
# In Finnish: Eräpäivä
due_date = models.DateField(verbose_name=_("Due date"), null=True, blank=True)
# In Finnish: Laskutuspvm
invoicing_date = models.DateField(
verbose_name=_("Invoicing date"), null=True, blank=True
)
# In Finnish: Tiedote
notes = models.TextField(verbose_name=_("Notes"), blank=True)
# In Finnish: Laskun numero
number = models.PositiveIntegerField(
verbose_name=_("Number"), unique=True, null=True, blank=True
)
# In Finnish: Maksamaton määrä
outstanding_amount = models.DecimalField(
verbose_name=_("Outstanding amount"),
max_digits=10,
decimal_places=2,
default=0,
)
# In Finnish: Lykkäyspvm
postpone_date = models.DateField(
verbose_name=_("Postpone date"), null=True, blank=True
)
# In Finnish: Laskunsaaja
recipient = models.ForeignKey(
Contact, verbose_name=_("Recipient"), related_name="+", on_delete=models.PROTECT
)
# In Finnish: Laskun tila
state = EnumField(
InvoiceState, verbose_name=_("State"), max_length=30, default=InvoiceState.OPEN
)
# In Finnish: Maksupäivä
paid_date = models.DateField(verbose_name=_("Paid date"), null=True, blank=True)
# In Finnish: Lähetyspäivä
sent_date = models.DateField(verbose_name=_("Sent date"), null=True, blank=True)
# In Finnish: Lähetetty SAP:iin
sent_to_sap_at = models.DateTimeField(
verbose_name=_("Sent to SAP at"), null=True, blank=True
)
# In Finnish: Laskun pääoma
# TODO: Remove column and calculate total on-the-fly
total_amount = models.DecimalField(
verbose_name=_("Total amount"), max_digits=10, decimal_places=2, default=0
)
# In Finnish: Laskun tyyppi
type = EnumField(
InvoiceType, verbose_name=_("Type"), max_length=30, default=InvoiceType.CHARGE
)
# In Finnish: Korko laskulle
interest_invoice_for = models.ForeignKey(
"self",
verbose_name=_("Interest invoice for"),
related_name="interest_invoices",
null=True,
blank=True,
on_delete=models.PROTECT,
)
def generate_number(self):
if self.number:
return self.number
with transaction.atomic():
self.number = get_next_value("invoice_numbers", initial_value=1000000)
self.save()
return self.number
def update_amounts(self):
for row in self.rows.all():
row.update_amount()
rows_sum = self.rows.aggregate(sum=Sum("amount"))["sum"]
if not rows_sum:
rows_sum = Decimal(0)
self.billed_amount = rows_sum
self.total_amount = rows_sum
payments_total = self.payments.aggregate(sum=Sum("paid_amount"))["sum"]
if not payments_total:
payments_total = Decimal(0)
# Aggregating like this ignores the manager (i.e. includes deleted rows which we don't want):
# total_credited_amount = self.credit_invoices.aggregate(sum=Sum("rows__amount"))["sum"]
# ... so we have to iterate the rows and tally the sum by hand
total_credited_amount = Decimal(0)
for credit_inv in self.credit_invoices.all():
for row in credit_inv.rows.all():
total_credited_amount += row.amount
self.outstanding_amount = max(
Decimal(0), self.billed_amount - payments_total - total_credited_amount,
)
# Don't mark as refunded unless credited amount is nonzero
if total_credited_amount != Decimal(0) and total_credited_amount.compare(
self.billed_amount
) != Decimal(-1):
self.state = InvoiceState.REFUNDED
elif self.type == InvoiceType.CHARGE and self.outstanding_amount == Decimal(0):
self.state = InvoiceState.PAID
self.save()
class LandUseAgreementInvoiceRow(TimeStampedSafeDeleteModel):
"""
In Finnish: Rivi laskulla
"""
# In Finnish: Lasku
invoice = models.ForeignKey(
LandUseAgreementInvoice,
verbose_name=_("Invoice"),
related_name="rows",
on_delete=models.CASCADE,
)
# In Finnish: Osapuoli
litigant = models.ForeignKey(
LandUseAgreementLitigant,
verbose_name=_("Litigant"),
related_name="+",
null=True,
blank=True,
on_delete=models.PROTECT,
)
# In Finnish: Saamislaji
receivable_type = models.ForeignKey(
LandUseAgreementReceivableType,
verbose_name=_("Receivable type"),
related_name="+",
on_delete=models.PROTECT,
)
# In Finnish: Laskutettava määrä
amount = models.DecimalField(
verbose_name=_("Amount"), max_digits=10, decimal_places=2, default=0
)
# In Finnish: Korvauksen määrä €
compensation_amount = models.DecimalField(
verbose_name=_("Compensation amount"),
decimal_places=2,
max_digits=12,
default=0,
)
# In Finnish: Selite
description = models.TextField(verbose_name=_("Description"), null=True, blank=True)
# In Finnish: Korotuksen määrä %
increase_percentage = models.DecimalField(
verbose_name=_("Increase percentage"),
decimal_places=2,
max_digits=12,
default=0,
)
# In Finnish: Asemakaavan lainvoimaisuuspvm
plan_lawfulness_date = models.DateField(
verbose_name=_("Plan lawfulness date"), null=True, blank=True
)
# In Finnish: Allekirjoituspvm
sign_date = models.DateField(verbose_name=_("Sign date"), null=True, blank=True)
recursive_get_related_skip_relations = ["invoice"]
class Meta:
verbose_name = pgettext_lazy("Model name", "Invoice row")
verbose_name_plural = pgettext_lazy("Model name", "Invoice rows")
def update_amount(self):
self.amount = calculate_increase_with_360_day_calendar(
self.sign_date,
self.plan_lawfulness_date,
self.increase_percentage,
self.compensation_amount,
)
self.save()
class LandUseAgreementInvoicePayment(TimeStampedSafeDeleteModel):
"""
In Finnish: Maksusuoritus
"""
# In Finnish: Lasku
invoice = models.ForeignKey(
LandUseAgreementInvoice,
verbose_name=_("Invoice"),
related_name="payments",
on_delete=models.CASCADE,
)
# In Finnish: Maksettu määrä
paid_amount = models.DecimalField(
verbose_name=_("Paid amount"), max_digits=10, decimal_places=2
)
# In Finnish: Maksettu pvm
paid_date = models.DateField(verbose_name=_("Paid date"))
# In Finnish: Arkistointitunnus
filing_code = models.CharField(verbose_name=_("Name"), blank=True, max_length=35)
recursive_get_related_skip_relations = ["invoice"]
class Meta:
verbose_name = pgettext_lazy("Model name", "Invoice payment")
verbose_name_plural = pgettext_lazy("Model name", "Invoice payments")
```
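The subtle part of `create_credit_invoiceset_for_amount` is the last-share correction: each invoice's share is quantized to cents, so the pieces can drift from the requested total, and the final share absorbs the difference. A standalone sketch of that arithmetic with illustrative values:

```python
# Standalone sketch of the proportional split above: quantize each share to
# cents, then let the last share absorb the rounding residue so the pieces
# sum exactly to the requested amount.
from decimal import Decimal, ROUND_HALF_UP
from fractions import Fraction

amount = Decimal("100.00")
shares = [Fraction(1, 3), Fraction(1, 3), Fraction(1, 3)]

credited = Decimal(0)
pieces = []
for i, fraction in enumerate(shares):
    piece = (
        amount * Decimal(fraction.numerator) / Decimal(fraction.denominator)
    ).quantize(Decimal(".01"), rounding=ROUND_HALF_UP)
    credited += piece
    if i == len(shares) - 1 and credited != amount:
        piece += amount - credited  # the last share absorbs the residue
    pieces.append(piece)

assert sum(pieces) == amount  # 33.33 + 33.33 + 33.34 == 100.00
```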
#### File: tests/report/test_reports.py
```python
from multiprocessing import Event, Value
import pytest
from django.core import mail
from django_q.brokers import get_broker
from django_q.cluster import monitor, pusher, worker
from django_q.queues import Queue
from django_q.tasks import queue_size
from leasing.report.lease.lease_statistic_report import LeaseStatisticReport
@pytest.fixture(autouse=True)
def use_q_cluster_testing(settings):
settings.Q_CLUSTER = {
"name": "DjangORM",
"cpu_affinity": 1,
"testing": True,
"log_level": "DEBUG",
"orm": "default",
}
@pytest.mark.django_db(transaction=True)
def test_simple_async_report_send(rf, admin_user):
broker = get_broker()
assert broker.queue_size() == 0
request = rf.get("/")
request.query_params = {}
request.user = admin_user
report = LeaseStatisticReport()
response = report.get_response(request)
assert response.data
assert broker.queue_size() == 1
# Run async task
task_queue = Queue()
result_queue = Queue()
event = Event()
event.set()
pusher(task_queue, event, broker=broker)
assert task_queue.qsize() == 1
assert queue_size(broker=broker) == 0
task_queue.put("STOP")
worker(task_queue, result_queue, Value("f", -1))
assert task_queue.qsize() == 0
assert result_queue.qsize() == 1
result_queue.put("STOP")
monitor(result_queue)
assert result_queue.qsize() == 0
broker.delete_queue()
    # Test that the report file has been sent via email
assert len(mail.outbox) == 1
assert len(mail.outbox[0].attachments) == 1
```
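The test drives the django-q machinery by hand (`pusher`, `worker`, `monitor`) so the whole round trip runs inside one process. For simpler tests, django-q can also execute a task eagerly; a hedged sketch, assuming a recent django-q and a hypothetical task path:

```python
# Hedged sketch: django-q's sync mode runs the task in the calling process,
# skipping the broker. "leasing.tasks.some_task" is a hypothetical dotted path.
from django_q.tasks import async_task

task_id = async_task("leasing.tasks.some_task", sync=True)
```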
#### File: leasing/tests/test_utils.py
```python
from datetime import date
from leasing.utils import calculate_increase_with_360_day_calendar, days360
def test_days360_year():
date1 = date(year=2020, month=1, day=1)
date2 = date(year=2021, month=1, day=1)
days = days360(date1, date2, True)
assert days == 360
def test_days360_leap_year():
date1 = date(year=2020, month=1, day=15)
date2 = date(year=2020, month=3, day=15)
days = days360(date1, date2, True)
assert days == 60
def test_calculate_increase_with_360_day_calendar():
date1 = date(year=2020, month=8, day=3)
date2 = date(year=2020, month=10, day=15)
increase_percentage = 3
current_amount = 150000.0
expected_amount = 151000.0
calculated_amount = calculate_increase_with_360_day_calendar(
date1, date2, increase_percentage, current_amount
)
assert expected_amount == calculated_amount
```
#### File: mvj/leasing/utils.py
```python
import calendar
from decimal import Decimal
def days360(start_date, end_date, method_eu=False):
"""
Calculates the number of days for a period by using a 360-day calendar.
"""
start_day = start_date.day
start_month = start_date.month
start_year = start_date.year
end_day = end_date.day
end_month = end_date.month
end_year = end_date.year
if start_day == 31 or (
method_eu is False
and start_month == 2
and (
start_day == 29
or (start_day == 28 and calendar.isleap(start_year) is False)
)
):
start_day = 30
if end_day == 31:
if method_eu is False and start_day != 30:
end_day = 1
if end_month == 12:
end_year += 1
end_month = 1
else:
end_month += 1
else:
end_day = 30
return (
end_day
+ end_month * 30
+ end_year * 360
- start_day
- start_month * 30
- start_year * 360
)
def calculate_increase_with_360_day_calendar(
date1, date2, increase_percentage, current_amount
):
day_count = days360(date1, date2, True)
increase_multiplier = Decimal(day_count) / 360 * Decimal(increase_percentage) / 100
amount = Decimal(current_amount) + (
Decimal(current_amount) * Decimal(increase_multiplier)
)
rounded_amount = round(amount, -3)
return rounded_amount
```
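Worked against the unit test earlier: for 2020-08-03 to 2020-10-15 the EU 360-day count is 15 + 10*30 - (3 + 8*30) = 72 days; the multiplier is 72/360 * 3/100 = 0.006; 150000 * 0.006 = 900, giving 150900; and `round(amount, -3)` rounds to the nearest thousand, which yields the expected 151000. Run in the same module as the definitions above:

```python
# Worked example mirroring test_calculate_increase_with_360_day_calendar.
from datetime import date
from decimal import Decimal

days = days360(date(2020, 8, 3), date(2020, 10, 15), True)
assert days == 72
amount = calculate_increase_with_360_day_calendar(
    date(2020, 8, 3), date(2020, 10, 15), 3, 150000.0
)
assert amount == Decimal("151000")  # 150900 rounded to the nearest thousand
```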
{
  "source": "joonvena/tunnistamo",
  "score": 2
}
#### File: tunnistamo/auth_backends/views.py
```python
from django.conf import settings
from django.http import HttpResponse
from django.urls import reverse
from social_django.utils import load_backend, load_strategy
def suomifi_metadata_view(request):
complete_url = reverse('auth_backends:suomifi_metadata')
saml_backend = load_backend(
load_strategy(request),
'suomifi',
redirect_uri=complete_url,
)
metadata, errors = saml_backend.generate_metadata_xml()
if not errors:
return HttpResponse(content=metadata, content_type='text/xml')
def suomifi_logout_view(request, uuid=None):
saml_backend = load_backend(
load_strategy(request),
'suomifi',
redirect_uri=getattr(settings, 'LOGIN_URL'),
)
return saml_backend.process_logout_message()
```
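The `reverse('auth_backends:suomifi_metadata')` call implies an `auth_backends` URL namespace; a hedged sketch of how these views might be wired up. The route names must match the `reverse()` call, but the URL paths themselves are assumptions:

```python
# Hypothetical auth_backends/urls.py sketch; only the app_name and route
# names are implied by the views above, the paths are illustrative.
from django.urls import path

from . import views

app_name = 'auth_backends'
urlpatterns = [
    path('suomifi/metadata/', views.suomifi_metadata_view, name='suomifi_metadata'),
    path('suomifi/logout/<uuid:uuid>/', views.suomifi_logout_view, name='suomifi_logout'),
]
```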
{
  "source": "joonwoo8395/joonwoo",
  "score": 2
}
#### File: bin/py3/AsyncServer.py
```python
import asyncore
import socket
import signal
import os
import sys
import Mobigen.Common.Log_PY3 as Log; Log.Init()
def handler(sigNum, frame):
sys.stderr.write('Catch Signal Number : %s \n' % sigNum)
sys.stderr.flush()
os.kill(os.getpid(), signal.SIGKILL)
# sigNum 15 : Terminate
signal.signal(signal.SIGTERM, handler)
# sigNum 2 : Keyboard Interrupt
signal.signal(signal.SIGINT, handler)
# sigNum 1 : Hangup detected
try:
signal.signal(signal.SIGHUP, signal.SIG_IGN)
except: pass
# sigNum 13 : Broken Pipe
try:
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
except: pass
def call_shutdown():
__LOG__.Trace('!!! SHUTDOWN !!!')
os._exit(1)
# header_size | file_name, file_size | content_data
# length : 4 | length : 0~9999 | length : 1~4 GB
class FileHandler(asyncore.dispatcher_with_send):
def __init__(self, sock):
asyncore.dispatcher_with_send.__init__(self, sock=sock)
self.recv_size = 8192
self.file_name = ''
self.file_size = 0
self.write_size = 0
self.chk_size = 0
self.new_file_flag = True
self.file_obj = None
self.save_path = ''
self.save_file = ''
self.temp_file = ''
def handle_read(self):
try:
data = self.recv(self.recv_size)
if data:
if self.new_file_flag:
length = int(data[:4])
header = str(data[4:length+4],'utf-8')
self.section = header.split('|')[0].strip()
self.file_name = header.split('|')[1].strip()
self.file_size = int(header.split('|')[2].strip())
self.save_path = os.path.dirname(self.file_name)
self.save_file = os.path.join(self.file_name)
self.temp_file = self.save_file + '.tmp'
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
__LOG__.Trace("makedirs: %s"%self.save_path)
self.file_obj = open(self.temp_file, 'wb')
__LOG__.Trace('file open: %s'%self.temp_file)
self.file_obj.write(data[length+4:])
self.write_size = len(data[length+4:])
self.new_file_flag = False
__LOG__.Trace('recv header: size[%s], msg[%04d%s]'%(len(data), length, header) )
else:
self.file_obj.write(data)
self.write_size = self.write_size + len(data)
if self.chk_size <= self.write_size:
__LOG__.Trace('recv file: size[%s], total[%s/%s]'%(len(data), self.write_size, self.file_size) )
self.chk_size = self.chk_size + (self.file_size/10)
except:
__LOG__.Exception()
self.handle_close()
def handle_close(self):
try:
__LOG__.Trace('connection close: %s'%self)
if self.file_obj:
self.file_obj.close()
__LOG__.Trace('file close: %s'%self.temp_file)
if os.path.exists(self.temp_file):
os.rename(self.temp_file, self.save_file)
__LOG__.Trace("rename : %s"%self.save_file)
__LOG__.Trace('STDOUT = %s://%s' % (self.section, self.save_file ))
self.close()
except:
__LOG__.Exception()
class FileServer(asyncore.dispatcher):
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(5)
__LOG__.Trace('binding to {}'.format(self.socket.getsockname()) )
def handle_accepted(self, sock, addr):
__LOG__.Trace('Incoming connection from %s' % repr(addr))
handler = FileHandler(sock)
PROC_NAME = os.path.basename(sys.argv[0])
def main():
if len(sys.argv) < 3:
print ( "Usage : %s IP PORT" % PROC_NAME )
print ( "Example : %s 0.0.0.0 19288" % PROC_NAME )
sys.exit()
ip = sys.argv[1]
port = int(sys.argv[2])
#path = sys.argv[3]
try :
log_suffix = sys.argv[3]
except :
log_suffix = os.getpid()
if '-d' not in sys.argv :
log_path = os.path.expanduser('~/BPETL/log/Async')
try : os.makedirs(log_path)
except : pass
log_name = '%s_%s.log' % ( os.path.basename(sys.argv[0]), log_suffix )
log_file = os.path.join( log_path, log_name )
Log.Init(Log.CRotatingLog(log_file, 10240000, 9))
__LOG__.Trace('START')
try:
server = FileServer(ip, port)
asyncore.loop()
except:
__LOG__.Exception()
__LOG__.Trace('END')
if __name__ == '__main__':
main()
```
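The framing described in the header comment (4-byte ASCII header length, then `section|file_name|file_size`, then the raw content) implies a client along these lines; a minimal sketch, with host, port, and paths as illustrative assumptions:

```python
# Hedged client sketch for the framing above: a 4-byte ASCII header length
# (the server allows headers up to 9999 bytes), then "section|file_name|
# file_size", then the raw file bytes.
import os
import socket

def send_file(host, port, section, file_name):
    file_size = os.path.getsize(file_name)
    header = '%s|%s|%s' % (section, file_name, file_size)
    header_bytes = header.encode('utf-8')
    with socket.create_connection((host, port)) as sock:
        sock.sendall(b'%04d' % len(header_bytes) + header_bytes)
        with open(file_name, 'rb') as f:
            while True:
                chunk = f.read(8192)
                if not chunk:
                    break
                sock.sendall(chunk)

# send_file('127.0.0.1', 19288, 'file', '/tmp/example.dat')  # illustrative
```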
#### File: bin/py3/EventFilter.py
```python
import sys
import os
import re
import time
import getopt
import signal
import itertools
from collections import defaultdict
from collections import deque
SHUTDOWN = False
def shutdown(sigNum, frame):
global SHUTDOWN
SHUTDOWN = True
sys.stderr.write('Catch Signal : %s \n\n' % sigNum)
sys.stderr.flush()
signal.signal(signal.SIGTERM, shutdown) # sigNum 15 : Terminate
signal.signal(signal.SIGINT, shutdown) # sigNum 2 : Interrupt
def usage():
    print("""Usage : python [file][option] ..
    [-h | --help ] : help message
    [-g (word) | --grep (word) ] : grep (word) about STDIN
    [-n (number) | --number (number) ] : caching n about STDIN
    [-r (regex) | --regular (regex) ] : grep (regex) about STDIN
    [-m (number) | --maxlensize (number) ] : set deque max size
    """, file=sys.stderr)
    print("""Exam :
    1. python test.py -g .txt -n 3
    2. python test.py -g .txt -r '\d'
    3. python test.py -t 20 -n 2 -r '\d' -g txt -g csv --grep jpg
    """, file=sys.stderr)
class EFEventModule(object) :
def __init__(self,options,args):
object.__init__(self)
self.options = options
self.args = args
self.stacksize = 1
self.deq = deque(maxlen=1000)
self.olist = defaultdict(lambda:[])
self.option_onlyn_flag = True
def processing(self, stdin):
#for option e
for eop in self.olist['-e']:
if eop in stdin :
return
#for option g
for patt in self.olist['-g'] :
if patt in stdin :
self.deq.append(stdin)
#print 'g flag' , self.deq
return
#for option r
for patt in self.olist['-r'] :
if re.search(patt, stdin) :
self.deq.append(stdin)
#print 'r flag',self.deq
return
#for option only n
if self.option_onlyn_flag:
self.deq.append(stdin)
def run(self):
self.preprocessing()
while not SHUTDOWN:
stdin = sys.stdin.readline()
stdin_strip = stdin.strip()
#for optionprocessing
self.processing(stdin_strip)
#for afterprocessing
if self.deq.count(stdin_strip) == int(self.stacksize):
sys.stdout.write("%s" % stdin)
sys.stdout.flush()
deqcopy = deque(itertools.islice(self.deq,0,len(self.deq)))
for d in deqcopy:
if stdin_strip == d:
self.deq.remove(d)
#stderr
sys.stderr.write("%s" % stdin)
sys.stderr.flush()
def preprocessing(self):
try:
for op, p in self.options:
if op in ('-h','--help'):
#option h
usage()
os._exit(1)
elif op in ('-n','--number'):
self.stacksize = p
elif op in ('-g','--grep'):
self.olist['-g'].append(p)
self.option_onlyn_flag = False
elif op in ('-e','--except'):
self.olist['-e'].append(p)
elif op in ('-r','--regular'):
self.olist['-r'].append(p)
self.option_onlyn_flag = False
elif op in ('-m','--maxlensize'):
#option m
self.deq = deque(maxlen=int(p))
else:
raise Exception("unhandled option")
except ValueError:
raise Exception("you need to check input type")
def __del__(self):
pass
def main():
try:
if len(sys.argv)==1 :
usage()
os._exit(1)
options, args = getopt.getopt(sys.argv[1:],'g:hn:e:r:m:',['grep=','help','number=','except=','regular=', 'maxlensize='])
obj = EFEventModule(options,args)
obj.run()
except getopt.GetoptError:
raise Exception("unhandled option, please [filename][-h|--help]")
if __name__== "__main__":
main()
```
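The core mechanic of this filter: every matching line is appended to a bounded deque, and a line is only written to stdout once it has accumulated `-n` occurrences, at which point its queued copies are purged. A minimal standalone sketch of that threshold logic:

```python
# Minimal sketch of the deque-based threshold filter above: emit a line only
# after it has been seen n times, then forget its queued copies.
from collections import deque

def threshold_filter(lines, n, maxlen=1000):
    seen = deque(maxlen=maxlen)
    for line in lines:
        seen.append(line)
        if seen.count(line) == n:
            yield line
            while line in seen:  # purge all queued copies of this line
                seen.remove(line)

# list(threshold_filter(["a", "b", "a", "a"], 3)) -> ["a"]
```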
#### File: bin/py3/FileGlob.py
```python
import os
import sys
import time
import datetime
import signal
import subprocess
import configparser as ConfigParser
from pathlib import Path
import shutil
import re
#import $MOBIGEN_LIB$
import Mobigen.Common.Log_PY3 as Log; Log.Init()
#import $PROJECT_LIB$
#import Mobigen.Utils.LogClient as c_log
#- shutdown ----------------------------------------------------
SHUTDOWN = False
def shutdown(signalnum, handler):
global SHUTDOWN
SHUTDOWN = True
sys.stderr.write('Catch Signal: %s \n' % signalnum)
sys.stderr.flush()
signal.signal(signal.SIGTERM, shutdown) # sigNum 15 : Terminate
signal.signal(signal.SIGINT, shutdown) # sigNum 2 : Keyboard Interrupt
signal.signal(signal.SIGHUP, shutdown) # sigNum 1 : Hangup detected
signal.signal(signal.SIGPIPE, shutdown) # sigNum 13 : Broken Pipe
'''
On Windows, signal() can only be called with
SIGABRT, SIGFPE,SIGILL, SIGINT, SIGSEGV, or SIGTERM.
A ValueError will be raised in any other case.
'''
#- def global setting ----------------------------------------------------
def makedirs(path) :
try :
os.makedirs(path)
__LOG__.Trace( 'makedirs : %s' % path )
except : pass
def stdout(msg) :
sys.stdout.write('%s\n' % msg)
sys.stdout.flush()
__LOG__.Trace('Std OUT [ %s ]' % msg)
#- Class ----------------------------------------------------
class ClassName:
def __init__(self, conf) :
#open
__LOG__.Trace("__init__")
section = 'MONITOR'
self.watch_path = conf.get(section, 'WATCH_PATH')
if not os.path.isdir(self.watch_path): raise Exception( 'WATCH_PATH is not directory' )
#makedirs(self.watch_path)
        if self.watch_path.endswith('/') : self.watch_path = self.watch_path[:-1]  # strip a trailing slash so later path splitting works
try: self.watch_interval = conf.getint(section, 'WATCH_INTERVAL')
except:
try: self.watch_interval = conf.getint('GENERAL', 'WATCH_INTERVAL')
except: self.watch_interval = 5
try: self.dir_recursive = conf.getboolean(section, 'DIR_RECURSIVE')
except:
try: self.dir_recursive = conf.getboolean('GENERAL', 'DIR_RECURSIVE')
except: self.dir_recursive = True
try: self.watch_file_startswith = conf.get(section, 'WATCH_FILE_STARTSWITH').split(';')
except: self.watch_file_startswith = []
__LOG__.Watch(self.watch_file_startswith)
try: self.watch_file_endswith = conf.get(section, 'WATCH_FILE_ENDSWITH')
except: self.watch_file_endswith = ''
try: self.watch_file_match = conf.get(section, 'WATCH_FILE_MATCH')
except: self.watch_file_match = ''
try: self.watch_file_match_pattern = conf.get(section, 'WATCH_FILE_MATCH_PATTERN')
except: self.watch_file_match_pattern = ''
try: self.comp_flag = conf.getboolean(section, 'COMP_FLAG')
except:
try: self.comp_flag = conf.getboolean('GENERAL', 'COMP_FLAG')
except: self.comp_flag = False
try: self.comp_interval = conf.getint(section, 'COMP_INTERVAL')
except:
try: self.comp_interval = conf.getint('GENERAL', 'COMP_INTERVAL')
except: self.comp_interval = 5
try: self.comp_move = conf.getboolean(section, 'COMP_MOVE')
except:
try: self.comp_move = conf.getboolean('GENERAL', 'COMP_MOVE')
except: self.comp_move = True
try: centerName = conf.get(section, 'CENTER_NAME')
except:
try: centerName = conf.get('GENERAL', 'CENTER_NAME')
except: centerName = ''
try: centerId = conf.get(section, 'CENTER_ID')
except:
try: centerId = conf.get('GENERAL', 'CENTER_ID')
except: centerId = ''
self.DISK_FLAG_PER = conf.getint(section, 'DISK_PERCENT')
self.data_partition = conf.get(section, 'DATA_PARTITION')
# self.logInit(centerName, centerId)
# def logInit(self, centerName, centerId) :
# self.center_name = centerName
# self.center_id = centerId
# self.process_name = os.path.basename(sys.argv[0])
# self.process_type = '일반모듈'
# self.start_time = ''
# self.end_time = ''
# self.std_in = ''
# self.std_out = ''
# self.in_file_size = ''
# self.in_file_row_cnt = ''
# self.out_file_size = ''
# self.out_file_row_cnt = ''
# self.table_info = ''
# self.key_info = ''
# self.partition_info = ''
# self.result_flag = ''
# self.success_cnt = ''
# self.fail_reason = ''
# self.header_cnt = ''
# self.comp_row_cnt = ''
# self.error_column_length = ''
# self.error_check_notnull = ''
# self.error_check_type_legth = ''
# self.error_check_format = ''
# self.error_change_cont = ''
# def logSend(self, std_out) :
# if '://' in std_out :
# std_out = std_out.split('://')[1]
# self.std_out = std_out
# if not os.path.exists(std_out) :
# self.out_file_size = ''
# self.out_file_row_cnt = ''
# else :
# self.out_file_size = str(os.path.getsize(std_out))
# if std_out.upper().endswith('.CSV') or std_out.upper().endswith('.DAT') :
# #self.out_file_row_cnt = subprocess.check_output(["wc","-l", std_out]).split()[0]
# self.out_file_row_cnt = subprocess.getstatusoutput('/usr/bin/wc -l %s' % std_out)[-1].split()[0]
# self.end_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
# sendLogData = '|^|'.join(map(str, [self.center_name, self.center_id, self.process_name, self.process_type, self.start_time, self.end_time, self.std_in, self.std_out, self.in_file_size, self.out_file_size, self.in_file_row_cnt, self.out_file_row_cnt, self.table_info, self.key_info, self.partition_info, self.result_flag, self.success_cnt, self.fail_reason, self.header_cnt, self.comp_row_cnt, self.error_column_length, self.error_check_notnull, self.error_check_type_legth, self.error_check_format, self.error_change_cont]))
# c_log.irisLogClient().log("SendLog://{}\n".format(sendLogData))
# __LOG__.Trace('send Log Data : {}'.format(sendLogData))
#2021 04 21 Park
def checkDisk(self):
DISK_FLAG = False
disk_used = shutil.disk_usage(self.data_partition)
use_per = int(disk_used.used/disk_used.total * 100)
if self.DISK_FLAG_PER > use_per :
DISK_FLAG = True
else :
            __LOG__.Trace('Disk used : %s percent | time sleep 60' % str(use_per) )
return DISK_FLAG
def processing(self, file_name) :
if not os.path.isfile(file_name): return
startswith_flag = False
for watch_startswith in self.watch_file_startswith:
if os.path.basename(file_name).startswith( watch_startswith ): startswith_flag = True
#if self.watch_file_startswith != '' and not os.path.basename(file_name).startswith( self.watch_file_startswith ) : return
if not startswith_flag: return
if self.watch_file_endswith != '' and not os.path.basename(file_name).endswith( self.watch_file_endswith ) : return
if self.watch_file_match != '' and not self.watch_file_match in file_name : return
if self.watch_file_match_pattern != '' and not re.findall( self.watch_file_match_pattern, file_name ) : return
if self.comp_move:
comp_watch_path = self.watch_path + '_COMP'
dirs, names = os.path.split( file_name )
comp_path = comp_watch_path + dirs.split( self.watch_path )[1]
makedirs( comp_path )
comp_file_name = os.path.join( comp_path, names )
if os.path.isfile( comp_file_name ) :
head, tail = os.path.splitext(comp_file_name)
comp_file_name = head + '_' + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + tail
shutil.move( file_name, comp_file_name )
__LOG__.Trace( "File Moved From : {} To : {}".format(file_name, comp_file_name) )
stdout( 'file://%s' % comp_file_name )
# self.logSend(comp_file_name)
#############################################################################
def run(self):
while not SHUTDOWN :
# self.start_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
try:
if self.dir_recursive:
configfiles = Path(self.watch_path).glob('**/*')
else:
configfiles = Path(self.watch_path).glob('*')
                ######################## Added by Kim Jun-woo 2021-05-31 ######################################
sortedFiles = sorted(list(configfiles), key=os.path.getmtime)
######################################################################################
obj_list = []
for glob_file in sortedFiles:
if len(self.watch_file_startswith) != 0 and self.watch_file_endswith != '' :
for watch_startswith in self.watch_file_startswith:
if os.path.basename(glob_file).startswith( watch_startswith ) and os.path.basename(glob_file).endswith( self.watch_file_endswith ):
if glob_file not in obj_list: obj_list.append(glob_file)
elif len(self.watch_file_startswith) != 0 and self.watch_file_endswith == '':
for watch_startswith in self.watch_file_startswith:
if os.path.basename(glob_file).startswith( watch_startswith ):
if glob_file not in obj_list: obj_list.append(glob_file)
elif len(self.watch_file_startswith) == 0 and self.watch_file_endswith != '':
if os.path.basename(glob_file).endswith( self.watch_file_endswith ):
if glob_file not in obj_list: obj_list.append(glob_file)
__LOG__.Watch(obj_list)
#2021 04 21 Park
if self.checkDisk():
for file_name in obj_list:
if not os.path.isfile(file_name): continue
if self.comp_flag:
first_mtime = os.path.getmtime(file_name)
time.sleep(self.comp_interval)
if first_mtime != os.path.getmtime(file_name): continue
if not self.checkDisk() : break
self.processing(file_name)
time.sleep(self.watch_interval)
time.sleep(self.watch_interval)
except FileNotFoundError:
#__LOG__.Exception()
continue
except:
if not SHUTDOWN : __LOG__.Exception()
#- main function ----------------------------------------------------
def main():
module = os.path.basename(sys.argv[0])
if len(sys.argv) < 3:
        sys.stderr.write('Usage : %s conf {option:[[log_arg]-d]}\n' % module )
#python3 /home/test/Project_name/bin/py3/BaseModule.py SECTION /home/test/Project_name/conf/BaseModule.conf
#python3 /home/test/Project_name/bin/py3/BaseModule.py SECTION /home/test/Project_name/conf/BaseModule.conf 0
#python3 /home/test/Project_name/bin/py3/BaseModule.py SECTION /home/test/Project_name/conf/BaseModule.conf -d
sys.stderr.flush()
os._exit(1)
config_file = sys.argv[1]
conf = ConfigParser.ConfigParser()
conf.read(config_file)
if '-d' not in sys.argv :
etc_argv = sys.argv[2:]
log_arg = ''
if len(sys.argv[2:]) > 0 :
log_arg = '_' + sys.argv[2]
log_path = conf.get('GENERAL', 'LOG_PATH')
makedirs( log_path )
log_file = os.path.join(log_path, '%s%s.log' % (os.path.splitext(module)[0], log_arg ))
Log.Init(Log.CRotatingLog(log_file, 10240000, 9))
else:
Log.Init()
pid = os.getpid()
__LOG__.Trace('============= %s START [pid:%s]==================' % ( module, pid ))
ClassName(conf).run()
__LOG__.Trace('============= %s END [pid:%s]====================' % (module, pid ))
#- if name start ----------------------------------------------
if __name__ == "__main__" :
main()
```
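A hedged sketch of a config file the module would accept, derived only from the option names read in `__init__`; all values are illustrative:

```python
# Hedged sketch: an in-memory config with the option names FileGlob reads.
# All paths and values are illustrative assumptions.
import configparser

conf = configparser.ConfigParser()
conf.read_string("""
[GENERAL]
LOG_PATH = /home/test/Project_name/log

[MONITOR]
WATCH_PATH = /home/test/Project_name/data
WATCH_INTERVAL = 5
DIR_RECURSIVE = true
WATCH_FILE_STARTSWITH = PREFIX_A;PREFIX_B
WATCH_FILE_ENDSWITH = .csv
COMP_FLAG = true
COMP_INTERVAL = 5
COMP_MOVE = true
DISK_PERCENT = 90
DATA_PARTITION = /
""")
```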
#### File: bin/py3/FilePatternMonitor.py
```python
try : import ConfigParser
except : import configparser as ConfigParser
import glob
import os
import signal
import sys
import time
import shutil
import datetime
import Mobigen.Common.Log_PY3 as Log;
Log.Init()
SHUTDOWN = False
def handler(signum, frame):
global SHUTDOWN
SHUTDOWN = True
__LOG__.Trace("Catch Signal = %s" % signum)
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGHUP, handler)
signal.signal(signal.SIGPIPE, handler)
def mkdirs(path) :
try : os.makedirs(path)
except OSError as exc: #Python > 2.5
#if exc.errno == errno.EEXIST and os.path.isdir(path) :
pass
#else : raise
class FilePatternMonitor:
def __init__(self, module, section, cfgfile):
self.module = module
self.section = section
self.cfgfile = cfgfile
self.interval = 10
self.monitor_path = "~/"
self.file_patt = "*"
self.ext_str = ""
self.search_patt = self.file_patt + self.ext_str
self.sequence = "name"
self.file_sequence = lambda x: x
if '-d' not in sys.argv:
self.set_logger()
else: Log.Init()
self.set_config(self.section)
def set_config(self, section):
cfg = ConfigParser.ConfigParser()
cfg.read(self.cfgfile)
if cfg.has_option(section, "LS_INTERVAL"):
self.interval = cfg.getint(section, "LS_INTERVAL")
elif self.cfg.has_option("GENERAL", "LS_INTERVAL"):
self.interval = cfg.getint("GENERAL", "LS_INTERVAL")
if cfg.has_option(section, "DIRECTORY"):
self.monitor_path = cfg.get(section, "DIRECTORY")
else:
__LOG__.Trace("Error: Require monitoring directory option on config")
sys.exit()
if cfg.has_option(section, "INDEX_FILE"):
self.idx_file = cfg.get(section, "INDEX_FILE")
else:
__LOG__.Trace("Error: Require index file path on config")
sys.exit()
if cfg.has_option(section, "FILE_PATTERN"):
self.file_patt = cfg.get(section, "FILE_PATTERN")
else:
__LOG__.Trace("Error: Require file pattern on config")
sys.exit()
if cfg.has_option(section, "EXTEND_STR"):
self.ext_str = cfg.get(section, "EXTEND_STR")
if not os.path.isdir(os.path.split(self.idx_file)[0]):
os.makedirs(os.path.split(self.idx_file)[0])
if cfg.has_option(section, "FILE_SORT"):
self.sequence = cfg.get(section, "FILE_SORT")
#########################################################################
if cfg.has_option(section, "COMP_DIR") :
self.COMP_DIR = cfg.get(section, "COMP_DIR")
if cfg.has_option(section, "CHECK_INTERVAL") :
self.CHECK_INTERVAL = cfg.getint(section, "CHECK_INTERVAL")
#########################################################################
if self.sequence == "basename":
self.file_sequence = lambda x: os.path.basename(x)
elif self.sequence == "atime":
self.file_sequence = lambda x: os.stat(x).st_atime
elif self.sequence == "ctime":
self.file_sequence = lambda x: os.stat(x).st_ctime
elif self.sequence == "mtime":
self.file_sequence = lambda x: os.stat(x).st_mtime
def set_logger(self):
cfg = ConfigParser.ConfigParser()
cfg.read(self.cfgfile)
log_path = cfg.get("GENERAL", "LOG_PATH")
log_file = os.path.join(log_path, "%s_%s.log" % (self.module, self.section))
Log.Init(Log.CRotatingLog(log_file, 10240000, 9))
def stdout(self, msg):
sys.stdout.write(msg + "\n")
sys.stdout.flush()
__LOG__.Trace("Std OUT : %s" % (msg))
def load_index(self):
idx_file = self.idx_file
if os.path.exists(idx_file):
with open(idx_file, "r") as rfd:
curr = rfd.read().strip()
__LOG__.Trace("Load index : %s" % curr)
if not os.path.exists(curr):
__LOG__.Trace("File indicated by index file not exists : %s" % curr)
curr = None
if curr == '':
curr = None
else:
__LOG__.Trace("Index file not exists : %s" % idx_file)
curr = None
return curr
def dump_index(self, curr_file):
        # Write the index file
with open(self.idx_file, "w") as wfd:
wfd.write(curr_file + "\n")
__LOG__.Trace("Dump Index : %s" % curr_file)
def get_file_list(self):
        # List the files under monitor_path that match the search pattern
file_list = glob.glob(os.path.join(self.monitor_path, self.search_patt))
file_list.sort(key=self.file_sequence)
return file_list
    def get_new_list(self, curr_file):
        # Return the files that sort after curr_file in the configured order.
        file_list = self.get_file_list()
        for file_name in file_list:
            if self.file_sequence(curr_file) < self.file_sequence(file_name):
                return file_list[file_list.index(file_name):]
        return []
def fileModifiedCheck(self, filePath, lastMTime=0) :
time.sleep(self.CHECK_INTERVAL)
        # Added 2020-10-13 (김준우): check the file mtime; recurse until it stops changing
fileMTime = os.path.getmtime(filePath)
returnCompPath = None
if fileMTime > lastMTime :
__LOG__.Trace("Updating file..... Last Mtime : {}, Now Mtime : {}".format(lastMTime, fileMTime))
returnCompPath = self.fileModifiedCheck(filePath, fileMTime)
elif fileMTime == lastMTime :
__LOG__.Trace("File Transform complete Last Mtime : {}, Now Mtime : {}".format(lastMTime, fileMTime))
if not os.path.exists(self.COMP_DIR) :
mkdirs(self.COMP_DIR)
compPath = os.path.join( self.COMP_DIR, os.path.basename(filePath) )
if os.path.exists(compPath) :
compPath = os.path.join( self.COMP_DIR, os.path.basename(filePath)+ '.' + datetime.datetime.today().strftime('%Y%m%d%H%M%S') )
shutil.move( filePath, compPath )
__LOG__.Trace( "Move From : {} To : {}".format(filePath, compPath) )
returnCompPath = compPath
else :
__LOG__.Trace("invalid Time Last Mtime : {}, Now Mtime : {}".format(lastMTime, fileMTime))
returnCompPath = ''
return returnCompPath
    def run(self):
        compFilePath = None
        while not SHUTDOWN :
            new_file_list = self.get_file_list()
            for file_name in new_file_list:
                # Strip the optional EXTEND_STR suffix to get the real file name.
                if len(self.ext_str) == 0:
                    out_file = file_name
                else:
                    out_file = file_name[:-(len(self.ext_str))]
                # Wait until the file stops growing, move it to COMP_DIR,
                # then hand the completed path to the next module.
                compFilePath = self.fileModifiedCheck(out_file)
                __LOG__.Trace( "File Check : {}".format(compFilePath) )
                if os.path.exists(compFilePath) :
                    self.stdout("file://%s" % compFilePath)
            time.sleep(self.interval)
def main():
module = os.path.basename(sys.argv[0])
    if len(sys.argv) < 3:
        print( "Usage : %s Section ConfigFile" % module, file=sys.stderr )
        print( "Exam : %s LTE_CDR /home/eva/E2ES/conf/FilePatternMonitor.conf" % module, file=sys.stderr )
return
section = sys.argv[1]
cfgfile = sys.argv[2]
FilePatternMonitor(module, section, cfgfile).run()
if __name__ == "__main__":
try:
main()
except:
__LOG__.Exception()
```
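For reference, the options consumed by `set_config()` above map onto an INI layout like the one built below. This is a minimal sketch only: the section name `LTE_CDR` follows the usage example in `main()`, and every path and value here is a hypothetical placeholder, not a shipped configuration.

```python
# Build a minimal FilePatternMonitor config; all paths are hypothetical examples.
import configparser

conf = configparser.ConfigParser()
conf["GENERAL"] = {"LOG_PATH": "/tmp/e2es/log"}   # used by set_logger()
conf["LTE_CDR"] = {
    "LS_INTERVAL": "10",                          # seconds between directory scans
    "DIRECTORY": "/tmp/e2es/in",                  # monitored directory (required)
    "INDEX_FILE": "/tmp/e2es/idx/lte_cdr.idx",    # index file path (required)
    "FILE_PATTERN": "*.cdr",                      # glob pattern (required)
    "EXTEND_STR": "",                             # optional suffix appended to the pattern
    "FILE_SORT": "mtime",                         # name | basename | atime | ctime | mtime
    "COMP_DIR": "/tmp/e2es/comp",                 # where stable files are moved
    "CHECK_INTERVAL": "5",                        # seconds between mtime checks
}

with open("/tmp/FilePatternMonitor.conf", "w") as fd:
    conf.write(fd)
```

With such a file in place, `python3 FilePatternMonitor.py LTE_CDR /tmp/FilePatternMonitor.conf` would start the monitor loop shown above.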
#### File: bin/py3/ReadDir_v2.py
```python
import os
import sys
import time
import datetime
import signal
import subprocess
import glob
import configparser as ConfigParser
#import $MOBIGEN_LIB$
import Mobigen.Common.Log_PY3 as Log; Log.Init()
import Mobigen.Database.Postgres_py3 as pg
import Mobigen.Database.iris_py3 as iris
import Mobigen.API.M6_PY3 as M6
import Mobigen.Utils.LogClient as logClient
#import $PROJECT_LIB$
#import pandas,numpy
import pandas as pd
import numpy as np
#- shutdown ----------------------------------------------------
SHUTDOWN = False
def shutdown(signalnum, handler):
global SHUTDOWN
SHUTDOWN = True
sys.stderr.write('Catch Signal: %s \n' % signalnum)
sys.stderr.flush()
signal.signal(signal.SIGTERM, shutdown) # sigNum 15 : Terminate
signal.signal(signal.SIGINT, shutdown) # sigNum 2 : Keyboard Interrupt
signal.signal(signal.SIGHUP, shutdown) # sigNum 1 : Hangup detected
signal.signal(signal.SIGPIPE, shutdown) # sigNum 13 : Broken Pipe
'''
On Windows, signal() can only be called with
SIGABRT, SIGFPE,SIGILL, SIGINT, SIGSEGV, or SIGTERM.
A ValueError will be raised in any other case.
'''
#- def global setting ----------------------------------------------------
def stderr(msg) :
sys.stderr.write(msg + '\n')
sys.stderr.flush()
__LOG__.Trace('Std ERR : %s' % msg)
def makedirs(path) :
try :
os.makedirs(path)
__LOG__.Trace( path )
except : pass
#- Class ----------------------------------------------------
class DirObservation:
def __init__(self, module, conf, section) :
#open
__LOG__.Trace("__init__")
        pd.options.display.float_format = '{:.0f}'.format  # original '{:.f}' is an invalid format spec
#sheet
try : self.conf_sheet_name_list = conf.get(section, 'SHEET_NAMES').split(',')
except : self.conf_sheet_name_list = []
#sep
try : self.conf_out_sep = conf.get("GENERAL","OUT_DATA_SEP")
except : self.conf_out_sep = '^'
#sheet out name
try : self.conf_sheet_out_name_list = conf.get(section,"SHEET_OUT_NAMES").split(',')
except : self.conf_sheet_out_name_list = []
#save dir
try : self.conf_save_dir = conf.get("GENERAL","SAVE_DAT_DIR")
except : self.conf_save_dir = ""
else : makedirs(self.conf_save_dir)
#pgsql table sql
try : self.conf_table_sql_path = conf.get(section,"TABLE_SQL_PATH")
except : self.conf_table_sql_path = ''
#Pgsql info
try : self.conf_psql_connection = conf.get("GENERAL","PGSQL_CLASS")
except : raise Exception("Please check configure PGSQL_CLASS")
#IRIS info
try : self.conf_iris_conn = conf.get("GENERAL","IRIS_CLASS")
except : raise Exception("Please check configure IRIS_CLASS")
#time init
#self.cur_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
try : self.conf_dat_remove_list = conf.get(section,"REMOVE_NAMES").split(',')
except : self.conf_dat_remove_list = ['DMN','TERM']
try : self.conf_word_dict_flag = conf.getboolean(section,"WORD_SHEET_FLAG")
except : self.conf_word_dict_flag = False
self.table_name_list = [("CNTR_STDZN_%s_DFNTN_TB" % name) for name in self.conf_sheet_out_name_list]
##WORD
if 'TERM' in self.conf_sheet_out_name_list :
self.table_name_list.append("CNTR_DICT_WORD_DFNTN_TB")
self.conf_dat_remove_list.append("WORD")
#CENTERS
try : self.conf_centers = conf.get(section,"CENTERS")
except : self.conf_centers = 'ALL'
        # MIN MAX EXCEPT CENTER (comma-separated; a bare string would turn the
        # later "in" test into a substring match)
        try : self.mm_except_center_list = [ c.strip().upper() for c in conf.get(section,'MIN_MAX_EXCEPT_CENTER').split(',') ]
        except : self.mm_except_center_list = []
#CTL PATH
try : self.conf_ctl_path = conf.get("GENERAL","IRIS_CTL_PATH")
except : raise Exception("Cannot read conf")
#IRIS(DAT) PATH
try :
self.conf_iris_path = conf.get("GENERAL","IRIS_DAT_PATH")
makedirs(self.conf_iris_path)
except : raise Exception("Cannot read conf")
def __del__(self):
#close
__LOG__.Trace("__del__")
def getSQL(self, table_name):
table_sql_file = table_name+".sql"
sql_file = os.path.join(self.conf_table_sql_path,table_sql_file)
sql = None
with open(sql_file,'r') as f:
sql = f.read()
__LOG__.Trace(sql)
return sql
    # Create the table
def createTable(self, table_name):
if not table_name : raise Exception("Need Table Name for create")
__LOG__.Trace(table_name)
try : sql = self.getSQL(table_name+"_pgsql")
except : return None
if sql:
try :
result = self.cur.curs.execute(sql)
self.cur.commit()
__LOG__.Trace(result)
except pg.psycopg2.errors.DuplicateTable :
self.cur.rollback()
__LOG__.Trace("%s Table is already exists" % table_name)
def createFunction(self):
sql = '''
CREATE FUNCTION update_false() RETURNS trigger LANGUAGE plpgsql AS $$
BEGIN
            IF NEW.CREATE_TIME <> OLD.CREATE_TIME THEN
                RAISE EXCEPTION 'updating create_time is not allowed';
            END IF;
            IF NEW.UPDATE_TIME < OLD.CREATE_TIME THEN
                RAISE EXCEPTION 'setting update_time earlier than create_time is not allowed';
            END IF;
RETURN NEW;
END $$
'''
try :
self.cur.curs.execute(sql)
self.cur.commit()
__LOG__.Trace("Create Function Success !!")
except pg.psycopg2.errors.lookup("42723") : #DuplicateFunction Code
self.cur.rollback()
__LOG__.Trace("update_false() Function already exists !" )
    # Attach the function to the table as a trigger
def createTrigger(self,table_name):
sql = '''
CREATE TRIGGER avoid_update_{table}
BEFORE UPDATE
ON {tableName}
FOR EACH ROW
EXECUTE PROCEDURE update_false()
'''.format(table=table_name.split('_')[2],tableName=table_name)
try :
self.cur.curs.execute(sql)
self.cur.commit()
__LOG__.Trace("Create Trigger Success !!")
except pg.psycopg2.errors.lookup("42710") : #DuplicateObject
self.cur.rollback()
__LOG__.Trace("avoid_update_%s Trigger already exists !" % table_name.split('_')[2])
def tableInit(self):
        # Open the connection
        self.cur = pg.Postgres_py3(self.conf_psql_connection)
        # Create the function (shared by both tables)
        self.createFunction()
        # Then create each table and attach its trigger
for table_name in self.table_name_list:
self.createTable(table_name)
self.createTrigger(table_name)
def findValueInFile(self, file_name, search_key, search_data, want_column):
result = []
        if not all(isinstance(arg, list) for arg in (search_key, search_data, want_column)):
return None
if os.path.exists(file_name):
with open (file_name, 'r') as f:
__LOG__.Trace("FILE READ FOR RENAME, FILE NAME = %s" % file_name)
header = f.readline().strip().split(self.conf_out_sep)
rows = [ line.split(self.conf_out_sep) for line in f.readlines() ]
for row in rows:
wanted_row_flag = True
for idx, key in enumerate(search_key) :
if row[header.index(key)] != search_data[idx] :
wanted_row_flag = False
continue
if wanted_row_flag :
for col in want_column :
result.append(row[header.index(col)])
__LOG__.Watch(["FIND VALUE IN FILE" ,result])
return result
    # Convert the xlsx sheets to temp files (pandas)
def convertXlsxToTemp(self,file_path) :
sheet_name_list = self.conf_sheet_name_list
        # Each sheet must be read separately
for idx,sheet_name in enumerate(sheet_name_list):
sheet_out_name = self.conf_sheet_out_name_list[idx]
            # The 법정동코드 (legal-dong code) sheet keeps its header on the
            # second row, so every sheet is read with header=1.
            sheet_df = pd.read_excel(file_path, sheet_name = sheet_name, header=1, dtype=str, keep_default_na=False)
save_path = os.path.normcase("%s/CNTR_STDZN_%s_DFNTN_TB" % (self.conf_save_dir, sheet_out_name))
            # Drop the first two reference-only columns
if sheet_name in ['도메인_정리','용어_정리']:
sheet_df.drop(sheet_df.columns[[0,1]], axis = 'columns', inplace = True )
save_path = os.path.normcase("%s/CNTR_STDZN_%s_DFNTN_TB" % (self.conf_iris_path, sheet_out_name) )
if sheet_name == '표준화_정리':
sheet_df.drop(sheet_df.columns[[0,1]], axis = 'columns', inplace = True )
            # Extract words
if sheet_name == '용어_정리':
self.getWordFromTerm(sheet_df)
__LOG__.Trace("===== PROCESS START : %s =====" % sheet_out_name)
save_file = '%s_%s_TMP.temp'%(save_path, self.cur_time)
header = sheet_df.columns.tolist()
df_col_size = len(header)
__LOG__.Watch(["HEADER ::",header])
time_list = [self.cur_time,'']
header.extend(['CREATE_TIME','UPDATE_TIME'])
            # Write rows to the temp file
with open(save_file,'w') as f:
f.write( '%s\n'% self.conf_out_sep.join(header) )
decrease_idx_dict = {}
for i in range(0,len(sheet_df)) :
                # Normalize newlines, tabs and NaN values (plain .replace('\t', ...)
                # would only match whole-cell tabs, not tabs inside a value)
                row = sheet_df.iloc[i].str.replace('\n',' ').str.replace('\t',' ').replace(np.nan,'').tolist()
try:
                    # Sheet-specific filtering
if sheet_out_name in ["CODE","CLMN","TERM"]:
cnter_id = row[header.index("CNTR_ID")].strip().upper()
# IF COLUMN END
if sheet_out_name == 'CLMN' :
old_table = row[header.index("PREV_TB_NM")]
if old_table in decrease_idx_dict :
old_idx = row[header.index("IDX")]
row[header.index("IDX")] = str(int(old_idx) - decrease_idx_dict[old_table])
# MIN MAX EXCEPT FILTER
if cnter_id in self.mm_except_center_list :
row[header.index('NOTNULL_YN')] = ''
row[header.index('MINM_VL')] = ''
row[header.index('MAX_VL')] = ''
row[header.index('XCPTN_VL')] = ''
#APPLY FUNCTION
func = row[header.index("CLMN_FUNC")].strip().upper()
func_name = func.split("(",1)[0]
# if func_name == "COLUMN_REMOVE" :
# if old_table not in decrease_idx_dict :
# decrease_idx_dict.setdefault( old_table , 1)
# else : decrease_idx_dict[old_table] += 1
# continue
# elif func_name == "COLUMN_RENAME" :
if func_name == "COLUMN_RENAME" :
new_column_name = func.split('"')[1] # RENAME COLUMNS NAME
row[header.index("NW_CLMN_NM")] = new_column_name
if "TERM" in self.loading_file_dict:
kor_dmn_lrg, kor_dmn_mddl, kor_dmn_smll, kor_term = self.findValueInFile(self.loading_file_dict["TERM"],\
["PREV_TB_NM", "ENG_ABB_NM"], [old_table, new_column_name],\
["KOR_DMN_LRG_CTGRY", "KOR_DMN_MDDL_CTGRY", "KOR_DMN_SMLL_CTGRY","KOR_TERM"]) # WANTED COLUMN
row[header.index("KOR_DMN_LRG_CTGRY")] = kor_dmn_lrg
row[header.index("KOR_DMN_MDDL_CTGRY")] = kor_dmn_mddl
row[header.index("KOR_DMN_SMLL_CTGRY")] = kor_dmn_smll
row[header.index("KOR_CLMN_NM")] = kor_term
if "DMN" in self.loading_file_dict:
d_type, d_lnth, dmn_frmt = self.findValueInFile(self.loading_file_dict["DMN"],["KOR_DMN_LRG_CTGRY", "KOR_DMN_MDDL_CTGRY", "KOR_DMN_SMLL_CTGRY"],\
[kor_dmn_lrg, kor_dmn_mddl, kor_dmn_smll], ["DATA_TYPE","DATA_LNTH","DMN_FRMT"])
type_lnth = d_type + "|" + d_lnth
row[header.index("TYPE_LNTH")] = type_lnth
row[header.index("DATA_FRMT")] = dmn_frmt
# IF COLUMN END
#CENTER ID FILTER
if self.conf_centers.upper() == 'ALL' : pass
else :
cnter_list = [cnter.strip() for cnter in self.conf_centers.split(",") ]
if cnter_id not in cnter_list : continue
except:
__LOG__.Exception()
                # (only when the row is not empty)
if not row == [''] * df_col_size :
row.extend(time_list)
if sheet_out_name == 'TERM':
self.upperList(row, header, ["CNTR_ID","PREV_TB_NM","PREV_CLMN_NM","ENG_TERM","ENG_ABB_NM"])
abb = row[header.index("ENG_ABB_NM")]
if len(abb) > 31 :
__LOG__.Trace("========== ERROR !!!! FIXME !!!! ==========")
__LOG__.Trace("ENG_ABB_NM IS TOO LONG, PLEASE CHECK !!!")
__LOG__.Watch(row)
__LOG__.Trace("===========================================")
elif sheet_out_name == 'DMN':
self.upperList(row, header, ["ENG_DMN_MDDL_CTGRY","ENG_DMN_SMLL_CTGRY","DATA_TYPE"])
elif sheet_out_name == 'CLMN':
self.upperList(row, header, ["CNTR_ID","PRD_CD","PREV_TB_NM","NW_TB_NM","PREV_CLMN_NM","NW_CLMN_NM","TYPE_LNTH"])
row_str = self.conf_out_sep.join(row)
f.write( '%s\n'% row_str)
if sheet_out_name in ["CLMN","DMN","TERM"] :
new_file_name = '%s_%s.dat'%(save_path,self.cur_time)
os.rename(save_file, new_file_name)
__LOG__.Trace("Rename : %s -> %s" % (save_file, new_file_name))
save_file = new_file_name
self.loading_file_dict[sheet_out_name] = save_file
else :
self.temp_file_dict[sheet_out_name] = save_file
__LOG__.Trace("convertXlsx To Dat : %s"%save_file)
self.std_out = ''
def upperList(self, tg_list, header, column):
index_list = [ header.index(tg_col) for tg_col in column ]
for idx, col in enumerate(tg_list) :
if idx in index_list:
tg_list[idx] = col.upper()
return tg_list
    # Read the latest temp files and combine them into the final dat files
def combitationTemp(self):
code_list = self.conf_sheet_out_name_list
code_file_name = self.temp_file_dict["CODE"]
hdongcode_file_name = None
hbdongcode_file_name = None
if 'HDONGCODE' in code_list: hdongcode_file_name = self.temp_file_dict["HDONGCODE"]
if 'HBDONGCODE' in code_list: hbdongcode_file_name = self.temp_file_dict["HBDONGCODE"]
#CODE
code_dict= {}
file_name_list = [ code_file for code_file in [code_file_name, hdongcode_file_name,\
hbdongcode_file_name] if code_file ]
__LOG__.Trace("Combination Temp files : %s" % ",".join(file_name_list))
code = pd.read_csv(code_file_name,sep = self.conf_out_sep,header=0,dtype=str,keep_default_na=False)
code=code.fillna('')
header = code.columns.tolist()
        # Keep the rows that are not common (CM) codes
not_cm_code_list = []
not_cm_code_list.append(header)
not_cm_code=code[~code['CNTR_ID'].isin(["CM"]) | ~code['CD_NM'].isin(["행정동 코드","시군구 코드","시도 코드"])]
for idx,r in not_cm_code.iterrows():
not_cm_code_list.append(r.tolist())
code = code[code['CNTR_ID'] == 'CM']
for i in code.index:
code_dict.setdefault(code['CNTR_ID'][i],{}).setdefault(code['CD_NM'][i],{}).setdefault(code['CD_VL'][i],code['CD_KOR_MNNG'][i])
code_dict.setdefault('CM',{})
code_dict['CM']['행정동 코드']= {}
code_dict['CM']['시군구 코드'] = {}
code_dict['CM']['시도 코드'] = {}
code = None
        # 법정동 (legal-dong) linkage codes
if hbdongcode_file_name :
hbdongcode = pd.read_csv(hbdongcode_file_name,sep = self.conf_out_sep,header=0,dtype=str,usecols=['행정기관코드','시도','시군구','행정동(행정기관명)'],na_values='')
hbdongcode = hbdongcode.fillna('')
for row in hbdongcode.index:
if hbdongcode['행정기관코드'][row][2:]=='00000000':
code_dict['CM']['시도 코드'].setdefault(hbdongcode['행정기관코드'][row][:2],hbdongcode['시도'][row])
elif hbdongcode['행정기관코드'][row][5:]=='00000':
code_dict['CM']['시군구 코드'].setdefault(hbdongcode['행정기관코드'][row][:5], ' '.join([hbdongcode['시도'][row],hbdongcode['시군구'][row] ] ) )
else :
code_dict['CM']['행정동 코드'].setdefault(hbdongcode['행정기관코드'][row], ' '.join([hbdongcode['시도'][row],hbdongcode['시군구'][row],hbdongcode['행정동(행정기관명)'][row] ]))
hbdongcode = None
        # 행정동 (administrative-dong) codes
if hdongcode_file_name :
hdongcode = pd.read_csv(hdongcode_file_name,sep = self.conf_out_sep, header=0, dtype=str , usecols=['행정동코드','시도명','시군구명','읍면동명'],na_values='')
hdongcode = hdongcode.fillna('')
for row in hdongcode.index:
if hdongcode['행정동코드'][row][2:]=='00000000':
if hdongcode['행정동코드'][row][:2] not in code_dict['CM']['시도 코드']:
code_dict['CM']['시도 코드'].setdefault(hdongcode['행정동코드'][row][:2],hdongcode['시도명'][row])
elif hdongcode['행정동코드'][row][5:]=='00000':
if hdongcode['행정동코드'][row][:5] not in code_dict['CM']['시군구 코드']:
code_dict['CM']['시군구 코드'].setdefault(hdongcode['행정동코드'][row][:5],' '.join([hdongcode['시도명'][row],hdongcode['시군구명'][row] ] ) )
else :
if hdongcode['행정동코드'][row] not in code_dict['CM']['행정동 코드']:
code_dict['CM']['행정동 코드'].setdefault(hdongcode['행정동코드'][row],' '.join([hdongcode['시도명'][row],hdongcode['시군구명'][row],hdongcode['읍면동명'][row] ]))
hdongcode = None
#dict to Dat
self.dictToDat( code_dict, code_file_name, not_cm_code_list)
for file_name in file_name_list:
os.remove(file_name)
__LOG__.Trace("file delete : %s" % file_name)
    # Write the combined dict out as a .dat file
def dictToDat(self, code_dict, code_file_name, extend_list):
data_list = []
data_list.extend(extend_list)
data_list.extend( self.getSortList(code_dict,"시도 코드"))
data_list.extend( self.getSortList(code_dict,"시군구 코드"))
data_list.extend( self.getSortList(code_dict,"행정동 코드"))
file_name,ext = os.path.splitext(code_file_name)
save_dat_name_tmp = file_name+"_COMB.temp"
with open (save_dat_name_tmp,'w') as f:
row_cnt = 0
for row in data_list:
try:
f.write( '%s\n'% self.conf_out_sep.join(row))
row_cnt +=1
except :
__LOG__.Trace("File Write Error : %s" % row )
#self.out_file_row_cnt = row_cnt
save_dat_name =file_name.replace('_TMP','') + ".dat"
os.rename(save_dat_name_tmp,save_dat_name)
self.loading_file_dict["CODE"] = save_dat_name
__LOG__.Trace("Combination result : %s " % save_dat_name )
    # Convert the dict into a sorted list of rows
def getSortList(self, code_dict, p_key):
key_tmp = code_dict['CM'][p_key].keys()
sort_key = sorted(key_tmp)
result_list = []
for key in sort_key:
tmp_list = ["CM","COMMON",p_key,key,code_dict["CM"][p_key].get(key),"-",'한국행정구역분류_2020.10.1-"법정동코드 연계 자료분석용"기준 + KIKcd_H.20181210(말소코드포함) , 행정동코드 2자리',self.cur_time,'']
result_list.append(tmp_list)
return result_list
    # Could return a new DataFrame so the caller can post-process it
def getWordFromTerm(self, df):
__LOG__.Trace("===== PROCESS START : WORD =====")
word_dict = {}
abb_dict = {}
dfntn_dict = {}
type_dict = {}
word_sheet_df = None
if self.conf_word_dict_flag :
word_sheet_df = pd.read_excel(self.in_file, sheet_name = "단어사전", header=1, dtype=str,keep_default_na=False)
word_sheet_df.rename(columns = lambda x : x.strip(), inplace = True )
for idx,row in word_sheet_df.iterrows():
type_dict.setdefault(row.KOR_WORD_NM, row.TYPE)
word_dict.setdefault(row.ENG_WORD_NM.upper(), []).append(row.KOR_WORD_NM)
abb_dict.setdefault(row.ENG_WORD_NM.upper(), []).append(row.ENG_WORD_ABB_NM.upper())
dfntn_dict.setdefault(row.KOR_WORD_NM, row.KOR_WORD_DFNTN.replace("\n",""))
for idx, row in df[['CNTR_ID','KOR_TERM','ENG_TERM','ENG_ABB_NM']].dropna().iterrows():
try:
if self.conf_centers == 'ALL':
pass
else:
center_list = [ center.strip().upper() for center in self.conf_centers.split(',') ]
if row.CNTR_ID.strip().upper() not in center_list :
continue
kor_t_list = row.KOR_TERM.upper().split('_')
eng_t_list = row.ENG_TERM.upper().split('_')
eng_a_list = row.ENG_ABB_NM.upper().split('_')
if len(kor_t_list) == len(eng_t_list) == len(eng_a_list):
for idx,kor in enumerate(kor_t_list):
word_dict.setdefault(eng_t_list[idx], []).append(kor)
abb_dict.setdefault(eng_t_list[idx], []).append(eng_a_list[idx])
else:
__LOG__.Trace("!!!! ERROR !!!! : LENGTH is Different, row : %s" % ', '.join(row.tolist() ) )
continue
except Exception as e:
__LOG__.Watch(["!!!!ERROR!!!! : ",e , row ] )
# CNTR_DICT_WORD_DFNTN_TB_YYYYMMDDHHMMSS.dat
file_name = '%s_%s'% ("CNTR_DICT_WORD_DFNTN_TB",self.cur_time)
file_path = os.path.join(self.conf_iris_path, file_name)
with open (file_path + '.tmp' ,'w') as f:
header = ["TYPE","KOR_WORD_NM","ENG_WORD_NM","ENG_WORD_ABB_NM","KOR_WORD_DFNTN","CREATE_TIME","UPDATE_TIME"]
f.write("%s\n" % self.conf_out_sep.join(header))
for key in word_dict:
if len(list(set(abb_dict[key]))) != 1:
__LOG__.Trace("!!!! ERROR !!!! : ABBreviation(약어) Overlap , ABB : %s, ENG_WORD : %s" % (','.join(abb_dict[key]), key) )
continue
for kor_word in list(set(word_dict[key])):
word_dfntn = ''
if kor_word in dfntn_dict:
word_dfntn = dfntn_dict[kor_word]
else :
__LOG__.Trace("WARNING !!!! %s IS NOT DEFINITION, PLEASE FILL XLSX FILE !!!!" % kor_word )
f.write("%s\n" % self.conf_out_sep.join( map( str, [type_dict.setdefault(kor_word, "표준어"), kor_word, key, list(set(abb_dict[key]))[0], word_dfntn, self.cur_time,''])))
os.rename(file_path + '.tmp' , file_path + '.dat')
__LOG__.Trace("Rename , %s -> %s" % ((file_path + ".tmp"), (file_path + ".dat") ))
self.loading_file_dict["WORD"] = file_path + ".dat"
def loadDatToDatabase(self):
__LOG__.Trace("Loading Target File : %s " % ', '.join(self.loading_file_dict))
for code in self.loading_file_dict :
self.loadDatFile(code, self.loading_file_dict[code])
if self.result_flag:
try :
self.deletePastPgsql(code)
except :
__LOG__.Exception()
# self.logging()
def loadDatFile(self,table_code,save_file):
# self.loggingInit("POSTGRES로딩")
#self.out_file_size = os.path.getsize(save_file)
#self.out_file_row_cnt = int(subprocess.check_output(["wc","-l",save_file]).split()[0])
self.cur.separator = self.conf_out_sep
if table_code != 'WORD' : table_name = 'CNTR_STDZN_%s_DFNTN_TB' % table_code
else : table_name = 'CNTR_DICT_%s_DFNTN_TB' % table_code
#self.table_info = table_name
result = self.cur.load(table_name,save_file)
if 'Success' in result :
__LOG__.Trace("PGSQL Load Success : %s" % save_file)
self.success_cnt = result.rsplit(":",1)[1].strip()
self.result_flag = True
self.cur.commit()
else :
__LOG__.Trace("PGSQL Load Failed")
self.fail_reason = result
self.cur.rollback()
__LOG__.Trace(result)
    # Added Dec 3 --- delete past data from Pgsql
def deletePastPgsql(self, table_code):
if table_code != "WORD" : table_name = 'CNTR_STDZN_%s_DFNTN_TB' % table_code
else : table_name = "CNTR_DICT_%s_DFNTN_TB" % table_code
file_cur_time = os.path.splitext(self.loading_file_dict[table_code])[0].rsplit("_",1)[1]
selectSql = '''
SELECT max(create_time) FROM {tableName}
'''.format(tableName = table_name)
max_time = self.cur.execute_iter(selectSql)[0][0]
if not max_time : return False
if max_time != file_cur_time:
__LOG__.Trace("Selected Max time : %s And File Write Time : %s" % (max_time , file_cur_time))
return False
countSql = '''
SELECT count(*) FROM {tableName}
'''.format(tableName = table_name)
__LOG__.Trace("AS-IS Delete sql in POSTGRES.%s Total Count = %s"%(table_name, self.cur.execute_iter(countSql)[0][0]))
deleteSql = '''
DELETE FROM {tableName} WHERE CAST(NULLIF(create_time,'') AS DOUBLE PRECISION)
< CAST({maxTime} AS DOUBLE PRECISION) or NULLIF(create_time, '') is null
'''.format(tableName = table_name,maxTime = max_time)
del_result = self.cur.execute(deleteSql)
__LOG__.Trace("Delete Past Data : %s"%del_result)
__LOG__.Trace("TO-BE Delete sql POSTGRES.in %s Total Count = %s"%(table_name, self.cur.execute_iter(countSql)[0][0]))
self.cur.commit()
def splitHeader(self, table_name, filePath) :
path_ctl = self.conf_ctl_path + "/" + table_name + ".ctl"
path_dat = self.conf_iris_path + "/" + table_name + "_" + self.cur_time + ".dat"
if not os.path.exists(filePath) :
return None, None
dataList = list()
with open(filePath, 'r') as f :
dataList = f.readlines()
        # ## Remove source file (disabled)
# if table_name.split('_')[2] in self.conf_dat_remove_list:
# os.remove(filePath)
# __LOG__.Trace("FILE REMOVE : %s" % filePath)
header_str = dataList.pop(0)
with open(path_ctl, 'w') as cf :
#Write ctl
cf.write(header_str.replace(self.conf_out_sep, '\n'))
with open(path_dat, 'w') as df :
df.write(''.join(dataList) )
return path_ctl, path_dat
def irisLoadDat(self, tableNm, filePath) :
# self.loggingInit("IRIS로딩")
#self.out_file_size = os.path.getsize(filePath)
#self.out_file_row_cnt = int(subprocess.check_output(["wc","-l",filePath]).split()[0])
self.load_some_success_flag = False
self.result_flag = False
ctlFile, datFile = self.splitHeader(tableNm, filePath)
result = None
if os.path.isfile(ctlFile) and os.path.isfile(datFile) :
self.partition_info = self.cur_time
self.key_info = '0'
result = self.iris_db.load(tableNm, self.key_info , self.partition_info , ctlFile, datFile)
if '+OK' in result :
if 'SOME SUCCESS' in result :
self.load_some_success_flag = True
__LOG__.Trace('IRIS Load Fail : SOME SUCCESS')
self.fail_reason = result
else :
__LOG__.Trace('IRIS Load Success : %s' % datFile)
os.remove(datFile)
__LOG__.Trace("FILE REMOVE : %s " % datFile)
self.success_cnt = result.rsplit(":",1)[1].strip()
self.result_flag = True
else :
__LOG__.Trace('IRIS Load Fail')
self.fail_reason = result
__LOG__.Trace(result)
# self.logging()
# iris Connection 및 Load
def irisLoad(self) :
for code in self.loading_file_dict:
if code != 'WORD' : table_nm = "CNTR_STDZN_%s_DFNTN_TB" % code
else : table_nm = "CNTR_DICT_%s_DFNTN_TB" % code
#self.table_info = table_nm
__LOG__.Trace("Table Name : %s, File Name : %s"%(table_nm, self.loading_file_dict[code]))
self.irisLoadDat(table_nm, self.loading_file_dict[code])
if self.load_some_success_flag or self.result_flag :
try : self.deletePastIRIS(table_nm, self.load_some_success_flag )
except : __LOG__.Exception()
# iris del
def deletePastIRIS(self, table_name, some_success_flag):
# GET max Create Time
selectSQL = '''
SELECT max(create_time) FROM {tableName};
'''.format(tableName = table_name)
result = self.iris_db.execute_iter(selectSQL)
file_cur_time = os.path.splitext(self.loading_file_dict[table_name.split("_")[2]])[0].rsplit("_",1)[1]
#get ctime
ctime = next(result)[0]
if not ctime : return False
if file_cur_time != ctime :
__LOG__.Trace("Selected Max time : %s And File Write Time : %s" % (ctime , file_cur_time))
return False
tmSQL = '''
SELECT count(*) FROM {tableName} ;
'''.format(tableName = table_name)
__LOG__.Trace("AS-IS Delete sql in IRIS.%s Total Count = %s"%(table_name,next(self.iris_db.execute_iter(tmSQL))))
        # On full success, delete all rows from older loads (create_time <> ctime);
        # on a partial ("SOME SUCCESS") load, delete the rows just loaded instead.
        del_option = '<>'
        if some_success_flag :
            del_option = '='
#Delete
deleteSQL = '''
DELETE FROM {tableName} WHERE create_time {option} {ctime} ;
'''.format(tableName = table_name, option = del_option, ctime = ctime)
self.iris_db.execute(deleteSQL)
__LOG__.Trace("TO-BE Delete sql in IRIS.%s Total Count = %s"%(table_name,next(self.iris_db.execute_iter(tmSQL))))
#IRIS GET CURSOR AND TABLE CREATE
def irisTableInit(self):
self.iris_db = iris.Client(self.conf_iris_conn)
self.iris_db.setSep(self.conf_out_sep,'\n')
for table_name in self.table_name_list:
try : sql = self.getSQL(table_name+"_iris")
except : __LOG__.Exception()
try :
self.iris_db.execute(sql)
__LOG__.Trace("Table Create : %s"%table_name)
except Exception as e:
if "already exists" in str(e):
pass
else : raise Exception(e)
# def logging(self):
#
# #center_name, center_id, process_name, process_type, start_time, end_time, duration, in_file_size,
# #out_file_size, in_file_row_cnt, out_file_row_cnt, table_info, key_info, partition_info, result_flag,
# #success_cnt, fail_reason, '', '', '', '', '', '', ''
#
# #start_time = self.cur_time[:8] + '000000'
# self.start_time = self.cur_time
#
# end_time = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
# #end_time = datetime.datetime.now().strftime("%Y%m%d%H%M%S")[:8] + '000000'
#
# #a =
# #send.
# #__LOG__.Trace()
# msg = '|^|'.join(map(str,[self.center_name , self.center_id, self.process_name, self.process_type, self.start_time, end_time, self.std_in, self.std_out,\
# self.in_file_size, self.out_file_size, self.in_file_row_cnt, self.out_file_row_cnt, self.table_info, \
# self.key_info , self.partition_info, str(self.result_flag), self.success_cnt, self.fail_reason, self.header_cnt,\
# self.comp_row_cnt, self.error_column_length, self.error_check_notnull, self.error_check_type_legth, self.error_check_format,\
# self.error_change_cont ]))
#
# logClient.irisLogClient().log(msg)
# __LOG__.Trace("Send Log Socket : %s" % msg )
#
#
# def loggingInit(self, process_type):
#
# self.center_name = '중계서버'
# self.center_id = 'GW'
# self.process_name = os.path.basename(sys.argv[0])
# self.process_type = process_type
# self.start_time = ''
# self.end_time = ''
# self.std_in = ''
# self.std_out = ''
# self.in_file_size = ''
# self.out_file_size = ''
# self.in_file_row_cnt = ''
# self.out_file_row_cnt = ''
#
# self.table_info = ''
# self.key_info = ''
# self.partition_info = ''
# self.result_flag = ''
# self.success_cnt = ''
# self.fail_reason = ''
#
# self.header_cnt = ''
# self.comp_row_cnt = ''
# self.error_column_length = ''
# self.error_check_notnull = ''
# self.error_check_type_legth = ''
# self.error_check_format = ''
# self.error_change_cont = ''
def processing(self,in_file) :
__LOG__.Trace( "processing : %s"%in_file )
self.in_file = in_file
if in_file:
self.loading_file_dict = {}
self.temp_file_dict = {}
self.convertXlsxToTemp(in_file)
self.combitationTemp()
#self.tableInit()
#self.loadDatToDatabase()
self.irisTableInit()
self.irisLoad()
def run(self):
while not SHUTDOWN:
std_in = None
is_std_err = False
try:
std_in = sys.stdin.readline().strip()
self.cur_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
                if not std_in :
                    is_std_err = True
continue
__LOG__.Trace("STD IN : %s"%std_in)
try :
prefix, in_file = std_in.split('://',1)
except :
is_std_err= True
__LOG__.Trace("Input Format error : %s"%std_in)
continue
if prefix != "file":
is_std_err = True
__LOG__.Trace("Prefix is not match : %s"%prefix)
continue
if not os.path.exists(in_file):
is_std_err = True
__LOG__.Trace("File is not Exists : %s"%in_file)
continue
if os.path.splitext(in_file)[1] != ".xlsx":
is_std_err = True
__LOG__.Trace("File is not xlsx : %s"%in_file)
# self.std_in = std_in
#
# self.in_file_size = os.path.getsize(in_file)
#
# self.in_file_row_cnt = ''
stime = time.time()
self.processing(in_file)
etime = time.time()
__LOG__.Trace( 'Duration %s sec' % ( etime - stime ) )
is_std_err = True
except:
if not SHUTDOWN : __LOG__.Exception()
finally :
if std_in != None and is_std_err :
stderr( std_in )
#- main function ----------------------------------------------------
def main():
module = os.path.basename(sys.argv[0])
if len(sys.argv) < 3:
        sys.stderr.write('Usage : %s section conf {option:[[log_arg]-d]}\n' % module )
sys.stderr.flush()
os._exit(1)
section = sys.argv[1]
config_file = sys.argv[2]
conf = ConfigParser.ConfigParser()
conf.read(config_file)
if '-d' not in sys.argv :
etc_argv = sys.argv[3:]
log_arg = ''
if len(sys.argv[3:]) > 0 :
log_arg = '_' + sys.argv[3]
log_path = conf.get('GENERAL', 'LOG_PATH')
makedirs( log_path )
log_file = os.path.join(log_path, '%s_%s%s.log' % (os.path.splitext(module)[0], section, log_arg ))
Log.Init(Log.CRotatingLog(log_file, 10240000, 9))
else:
Log.Init()
pid = os.getpid()
__LOG__.Trace('============= %s START [pid:%s]==================' % ( module, pid ))
DirObservation(module, conf, section).run()
__LOG__.Trace('============= %s END [pid:%s]====================' % (module, pid ))
#- if name start ----------------------------------------------
if __name__ == "__main__" :
main()
```
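These modules are chained through a simple line protocol: each process emits `prefix://payload` events on stdout, and the next process reads them from stdin (ReadDir_v2 only accepts the `file` prefix and `.xlsx` payloads). Below is a minimal sketch of driving ReadDir_v2.py by hand; the section name, config path, and xlsx path are all hypothetical examples.

```python
# Feed one "file://" event into ReadDir_v2.py over its stdin pipe.
# Paths and the "XLSX" section name are hypothetical.
import subprocess

proc = subprocess.Popen(
    ["python3", "ReadDir_v2.py", "XLSX", "/home/eva/conf/ReadDir_v2.conf"],
    stdin=subprocess.PIPE, stderr=subprocess.PIPE, text=True,
)
# One event per line: "<prefix>://<payload>"
proc.stdin.write("file:///home/eva/in/CNTR_STDZN_20210601.xlsx\n")
proc.stdin.flush()
```

Unconsumed or failed lines come back on stderr (see the `stderr()` helper above), which is how a supervisor can requeue events.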
#### File: bin/py3/SFTPGWUpload.py
```python
import os
import sys
import time
import datetime
import signal
import subprocess
import glob
import configparser as ConfigParser
import paramiko
import socket
import shutil
#import $MOBIGEN_LIB$
import Mobigen.Common.Log_PY3 as Log; Log.Init()
import Mobigen.Utils.LogClient as c_log
#import $PROJECT_LIB$
#- shutdown ----------------------------------------------------
SHUTDOWN = False
def shutdown(signalnum, handler):
global SHUTDOWN
SHUTDOWN = True
sys.stderr.write('Catch Signal: %s \n' % signalnum)
sys.stderr.flush()
signal.signal(signal.SIGTERM, shutdown) # sigNum 15 : Terminate
signal.signal(signal.SIGINT, shutdown) # sigNum 2 : Keyboard Interrupt
signal.signal(signal.SIGHUP, shutdown) # sigNum 1 : Hangup detected
signal.signal(signal.SIGPIPE, shutdown) # sigNum 13 : Broken Pipe
'''
On Windows, signal() can only be called with
SIGABRT, SIGFPE,SIGILL, SIGINT, SIGSEGV, or SIGTERM.
A ValueError will be raised in any other case.
'''
#- def global setting ----------------------------------------------------
def stderr(msg) :
sys.stderr.write(msg + '\n')
sys.stderr.flush()
__LOG__.Trace('Std ERR : %s' % msg)
def stdout(msg) :
sys.stdout.write(msg + '\n')
sys.stdout.flush()
__LOG__.Trace('Std OUT : %s' % msg)
def makedirs(path) :
try :
os.makedirs(path)
__LOG__.Trace( path )
except : pass
#- Class ----------------------------------------------------
class SFTPUpload:
def __init__(self, conf, section) :
#open
__LOG__.Trace("__init__")
        section = 'SFTP_UPLOAD'  # NOTE: deliberately overrides the section argument passed in from main()
server_host_name = socket.gethostname()
try : self.HOST_IP = conf.get(section, "HOST_IP")
except :
self.HOST_IP = conf.get("GENERAL", "HOST_IP")
if 'ntbdgw2' == server_host_name :
try :
self.HOST_IP = conf.get(section, "GW_HOST_IP")
except :
self.HOST_IP = conf.get("GENERAL", "GW_HOST_IP")
elif 'tbdetl1' == server_host_name :
try :
self.HOST_IP = conf.get(section, "EX_HOST_IP")
except :
self.HOST_IP = conf.get("GENERAL", "EX_HOST_IP")
try : self.PORT = conf.getint(section, "%s_PORT" % server_host_name )
except :
self.PORT = conf.getint("GENERAL", "%s_PORT" % server_host_name )
try : self.USER = conf.get(section, "%s_USER" % server_host_name)
except :
self.USER = conf.get("GENERAL", "%s_USER" % server_host_name)
try : self.PASSWD = conf.get(section, "%s_PASSWD" % server_host_name)
except :
self.PASSWD = conf.get("GENERAL", "%s_PASSWD" % server_host_name)
try : self.RETRY_CNT = conf.getint(section, "RETRY_CNT")
except :
self.RETRY_CNT = conf.getint("GENERAL", "RETRY_CNT")
try : self.RETRY_SLEEP = conf.getint(section, "RETRY_SLEEP")
except :
self.RETRY_SLEEP = conf.getint("GENERAL", "RETRY_SLEEP")
try : self.UPLOAD_PATH = conf.get(section, "UPLOAD_PATH")
except :
self.UPLOAD_PATH = conf.get("GENERAL", "UPLOAD_PATH")
try : self.EXT = conf.get(section, "EXT")
except :
self.EXT = conf.get("GENERAL" , "EXT")
try : self.CENTER_ID = conf.get(section, socket.gethostname())
except : self.CENTER_ID = conf.get("GENERAL", socket.gethostname())
def __del__(self):
#close
__LOG__.Trace("__del__")
def closeSFTP(self, sftp, transport) :
if transport :
transport.close()
if sftp :
sftp.close()
__LOG__.Trace("Close SFTP Connect")
def connectSFTP(self) :
__LOG__.Trace("SFTP Connect")
sftp = None
transport = None
attempts = 0
while attempts < self.RETRY_CNT :
try :
transport = paramiko.Transport( (self.HOST_IP, self.PORT) )
                transport.connect(username=self.USER,
                                  password=self.PASSWD)  # restored from the PASSWD config option (redacted placeholder in source)
sftp = paramiko.SFTPClient.from_transport(transport)
__LOG__.Trace("SFTP Connect Success!")
break
except :
attempts += 1
__LOG__.Exception()
time.sleep(self.RETRY_SLEEP)
if attempts == self.RETRY_CNT :
os._exit(1)
return sftp, transport
def sftpMakeDir(self, sftp, remotePath) :
        remoteLastPath = os.path.basename(remotePath)
        __LOG__.Trace('last Path : %s' % remoteLastPath)
        __LOG__.Trace('full Path : %s' % remotePath)
        try :
            # If the directory already exists, stat() succeeds and nothing more is needed.
            sftp.stat(remotePath)
except :
try :
sftp.mkdir(remotePath)
except :
self.sftpMakeDir(sftp, os.path.abspath(os.path.join( remotePath, os.pardir ) ) )
sftp.mkdir(remotePath)
def fileUpload(self, sftp, transport, in_file, in_path, nowTime) :
uploadFlag = False
if sftp == None or transport == None :
sftp, transport = self.connectSFTP()
# filename = os.path.basename(in_file)
tempName, ext = os.path.splitext(os.path.basename(in_file))
# fileDir = os.path.dirname(in_file)
try :
uploadPath = os.path.join(in_path, tempName)
tempUploadPath = uploadPath + ".tmp"
fileUploadPath = uploadPath + ext
self.sftpMakeDir(sftp, os.path.dirname(tempUploadPath))
__LOG__.Trace("UPLOAD File Path : {} -> {}".format(in_file, tempUploadPath) )
sftp.put(in_file, tempUploadPath)
try :
sftp.stat(fileUploadPath)
__LOG__.Trace("Upload File already Exists")
new_uploadPath = os.path.join( os.path.dirname(uploadPath), os.path.basename(uploadPath).rsplit(".", 1)[0] + "_{}".format(nowTime) + ext )
__LOG__.Trace("New File : {}".format(new_uploadPath))
sftp.rename( tempUploadPath, new_uploadPath )
__LOG__.Trace("UPLOAD Success!! {}".format(new_uploadPath))
except IOError :
sftp.rename( tempUploadPath, fileUploadPath )
__LOG__.Trace("UPLOAD Success!! {}".format(fileUploadPath))
uploadFlag = True
except :
__LOG__.Trace("UPLOAD Fail!!")
raise
return uploadFlag
def processing(self, in_file) :
__LOG__.Trace( "processing : %s" % in_file )
fileName, fileExt = os.path.splitext(in_file)
nowTime = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
sftpUploadPath = self.UPLOAD_PATH.format(self.CENTER_ID)
sftp, transport = self.connectSFTP()
uploadFlag = self.fileUpload(sftp, transport, in_file, sftpUploadPath, nowTime)
self.closeSFTP(sftp, transport)
if uploadFlag :
if os.path.exists(in_file) and os.path.getsize(in_file) == 0 :
                os.remove(in_file) # remove the file
__LOG__.Trace("0 Byte File Remove!! , Remove File : %s" % in_file)
__LOG__.Trace('Upload Result : {}'.format(uploadFlag) )
def run(self) :
        while not SHUTDOWN :
            std_in = None
            is_std_err = False
            try:
                std_in = sys.stdin.readline().strip()
if not std_in :
is_std_err = True
continue
__LOG__.Trace('STD IN : %s' % std_in )
try :
prefix, in_file = std_in.split('://', 1)
except :
is_std_err = True
__LOG__.Trace( 'Input format error : %s' % std_in )
continue
if prefix != 'file' :
is_std_err = True
__LOG__.Trace('Prefix is not match : %s' % prefix)
continue
if not os.path.exists( in_file ) :
is_std_err = True
__LOG__.Trace('Not found file : %s' % in_file)
continue
if os.path.splitext( in_file )[1] != self.EXT :
is_std_err = True
__LOG__.Trace('Invalid File EXT : %s' % in_file )
continue
stime = time.time()
self.processing(in_file)
etime = time.time()
__LOG__.Trace( 'Duration %s sec' % ( etime - stime ) )
is_std_err = True
except:
if not SHUTDOWN : __LOG__.Exception()
finally :
if std_in != None and is_std_err :
stderr( std_in )
#- main function ----------------------------------------------------
def main():
module = os.path.basename(sys.argv[0])
    if len(sys.argv) < 3:
        sys.stderr.write('Usage : %s conf section {option:[[log_arg]-d]}\n' % module )
#python3 /home/test/Project_name/bin/py3/BaseModule.py /home/test/Project_name/conf/BaseModule.conf KC
#python3 /home/test/Project_name/bin/py3/BaseModule.py /home/test/Project_name/conf/BaseModule.conf 0
#python3 /home/test/Project_name/bin/py3/BaseModule.py /home/test/Project_name/conf/BaseModule.conf -d
sys.stderr.flush()
os._exit(1)
section = sys.argv[2]
config_file = sys.argv[1]
conf = ConfigParser.ConfigParser()
conf.read(config_file)
if '-d' not in sys.argv :
etc_argv = sys.argv[3:]
log_arg = ''
if len(sys.argv[3:]) > 0 :
log_arg = '_' + sys.argv[3]
log_path = conf.get('GENERAL', 'LOG_PATH')
makedirs( log_path )
log_file = os.path.join(log_path, '%s_%s%s.log' % (os.path.splitext(module)[0], section, log_arg ))
Log.Init(Log.CRotatingLog(log_file, 10240000, 9))
else:
Log.Init()
pid = os.getpid()
__LOG__.Trace('============= %s START [pid:%s]==================' % ( module, pid ))
SFTPUpload(conf, section).run()
__LOG__.Trace('============= %s END [pid:%s]====================' % (module, pid ))
#- if name start ----------------------------------------------
if __name__ == "__main__" :
main()
```
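`fileUpload()` above uploads to a `.tmp` name first and renames only after `put()` returns, so a remote consumer polling the upload directory never picks up a half-transferred file. A standalone sketch of that pattern follows, assuming paramiko is available; the host, port, and credentials are hypothetical parameters, and error handling is reduced to a bare minimum.

```python
# Minimal sketch of the upload-then-rename pattern used by fileUpload().
import os
import paramiko

def safe_put(local_path, remote_dir, host, port, user, passwd):
    transport = paramiko.Transport((host, port))
    transport.connect(username=user, password=passwd)
    sftp = paramiko.SFTPClient.from_transport(transport)
    try:
        name = os.path.basename(local_path)
        tmp_path = os.path.join(remote_dir, name + ".tmp")
        final_path = os.path.join(remote_dir, name)
        sftp.put(local_path, tmp_path)      # upload under a temporary name
        sftp.rename(tmp_path, final_path)   # publish only when complete
    finally:
        sftp.close()
        transport.close()
```

The rename is effectively atomic on most SFTP servers, which is what makes the pattern safe for directory-watching consumers.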
#### File: bin/py3/WriteDatFromTable.py
```python
import os
import sys
import time
import datetime
import signal
import subprocess
import glob
import configparser as ConfigParser
import numpy as np
#import $MOBIGEN_LIB$
import Mobigen.Common.Log_PY3 as Log; Log.Init()
import Mobigen.Database.Postgres_py3 as pg
import Mobigen.Database.iris_py3 as iris
import Mobigen.Utils.LogClient as logClient
#import $PROJECT_LIB$
#- shutdown ----------------------------------------------------
SHUTDOWN = False
def shutdown(signalnum, handler):
global SHUTDOWN
SHUTDOWN = True
sys.stderr.write('Catch Signal: %s \n' % signalnum)
sys.stderr.flush()
signal.signal(signal.SIGTERM, shutdown) # sigNum 15 : Terminate
signal.signal(signal.SIGINT, shutdown) # sigNum 2 : Keyboard Interrupt
signal.signal(signal.SIGHUP, shutdown) # sigNum 1 : Hangup detected
signal.signal(signal.SIGPIPE, shutdown) # sigNum 13 : Broken Pipe
'''
On Windows, signal() can only be called with
SIGABRT, SIGFPE,SIGILL, SIGINT, SIGSEGV, or SIGTERM.
A ValueError will be raised in any other case.
'''
#- def global setting ----------------------------------------------------
def stderr(msg) :
sys.stderr.write(msg + '\n')
sys.stderr.flush()
__LOG__.Trace('Std ERR : %s' % msg)
def stdout(msg) :
sys.stdout.write(msg + '\n')
sys.stdout.flush()
__LOG__.Trace('Std OUT : %s' % msg)
def makedirs(path) :
try :
os.makedirs(path)
__LOG__.Trace( path )
except : pass
#- Class ----------------------------------------------------
class WriteDat:
def __init__(self, module, conf, section) :
self.process_name = module
#open
__LOG__.Trace("__init__")
#sep
try : self.out_data_sep = conf.get("GENERAL", 'OUT_DATA_SEP')
except : self.out_data_sep = '^'
try : self.conf_dat_path = conf.get("GENERAL", "SAVE_DAT_DIR")
except : raise Exception("Require DAT_DIR for writing")
try : self.conf_db_class = conf.get("GENERAL", "PGSQL_CLASS")
except Exception as e : raise Exception("error : conf read error : %s"% e)
try : self.conf_iris_class = conf.get("GENERAL", "IRIS_CLASS")
except Exception as e : raise Exception("error : conf read error : %s"% e)
try : self.conf_code_key = conf.get(section,"CODE_KEY").split(',')
except Exception as e : raise Exception("error : conf read error : %s"% e )
try : self.conf_column_key = conf.get(section,"COLUMN_KEY").split(',')
except Exception as e : raise Exception("error : conf read error : %s"% e )
try : self.conf_ctl_path = conf.get("GENERAL","IRIS_CTL_PATH")
except Exception as e : raise Exception("error : conf read error : %s"%e)
try : self.conf_iris_path = conf.get("GENERAL","IRIS_DAT_PATH")
except Exception as e : raise Exception("error : conf read error : %s"%e)
self.result_dict = {}
self.key_index = []
def __del__(self):
#close
__LOG__.Trace("__del__")
#get header and key index
def getColumns(self, table_name):
header_list = []
# with open (self.conf_header_path+'/'+table_name+'.header','r') as f:
# header_list = f.read().replace('\n','').split("^")
# os.path.join(dirname, filename)
with open (self.conf_ctl_path + '/' + table_name + '.ctl','r') as f:
header_list = [line for line in f.read().split("\n") if line]
table_idf = table_name.split("_")[2]
        # get the key indexes and header
if table_idf == 'CODE':
self.key_index = [ header_list.index(key) for key in self.conf_code_key ]
elif table_idf == 'COLUMN':
self.key_index = [ header_list.index(key) for key in self.conf_column_key ]
else:
raise Exception("Table Name is not match")
__LOG__.Trace(self.key_index)
return header_list
    # Connect to the DB and create a cursor
def connectDB(self):
try:
self.cur = pg.Postgres_py3(self.conf_db_class)
except Exception as e:
__LOG__.Trace("Cannnot connect DB : %s"% e)
#IRIS connect and get cursor
def connectIRIS(self):
try:
self.iris_cur = iris.Client(self.conf_iris_class)
self.iris_cur.setSep(self.out_data_sep,"\n")
except Exception as e:
__LOG__.Trace("Cannot connect DB : %s"% e)
def getKeyDataStr(self, row):
key = None
data = None
try :
key = self.out_data_sep.join( map ( str , [ row[key] for key in self.key_index ] ))
data = self.out_data_sep.join( map ( str , row ) )
except :
__LOG__.Watch(row)
__LOG__.Exception()
return key , data
#IRIS Select
def selectIRIS(self, table_name, column_list):
self.connectIRIS()
selectSQL = '''
SELECT {columns} FROM {tableName} ;
'''.format(columns = ','.join(column_list),tableName = table_name)
cnt = 0
for row in self.iris_cur.execute_iter(selectSQL):
key, data = self.getKeyDataStr( row )
self.result_dict.setdefault(key, []).append( data )
__LOG__.Trace("IRIS Select Complete : %s" % table_name)
#PGSQL Select
def selectPG(self,table_name, column_list):
        # create the cursor
self.connectDB()
        # query the table data
sql = '''
SELECT {columns} FROM {tableName}
'''.format(columns = ','.join(column_list),tableName=table_name)
for row in self.cur.execute_iter(sql=sql):
key, data = self.getKeyDataStr(row)
self.result_dict.setdefault(key, []).append( data )
__LOG__.Trace("Select Complete : %s"% table_name)
# reset time columns
def resetTimeColumn(self, row_str):
row = row_str.split(self.out_data_sep)
row[-2] = self.cur_time # CREATE_TIME
row[-1] = '' # UPDATE_TIME
return self.out_data_sep.join(row)
# combinate dict and write file
def combinateAndWrite(self, table_name, column_list):
# self.loggingInit("일반모듈")
__LOG__.Trace("Combinate And Write Dict Start ")
file_name = "%s_%s" % ( table_name.upper() , self.cur_time )
file_path = os.path.join( self.conf_dat_path , file_name ) #real file path
comb_dict = {}
with open( file_path + '.tmp' , 'w' ) as f:
f.write(self.out_data_sep.join(column_list) + "\n")
row_cnt = 0
for key in self.result_dict: #start for
row = None
row_list = list(set(self.result_dict[key]))
if len(row_list) == 1:
row = self.resetTimeColumn( row_list[0] )
elif len(row_list) == 2:
u_time_list = []
for one_row in row_list :
u_time = one_row.split(self.out_data_sep)[-1]
if u_time == '' or u_time == None : u_time = 0
                            try : u_time = int(u_time)
                            except :
                                __LOG__.Exception()
                                u_time = 0  # fall back so the int comparison below cannot fail
                            u_time_list.append(u_time)
row_str = None
if u_time_list[0] > u_time_list[1] : row_str = row_list[0]
else : row_str = row_list[1]
row = self.resetTimeColumn(row_str)
                    else:
                        __LOG__.Watch(row_list)
                        raise Exception("same key is repeated 3 or more times")
                    comb_key, _ = self.getKeyDataStr( row.split(self.out_data_sep) )
                    comb_dict.setdefault(comb_key, row)
#end for
#start for
for c_key in comb_dict:
w_row = None
w_row = comb_dict[c_key]
row_cnt += 1
f.write(w_row + "\n")
#end for
# self.out_file_row_cnt = row_cnt
os.rename( file_path + '.tmp' , file_path + '.dat')
# if os.path.exists(file_path + '.dat'):
# self.out_file_size = os.path.getsize(file_path + ".dat" )
# else :
# slef.out_file_size = 0
__LOG__.Trace("Combinate And Write Dict Complete : %s" % ( file_path + '.dat' ))
std_out_msg = "file://%s" % (file_path + '.dat')
stdout(std_out_msg)
# self.std_out = std_out_msg
#
# self.logging()
# def loggingInit(self, process_type):
#
# self.center_name = '중계서버'
# self.center_id = 'GW'
#
# self.process_type = process_type
#
# self.in_file_size = ''
#
# self.in_file_row_cnt = ''
#
# self.out_file_size = ''
#
# self.out_file_row_cnt = ''
#
# self.table_info = ''
# self.key_info = ''
# self.partition_info = ''
# self.result_flag = ''
# self.success_cnt = ''
# self.fail_reason = ''
# self.header_cnt = ''
# self.comp_row_cnt = ''
# self.error_column_length = ''
# self.error_check_notnull = ''
# self.error_check_type_legth = ''
# self.error_check_format = ''
# self.error_change_cont = ''
#
#
#
# def logging(self):
#
# #center_name, center_id, process_name, process_type, start_time, end_time, duration, in_file_size,
# #out_file_size, in_file_row_cnt, out_file_row_cnt, table_info, key_info, partition_info, result_flag,
# #success_cnt, fail_reason, '', '', '', '', '', '', ''
#
# #start_time = self.cur_time[:8]+'000000'
# start_time = self.cur_time
#
# #end_time = datetime.datetime.now().strftime("%Y%m%d") + '000000'
# end_time = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
#
# msg = '|^|'.join(map(str,[self.center_name, self.center_id, self.process_name,\
# self.process_type, start_time, end_time, self.std_in, self.std_out,\
# self.in_file_size, self.out_file_size, self.in_file_row_cnt, self.out_file_row_cnt,\
# self.table_info, self.key_info, self.partition_info, self.result_flag, self.success_cnt, self.fail_reason,\
# self.header_cnt, self.comp_row_cnt, self.error_column_length, self.error_check_notnull, self.error_check_type_legth,\
# self.error_check_format, self.error_change_cont]))
#
# logClient.irisLogClient().log(msg)
# __LOG__.Trace("Send Log Socket : %s" % msg )
#
def processing(self, prefix, table_name) :
__LOG__.Trace( "processing : %s" % table_name )
header_list = self.getColumns(table_name)
self.result_dict = {}
#DB data get
if prefix == 'IRIS':
self.selectIRIS( table_name, header_list )
elif prefix == 'PGSQL':
self.selectPG( table_name, header_list )
elif prefix == 'BOTH':
self.selectIRIS( table_name, header_list )
self.selectPG( table_name, header_list )
self.combinateAndWrite( table_name, header_list )
def run(self):
while not SHUTDOWN :
self.cur_time = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
std_in = None
is_std_err = False
try:
std_in = sys.stdin.readline().strip()
#BOTH://UPDATED>>%s
#PGSQL://UPDATED>>%s
#IRIS://UPDATED>>%s
if not std_in :
is_std_err = True
continue
__LOG__.Trace('STD IN : %s' % std_in )
try :
prefix, line = std_in.split('://', 1)
except :
is_std_err = True
__LOG__.Trace( 'Input format error : %s' % std_in )
continue
if prefix not in ["BOTH","PGSQL","IRIS"] :
is_std_err = True
__LOG__.Trace('Prefix is not match : %s' % prefix)
continue
try :
msg , table_name = line.split('>>')
except :
is_std_err = True
__LOG__.Trace("Data format error : %s " % line)
continue
if msg != "UPDATED":
is_std_err = True
__LOG__.Trace("Message is not match : %s" % msg )
continue
stime = time.time()
# self.std_in = std_in
self.processing( prefix, table_name )
etime = time.time()
__LOG__.Trace( 'Duration %s sec' % ( etime - stime ) )
is_std_err = True
except:
if not SHUTDOWN : __LOG__.Exception()
finally :
if std_in != None and is_std_err :
stderr( std_in )
#- main function ----------------------------------------------------
def main():
module = os.path.basename(sys.argv[0])
    if len(sys.argv) < 3:
        sys.stderr.write('Usage : %s section conf {option:[[log_arg]-d]}\n' % module )
#python3 /home/test/Project_name/bin/py3/BaseModule.py SECTION /home/test/Project_name/conf/BaseModule.conf
#python3 /home/test/Project_name/bin/py3/BaseModule.py SECTION /home/test/Project_name/conf/BaseModule.conf 0
#python3 /home/test/Project_name/bin/py3/BaseModule.py SECTION /home/test/Project_name/conf/BaseModule.conf -d
sys.stderr.flush()
os._exit(1)
section = sys.argv[1]
config_file = sys.argv[2]
conf = ConfigParser.ConfigParser()
conf.read(config_file)
if '-d' not in sys.argv :
etc_argv = sys.argv[3:]
log_arg = ''
if len(sys.argv[3:]) > 0 :
log_arg = '_' + sys.argv[3]
log_path = conf.get('GENERAL', 'LOG_PATH')
makedirs( log_path )
log_file = os.path.join(log_path, '%s_%s%s.log' % (os.path.splitext(module)[0], section, log_arg ))
Log.Init(Log.CRotatingLog(log_file, 10240000, 9))
else:
Log.Init()
pid = os.getpid()
__LOG__.Trace('============= %s START [pid:%s]==================' % ( module, pid ))
WriteDat(module, conf, section).run()
__LOG__.Trace('============= %s END [pid:%s]====================' % (module, pid ))
#- if name start ----------------------------------------------
if __name__ == "__main__" :
main()
```
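The merge rule buried inside `combinateAndWrite()` is easy to miss: rows from IRIS and PGSQL are grouped by key, and when the two stores hold different copies, the one with the larger `UPDATE_TIME` wins before the time columns are reset. A pure-Python sketch of just that rule, assuming `'^'`-separated rows whose last field is `UPDATE_TIME` (empty sorts as 0); the sample rows are made-up illustrations.

```python
# Pick the newer of two store copies by UPDATE_TIME, as combinateAndWrite() does.
SEP = "^"

def newest(rows):
    def update_time(row):
        value = row.rsplit(SEP, 1)[1]   # last field is UPDATE_TIME
        try:
            return int(value)
        except ValueError:
            return 0                    # empty or malformed times sort lowest
    return max(rows, key=update_time)

rows = ["CM^CODE^A^20210101000000^",
        "CM^CODE^A^20210101000000^20210601120000"]
print(newest(rows))  # the copy with the later UPDATE_TIME wins
```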
#### File: bin/py3/WriteLog.py
```python
import os
import sys
import time
import datetime
import signal
import select
import configparser as ConfigParser
import socket
#import $MOBIGEN_LIB$
import Mobigen.Common.Log_PY3 as Log; Log.Init()
#import $PROJECT_LIB$
#- shutdown ----------------------------------------------------
SHUTDOWN = False
def shutdown(signalnum, handler):
global SHUTDOWN
SHUTDOWN = True
sys.stderr.write('Catch Signal: %s \n' % signalnum)
sys.stderr.flush()
signal.signal(signal.SIGTERM, shutdown) # sigNum 15 : Terminate
signal.signal(signal.SIGINT, shutdown) # sigNum 2 : Keyboard Interrupt
signal.signal(signal.SIGHUP, shutdown) # sigNum 1 : Hangup detected
signal.signal(signal.SIGPIPE, shutdown) # sigNum 13 : Broken Pipe
'''
On Windows, signal() can only be called with
SIGABRT, SIGFPE,SIGILL, SIGINT, SIGSEGV, or SIGTERM.
A ValueError will be raised in any other case.
'''
#- def global setting ----------------------------------------------------
def stderr(msg) :
sys.stderr.write(msg + '\n')
sys.stderr.flush()
#__LOG__.Trace('Std ERR : %s' % msg)
def stdout(msg) :
sys.stdout.write(msg + '\n')
sys.stdout.flush()
__LOG__.Trace('Std OUT : %s' % msg)
def makedirs(path) :
try :
os.makedirs(path)
__LOG__.Trace( path )
except : pass
#- Class ----------------------------------------------------
class WriteDat:
def __init__(self, conf, prefix, idx_name) :
section = 'WRITE_LOG'
self.conf = conf
self.target_prefix = prefix
self.idx_name = idx_name
self.conf_idx_path = conf.get(section,'IDX_PATH')
makedirs(self.conf_idx_path)
self.idx_file_name = os.path.join(self.conf_idx_path, self.idx_name)
__LOG__.Watch(self.idx_file_name)
try : self.out_data_sep = conf.get(section,'OUT_DATA_SEP')
except : self.out_data_sep = '|^|'
self.conf_table_name = conf.get(section, self.target_prefix)
self.conf_dat_path = conf.get(section,'DAT_PATH')
makedirs(self.conf_dat_path)
self.dat_file_name_form = os.path.join(self.conf_dat_path, '%s-%s' % ( conf.get(section, self.target_prefix), conf.get("GENERAL", socket.gethostname()) ))
self.conf_ctl_path = conf.get(section,'CTL_PATH')
ctl_path = os.path.join(self.conf_ctl_path, self.conf_table_name + ".ctl")
with open (ctl_path ,'r') as fd:
self.header_len = len([ col for col in fd.read().split('\n') if col != '' ])
try:
with open(self.idx_file_name) as fd:
self.prev_dat_file_name = fd.read().strip()
except:
            # 10-minute bucket: %Y%m%d%H%M truncated to 11 chars keeps the tens
            # digit of the minute; '000' pads the file name.
            self.prev_dat_file_name = '%s-%s000.dat' % ( self.dat_file_name_form, datetime.datetime.now().strftime("%Y%m%d%H%M")[:11] )
def run(self):
while not SHUTDOWN :
try:
std_in = None
is_std_err = False
                cur_dat_file_name = '%s-%s000.dat' % ( self.dat_file_name_form, datetime.datetime.now().strftime("%Y%m%d%H%M")[:11] )
if self.prev_dat_file_name != cur_dat_file_name :
if os.path.exists( self.prev_dat_file_name ):
if os.path.getsize(self.prev_dat_file_name) > 0:
stdout( 'file://' + self.prev_dat_file_name )
else:
__LOG__.Trace('file size 0 file remove: %s' % self.prev_dat_file_name)
os.remove(self.prev_dat_file_name)
else:
__LOG__.Trace('prev file is not exists : %s' % self.prev_dat_file_name)
self.prev_dat_file_name = cur_dat_file_name
with open(self.idx_file_name, 'w') as fd:
fd.write(self.prev_dat_file_name)
inputready, outputready, exceptready = select.select([sys.stdin] , [], [] , 10)
for std_in in inputready:
std_in = std_in.readline().strip()
if not std_in :
is_std_err = True
continue
try :
prefix, line = std_in.split('://', 1)
except :
is_std_err = True
continue
                    ## Temporary until US / DJ handling is in place (2021-06-01) ################################
if ( prefix == 'ER_LOG' ) and ( self.conf.get("GENERAL", socket.gethostname()) in ['US','DJ'] ):
is_std_err = True
continue
##############################################################################################
if prefix != self.target_prefix :
is_std_err = True
continue
if line == '' or len(line.split(self.out_data_sep)) != self.header_len :
__LOG__.Trace("!!!! ERROR !!!!, DIFFERENCE BETWEEN CTL_LENGTH AND LINE_LENGTH => %s" % line)
is_std_err = True
continue
__LOG__.Trace('STD IN : %s' % std_in )
                    with open(self.prev_dat_file_name, 'a+') as fd:
fd.write('%s\n' % line)
is_std_err = True
except:
if not SHUTDOWN : __LOG__.Exception()
finally :
if std_in != None and is_std_err :
stderr( std_in )
#- main function ----------------------------------------------------
def main():
module = os.path.basename(sys.argv[0])
if len(sys.argv) < 3:
		sys.stderr.write('Usage : %s prefix ConfigFile {option:[[log_arg]-d]}\n' % module )
sys.stderr.flush()
os._exit(1)
prefix = sys.argv[1]
config_file = sys.argv[2]
conf = ConfigParser.ConfigParser()
conf.read(config_file)
log_arg = ''
if '-d' not in sys.argv :
etc_argv = sys.argv[3:]
if len(sys.argv[3:]) > 0 :
log_arg = '_' + sys.argv[3]
log_path = conf.get('GENERAL','LOG_PATH')
makedirs( log_path )
log_file = os.path.join(log_path, '%s_%s%s.log' % (os.path.splitext(module)[0], prefix, log_arg) )
Log.Init(Log.CRotatingLog(log_file, 10240000, 9))
else:
Log.Init()
pid = os.getpid()
	__LOG__.Trace('============= %s START [pid:%s]==================' % ( module, pid ))
idx_name = '%s_%s%s.idx' % (os.path.splitext(module)[0], prefix, log_arg)
WriteDat( conf, prefix, idx_name ).run()
__LOG__.Trace('============= %s END [pid:%s]====================' % (module, pid ))
#- if name start ----------------------------------------------
if __name__ == "__main__" :
main()
```
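For context on how this writer is driven: `main()` takes `prefix ConfigFile` arguments, the run loop consumes stdin lines of the form `prefix://payload`, buckets payloads into rolling 10-minute `.dat` files, and echoes every consumed line on stderr. A minimal sketch of exercising it by hand; the script filename, config path, and field values are illustrative assumptions, not taken from the source:

```python
# Hypothetical smoke test for the dat-writer above. Assumes a config with the
# [WRITE_LOG] keys read in WriteDat.__init__ (IDX_PATH, DAT_PATH, CTL_PATH,
# <prefix> = table name) and a [GENERAL] section mapping hostname and LOG_PATH.
import subprocess

proc = subprocess.Popen(
    ['python', 'WriteLog.py', 'ER_LOG', 'conf/WriteLog.conf'],
    stdin=subprocess.PIPE, stderr=subprocess.PIPE)
# fields are joined with OUT_DATA_SEP (default '|^|') and must match the .ctl column count
proc.stdin.write('ER_LOG://val1|^|val2|^|val3\n')
proc.stdin.close()
print(proc.stderr.readline())  # consumed lines come back on stderr
```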
#### File: 20191125/bin/MQLoader.py
```python
import datetime
import json
import requests
import sys
import os
import signal
import ConfigParser
import RabbitMQ as MQ
import Mobigen.Common.Log as Log; Log.Init()
from requests.auth import HTTPBasicAuth
SHUTDOWN = False
## Signal
#
# @param signum
# @param frame Stack Frame
def handler(signum, frame):
global SHUTDOWN
SHUTDOWN = True
__LOG__.Trace('Catch Signal = %s' % signum)
## SIGTERM
signal.signal(signal.SIGTERM, handler)
## SIGINT
signal.signal(signal.SIGINT, handler)
## SIGHUP
signal.signal(signal.SIGHUP , handler)
## SIGPIPE
signal.signal(signal.SIGPIPE, handler)
class MQLoader:
def __init__(self, section, cfg):
self.section = section
self.cfg = cfg
self.MQ_VHOST = self.cfg.get(section, 'MQ_VHOST')
self.use_bson = self.cfg.get(section, 'MQ_USE_BSON').upper() == 'Y'
self.MQ_HOST = self.cfg.get(section, 'MQ_HOST')
self.MQ_SSL_PORT = int(self.cfg.get(section, 'MQ_SSL_PORT'))
self.MQ_USER = self.cfg.get(section, 'USER')
self.MQ_PASS = self.cfg.get(section, 'PASS')
self.MQ_CA_CERTS = self.cfg.get(section, 'MQ_CA_CERTS')
self.MQ_CERTFILE = self.cfg.get(section, 'MQ_CERTFILE')
self.MQ_KEYFILE = self.cfg.get(section, 'MQ_KEYFILE')
self.ELG = self.cfg.get(section, 'ELG')
self.STATUS = self.cfg.get(section, 'STATUS')
self.NOT_MQ_EMS = self.cfg.get(section, 'NOT_MQ_EMS')
self.MQ_LOG_PATH = self.cfg.get('GENERAL', 'MQ_LOG_PATH')
self.repo = dict()
def writeStdOut(self, msg):
sys.stdout.write(msg+'\n')
sys.stdout.flush()
__LOG__.Trace("STD OUT: %s" % msg)
def writeStdErr(self, msg):
sys.stderr.write('%s\n' % (msg))
sys.stderr.flush()
__LOG__.Trace('STD ERR: %s' % (msg))
## MQ Connection
def mqInitConnection(self):
self.mq = MQ.DirectQueueClient()
self.mq.connectSSL(self.MQ_USER, self.MQ_PASS, self.MQ_HOST, self.MQ_SSL_PORT, self.MQ_VHOST, self.MQ_CA_CERTS, self.MQ_CERTFILE, self.MQ_KEYFILE)
def jsonLoad( self, initFilePath ) :
jsonDict = None
try :
jsonFile = open(initFilePath, 'r')
jsonFileStr = jsonFile.read()
jsonFile.close()
self.repo = json.loads(jsonFileStr)
return self.repo
except :
			__LOG__.Exception()
def exportStdOut( self, dataMap, oneExportInfo) :
dataMap['emsIp'] = oneExportInfo['expEmsIp']
dataMap['idx'] = oneExportInfo['idx']
dataMap['workEndDate'] = self.repo['workInfo']['workEndDate']
dataMap['workProgStatCd'] = self.repo['workInfo']['workProgStatCd']
dataMap['emsNm'] = oneExportInfo['expEmsNm']
dataMap['oprrId'] = oneExportInfo['oprrId']
__LOG__.Trace( 'RAN_EMS exportStdOut!! %s ' % dataMap )
self.writeStdOut(json.dumps(dataMap))
def queueDatCtlFileCreate( self, dataMap, lkngUnitDict ) :
__LOG__.Trace( 'queue File Create' )
queueDict = dict()
queueDict['idx'] = dataMap['idx']
queueDict['expEmsIp'] = dataMap['expEmsIp']
queueDict['workId'] = dataMap['workId']
queueDict['workStaDate'] = dataMap['workStaDate']
queueDict['workStatus'] = dataMap['workTypCd']
queueDict['queueNm'] = lkngUnitDict['mqNm']
queueDict['queueMsg'] = dataMap
queueDict['evntDate'] = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
dateName = datetime.datetime.now().strftime('%Y%m%d_%H%M')
self.writeStdOut('%s%s' %('file://', json.dumps( queueDict, encoding='utf-8') ))
def exportQueue( self, dataMap, oneExportInfo ) :
__LOG__.Trace( 'MQ Export Start!!')
vendor = oneExportInfo['vendor'].encode('utf-8')
expEmsIp = oneExportInfo['expEmsIp']
lkngUnitDict = oneExportInfo['lkngUnitDict']
idx = oneExportInfo['idx']
if lkngUnitDict['unitDistYn'] == 'Y' :
__LOG__.Trace('MQ insert Start!!')
__LOG__.Trace('Queue Name = %s ' % lkngUnitDict['mqNm'] )
self.mqInitConnection()
__LOG__.Trace('EMS Ip = %s Export Y !!' % expEmsIp)
try :
self.mq.connectChannel()
self.mq.put( lkngUnitDict['mqNm'], dataMap, use_bson = self.use_bson)
__LOG__.Trace('Queue insert Success')
try :
dataMap['idx'] = idx
dataMap['expEmsIp'] = expEmsIp
self.queueDatCtlFileCreate( dataMap, lkngUnitDict )
except :
__LOG__.Exception()
except :
__LOG__.Exception()
finally :
self.mq.disConnect()
else :
__LOG__.Trace('Queue insert Fail!! EMS Ip = %s , unitDistYn = %s ' % (expEmsIp, lkngUnitDict['unitDistYn'] ) )
def exportSeparation( self ) :
		if self.repo is not None :
dataMap = dict()
dataMap['workId'] = self.repo['workInfo']['workId'].encode('utf-8')
dataMap['workStaDate'] = self.repo['workInfo']['workStaDate'].encode('utf-8')
dataMap['workTypCd'] = self.STATUS
expEmsIpList = list()
for oneEqpInfo in self.repo['eqpInfo'] :
cmdWorkTypCd = oneEqpInfo['cmdWorkTypCd']
if oneEqpInfo['eqpTyp'] == self.NOT_MQ_EMS and oneEqpInfo['vendor'] == self.ELG :
self.exportStdOut(dataMap, oneEqpInfo)
else :
continue
for oneExportInfo in self.repo['exportInfo'] :
cmdWorkTypCd = oneExportInfo['cmdWorkTypCd']
__LOG__.Trace( 'cmdWorkTypCd : %s ' % cmdWorkTypCd )
if oneExportInfo['eqpTyp'] == self.NOT_MQ_EMS and oneExportInfo['vendor'] == self.ELG :
continue
else :
self.exportQueue( dataMap, oneExportInfo )
## data To MQ
def run(self):
__LOG__.Trace('MQLoader start!!')
while not SHUTDOWN:
			strIn = None
try:
strIn = sys.stdin.readline()
strLine = strIn.strip()
if strLine == '' :
continue
prefix, initFilePath = strLine.split('://')
				if not os.path.exists(initFilePath):
					__LOG__.Trace('[ERROR] File not found! %s' % initFilePath)
					continue
try :
self.repo = self.jsonLoad( initFilePath )
self.exportSeparation()
except :
__LOG__.Exception()
except:
if not SHUTDOWN :
					__LOG__.Exception('[ERROR]')
			finally:
				if strIn != None :
					self.writeStdErr(strIn)
def main():
module = os.path.basename(sys.argv[0])
section = sys.argv[1] # TACS_MQ_LOADER
cfgFile = sys.argv[2]
cfg = ConfigParser.ConfigParser()
cfg.read(cfgFile)
logPath = cfg.get("GENERAL", "LOG_PATH")
logFile = os.path.join(logPath, "%s_%s.log" % (module, section))
logCfgPath = cfg.get("GENERAL", "LOG_CONF")
logCfg = ConfigParser.ConfigParser()
logCfg.read(logCfgPath)
Log.Init(Log.CRotatingLog(logFile, logCfg.get("LOG", "MAX_SIZE"), logCfg.get("LOG", "MAX_CNT") ))
ml = MQLoader(section, cfg)
ml.run()
if __name__ == '__main__':
try:
main()
except:
__LOG__.Exception('[ERROR] In main')
```
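For reference, the init file that `jsonLoad` parses has the shape below. The key names are exactly those read in `exportStdOut`, `exportSeparation`, and `exportQueue`; the values are made-up placeholders:

```python
# Illustrative MQLoader init-file payload (all values are placeholders).
init_example = {
    'workInfo': {
        'workId': 'O190429000001',
        'workStaDate': '20190429090000',
        'workEndDate': '20190429180000',
        'workProgStatCd': 'P',
    },
    'eqpInfo': [
        {'eqpTyp': 'EMS', 'vendor': 'ELG', 'cmdWorkTypCd': 'C'},
    ],
    'exportInfo': [
        {'idx': '1', 'expEmsIp': '192.0.2.10', 'expEmsNm': 'EMS-01',
         'oprrId': 'op01', 'vendor': 'VND', 'eqpTyp': 'RAN', 'cmdWorkTypCd': 'C',
         'lkngUnitDict': {'mqNm': 'WORK.QUEUE.01', 'unitDistYn': 'Y'}},
    ],
}
```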
#### File: 20191206/bin/Qmigration.py
```python
import datetime
import os
import sys
import signal
import time
import ConfigParser
import glob
import json
import Mobigen.Common.Log as Log; Log.Init()
from watchdog.observers import Observer
import shutil
import RabbitMQ as MQ
from watchdog.events import FileSystemEventHandler
from watchdog.events import FileCreatedEvent
# Process Shutdown Flag
SHUTDOWN = False
# @param signum
# @param frame -
def handler(signum, frame):
	global SHUTDOWN
	SHUTDOWN = True
	__LOG__.Trace("signal: process shutdown")
# SIGTERM
signal.signal(signal.SIGTERM, handler)
# SIGINT
signal.signal(signal.SIGINT, handler)
# SIGHUP
signal.signal(signal.SIGHUP, handler)
# SIGPIPE
signal.signal(signal.SIGPIPE, handler)
# JSON
class QMigration(FileSystemEventHandler):
def __init__(self, cfg, section):
self.cfg = cfg
self.PREFIX = self.cfg.get(section, 'OUT_PREFIX')
self.PATTERN = self.cfg.get(section, 'PATTERN')
self.COMP_PATH = self.cfg.get(section, 'COMP_PATH')
self.MONITOR_PATH = self.cfg.get(section, "DIRECTORY")
self.MQ_VHOST = self.cfg.get(section, 'MQ_VHOST')
self.use_bson = self.cfg.get(section, 'MQ_USE_BSON').upper() == 'Y'
self.MQ_HOST = self.cfg.get(section, 'MQ_HOST')
self.MQ_SSL_PORT = int(self.cfg.get(section, 'MQ_PORT'))
self.MQ_USER = self.cfg.get(section, 'USER')
self.MQ_PASS = self.cfg.get(section, 'PASS')
self.MQ_CA_CERTS = self.cfg.get(section, 'MQ_CA_CERTS')
self.MQ_CERTFILE = self.cfg.get(section, 'MQ_CERTFILE')
self.MQ_KEYFILE = self.cfg.get(section, 'MQ_KEYFILE')
def mkdirs(self, path) :
try : os.makedirs(path)
except OSError as exc:
pass
def stdout(self, msg):
sys.stdout.write("stdout" + msg + '\n')
sys.stdout.flush()
__LOG__.Trace("std OUT: %s" % msg)
def dispatch( self, event ) :
if isinstance( event, FileCreatedEvent ) :
try :
dirPath, fileName = os.path.split( event.src_path )
name, ext = os.path.splitext( fileName )
__LOG__.Trace( 'Event : %s' % event )
if ext == self.PATTERN :
self.parse(event.src_path)
except :
__LOG__.Exception()
def mqInitConnection(self) :
self.mq = MQ.DirectQueueClient()
self.mq.connectSSL(self.MQ_USER, self.MQ_PASS, self.MQ_HOST, self.MQ_SSL_PORT, self.MQ_VHOST,self.MQ_CA_CERTS, self.MQ_CERTFILE, self.MQ_KEYFILE)
def parse(self, filePath) :
if not os.path.exists(filePath) : return
__LOG__.Trace('File Exists')
dataStr = ''
with open(filePath, 'r') as jsonFile :
dataStr = jsonFile.read()
dataDict = json.loads(dataStr)
self.exportQueue(dataDict, filePath)
def exportQueue(self, dataDict, filePath) :
__LOG__.Trace('MQ Export Start !! %s' % dataDict )
self.mqInitConnection()
try :
self.mq.connectChannel()
self.mq.put(dataDict['QUEUE_NM'], json.dumps(dataDict['QUEUE_MSG'], encoding='utf-8'), use_bson = self.use_bson)
__LOG__.Trace('Queue Insert Success')
except :
__LOG__.Exception()
finally :
self.mq.disConnect()
def run(self, section):
__LOG__.Trace("start QMigration!!")
# config
monitorPath = self.MONITOR_PATH
if not os.path.exists(monitorPath) :
os.mkdir(monitorPath)
self.eventHandler(monitorPath)
		while not SHUTDOWN :
			time.sleep(0.1)
		self.observer.stop()
		self.observer.join()
def eventHandler(self, monitorPath) :
__LOG__.Trace('now Check Path : %s' % monitorPath)
self.observer = Observer()
self.observer.schedule(self, monitorPath, recursive=True)
self.observer.start()
def main():
module = os.path.basename(sys.argv[0])
section = sys.argv[1] # TACS_FILE_MONITOR
cfgfile = sys.argv[2] # /home/tacs/user/KimJW/ETL/conf/FileMonitor.conf
cfg = ConfigParser.ConfigParser()
cfg.read(cfgfile)
logPath = cfg.get("GENERAL", "LOG_PATH")
logFile = os.path.join(logPath, "%s_%s.log" % (module, section))
logCfgPath = cfg.get("GENERAL", "LOG_CONF")
logCfg = ConfigParser.ConfigParser()
logCfg.read(logCfgPath)
Log.Init(Log.CRotatingLog(logFile, logCfg.get("LOG", "MAX_SIZE"), logCfg.get("LOG", "MAX_CNT") ))
fm = QMigration(cfg, section)
fm.run(section)
__LOG__.Trace("end main!")
if __name__ == "__main__":
try:
main()
except:
__LOG__.Exception("Queue Migration main error")
```
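The watchdog handler above relays only two keys from each matched drop file, so a monitored file reduces to the following structure (queue name and message values are illustrative):

```python
# Shape of a file QMigration picks up from DIRECTORY (extension must equal PATTERN).
drop_file_example = {
    'QUEUE_NM': 'WORK.QUEUE.01',    # RabbitMQ queue the message is re-published to
    'QUEUE_MSG': {                  # forwarded verbatim as the queue payload
        'workId': 'O190429000001',
        'workStatus': 'P',
    },
}
```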
#### File: 20191206/bin/RoleManager.py
```python
import os
import sys
import signal
import time
import datetime
import ConfigParser
import json
import logging
import Mobigen.Common.Log as Log; Log.Init()
from apscheduler.schedulers.background import BackgroundScheduler
from RestAPI import RestAPI
import API.M6 as M6
SHUTDOWN = False
ENM_WORK_ROLE = 'Amos_Administrator'
roleManager = None
def handler(signum, frame):
	global SHUTDOWN
	SHUTDOWN = True
	if roleManager :
		roleManager.shutdown()
	__LOG__.Trace('signal: process shutdown')
def getContents(filePath):
if not os.path.exists(filePath) : return '{}'
f = open(filePath, 'r')
contents = f.read()
f.close()
return str(contents).encode('utf-8')
# SIGTERM
signal.signal(signal.SIGTERM, handler)
# SIGINT
signal.signal(signal.SIGINT, handler)
# SIGHUP
signal.signal(signal.SIGHUP, handler)
# SIGPIPE
signal.signal(signal.SIGPIPE, handler)
class RoleManager:
def __init__(self, cfg, section):
h = logging.StreamHandler()
h.setFormatter(logging.Formatter('%(levelname)s:%(name)s:%(message)s'))
log = logging.getLogger('apscheduler.executors.default')
log.setLevel(logging.INFO)
log.addHandler(h)
self.section = section
self.cfg = cfg
self.conn = None
self.cursor = None
self.restAPI = RestAPI()
self.hostList = self.cfg.get('ENM_SERVER_MAPPING', 'ENM_API_SERVER_LIST').split(',')
self.initRepo()
self.scheduler = BackgroundScheduler()
#self.scheduler.add_job(self.initCommandRepo, 'cron', minute='*/{}'.format(INIT_INTERVAL), second='0', id='RoleManager')
self.scheduler.add_job(self.checkJob, 'cron', minute='*', second='0', id='RoleManager')
self.scheduler.add_job(self.checkRoleStatus, 'cron', minute='*/5', second='0', id='RoleCheckManager')
#self.scheduler.add_job(self.checkRoleStatus, 'cron', minute='*', second='0', id='RoleCheckManager')
self.scheduler.start()
__LOG__.Trace('start!')
def initRepo(self):
repoStr = getContents(os.path.join(self.cfg.get(self.section, 'DUMP_PATH'), 'schedule_dump.json'))
self.jobRepo = json.loads(repoStr)
def stdOut(self, msg):
sys.stdout.write(msg+'\n')
sys.stdout.flush()
# print(msg, file=sys.stdout)
__LOG__.Trace('OUT: %s' % msg)
def stdErr(self, msg):
sys.stderr.write('%s\n' % (msg))
sys.stderr.flush()
__LOG__.Trace('ERR: %s' % msg)
def shutdown(self):
try :
df = open(os.path.join(self.cfg.get(self.section, 'DUMP_PATH'), 'schedule_dump.json'), 'w')
df.write(json.dumps(self.jobRepo, encoding='utf-8'))
df.close()
if self.scheduler :
self.scheduler.shutdown()
__LOG__.Trace('scheduler shutdown')
else :
__LOG__.Trace('scheduler is None')
except :
__LOG__.Exception()
def disConnect(self,conn,cursor):
if cursor != None:
try : cursor.close()
except : pass
if conn != None :
try : conn.close()
except : pass
def initConnect(self):
self.conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database='tacs')
__LOG__.Trace('IRIS Connect!')
try :
self.cursor = self.conn.cursor()
self.cursor.SetFieldSep('|^|')
self.cursor.SetRecordSep('|^-^|')
except :
__LOG__.Exception()
finally :
self.conn.commit()
def run(self, section):
__LOG__.Trace('RoleManager start!!')
#self.initConnect()
while not SHUTDOWN:
try :
strIn = sys.stdin.readline()
strLine = strIn.strip()
				if strLine == '' :
					pass  # the finally block forwards the line to stderr
else :
if os.path.exists(strLine) :
jsonStr = getContents(strLine)
#jsonStr = getContents('/home/tacs/DATA/WORKINFO/RAN_EMS/O190429000001_192.168.100.55.json')
jsonObj = json.loads(jsonStr)
__LOG__.Trace(jsonObj)
repoKey = jsonObj['workId']
self.jobRepo[repoKey] = jsonObj
except :
__LOG__.Exception()
finally :
self.stdErr(strIn)
#self.disConnect(self.conn, self.cursor)
def checkJob(self):
nDate = datetime.datetime.now()
nStr = nDate.strftime('%Y%m%d%H%M00')
gabStr = (nDate - datetime.timedelta(minutes=1)).strftime('%Y%m%d%H%M00')
__LOG__.Trace('nStr : %s' % nStr)
for key in self.jobRepo.keys() :
oneJob = self.jobRepo[key]
staDate = oneJob['workStaDate']
endDate = oneJob['workEndDate']
__LOG__.Trace('%s : %s ~ %s, ENABLED:%s' % (oneJob['workId'], staDate, endDate, 'ENABLED' in oneJob))
if 'ENABLED' not in oneJob and (staDate <= nStr and gabStr <= staDate) : self.addRole(oneJob)
elif 'ENABLED' in oneJob and endDate <= nStr : self.delRole(oneJob)
else :
if nStr < staDate or nStr < endDate : __LOG__.Trace('keep : %s' % oneJob['workId'])
else :
del self.jobRepo[oneJob['workId']]
__LOG__.Trace('delete: %s' % oneJob)
def addRole(self, jsonObj):
enmApiServer = self.cfg.get('ENM_SERVER_MAPPING', jsonObj['emsIp'])
__LOG__.Trace('addRole : %s, %s, %s' % (enmApiServer, jsonObj['workId'], jsonObj['oprrId']))
self.restAPI.changeUserRole(jsonObj['emsIp'],'ADD',jsonObj['oprrId'])
self.jobRepo[jsonObj['workId']]['ENABLED'] = True
def delRole(self, jsonObj):
enmApiServer = self.cfg.get('ENM_SERVER_MAPPING', jsonObj['emsIp'])
__LOG__.Trace('delRole : %s, %s, %s' % (enmApiServer, jsonObj['workId'], jsonObj['oprrId']))
self.restAPI.changeUserRole(jsonObj['emsIp'],'REMOVE',jsonObj['oprrId'])
del self.jobRepo[jsonObj['workId']]
def checkRoleStatus(self) :
nDate = datetime.datetime.now()
yymmdd = nDate.strftime('%Y%m%d')
hhmm = nDate.strftime('%H%M')
evntDate = nDate.strftime('%Y%m%d%H%M%S')
for oneHost in self.hostList :
result = self.checkAllUserRole(oneHost)
if len(result) > 0 :
f = open('/home/tacs/DATA2/AUDIT_LOG/AUDIT_17/%s_%s_%s.audit' % (yymmdd, hhmm, oneHost), 'a')
for oneInfo in result :
oneInfo['evntDate'] = evntDate
					f.write('%s\n' % json.dumps(oneInfo, encoding='utf-8'))
f.close()
def getAllUser(self, host) :
uri = '/oss/idm/usermanagement/users'
result = '[]'
try :
code, result = self.restAPI.execute(host, 'GET', uri)
except :
__LOG__.Exception()
userList = json.loads(result)
return userList
def getUserRole(self, host, userId) :
if userId is None or userId == '' : return None
uri = '/oss/idm/usermanagement/users/%s/privileges' % userId
code, result = self.restAPI.execute(host, 'GET', uri)
userInfo = json.loads(result)
return userInfo
def checkAllUserRole(self, host) :
userRoleList = []
userList = self.getAllUser(host)
for oneUser in userList :
if oneUser['username'] == 'SKT_TACS' : continue
userRoleInfo = self.getUserRole(host, oneUser['username'])
for oneRole in userRoleInfo :
if ENM_WORK_ROLE == oneRole['role'] :
__LOG__.Trace('Host: %s ,User : %s, Role : %s' % (host, oneUser['username'], oneRole['role']))
nDate = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
userRoleList.append({'workUserId':oneUser['username'], 'beforePriv':oneRole['role'], 'checkDate':nDate})
time.sleep(1)
currentOprrIdList = []
for key in self.jobRepo.keys() :
oneJob = self.jobRepo[key]
if 'ENABLED' in oneJob :
oprrIdList = oneJob['oprrId'].split(';')
oprrIdList = [x for x in oprrIdList if x]
currentOprrIdList = currentOprrIdList + oprrIdList
result = []
for oneUserInfo in userRoleList :
if not oneUserInfo['workUserId'] in currentOprrIdList :
result.append(oneUserInfo)
return result
def main():
global roleManager
module = os.path.basename(sys.argv[0])
section = sys.argv[1] # ROLE_MANAGER
cfgfile = sys.argv[2] # /home/tacs/TACS-EF/ETL/conf/RoleManager.conf
cfg = ConfigParser.ConfigParser()
cfg.read(cfgfile)
logPath = cfg.get("GENERAL", "LOG_PATH")
logFile = os.path.join(logPath, "%s_%s.log" % (module, section))
logCfgPath = cfg.get("GENERAL", "LOG_CONF")
logCfg = ConfigParser.ConfigParser()
logCfg.read(logCfgPath)
Log.Init(Log.CRotatingLog(logFile, logCfg.get("LOG", "MAX_SIZE"), logCfg.get("LOG", "MAX_CNT") ))
roleManager = RoleManager(cfg, section)
roleManager.run(section)
__LOG__.Trace('end main!')
if __name__ == '__main__':
try:
main()
except:
__LOG__.Exception('main error')
```
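RoleManager keeps its state in `self.jobRepo`, keyed by workId and persisted to `schedule_dump.json` on shutdown. A representative entry, limited to the fields the scheduler callbacks actually read (values are illustrative):

```python
# Illustrative jobRepo entry: checkJob compares workStaDate/workEndDate,
# addRole/delRole use emsIp and oprrId, and 'ENABLED' marks a granted role.
job_entry_example = {
    'workId': 'O190429000001',
    'workStaDate': '20190429090000',
    'workEndDate': '20190429180000',
    'emsIp': '192.168.100.55',
    'oprrId': 'op01;op02',      # split on ';' in checkRoleStatus
    # 'ENABLED': True           # set by addRole, entry removed by delRole
}
```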
#### File: bin/bin/IRISSelect.py
```python
import os
import sys
import json
import Mobigen.Common.Log as Log;
import ConfigParser
import API.M6 as M6
class IRIS_SQL :
def __init__(self, IRIS,IRIS_ID,IRIS_PASS) :
self.IRIS = IRIS
self.IRIS_ID = IRIS_ID
		self.IRIS_PASS = IRIS_PASS
def updateIdx( self ) :
conn = None
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database ='tacs')
cursor = conn.cursor()
sql = """ UPDATE TACS.TACS_WORK_INFO_IDX
SET IDX = IDX + 1 """
cursor.Execute2(sql)
except :
__LOG__.Exception()
finally :
if conn : conn.close()
def deleteCollectCheck( self, workId, lastChgDate ) :
conn = None
__LOG__.Trace("WORK_ID : %s , LAST_CHG_DATE : %s" % (workId, lastChgDate))
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database ='tacs')
cursor = conn.cursor()
sql = """ DELETE
FROM TACS.TACS_WORK_COLLECT_CHECK
WHERE WORK_ID = '%s' AND LAST_CHG_DATE = '%s'
"""%(workId, lastChgDate)
result = cursor.Execute2(sql)
__LOG__.Trace("DELETE RESULT TACS.TACS_WORK_COLLECT_CHECK : %s" % result )
except :
__LOG__.Exception()
finally :
if conn : conn.close()
def insertCollectCheck( self, workId, lastChgDate ) :
conn = None
__LOG__.Trace("WORK_ID : %s , LAST_CHG_DATE : %s" % (workId, lastChgDate))
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database ='tacs')
cursor = conn.cursor()
sql = """ INSERT INTO TACS.TACS_WORK_COLLECT_CHECK(WORK_ID, LAST_CHG_DATE)
VALUES('%s', '%s')
""" % (workId, lastChgDate)
result = cursor.Execute2(sql)
__LOG__.Trace("INSERT RESULT TACS.TACS_WORK_COLLECT_CHECK : %s" % result )
except :
__LOG__.Exception()
finally :
if conn : conn.close()
def selectCollectCheck( self, workId, lastChgDate ) :
conn = None
result = ''
__LOG__.Trace("WORK_ID : %s , LAST_CHG_DATE : %s" % (workId, lastChgDate))
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database ='tacs')
cursor = conn.cursor()
sql = """ SELECT COUNT(*)
FROM TACS.TACS_WORK_COLLECT_CHECK
WHERE WORK_ID = '%s' AND LAST_CHG_DATE = '%s'
""" % (workId, lastChgDate)
cursor.Execute2(sql)
for oneRaw in cursor :
result = oneRaw[0].encode('utf-8')
except :
__LOG__.Exception()
finally :
if conn : conn.close()
return result
def selectIDX ( self ) :
conn = None
result = ''
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database ='tacs')
cursor = conn.cursor()
sql = """ SELECT IDX
FROM TACS.TACS_WORK_INFO_IDX
"""
cursor.Execute2(sql)
for oneRaw in cursor :
result = oneRaw[0]
except :
__LOG__.Exception()
finally :
if conn : conn.close()
return result
def selectWorkInfoCnt (self, workId) :
workId = workId.strip()
__LOG__.Trace('workId = %s' % workId )
conn = None
result = 0
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database ='tacs')
cursor = conn.cursor()
sql = """
SELECT COUNT(*)
FROM TACS.TACS_WORK_INFO
WHERE WORK_ID = '%s'
""" % workId
cursor.Execute2(sql)
for oneRaw in cursor :
				result = int(oneRaw[0].encode('utf-8'))
except :
__LOG__.Exception()
finally :
if conn : conn.close()
return result
def selectIdOmpData (self, tacsEqpId) :
tacsEqpId = tacsEqpId.strip()
__LOG__.Trace('emsId = %s' % tacsEqpId)
resultList = list()
conn = None
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database = 'tacs' )
cursor = conn.cursor()
sql = """
SELECT
EQP_TYP, EQP_ID
FROM
TACS.TNG_IM_EQP_BAS
WHERE EMS_EQP_ID = '%s'
""" % tacsEqpId
resultMsg = cursor.Execute2(sql)
for oneRaw in cursor :
resultDict = {'tacsOmpEqpTyp' : '', 'tacsOmpEqpId' : ''}
resultDict['tacsOmpEqpTyp'] = oneRaw[0].encode('utf-8')
resultDict['tacsOmpEqpId'] = oneRaw[1].encode('utf-8')
resultList.append(resultDict)
except :
__LOG__.Exception()
finally :
if conn : conn.close()
return resultList
def selectIpEmsData ( self, emsIp ) :
emsIp = emsIp.strip()
__LOG__.Trace( 'emsIp = %s' % emsIp )
resultDict = {'tacsEqpId' : '' ,'expEmsNm' : '' ,'expEmsIp' : '', 'vendor' : '', 'eqpTyp' : ''}
conn = None
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database = 'tacs' )
cursor = conn.cursor()
sql = """
SELECT
EQP_NM, REP_IP_ADDR, SPLY_BP_ID, EQP_TYP, EQP_ID
FROM
TACS.TNG_IM_EQP_BAS
WHERE
(REP_IP_ADDR = '%s'
OR REP_IP_ADDR_1 = '%s'
OR REP_IP_ADDR_2 = '%s'
OR REP_IP_ADDR_3 = '%s'
OR REP_IP_ADDR_4 = '%s'
OR REP_IP_ADDR_5 = '%s')
LIMIT 1
""" % (emsIp, emsIp, emsIp, emsIp, emsIp, emsIp)
resultMsg = cursor.Execute2(sql)
for oneRaw in cursor :
resultDict['expEmsNm'] = oneRaw[0].encode('utf-8')
resultDict['expEmsIp'] = oneRaw[1].encode('utf-8')
resultDict['vendor'] = oneRaw[2].encode('utf-8')
resultDict['eqpTyp'] = oneRaw[3].encode('utf-8')
resultDict['tacsEqpId'] = oneRaw[4].encode('utf-8')
except :
__LOG__.Exception()
finally :
if conn : conn.close()
return resultDict['expEmsNm'], resultDict['expEmsIp'], resultDict['vendor'], resultDict['eqpTyp'], resultDict['tacsEqpId']
def selectRelationCheck(self) :
__LOG__.Trace('!! Relation Check !!')
conn = None
result = ''
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database = 'tacs' )
cursor = conn.cursor()
sql = """
SELECT
RELATION_CHECK_YN
FROM
TACS.TACS_CORE_EMS_RELATION_CHECK
LIMIT 1
"""
resultMsg = cursor.Execute2(sql)
__LOG__.Trace(sql)
__LOG__.Trace(resultMsg)
if 'OK' in resultMsg :
for oneRaw in cursor :
result = oneRaw[0].encode('utf-8')
else :
__LOG__.Trace('Query Fail!! ')
except :
__LOG__.Exception()
finally :
if conn : conn.close()
return result
def selectIpCommonEqpDataRelationOff ( self, svrIpList ) :
		__LOG__.Trace('svrIp = %s | Relation Off' % (svrIpList) )
conn = None
result = ''
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database = 'tacs' )
cursor = conn.cursor()
ipList = '\'%s\'' % '\',\''.join(svrIpList)
__LOG__.Trace(ipList)
sql = """
SELECT
EQP_EMS_NM
FROM
TACS.TNG_IM_EQP_BAS
WHERE
(
REP_IP_ADDR IN (%s)
OR REP_IP_ADDR_1 IN (%s)
OR REP_IP_ADDR_2 IN (%s)
OR REP_IP_ADDR_3 IN (%s)
OR REP_IP_ADDR_4 IN (%s)
OR REP_IP_ADDR_5 IN (%s)
)
LIMIT 1
""" % (ipList, ipList, ipList, ipList, ipList, ipList)
resultMsg = cursor.Execute2(sql)
__LOG__.Trace(sql)
__LOG__.Trace(resultMsg)
if 'OK' in resultMsg :
for oneRaw in cursor :
result = oneRaw[0].encode('utf-8')
else :
__LOG__.Trace('Query Fail!! ')
except :
__LOG__.Exception()
finally :
if conn : conn.close()
return result
def selectIpCommonEqpData ( self, svrIpList, emsEqpId ) :
__LOG__.Trace('svrIp = %s | parentEqpId : %s ' % (svrIpList, emsEqpId) )
conn = None
result = ''
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database = 'tacs' )
cursor = conn.cursor()
ipList = '\'%s\'' % '\',\''.join(svrIpList)
# if type(emsEqpId) is list :
# emsEqpId = '\'%s\'' % '\',\''.join(emsEqpId)
# elif type(emsEqpId) is str :
# emsEqpId = '\'%s\'' % emsEqpId
__LOG__.Trace(ipList)
sql = """
SELECT
EQP_EMS_NM
FROM
TACS.TNG_IM_EQP_BAS
WHERE
(
REP_IP_ADDR IN (%s)
OR REP_IP_ADDR_1 IN (%s)
OR REP_IP_ADDR_2 IN (%s)
OR REP_IP_ADDR_3 IN (%s)
OR REP_IP_ADDR_4 IN (%s)
OR REP_IP_ADDR_5 IN (%s)
) AND
( EMS_EQP_ID = '%s' OR EQP_ID = '%s' )
LIMIT 1
""" % (ipList, ipList, ipList, ipList, ipList, ipList, emsEqpId, emsEqpId)
resultMsg = cursor.Execute2(sql)
__LOG__.Trace(sql)
__LOG__.Trace(resultMsg)
if 'OK' in resultMsg :
for oneRaw in cursor :
result = oneRaw[0].encode('utf-8')
else :
__LOG__.Trace('Query Fail!! ')
except :
__LOG__.Exception()
finally :
if conn : conn.close()
return result
def selectIpOtherEqpData ( self, svrIpList, emsEqpId ) :
__LOG__.Trace('svrIp = %s | parentEqpId : %s ' % (svrIpList, emsEqpId) )
conn = None
result = ''
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database = 'tacs' )
cursor = conn.cursor()
ipList = '\'%s\'' % '\',\''.join(svrIpList)
if type(emsEqpId) is list :
emsEqpId = '\'%s\'' % '\',\''.join(emsEqpId)
elif type(emsEqpId) is str :
emsEqpId = '\'%s\'' % emsEqpId
else :
__LOG__.Trace('TACS EQP ID invalid : %s' % emsEqpId)
__LOG__.Trace(ipList)
sql = """
SELECT
EQP_EMS_NM
FROM
TACS.TNG_IM_EQP_BAS
WHERE
(
REP_IP_ADDR IN (%s)
OR REP_IP_ADDR_1 IN (%s)
OR REP_IP_ADDR_2 IN (%s)
OR REP_IP_ADDR_3 IN (%s)
OR REP_IP_ADDR_4 IN (%s)
OR REP_IP_ADDR_5 IN (%s)
) AND
EMS_EQP_ID IN (%s)
LIMIT 1
""" % (ipList, ipList, ipList, ipList, ipList, ipList, emsEqpId )
resultMsg = cursor.Execute2(sql)
__LOG__.Trace(sql)
__LOG__.Trace(resultMsg)
if 'OK' in resultMsg :
for oneRaw in cursor :
result = oneRaw[0].encode('utf-8')
else :
__LOG__.Trace('Query Fail!! ')
except :
__LOG__.Exception()
finally :
if conn : conn.close()
__LOG__.Trace(result)
return result
def selectNmEmsData (self, emsNm) :
emsNm = emsNm.strip()
__LOG__.Trace('emsNm = %s' % emsNm)
resultDict = {'expEmsNm' : '' ,'expEmsIp' : '', 'vendor' : '', 'eqpTyp' : ''}
conn = None
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database='tacs')
cursor = conn.cursor()
#ipList = '\'%s\'' % '\',\''.join(svrIpList)
sql = """
SELECT
EQP_NM, REP_IP_ADDR, SPLY_BP_ID, EQP_TYP
FROM
TACS.TNG_IM_EQP_BAS
WHERE
EQP_EMS_NM = '%s' """ % emsNm
resultMsg = cursor.Execute2(sql)
for oneRaw in cursor :
resultDict['expEmsNm'] = oneRaw[0].encode('utf-8')
resultDict['expEmsIp'] = oneRaw[1].encode('utf-8')
resultDict['vendor'] = oneRaw[2].encode('utf-8')
resultDict['eqpTyp'] = oneRaw[3].encode('utf-8')
except :
__LOG__.Exception()
finally :
if conn : conn.close()
return resultDict['expEmsNm'], resultDict['expEmsIp'], resultDict['vendor'], resultDict['eqpTyp']
def deleteWorkEqpInfo ( self, workId, idx, key, partition ) :
__LOG__.Trace('key : %s | partition : %s | workId : %s | idx : %s' % ( key, partition ,workId, idx ) )
conn = None
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS,Database='tacs')
cursor = conn.cursor()
workSql = """/*+ LOCATION( key = '%s' AND PARTITION = '%s' ) */
DELETE
FROM TACS.TACS_WORK_INFO
WHERE IDX = '%s' and WORK_ID = '%s'
""" %( key, partition, idx, workId )
resultWorkMsg = cursor.Execute2(workSql)
eqpSql = """/*+ LOCATION( key = '%s' AND PARTITION = '%s' ) */
DELETE
FROM TACS.TACS_WORK_INFO
WHERE IDX = '%s' and WORK_ID = '%s'
""" %( key, partition, idx, workId )
resultEqpMsg = cursor.Execute2(eqpSql)
except :
__LOG__.Exception()
finally :
if conn : conn.close()
def selectNmEqpData (self, eqpNm) :
eqpNm = eqpNm.strip()
__LOG__.Trace('eqpNm = %s' %eqpNm)
conn = None
result = ''
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS,Database='tacs')
cursor = conn.cursor()
sql = """SELECT EQP_EMS_NM
FROM TACS.TNG_IM_EQP_BAS
WHERE EQP_NM ='%s'""" % str(eqpNm)
cursor.Execute2(sql)
for oneRaw in cursor :
result = oneRaw[0].encode('utf-8')
except :
__LOG__.Exception()
finally:
if conn : conn.close()
return result
def selectDate (self, workId ,workStaDate) :
workIdKey = workId[-1]
__LOG__.Trace('workIdKey = %s / workId = %s / workStaDate = %s' %( workIdKey, workId, workStaDate ) )
result = ''
conn = None
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database='tacs')
cursor = conn.cursor()
sql1 = """/*+ LOCATION( key = '%s' and partition = '%s' ) */
SELECT MAX(IDX+0)
FROM TACS.TACS_WORK_INFO
WHERE WORK_ID = '%s'
""" % ( workIdKey, workStaDate, workId )
cursor.Execute2(sql1)
IDX = ''
for oneRaw in cursor :
IDX = oneRaw[0]
sql2 = """/*+ LOCATION( key = '%s' and partition = '%s' ) */
SELECT LAST_CHG_DATE
FROM TACS.TACS_WORK_INFO
WHERE IDX = '%s' """ % ( workIdKey, workStaDate , IDX )
			cursor.Execute2(sql2)
			for oneRaw in cursor :
				result = oneRaw[0].encode('utf-8')
			__LOG__.Trace(result)
			cursor.Close()
except :
__LOG__.Exception()
finally :
if conn : conn.close()
return result
def selectLkngUnit (self, emsIp) :
emsIp = emsIp.strip()
__LOG__.Trace('emsIp : %s' % emsIp)
conn = None
resultDict = {'mqNm' : '', 'unitDistYn' : ''}
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database='tacs')
cursor = conn.cursor()
sql = "SELECT WORK_MQ_NM, UNIT_DIST_YN FROM TACS.TACS_TB_LNKG_UNIT WHERE EMS_IP = '%s'" % str(emsIp)
cursor.Execute2(sql)
for oneRaw in cursor :
resultDict['mqNm'] = oneRaw[0].encode('utf-8')
resultDict['unitDistYn'] = oneRaw[1].encode('utf-8')
except :
__LOG__.Exception()
finally:
if conn : conn.close()
return resultDict
def selectWorkInfo(self, hint, workId, workStaDate) :
__LOG__.Trace('hint({}), workId({}), workStaDate({})'.format(hint, workId, workStaDate))
conn = None
resultDict = {}
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database='tacs')
cursor = conn.cursor()
sql = '''
{}
SELECT
MAX(IDX + 0)
FROM TACS.TACS_WORK_INFO
WHERE
WORK_ID = \'{}\'
AND
WORK_STA_DATE = \'{}\'
'''.format(hint, workId, workStaDate)
__LOG__.Trace('query: {}'.format(sql))
cursor.Execute2(sql)
idx = None
for oneRow in cursor :
idx = oneRow[0]
if not idx :
raise Exception('Unavailable IDX({})'.format(idx))
sql = '''
{}
SELECT
WORK_ID
, DMN_DIV_CD
, TO_CHAR(WORK_EVNT_DATE, \'YYYYMMDD\')
, WORK_TYP_CD
, WORK_NM
, TO_CHAR(WORK_STA_DATE, \'YYYY-MM-DD HH24:MI\')
, TO_CHAR(WORK_END_DATE, \'YYYY-MM-DD HH24:MI\')
, CMD_TYP_CD
, CMD_WORK_TYP_CD
, RSK_CMD_INCL_YN
, API_CALN_SVR_DIV_CD
, CMD_CTRL_TYP_CD
, WORK_PROG_STAT_CD
, TO_CHAR(LAST_CHG_DATE, \'YYYY-MM-DD HH24:MI\')
, VENDOR
FROM
TACS_WORK_INFO
WHERE
IDX = \'{}\'
AND
WORK_ID = \'{}\'
AND
WORK_STA_DATE = \'{}\'
LIMIT 1
'''.format(hint, idx, workId, workStaDate)
__LOG__.Trace('query: {}'.format(sql))
cursor.Execute2(sql)
for oneRow in cursor :
resultDict['workId'] = oneRow[0]
resultDict['dmnDivCd'] = oneRow[1]
resultDict['workEvntDate'] = oneRow[2]
resultDict['workTypCd'] = oneRow[3]
resultDict['workNm'] = oneRow[4]
resultDict['workStaDate'] = oneRow[5]
resultDict['workEndDate'] = oneRow[6]
#resultDict['cmdTypCd'] = oneRow[7]
resultDict['cmdWorkTypCd'] = oneRow[8]
resultDict['rskCmdInclYn'] = oneRow[9]
resultDict['apiCalnSvrDivCd'] = oneRow[10]
resultDict['cmdCtrlTypCd'] = oneRow[11]
resultDict['workProgStatCd'] = oneRow[12]
resultDict['lastChgDate'] = oneRow[13]
#resultDict['vendor'] = oneRow[14]
except :
__LOG__.Exception()
finally :
if conn : conn.close()
return resultDict
def selectEqpInfo(self, hint, workId, workStaDate, emsIp) :
__LOG__.Trace('hint({}), workId({}), workStaDate({}), emsIp({})'.format(hint, workId, workStaDate, emsIp))
conn = None
resultList = []
try :
conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database='tacs')
cursor = conn.cursor()
sql = '''
{}
SELECT
MAX(IDX + 0)
FROM TACS_EQP_INFO
WHERE
WORK_ID = \'{}\'
AND
WORK_STA_DATE = \'{}\'
'''.format(hint, workId, workStaDate)
__LOG__.Trace('query: {}'.format(sql))
cursor.Execute2(sql)
idx = None
for oneRow in cursor :
idx = oneRow[0]
if not idx :
raise Exception('Unavailable IDX({})'.format(idx))
sql = '''
{}
SELECT
UNQ_IDNT_NO
, CMD_WORK_TYP_CD
, TANGO_EQP_ID
, ENB_ID
, EMS_NM
, EMS_IP
, EQP_ID
, EQP_NM
, SVR_IP
, SVR_CNNT_ACNTG_ID
, ROOT_ACNTG_USE_YN
, APRVR_ID
, WORK_REGRT_ID
, OPRR_ID
, SECURE_GW_OPRR_ID
, ADD_USER_ACNTG_ID
, CMD_TYP_CD
, WORK_FILD_CD
, CMD_INFO
, SCRIPT_INFO
, VENDOR
FROM
TACS_EQP_INFO
WHERE
IDX = \'{}\'
AND
WORK_ID = \'{}\'
AND
WORK_STA_DATE = \'{}\'
AND
EMS_IP = \'{}\'
'''.format(hint, idx, workId, workStaDate, emsIp)
__LOG__.Trace('query: {}'.format(sql))
cursor.Execute2(sql)
for oneRow in cursor :
resultDict = {}
resultDict['unqIdntNo'] = oneRow[0]
resultDict['cmdWorkTypCd'] = oneRow[1]
resultDict['tangoEqpId'] = oneRow[2]
resultDict['enbId'] = oneRow[3]
resultDict['emsNm'] = oneRow[4]
resultDict['emsIp'] = oneRow[5]
resultDict['eqpId'] = oneRow[6]
resultDict['eqpNm'] = oneRow[7]
resultDict['svrIp'] = oneRow[8]
resultDict['svrCnntAcntgId'] = oneRow[9]
resultDict['rootAcntgUseYn'] = oneRow[10]
resultDict['aprvrId'] = oneRow[11]
resultDict['workRegrtId'] = oneRow[12]
resultDict['oprrId'] = oneRow[13]
resultDict['secureGwOprrId'] = oneRow[14]
resultDict['addUserAcntgId'] = oneRow[15]
resultDict['cmdTypCd'] = oneRow[16]
resultDict['workFildCd'] = oneRow[17]
if oneRow[18] :
resultDict['cmdInfo'] = json.loads(oneRow[18])
if oneRow[19] :
resultDict['scriptInfo'] = json.loads(oneRow[19])
#resultDict['vendor'] = oneRow[20]
resultList.append(resultDict)
except :
__LOG__.Exception()
finally :
if conn : conn.close()
return resultList
```
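Callers build an IRIS LOCATION hint from the workId's last character (the key) and a partition derived from the work start date, then pass it into these select methods; `WorkInfoDistributer._selectWorkInfo` below follows exactly this pattern. A condensed sketch, with placeholder connection values and a literal partition standing in for `IRISSelectRange().dailyRange(workStaDate)`:

```python
# Sketch of the hint-plus-query pattern used with IRIS_SQL (placeholder values).
import IRISSelect

iris = IRISSelect.IRIS_SQL('192.0.2.20', 'tacs', 'secret')
work_id = 'O190429000001'
work_sta_date = '2019-04-29 09:00'
hint = "/*+ LOCATION(KEY = %s AND PARTITION = %s) */" % (work_id[-1], '20190429')
work_info = iris.selectWorkInfo(hint, work_id, work_sta_date)               # dict, {} on miss
eqp_info = iris.selectEqpInfo(hint, work_id, work_sta_date, '192.0.2.10')   # list of dicts
```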
#### File: 20201016/bin/WorkInfoDistributer.py
```python
import os
import sys
import signal
import ConfigParser
import json
import shutil
import hashlib
import uuid
from datetime import datetime
import IRISSelect
import IRISSelectRange
import SftpClient as SFTPClient
import Mobigen.Common.Log as Log; Log.Init()
import AESDecode
SHUTDOWN = False
def handler(signum, frame):
global SHUTDOWN
SHUTDOWN = True
__LOG__.Trace('signal : process shutdown')
# SIGTERM
signal.signal(signal.SIGTERM, handler)
# SIGINT
signal.signal(signal.SIGINT, handler)
# SIGHUP
signal.signal(signal.SIGHUP, handler)
# SIGPIPE
signal.signal(signal.SIGPIPE, handler)
class WorkInfoDistributer :
def __init__(self, cfg) :
self.cfg = cfg
self._initConfig()
def _initConfig(self) :
irisCfgPath = self.cfg.get('GENERAL', 'IRIS_CONF')
irisCfg = ConfigParser.ConfigParser()
irisCfg.read(irisCfgPath)
AES = AESDecode.AESDecode()
dbUrl = irisCfg.get("IRISDB","IRIS")
dbUser = irisCfg.get("IRISDB","IRIS_ID")
dbPasswd = AES.decodeAES(irisCfg.get("IRISDB","IRIS_PASS"))
self.irisObj = IRISSelect.IRIS_SQL(dbUrl, dbUser, dbPasswd)
self.rawWorkInfoBaseDir = self.cfg.get('MODULE_CONF', 'TACS_WORKINFO_RAW')
self.emsWorkInfoBaseDir = self.cfg.get('MODULE_CONF', 'TACS_WORKINFO_EMS')
self.enmWorkInfoBaseDir = self.cfg.get('MODULE_CONF', 'ENM_WORKINFO_BASE')
self.port = int(self.cfg.get('MODULE_CONF', 'ENM_SFTP_PORT'))
self.user = self.cfg.get('MODULE_CONF', 'ENM_SFTP_USER')
self.passwd = self.cfg.get('MODULE_CONF', 'ENM_SFTP_PASSWD')
self.auditLogTempDir = self.cfg.get('MODULE_CONF', 'TACS_AUDITLOG_TEMP')
self.auditLogBaseDir = self.cfg.get('MODULE_CONF', 'TACS_AUDITLOG_PATH')
self.exportWorkCode = self.cfg.get('MODULE_CONF', 'EXPORT_WORK_CODE')
self.roleFilePath = self.cfg.get('MODULE_CONF', 'ROLE_FILE_PATH')
self.JSON_POSTFIX = '_META.json'
self.SITEFILE_POSTFIX = '.txt'
self.SHA_POSTFIX = '.sha'
def _stdOut(self, msg):
sys.stdout.write(msg+'\n')
sys.stdout.flush()
__LOG__.Trace("STD OUT: %s" % msg)
def _stderr(self, value) :
sys.stderr.write('stderr: {}{}'.format(value, '\n'))
sys.stderr.flush()
def _makeWorkFiles(self, paramDict) :
__LOG__.Trace('paramDict: {}'.format(paramDict))
		logDict = {}
		workId = None	# pre-bind for the finally block in case paramDict is malformed
		emsIp = None
		try :
workId = paramDict['workId']
workStaDate = paramDict['workStaDate']
emsIp = paramDict['emsIp']
workInfoDict, eqpInfoList = self._selectWorkInfo(workId, workStaDate, emsIp)
metaDict = {}
eventDate = workInfoDict['workEvntDate']
del workInfoDict['workEvntDate']
metaDict['workInfo'] = workInfoDict
metaDict['eqpInfo'] = []
enbIpList = []
scriptInfoList = []
for oneEqpInfo in eqpInfoList :
if 'svrIp' in oneEqpInfo.keys() :
# if oneEqpInfo['svrIp'] :
if not (oneEqpInfo['svrIp'] in enbIpList) :
enbIpList.append(oneEqpInfo['svrIp'])
if 'scriptInfo' in oneEqpInfo.keys() :
# if oneEqpInfo['scriptInfo'] :
scriptInfoList.extend(oneEqpInfo['scriptInfo'])
oprrIds = oneEqpInfo['oprrId'].split(';')
__LOG__.Trace('oprrIds: {}'.format(oprrIds))
if '' in oprrIds :
oprrIds.remove('')
idx = 0
for oprrId in oprrIds :
eqpInfoByOprrId = {}
if not oprrId :
continue
if len(oprrIds) > 1 :
idx += 1
eqpInfoByOprrId['unqIdntNo'] = '{}-{}'.format(oneEqpInfo['unqIdntNo'], idx)
else :
eqpInfoByOprrId['unqIdntNo'] = oneEqpInfo['unqIdntNo']
eqpInfoByOprrId['cmdWorkTypCd'] = oneEqpInfo['cmdWorkTypCd']
eqpInfoByOprrId['tangoEqpId'] = oneEqpInfo['tangoEqpId']
eqpInfoByOprrId['enbId'] = oneEqpInfo['enbId']
eqpInfoByOprrId['emsNm'] = oneEqpInfo['emsNm']
eqpInfoByOprrId['emsIp'] = oneEqpInfo['emsIp']
eqpInfoByOprrId['eqpId'] = oneEqpInfo['eqpId']
eqpInfoByOprrId['eqpNm'] = oneEqpInfo['eqpNm']
eqpInfoByOprrId['svrIp'] = oneEqpInfo['svrIp']
eqpInfoByOprrId['svrCnntAcntgId'] = oneEqpInfo['svrCnntAcntgId']
eqpInfoByOprrId['rootAcntgUseYn'] = oneEqpInfo['rootAcntgUseYn']
eqpInfoByOprrId['aprvrId'] = oneEqpInfo['aprvrId']
eqpInfoByOprrId['workRegrtId'] = oneEqpInfo['workRegrtId']
eqpInfoByOprrId['oprrId'] = oprrId
eqpInfoByOprrId['secureGwOprrId'] = oneEqpInfo['secureGwOprrId']
eqpInfoByOprrId['addUserAcntgId'] = oneEqpInfo['addUserAcntgId']
eqpInfoByOprrId['cmdTypCd'] = oneEqpInfo['cmdTypCd']
eqpInfoByOprrId['workFildCd'] = oneEqpInfo['workFildCd']
if 'cmdInfo' in oneEqpInfo :
eqpInfoByOprrId['cmdInfo'] = oneEqpInfo['cmdInfo']
if 'scriptInfo' in oneEqpInfo :
eqpInfoByOprrId['scriptInfo'] = oneEqpInfo['scriptInfo']
metaDict['eqpInfo'].append(eqpInfoByOprrId)
metaJson = json.dumps(metaDict, ensure_ascii=False)
__LOG__.Trace('metaJson: {}'.format(metaJson))
emsWorkInfoPath = os.path.join(self.emsWorkInfoBaseDir, emsIp, workId)
self._mkdirs(emsWorkInfoPath)
self._createFile(os.path.join(emsWorkInfoPath, '{}{}'.format(workId, self.JSON_POSTFIX)), metaJson)
__LOG__.Trace('enbIpList: {}'.format(enbIpList))
self._createSitefile(os.path.join(emsWorkInfoPath, '{}_sitefiles{}'.format(workId, self.SITEFILE_POSTFIX)), enbIpList)
__LOG__.Trace('scriptInfoList: {}'.format(scriptInfoList))
self._copyFile(eventDate, emsIp, workId, scriptInfoList)
self._makeSHA256Files(emsIp, workId)
self._uploadWorkFiles(emsIp, workId)
			## Added 2020-09-16 (Kim Jun-woo) ##
			self._updateDistributeYn(workId, emsIp, workStaDate)
			############################
logDict['tacsLnkgRst'] = 'OK'
stdOutDict = dict()
stdOutDict['idx'] = paramDict['idx']
stdOutDict['workId'] = paramDict['workId']
stdOutDict['workStaDate'] = paramDict['workStaDate']
stdOutDict['workEndDate'] = paramDict['workEndDate']
stdOutDict['workProgStatCd'] = paramDict['workProgStatCd']
stdOutDict['emsIp'] = paramDict['emsIp']
stdOutDict['emsNm'] = paramDict['emsNm']
stdOutDict['oprrId'] = paramDict['oprrId']
self._mkdirs(self.roleFilePath)
roleFileName = '%s.json' % ('_'.join([stdOutDict['workId'], stdOutDict['emsIp']]))
with open(os.path.join(self.roleFilePath, roleFileName), 'w') as f :
f.write(json.dumps(stdOutDict))
self._stdOut(os.path.join(self.roleFilePath, roleFileName))
except Exception as ex :
__LOG__.Trace('{} makeWorkFiles process failed. {}'.format(paramDict, ex))
logDict['tacsLnkgRst'] = 'FAIL'
logDict['tacsLnkgRsn'] = ex.args
raise ex
finally :
currentDateObj = datetime.now()
yyyyMMdd = currentDateObj.strftime('%Y%m%d')
currentDate = currentDateObj.strftime('%Y%m%d%H%M%S')
logDict['evntTypCd'] = self.exportWorkCode
logDict['evntDate'] = currentDate
logDict['workId'] = workId
logDict['lnkgEqpIp'] = emsIp
self._writeTacsHistoryFile(yyyyMMdd, currentDate, logDict)
	#################### Added 09-16 (Kim Jun-woo) #####################################
	def _updateDistributeYn(self, workId, emsIp, workStaDate) :
__LOG__.Trace('{} EMS distribute Update'.format(emsIp))
try :
key = workId[-1]
partition = IRISSelectRange.IRISSelectRange().dailyRange(workStaDate)
hint = '''
/*+ LOCATION(KEY = {} AND PARTITION = {}) */
'''.format(key, partition)
self.irisObj.updateDistributeYn(hint, workId, emsIp, workStaDate)
except Exception as ex :
			__LOG__.Trace('distribute YN Update failed [workId : {}, emsIp : {}, workStaDate : {}]'.format(workId, emsIp, workStaDate))
raise ex
############################################################################
def _selectWorkInfo(self, workId, workStaDate, emsIp) :
try :
key = workId[-1]
partition = IRISSelectRange.IRISSelectRange().dailyRange(workStaDate)
hint = '''
/*+ LOCATION(KEY = {} AND PARTITION = {}) */
'''.format(key, partition)
workInfoDict = self.irisObj.selectWorkInfo(hint, workId, workStaDate)
if not workInfoDict :
raise Exception('No such workId({}), workStaDate({}), workInfo'.format(workId, workStaDate))
__LOG__.Trace('workInfoDict: {}'.format(workInfoDict))
eqpInfoList = self.irisObj.selectEqpInfo(hint, workId, workStaDate, emsIp)
if not eqpInfoList :
raise Exception('No such workId({}), workStaDate({}), emsIp({}), eqpInfo'.format(workId, workStaDate, emsIp))
__LOG__.Trace('eqpInfoList: {}'.format(eqpInfoList))
return workInfoDict, eqpInfoList
except Exception as ex :
__LOG__.Trace('selectWorkInfo process failed. {}'.format(ex))
raise ex
def _mkdirs(self, directory) :
isExists = os.path.exists(directory)
__LOG__.Trace('{} isExists: {}'.format(directory, isExists))
if not isExists :
__LOG__.Trace('create directories {}'.format(directory))
os.makedirs(directory)
def _createFile(self, filePath, contents) :
f = None
try :
f = open(filePath, 'w')
f.write(contents)
__LOG__.Trace('{} file is created.'.format(filePath))
except Exception as ex :
__LOG__.Trace('{} to file process failed. {}'.format(contents, ex))
raise ex
finally :
if f : f.close()
def _createSitefile(self, filePath, enbIpList) :
f = None
try :
f = open(filePath, 'w')
length = len(enbIpList)
for idx, oneEnbIp in enumerate(enbIpList) :
if idx == (length - 1) :
f.write(oneEnbIp)
else :
f.write('{}{}'.format(oneEnbIp, '\n'))
__LOG__.Trace('{} file is created.'.format(filePath))
except Exception as ex :
__LOG__.Trace('{} to sitefile process failed. {}'.format(enbIpList, ex))
raise ex
finally :
if f : f.close()
def _readFile(self, filePath) :
f = None
try :
f = open(filePath, 'r')
contents = f.read()
return contents
except Exception as ex :
__LOG__.Trace('{} readFile process failed. {}'.format(filePath, ex))
raise ex
finally :
if f : f.close()
def _copyFile(self, eventDate, emsIp, workId, scriptInfoList) :
__LOG__.Trace('eventDate({}), emsIp({}), workId({}), scriptInfoList: {}'.format(eventDate, emsIp, workId, scriptInfoList))
try :
scptNmList = []
for oneScriptInfo in scriptInfoList :
atchdPathFileNm = oneScriptInfo['atchdPathFileNm'] if oneScriptInfo['atchdPathFileNm'] else None
if not atchdPathFileNm :
continue
tangoScptNm = os.path.basename(atchdPathFileNm)
scptNm = oneScriptInfo['scptNm'] if oneScriptInfo['scptNm'] else None
scptNmDict = {}
scptNmDict[tangoScptNm] = scptNm if scptNm else tangoScptNm
scptNmList.append(scptNmDict)
rawWorkInfoPath = os.path.join(self.rawWorkInfoBaseDir, eventDate, workId)
#rawWorkInfoPath = os.path.join('/home/tacs/DATA/WORKINFO/M_COMP', eventDate, workId)
copyFilesDict = {}
for oneFile in os.listdir(rawWorkInfoPath) :
if oneFile.endswith(self.JSON_POSTFIX) or not os.path.isfile(os.path.join(rawWorkInfoPath, oneFile)) :
continue
for oneScptNmDict in scptNmList :
if oneFile in oneScptNmDict :
copyFilesDict[oneFile] = oneScptNmDict[oneFile]
break
__LOG__.Trace('copyFilesDict: {}'.format(copyFilesDict))
emsWorkInfoPath = os.path.join(self.emsWorkInfoBaseDir, emsIp, workId)
for k, v in copyFilesDict.items() :
if not v :
v = k
srcPath = os.path.join(rawWorkInfoPath, k)
desPath = os.path.join(emsWorkInfoPath, v)
shutil.copy2(srcPath, desPath)
__LOG__.Trace('copyFiles {} -> {}, succeed'.format(srcPath, desPath))
except Exception as ex :
__LOG__.Trace('{} copyFiles process failed. {}'.format(scriptInfoList, ex))
raise ex
def _makeSHA256Files(self, emsIp, workId) :
__LOG__.Trace('emsIp({}), workId({})'.format(emsIp, workId))
try :
emsWorkInfoPath = os.path.join(self.emsWorkInfoBaseDir, emsIp, workId)
for oneFile in os.listdir(emsWorkInfoPath) :
if oneFile.endswith(self.SHA_POSTFIX) or oneFile.startswith('.') or not os.path.isfile(os.path.join(emsWorkInfoPath, oneFile)) :
continue
contents = self._readFile(os.path.join(emsWorkInfoPath, oneFile))
__LOG__.Trace('contents: {}'.format(contents))
if not contents :
continue
hexdigest = hashlib.sha256(contents.encode()).hexdigest()
__LOG__.Trace('hexdigest: {}'.format(hexdigest))
self._createFile(os.path.join(emsWorkInfoPath, '{}{}'.format(oneFile, self.SHA_POSTFIX)), hexdigest)
except Exception as ex :
__LOG__.Trace('makeSHA256Files process failed. {}'.format(ex))
raise ex
def _uploadWorkFiles(self, emsIp, workId) :
__LOG__.Trace('emsIp({}), workId({})'.format(emsIp, workId))
sftpClient = None
try :
#########################################
#### sftpClient = SFTPClient.SftpClient(emsIp, self.port, self.user, self.passwd)
sftpClient = SFTPClient.SftpClient(emsIp, self.port, 'root','!hello.root0')
##########################################
enmWorkInfoPath = os.path.join(self.enmWorkInfoBaseDir, workId)
sftpClient.mkdirs(enmWorkInfoPath)
emsWorkInfoPath = os.path.join(self.emsWorkInfoBaseDir, emsIp, workId)
for oneFile in os.listdir(emsWorkInfoPath) :
if oneFile.startswith('.') or not os.path.isfile(os.path.join(emsWorkInfoPath, oneFile)) :
continue
sftpClient.upload(os.path.join(emsWorkInfoPath, oneFile), os.path.join(enmWorkInfoPath, oneFile))
except Exception as ex :
__LOG__.Trace('uploadWorkFiles process failed. {}'.format(ex))
raise ex
finally :
if sftpClient : sftpClient.close()
def _writeTacsHistoryFile(self, yyyyMMdd, eventDate, logDict) :
if logDict :
__LOG__.Trace('received workInfo history: {}'.format(logDict))
try :
tacsHistoryTempPath = os.path.join(self.auditLogTempDir, 'AUDIT_{}'.format(self.exportWorkCode))
self._mkdirs(tacsHistoryTempPath)
contents = json.dumps(logDict, ensure_ascii=False)
__LOG__.Trace('contents: {}'.format(contents))
tacsHistoryFilename = self._getTacsHistoryFilename(yyyyMMdd, eventDate)
__LOG__.Trace('tacsHistoryFilename: {}'.format(tacsHistoryFilename))
self._createFile(os.path.join(tacsHistoryTempPath, tacsHistoryFilename), contents)
tacsHistoryPath = os.path.join(self.auditLogBaseDir, 'AUDIT_{}'.format(self.exportWorkCode))
self._mkdirs(tacsHistoryPath)
shutil.move(os.path.join(tacsHistoryTempPath, tacsHistoryFilename), os.path.join(tacsHistoryPath, tacsHistoryFilename))
__LOG__.Trace('tacsHistory file move from {} -> to {} succeed.'.format(os.path.join(tacsHistoryTempPath, tacsHistoryFilename), os.path.join(tacsHistoryPath, tacsHistoryFilename)))
except Exception as ex :
__LOG__.Trace('tacsHistory {} load process failed. {}'.format(logDict, ex))
else :
__LOG__.Trace('received workInfo history({}) is invalid.'.format(logDict))
def _getTacsHistoryFilename(self, yyyyMMdd, eventDate) :
HHmm = datetime.strptime(eventDate, '%Y%m%d%H%M%S').strftime('%H%M')
tacsHistoryFilename = '{}_{}_{}.audit'.format(yyyyMMdd, HHmm, uuid.uuid4())
return tacsHistoryFilename
def run(self) :
while not SHUTDOWN :
try :
strIn = sys.stdin.readline()
stdLine = strIn.strip()
if stdLine :
if not '://' in stdLine :
if stdLine.strip() :
stdLine = stdLine.replace('\'', '"')
paramDict = json.loads(stdLine)
if not ('queueMsg' in paramDict) :
self._makeWorkFiles(paramDict)
self._stderr(strIn)
else :
self._stderr(strIn)
else :
self._stderr(strIn)
else :
self._stderr(strIn)
except :
__LOG__.Exception()
self._stderr(strIn)
__LOG__.Trace('run is terminated')
def main() :
reload(sys)
sys.setdefaultencoding('UTF-8')
module = os.path.basename(sys.argv[0])
section = sys.argv[1]
cfgfile = sys.argv[2]
cfg = ConfigParser.ConfigParser()
cfg.read(cfgfile)
logPath = cfg.get("GENERAL", "LOG_PATH")
logFile = os.path.join(logPath, "%s_%s.log" % (module, section))
logCfgPath = cfg.get("GENERAL", "LOG_CONF")
logCfg = ConfigParser.ConfigParser()
logCfg.read(logCfgPath)
Log.Init(Log.CRotatingLog(logFile, logCfg.get("LOG", "MAX_SIZE"), logCfg.get("LOG", "MAX_CNT") ))
workInfoDistributer = WorkInfoDistributer(cfg)
workInfoDistributer.run()
__LOG__.Trace('main is terminated.')
if __name__ == '__main__' :
try :
main()
except :
__LOG__.Exception()
```
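WorkInfoDistributer is fed one JSON object per stdin line (single quotes are normalized in `run`, and records carrying a `queueMsg` key are only acknowledged on stderr). The keys `_makeWorkFiles` consumes are the ones copied into `stdOutDict`, so an input record reduces to the following (values illustrative):

```python
# Illustrative stdin record for WorkInfoDistributer.run.
param_example = {
    'idx': '1',
    'workId': 'O190429000001',
    'workStaDate': '2019-04-29 09:00',
    'workEndDate': '2019-04-29 18:00',
    'workProgStatCd': 'P',
    'emsIp': '192.0.2.10',
    'emsNm': 'EMS-01',
    'oprrId': 'op01',
}
```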
#### File: ETL/bin/SftpClient_BACK.py
```python
import os
import paramiko
import Mobigen.Common.Log as Log; Log.Init()
class SftpClient :
def __init__(self, host, port, user, passwd) :
self.host = host
self.port = port
self.user = user
		self.passwd = passwd
self._connect()
def _connect(self) :
try :
sftpHosts = self.host.split(';')
__LOG__.Trace('SFTP host: {}'.format(sftpHosts))
			for oneHost in sftpHosts :
				try :
					self.transport = paramiko.Transport((oneHost, int(self.port)))
					self.transport.connect(username = self.user, password = self.passwd)
					__LOG__.Trace('SFTP Connected HOST({})'.format(oneHost))
					break	# stop at the first host that accepts the connection
				except Exception as ex :
					__LOG__.Trace('SFTP Connection failed. HOST({})'.format(oneHost))
			self.sftp = paramiko.SFTPClient.from_transport(self.transport)
except :
__LOG__.Trace('SFTP Connection error. HOST({})/PORT({})'.format(self.host, self.port))
raise
def download(self, remoteFilepath, remoteFilename, localFilepath) :
try :
self.sftp.get(os.path.join(remoteFilepath, remoteFilename), os.path.join(localFilepath, remoteFilename))
except :
__LOG__.Trace('SFTP file download failed.')
raise
def upload(self, sourceFilepath, destinationFilepath) :
try :
self.sftp.put(sourceFilepath, destinationFilepath)
except :
__LOG__.Trace('SFTP file upload failed.')
raise
def close(self) :
try :
if self.sftp :
self.sftp.close()
__LOG__.Trace('sftp closed')
if self.transport :
self.transport.close()
__LOG__.Trace('transport closed')
except :
__LOG__.Trace('SFTP Connection close failed.')
raise
```
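Minimal usage sketch for the backup SFTP client above; the host argument may list several addresses separated by ';' as parsed in `_connect`, and all values below are placeholders:

```python
from SftpClient_BACK import SftpClient

client = SftpClient('198.51.100.1;198.51.100.2', 22, 'tacs', 'secret')
try:
    client.upload('/tmp/O190429000001_META.json', '/remote/work/O190429000001_META.json')
    client.download('/remote/work', 'result.txt', '/tmp')
finally:
    client.close()
```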
#### File: ETL/bin/WorkInfoCollector_MODULE.py
```python
import os
import sys
reload(sys)
sys.setdefaultencoding('UTF-8')
import signal
import time
from datetime import datetime
from datetime import timedelta
import ConfigParser
import glob
import json
import uuid
import shutil
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from apscheduler.schedulers.blocking import BlockingScheduler
import SftpClient as SFTPClient
import Mobigen.Common.Log as Log; Log.Init()
import subprocess
workInfoCollector = None
def handler(signum, frame):
__LOG__.Trace('signal : process shutdown')
try :
if workInfoCollector :
workInfoCollector.shutdown()
except :
__LOG__.Exception()
# SIGTERM
signal.signal(signal.SIGTERM, handler)
# SIGINT
signal.signal(signal.SIGINT, handler)
# SIGHUP
signal.signal(signal.SIGHUP, handler)
# SIGPIPE
signal.signal(signal.SIGPIPE, handler)
class WorkInfoCollector :
def __init__(self, cfg) :
self.cfg = cfg
self.WORKINFO_REPO = {}
self._initConfig()
def _initConfig(self) :
self.systemName = self.cfg.get('MODULE_CONF', 'TACS_SYSTEM_NAME')
self.workInfoBaseDir = self.cfg.get('MODULE_CONF', 'TACS_WORKINFO_RAW')
self.auditLogTempDir = self.cfg.get('MODULE_CONF', 'TACS_AUDITLOG_TEMP')
self.auditLogBaseDir = self.cfg.get('MODULE_CONF', 'TACS_AUDITLOG_PATH')
self.receivedWorkCode = self.cfg.get('MODULE_CONF', 'RECEIVED_WORK_CODE')
self.tangoWmWorkInfoUrl = self.cfg.get('MODULE_CONF', 'TANGO_WM_WORKINFO_URL')
self.tangoWmEqpInfoUrl = self.cfg.get('MODULE_CONF', 'TANGO_WM_EQPINFO_URL')
self.xAuthToken = self.cfg.get('MODULE_CONF', 'TANGO_WM_X_AUTH_TOKEN')
self.host = self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_HOST')
self.port = int(self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_PORT'))
self.user = self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_USER')
self.passwd = self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_PASSWD')
self.scheduleInterval = self.cfg.get('MODULE_CONF', 'SCHEDULE_INTERVAL_MIN')
self.stdoutSleepTime = int(self.cfg.get('MODULE_CONF', 'STDOUT_SLEEP_TIME'))
self.headers = {'x-auth-token' : self.xAuthToken, 'Content-Type' : 'application/json; charset=utf-8'}
self.migration = False
def _executeMigration(self, searchStartDate, searchEndDate) :
__LOG__.Trace('migration process start. searchStartDate({}), searchEndDate({})'.format(searchStartDate, searchEndDate))
try :
searchStartDateObj = datetime.strptime(searchStartDate, '%Y%m%d%H%M%S')
searchEndDateObj = datetime.strptime(searchEndDate, '%Y%m%d%H%M%S')
if searchStartDateObj > searchEndDateObj :
__LOG__.Trace('searchStartDate({}) bigger than searchEndDate({})'.format(searchStartDate, searchEndDate))
print '[ERROR] searchStartDate({}) bigger than searchEndDate({})'.format(searchStartDate, searchEndDate)
else :
# request workInfo
workIdList = self._lookupWorkInfo(searchStartDate, searchEndDate, True)
# request eqpInfo by workId
self._lookupEqpInfo(workIdList)
except Exception as ex :
__LOG__.Trace('workInfo migration failed. {}'.format(ex))
def _executeScheduler(self) :
try :
__LOG__.Trace('scheduler process start')
# request workInfo
workIdList = self._lookupWorkInfo()
# request eqpInfo by workId
self._lookupEqpInfo(workIdList)
except :
__LOG__.Exception()
def _stdout(self, msg) :
sys.stdout.write('stdout' + msg + '\n')
sys.stdout.flush()
__LOG__.Trace('stdout: %s' % msg)
def _lookupWorkInfo(self, fromDate = None, toDate = None, migration = False) :
searchStartDate = fromDate
searchEndDate = toDate
if not migration :
searchEndDateObj = datetime.now()
#searchStartDateObj = datetime(searchEndDateObj.year, searchEndDateObj.month, searchEndDateObj.day, searchEndDateObj.hour, (searchEndDateObj.minute - int(self.scheduleInterval)))
searchStartDateObj = searchEndDateObj - timedelta(minutes=1)
searchStartDate = searchStartDateObj.strftime('%Y%m%d%H%M')
searchEndDate = searchEndDateObj.strftime('%Y%m%d%H%M')
__LOG__.Trace('lookup workInfo from({}) ~ to({})'.format(searchStartDate, searchEndDate))
url = self.tangoWmWorkInfoUrl.format(self.systemName, searchStartDate, searchEndDate)
__LOG__.Trace('request workInfo url: {}'.format(url))
rawDict = self._requestGet(url)
return self._loadWorkInfo(rawDict)
def _lookupEqpInfo(self, workIdList) :
if not workIdList :
__LOG__.Trace('workIdList is empty')
else :
logDictList = list()
yyyyMMdd = None
eventDate = None
for oneWorkId in workIdList :
url = self.tangoWmEqpInfoUrl.format(self.systemName, oneWorkId)
__LOG__.Trace('request eqpInfo url: {}'.format(url))
rawDict = self._requestGet(url)
logDict, yyyyMMdd, eventDate = self._loadEqpInfo(oneWorkId, rawDict, logDictList)
logDictList.append(logDict)
self._writeTacsHistoryFile(yyyyMMdd, eventDate, logDictList)
def _requestGet(self, url, verify = False) :
rawDict = None
response = requests.get(url = url, headers = self.headers, verify = verify)
if response.status_code == 200 :
#jsonText = response.text.decode('string_escape')
#__LOG__.Trace('raw response.text: {}'.format(jsonText))
#__LOG__.Trace('replace response.text: {}'.format(jsonText.replace('\\\\\\"', '\\\"')))
#__LOG__.Trace('replace response.text: {}'.format(jsonText))
#tmpDict = json.loads(response.text)
#__LOG__.Trace('tmpDict: {}'.format(tmpDict))
#__LOG__.Trace('tmpDict.dumps: {}'.format(json.dumps(tmpDict, ensure_ascii=False)))
rawDict = response.json()
#rawDict = json.loads(jsonText)
else :
__LOG__.Trace('!!! Exception !!! requestGet failed. statusCode: {}'.format(response.status_code))
pass
return rawDict
def _loadWorkInfo(self, rawDict) :
if rawDict :
__LOG__.Trace('workInfo rawData: {}'.format(rawDict))
workIdList = []
if type(rawDict['workInfo']) is list :
for oneWorkInfo in rawDict['workInfo'] :
workId = oneWorkInfo['workId']
__LOG__.Trace('workId: {}'.format(workId))
if workId is None or not workId :
__LOG__.Trace('invalid workId({})'.format(workId))
continue
workIdList.append(workId)
wrapper = {}
wrapper['workInfo'] = oneWorkInfo
workEvntDate = datetime.now().strftime('%Y%m%d%H%M%S')
wrapper['workInfo']['workEvntDate'] = workEvntDate
self.WORKINFO_REPO[workId] = wrapper
__LOG__.Trace('WORKINFO_REPO: {}'.format(self.WORKINFO_REPO))
else :
__LOG__.Trace('Unsupported type: {}'.format(type(rawDict['workInfo'])))
pass
return workIdList
else :
__LOG__.Trace('workInfo rawData is None')
return None
def _loadEqpInfo(self, oneWorkId, rawDict, logDictList) :
logDict = dict()
yyyyMMdd = None
eventDate = None
if rawDict :
__LOG__.Trace('eqpInfo rawData: {}'.format(rawDict))
if 'eqpInfo' in rawDict and type(rawDict['eqpInfo']) is list :
scriptFileList = []
wrapper = self.WORKINFO_REPO[oneWorkId]
if wrapper :
wrapper['eqpInfo'] = rawDict['eqpInfo']
for oneEqpInfoDict in rawDict['eqpInfo'] :
if 'scriptInfo' in oneEqpInfoDict :
scriptInfoList = oneEqpInfoDict['scriptInfo']
if scriptInfoList :
for oneScriptInfoDict in scriptInfoList :
filePathname = oneScriptInfoDict['atchdPathFileNm']
if filePathname :
remoteFilepath, remoteFilename = os.path.split(filePathname)
__LOG__.Trace('remoteFilepath({}), remoteFilename({})'.format(remoteFilepath, remoteFilename))
scriptFileDict = {}
scriptFileDict['remoteFilepath'] = remoteFilepath
scriptFileDict['remoteFilename'] = remoteFilename
scriptFileList.append(scriptFileDict)
else :
__LOG__.Trace('workId({})/eqpNm({}) atchdPathFileNm({}) is invalid'.format(oneWorkId, oneEqpInfoDict['eqpNm'], filePathname))
pass
else :
__LOG__.Trace('workId({})/eqpNm({}) scriptInfoList({}) is invalid'.format(oneWorkId, oneEqpInfoDict['eqpNm'], scriptInfoList))
else :
__LOG__.Trace('workId({})/eqpNm({}) scriptInfo does not exist in eqpInfo'.format(oneWorkId, oneEqpInfoDict['eqpNm']))
pass
else :
__LOG__.Trace('no registered workId({}) in WORKINFO_REPO'.format(oneWorkId))
# a bare return here would break the 3-value unpacking done by the caller (_lookupEqpInfo)
return logDict, yyyyMMdd, eventDate
__LOG__.Trace('scriptFileList: {}'.format(scriptFileList))
eventDate = wrapper['workInfo']['workEvntDate']
yyyyMMdd = datetime.strptime(eventDate, '%Y%m%d%H%M%S').strftime('%Y%m%d')
__LOG__.Trace('eventDate({}), yyyyMMdd({})'.format(eventDate, yyyyMMdd))
self._getScriptFiles(yyyyMMdd, oneWorkId, scriptFileList)
logDict = self._writeTangoWorkFile(yyyyMMdd, eventDate, oneWorkId, wrapper)
self._removeCompleteWorkInfo(oneWorkId)
else :
__LOG__.Trace('Unsupported type: {}'.format(type(rawDict.get('eqpInfo'))))
pass
else :
__LOG__.Trace('workId({}), eqpInfo rawData is None'.format(oneWorkId))
pass
return logDict, yyyyMMdd, eventDate
def _getScriptFiles(self, yyyyMMdd, workId, scriptFileList) :
if not scriptFileList :
__LOG__.Trace('scriptFileList({}) is empty'.format(scriptFileList))
return
try :
tacsWorkInfoPath = os.path.join(self.workInfoBaseDir, yyyyMMdd, workId)
self._mkdirs(tacsWorkInfoPath)
sftpClient = SFTPClient.SftpClient(self.host, self.port, self.user, self.passwd)
for oneScriptFileDict in scriptFileList :
remoteFilepath = oneScriptFileDict['remoteFilepath']
remoteFilename = oneScriptFileDict['remoteFilename']
sftpClient.download(remoteFilepath, remoteFilename, tacsWorkInfoPath)
__LOG__.Trace('scriptFile from({}) -> to({}) download succeed'.format(os.path.join(remoteFilepath, remoteFilename), os.path.join(tacsWorkInfoPath, remoteFilename)))
sftpClient.close()
except Exception as ex :
__LOG__.Trace('scriptFile download process failed {}'.format(ex))
self._removeCompleteWorkInfo(workId)
raise ex
def _writeTangoWorkFile(self, yyyyMMdd, eventDate, workId, wrapper) :
logDict = {}
try :
tacsWorkInfoPath = os.path.join(self.workInfoBaseDir, yyyyMMdd, workId)
self._mkdirs(tacsWorkInfoPath)
contents = json.dumps(wrapper, ensure_ascii=False)
__LOG__.Trace('contents: {}'.format(contents))
createFilePath = os.path.join(tacsWorkInfoPath, '{}_{}_META.json'.format(eventDate, workId))
self._createFile(createFilePath, contents)
logDict['tacsLnkgRst'] = 'OK'
if self.migration :
__LOG__.Trace( ['mf','30000', 'put', 'dbl', 'stdoutfile://{}'.format(createFilePath)] )
subprocess.call(['mf', '30000', 'put,dbl,stdoutfile://{}'.format(createFilePath)])
else :
time.sleep(self.stdoutSleepTime)
self._stdout('file://{}'.format(createFilePath))
except Exception as ex :
__LOG__.Trace('workFile write process failed {}'.format(ex))
logDict['tacsLnkgRst'] = 'FAIL'
logDict['tacsLnkgRsn'] = ex.args
self._removeCompleteWorkInfo(workId)
raise ex
finally :
logDict['evntTypCd'] = self.receivedWorkCode
logDict['evntDate'] = eventDate
logDict['workId'] = workId
logDict['lnkgEqpIp'] = ''
return logDict
# self._writeTacsHistoryFile(yyyyMMdd, eventDate, logDict)
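# Write the collected per-work audit records to a temp file, then move it into the audit directory in one step.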
def _writeTacsHistoryFile(self, yyyyMMdd, eventDate, logDictList) :
if logDictList :
__LOG__.Trace('received workInfo history: {}'.format(logDictList))
try :
tacsHistoryTempPath = os.path.join(self.auditLogTempDir, 'AUDIT_{}'.format(self.receivedWorkCode))
self._mkdirs(tacsHistoryTempPath)
contentList = list()
for oneLogDict in logDictList :
content = json.dumps(oneLogDict, ensure_ascii=False)
contentList.append(content)
contents = '\n'.join(contentList)
__LOG__.Trace('contents: {}'.format(contents))
tacsHistoryFilename = self._getTacsHistoryFilename(yyyyMMdd, eventDate)
__LOG__.Trace('tacsHistoryFilename: {}'.format(tacsHistoryFilename))
self._createFile(os.path.join(tacsHistoryTempPath, tacsHistoryFilename), contents)
tacsHistoryPath = os.path.join(self.auditLogBaseDir, 'AUDIT_{}'.format(self.receivedWorkCode))
self._mkdirs(tacsHistoryPath)
shutil.move(os.path.join(tacsHistoryTempPath, tacsHistoryFilename), os.path.join(tacsHistoryPath, tacsHistoryFilename))
__LOG__.Trace('tacsHistory file move from {} -> to {} succeed'.format(os.path.join(tacsHistoryTempPath, tacsHistoryFilename), os.path.join(tacsHistoryPath, tacsHistoryFilename)))
except Exception as ex :
__LOG__.Trace('tacsHistory {} load process failed {}'.format(logDictList, ex))
else :
__LOG__.Trace('received workInfo history({}) is invalid'.format(logDictList))
def _mkdirs(self, directory) :
__LOG__.Trace('{} isExists: {}'.format(directory, os.path.exists(directory)))
if not os.path.exists(directory) :
__LOG__.Trace('create directories {}'.format(directory))
os.makedirs(directory)
def _createFile(self, filePath, contents) :
f = None
try :
f = open(filePath, 'w')
f.write(contents)
__LOG__.Trace('{} file is created'.format(filePath))
except Exception as ex :
__LOG__.Trace('{} to file process failed {}'.format(contents, ex))
raise ex
finally :
if f :
f.close()
def _getTacsHistoryFilename(self, yyyyMMdd, eventDate) :
HHmm = datetime.strptime(eventDate, '%Y%m%d%H%M%S').strftime('%H%M')
tacsHistoryFilename = '{}_{}_{}.audit'.format(yyyyMMdd, HHmm, uuid.uuid4())
return tacsHistoryFilename
def _removeCompleteWorkInfo(self, workId) :
if workId in self.WORKINFO_REPO :
del self.WORKINFO_REPO[workId]
__LOG__.Trace('workId({}), WORKINFO_REPO: {}'.format(workId, self.WORKINFO_REPO))
def shutdown(self) :
try :
if self.scheduler :
#self.scheduler.remove_job('workInfo_scheduler')
self.scheduler.shutdown()
__LOG__.Trace('scheduler is terminated')
else :
__LOG__.Trace('scheduler is None')
except Exception as ex :
__LOG__.Trace('shutdown failed {}'.format(ex))
def run(self, searchStartDate = None, searchEndDate = None, migration = False) :
self.migration = migration
if not migration :
self.scheduler = BlockingScheduler()
self.scheduler.add_job(self._executeScheduler, 'cron', minute='*/{}'.format(self.scheduleInterval), second='0', id='workInfo_scheduler')
self.scheduler.start()
else :
self._executeMigration(searchStartDate, searchEndDate)
__LOG__.Trace('migration process done')
def main() :
argvLength = len(sys.argv)
if argvLength < 3 :
print '''
[ERROR] WorkInfoCollector argv required at least 3
++ Usage
++++ scheduler : module section cfgfile
++++ migration : module section cfgfile searchStartDate(yyyyMMddHHmm) searchEndDate(yyyyMMddHHmm)
'''
return
module = os.path.basename(sys.argv[0])
section = sys.argv[1]
cfgfile = sys.argv[2]
searchStartDate = None
searchEndDate = None
migration = False
if argvLength == 5 :
migration = True
searchStartDate = sys.argv[3]
searchEndDate = sys.argv[4]
cfg = ConfigParser.ConfigParser()
cfg.read(cfgfile)
logPath = cfg.get("GENERAL", "LOG_PATH")
logFile = os.path.join(logPath, "%s_%s.log" % (module, section))
logCfgPath = cfg.get("GENERAL", "LOG_CONF")
logCfg = ConfigParser.ConfigParser()
logCfg.read(logCfgPath)
Log.Init(Log.CRotatingLog(logFile, logCfg.get("LOG", "MAX_SIZE"), logCfg.get("LOG", "MAX_CNT") ))
global workInfoCollector
workInfoCollector = WorkInfoCollector(cfg)
workInfoCollector.run(searchStartDate, searchEndDate, migration)
__LOG__.Trace('main is terminated')
if __name__ == '__main__' :
try :
main()
except :
__LOG__.Exception()
```
#### File: ETL/bin/WorkInfoFailOver.py
```python
import os
import sys
reload(sys)
sys.setdefaultencoding('UTF-8')
import signal
import time
import datetime
import ConfigParser
import glob
import json
#import uuid
#import shutil
#import requests
#from requests.packages.urllib3.exceptions import InsecureRequestWarning
#requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
#import SftpClient as SFTPClient
import Mobigen.Common.Log as Log; Log.Init()
#import subprocess
import CollectApi as CollectAPI
SHUTDOWN = False
def handler(signum, frame):
    # set the shutdown flag so the run() loop can actually exit on a signal
    global SHUTDOWN
    SHUTDOWN = True
    __LOG__.Trace('signal : process shutdown')
# SIGTERM
signal.signal(signal.SIGTERM, handler)
# SIGINT
signal.signal(signal.SIGINT, handler)
# SIGHUP
signal.signal(signal.SIGHUP, handler)
# SIGPIPE
signal.signal(signal.SIGPIPE, handler)
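# Re-collects missed work info for the time windows recorded in error files whenever a 'Fail_Over' trigger line arrives on stdin.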
class WorkInfoFailOver :
def __init__(self, cfg) :
self.WORKINFO_REPO = {}
self.errFilePath = cfg.get('FAIL_OVER', 'ERROR_FILE_PATH')
self.sliceTime = cfg.getint('FAIL_OVER', 'SLICE_TIME')
collectApiCfgPath = cfg.get('MODULE_CONF', 'COLLECT_API_CFG_PATH')
collectCfg = ConfigParser.ConfigParser()
collectCfg.read(collectApiCfgPath)
self.collectApi = CollectAPI.CollectApi(collectCfg)
## stdOut
def stdOut(self, msg) :
sys.stdout.write(msg+'\n')
sys.stdout.flush()
__LOG__.Trace('STD OUT : %s' % msg)
def stdErr(self, msg) :
sys.stderr.write(msg+'\n')
sys.stderr.flush()
#__LOG__.Trace('STD ERR : %s' % msg)
# Computes staDate and endDate (if the span is 60 minutes or more, use start time ~ start time + 60 minutes)
# @param
# @fileList = [yyyymmddHHMM_yyyymmddHHMM, ....]
def dateCompare(self, fileList) :
lastDate = None
firstDate = None
lastDateStr = None
firstDateStr = None
for oneFile in fileList :
staDate = None
endDate = None
if not '_' in oneFile :
continue
staDate, endDate = oneFile.split('_')
try :
staDateObj = datetime.datetime.strptime(staDate, '%Y%m%d%H%M')
endDateObj = datetime.datetime.strptime(endDate, '%Y%m%d%H%M')
if firstDate is None or staDateObj < firstDate :
firstDate = staDateObj
if lastDate is None or endDateObj > lastDate :
lastDate = endDateObj
if lastDate - firstDate >= datetime.timedelta(minutes = self.sliceTime) :
# return firstDate, firstDate + datetime.timedelta(minutes = self.sliceTime)
lastDate = firstDate + datetime.timedelta(minutes = self.sliceTime)
break
except :
continue
if firstDate and lastDate :
firstDateStr = firstDate.strftime('%Y%m%d%H%M')
lastDateStr = lastDate.strftime('%Y%m%d%H%M')
if firstDate >= lastDate :
__LOG__.Trace( 'firstDate : {} , lastDate : {} invalid Date'.format(firstDateStr, lastDateStr) )
return None, None
return firstDateStr, lastDateStr
# Check the file name format
# @param
# @fileName : yyyymmddHHMM_yyyymmddHHMM
def fileValidCheck(self, fileName) :
validCheck = False
checkFileList = list()
staDate = None
endDate = None
if '_' in fileName :
staDate, endDate = fileName.split('_')
try :
datetime.datetime.strptime(staDate, '%Y%m%d%H%M')
datetime.datetime.strptime(endDate, '%Y%m%d%H%M')
validCheck = True
except :
__LOG__.Trace('%s file invalid' % fileName)
return validCheck
# def thresholdCheck(self, firstDate, lastDate) :
# firstDateTime = datetime.datetime.strptime(firstDate, '%Y%m%d%H%M')
# lastDateTime = datetime.datetime.strptime(lastDate, '%Y%m%d%H%M')
# loopCnt = 1
# # Convert the Tango API execution interval to minutes
# checkMin = int((lastDateTime - firstDateTime).total_seconds() / datetime.timedelta(minutes = 1).total_seconds())
# loopCnt = int(checkMin/self.sliceTime) + 1
# if loopCnt > 1 :
# __LOG__.Trace('%s > %s (%s, %s) repeating %s times' %( checkMin, self.sliceTime, firstDate,firstDate, loopCnt ) )
# return loopCnt
# Tango-WM Collect
# @param
# @firstDate : staTime (yyyymmddHHMM)
# @lastDate : endTime (yyyymmddHHMM)
def infoCollect(self, firstDateStr, lastDateStr) :
# Added by Kim Joon-woo on 07-27
migrationWorkCnt = 0
migrationSucWorkCnt = 0
try :
__LOG__.Trace('workInfoCollect [%s~%s]' %(firstDateStr, lastDateStr) )
workIdList = self.collectApi.lookupWorkInfo(firstDateStr, lastDateStr, True)
if workIdList != None :
migrationWorkCnt = len(workIdList)
self.collectApi.lookupEqpInfo(workIdList)
migrationSucWorkCnt = self.collectApi.migrationSucWorkCnt
if migrationWorkCnt != 0 :
__LOG__.Trace('!!! Exception !!! missed-work collection : {}/{} '.format(migrationSucWorkCnt, migrationWorkCnt))
# firstTempDate = firstDate
# lastTempDate = lastDate
# for loop in range(loopCnt) :
# if loop + 1 == loopCnt :
# __LOG__.Trace('workInfoCollect [%s~%s]' %(firstDate, lastDate) )
# workIdList = self.collectApi.lookupWorkInfo(firstDate, lastDate, True)
# self.collectApi.lookupEqpInfo(workIdList)
# else :
# __LOG__.Trace('workInfoCollect [%s~%s]' %(firstTempDate, lastTempDate) )
# lastTempDate = str(datetime.datetime.strptime(firstTempDate, '%Y%m%d%H%M') + datetime.timedelta(minute = self.sliceTime))
# workIdList = self.collectApi.lookupWorkInfo(firstTempDate, lastTempDate, True)
# self.collectApi.lookupEqpInfo(workIdList)
# firstTempDate = str(datetime.datetime.strptime(lastTempDate, '%Y%m%d%H%M') + datetime.timedelta(minute = 1))
except :
if migrationWorkCnt != 0 :
__LOG__.Trace('!!! Exception !!! missed-work collection : {}/{} '.format(migrationSucWorkCnt, migrationWorkCnt))
raise
# Tango-WM Collect Execute File remove
# @param
# @fileList : Execute File List [yyyymmddHHMM_yyyymmddHHMM, .....]
# @firstDate : Execute File staDate (yyyymmddHHMM)
# @lastDate : Execute File endDate (yyyymmddHHMM)
def fileRemove(self, fileList, firstDateStr, lastDateStr) :
staDate = None
endDate = None
for oneFile in fileList :
if '_' in oneFile :
staDate, endDate = oneFile.split('_')
if staDate and endDate :
if int(endDate) <= int(lastDateStr) and int(staDate) >= int(firstDateStr) :
os.remove( os.path.join(self.errFilePath, oneFile) )
__LOG__.Trace('File Remove : %s' % oneFile)
else :
__LOG__.Trace('invalid Remove {}'.format(oneFile))
# Reads the error files and processes them
def fileRead(self) :
if not os.path.exists(self.errFilePath) :
__LOG__.Trace( '{} Path not exists'.format(self.errFilePath) )
return
fileList = os.listdir(self.errFilePath)
lastDateStr = None
firstDateStr = None
checkFileList = list()
for oneFile in fileList :
if self.fileValidCheck(oneFile) :
checkFileList.append(oneFile)
if not checkFileList :
__LOG__.Trace('File not Exists : %s' % self.errFilePath )
return
checkFileList.sort()
firstDateStr, lastDateStr = self.dateCompare(checkFileList)
# loopCnt = self.sliceTimeCheck(firstDate, lastDate)
# self.infoCollect(firstDate, lastDate, loopCnt)
if firstDateStr and lastDateStr :
try :
self.infoCollect(firstDateStr, lastDateStr)
self.fileRemove(checkFileList, firstDateStr, lastDateStr)
except :
__LOG__.Exception()
def run(self) :
__LOG__.Trace('WorkInfo FailOver Start !!')
while not SHUTDOWN :
strIn = ''
strLine = ''
try :
strIn = sys.stdin.readline()
strLine = strIn.strip()
if strLine != 'Fail_Over' :
continue
self.fileRead()
except :
__LOG__.Exception()
continue
finally :
self.stdErr(strIn)
def main() :
module = os.path.basename(sys.argv[0])
cfgfile = sys.argv[1]
cfg = ConfigParser.ConfigParser()
cfg.read(cfgfile)
logPath = cfg.get("GENERAL", "LOG_PATH")
logFile = os.path.join(logPath, "%s.log" % module)
logCfgPath = cfg.get("GENERAL", "LOG_CONF")
logCfg = ConfigParser.ConfigParser()
logCfg.read(logCfgPath)
Log.Init(Log.CRotatingLog(logFile, logCfg.get("LOG", "MAX_SIZE"), logCfg.get("LOG", "MAX_CNT") ))
workInfoFailOver = WorkInfoFailOver(cfg)
workInfoFailOver.run()
__LOG__.Trace('main is terminated')
if __name__ == '__main__' :
try :
main()
except :
__LOG__.Exception()
```
#### File: joonwoo/Migration/IrisMigration_JW.py
```python
import datetime
import os
import sys
import signal
import time
import ConfigParser
import glob
import json
import Mobigen.Common.Log as Log; Log.Init()
import API.M6 as M6
def handler(signum, frame):
__LOG__.Trace('signal: process shutdown')  # use Trace() for consistency with the rest of this codebase
# SIGTERM
signal.signal(signal.SIGTERM, handler)
# SIGINT
signal.signal(signal.SIGINT, handler)
# SIGHUP
signal.signal(signal.SIGHUP, handler)
# SIGPIPE
signal.signal(signal.SIGPIPE, handler)
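# Exports the configured IRIS tables to column.ctl / data.dat files, and can load them back through the M6 cursor Load/LoadGlobal API.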
class IrisMirgration :
def __init__(self, cfg, section):
self.section = section
self.cfg = cfg
self.IRIS = self.cfg.get(self.section , 'IRIS')
self.IRIS_ID = self.cfg.get(self.section , 'IRIS_ID')
self.IRIS_PASS = self.cfg.get(self.section , 'IRIS_PASS')
self.conn = None
self.cursor = None
__LOG__.Trace('start!')
def stdout(self, msg):
sys.stdout.write(msg+'\n')
sys.stdout.flush()
# print(msg, file=sys.stdout)
__LOG__.Trace('OUT: %s' % msg)
def disConnect(self,conn,cursor) :
if cursor != None:
try : cursor.close()
except : pass
if conn != None :
try : conn.close()
except : pass
def initConnect(self) :
self.conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database='tacs')
__LOG__.Trace('IRIS Connect!')
try :
self.cursor = self.conn.cursor()
self.cursor.SetFieldSep('|§|')
self.cursor.SetRecordSep('|§-§|')
#self.cursor.SetFieldSep('|^|')
#self.cursor.SetRecordSep('|^-^|')
except :
__LOG__.Exception()
finally :
self.conn.commit()
def getMigrationTableList(self) :
tableList = self.cfg.get(self.section , 'MIGRATION_TABLES')
return tableList.split(',')
def getSelectDataList(self, table) :
cursor = self.cursor
table = table.strip()
# queryLimit = 'SELECT * FROM %s limit 0;'
sql = """TABLE LIST %s"""
cursor.Execute2(sql % table)
result = list()
tableData = dict()
for raw in cursor :
tableData['scope'] = raw[2].encode('utf-8')
tableData['key'] = raw[5].encode('utf-8')
tableData['part'] = raw[6].encode('utf-8')
# cursor.Execute2(queryLimit % table )
# columnList = cursor.Metadata()['ColumnName']
# query = 'SELECT %s FROM %s GROUP BY IDX;'
# query = """
# /*+ LOCATION ( PARTITION >= '20190929000000' AND PARTITION < '20190930000000' ) */
# SELECT %s FROM %s where LNKG_DMN_DIV_CD != '111' ;"""
query = """
SELECT %s FROM %s
"""
columnList = self.cfg.get( self.section, table )
__LOG__.Trace(columnList)
cursor.Execute2(query % (columnList, table) )
for raw in cursor :
r = '|§|'.join(raw).encode('utf-8')
result.append(r)
return columnList, result, tableData
def makeCTLFile(self, table, columnList) :
#for idx in range(0, len(columnList)) :
# columnList[idx] = str(columnList[idx]).encode('utf-8')
if not os.path.exists(table) : os.mkdir(table)
ctlFile = open(os.path.join(table, 'column.ctl'), 'w')
ctlFile.write(columnList.strip().replace(',', '|§-§|'))
# ctlFile.write('|^-^|'.join(columnList))
ctlFile.close()
def makeDatFile(self, table, dataList, tableData) :
if len(dataList) == 0 :
__LOG__.Trace('DataList Size zero')
else :
if not os.path.exists(table) : os.mkdir(table)
if tableData['scope'].upper() == 'LOCAL' :
cntData = open(os.path.join(table, 'cntData.txt'),'w')
cntData.write(str(len(dataList)))
cntData.close()
for oneRawData in dataList :
oneRawList = oneRawData.split('|§|')
key = oneRawList[0]
evntDate = oneRawList[1]
partitionDate = datetime.datetime.strptime(evntDate, '%Y%m%d%H%M%S')
partition = datetime.datetime.strftime(partitionDate, '%Y%m%d%H0000')
#dataDict = dict()
datFilePath = os.path.join(table, '_'.join(['data.dat', key, partition]))
if os.path.exists(datFilePath) :
oneRawData = ''.join(['|§-§|', oneRawData])
datFile = open(os.path.join(table, '_'.join(['data.dat', key, partition])), 'a+')
#dataDict["key"] = tableData["key"]
#dataDict["part"] = tableData["part"]
#dataDict["data"] = dataList[listNum]
datFile.write(str(oneRawData))
datFile.close()
elif tableData['scope'].upper() == 'GLOBAL' :
datFile = open(os.path.join(table, 'data.dat'), 'w')
datFile.write('|§-§|'.join(dataList))
datFile.close()
def run(self):
__LOG__.Trace('IrisMigration start!!')
try :
self.initConnect()
for table in self.getMigrationTableList() :
columnList, dataList, tableData = self.getSelectDataList(table)
print columnList
self.makeCTLFile(table, columnList)
self.makeDatFile(table, dataList, tableData)
except :
__LOG__.Exception()
self.disConnect(self.conn, self.cursor)
def loadData(self, table) :
__LOG__.Trace('Load to Table : %s' % table)
try :
self.initConnect()
fileList = os.listdir(table)
fileList.sort()
datFileList = list()
for oneDatFile in fileList :
if oneDatFile.find('data.dat') != -1 :  # 'is not' compares identity, not value
datFileList.append(oneDatFile)
__LOG__.Trace('datFile : %s | datFileCnt : %s' % (datFileList, len(datFileList) ) )
if len(datFileList) == 1 :
print 'Global!!'
try :
resultGlobal = self.cursor.LoadGlobal(table, os.path.join(table, 'column.ctl') , os.path.join(table, datFileList[0] ) )
print resultGlobal
except :
__LOG__.Exception()
else :
for oneDatFile in datFileList :
f = open(os.path.join(table, oneDatFile), 'r')
name, key, partition = oneDatFile.split('_')
fkp = f.read()
f.close()
# find the key value and set its index
# find the partition value and set its index
result = self.cursor.Load(table, key, partition, os.path.join(table, 'column.ctl'), os.path.join(table, oneDatFile) )
print ( 'LOAD RESULT : %s' % result)
except :
__LOG__.Exception()
self.disConnect(self.conn, self.cursor)
def main():
module = os.path.basename(sys.argv[0])
section = 'IRIS_MIGRATION'
cfgfile = '/home/tacs/user/KimJW/Migration/migration.conf'
cfg = ConfigParser.ConfigParser()
cfg.read(cfgfile)
log_file = '%s_%s.log' % (module, section)
Log.Init(Log.CRotatingLog(log_file, 1000000, 9))
im = IrisMirgration(cfg, section)
loadTable = None
if len(sys.argv) > 1 :
loadTable = sys.argv[1]
im.loadData(loadTable)
else :
im.run()
__LOG__.Trace('end main!')
if __name__ == '__main__':
try:
main()
except:
__LOG__.Exception('main error')
```
#### File: SMS-master/Collect/EventFlow.py
```python
from threading import Thread
from Queue import Queue
from socket import error
from re import compile
from ConfigParser import *
#from os import *
import subprocess
import time
import paramiko
import sys
import signal
import Mobigen.Common.Log as Log
SHUTDOWN = False
def shutdown(sigNum, frame):
global SHUTDOWN
SHUTDOWN = True
sys.stderr.write("Catch Signal :%s" % sigNum)
sys.stderr.flush()
signal.signal(signal.SIGTERM, shutdown) # sigNum 15 : Terminate
signal.signal(signal.SIGINT, shutdown) # sigNum 2 : Interrupt
signal.signal(signal.SIGHUP, shutdown) # sigNum 1 : HangUp
signal.signal(signal.SIGPIPE, shutdown) # sigNum 13 : Broken Pipe
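# Polls one server over SSH for its hostname and per-port event-flow queue depths; queue counts over the configured threshold are re-checked twice before being flagged NOK.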
class ServerWatch(object) :
def __init__(self, ip, username, password, port, path, password2, qCnt21, qCnt30, qCnt32, qCnt33):
self.ip =ip
self.uname = username
self.pw = password
self.pw2 = <PASSWORD>
self.port = int(port)
self.path = path
self.client = paramiko.SSHClient()
self.OKFlag = "OK"
self.Q_CNT = {'20001' : qCnt21, '30000' : qCnt30, '32000' : qCnt32, '33000' : qCnt33}
def SSHClinetConnection(self):
client = self.client
# client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
client.connect(self.ip, username=self.uname, password=<PASSWORD>, port=self.port, timeout=10)
except:
client.connect(self.ip, username=self.uname, password=<PASSWORD>, port=self.port, timeout=10)
def commandHOSTNAME(self):
hlist=[]
stdin, stdout, stderr = self.client.exec_command('cat /proc/sys/kernel/hostname')
for line in stdout:
line = line.strip()
hlist.append(line)
retdic={'VALUE':hlist}
return retdic
def commandQUEUE_COUNT2(self, port):
__LOG__.Trace(port)
result = []
excNodeDict = dict()
cmd = '%s %s' % (self.path, port)
__LOG__.Trace(cmd)
stdin, stdout, stderr = self.client.exec_command(cmd)
rs = stdout.readlines()
__LOG__.Trace('\n' + '\n'.join(rs))
for line in rs:
if line.startswith('<begin>') or line.startswith('<end>') or line.startswith('-----------') or ('ACTIVATE TIME' in line and 'Q SIZE' in line):
continue
abn = line.split('|')[0].strip()
ps_name = line.split('|')[1].strip()
status = line.split('|')[3].strip()
que_cnt = line.split('|')[6].strip()
if int(que_cnt) > int(self.Q_CNT[port]) and ('OK' in abn) and ('ACT' in status) :
excNodeDict[ps_name] = que_cnt
result.append({'STATUS':'OK','VALUE':{'NODE':ps_name, 'ABN':abn, 'Q_STATUS': 'OK', 'STATUS': status , 'Q_CNT' : que_cnt}})
if excNodeDict :
result = self.commandQUEUE_COUNT(port, result, excNodeDict)
return result
def commandQUEUE_COUNT(self, port, result, nodeDict):
__LOG__.Trace('Exceed Queue Count')
tempNum = 0
firFlag = False
secFlag = False
while tempNum < 2 :
time.sleep(3)
tempNum += 1
cmd = '%s %s' % (self.path, port)
stdin, stdout, stderr = self.client.exec_command(cmd)
rs = stdout.readlines()
for line in rs :
if line.startswith('<begin>') or line.startswith('<end>') or line.startswith('-----------') or ('ACTIVATE TIME' in line and 'Q SIZE' in line) :
continue
__LOG__.Trace(line)
resultList = line.split('|')
if resultList[1].strip() not in nodeDict.keys() :
continue
ps_name = resultList[1].strip()
que_cnt = resultList[6].strip()
if int(que_cnt) > int(nodeDict[ps_name]) and tempNum == 1 :
nodeDict[ps_name] = que_cnt
firFlag = True
for oneReDict in result :
if oneReDict['VALUE']['NODE'] == ps_name :
oneReDict['VALUE']['Q_CNT'] += (', ' + que_cnt)
if firFlag and tempNum == 2 :
for oneReDict in result :
if oneReDict['VALUE']['NODE'] == ps_name and int(que_cnt) > int(nodeDict[ps_name]) :
oneReDict['VALUE']['Q_STATUS'] = 'NOK'
oneReDict['VALUE']['Q_CNT'] += (', ' + que_cnt)
if not firFlag :
break
return result
def run(self):
__LOG__.Trace('Start EventFlow')
infodic=dict()
try:
self.SSHClinetConnection()
infodic['HOSTNAME']=self.commandHOSTNAME()
infodic['STATUS']=self.OKFlag # isn't this the connection status rather than the server status?
infodic['EF'] = {}
stdin, stdout, stderr = self.client.exec_command('ps -ef | grep SIOEF | grep -v grep') # find the running Event Flow processes
rs = stdout.readlines()
PORT_IDX = -2
for ef in rs:
port = ef.split()[PORT_IDX]
infodic['EF'][port] = self.commandQUEUE_COUNT2(port)
#infodic[port] = self.commandQUEUE_COUNT2(port)
#infodic['QUEUE_COUNT']=self.commandQUEUE_COUNT2(port)
self.client.close()
return infodic
# case where the password or <PASSWORD> is wrong
except :
self.OKFlag = "NOK"
infodic['STATUS']=self.OKFlag
shell = "cat /etc/hosts | awk '{if(/"+self.ip+"/){print $2}}'"
p = subprocess.Popen(shell,shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
hostname = p.stdout.readline()
hostname = hostname.strip()
infodic['HOSTNAME']={'VALUE': [hostname]}
self.client.close()
__LOG__.Exception()
return infodic
class JobProcess(object):
def __init__(self, svrobjlist):
self.data_q = Queue()  # Queue() takes an int maxsize; passing a list only worked by accident in Python 2
self.THREADPOOL = 10
self.total = dict()
self.putdata(svrobjlist)
def job_process(self,th_id):
while not SHUTDOWN:
try:
ip,obj = self.data_q.get_nowait()
__LOG__.Trace('thread get : %s ' % th_id)
except:
__LOG__.Trace("thread %s is done" % th_id)
break
self.total[ip] = obj.run()
time.sleep(0.1)
def putdata(self, svrobjlist):
for ip,svrobj in svrobjlist:
self.data_q.put((ip,svrobj))
def makeThreadlist(self):
th_list = list()
for i in range(self.THREADPOOL):
th_obj = Thread(target=self.job_process, args=[i])
th_list.append(th_obj)
return th_list
def run(self):
th_list = self.makeThreadlist()
for th_obj in th_list:
th_obj.start()
for th_obj in th_list:
th_obj.join()
__LOG__.Trace("[Collect]SERVER RESOURCE END_______________________")
return self.total
class EventFlow(object):
def __init__(self, getconfigparser):
self.config = getconfigparser
def getConfParser(self):
conflist = list()
conf_dict = dict()
type_list = ['SSH_PORT','USER','PASSWD','PASSWD2']
path = self.config.get('EVENTFLOW','PATH')
qCnt21 = self.config.get('EVENTFLOW','20001_Q_CNT')
qCnt30 = self.config.get('EVENTFLOW','30000_Q_CNT')
qCnt32 = self.config.get('EVENTFLOW','32000_Q_CNT')
qCnt33 = self.config.get('EVENTFLOW','33000_Q_CNT')
for rsc_ip in self.config.get('EVENTFLOW','SERVER_LIST').split(','):
conf_dict['IP'] =rsc_ip
for type in type_list:
try:
conf_dict[type] = self.config.get('EVENTFLOW',type)
except:
conf_dict[type] = self.config.get('RESOURCES',type)
conflist.append((conf_dict['IP'], conf_dict['SSH_PORT'], conf_dict['USER'], conf_dict['PASSWD'], path, conf_dict['PASSWD2'], qCnt21, qCnt30, qCnt32, qCnt33))
return conflist
def run(self):
svrlist =[]
__LOG__.Trace("[Collect]SERVER RESOURCE START_____________________")
infolist = self.getConfParser()
for tup in infolist:
svr_obj = ServerWatch(tup[0],tup[2],tup[3],tup[1],tup[4],tup[5],tup[6],tup[7],tup[8],tup[9]) # ip, username, password, port, path, password2
svrlist.append((tup[0],svr_obj))
jp_obj = JobProcess(svrlist)
return jp_obj.run()
```
#### File: SMS-master/Noti/SMSSend.py
```python
import datetime
import sys
import os
import re
import Mobigen.Common.Log as Log
from socket import *
import time
#import ConfigParser
import struct
IDX_IP = 0
IDX_PORT = 1
IDX_VALUE = 0
IDX_DESC = 1
IDX_IRIS_NODEID = 0
IDX_IRIS_SYS_STATUS = 1
IDX_IRIS_ADM_STATUS = 2
IDX_IRIS_UPDATE_TIME = 3
IDX_IRIS_CPU = 4
IDX_IRIS_LOADAVG = 5
IDX_IRIS_MEMP = 6
IDX_IRIS_MEMF = 7
IDX_IRIS_DISK = 8
IDX_MEMORY_TOTAL = 0
IDX_MEMORY_USED = 1
IDX_MEMORY_AVAILABE = 2
IDX_MEMORY_USE_PER = 3
IDX_DISK_MOUNT = 0
IDX_DISK_1MBLOCKS = 1
IDX_DISK_USED = 2
IDX_DISK_AVAILABLE = 3
IDX_DISK_USE_PER = 4
IDX_LOG_DATE = 0
IDX_LOG_VALUE = 1
IDX_FILE_VALUE =1
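# Builds alert messages from the collected per-server status dict and sends them via SMS; marker files under ALRAM_OPEN_PATH suppress duplicate alerts until an error clears.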
class SMSSend() :
# title / carrier info dict / phone number info dict / value dict
def __init__(self, _Parser, _ValueDict,collect) :
self.ValueDict = _ValueDict
self.PARSER = _Parser
self.Collect = collect
self.GetConfig()
def GetConfig(self) :
try :
self.Title = self.PARSER.get('SMS', 'TITLE')
self.fromNumber = self.PARSER.get('SMS', 'FROM_Number')
self.toNumberList = self.PARSER.get('SMS', 'TO_NUMBER').split(',')
self.alramFilePath = self.PARSER.get('SMS', 'ALRAM_OPEN_PATH')
except :
__LOG__.Exception()
def alreadyAlramMsg(self, Server, Type, status, DiskMount=None) :
if not os.path.exists(self.alramFilePath) :
try :
os.makedirs(self.alramFilePath)
except :
__LOG__.Exception()
errFilePath = None
if DiskMount :
if '/' in DiskMount :
DiskMount = DiskMount.replace('/', '-')
errFilePath = os.path.join(self.alramFilePath, '%s_%s_%s' % (Server, Type, DiskMount) )
else :
errFilePath = os.path.join(self.alramFilePath, '%s_%s' % (Server, Type) )
if status == 'NOK' :
__LOG__.Trace( 'anomaly detected [%s] - [%s]' %(Server, Type) )
if not os.path.exists( errFilePath ) :
with open (errFilePath, 'w' ) as filePath :
filePath.write('')
__LOG__.Trace('error first detected %s' % errFilePath)
return True, 'ERR-Open'
else :
__LOG__.Trace('error still being resolved.. %s' % errFilePath )
return False, None
else :
__LOG__.Trace( 'no anomaly [%s] - [%s]' %(Server, Type) )
if not os.path.exists( errFilePath ) :
return False, None
else :
__LOG__.Trace('error resolved %s' % errFilePath )
os.remove(errFilePath)
return True, 'ERR-Close'
def run(self) :
try :
#Make Msg List
MsgList = []
for Server in self.ValueDict.keys() :
HostName = self.ValueDict[Server]['HOSTNAME']['VALUE'][0]
for Type in self.ValueDict[Server].keys() :
__LOG__.Trace(Type)
# may need to be converted to UTF-8
if type(HostName)==unicode :
HostName = HostName.encode('cp949')
msgFlag = False
msgStatus = None
######## OPEN/CLOSE concept added 2020-04-02 ################
if Type == 'STATUS' :
msgFlag, msgStatus = self.alreadyAlramMsg(Server ,Type, self.ValueDict[Server][Type])
elif Type == 'IRIS' or Type == 'MEMORY' or Type == 'SWAP' or Type == 'LOAD_AVG' or Type == 'IRIS_OPENLAB' or Type == 'WEB-LOGIN' :
msgFlag, msgStatus = self.alreadyAlramMsg(Server ,Type, self.ValueDict[Server][Type]['STATUS'])
elif Type == 'DISK' :
msgFlag = True
if not msgFlag :
continue
##########################################################
#Connected Fail
if Type == 'STATUS' :
#and self.ValueDict[Server][Type] == 'NOK') :
Msg = '[%s-%s] | %s | (%s) | Connected Fail' % (self.Title, msgStatus, Server,HostName) # SSH 자체 Connection Error
Msg = Msg.decode('utf-8')
MsgList.append(Msg)
continue
elif Type == 'IRIS' :
__LOG__.Trace(self.ValueDict[Server][Type])
# if self.ValueDict[Server][Type]['STATUS'] == 'NOK' :
Desc = self.ValueDict[Server][Type]['VALUE'][IDX_IRIS_SYS_STATUS] + '/' + self.ValueDict[Server][Type]['VALUE'][IDX_IRIS_UPDATE_TIME]
Msg = '[%s-%s] | %s (%s) | %s | %s' % (self.Title, msgStatus, Server, HostName, Type, Desc) # IRIS Error
Msg = Msg.decode('utf-8')
MsgList.append(Msg)
continue
# Msg = Msg.decode('utf-8')
elif Type == 'MEMORY' or Type == 'SWAP' :
#__LOG__.Trace(self.ValueDict[Server][Type]['VALUE'][IDX_MEMORY_USE_PER])
# if self.ValueDict[Server][Type]['STATUS'] == 'NOK' :
Msg = '[%s-%s] | %s | (%s) | %s(%%)' % (Type, msgStatus, Server, HostName, self.ValueDict[Server][Type]['VALUE'][IDX_MEMORY_USE_PER])
Msg = Msg.decode('utf-8')
MsgList.append(Msg)
continue
# used memory * 100 / total memory [total memory = used + free]
elif Type == 'LOAD_AVG' :
#if self.ValueDict[Server][Type]['STATUS'] == 'NOK' :
Msg = '[%s-%s] | %s | (%s) | %s | %s' % (self.Title, msgStatus, Server, HostName, Type, '/'.join(self.ValueDict[Server][Type]['VALUE']))
# 1-minute system load average / 5-minute .. / 15-minute ..
Msg = Msg.decode('utf-8')
MsgList.append(Msg)
continue
elif Type == 'DISK' :
for Disk in self.ValueDict[Server][Type] :
#__LOG__.Trace(Disk)
msgFlag, msgStatus = self.alreadyAlramMsg(Server, Type, Disk['STATUS'], Disk['VALUE'][IDX_DISK_MOUNT])
if not msgFlag :
continue
# if Disk['STATUS'] == 'NOK' :
################################# temporary addition #######################
# if (HostName == 'Koti-Chain-01' or HostName == 'Koti-Chain-02') and ('/snap/core/8592' == Disk['VALUE'][IDX_DISK_MOUNT] or '/snap/core/8689' == Disk['VALUE'][IDX_DISK_MOUNT] ) :
# continue
##################################################################
# else :
Msg = '[%s-%s] | %s | (%s) | %s | %s | %s(%%)' % (self.Title, msgStatus, Server, HostName, Type, Disk['VALUE'][IDX_DISK_MOUNT], Disk['VALUE'][IDX_DISK_USE_PER])
# mount point, disk usage percentage
Msg = Msg.decode('utf-8')
MsgList.append(Msg)
elif Type == 'IRIS_OPENLAB' :
# if self.ValueDict[Server][Type]['STATUS'] == 'NOK' :
__LOG__.Trace(self.ValueDict[Server][Type]['VALUE'])
Msg = '[%s-%s] | %s (%s) | %s' % (self.Title, msgStatus, Server, HostName, self.ValueDict[Server][Type]['VALUE'])
Msg = Msg.decode('utf-8')
MsgList.append(Msg)
continue
elif Type == 'WEB-LOGIN' :
# if self.ValueDict[Server][Type]['STATUS'] == 'NOK' :
Msg = '[%s-%s] | %s (%s) | %s' % (self.Title, msgStatus, Server, HostName, self.ValueDict[Server][Type]['STATUS'])
__LOG__.Trace(self.ValueDict[Server][Type]['VALUE'])
Msg = Msg.decode('utf-8')
MsgList.append(Msg)
continue
else : pass
# send messages: one message per number per connection
if MsgList :
for Msg in MsgList :
__LOG__.Trace('\n'+Msg)
for toNumber in self.toNumberList : # phone numbers
__LOG__.Trace('TO Number[%s]' % toNumber)
__LOG__.Trace("/home/tacs/user/KimJW/selenium_sms/SMS-master/smsTransfer.p %s %s '%s'" %(self.fromNumber, toNumber, Msg) )
os.system("/home/tacs/user/KimJW/selenium_sms/SMS-master/smsTransfer.p %s %s '%s'" %(self.fromNumber, toNumber, Msg) )
time.sleep(2) # messages get dropped if sent too quickly, so sleep between sends
else :
__LOG__.Trace('no additional anomalies')
except :
__LOG__.Exception()
```
#### File: bin/backup/swagger_postgresql.py
```python
import os
import sys
import werkzeug
import json
werkzeug.cached_property = werkzeug.utils.cached_property
from flask import Flask, request
from flask_restplus import Api, Resource, fields, Namespace, cors
from flask_restplus._http import HTTPStatus
from flask_cors import CORS
from flask_restplus import reqparse
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import API_py3.M6 as M6
import API_py3.Log as Log
import psycopg2
app = Flask(__name__)
api = Api(app, version='1.0', title='IRIS API',
description='IRIS API',
)
CORS(app, resources={r'/*': {'origins': '*'}})
get_req_parser = reqparse.RequestParser(bundle_errors=True)
get_req_parser.add_argument('param', type=str, required=True)
json_parser = api.parser()
json_parser.add_argument('json', type=str, required=True, location='json',
help='JSON BODY argument')
arg_parser = api.parser()
arg_parser.add_argument('json', type=str, required=True,
help='URL JSON argument')
rows_list = []
fields = []
class global_variable:
values = {}
def __init__(self, *args, **kwargs):
super(global_variable, self).__init__(*args, **kwargs)
@classmethod
def get(cls, _val_name):
return cls.values[_val_name]
@classmethod
def set(cls, _val_name, _val):
cls.values[_val_name] = _val
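# GET /select/<table_name>?param=... runs a fixed-column SELECT ... LIMIT 100 against PostgreSQL and returns rows in the grid format the UI expects.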
@api.route('/select/<string:table_name>')
class select(Resource):
@api.response(200, "Success")
def options(self, table_name):
return {}
@api.doc(parser=json_parser)
@api.expect(get_req_parser)
@api.response(200, "Success")
def get(self, table_name):
global_variable.set('rows_list', [])
global_variable.set('fields', [])
param = request.args["param"]
conn_postgre = psycopg2.connect(host = '192.168.102.107', dbname = 'nbiotdb', user = 'nbiot', password = '<PASSWORD>')
cursor = conn_postgre.cursor()
columns = ['CDATE', 'CHOUR', 'CTIME', 'STAT_TYPE', 'APP_SERVICE_CODE', 'TOTAL_KPI', 'IDC_CODE']
sql = str("""select CDATE, CHOUR, CTIME, STAT_TYPE, APP_SERVICE_CODE, TOTAL_KPI, IDC_CODE from %s limit 100;""" % table_name)
try:
    cursor.execute(sql)
    # cursor.description is only populated after execute(), so read it here
    column_names = [desc[0] for desc in cursor.description]
    rows = cursor.fetchall()
for row in rows :
rowList = list()
for data in row :
rowList.append(str(data))
global_variable.get('rows_list').append(rowList)
for cname in columns:
global_variable.get('fields').append({"name": cname, "type": "TEXT", "grouped": False})
return {"data": {"fields": global_variable.get('fields'), "results": global_variable.get('rows_list')}}
except Exception as ex:
# __LOG__.Trace("Except: %s" % ex)
print("Except: %s" % ex)
finally:
cursor.close()
conn_postgre.close()
return {}
@api.route('/post/')
class post(Resource):
@api.response(200, "Success")
def options(self):
return {}
@api.response(200, "Success")
def post(self):
args = parse_req_data(request)
return {"success": {"code": 0, "messages": args["json"]}, "data": {"method": "post"}}
@api.route('/put/<string:table_name>')
class put(Resource):
@api.response(200, "Success")
def options(self, table_name):  # the route includes <table_name>, so every method must accept it
return {}
@api.response(200, "Success")
def put(self, table_name):
args = parse_req_data(request)
return {"error": {"code": -1, "messages": args["json"]}, "data": {"method": "put"}}
@api.route('/insert/')
@api.route('/delete/')
class delete(Resource):
@api.response(200, "Success")
def options(self):
return {}
@api.response(200, "Success")
def put(self):
args = parse_req_data(request)
return {"error": {"code": -1, "messages": args["json"]}, "data": {"method": "put"}}
@api.response(200, "Success")
def delete(self):
args = parse_req_data(request)
return {"success": {"code": 0, "messages": args["json"]}, "data": {"method": "post"}}
@api.route('/patch/')
class patch(Resource):
@api.response(200, "Success")
def options(self):
return {}
@api.response(200, "Success")
def patch(self):
args = parse_req_data(request)
return {"success": {"code": 0, "messages": args["json"]}, "data": {"method": "patch"}}
def parse_req_data(request):
if not hasattr(request, 'method'):
return None
if request.method.upper() != 'GET':
if request.data:
return json.loads(request.data)
if 'json' in request.args:
return json.loads(request.args['json'])
if request.args:
return request.args # note: type is ImmutableMultiDict
return {}
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Request-Method', '*')
return response
if __name__ == '__main__':
app.run(host='192.168.102.253', port=5050, debug=True)
```
#### File: TextMining/bin/word2vecKor.py
```python
import gensim
import os
import sys
import io
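# Looks up nearest neighbours for each comma-separated Korean word with a pretrained word2vec model (ko.bin) and writes the results to a file.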
class Word2VecModule():
def __init__(self):
pass
def run(self, line):
model = gensim.models.Word2Vec.load('ko.bin')
module = os.path.basename(sys.argv[0])
listB = line.split(",")
resList = []
for x in listB[:-1]:
print (x)
y = x.rstrip()
try:
# resDict[oneWord] = model.wv.most_similar(positive=[oneWord])
t = model.wv.most_similar(y)
except KeyError as e:
print('%s is not included in the dictionary' % y)
t = '%s is not included in the dictionary' % y
except Exception as ex:
# print(ex)
t = ex
resList.append(t)
print(resList)
inputString = sys.argv[1]
filename = '../workd2vecFile/res_%s' % inputString
fout = open(filename, 'w')
for t in enumerate(resList):
a = 'index : {} value: {}'.format(*t)
print(a)
fout.write(a)
def main():
f = open(sys.argv[1], "r")
line = f.readline()
wv = Word2VecModule()
wv.run(line)
if __name__ == '__main__':
try:
main()
except ValueError:
print(ValueError)
``` |
{
"source": "joonyoungleeduke/MatchMe",
"score": 2
} |
#### File: server/Accounts/serializers.py
```python
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['url', 'id', 'username', 'email', 'first_name', 'last_name', 'groups', 'profile']
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['id', 'username', 'email', 'password', 'first_name', 'last_name']
extra_kwargs = {
'first_name': {
'required': True,
},
'last_name': {
'required': True,
},
'email': {
'required': True,
},
}
# validators = [
# UniqueValidator(
# queryset=User.objects.all(),
# )
# ]
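# Hash the raw password via set_password() instead of storing it verbatim.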
def create(self, validated_data):
password = validated_data.pop('password')
user = User(**validated_data)
user.set_password(password)
user.save()
return user
```
#### File: server/Profiles/models.py
```python
from django.db import models
from django.contrib.auth.models import User, Group
from django.dispatch import receiver
from django.db.models.signals import post_save
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
bio = models.TextField(blank=True, default='')
ind_matches = models.IntegerField(blank=True, default=0)
total_matches = models.IntegerField(blank=True, default=0)
preference1 = models.CharField(max_length=100)
preference2 = models.CharField(max_length=100)
preference3 = models.CharField(max_length=100)
image = models.ImageField(upload_to='profile_pictures', default='default_profile.jpg')
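# post_save signal: automatically create a Profile whenever a new User is created.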
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance).save()
```
#### File: server/server/schema.py
```python
import graphene
from graphene_django import DjangoObjectType
from graphql_jwt.decorators import login_required
import graphql_jwt
from django.contrib.auth import get_user_model
from server.app.types import UserType, UserProfileType, GroupType, PostType, CommentType, MatchType, HeartType
from server.app.mutations import RegisterUser
class Mutations(graphene.ObjectType):
token_auth = graphql_jwt.ObtainJSONWebToken.Field()
verify_token = graphql_jwt.Verify.Field()
refresh_token = graphql_jwt.Refresh.Field()
register_user = RegisterUser.Field()
class Query(graphene.ObjectType):
whoami = graphene.Field(UserType)
user_info = graphene.Field(UserType, username=graphene.String(required=True))
users_info = graphene.List(UserType)
def resolve_whoami(self, info):
user = info.context.user
if user.is_anonymous:
raise Exception('Not logged in')
return user
@login_required
def resolve_user_info(self, info, username):
return get_user_model().objects.get(username = username)
@login_required
def resolve_users_info(self, info):
return get_user_model().objects.all()
schema = graphene.Schema(query = Query, mutation = Mutations)
``` |
{
"source": "joonyoungparkdev/JoonBot",
"score": 2
} |
#### File: JoonBot/cogs/downbad.py
```python
from discord.ext import commands
class DownBad(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
if payload.emoji.id == 932028197162328114:  # PartialEmoji.id is an int, so compare against an int, not a string
print("hit")
def setup(client):
client.add_cog(DownBad(client))
``` |
{
"source": "JoonyoungYi/glass-web",
"score": 3
} |
#### File: JoonyoungYi/glass-web/api.py
```python
import json
import urllib.parse
import urllib.request
from flask import session
from models import Rating, User, Product, Alarm, LoginStatus
from config import *
###############################################################################
#
# INNER FUNCTIONS
#
###############################################################################
def get_login_status():
return session.get('login_status')
def get_user_id():
return session.get('user_id')
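# Thin urllib wrappers that attach the API session key as an Authorization header.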
def request_get(url, data={}):
#
headers = {'Authorization': session.get('sessionkey', '')}
url_values = urllib.parse.urlencode(data)
full_url = API_BASE_URL + url + '?' + url_values
#
req = urllib.request.Request(full_url,
headers=headers)
response = urllib.request.urlopen(req)
html = response.read().decode('utf-8')
return html
def request_post(url, data={}):
#
headers = {'Authorization': session.get('sessionkey', '')}
full_url = API_BASE_URL + url
#
values = urllib.parse.urlencode(data)
req = urllib.request.Request(full_url,
values.encode('utf-8'),
headers=headers)
response = urllib.request.urlopen(req)
html = response.read().decode('utf-8')
return html
###############################################################################
#
# APIS
#
###############################################################################
#
def AuthLoginFacebookApi(token):
url = '/auth/login/facebook/'
values = {'token': token}
print(token)
raw = request_post(url, values)
raw_obj = json.loads(raw)
session['sessionkey'] = raw_obj['sessionkey']
session['user_id'] = raw_obj['user_id']
if raw_obj['tutorial_finished']:
session['login_status'] = LoginStatus.LOGIN_SUCCESS
else:
session['login_status'] = LoginStatus.LOGIN_TUTORIAL_UNFINISHED
def RatingDetailApi(rating_id):
url = '/rating/detail/'
values = {'rating_id': rating_id}
raw = request_get(url, values)
raw_obj = json.loads(raw)
if raw_obj.get('rating') is None:
return None
rating = Rating(raw_obj['rating'])
return rating
def UserMeFeedListApi(offset):
url = '/user/me/feed/list/'
count = 20
#
values = {'offset': offset, 'count': count}
raw = request_get(url, values)
#
raw_obj = json.loads(raw)
ratings = []
for rating_obj in raw_obj['ratings']:
rating = Rating(rating_obj)
ratings.append(rating)
return ratings
def GlobalFeedListApi(offset):
url = '/global/feed/list/'
count = 20
values = {'offset': offset, 'count': count}
raw = request_get(url, values)
raw_obj = json.loads(raw)
ratings = []
for rating_obj in raw_obj['ratings']:
rating = Rating(rating_obj)
ratings.append(rating)
return ratings
def UserRatingListApi(user_id, offset):
url = '/user/rating/list/'
count = 20
values = {'user_id': user_id, 'offset': offset, 'count': count}
raw = request_get(url, values)
raw_obj = json.loads(raw)
ratings = []
for rating_obj in raw_obj['ratings']:
rating = Rating(rating_obj)
ratings.append(rating)
return ratings
def UserDetailApi(user_id):
url = '/user/detail/'
values = {'user_id': user_id}
raw = request_get(url, values)
raw_obj = json.loads(raw)
return User(raw_obj['user'])
def UserFollowingListApi(user_id):
url = '/user/following/list/'
values = {'user_id': user_id}
raw = request_get(url, values)
raw_obj = json.loads(raw)
users = []
for user_obj in raw_obj.get('users', []):
user = User(user_obj)
if user.id == session['user_id']:
continue
users.append(user)
return users
def UserFollowerListApi(user_id):
url = '/user/follower/list/'
values = {'user_id': user_id}
raw = request_get(url, values)
raw_obj = json.loads(raw)
users = []
for user_obj in raw_obj.get('users', []):
user = User(user_obj)
if user.id == get_user_id():
continue
users.append(user)
return users
def UserRankingListApi(user_id, product_type, offset):
url = '/user/ranking/list/'
lang = "ko"
count = 20
values = {
'user_id': user_id,
'offset': offset,
'count': count,
'lang': lang,
'product_type': product_type
}
raw = request_get(url, values)
raw_obj = json.loads(raw)
products = []
for j, product_obj in enumerate(raw_obj['products']):
product = Product(product_obj)
products.append(product)
return products
def ComparisonListApi():
url = '/comparison/list/'
values = {}
raw = request_get(url, values)
raw_obj = json.loads(raw)
comparisons = []
for comparison_obj in raw_obj.get('comparisons', []):
product_a = Product(comparison_obj['product_a'])
product_b = Product(comparison_obj['product_b'])
comparisons.append((product_a, product_b))
todo_number = raw_obj['step_todo_number']
done_number = raw_obj['done_number']
return comparisons, todo_number, done_number
def ProductDetailApi(product_id):
url = '/product/detail/'
lang = 'ko'
values = {'lang': lang, 'product_id': product_id}
raw = request_get(url, values)
raw_obj = json.loads(raw)
product = Product(raw_obj['product'])
return product
def ProductRatingListApi(product_id, offset):
url = '/product/rating/list/'
count = 20
values = {'product_id': product_id, 'offset': offset, 'count': count}
raw = request_get(url, values)
raw_obj = json.loads(raw)
ratings = []
for rating_obj in raw_obj['ratings']:
rating = Rating(rating_obj)
ratings.append(rating)
return ratings
def UserMeAlarmListApi():
url = '/user/me/alarm/list/'
offset = 0
count = 20
values = {'offset': offset, 'count': count}
raw = request_get(url, values)
raw_obj = json.loads(raw)
alarms = []
for alarm_obj in raw_obj['alarms']:
alarm = Alarm(alarm_obj)
alarms.append(alarm)
return alarms
def RatingLikeToggleApi(rating_id):
url = '/rating/like/toggle/'
values = {'rating_id': rating_id}
raw = request_post(url, values)
raw_obj = json.loads(raw)
return raw_obj['is_liked']
def UserFollowingToggleApi(user_id):
url = '/user/following/toggle/'
values = {'user_id': user_id}
raw = request_post(url, values)
raw_obj = json.loads(raw)
return raw_obj['is_following']
def RatingMultiListApi(is_tutorial, product_type, offset):
url = '/rating/multi/list/'
count = 20
values = {
'is_tutorial': is_tutorial,
'product_type': product_type,
'offset': offset,
'count': count
}
raw = request_get(url, values)
raw_obj = json.loads(raw)
products = []
for product_obj in raw_obj['products']:
product = Product(product_obj)
products.append(product)
todo_number = raw_obj['step_todo_number'] - raw_obj['step_done_number']
done_number = raw_obj['done_number']
return products, todo_number, done_number
def RatingMultiAddApi(product_id, star):
url = '/rating/multi/add/'
values = {'product_id': product_id, 'star': star}
raw = request_post(url, values)
raw_obj = json.loads(raw)
return True
def ComparisonAddApi(win_product_id, lose_product_id):
url = '/comparison/add/'
values = {'win_product_id': win_product_id, 'lose_product_id': lose_product_id}
raw = request_post(url, values)
raw_obj = json.loads(raw)
return True
def UserFacebookListApi():
url = '/user/facebook/list/'
raw = request_get(url)
raw_obj = json.loads(raw)
users = []
for user_obj in raw_obj['users']:
user = User(user_obj)
users.append(user)
return users
def RecommendListApi():
url = '/recommend/list/'
values = {'product_type': 0}
raw = request_get(url, values)
raw_obj = json.loads(raw)
if raw_obj.get('products') is None:
return []
products = []
for product_obj in raw_obj.get('products', []):
product = Product(product_obj)
products.append(product)
return products
def AuthTutorialFinishApi():
url = '/auth/tutorial/finish/'
raw = request_post(url)
session['login_status'] = LoginStatus.LOGIN_SUCCESS
return True
``` |
{
"source": "JoonyoungYi/LLORMA-tensorflow",
"score": 3
} |
#### File: LLORMA-tensorflow/base/dataset.py
```python
import os
import random
import numpy as np
# from ..configs import *
def _make_dir_if_not_exists(path):
if not os.path.exists(path):
os.mkdir(path)
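# Downloads a MovieLens dataset, remaps user/item ids to contiguous indices, and produces train/valid/test splits (optionally few-shot per test user).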
class DatasetManager:
KIND_MOVIELENS_100K = 'movielens-100k'
KIND_MOVIELENS_1M = 'movielens-1m'
KIND_MOVIELENS_10M = 'movielens-10m'
KIND_MOVIELENS_20M = 'movielens-20m'
KIND_NETFLIX = 'netflix'
KIND_OBJECTS = ( \
(KIND_MOVIELENS_100K, 'http://files.grouplens.org/datasets/movielens/ml-100k.zip'), \
(KIND_MOVIELENS_1M, 'http://files.grouplens.org/datasets/movielens/ml-1m.zip'), \
(KIND_MOVIELENS_10M, 'http://files.grouplens.org/datasets/movielens/ml-10m.zip'), \
(KIND_MOVIELENS_20M, 'http://files.grouplens.org/datasets/movielens/ml-20m.zip'), \
(KIND_NETFLIX, None)
)
def _set_kind_and_url(self, kind):
self.kind = kind
for k, url in self.KIND_OBJECTS:
if k == kind:
self.url = url
return True
raise NotImplementedError()
def _download_data_if_not_exists(self):
if not os.path.exists('data/{}'.format(self.kind)):
os.system('wget {url} -O data/{kind}.zip'.format(
url=self.url, kind=self.kind))
os.system(
'unzip data/{kind}.zip -d data/{kind}/'.format(kind=self.kind))
def __init_data(self, detail_path, delimiter, header=False):
current_u = 0
u_dict = {}
current_i = 0
i_dict = {}
data = []
with open('data/{}{}'.format(self.kind, detail_path), 'r') as f:
if header:
f.readline()
for line in f:
cols = line.strip().split(delimiter)
assert len(cols) == 4
# cols = [float(c) for c in cols]
user_id = cols[0]
item_id = cols[1]
r = float(cols[2])
t = int(cols[3])
u = u_dict.get(user_id, None)
if u is None:
u_dict[user_id] = current_u
u = current_u
current_u += 1
i = i_dict.get(item_id, None)
if i is None:
# print(current_i)
i_dict[item_id] = current_i
i = current_i
current_i += 1
data.append((u, i, r, t))
f.close()
data = np.array(data)
np.save('data/{}/data.npy'.format(self.kind), data)
def _init_data(self):
if self.kind == self.KIND_MOVIELENS_100K:
self.__init_data('/ml-100k/u.data', '\t')
elif self.kind == self.KIND_MOVIELENS_1M:
self.__init_data('/ml-1m/ratings.dat', '::')
elif self.kind == self.KIND_MOVIELENS_10M:
self.__init_data('/ml-10M100K/ratings.dat', '::')
elif self.kind == self.KIND_MOVIELENS_20M:
self.__init_data('/ml-20m/ratings.csv', ',', header=True)
else:
raise NotImplementedError()
def _load_base_data(self):
return np.load('data/{}/data.npy'.format(self.kind))
def _split_data(self):
data = self.data
n_shot = self.n_shot
np.random.shuffle(data)
if self.n_shot == -1:
# When n_shot is -1, use an even sparser split: 10% of all ratings for train, 90% for test.
n_train = int(data.shape[0] * 0.1)
n_valid = int(n_train * 0.9)
train_data = data[:n_valid]
valid_data = data[n_valid:n_train]
test_data = data[n_train:]
np.save(self._get_npy_path('train'), train_data)
np.save(self._get_npy_path('valid'), valid_data)
np.save(self._get_npy_path('test'), test_data)
elif self.n_shot == 0:
# When n_shot is 0, split all ratings 9:1 into train and test, as the other algorithms do.
n_train = int(data.shape[0] * 0.9)
n_valid = int(n_train * 0.98)
train_data = data[:n_valid]
valid_data = data[n_valid:n_train]
test_data = data[n_train:]
np.save(self._get_npy_path('train'), train_data)
np.save(self._get_npy_path('valid'), valid_data)
np.save(self._get_npy_path('test'), test_data)
else:
# First hold out 20% of all users as test users.
test_user_ids = random.sample(
list(range(self.n_user)), self.n_user // 5)
train_data = []
test_data = []
count_dict = {}
for i in range(data.shape[0]):
row = data[i]
user_id = int(row[0])
if user_id in test_user_ids:
count = count_dict.get(user_id, 0)
if count < n_shot:
train_data.append(row)
else:
test_data.append(row)
count_dict[user_id] = count + 1
else:
train_data.append(row)
train_data = np.array(train_data)
n_valid = int(train_data.shape[0] * 0.98)
train_data, valid_data = train_data[:n_valid], train_data[n_valid:]
np.save(self._get_npy_path('train'), train_data)
np.save(self._get_npy_path('valid'), valid_data)
test_data = np.array(test_data)
np.save(self._get_npy_path('test'), test_data)
def _get_npy_path(self, split_kind):
return 'data/{}/shot-{}/{}.npy'.format(self.kind, self.n_shot,
split_kind)
def __init__(self, kind, n_shot=0):
assert type(n_shot) == int and n_shot >= -1
_make_dir_if_not_exists('data')
self._set_kind_and_url(kind)
self._download_data_if_not_exists()
self.n_shot = n_shot
# If the cleaned npy file does not exist yet, run the preprocessing.
if not os.path.exists('data/{}/data.npy'.format(kind)):
self._init_data()
self.data = self._load_base_data()
_make_dir_if_not_exists(
'data/{}/shot-{}'.format(self.kind, self.n_shot))
self.n_user = int(np.max(self.data[:, 0])) + 1
self.n_item = int(np.max(self.data[:, 1])) + 1
self.n_row = self.n_user
self.n_col = self.n_item
# Split the data if the split files do not exist yet.
if not os.path.exists(
self._get_npy_path('train')) or not os.path.exists(
self._get_npy_path('valid')) or not os.path.exists(
self._get_npy_path('test')):
self._split_data()
self.train_data = np.load(self._get_npy_path('train'))
self.valid_data = np.load(self._get_npy_path('valid'))
self.test_data = np.load(self._get_npy_path('test'))
def get_train_data(self):
return self.train_data
def get_valid_data(self):
return self.valid_data
def get_test_data(self):
return self.test_data
# if __name__ == '__main__':
# kind = DatasetManager.KIND_MOVIELENS_100K
# kind = DatasetManager.KIND_MOVIELENS_1M
# kind = DatasetManager.KIND_MOVIELENS_10M
# kind = DatasetManager.KIND_MOVIELENS_20M
# dataset_manager = DatasetManager(kind)
``` |
{
"source": "JoonyoungYi/lol-recommend",
"score": 2
} |
#### File: lol-recommend/app/core.py
```python
import os
import time
import tensorflow as tf
import numpy as np
import pandas as pd
from .configs import *
from .models import init_models
EPOCH_NUMBER = 10000
EARLY_STOP = True
EARLY_STOP_MAX_ITER = 40
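# Training loop with early stopping on validation RMSE; the best checkpoint is saved and restored before the final evaluation.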
def _train(session, saver, models, train_data, valid_data):
model_file_path = _init_model_file_path()
prev_valid_rmse = float("Inf")
early_stop_iters = 0
for epoch in range(EPOCH_NUMBER):
if models['train_op']:
_, train_rmse = session.run(
[models['train_op'], models['rmse']],
feed_dict={
models['u']: train_data['user_id'],
models['i']: train_data['item_id'],
models['r']: train_data['rating'],
models['c']: train_data['confidence'],
})
else:
train_rmse = float("NaN")
_, valid_rmse, mu = session.run(
[models['loss'], models['rmse'], models['mu']],
feed_dict={
models['u']: valid_data['user_id'],
models['i']: valid_data['item_id'],
models['r']: valid_data['rating'],
models['c']: valid_data['confidence'],
})
# print(mu)
# if epoch % 10 == 0:
print('>> EPOCH:', "{:3d}".format(epoch), "{:3f}, {:3f}".format(
train_rmse, valid_rmse))
if EARLY_STOP:
early_stop_iters += 1
if valid_rmse < prev_valid_rmse:
prev_valid_rmse = valid_rmse
early_stop_iters = 0
saver.save(session, model_file_path)
elif early_stop_iters >= EARLY_STOP_MAX_ITER:
print("Early stopping ({} vs. {})...".format(
prev_valid_rmse, valid_rmse))
break
else:
saver.save(session, model_file_path)
return model_file_path
def _test(session, models, valid_data, test_data):
    valid_rmse = session.run(
        models['rmse'],
        feed_dict={
            models['u']: valid_data['user_id'],
            models['i']: valid_data['item_id'],
            models['r']: valid_data['rating'],
            models['c']: valid_data['confidence'],
        })
    test_rmse = session.run(
        models['rmse'],
        feed_dict={
            models['u']: test_data['user_id'],
            models['i']: test_data['item_id'],
            models['r']: test_data['rating'],
            models['c']: test_data['confidence'],
        })
print("Final valid RMSE: {}, test RMSE: {}".format(valid_rmse, test_rmse))
return valid_rmse, test_rmse
def _init_model_file_path():
folder_path = 'logs/{}'.format(int(time.time() * 1000))
    os.makedirs(folder_path)  # also creates the parent 'logs' directory if needed
return os.path.join(folder_path, 'model.ckpt')
def main(data):
K = 1
print("rank", K)
lambda_value = 0.1
N, M = 560200, 140
models = init_models(N, M, K, lambda_value)
saver = tf.train.Saver()
with tf.Session() as session:
session.run(tf.global_variables_initializer())
model_file_path = _train(session, saver, models, data['train'],
data['valid'])
print('Loading best checkpointed model')
saver.restore(session, model_file_path)
valid_rmse, test_rmse = _test(session, models, data['valid'],
data['test'])
```
#### File: lol-recommend/app/models.py
```python
import math
import tensorflow as tf
import numpy as np
def init_models(N, M, K, lambda_value):
u = tf.placeholder(tf.int32, [None], name='u')
i = tf.placeholder(tf.int32, [None], name='i')
r = tf.placeholder(tf.float32, [None], name='r')
c = tf.placeholder(tf.float32, [None], name='c')
p = tf.Variable(tf.random_normal(
[N, K], stddev=1.0 / math.sqrt(N))) # p latent matrix
q = tf.Variable(tf.random_normal(
[M, K], stddev=1.0 / math.sqrt(M))) # q latent matrix
p_lookup = tf.nn.embedding_lookup(p, u)
q_lookup = tf.nn.embedding_lookup(q, i)
mu = tf.reduce_mean(r)
b_u = tf.Variable(tf.zeros([N]))
b_i = tf.Variable(tf.zeros([M]))
b_u_lookup = tf.nn.embedding_lookup(b_u, u)
b_i_lookup = tf.nn.embedding_lookup(b_i, i)
b_ui = mu + tf.add(b_u_lookup, b_i_lookup)
r_ui_hat = tf.add(b_ui, tf.reduce_sum(tf.multiply(p_lookup, q_lookup), 1))
# r_ui_hat = mu # 0.10831531
# reconstruction_loss = tf.reduce_sum(
# tf.square(tf.subtract(r, r_ui_hat)),
# reduction_indices=[0])
reconstruction_loss = tf.reduce_sum(
tf.multiply(c, tf.square(tf.subtract(r, r_ui_hat))),
reduction_indices=[0])
regularizer_loss = tf.add_n([
tf.reduce_sum(tf.square(p)),
tf.reduce_sum(tf.square(q)),
tf.reduce_sum(tf.square(b_u)),
tf.reduce_sum(tf.square(b_i)),
])
loss = tf.add(reconstruction_loss, lambda_value * regularizer_loss)
# rmse = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(r, r_ui_hat))))
rmse = tf.sqrt(
tf.div(
tf.reduce_sum(tf.multiply(c, tf.square(tf.subtract(r, r_ui_hat)))),
tf.reduce_sum(c)))
optimizer = tf.train.AdamOptimizer(1e-4)
# optimizer = tf.train.GradientDescentOptimizer(5e-6)
train_op = optimizer.minimize(loss, var_list=[b_u, b_i, p, q])
# train_op = None
return {
'u': u,
'i': i,
'r': r,
'c': c,
'r_ui_hat': r_ui_hat,
'mu': mu,
'train_op': train_op,
'rmse': rmse,
'loss': loss,
}
```
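The `rmse` tensor defined above is a confidence-weighted RMSE, `sqrt(sum(c * (r - r_hat)^2) / sum(c))`, so observations with higher confidence weigh more. A tiny NumPy sketch of the same quantity (names are illustrative):

```python
import numpy as np


def weighted_rmse(r, r_hat, c):
    """Confidence-weighted RMSE: sqrt(sum(c * (r - r_hat)^2) / sum(c))."""
    r, r_hat, c = (np.asarray(a, dtype=float) for a in (r, r_hat, c))
    return np.sqrt(np.sum(c * (r - r_hat) ** 2) / np.sum(c))


# e.g. a win rate of 0.6 predicted as 0.5, weighted by 10 games:
print(weighted_rmse([0.6], [0.5], [10.0]))  # 0.1
```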
#### File: JoonyoungYi/lol-recommend/run.py
```python
import numpy as np
from app.core import main
def _get_dict_from_data(data):
return {
'user_id': data[:, 0],
'item_id': data[:, 1],
'rating': data[:, 4],
'confidence': data[:, 5],
}
def _init_data():
try:
train_data = np.load('train.npy')
valid_data = np.load('valid.npy')
test_data = np.load('test.npy')
    except IOError:  # split files have not been generated yet
train_data = None
valid_data = None
test_data = None
if train_data is None or valid_data is None or test_data is None:
raw_data = np.load('win_lose_data.npy')
d = {}
for i in range(raw_data.shape[0]):
for j in range(raw_data.shape[1]):
win = raw_data[i][j][0]
lose = raw_data[i][j][1]
if win + lose < 10:
continue
d[(i, j)] = (win, lose)
full_data = np.zeros((len(d.keys()), 6))
for i, (user_id, item_id) in enumerate(d.keys()):
win, lose = d[(user_id, item_id)]
full_data[i][0] = user_id
full_data[i][1] = item_id
full_data[i][2] = win
full_data[i][3] = lose
full_data[i][4] = win / (win + lose)
full_data[i][5] = win + lose
mask = np.ones(full_data.shape[0]) # 1: train mask
mask[np.random.rand(full_data.shape[0]) < 0.02] = 2 # 2: valid mask
mask[np.random.rand(full_data.shape[0]) < 0.1] = 3 # 3: test mask
train_data = full_data[mask == 1, :]
valid_data = full_data[mask == 2, :]
test_data = full_data[mask == 3, :]
np.save('train.npy', train_data)
np.save('valid.npy', valid_data)
np.save('test.npy', test_data)
return {
'train': _get_dict_from_data(train_data),
'valid': _get_dict_from_data(valid_data),
'test': _get_dict_from_data(test_data),
}
if __name__ == '__main__':
data = _init_data()
main(data)
``` |
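Note that the two independent `np.random.rand` draws above can overlap: a row first marked as validation (2) may later be overwritten as test (3), so the realized validation fraction ends up slightly below 2%. A sketch of a disjoint variant (function name illustrative):

```python
import numpy as np


def disjoint_split_mask(n, valid_ratio=0.02, test_ratio=0.1, seed=0):
    """One uniform draw per row, so valid/test assignments cannot collide."""
    u = np.random.RandomState(seed).rand(n)
    mask = np.ones(n, dtype=int)  # 1: train
    mask[u < valid_ratio] = 2  # 2: valid
    mask[(u >= valid_ratio) & (u < valid_ratio + test_ratio)] = 3  # 3: test
    return mask
```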
{
"source": "JoonyoungYi/movielens-crawler",
"score": 3
} |
#### File: movielens-crawler/checkers/duplication_rotten.py
```python
from models import Session
from models import Item
from models import RottenMovie
def _filter_items(items):
ids = set(i.original_item_id if i.original_item_id else i.id
for i in items)
return [i for i in items if i.id in ids]
session = Session()
for rotten_movie in session.query(RottenMovie):
items = rotten_movie.items.all()
if len(items) == 0:
        # raise NotImplementedError('Code that deletes the RottenMovie model still needs to be written.')
session.delete(rotten_movie)
session.commit()
elif len(items) == 1:
continue
items = _filter_items(items)
if len(items) <= 1:
continue
print(
'\n>>',
'{:4d}'.format(rotten_movie.id),
rotten_movie.year,
rotten_movie.name,
len(items), )
for item in items:
print(
' [*]',
'{:4d}'.format(item.id),
item.get_valid_years(),
item.get_pretty_name(), )
```
#### File: migrations/versions/52dfcf03e9ad_amazon_movie_column_edit.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '52dfcf03e9ad'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('amazon_movie', sa.Column('hd_buy_price', sa.Float(), nullable=True))
op.add_column('amazon_movie', sa.Column('hd_rent_price', sa.Float(), nullable=True))
op.add_column('amazon_movie', sa.Column('sd_buy_price', sa.Float(), nullable=True))
op.add_column('amazon_movie', sa.Column('sd_rent_price', sa.Float(), nullable=True))
op.create_index(op.f('ix_amazon_movie_hd_buy_price'), 'amazon_movie', ['hd_buy_price'], unique=False)
op.create_index(op.f('ix_amazon_movie_hd_rent_price'), 'amazon_movie', ['hd_rent_price'], unique=False)
op.create_index(op.f('ix_amazon_movie_sd_buy_price'), 'amazon_movie', ['sd_buy_price'], unique=False)
op.create_index(op.f('ix_amazon_movie_sd_rent_price'), 'amazon_movie', ['sd_rent_price'], unique=False)
op.drop_index('ix_amazon_movie_buy_hd_price', table_name='amazon_movie')
op.drop_index('ix_amazon_movie_buy_sd_price', table_name='amazon_movie')
op.drop_index('ix_amazon_movie_rent_hd_price', table_name='amazon_movie')
op.drop_index('ix_amazon_movie_rent_sd_price', table_name='amazon_movie')
op.drop_column('amazon_movie', 'buy_hd_price')
op.drop_column('amazon_movie', 'buy_sd_price')
op.drop_column('amazon_movie', 'rent_sd_price')
op.drop_column('amazon_movie', 'rent_hd_price')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('amazon_movie', sa.Column('rent_hd_price', mysql.FLOAT(), nullable=True))
op.add_column('amazon_movie', sa.Column('rent_sd_price', mysql.FLOAT(), nullable=True))
op.add_column('amazon_movie', sa.Column('buy_sd_price', mysql.FLOAT(), nullable=True))
op.add_column('amazon_movie', sa.Column('buy_hd_price', mysql.FLOAT(), nullable=True))
op.create_index('ix_amazon_movie_rent_sd_price', 'amazon_movie', ['rent_sd_price'], unique=False)
op.create_index('ix_amazon_movie_rent_hd_price', 'amazon_movie', ['rent_hd_price'], unique=False)
op.create_index('ix_amazon_movie_buy_sd_price', 'amazon_movie', ['buy_sd_price'], unique=False)
op.create_index('ix_amazon_movie_buy_hd_price', 'amazon_movie', ['buy_hd_price'], unique=False)
op.drop_index(op.f('ix_amazon_movie_sd_rent_price'), table_name='amazon_movie')
op.drop_index(op.f('ix_amazon_movie_sd_buy_price'), table_name='amazon_movie')
op.drop_index(op.f('ix_amazon_movie_hd_rent_price'), table_name='amazon_movie')
op.drop_index(op.f('ix_amazon_movie_hd_buy_price'), table_name='amazon_movie')
op.drop_column('amazon_movie', 'sd_rent_price')
op.drop_column('amazon_movie', 'sd_buy_price')
op.drop_column('amazon_movie', 'hd_rent_price')
op.drop_column('amazon_movie', 'hd_buy_price')
# ### end Alembic commands ###
```
#### File: migrations/versions/b48d3707d548_rottenmovie_add_columns_2.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '60dd503aa24b'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('rotten_movie', sa.Column('affiliate_amazon_url', sa.String(length=1000), nullable=True))
op.add_column('rotten_movie', sa.Column('affiliate_amazon_valid', sa.Boolean(), nullable=True))
op.add_column('rotten_movie', sa.Column('affiliate_apple_url', sa.String(length=1000), nullable=True))
op.add_column('rotten_movie', sa.Column('affiliate_apple_valid', sa.Boolean(), nullable=True))
op.add_column('rotten_movie', sa.Column('affiliate_fandangonow_url', sa.String(length=1000), nullable=True))
op.add_column('rotten_movie', sa.Column('affiliate_fandangonow_valid', sa.Boolean(), nullable=True))
op.add_column('rotten_movie', sa.Column('affiliate_netflix_url', sa.String(length=1000), nullable=True))
op.add_column('rotten_movie', sa.Column('affiliate_netflix_valid', sa.Boolean(), nullable=True))
op.add_column('rotten_movie', sa.Column('affiliate_vudu_url', sa.String(length=1000), nullable=True))
op.add_column('rotten_movie', sa.Column('affiliate_vudu_valid', sa.Boolean(), nullable=True))
op.create_index(op.f('ix_rotten_movie_affiliate_amazon_valid'), 'rotten_movie', ['affiliate_amazon_valid'], unique=False)
op.create_index(op.f('ix_rotten_movie_affiliate_apple_valid'), 'rotten_movie', ['affiliate_apple_valid'], unique=False)
op.create_index(op.f('ix_rotten_movie_affiliate_fandangonow_valid'), 'rotten_movie', ['affiliate_fandangonow_valid'], unique=False)
op.create_index(op.f('ix_rotten_movie_affiliate_netflix_valid'), 'rotten_movie', ['affiliate_netflix_valid'], unique=False)
op.create_index(op.f('ix_rotten_movie_affiliate_vudu_valid'), 'rotten_movie', ['affiliate_vudu_valid'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_rotten_movie_affiliate_vudu_valid'), table_name='rotten_movie')
op.drop_index(op.f('ix_rotten_movie_affiliate_netflix_valid'), table_name='rotten_movie')
op.drop_index(op.f('ix_rotten_movie_affiliate_fandangonow_valid'), table_name='rotten_movie')
op.drop_index(op.f('ix_rotten_movie_affiliate_apple_valid'), table_name='rotten_movie')
op.drop_index(op.f('ix_rotten_movie_affiliate_amazon_valid'), table_name='rotten_movie')
op.drop_column('rotten_movie', 'affiliate_vudu_valid')
op.drop_column('rotten_movie', 'affiliate_vudu_url')
op.drop_column('rotten_movie', 'affiliate_netflix_valid')
op.drop_column('rotten_movie', 'affiliate_netflix_url')
op.drop_column('rotten_movie', 'affiliate_fandangonow_valid')
op.drop_column('rotten_movie', 'affiliate_fandangonow_url')
op.drop_column('rotten_movie', 'affiliate_apple_valid')
op.drop_column('rotten_movie', 'affiliate_apple_url')
op.drop_column('rotten_movie', 'affiliate_amazon_valid')
op.drop_column('rotten_movie', 'affiliate_amazon_url')
# ### end Alembic commands ###
```
#### File: movielens-crawler/parsers/amazon_movie.py
```python
from traceback import format_exc
import re
import requests
from models import Session
from models import RottenMovie
from models import AmazonMovie
from models import WebPage
def main():
session = Session()
for idx, rotten_movie in enumerate(
session.query(RottenMovie).filter(
RottenMovie.amazon_movie_id.is_(None),
RottenMovie.affiliate_amazon_valid.is_(True), )):
# if rotten_movie.id < 1623:
# continue
m = re.search('^http://www.amazon.com/gp/product/(?P<path>.*)$',
rotten_movie.affiliate_amazon_url)
if not m:
assert re.search('^http://www.amazon.com/gp/video/primesignup',
rotten_movie.affiliate_amazon_url)
continue
url = '/{}'.format(m.group('path'))
print('>>', rotten_movie.id, url)
amazon_movie = session.query(AmazonMovie).filter_by(url=url).first()
if amazon_movie is None:
amazon_movie = AmazonMovie(url=url)
session.add(amazon_movie)
rotten_movie.amazon_movie = amazon_movie
if idx % 100 == 0:
session.commit()
session.commit()
if __name__ == '__main__':
main()
```
#### File: movielens-crawler/parsers/apple_movie_web_page.py
```python
from traceback import format_exc
import requests
from models import Session
from models import AppleMovie
from models import WebPage
from utils.cdn import save_to_cdn
def __request(path):
try:
r = requests.get('https://itunes.apple.com/us/movie{}'.format(path))
return r.text
    except Exception:  # log the traceback and treat the page as unavailable
print(format_exc())
return None
def __save(key, text):
return save_to_cdn(key, text)
def _request_and_save(key, path):
text = __request(path)
if not text:
return False
return __save(key, text)
def main():
session = Session()
for idx, apple_movie in enumerate(
session.query(AppleMovie).filter(AppleMovie.web_page_id.is_(
None))):
# if apple_movie.id < 1623:
# continue
print(
'>>',
'{:4d}'.format(apple_movie.id), )
if not apple_movie.url:
continue
web_page = WebPage()
session.add(web_page)
apple_movie.web_page = web_page
session.commit()
success = _request_and_save(web_page.key, apple_movie.url)
if not success:
session.delete(web_page)
apple_movie.web_page = None
session.commit()
print(' [-] Fail')
else:
print(' [+] Success')
# break
if __name__ == '__main__':
main()
```
#### File: movielens-crawler/parsers/item_price.py
```python
from traceback import format_exc
import re
import requests
from models import Session
from models import Item
def main():
session = Session()
for idx, item in enumerate(
session.query(Item).filter(
Item.price.is_(None),
Item.rotten_movie_id.isnot(None), )):
# if rotten_movie.id < 1623:
# continue
print(">>", item.id)
rotten_movie = item.rotten_movie
if not rotten_movie:
continue
amazon_movie = rotten_movie.amazon_movie
apple_movie = rotten_movie.apple_movie
if not apple_movie and not amazon_movie:
continue
prices = []
if amazon_movie:
prices.append(amazon_movie.get_price())
if apple_movie:
prices.append(apple_movie.price)
prices = [p for p in prices if p and p > 0.0]
if not prices:
continue
price = sum(prices) / len(prices)
item.price = price
if idx % 100 == 0:
session.commit()
session.commit()
if __name__ == '__main__':
main()
```
#### File: movielens-crawler/parsers/rotten_movie_web_page_parser.py
```python
from traceback import format_exc
import requests
from bs4 import BeautifulSoup
from models import Session
from models import RottenMovie
from models import WebPage
from utils.cdn import get_soup_from_web_page
def _get_soup_from_rotten_movie(rotten_movie):
return get_soup_from_web_page(rotten_movie.web_page)
def _get_affiliate_url(movie_div, div_id):
for a_tag in movie_div.find_all('a', recursive=False):
div = a_tag.find('div', recursive=False)
assert div['id'] in ("amazonAffiliates", 'itunesAffiliates',
'FandangoNow', 'vuduAffiliates',
'netflixAffiliates', )
if div['id'] == div_id:
return a_tag['href']
return None
def main():
session = Session()
for idx, rotten_movie in enumerate(
session.query(RottenMovie).filter(
RottenMovie.web_page_id.isnot(None))):
if rotten_movie.id < 434:
continue
print(
'>>',
'{:4d}'.format(rotten_movie.id), )
soup = _get_soup_from_rotten_movie(rotten_movie)
        if soup is None:
            raise NotImplementedError()
movie_divs = soup.find_all('div', 'movie_links')
if len(movie_divs) > 1:
print(len(movie_divs))
raise NotImplementedError()
if len(movie_divs) <= 0:
continue
movie_div = movie_divs[0]
for keyword, div_id in [
('amazon', "amazonAffiliates"),
('apple', 'itunesAffiliates'),
('fandangonow', 'FandangoNow'),
('vudu', 'vuduAffiliates'),
('netflix', 'netflixAffiliates', ),
]:
prefix = 'affiliate_{}'.format(keyword)
valid_column_name = '{}_valid'.format(prefix)
url_column_name = '{}_url'.format(prefix)
affiliate_url = _get_affiliate_url(movie_div, div_id)
if affiliate_url:
setattr(rotten_movie, valid_column_name, True)
setattr(rotten_movie, url_column_name, affiliate_url)
else:
setattr(rotten_movie, valid_column_name, False)
setattr(rotten_movie, url_column_name, '')
session.commit()
if __name__ == '__main__':
main()
``` |
{
"source": "JoonyoungYi/project-news",
"score": 3
} |
#### File: project-news/research/alg4.py
```python
import csv
import json
from konlpy.tag import Kkma
from collections import Counter
from operator import itemgetter
import itertools
import re, random
#########
# #
#########
def trim_sentences(sentences):
# remove duplicates
j = 0
while ( j < len(sentences)):
sentence = sentences[j]
if sentence in sentences[j+1:]:
sentences.remove(sentence)
else :
j += 1
# split ▲ , -
i = 0
while ( i < len(sentences) ):
sentence = sentences[i]
if u'…' in sentence:
temp = re.split(u'…', sentences[i])
if len(temp) > 1:
sentences.pop(i)
for j in range(0, len(temp)):
sentences.insert(i+j, temp[j])
        if u'. ' in sentence:
            temp = re.split(u'\. ', sentences[i])  # escape the dot: split on a literal '. '
if len(temp) > 1:
sentences.pop(i)
for j in range(0, len(temp)):
sentences.insert(i+j, temp[j])
if u' ' in sentence:
temp = re.split(u' ', sentences[i])
if len(temp) > 1:
sentences.pop(i)
for j in range(0, len(temp)):
sentences.insert(i+j, temp[j])
if u'▲' in sentence:
temp = re.split(u'▲', sentences[i])
if len(temp) > 1:
sentences.pop(i)
for j in range(0, len(temp)):
sentences.insert(i+j, temp[j])
if sentence.count(u'-') >= 2:
temp = re.split(u'-', sentences[i])
if len(temp) > 1:
sentences.pop(i)
for j in range(0, len(temp)):
sentences.insert(i+j, temp[j])
i += 1
# merge_sentence
i = 0
while ( i < len(sentences)):
sentence = sentences[i]
if len(sentence) < 10:
if '.' in sentence[len(sentence)/2:] :
if i > 0 :
sentences[i-1] += sentence
sentences.pop(i)
else :
i += 1
else :
if i < (len(sentences) - 1 ):
sentences[i+1] = sentence + sentences[i+1]
sentences.pop(i)
else :
i += 1
else :
i += 1
# split using http*.jpg
i = 0
while ( i < len(sentences) ):
sentence = sentences[i]
if u'http' in sentence and u'.jpg' in sentence:
temp = re.split(u'http.*?\.jpg', sentences[i])
if len(temp) > 1:
sentences.pop(i)
for j in range(0, len(temp)):
sentences.insert(i+j, temp[j])
i += len(temp)
else :
i += 1
# remove [*] or some keywords
i = 0
while ( i < len(sentences)):
if ( u'◀' in sentences[i] and u'▶' in sentences[i]):
temp = re.split(u'◀.*?▶', sentences[i])
sentences[i] = ' '.join(temp).strip()
if u'글꼴 선택 본문 텍스트 크게 본문 텍스트 작게 스크랩' in sentences[i]:
sentences[i] = sentences[i].replace(u'글꼴 선택 본문 텍스트 크게 본문 텍스트 작게 스크랩', '')
if u'Copyrightⓒ 한국경제 TV. All rights reserved. (http: //www .wowtv .co .kr) 무단 전재 및 재배포 금지' in sentences[i]:
sentences[i] = sentences[i].replace(u'Copyrightⓒ 한국경제 TV. All rights reserved. (http: //www .wowtv .co .kr) 무단 전재 및 재배포 금지', '')
if u'+ - Tag Match < 저작권자 © 글로벌 이코노믹 무단 전재 및 재배포금지>' in sentences[i]:
sentences[i] = sentences[i].replace(u'+ - Tag Match < 저작권자 © 글로벌 이코노믹 무단 전재 및 재배포금지>', '')
i += 1
#
i = 0
while ( i < len(sentences)):
        if u'(' in sentences[i] and u')' in sentences[i] and u'뉴스' in sentences[i]:
temp = re.split(u'\(.*뉴스.*?\)', sentences[i])
if len(temp) > 1:
sentences[i] = temp[1]
for j in range(2, len(temp)):
sentences.insert(i+j-1, temp[j])
if ( u'[' in sentences[i] and u']' in sentences[i]):
temp = re.split(u'\[.*?\]', sentences[i])
if len(temp) > 1:
sentences[i] = temp[1]
for j in range(2, len(temp)):
sentences.insert(i+j-1, temp[j])
        if u'(' in sentences[i] and u')' in sentences[i] and u'기자' in sentences[i]:
temp = re.split(u'\(.*기자.*?\)', sentences[i])
if len(temp) > 1:
sentences[i] = temp[1]
for j in range(2, len(temp)):
sentences.insert(i+j-1, temp[j])
if u'기자' in sentences[i] and u'=' in sentences[i]:
temp = re.split(u'기자.*?=', sentences[i])
if len(temp) > 1:
sentences[i] = temp[1]
for j in range(2, len(temp)):
sentences.insert(i+j-1, temp[j])
if u'사진' in sentences[i] and u'=' in sentences[i]:
temp = re.split(u'사진.*?=', sentences[i])
if len(temp) > 1:
sentences[i] = temp[1]
for j in range(2, len(temp)):
sentences.insert(i+j-1, temp[j])
if u'특파원' in sentences[i] and u'=' in sentences[i]:
temp = re.split(u'특파원.*?=', sentences[i])
if len(temp) > 1:
sentences[i] = temp[1]
for j in range(2, len(temp)):
sentences.insert(i+j-1, temp[j])
i += 1
# remove null sentences
i = 0
while ( i < len(sentences)):
sentence = sentences[i]
if len(sentence.strip()) == 0 :
sentences.pop(i)
else :
i += 1
return sentences
#########
# #
#########
def find_index(answer_sentence, sentences):
#
answer = answer_sentence.replace(' ', '')
#
for i in range(0, len(sentences)):
sentence = sentences[i]
sentence = sentence.replace(' ', '')
#
if answer in sentence:
return i
#
for i in range(0, len(sentences)):
sentence = sentences[i]
if i > 0 :
sentence = sentences[i-1][ len(sentences[i-1])/2: ] + sentence
if i < (len(sentences)- 1):
sentence = sentence + sentences[i+1][:len(sentences[i+1])/2]
sentence = sentence.replace(' ', '')
#
if answer in sentence:
return i
#
candidates = []
for i in range(0, len(sentences)):
sentence = sentences[i]
if i > 0 :
sentence = sentences[i-1] + sentence
if i < (len(sentences)- 1):
sentence = sentence + sentences[i+1]
sentence = sentence.replace(' ', '')
#
if answer in sentence:
candidates.append(i)
if len(candidates) > 1:
max_len = max( len(sentences[index]) for index in candidates )
for index in candidates:
if len(sentences[index]) == max_len:
return index
#print " = " + answer_sentence
#print len(sentences)
#for s in sentences:
# print " - " + s
#assert(False)
def get_keyword_weight(kkma, sentences):
def get_words(kkma, sentences):
z = []
for sentence in sentences:
z.extend( word for word, t in kkma.pos(sentence) \
if len(word) > 0 and ( t == 'NNG' or t == 'OL') )
words = list(set(z))
return words
def get_matrix(kkma, words, sentences):
word_index = dict()
for j, word in enumerate(words) :
word_index[word] = j
word_matrix = [ [ 0 for j in range(len(words)) ] for i in range(len(words)) ]
head_words = []
        for j, sentence in enumerate(sentences):
            # position-based sentence weight (earlier sentences count more);
            # the original indexed these checks with an undefined `i`
            w = 1
            if j == 0:
                w = 2.128
            elif j == 1:
                w = 1.471
            elif j == 2:
                w = 1.220
            elif j == 3:
                w = 1.031
            elif j == 4:
                w = 1.031
            elif j == 5:
                w = 1.020
"""
w = 1
if j == 0:
w = 4
elif j==1:
w = 3
elif j == 2:
w = 2
"""
sentence_words = list(set([ word for word, t in kkma.pos(sentence) \
if len(word) > 0 and ( t == 'NNG' or t == 'OL') ]))
if j <= 3:
head_words.extend(sentence_words)
for a, b in itertools.permutations(sentence_words, 2):
if word_index.get(a) != None and word_index.get(b) != None :
word_matrix[word_index[a]][word_index[b]] += w
for j in range(len(word_matrix)):
row = word_matrix[j]
s = sum(row)
if s > 0 :
word_matrix[j] = [ 0.1 + 0.9 * float(e) / float(s) for e in row ]
#print ' '.join([ str(e) for e in word_matrix[j]] )
return word_matrix
def get_eigen_vector(word_matrix_t):
word_matrix = [ [ 0 for e in range(len(word_matrix_t))] for e in range(len(word_matrix_t)) ]
for i in range(len(word_matrix_t)):
for j in range(len(word_matrix_t)):
word_matrix[i][j] = word_matrix_t[j][i]
x = [ float(1) / float(len(word_matrix)) for i in range(len(word_matrix))]
for j in range(100):
y = [ sum((a * b) for a, b in itertools.izip(word_matrix[i], x)) for i in range(len(word_matrix))]
x = [ e/sum(y) for e in y ]
#print x
return x
#
words = get_words(kkma, sentences)
word_matrix = get_matrix(kkma, words, sentences)
x = get_eigen_vector(word_matrix)
word_weight = [ ( words[i], a ) for i, a in enumerate(x) ]
word_weight = sorted(word_weight, key=itemgetter(1))
keywords = dict([])
weight_max = max( weight for word, weight in word_weight)
weight_min = min( weight for word, weight in word_weight)
for word, weight in word_weight:
w = 1
if weight_max != weight_min :
w = (weight - weight_min) / (weight_max - weight_min) # * (weight / weight_max)
keywords[word] = w * w
#print word, keywords[word]
#
"""
c = Counter(z)
a = [ ( word, c[word] ) for word in c if c[word] > 1 ]
a = sorted(a, key=itemgetter(1), reverse=True)
keywords = dict([])
for word, c in a:
keywords[word] = c * c
print word, c
"""
"""
z = []
for sentence in sentences[:3]:
z.extend( word for word, t in kkma.pos(sentence) \
if len(word) > 1 and ( t == 'NNG' or t == 'OL') )
z = list(set(z))
z_last = []
for sentence in sentences[3:]:
z_last.extend([ word for word, t in kkma.pos(sentence) \
if len(word) > 1 and ( t == 'NNG' or t == 'OL')])
ratio = float(len([ word for word in z_last if word in z ])) / float(len(z_last))
if ratio > 0.1 :
for word in z :
if keywords.get(word) != None:
keywords[word] = keywords[word] * 2
"""
return keywords
#########
# #
#########
def forasterisk_algorithm(sentences):
    def amplify_factor(sentence):
        # NOTE: amplification is currently disabled; the heuristics below are
        # kept for reference but never reached.
        return 1
sentence = sentence.replace(' ', '')
f = 1
if u'밝혔다' in sentence:
f *= 1.1
if u'확인됐다' in sentence:
f *= 1.2
if u'시작했다' in sentence:
f *= 1.1
if u'하고있다' in sentence:
f *= 0.5
if u'되고있다' in sentence:
f *= 0.8
if u'?' in sentence:
f *= 0.6
if u'나타났다' in sentence:
f *= 1.2
if u'모습을보이고있다' in sentence:
f *= 0.8
if u'멘붕' in sentence:
f *= 0.6
if u'하지만' in sentence:
f *= 0.8
if u'사진' in sentence and u'있다' in sentence:
f *= 0.5
if not u'.' in sentence:
f *= 0.5
if u'제공' in sentence and (u'왼' in sentence or u'오른' in sentence or u'위' in sentence or u'아래' in sentence):
f *= 0.5
return f
# init
kkma = Kkma()
#
keywords = get_keyword_weight(kkma, sentences)
#
max_i = -1;
max_sentence = None;
max_sum = -1;
avg_sentence_len = float(sum(len(s) for s in sentences)) / float(len(sentences))
for i, sentence in enumerate(sentences) :
sentence_keywords = list(set([ word for word, t in kkma.pos(sentence) if len(word) > 0 and ( t == 'NNG' or t == 'VV' ) ]))
if len(sentence_keywords) == 0:
continue
w = 1
if i == 0:
w = 2.128
elif i == 1:
w = 1.471
elif i == 2:
w = 1.220
elif i == 3:
w = 1.031
elif i == 4:
w = 1.031
elif i == 5:
w = 1.020
len_panelty = (len(sentence) - avg_sentence_len) * 0.25 + avg_sentence_len
s = float(w) * sum(keywords[word] for word in sentence_keywords if keywords.get(word) != None) \
/ len_panelty
s *= amplify_factor(sentence)
#print "-> ", i, s, len(sentence), len(sentence_keywords)
if s > max_sum :
max_sum = s
max_i = i
max_sentence = sentence
#
target_index = max_i
#print max_sentence
#print max_sum
return target_index
#########
# #
#########
def get_answer_sentences_index(answer_sentences, sentences):
#
answer_sentences_index = []
for answer_sentence in answer_sentences:
index = find_index(answer_sentence, sentences)
if index != None:
answer_sentences_index.append(index)
#
answer_sentences_index = list(set(answer_sentences_index))
return answer_sentences_index
#########
# #
#########
tot = 0
cnt = 0
kkma = Kkma()
#cases = [ 75 ]
#for j in range(10):
# cases.append(random.randint(0, 269))
cases = range(0, 270)
for j in cases:
# init
f = open('database/%d.json' % j, "rb")
result = json.loads(f.read())
sentences = trim_sentences(result['sentences'])
answer_sentences = result['answer_sentences']
answer_sentences_index = get_answer_sentences_index(answer_sentences, sentences)
# CORE
target_index = forasterisk_algorithm(sentences)
#target_index = 0
#target_index = random.randint(0, len(sentences)-1)
#
if len(answer_sentences_index) > 0:
tot += 1
if target_index in answer_sentences_index :
cnt += 1
else :
# print part
print '\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
print '>> TITLE %d\t: %s' % (j, result['title'])
print '>> GROUND ANSWER INDEX :', answer_sentences_index
print '>> GROUND ANSWER : '
for index in answer_sentences_index:
print '>> ' + sentences[index]
print '>> ALGORITHM ANSWER INDEX :', target_index
print '>> ALGORITHM ANSWER : '
print '>> ' + sentences[target_index]
# final
f.close()
print '\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
print '>> total -> ' + str(tot)
print '>> count -> ' + str(cnt)
print '>> exact -> ' + str(cnt * 100 / tot) + "%"
```
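The `get_eigen_vector` helper above is a fixed-iteration power method on the transposed, row-normalized word co-occurrence matrix, i.e. a TextRank-style keyword weighting. A minimal NumPy sketch of the same idea (assuming the iterate never sums to zero):

```python
import numpy as np


def power_iteration(word_matrix, n_iter=100):
    """Approximate the dominant eigenvector of the transposed matrix,
    renormalizing to a probability vector at every step."""
    m = np.asarray(word_matrix, dtype=float).T  # transpose, as in get_eigen_vector
    x = np.full(m.shape[0], 1.0 / m.shape[0])
    for _ in range(n_iter):
        y = m.dot(x)
        x = y / y.sum()
    return x
```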
#### File: project-news/research/utils_newspaper.py
```python
from newspaper import Article
def get_title_text(url):
article = Article(url, language='ko')
article.download()
article.parse()
title = article.title
text = article.text
return title, text
``` |
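Hypothetical usage of the helper above (the URL and module name are illustrative; the `newspaper3k` package must be installed):

```python
# pip install newspaper3k
from utils_newspaper import get_title_text

title, text = get_title_text('https://news.example.com/some-article')
print(title)
print(text[:200])
```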
{
"source": "JoonyoungYi/project-seesae",
"score": 3
} |
#### File: JoonyoungYi/project-seesae/models.py
```python
import datetime, random, math
class ProductModel:
def __init__(self, product_id, product_type, product_name, img_url=None):
self.id = product_id
        if product_type == 1:
            self.type = '농산물'  # agricultural
        elif product_type == 2:
            self.type = '수산물'  # fishery
        elif product_type == 3:
            self.type = '축산물'  # livestock
        else:
            self.type = ''
self.name = product_name
self.img_url = img_url
self.change_day = int(math.floor((random.random() - 0.5) * 10))
self.change_week = int(math.floor((random.random() - 0.5) * 10))
self.change_month = int(math.floor((random.random() - 0.5) * 10))
def setSeason(self, season_start, season_end):
d = datetime.date.today()
current = d.month * 100 + d.day
start = season_start.month * 100 + season_start.day
end = season_end.month * 100 + season_end.day
self.season_start = '%02d' % (season_start.month) + '-' + '%02d' % (season_start.day)
self.season_end = '%02d' % (season_end.month) + '-' + '%02d' % (season_end.day)
        # the season may wrap the new year (start > end), hence the two cases
        if (start <= end and start <= current <= end) or \
                (start > end and (current >= start or current <= end)):
            self.season = True
        else:
            self.season = False
class CommentModel:
def __init__(self, comment_id, user_email, comment_content, timestamp):
self.id = comment_id
self.email = user_email
self.content = comment_content
self.timestamp = timestamp
class StoreModel:
def __init__(self, store_name, latitude, longitude):
self.name = store_name
self.latitude = latitude
self.longitude = longitude
class PriceChartModel:
def __init__(self, product_class_id, product_class_name):
self.label_color = "#AAAAAA"
self.label_color_r = int("AA", 16)
self.label_color_g = int("AA", 16)
self.label_color_b = int("AA", 16)
self.product_class_id = product_class_id
self.product_class_name = product_class_name
def setPrice_values(self, price_values):
self.price_values = price_values
def setLabel_color(self, label_color):
self.label_color = label_color
self.label_color_r = int(label_color[1:3], 16)
self.label_color_g = int(label_color[3:5], 16)
self.label_color_b = int(label_color[5:] , 16)
print self.label_color_r
print self.label_color_g
print self.label_color_b
```
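`setSeason` above encodes dates as `month * 100 + day`, so a season that wraps the new year (e.g. December through February) can still be tested with two comparisons. A standalone sketch of the same check (function name illustrative):

```python
def in_season(current, start, end):
    """All arguments are month * 100 + day encodings, e.g. 1215 for Dec 15."""
    if start <= end:  # season contained in one calendar year
        return start <= current <= end
    return current >= start or current <= end  # season wraps the new year


print(in_season(115, 1201, 228))  # Jan 15 within Dec 1 - Feb 28 -> True
```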
#### File: project-seesae/parser/db_kamis_parser.py
```python
# -*- coding:utf-8 -*-
from urllib import urlopen
from bs4 import BeautifulSoup
from datetime import date
from datetime import timedelta
from datetime import datetime
class KamisDataRetriever:
def __init__(self, retrieveCategory, retrieveItem, fromDate, kindCode = ''):
self.retrieveCategory = retrieveCategory
self.retrieveItem = retrieveItem
self.kindCode = kindCode
self.fromDate = self.dateManipulator(fromDate)
def dateManipulator(self, date):
assert isinstance(date, str)
return datetime.strptime(date, '%Y-%m-%d').date()
def soupCooker(self):
assert isinstance(self.retrieveCategory, str)
assert isinstance(self.retrieveItem, str)
assert isinstance(self.kindCode, str)
assert isinstance(self.fromDate, date)
url = 'https://www.kamis.co.kr/customer/price/retail/item.do?regday='+self.fromDate.isoformat()+'&itemcategorycode='+self.retrieveCategory + '&itemcode='+self.retrieveItem+'&kindcode='+self.kindCode
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
return soup
def produceOutput(self):
l = []
while self.fromDate - timedelta(days=1) != date.today():
print self.fromDate
cat = 0
if self.fromDate.weekday() < 5:
s = self.soupCooker()
div = s.table.parent
table_child = div.findAll(name='table')
for child in table_child:
try:
temp = []
td_c = child.tbody.tr.findAll(name='td')
for c in td_c:
temp.append(c.string)
if len(temp) >2:
tup = (cat, self.fromDate.isoformat(), int(''.join(temp[1].split(','))))
l.append(tup)
else:
tup = (cat, self.fromDate.isoformat(), 0)
l.append(tup)
cat += 1
except:
print 'ERROR!!'
continue
self.fromDate = self.fromDate + timedelta(days = 1)
return l
def productClassList(self):
l = []
s = self.soupCooker()
div = s.table.parent
table_child = div.findAll(name='table')
for child in table_child:
caption = child.caption.span.string.split('>')
tup = (caption[4].strip().encode('utf-8'), caption[5].strip().encode('utf-8'))
l.append(tup)
return l
```
#### File: JoonyoungYi/project-seesae/run.py
```python
from flask import Flask, g, render_template, redirect, request, session, url_for
from db import db_connect, db_insert_favorite, db_insert_hate, db_insert_comment
from config import *
from models import *
import datetime, math, itertools
import sys, random
# -----------------------------------------------------------------------------
# FOR ENCODING
# -----------------------------------------------------------------------------
reload(sys)
sys.setdefaultencoding('utf-8')
# -----------------------------------------------------------------------------
# BASE AND MAIN
# -----------------------------------------------------------------------------
app = Flask(__name__)
app.config.from_object(__name__)
"""
BASE REQUEST
"""
@app.before_request
def before_request():
g.db = db_connect()
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None :
db.close()
# -----------------------------------------------------------------------------
# SECTION FOR MAIN PAGE
# -----------------------------------------------------------------------------
@app.route('/')
def show_main():
return make_main('all')
@app.route('/agricultural')
def show_main_agiricultural():
return make_main('agricultural')
@app.route('/fishery')
def show_main_fishery():
return make_main('fishery')
@app.route('/livestock')
def show_main_livestock():
return make_main('livestock')
def make_main(product_type):
if not is_logged_in():
return redirect(url_for('login'))
# user_id
if 'user_id' in session :
user_id = session['user_id']
cur = g.db.cursor()
for_display = None
if product_type == 'agricultural':
cur.execute('SELECT * FROM %s WHERE %s=1' % (table_product, column_product_type))
for_display = { "wp" : "wallpaper-agriculture", "title":"AGRICULTURAL"}
elif product_type == 'fishery':
cur.execute('SELECT * FROM %s WHERE %s=2' % (table_product, column_product_type))
for_display = { "wp" : "wallpaper-fishery", "title":"FISHERY" }
elif product_type == 'livestock':
cur.execute('SELECT * FROM %s WHERE %s=3' % (table_product, column_product_type))
for_display = { "wp" : "wallpaper-livestock", "title":"LIVESTOCK" }
else :
cur.execute('SELECT * FROM %s' % (table_product))
for_display = { "wp" : "wallpaper-agriculture", "title":"SEE-SAE" }
rows = cur.fetchall()
products = []
for row in rows:
product = ProductModel(row[0], row[1], row[2], row[3])
if row[4] == None or row[5] == None:
continue
product.setSeason(row[4], row[5])
products.append(product)
random.shuffle(products)
    ###
    # filter out products the current user marked as hated
    cur.execute('SELECT * FROM %s WHERE %s=%d' % (table_hate, column_user_id, user_id))
rows = cur.fetchall()
id_hates = []
    for row in rows:
        id_hates.append(row[3])
new_products = []
for product in products:
if not product.id in id_hates:
new_products.append(product)
products = new_products
###
return render_template('main.html', product_type=product_type, products=products, for_display=for_display)
"""
SECTION FOR DETAIL
"""
@app.route('/<int:product_id>/')
def show_detail(product_id):
if not is_logged_in():
return redirect(url_for('login'))
cur = g.db.cursor()
# user_id
if 'user_id' in session :
user_id = session['user_id']
# PRODUCT!!!! and SEASON!!!
cur.execute('SELECT * FROM %s WHERE %s=%d LIMIT 1' % (table_product, column_product_id, product_id))
row = cur.fetchall()[0]
product = ProductModel(row[0], row[1], row[2], row[3])
if row[4] == None or row[5] == None:
return redirect(url_for('show_main'))
product.setSeason(row[4], row[5])
# PRICES
cur.execute('SELECT * FROM %s WHERE %s=%d' % (table_product_class, column_product_id, product_id))
price_charts_day = []
price_charts_week = []
price_charts_month = []
rows = cur.fetchall()
for row in rows:
price_chart_day = PriceChartModel(row[0], row[1])
price_charts_day.append(price_chart_day)
price_chart_week = PriceChartModel(row[0], row[1])
price_charts_week.append(price_chart_week)
price_chart_month = PriceChartModel(row[0], row[1])
price_charts_month.append(price_chart_month)
price_date_end = datetime.date.today()
price_date_end_str = price_date_end.strftime("%Y-%m-%d")
price_date_start = price_date_end + datetime.timedelta(days=-7)
price_date_start_str = price_date_start.strftime("%Y-%m-%d")
colors = ['#FF6F00', '#FF8F00', '#FFA000', '#FFB300', '#FFC107', '#FFCA28']
def setLabelColors(price_charts):
price_charts.sort(key=lambda x: (sum( x.price_values ) / len(x.price_values)) , reverse=True)
for i, price_chart in enumerate(price_charts):
price_chart.setLabel_color(colors[ int(math.floor( len(colors) * i / len(price_charts) )) ])
return price_charts
for price_chart in price_charts_day:
cur.execute('SELECT ROUND(AVG(%s)), DAY(%s) FROM %s WHERE %s=%d and %s BETWEEN \'%s\' and \'%s\' \
GROUP BY %s ORDER BY %s ASC' \
% (column_price_value, column_price_date, table_price, \
column_product_class_id, price_chart.product_class_id, column_price_date, \
price_date_start_str, price_date_end_str, \
column_price_date, column_price_date))
rows = cur.fetchall()
price_values = []
old_value = None
for row in rows:
if old_value != None:
if old_value + 1 != int(row[1]):
for i in range( 1, int(row[1]) - old_value) :
price_values.append(0)
old_value = int(row[1])
price_values.append(int(row[0]))
if (len(price_values) <= 8 ):
for i in range( 8 - len(price_values)):
price_values.append(0)
print len (price_values)
assert( len (price_values) == 8 )
price_chart.price_values = price_values
price_charts_day = setLabelColors(price_charts_day)
price_date_start = price_date_end + datetime.timedelta(weeks=-12)
price_date_start_str = price_date_start.strftime("%Y-%m-%d")
for price_chart in price_charts_week:
cur.execute('SELECT ROUND(AVG(%s)), WEEK(%s) FROM %s WHERE %s=%d and %s > 0 and %s BETWEEN \'%s\' and \'%s\' \
GROUP BY CONCAT(YEAR(%s), \'/\', WEEK(%s)) ORDER BY %s ASC' \
% (column_price_value, column_price_date, table_price, \
column_product_class_id, price_chart.product_class_id, column_price_value, column_price_date, \
price_date_start_str, price_date_end_str,\
column_price_date, column_price_date, column_price_date))
rows = cur.fetchall()
price_values = []
old_value = None
for row in rows:
if old_value != None:
if old_value + 1 != int(row[1]):
for i in range( 1, int(row[1]) - old_value) :
price_values.append(0)
old_value = int(row[1])
price_values.append(int(row[0]))
if (len(price_values) < 13 ):
for i in range( 13 - len(price_values)):
price_values.append(0)
print '>> ' + str(len (price_values))
assert( len (price_values) == 13 )
price_chart.price_values = price_values
price_charts_day = setLabelColors(price_charts_week)
price_date_start = price_date_end + datetime.timedelta(days=-365)
price_date_start_str = price_date_start.strftime("%Y-%m-%d")
for price_chart in price_charts_month:
cur.execute('SELECT ROUND(AVG(%s)), MONTH(%s) FROM %s WHERE %s=%d and %s BETWEEN \'%s\' and \'%s\' \
GROUP BY CONCAT(YEAR(%s), \'/\', MONTH(%s)) ORDER BY %s ASC' \
% (column_price_value, column_price_date, table_price, \
column_product_class_id, price_chart.product_class_id, column_price_date, \
price_date_start_str, price_date_end_str,\
column_price_date, column_price_date, column_price_date))
rows = cur.fetchall()
price_values = []
old_value = None
for row in rows:
if old_value != None:
if old_value + 1 != int(row[1]):
for i in range( 1, int(row[1]) - old_value) :
price_values.append(0)
old_value = int(row[1])
price_values.append(int(row[0]))
if (len(price_values) < 13 ):
for i in range( 13 - len(price_values)):
price_values.append(0)
print '>> ' + str(len (price_values))
price_chart.price_values = price_values
price_charts_day = setLabelColors(price_charts_month)
# SIMILAR PRODUCTS!!!
cur.execute('SELECT %s.%s, %s.%s, %s.%s, %s.%s FROM %s LEFT JOIN %s ON %s.%s=%s.%s WHERE %s.%s=%d' \
% (table_product, column_product_id, \
table_product, column_product_type, \
table_product, column_product_name, \
table_product, column_product_img_url, \
table_similar_product_relation, table_product, \
table_similar_product_relation, column_similar_product_id, table_product, column_product_id, \
table_similar_product_relation, column_product_id, product_id))
rows = cur.fetchall()
similar_products = []
for row in rows:
similar_product = ProductModel(row[0], row[1], row[2], row[3])
similar_products.append(similar_product)
# LIKE/HATE INFORMATION
cur.execute('SELECT * FROM %s WHERE %s=%d and %s=%d' \
% ( table_favorite, column_product_id, product_id, column_user_id, user_id ))
rows = cur.fetchall()
for row in rows:
print row
dLike = {}
if len(rows) == 1:
dLike['like'] = 'btn-success'
print dLike['like']
else:
dLike['like'] = ''
cur.execute('SELECT * FROM %s WHERE %s=%d and %s=%d' \
% ( table_hate, column_product_id, product_id, column_user_id, user_id ))
rows = cur.fetchall()
for row in rows:
print row
if len(rows) == 1:
dLike['hate'] = 'btn-danger'
print dLike['hate']
else :
dLike['hate'] = ''
# STORES!!!!
cur.execute('SELECT %s.%s, %s.%s, %s.%s FROM %s LEFT JOIN %s ON %s.%s=%s.%s WHERE %s.%s=%d LIMIT 4' \
% (table_store, column_store_name, \
table_store, column_store_latitude, \
table_store, column_store_longitude, \
table_product_store_relation, table_store, \
table_product_store_relation, column_store_id, table_store, column_store_id, \
table_product_store_relation, column_product_id, product_id))
rows = cur.fetchall()
stores = []
for row in rows:
store = StoreModel(row[0], row[1], row[2])
stores.append(store)
# COMMENTS!!!
cur.execute('SELECT %s.%s, %s.%s, %s.%s, %s.%s FROM %s LEFT JOIN %s ON %s.%s=%s.%s WHERE %s=%d ORDER BY %s.%s DESC' \
% (table_comment, column_comment_id, \
table_comment, column_comment_content, \
table_user, column_user_email, table_comment, column_timestamp, \
table_comment, table_user, \
table_comment, column_user_id, table_user, column_user_id, column_product_id, product_id, table_comment, column_timestamp))
rows = cur.fetchall()
comments = []
for row in rows:
comment = CommentModel(row[0], row[2], row[1], row[3])
comments.append(comment)
return render_template('detail.html', \
product=product, \
price_charts_day=price_charts_day, price_charts_week=price_charts_week, price_charts_month=price_charts_month, \
similar_products=similar_products, stores=stores, comments=comments, dLike=dLike)
# -----------------------------------------------------------------------------
# LIKE HATE BUTTON
# -----------------------------------------------------------------------------
@app.route('/toggle/like/<int:product_id>/')
def toggle_like(product_id):
if not is_logged_in():
return redirect(url_for('login'))
cur = g.db.cursor()
# user_id
if 'user_id' in session :
user_id = session['user_id']
# LIKE/HATE INFORMATION
cur.execute('SELECT * FROM %s WHERE %s=%d and %s=%d' \
% ( table_favorite, column_product_id, product_id, column_user_id, user_id ))
rows = cur.fetchall()
if len(rows) == 1:
cur.execute('DELETE FROM %s WHERE %s=%d' \
% ( table_favorite, column_favorite_id, rows[0][0]))
else :
db_insert_favorite(cur, user_id, product_id)
cur.execute('SELECT * FROM %s WHERE %s=%d and %s=%d' \
% ( table_hate, column_product_id, product_id, column_user_id, user_id ))
rows = cur.fetchall()
if len(rows) == 1:
cur.execute('DELETE FROM %s WHERE %s=%d' \
% ( table_hate, column_hate_id, rows[0][0]))
g.db.commit()
return redirect(url_for('show_detail', product_id=product_id))
@app.route('/toggle/hate/<int:product_id>/')
def toggle_hate(product_id):
if not is_logged_in():
return redirect(url_for('login'))
cur = g.db.cursor()
# user_id
if 'user_id' in session :
user_id = session['user_id']
# LIKE/HATE INFORMATION
cur.execute('SELECT * FROM %s WHERE %s=%d and %s=%d' \
% ( table_hate, column_product_id, product_id, column_user_id, user_id ))
rows = cur.fetchall()
if len(rows) == 1:
cur.execute('DELETE FROM %s WHERE %s=%d' \
% ( table_hate, column_hate_id, rows[0][0]))
else :
db_insert_hate(cur, user_id, product_id)
cur.execute('SELECT * FROM %s WHERE %s=%d and %s=%d' \
% ( table_favorite, column_product_id, product_id, column_user_id, user_id ))
rows = cur.fetchall()
if len(rows) == 1:
cur.execute('DELETE FROM %s WHERE %s=%d' \
% ( table_favorite, column_favorite_id, rows[0][0]))
g.db.commit()
return redirect(url_for('show_detail', product_id=product_id))
# -----------------------------------------------------------------------------
# COMMENT ADD
# -----------------------------------------------------------------------------
@app.route('/add/comment/<int:product_id>', methods=['POST'])
def add_comment(product_id):
if request.method == 'POST':
# user_id
if 'user_id' in session :
user_id = session['user_id']
cur = g.db.cursor()
print (user_id, product_id, request.form['content'])
db_insert_comment(cur, user_id, product_id, request.form['content'].encode('utf-8'))
g.db.commit()
return redirect(url_for('show_detail', product_id=product_id))
"""
SECTION FOR MY PAGE
"""
@app.route('/profile/')
def show_profile():
if not is_logged_in():
return redirect(url_for('login'))
# user_id
if 'user_id' in session :
user_id = session['user_id']
#
cur = g.db.cursor()
cur.execute('SELECT %s.%s, %s.%s, %s.%s, %s.%s FROM %s LEFT JOIN %s ON %s.%s=%s.%s WHERE %s.%s=%d' \
% (table_product, column_product_id, \
table_product, column_product_type, \
table_product, column_product_name, \
table_product, column_product_img_url, \
table_favorite, table_product, \
table_product, column_product_id, \
table_favorite, column_product_id, \
table_favorite, column_user_id, user_id))
rows = cur.fetchall()
favorites = []
for row in rows:
product = ProductModel(row[0], row[1], row[2], row[3])
favorites.append(product)
# COMMENTS!!!
product_id = 1
cur.execute('SELECT %s.%s, %s.%s, %s.%s, %s.%s FROM %s LEFT JOIN %s ON %s.%s=%s.%s WHERE %s.%s=%d' \
% (table_comment, column_comment_id, \
table_comment, column_comment_content, \
table_product, column_product_name, \
table_comment, column_timestamp, \
table_comment, table_product, \
table_comment, column_product_id, table_product, column_product_id, table_comment, column_user_id, user_id))
rows = cur.fetchall()
comments = []
for row in rows:
comment = CommentModel(row[0], row[2], row[1], row[3])
comments.append(comment)
#
cur.execute('SELECT %s.%s, %s.%s, %s.%s, %s.%s FROM %s LEFT JOIN %s ON %s.%s=%s.%s WHERE %s.%s=%d' \
% (table_product, column_product_id, \
table_product, column_product_type, \
table_product, column_product_name, \
table_product, column_product_img_url, \
table_hate, table_product, \
table_product, column_product_id, \
table_hate, column_product_id, \
table_hate, column_user_id, user_id))
rows = cur.fetchall()
hates = []
for row in rows:
product = ProductModel(row[0], row[1], row[2], row[3])
hates.append(product)
# show the user profile for that user
return render_template('profile.html', favorites=favorites, hates=hates, comments=comments )
"""
SECTION FOR LOGIN AND JOIN
"""
def is_logged_in():
if 'logged_in' in session:
return True
else:
return False
@app.route('/login/', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
try :
request.form['login']
except:
return redirect(url_for('join'))
        cur = g.db.cursor()
        # NOTE: interpolating raw form input is open to SQL injection;
        # a parameterized query (cur.execute(sql, params)) would be safer.
        cur.execute('SELECT * FROM %s WHERE %s=\'%s\' and %s=\'%s\'' \
            % (table_user, \
                column_user_email, request.form['email'], \
                column_user_password, request.form['password']))
rows = cur.fetchall()
if len(rows) == 1:
session['logged_in'] = True
session['username'] = request.form['email']
session['user_id'] = rows[0][0]
# flash('You were logged in')
return redirect(url_for('show_main'))
error='Check your email and password'
return render_template('login.html', error=error)
@app.route('/logout/', methods=['POST'])
def logout():
session.pop('logged_in', None)
session.pop('username', None)
session.pop('user_id', None)
#flash('You were logged out')
return redirect(url_for('login'))
@app.route('/join/')
def join():
error=None
return render_template('join.html', error=error)
"""
SECTION FOR RUNNING APP
"""
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "JoonyoungYi/RProp-tensorflow",
"score": 3
} |
#### File: JoonyoungYi/RProp-tensorflow/rprop.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.training import optimizer
class RPropOptimizer(optimizer.Optimizer):
"""
Optimizer that implements the RProp algorithm.
"""
def __init__(self,
stepsize=0.1,
etaplus=1.2,
etaminus=0.5,
stepsizemax=50.0,
stepsizemin=1e-6,
use_locking=False,
name="RProp"):
super(RPropOptimizer, self).__init__(use_locking, name)
self._stepsize = stepsize
self._etaplus = etaplus
self._etaminus = etaminus
self._stepsizemax = stepsizemax
self._stepsizemin = stepsizemin
    def _create_slots(self, var_list):
        '''
        Create per-variable slots holding the current step size, the last
        applied delta, and the last seen gradient.
        '''
for v in var_list:
self._get_or_make_slot(
v,
tf.ones([v.get_shape().num_elements()], dtype=tf.float32) *
self._stepsize,
"step",
self._name, )
self._get_or_make_slot(
v,
tf.zeros([v.get_shape().num_elements()], dtype=tf.float32),
"delta",
self._name, )
self._get_or_make_slot(
v,
tf.zeros([v.get_shape().num_elements()], dtype=tf.float32),
"grad",
self._name, )
def _apply_dense(self, grad, var):
grad_slot = self.get_slot(var, "grad")
step_slot = self.get_slot(var, "step")
delta_slot = self.get_slot(var, "delta")
grad = tf.reshape(grad, [-1])
sign = tf.cast(tf.sign(grad_slot * grad), tf.int64)
with tf.control_dependencies([sign]):
grad = grad_slot.assign(grad)
p_indices = tf.where(tf.equal(sign, 1)) # positive indices
        m_indices = tf.where(tf.equal(sign, -1))  # negative indices
z_indices = tf.where(tf.equal(sign, 0)) # zero indices
step_p_update = tf.expand_dims(
tf.minimum(
tf.gather_nd(step_slot, p_indices) * self._etaplus,
self._stepsizemax), 1)
step_m_update = tf.expand_dims(
tf.maximum(
tf.gather_nd(step_slot, m_indices) * self._etaminus,
self._stepsizemin), 1)
step_z_update = tf.expand_dims(tf.gather_nd(step_slot, z_indices), 1)
with tf.control_dependencies(
[step_p_update, step_m_update, step_z_update]):
step = tf.scatter_update(step_slot, p_indices, step_p_update)
step = tf.scatter_update(step, m_indices, step_m_update)
step = tf.scatter_update(step, z_indices, step_z_update)
step = step_slot.assign(step)
delta_p_update = tf.expand_dims(
tf.gather_nd(tf.sign(grad) * step, p_indices), 1)
delta_z_update = tf.expand_dims(
tf.gather_nd(tf.sign(grad) * step, z_indices), 1)
with tf.control_dependencies([delta_p_update, delta_z_update]):
delta = tf.scatter_update(delta_slot, p_indices, delta_p_update)
delta = tf.scatter_update(delta, z_indices, delta_z_update)
delta = delta_slot.assign(delta)
with tf.control_dependencies([sign]):
grad = tf.scatter_update(grad, m_indices,
tf.zeros_like(m_indices, tf.float32))
grad = grad_slot.assign(grad)
up = tf.reshape(delta, var.get_shape())
var_update = var.assign_sub(up, use_locking=self._use_locking)
return tf.group(*[var_update, step, delta, grad])
def _apply_sparse(self, grad, var):
raise NotImplementedError("RProp should be used only in batch_mode.")
``` |
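A minimal usage sketch under TF1-style graph mode (assuming the file above is importable as `rprop`). RProp adapts each weight's step size from gradient signs only, so it is intended for full-batch training rather than mini-batch SGD:

```python
import tensorflow as tf

from rprop import RPropOptimizer

x = tf.Variable([3.0, -2.0])
loss = tf.reduce_sum(tf.square(x))  # minimum at x = [0, 0]
train_op = RPropOptimizer(stepsize=0.1).minimize(loss)

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    for _ in range(50):
        session.run(train_op)  # one full-batch update per step
    print(session.run(x))  # close to [0, 0]
```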
{
"source": "joooeey/geopandas",
"score": 3
} |
#### File: geopandas/tools/sjoin.py
```python
from typing import Optional
import warnings
import numpy as np
import pandas as pd
from geopandas import GeoDataFrame
from geopandas import _compat as compat
from geopandas.array import _check_crs, _crs_mismatch_warn
def sjoin(
left_df,
right_df,
how="inner",
predicate="intersects",
lsuffix="left",
rsuffix="right",
**kwargs,
):
"""Spatial join of two GeoDataFrames.
See the User Guide page :doc:`../../user_guide/mergingdata` for details.
Parameters
----------
left_df, right_df : GeoDataFrames
how : string, default 'inner'
The type of join:
* 'left': use keys from left_df; retain only left_df geometry column
* 'right': use keys from right_df; retain only right_df geometry column
* 'inner': use intersection of keys from both dfs; retain only
left_df geometry column
predicate : string, default 'intersects'
Binary predicate. Valid values are determined by the spatial index used.
You can check the valid values in left_df or right_df as
``left_df.sindex.valid_query_predicates`` or
``right_df.sindex.valid_query_predicates``
Replaces deprecated ``op`` parameter.
lsuffix : string, default 'left'
Suffix to apply to overlapping column names (left GeoDataFrame).
rsuffix : string, default 'right'
Suffix to apply to overlapping column names (right GeoDataFrame).
Examples
--------
    >>> countries = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))
>>> cities = geopandas.read_file(geopandas.datasets.get_path("naturalearth_cities"))
>>> countries.head() # doctest: +SKIP
pop_est continent name \
iso_a3 gdp_md_est geometry
0 920938 Oceania Fiji FJI 8374.0 MULTIPOLY\
GON (((180.00000 -16.06713, 180.00000...
1 53950935 Africa Tanzania TZA 150600.0 POLYGON (\
(33.90371 -0.95000, 34.07262 -1.05982...
2 603253 Africa W. Sahara ESH 906.5 POLYGON (\
(-8.66559 27.65643, -8.66512 27.58948...
3 35623680 North America Canada CAN 1674000.0 MULTIPOLY\
GON (((-122.84000 49.00000, -122.9742...
4 326625791 North America United States of America USA 18560000.0 MULTIPOLY\
GON (((-122.84000 49.00000, -120.0000...
>>> cities.head()
name geometry
0 Vatican City POINT (12.45339 41.90328)
1 San Marino POINT (12.44177 43.93610)
2 Vaduz POINT (9.51667 47.13372)
3 Luxembourg POINT (6.13000 49.61166)
4 Palikir POINT (158.14997 6.91664)
>>> cities_w_country_data = geopandas.sjoin(cities, countries)
>>> cities_w_country_data.head() # doctest: +SKIP
name_left geometry index_right pop_est continent name_\
right iso_a3 gdp_md_est
0 Vatican City POINT (12.45339 41.90328) 141 62137802 Europe \
Italy ITA 2221000.0
1 San Marino POINT (12.44177 43.93610) 141 62137802 Europe \
Italy ITA 2221000.0
192 Rome POINT (12.48131 41.89790) 141 62137802 Europe \
Italy ITA 2221000.0
2 Vaduz POINT (9.51667 47.13372) 114 8754413 Europe Au\
stria AUT 416600.0
184 Vienna POINT (16.36469 48.20196) 114 8754413 Europe Au\
stria AUT 416600.0
See also
--------
overlay : overlay operation resulting in a new geometry
GeoDataFrame.sjoin : equivalent method
Notes
------
Every operation in GeoPandas is planar, i.e. the potential third
dimension is not taken into account.
"""
if "op" in kwargs:
op = kwargs.pop("op")
deprecation_message = (
"The `op` parameter is deprecated and will be removed"
" in a future release. Please use the `predicate` parameter"
" instead."
)
if predicate != "intersects" and op != predicate:
override_message = (
"A non-default value for `predicate` was passed"
f' (got `predicate="{predicate}"`'
f' in combination with `op="{op}"`).'
" The value of `predicate` will be overridden by the value of `op`,"
" , which may result in unexpected behavior."
f"\n{deprecation_message}"
)
warnings.warn(override_message, UserWarning, stacklevel=4)
else:
warnings.warn(deprecation_message, FutureWarning, stacklevel=4)
predicate = op
if kwargs:
first = next(iter(kwargs.keys()))
raise TypeError(f"sjoin() got an unexpected keyword argument '{first}'")
_basic_checks(left_df, right_df, how, lsuffix, rsuffix)
indices = _geom_predicate_query(left_df, right_df, predicate)
joined = _frame_join(indices, left_df, right_df, how, lsuffix, rsuffix)
return joined
def _basic_checks(left_df, right_df, how, lsuffix, rsuffix):
"""Checks the validity of join input parameters.
`how` must be one of the valid options.
`'index_'` concatenated with `lsuffix` or `rsuffix` must not already
exist as columns in the left or right data frames.
Parameters
    ----------
left_df : GeoDataFrame
    right_df : GeoDataFrame
how : str, one of 'left', 'right', 'inner'
join type
lsuffix : str
left index suffix
rsuffix : str
right index suffix
"""
if not isinstance(left_df, GeoDataFrame):
raise ValueError(
"'left_df' should be GeoDataFrame, got {}".format(type(left_df))
)
if not isinstance(right_df, GeoDataFrame):
raise ValueError(
"'right_df' should be GeoDataFrame, got {}".format(type(right_df))
)
allowed_hows = ["left", "right", "inner"]
if how not in allowed_hows:
raise ValueError(
'`how` was "{}" but is expected to be in {}'.format(how, allowed_hows)
)
if not _check_crs(left_df, right_df):
_crs_mismatch_warn(left_df, right_df, stacklevel=4)
index_left = "index_{}".format(lsuffix)
index_right = "index_{}".format(rsuffix)
# due to GH 352
if any(left_df.columns.isin([index_left, index_right])) or any(
right_df.columns.isin([index_left, index_right])
):
raise ValueError(
"'{0}' and '{1}' cannot be names in the frames being"
" joined".format(index_left, index_right)
)
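# Example of the GH 352 guard above (hypothetical frames): a column that
# already uses one of the reserved reset-index names aborts the join early.
#   left = left.assign(index_left=0)
#   sjoin(left, right)  # ValueError: 'index_left' and 'index_right' cannot be names ...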
def _geom_predicate_query(left_df, right_df, predicate):
"""Compute geometric comparisons and get matching indices.
Parameters
----------
left_df : GeoDataFrame
right_df : GeoDataFrame
predicate : string
Binary predicate to query.
Returns
-------
DataFrame
DataFrame with matching indices in
columns named `_key_left` and `_key_right`.
"""
with warnings.catch_warnings():
# We don't need to show our own warning here
# TODO remove this once the deprecation has been enforced
warnings.filterwarnings(
"ignore", "Generated spatial index is empty", FutureWarning
)
original_predicate = predicate
if predicate == "within":
# within is implemented as the inverse of contains
# contains is a faster predicate
# see discussion at https://github.com/geopandas/geopandas/pull/1421
predicate = "contains"
sindex = left_df.sindex
input_geoms = right_df.geometry
else:
# all other predicates are symmetric
# keep them the same
sindex = right_df.sindex
input_geoms = left_df.geometry
if sindex:
l_idx, r_idx = sindex.query_bulk(input_geoms, predicate=predicate, sort=False)
indices = pd.DataFrame({"_key_left": l_idx, "_key_right": r_idx})
else:
# when sindex is empty / has no valid geometries
indices = pd.DataFrame(columns=["_key_left", "_key_right"], dtype=float)
if original_predicate == "within":
# within is implemented as the inverse of contains
# flip back the results
indices = indices.rename(
columns={"_key_left": "_key_right", "_key_right": "_key_left"}
)
return indices
def _frame_join(join_df, left_df, right_df, how, lsuffix, rsuffix):
"""Join the GeoDataFrames at the DataFrame level.
Parameters
----------
join_df : DataFrame
Indices and join data returned by the geometric join.
Must have columns `_key_left` and `_key_right`
with integer indices representing the matches
from `left_df` and `right_df` respectively.
Additional columns may be included and will be copied to
the resultant GeoDataFrame.
left_df : GeoDataFrame
right_df : GeoDataFrame
lsuffix : string
Suffix to apply to overlapping column names (left GeoDataFrame).
rsuffix : string
Suffix to apply to overlapping column names (right GeoDataFrame).
how : string
The type of join to use on the DataFrame level.
Returns
-------
GeoDataFrame
Joined GeoDataFrame.
"""
# the spatial index only allows limited (numeric) index types, but an
# index in geopandas may be any arbitrary dtype. so reset both indices now
# and store references to the original indices, to be reaffixed later.
# GH 352
index_left = "index_{}".format(lsuffix)
left_df = left_df.copy(deep=True)
try:
left_index_name = left_df.index.name
left_df.index = left_df.index.rename(index_left)
except TypeError:
index_left = [
"index_{}".format(lsuffix + str(pos))
for pos, ix in enumerate(left_df.index.names)
]
left_index_name = left_df.index.names
left_df.index = left_df.index.rename(index_left)
left_df = left_df.reset_index()
index_right = "index_{}".format(rsuffix)
right_df = right_df.copy(deep=True)
try:
right_index_name = right_df.index.name
right_df.index = right_df.index.rename(index_right)
except TypeError:
index_right = [
"index_{}".format(rsuffix + str(pos))
for pos, ix in enumerate(right_df.index.names)
]
right_index_name = right_df.index.names
right_df.index = right_df.index.rename(index_right)
right_df = right_df.reset_index()
# perform join on the dataframes
if how == "inner":
join_df = join_df.set_index("_key_left")
joined = (
left_df.merge(join_df, left_index=True, right_index=True)
.merge(
right_df.drop(right_df.geometry.name, axis=1),
left_on="_key_right",
right_index=True,
suffixes=("_{}".format(lsuffix), "_{}".format(rsuffix)),
)
.set_index(index_left)
.drop(["_key_right"], axis=1)
)
if isinstance(index_left, list):
joined.index.names = left_index_name
else:
joined.index.name = left_index_name
elif how == "left":
join_df = join_df.set_index("_key_left")
joined = (
left_df.merge(join_df, left_index=True, right_index=True, how="left")
.merge(
right_df.drop(right_df.geometry.name, axis=1),
how="left",
left_on="_key_right",
right_index=True,
suffixes=("_{}".format(lsuffix), "_{}".format(rsuffix)),
)
.set_index(index_left)
.drop(["_key_right"], axis=1)
)
if isinstance(index_left, list):
joined.index.names = left_index_name
else:
joined.index.name = left_index_name
else: # how == 'right':
joined = (
left_df.drop(left_df.geometry.name, axis=1)
.merge(
join_df.merge(
right_df, left_on="_key_right", right_index=True, how="right"
),
left_index=True,
right_on="_key_left",
how="right",
suffixes=("_{}".format(lsuffix), "_{}".format(rsuffix)),
)
.set_index(index_right)
.drop(["_key_left", "_key_right"], axis=1)
)
if isinstance(index_right, list):
joined.index.names = right_index_name
else:
joined.index.name = right_index_name
return joined
def _nearest_query(
left_df: GeoDataFrame,
right_df: GeoDataFrame,
max_distance: float,
how: str,
return_distance: bool,
):
if not (compat.PYGEOS_GE_010 and compat.USE_PYGEOS):
raise NotImplementedError(
"Currently, only PyGEOS >= 0.10.0 supports `nearest_all`. "
+ compat.INSTALL_PYGEOS_ERROR
)
# use the opposite of the join direction for the index
use_left_as_sindex = how == "right"
if use_left_as_sindex:
sindex = left_df.sindex
query = right_df.geometry
else:
sindex = right_df.sindex
query = left_df.geometry
if sindex:
res = sindex.nearest(
query,
return_all=True,
max_distance=max_distance,
return_distance=return_distance,
)
if return_distance:
(input_idx, tree_idx), distances = res
else:
(input_idx, tree_idx) = res
distances = None
if use_left_as_sindex:
l_idx, r_idx = tree_idx, input_idx
sort_order = np.argsort(l_idx, kind="stable")
l_idx, r_idx = l_idx[sort_order], r_idx[sort_order]
if distances is not None:
distances = distances[sort_order]
else:
l_idx, r_idx = input_idx, tree_idx
join_df = pd.DataFrame(
{"_key_left": l_idx, "_key_right": r_idx, "distances": distances}
)
else:
# when sindex is empty / has no valid geometries
join_df = pd.DataFrame(
columns=["_key_left", "_key_right", "distances"], dtype=float
)
return join_df
def sjoin_nearest(
left_df: GeoDataFrame,
right_df: GeoDataFrame,
how: str = "inner",
max_distance: Optional[float] = None,
lsuffix: str = "left",
rsuffix: str = "right",
distance_col: Optional[str] = None,
) -> GeoDataFrame:
"""Spatial join of two GeoDataFrames based on the distance between their geometries.
Results will include multiple output records for a single input record
where there are multiple equidistant nearest or intersected neighbors.
Distance is calculated in CRS units and can be returned using the
`distance_col` parameter.
See the User Guide page
https://geopandas.readthedocs.io/en/latest/docs/user_guide/mergingdata.html
for more details.
Parameters
----------
left_df, right_df : GeoDataFrames
how : string, default 'inner'
The type of join:
* 'left': use keys from left_df; retain only left_df geometry column
* 'right': use keys from right_df; retain only right_df geometry column
* 'inner': use intersection of keys from both dfs; retain only
left_df geometry column
max_distance : float, default None
Maximum distance within which to query for nearest geometry.
Must be greater than 0.
The max_distance used to search for nearest items in the tree may have a
significant impact on performance by reducing the number of input
geometries that are evaluated for nearest items in the tree.
lsuffix : string, default 'left'
Suffix to apply to overlapping column names (left GeoDataFrame).
rsuffix : string, default 'right'
Suffix to apply to overlapping column names (right GeoDataFrame).
distance_col : string, default None
If set, save the distances computed between matching geometries under a
column of this name in the joined GeoDataFrame.
Examples
--------
>>> countries = geopandas.read_file(geopandas.datasets.get_\
path("naturalearth_lowres"))
>>> cities = geopandas.read_file(geopandas.datasets.get_path("naturalearth_cities"))
    >>> countries.head(2) # doctest: +SKIP
pop_est continent name \
iso_a3 gdp_md_est geometry
0 920938 Oceania Fiji FJI 8374.0 MULTIPOLY\
GON (((180.00000 -16.06713, 180.00000...
1 53950935 Africa Tanzania TZA 150600.0 POLYGON (\
(33.90371 -0.95000, 34.07262 -1.05982...
    >>> cities.head(2) # doctest: +SKIP
name geometry
0 Vatican City POINT (12.45339 41.90328)
1 San Marino POINT (12.44177 43.93610)
>>> cities_w_country_data = geopandas.sjoin_nearest(cities, countries)
    >>> cities_w_country_data.head(2) # doctest: +SKIP
name_left geometry index_right pop_est continent name_\
right iso_a3 gdp_md_est
0 Vatican City POINT (12.45339 41.90328) 141 62137802 Europe \
Italy ITA 2221000.0
1 San Marino POINT (12.44177 43.93610) 141 62137802 Europe \
Italy ITA 2221000.0
To include the distances:
>>> cities_w_country_data = geopandas.sjoin_nearest\
(cities, countries, distance_col="distances")
>>> cities_w_country_data[["name_left", "name_right", \
"distances"]].head(2) # doctest: +SKIP
name_left name_right distances
0 Vatican City Italy 0.0
1 San Marino Italy 0.0
In the following example, we get multiple cities for Italy because all results are
equidistant (in this case zero because they intersect).
In fact, we get 3 results in total:
>>> countries_w_city_data = geopandas.sjoin_nearest\
(cities, countries, distance_col="distances", how="right")
>>> italy_results = \
countries_w_city_data[countries_w_city_data["name_left"] == "Italy"]
>>> italy_results # doctest: +SKIP
name_x name_y
141 Vatican City Italy
141 San Marino Italy
141 Rome Italy
See also
--------
sjoin : binary predicate joins
GeoDataFrame.sjoin_nearest : equivalent method
Notes
-----
Since this join relies on distances, results will be inaccurate
if your geometries are in a geographic CRS.
Every operation in GeoPandas is planar, i.e. the potential third
dimension is not taken into account.
"""
_basic_checks(left_df, right_df, how, lsuffix, rsuffix)
left_df.geometry.values.check_geographic_crs(stacklevel=1)
right_df.geometry.values.check_geographic_crs(stacklevel=1)
return_distance = distance_col is not None
join_df = _nearest_query(left_df, right_df, max_distance, how, return_distance)
if return_distance:
join_df = join_df.rename(columns={"distances": distance_col})
else:
join_df.pop("distances")
joined = _frame_join(join_df, left_df, right_df, how, lsuffix, rsuffix)
if return_distance:
columns = [c for c in joined.columns if c != distance_col] + [distance_col]
joined = joined[columns]
return joined
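# Sketch of a distance-capped nearest join (placeholder frames `pts` and
# `polys`, assumed to share a projected CRS):
#   nearest = sjoin_nearest(pts, polys, max_distance=500.0, distance_col="dist")
# Candidates beyond 500 CRS units are pruned in the tree query, and all
# equidistant nearest neighbors are kept, as documented above.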
``` |
{
"source": "joook1710-cmis/joook1710-cmis-cs2",
"score": 4
} |
#### File: joook1710-cmis/joook1710-cmis-cs2/function.py
```python
def add(a,b):
return a + b
c = add(3, 4)
print c
#Subtracts two arguments
def sub(j,k):
return j - k
l = sub(5, 3)
print l
#Multiplies two arguments
def mul(r,t):
return r * t
e = mul(4,4)
print e
#Divides two arguments
def div(q,w):
    return float(q) / w
y = div(2,3)
print y
#Converts seconds to hours
def hours_from_seconds_div(a,b):
    return a / float(b)
s = hours_from_seconds_div(86400, 3600)
print s
#Computes the area of a circle from its radius
def circle_area(r):
return 3.14159265359 * (r**2)
print circle_area(5)
#Computes the volume of a sphere from its radius
def sphere_volume(v):
return 3.14159265359 * 1.333333333333 * (v**3)
print sphere_volume(5)
#Computes the average volume of two spheres (inputs treated as diameters)
def avg_volume(a,b):
return ((1.0/6 * 3.14159265359 * a**3) + (1.0/6 * 3.14159265359 * b**3)) /2
print avg_volume (10,20)
#Computes the area of a triangle from its 3 side lengths (Heron's formula)
def area(a,b,c):
    n = (a+b+c)/2.0
    return (n*(n-a)*(n-b)*(n-c))**0.5
print area(1, 2, 2.5)
#Takes a string argument and returns it right-aligned to 80 columns with leading spaces
def right_align(word):
return (80-len(word))*(" ") + word
print right_align( "Hello" )
#Takes a string argument and returns it roughly centered on an 80-column line
def center(word):
return (40-len(word))*(" ") + word
print center("Hello")
#Message box
#string as an argument and returns a message box
def msg_box(word):
return "+" + ((len(word)+4)*"-") + "+" + "\n" + "|" + (2*" ") + (word)+ (2*" ") + "|" + "\n" + "+" + ((len(word)+4)*"-") + "+"
print msg_box("Hello")
print msg_box("I eat cats!")
#calling functions
add1= add(5,6)
add2= add(6,3)
sub1= sub(9,3)
sub2= sub(5,4)
mul1= mul(2,3)
mul2= mul(2,4)
div1= div(5,3)
div2= div(7,4)
hoursfromsec1= hours_from_seconds_div(97000,4800)
hoursfromsec2= hours_from_seconds_div(87000,4800)
circlearea1= circle_area(4)
circlearea2= circle_area(9)
spherevolume1= sphere_volume(8)
spherevolume2= sphere_volume(3)
averagevolume1= avg_volume(6,4)
averagevolume2= avg_volume(4,4)
area1= area(1,2,3)
area2= area(4,5,6)
rightalign1= right_align("LOL")
rightalign2= right_align("YEAA")
center1= center("hahaha")
center2= center("What")
msgbox1= msg_box("Poop")
msgbox2= msg_box("yo")
#printing the functions
print msg_box (str(add1))
print msg_box (str(add2))
print msg_box (str(sub1))
print msg_box (str(sub2))
print msg_box (str(mul1))
print msg_box (str(mul2))
print msg_box (str(div1))
print msg_box (str(div2))
print msg_box (str(hoursfromsec1))
print msg_box (str(hoursfromsec2))
print msg_box (str(circlearea1))
print msg_box (str(circlearea2))
print msg_box (str(spherevolume1))
print msg_box (str(spherevolume2))
print msg_box (str(averagevolume1))
print msg_box (str(averagevolume2))
print msg_box (str(area1))
print msg_box (str(area2))
print msg_box (str(rightalign1))
print msg_box (str(rightalign2))
print msg_box (str(center1))
print msg_box (str(center2))
print msg_box (str(msgbox1))
print msg_box (str(msgbox2))
#def is a keyword that indicates that this is a function definition
#print outputs the result
``` |
{
"source": "jooolia/pyanno_voting",
"score": 3
} |
#### File: pyanno_voting/pyanno/voting.py
```python
import numpy as np
from numpy import log
#: In annotations arrays, this is the value used to indicate missing values
MISSING_VALUE = -1
class PyannoValueError(ValueError):
"""ValueError subclass raised by pyAnno functions and methods.
"""
pass
def labels_count(annotations, nclasses, missing_value=MISSING_VALUE):
"""Compute the total count of labels in observed annotations.
Raises a PyannoValueError if there are no valid annotations.
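    Example:
    >>> labels_count([[1, 1, 2], [-1, 1, 2]], 4)
    [0, 3, 2, 0]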
Arguments
---------
annotations : array-like object, shape = (n_items, n_annotators)
annotations[i,j] is the annotation made by annotator j on item i
nclasses : int
Number of label classes in `annotations`
Returns
-------
count : list of length n_classes
count[k] is the number of elements of class k in `annotations`
"""
annotations = np.asarray(annotations)
valid = annotations != missing_value
nobservations = valid.sum()
if nobservations == 0:
# no valid observations
raise PyannoValueError('No valid annotations')
return list(np.bincount(annotations[valid], minlength=nclasses))
def majority_vote(annotations, missing_value=MISSING_VALUE):
"""Compute an estimate of the real class by majority vote.
In case of ties, return the class with smallest number.
If a row only contains invalid entries, return `MISSING_VALUE`.
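    Example:
    >>> majority_vote([[1, 1, 2], [-1, 1, 2]])
    [1, 1]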
Arguments
---------
annotations : array-like object, shape = (n_items, n_annotators)
annotations[i,j] is the annotation made by annotator j on item i
Return
------
vote : list of length n_items
vote[i] is the majority vote estimate for item i
"""
annotations = np.asarray(annotations)
nitems = annotations.shape[0]
valid = annotations != missing_value
vote = [0] * nitems
for i in range(nitems):
if not np.any(valid[i,:]):
# no valid entries on this row
vote[i] = missing_value
else:
count = np.bincount(annotations[i, valid[i,:]])
vote[i] = count.argmax()
return vote
def labels_frequency(annotations, nclasses, missing_value=MISSING_VALUE):
"""Compute the total frequency of labels in observed annotations.
Example:
>>> labels_frequency([[1, 1, 2], [-1, 1, 2]], 4)
array([ 0. , 0.6, 0.4, 0. ])
Arguments
---------
annotations : array-like object, shape = (n_items, n_annotators)
annotations[i,j] is the annotation made by annotator j on item i
nclasses : int
Number of label classes in `annotations`
Returns
-------
freq : ndarray, shape = (n_classes, )
freq[k] is the frequency of elements of class k in `annotations`, i.e.
their count over the number of total of observed (non-missing) elements
"""
    annotations = np.asarray(annotations)
    total_non_missing = np.sum(annotations != missing_value)
    freq = np.zeros(nclasses)
    for i in range(nclasses):
        count = np.sum(annotations == i)
        freq[i] = count / float(total_non_missing)
return freq
``` |
{
"source": "jooonwood/flask-caslite",
"score": 2
} |
#### File: jooonwood/flask-caslite/test_flask_caslite.py
```python
import unittest
from flask import Flask, render_template_string, current_app
from flask_caslite import CasLite
class CasLiteTestCase(unittest.TestCase):
def setUp(self):
app = Flask(__name__)
app.testing = True
self.cas = CasLite(app)
@app.route('/')
def index():
return render_template_string('hello world!')
self.context = app.app_context()
self.context.push()
self.client = app.test_client()
def tearDown(self):
self.context.pop()
def test_config(self):
self.assertIn('CAS_SERVER', current_app.config)
self.assertIn('CAS_VERSION', current_app.config)
self.assertIn('CAS_TOKEN_SESSION_KEY', current_app.config)
self.assertIn('CAS_USERNAME_SESSION_KEY', current_app.config)
self.assertIn('CAS_ATTRIBUTES_SESSION_KEY', current_app.config)
def test_index(self):
response = self.client.get('/')
data = response.get_data(as_text=True)
self.assertEqual('hello world!', data)
``` |
{
"source": "jooooohannes/h2-mapping",
"score": 3
} |
#### File: jooooohannes/h2-mapping/mc_main.py
```python
from mc_geo_path import *
from mc_generation_costs import *
from mc_parameter_def import *
import timeit
import os
def mc_main(end_plant_tuple, h2_demand, year=2021, centralised=True, pipeline=True, max_pipeline_dist=2000,
iterations=1000, elec_type='Alkaline'):
"""Runs a monte carlo simulation of the model. Takes the desired end location [lat, long], the H2 demand (
kt/yr), the year, if redistribution is centralised or not, if pipelines are allowed, and the maximum allowed
pipeline distance (km) as input. Calculates the minimum of (transport + generation) cost for all possible start
locations to determine the cheapest source of renewable H2. """
df = pd.read_csv('Data/renewables.csv', index_col=0)
total_cost_per_kg_h2 = np.zeros((iterations, len(df)))
generation_cost_per_kg = np.zeros((iterations, len(df)))
solar_cost = np.zeros((iterations, len(df)))
wind_cost = np.zeros((iterations, len(df)))
# Define parameters for generation costs
year_diff, capex_extra, capex_h2, lifetime_hours, electrolyser_efficiency, elec_opex, other_capex_elec, water_cost, \
capex_wind, opex_wind, capex_solar, opex_factor_solar = define_gen_parameters(year, iterations, elec_type)
df, cost_end_nh3, cost_end_lohc, cost_end_h2_liq = initial_geo_calcs(df, end_plant_tuple,
centralised=centralised, pipeline=pipeline,
max_pipeline_dist=max_pipeline_dist)
for i in range(iterations):
df = mc_generation_costs(df, h2_demand, year_diff, capex_extra[i], capex_h2[i], lifetime_hours,
electrolyser_efficiency[i], elec_opex[i],
other_capex_elec[i], water_cost[i],
capex_wind[i], opex_wind[i], capex_solar[i], opex_factor_solar[i],
interest=0.08, full_load_hours=2000)
df = mc_transport_costs(df, end_plant_tuple, h2_demand, cost_end_nh3, cost_end_lohc, cost_end_h2_liq,
centralised=centralised, pipeline=pipeline,
max_pipeline_dist=max_pipeline_dist)
df['Total Yearly Cost'] = df['Yearly gen. cost'] + df['Yearly Transport Cost']
df['Total Cost per kg H2'] = df['Gen. cost per kg H2'] + df['Transport Cost per kg H2']
total_cost_per_kg_h2[i, :] = df['Total Cost per kg H2'].values
generation_cost_per_kg[i, :] = df['Gen. cost per kg H2'].values
solar_cost[i, :] = df['Elec Cost Solar'].values
wind_cost[i, :] = df['Elec Cost Wind'].values
return total_cost_per_kg_h2, generation_cost_per_kg, solar_cost, wind_cost
# Define parameters for the main model
end_tuple = [(23.0550, 113.4242), (29.6084, -95.0527)] # [lat, long]
h2_demand = [100] # [kt/yr]
year = [2030]
centralised = True
pipeline = True
max_pipeline_dist = 10000
iterations = 1000
elec_type = ['alkaline']
for et in end_tuple:
print('Location: ' + str(et))
for h2 in h2_demand:
print('H2 Demand: ' + str(h2))
for yr in year:
print('Year: ' + str(yr))
for elec in elec_type:
print('Elec type: ' + elec)
# start timer
start = timeit.default_timer()
total_cost_per_kg_h2, generation_cost_per_kg_h2, solar_cost, wind_cost = mc_main(et, h2, yr, centralised, pipeline, max_pipeline_dist, iterations, elec)
newpath = "Results/mc/" + str(round(et[0])) + ',' + str(round(et[1])) + '__' + str(yr) + '__' + str(h2) + '__' + elec + '__' + str(pipeline) + '__' + str(iterations)
                if pipeline:
                    newpath = newpath + '__Pipe'
                if not centralised:
                    newpath = newpath + '__decent'
if not os.path.exists(newpath):
os.makedirs(newpath)
np.savetxt(newpath + '/' + 'total_cost_per_kg_h2.csv', total_cost_per_kg_h2, delimiter=",")
np.savetxt(newpath + '/' + 'generation_cost_per_kg_h2.csv', generation_cost_per_kg_h2, delimiter=",")
np.savetxt(newpath + '/' + 'solar_cost.csv', solar_cost, delimiter=",")
np.savetxt(newpath + '/' + 'wind_cost.csv', wind_cost, delimiter=",")
# stop timer
stop = timeit.default_timer()
print('Total Time: ', stop - start)
``` |
{
"source": "joooooooooe-star/corona-taiou-webapp",
"score": 2
} |
#### File: corona-taiou-webapp/coronataiou/app.py
```python
from flask import Flask, jsonify, render_template, request, flash, redirect, url_for
from flask_bootstrap import Bootstrap
from datetime import datetime, timedelta
import os
import urllib.parse
from coronataiou.models import db, ma, RecordData, RecordSchema, IdRecordSchema
from coronataiou.forms import AddRecord, DatePickerForm
params = urllib.parse.quote_plus(os.environ.get("SQLAZURECONNSTRPG", ""))
app = Flask(__name__)
app.config['SECRET_KEY'] = os.environ.get("FLSK_SECRET_KEY", "<KEY>")
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = "".join(["postgresql://galileo@pg-corona-taiou:", params, "@pg-corona-taiou.postgres.database.azure.com/log_data"])
db.init_app(app)
ma.init_app(app)
records_schema = RecordSchema(many=True)
id_records_schema = IdRecordSchema(many=True)
id_record_schema = IdRecordSchema()
with app.app_context():
"""Initializes the database"""
db.create_all()
Bootstrap(app)
@app.route('/')
@app.route('/home')
def index():
return render_template('index.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/contact')
def contact():
return render_template('contact.html')
@app.route('/add_record', methods=['GET', 'POST'])
def add_record():
form1 = AddRecord()
if form1.validate_on_submit():
name = request.form['name']
temperature = request.form['temperature']
fatigue = request.form['fatigue']
sore_throat = request.form['sore_throat']
other_pain = request.form['other_pain']
record_data = RecordData(name, temperature, fatigue, sore_throat, other_pain)
db.session.add(record_data)
db.session.commit()
message = f"The data for {name} has been submitted"
return render_template('add_record_temperature.html', message=message)
else:
for field, errors in form1.errors.items():
for error in errors:
flash("Error in {}: {}".format(
getattr(form1, field).label.text,
error
), 'error')
return render_template('add_record_temperature.html', form1=form1)
@app.route('/edit', methods=['GET', 'POST'])
def edit_table(data=None, cols=None):
"""The endpoint containing the feature to edit and remove data"""
if request.method == "POST":
change_form = AddRecord()
if change_form.validate_on_submit():
rec = RecordData.query.get(request.form['id_field'])
rec.name = request.form['name']
rec.temperature = request.form['temperature']
rec.fatigue = request.form['fatigue']
rec.sore_throat = request.form['sore_throat']
rec.other_pain = request.form['other_pain']
db.session.commit()
message = f"The data for {request.form['name']} has been submitted"
return render_template('add_record_temperature.html', message=message)
else:
for field, errors in change_form.errors.items():
for error in errors:
flash("Error in {}: {}".format(
getattr(change_form, field).label.text,
error
), 'error')
return render_template('add_record_temperature.html', form1=change_form)
# The start point for when an ID is selected
if "edit" in request.args:
id_search = int(request.args["edit"])
query_res = RecordData.query.filter(RecordData.id == id_search).first()
data = id_record_schema.dump(query_res)
change_form = AddRecord(old_data=data)
return render_template('add_record_temperature.html', form1=change_form)
# redirect to delete if delete button clicked
if "delete" in request.args:
return redirect(url_for('delete', db_id=request.args['delete']), code=307)
# The start point for when dates are selected
form = DatePickerForm()
if all(["startdate" in request.args, "enddate" in request.args]):
default_start = request.args["startdate"]
default_end = request.args["enddate"]
convert_start = datetime.strptime(default_start, "%Y-%m-%d")
convert_end = datetime.strptime(default_end, "%Y-%m-%d")
# Validate the dates
if convert_start > convert_end:
flash("Please make sure the start date is before the end date.")
return render_template('edit.html', form=form, data=None, cols=None)
# Pull data and flash message if no data found
result = request.args
db_res = get_week_data(result)
if not db_res:
flash("no data found")
else:
data = id_records_schema.dump(db_res)
# reformat date
for datum in data:
datum["updated"] = datum["updated"][:10]
cols = ('id', 'name', 'temperature', 'sore_throat', 'fatigue', 'other_pain', 'updated')
names = ('id', 'Name', 'Body Temperature', 'Sore Throat Pain?', 'Feeling Fatigue?', 'Other Pain?', 'Submission Date')
table_header = dict(zip(cols, names))
return render_template('edit.html', form=form, data=data, cols=cols, th=table_header, ds=default_start, de=default_end)
return render_template('edit.html', form=form, data=data, cols=cols)
@app.route('/delete/<int:db_id>', methods=['GET', 'POST'])
def delete(db_id):
if request.method == "POST":
if "cancel" in request.form:
return redirect(url_for("edit_table"), code=303)
if "delete" in request.form:
data = RecordData.query.get(db_id)
db.session.delete(data)
db.session.commit()
message = "The data has been deleted."
return render_template('delete.html', message=message)
cols = ('id', 'name', 'temperature', 'sore_throat', 'fatigue', 'other_pain', 'updated')
names = ('id', 'Name', 'Body Temperature', 'Sore Throat Pain?', 'Feeling Fatigue?', 'Other Pain?', 'Submission Date')
table_header = dict(zip(cols, names))
data = RecordData.query.get(db_id)
data = id_record_schema.dump(data)
return render_template('delete.html', data=data, cols=cols, th=table_header)
"""Start of the API Routes"""
@app.route('/api/user/<string:name>', methods=['GET'])
def get_name(name):
"""Returns all records matching the name"""
query_res = RecordData.query.filter_by(name=name).all()
return jsonify(records_schema.dump(query_res))
@app.route('/api/date/<string:year>/<string:month>/<string:day>', methods=['GET'])
def get_date(year, month, day):
"""Returns all records for the selected date. Adjusts for Japan Timezone UTC+9"""
try:
converted_date = datetime.strptime("".join([year, month, day]), "%Y%m%d")
except (ValueError, TypeError):
return "Error: Use format of yyyy/mm/dd."
# timezone adjustment
converted_date = converted_date - timedelta(hours=9)
next_day = converted_date + timedelta(days=1)
query_res = RecordData.query.filter(RecordData.updated >= converted_date).filter(
RecordData.updated < next_day).all()
return jsonify(records_schema.dump(query_res))
def get_week_data(result: dict) -> list:
convert_start = datetime.strptime(result["startdate"], "%Y-%m-%d") - timedelta(hours=9)
convert_end = datetime.strptime(result["enddate"], "%Y-%m-%d") + timedelta(hours=13)
query_res = RecordData.query.filter(RecordData.updated >= convert_start).filter(RecordData.updated <= convert_end).order_by(RecordData.updated.desc()).all()
return query_res
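# Illustrative window for the offsets above (JST = UTC+9), assuming the picker
# sends "YYYY-MM-DD": start "2021-06-01" maps to 2021-05-31 15:00 UTC (midnight
# JST) and end "2021-06-07" maps to 2021-06-07 13:00 UTC (22:00 JST), so the
# UTC-stored timestamps are compared against Japanese local dates.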
if __name__ == '__main__':
app.run()
``` |
{
"source": "joopert/home-assistant",
"score": 2
} |
#### File: components/smartthings/smartapp.py
```python
import asyncio
import functools
import logging
from urllib.parse import urlparse
from uuid import uuid4
from aiohttp import web
from pysmartapp import Dispatcher, SmartAppManager
from pysmartapp.const import SETTINGS_APP_ID
from pysmartthings import (
APP_TYPE_WEBHOOK,
CAPABILITIES,
CLASSIFICATION_AUTOMATION,
App,
AppOAuth,
AppSettings,
InstalledAppStatus,
SmartThings,
SourceType,
Subscription,
SubscriptionEntity,
)
from homeassistant.components import webhook
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
APP_NAME_PREFIX,
APP_OAUTH_CLIENT_NAME,
APP_OAUTH_SCOPES,
CONF_APP_ID,
CONF_CLOUDHOOK_URL,
CONF_INSTALLED_APP_ID,
CONF_INSTALLED_APPS,
CONF_INSTANCE_ID,
CONF_LOCATION_ID,
CONF_REFRESH_TOKEN,
DATA_BROKERS,
DATA_MANAGER,
DOMAIN,
SETTINGS_INSTANCE_ID,
SIGNAL_SMARTAPP_PREFIX,
STORAGE_KEY,
STORAGE_VERSION,
)
_LOGGER = logging.getLogger(__name__)
async def find_app(hass: HomeAssistantType, api):
"""Find an existing SmartApp for this installation of hass."""
apps = await api.apps()
for app in [app for app in apps if app.app_name.startswith(APP_NAME_PREFIX)]:
# Load settings to compare instance id
settings = await app.settings()
if (
settings.settings.get(SETTINGS_INSTANCE_ID)
== hass.data[DOMAIN][CONF_INSTANCE_ID]
):
return app
async def validate_installed_app(api, installed_app_id: str):
"""
Ensure the specified installed SmartApp is valid and functioning.
Query the API for the installed SmartApp and validate that it is tied to
the specified app_id and is in an authorized state.
"""
installed_app = await api.installed_app(installed_app_id)
if installed_app.installed_app_status != InstalledAppStatus.AUTHORIZED:
raise RuntimeWarning(
"Installed SmartApp instance '{}' ({}) is not AUTHORIZED but instead {}".format(
installed_app.display_name,
installed_app.installed_app_id,
installed_app.installed_app_status,
)
)
return installed_app
def validate_webhook_requirements(hass: HomeAssistantType) -> bool:
"""Ensure HASS is setup properly to receive webhooks."""
if (
"cloud" in hass.config.components
and hass.components.cloud.async_active_subscription()
):
return True
if hass.data[DOMAIN][CONF_CLOUDHOOK_URL] is not None:
return True
return get_webhook_url(hass).lower().startswith("https://")
def get_webhook_url(hass: HomeAssistantType) -> str:
"""
Get the URL of the webhook.
Return the cloudhook if available, otherwise local webhook.
"""
cloudhook_url = hass.data[DOMAIN][CONF_CLOUDHOOK_URL]
if (
"cloud" in hass.config.components
and hass.components.cloud.async_active_subscription()
and cloudhook_url is not None
):
return cloudhook_url
return webhook.async_generate_url(hass, hass.data[DOMAIN][CONF_WEBHOOK_ID])
def _get_app_template(hass: HomeAssistantType):
endpoint = "at " + hass.config.api.base_url
cloudhook_url = hass.data[DOMAIN][CONF_CLOUDHOOK_URL]
if cloudhook_url is not None:
endpoint = "via Nabu Casa"
description = f"{hass.config.location_name} {endpoint}"
return {
"app_name": APP_NAME_PREFIX + str(uuid4()),
"display_name": "Home Assistant",
"description": description,
"webhook_target_url": get_webhook_url(hass),
"app_type": APP_TYPE_WEBHOOK,
"single_instance": True,
"classifications": [CLASSIFICATION_AUTOMATION],
}
async def create_app(hass: HomeAssistantType, api):
"""Create a SmartApp for this instance of hass."""
# Create app from template attributes
template = _get_app_template(hass)
app = App()
for key, value in template.items():
setattr(app, key, value)
app, client = await api.create_app(app)
_LOGGER.debug("Created SmartApp '%s' (%s)", app.app_name, app.app_id)
# Set unique hass id in settings
settings = AppSettings(app.app_id)
settings.settings[SETTINGS_APP_ID] = app.app_id
settings.settings[SETTINGS_INSTANCE_ID] = hass.data[DOMAIN][CONF_INSTANCE_ID]
await api.update_app_settings(settings)
_LOGGER.debug(
"Updated App Settings for SmartApp '%s' (%s)", app.app_name, app.app_id
)
# Set oauth scopes
oauth = AppOAuth(app.app_id)
oauth.client_name = APP_OAUTH_CLIENT_NAME
oauth.scope.extend(APP_OAUTH_SCOPES)
await api.update_app_oauth(oauth)
_LOGGER.debug("Updated App OAuth for SmartApp '%s' (%s)", app.app_name, app.app_id)
return app, client
async def update_app(hass: HomeAssistantType, app):
"""Ensure the SmartApp is up-to-date and update if necessary."""
template = _get_app_template(hass)
template.pop("app_name") # don't update this
update_required = False
for key, value in template.items():
if getattr(app, key) != value:
update_required = True
setattr(app, key, value)
if update_required:
await app.save()
_LOGGER.debug(
"SmartApp '%s' (%s) updated with latest settings", app.app_name, app.app_id
)
def setup_smartapp(hass, app):
"""
Configure an individual SmartApp in hass.
Register the SmartApp with the SmartAppManager so that hass will service
lifecycle events (install, event, etc...). A unique SmartApp is created
for each SmartThings account that is configured in hass.
"""
manager = hass.data[DOMAIN][DATA_MANAGER]
smartapp = manager.smartapps.get(app.app_id)
if smartapp:
# already setup
return smartapp
smartapp = manager.register(app.app_id, app.webhook_public_key)
smartapp.name = app.display_name
smartapp.description = app.description
smartapp.permissions.extend(APP_OAUTH_SCOPES)
return smartapp
async def setup_smartapp_endpoint(hass: HomeAssistantType):
"""
Configure the SmartApp webhook in hass.
SmartApps are an extension point within the SmartThings ecosystem and
is used to receive push updates (i.e. device updates) from the cloud.
"""
data = hass.data.get(DOMAIN)
if data:
# already setup
return
# Get/create config to store a unique id for this hass instance.
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
config = await store.async_load()
if not config:
# Create config
config = {
CONF_INSTANCE_ID: str(uuid4()),
CONF_WEBHOOK_ID: webhook.generate_secret(),
CONF_CLOUDHOOK_URL: None,
}
await store.async_save(config)
# Register webhook
webhook.async_register(
hass, DOMAIN, "SmartApp", config[CONF_WEBHOOK_ID], smartapp_webhook
)
# Create webhook if eligible
cloudhook_url = config.get(CONF_CLOUDHOOK_URL)
if (
cloudhook_url is None
and "cloud" in hass.config.components
and hass.components.cloud.async_active_subscription()
and not hass.config_entries.async_entries(DOMAIN)
):
cloudhook_url = await hass.components.cloud.async_create_cloudhook(
config[CONF_WEBHOOK_ID]
)
config[CONF_CLOUDHOOK_URL] = cloudhook_url
await store.async_save(config)
_LOGGER.debug("Created cloudhook '%s'", cloudhook_url)
# SmartAppManager uses a dispatcher to invoke callbacks when push events
# occur. Use hass' implementation instead of the built-in one.
dispatcher = Dispatcher(
signal_prefix=SIGNAL_SMARTAPP_PREFIX,
connect=functools.partial(async_dispatcher_connect, hass),
send=functools.partial(async_dispatcher_send, hass),
)
# Path is used in digital signature validation
path = (
urlparse(cloudhook_url).path
if cloudhook_url
else webhook.async_generate_path(config[CONF_WEBHOOK_ID])
)
manager = SmartAppManager(path, dispatcher=dispatcher)
manager.connect_install(functools.partial(smartapp_install, hass))
manager.connect_update(functools.partial(smartapp_update, hass))
manager.connect_uninstall(functools.partial(smartapp_uninstall, hass))
hass.data[DOMAIN] = {
DATA_MANAGER: manager,
CONF_INSTANCE_ID: config[CONF_INSTANCE_ID],
DATA_BROKERS: {},
CONF_WEBHOOK_ID: config[CONF_WEBHOOK_ID],
# Will not be present if not enabled
CONF_CLOUDHOOK_URL: config.get(CONF_CLOUDHOOK_URL),
CONF_INSTALLED_APPS: [],
}
_LOGGER.debug(
"Setup endpoint for %s",
cloudhook_url
if cloudhook_url
else webhook.async_generate_url(hass, config[CONF_WEBHOOK_ID]),
)
async def unload_smartapp_endpoint(hass: HomeAssistantType):
"""Tear down the component configuration."""
if DOMAIN not in hass.data:
return
# Remove the cloudhook if it was created
cloudhook_url = hass.data[DOMAIN][CONF_CLOUDHOOK_URL]
if (
cloudhook_url
and "cloud" in hass.config.components
and hass.components.cloud.async_is_logged_in()
):
await hass.components.cloud.async_delete_cloudhook(
hass.data[DOMAIN][CONF_WEBHOOK_ID]
)
# Remove cloudhook from storage
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
await store.async_save(
{
CONF_INSTANCE_ID: hass.data[DOMAIN][CONF_INSTANCE_ID],
CONF_WEBHOOK_ID: hass.data[DOMAIN][CONF_WEBHOOK_ID],
CONF_CLOUDHOOK_URL: None,
}
)
_LOGGER.debug("Cloudhook '%s' was removed", cloudhook_url)
# Remove the webhook
webhook.async_unregister(hass, hass.data[DOMAIN][CONF_WEBHOOK_ID])
# Disconnect all brokers
for broker in hass.data[DOMAIN][DATA_BROKERS].values():
broker.disconnect()
# Remove all handlers from manager
hass.data[DOMAIN][DATA_MANAGER].dispatcher.disconnect_all()
# Remove the component data
hass.data.pop(DOMAIN)
async def smartapp_sync_subscriptions(
hass: HomeAssistantType,
auth_token: str,
location_id: str,
installed_app_id: str,
devices,
):
"""Synchronize subscriptions of an installed up."""
api = SmartThings(async_get_clientsession(hass), auth_token)
tasks = []
async def create_subscription(target: str):
sub = Subscription()
sub.installed_app_id = installed_app_id
sub.location_id = location_id
sub.source_type = SourceType.CAPABILITY
sub.capability = target
try:
await api.create_subscription(sub)
_LOGGER.debug(
"Created subscription for '%s' under app '%s'", target, installed_app_id
)
except Exception as error: # pylint:disable=broad-except
_LOGGER.error(
"Failed to create subscription for '%s' under app '%s': %s",
target,
installed_app_id,
error,
)
async def delete_subscription(sub: SubscriptionEntity):
try:
await api.delete_subscription(installed_app_id, sub.subscription_id)
_LOGGER.debug(
"Removed subscription for '%s' under app '%s' because it was no longer needed",
sub.capability,
installed_app_id,
)
except Exception as error: # pylint:disable=broad-except
_LOGGER.error(
"Failed to remove subscription for '%s' under app " "'%s': %s",
sub.capability,
installed_app_id,
error,
)
# Build set of capabilities and prune unsupported ones
capabilities = set()
for device in devices:
capabilities.update(device.capabilities)
capabilities.intersection_update(CAPABILITIES)
# Get current subscriptions and find differences
subscriptions = await api.subscriptions(installed_app_id)
for subscription in subscriptions:
if subscription.capability in capabilities:
capabilities.remove(subscription.capability)
else:
# Delete the subscription
tasks.append(delete_subscription(subscription))
# Remaining capabilities need subscriptions created
tasks.extend([create_subscription(c) for c in capabilities])
if tasks:
await asyncio.gather(*tasks)
else:
_LOGGER.debug("Subscriptions for app '%s' are up-to-date", installed_app_id)
async def smartapp_install(hass: HomeAssistantType, req, resp, app):
"""
Handle when a SmartApp is installed by the user into a location.
Create a config entry representing the installation if this is not
the first installation under the account, otherwise store the data
for the config flow.
"""
install_data = {
CONF_INSTALLED_APP_ID: req.installed_app_id,
CONF_LOCATION_ID: req.location_id,
CONF_REFRESH_TOKEN: req.refresh_token,
}
# App attributes (client id/secret, etc...) are copied from another entry
# with the same parent app_id. If one is not found, the install data is
# stored for the config flow to retrieve during the wait step.
entry = next(
(
entry
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.data[CONF_APP_ID] == app.app_id
),
None,
)
if entry:
data = entry.data.copy()
data.update(install_data)
# Add as job not needed because the current coroutine was invoked
# from the dispatcher and is not being awaited.
await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "install"}, data=data
)
else:
# Store the data where the flow can find it
hass.data[DOMAIN][CONF_INSTALLED_APPS].append(install_data)
_LOGGER.debug(
"Installed SmartApp '%s' under parent app '%s'",
req.installed_app_id,
app.app_id,
)
async def smartapp_update(hass: HomeAssistantType, req, resp, app):
"""
Handle when a SmartApp is updated (reconfigured) by the user.
Store the refresh token in the config entry.
"""
# Update refresh token in config entry
entry = next(
(
entry
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.data.get(CONF_INSTALLED_APP_ID) == req.installed_app_id
),
None,
)
if entry:
entry.data[CONF_REFRESH_TOKEN] = req.refresh_token
hass.config_entries.async_update_entry(entry)
_LOGGER.debug(
"Updated SmartApp '%s' under parent app '%s'", req.installed_app_id, app.app_id
)
async def smartapp_uninstall(hass: HomeAssistantType, req, resp, app):
"""
Handle when a SmartApp is removed from a location by the user.
Find and delete the config entry representing the integration.
"""
entry = next(
(
entry
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.data.get(CONF_INSTALLED_APP_ID) == req.installed_app_id
),
None,
)
if entry:
# Add as job not needed because the current coroutine was invoked
# from the dispatcher and is not being awaited.
await hass.config_entries.async_remove(entry.entry_id)
_LOGGER.debug(
"Uninstalled SmartApp '%s' under parent app '%s'",
req.installed_app_id,
app.app_id,
)
async def smartapp_webhook(hass: HomeAssistantType, webhook_id: str, request):
"""
Handle a smartapp lifecycle event callback from SmartThings.
Requests from SmartThings are digitally signed and the SmartAppManager
validates the signature for authenticity.
"""
manager = hass.data[DOMAIN][DATA_MANAGER]
data = await request.json()
result = await manager.handle_request(data, request.headers)
return web.json_response(result)
```
#### File: components/here_travel_time/test_sensor.py
```python
import logging
from unittest.mock import patch
import urllib.parse
import herepy
import pytest
from homeassistant.components.here_travel_time.sensor import (
ATTR_ATTRIBUTION,
ATTR_DESTINATION,
ATTR_DESTINATION_NAME,
ATTR_DISTANCE,
ATTR_DURATION,
ATTR_DURATION_IN_TRAFFIC,
ATTR_ORIGIN,
ATTR_ORIGIN_NAME,
ATTR_ROUTE,
CONF_MODE,
CONF_TRAFFIC_MODE,
CONF_UNIT_SYSTEM,
ICON_BICYCLE,
ICON_CAR,
ICON_PEDESTRIAN,
ICON_PUBLIC,
ICON_TRUCK,
NO_ROUTE_ERROR_MESSAGE,
ROUTE_MODE_FASTEST,
ROUTE_MODE_SHORTEST,
SCAN_INTERVAL,
TRAFFIC_MODE_DISABLED,
TRAFFIC_MODE_ENABLED,
TRAVEL_MODE_BICYCLE,
TRAVEL_MODE_CAR,
TRAVEL_MODE_PEDESTRIAN,
TRAVEL_MODE_PUBLIC,
TRAVEL_MODE_PUBLIC_TIME_TABLE,
TRAVEL_MODE_TRUCK,
UNIT_OF_MEASUREMENT,
)
from homeassistant.const import ATTR_ICON, EVENT_HOMEASSISTANT_START
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed, load_fixture
DOMAIN = "sensor"
PLATFORM = "here_travel_time"
APP_ID = "test"
APP_CODE = "test"
TRUCK_ORIGIN_LATITUDE = "41.9798"
TRUCK_ORIGIN_LONGITUDE = "-87.8801"
TRUCK_DESTINATION_LATITUDE = "41.9043"
TRUCK_DESTINATION_LONGITUDE = "-87.9216"
BIKE_ORIGIN_LATITUDE = "41.9798"
BIKE_ORIGIN_LONGITUDE = "-87.8801"
BIKE_DESTINATION_LATITUDE = "41.9043"
BIKE_DESTINATION_LONGITUDE = "-87.9216"
CAR_ORIGIN_LATITUDE = "38.9"
CAR_ORIGIN_LONGITUDE = "-77.04833"
CAR_DESTINATION_LATITUDE = "39.0"
CAR_DESTINATION_LONGITUDE = "-77.1"
def _build_mock_url(origin, destination, modes, app_id, app_code, departure):
"""Construct a url for HERE."""
base_url = "https://route.cit.api.here.com/routing/7.2/calculateroute.json?"
parameters = {
"waypoint0": f"geo!{origin}",
"waypoint1": f"geo!{destination}",
"mode": ";".join(str(herepy.RouteMode[mode]) for mode in modes),
"app_id": app_id,
"app_code": app_code,
"departure": departure,
}
url = base_url + urllib.parse.urlencode(parameters)
return url
def _assert_truck_sensor(sensor):
"""Assert that states and attributes are correct for truck_response."""
assert sensor.state == "14"
assert sensor.attributes.get("unit_of_measurement") == UNIT_OF_MEASUREMENT
assert sensor.attributes.get(ATTR_ATTRIBUTION) is None
assert sensor.attributes.get(ATTR_DURATION) == 13.533333333333333
assert sensor.attributes.get(ATTR_DISTANCE) == 13.049
assert sensor.attributes.get(ATTR_ROUTE) == (
"I-190; I-294 S - Tri-State Tollway; I-290 W - Eisenhower Expy W; "
"IL-64 W - E North Ave; I-290 E - Eisenhower Expy E; I-290"
)
assert sensor.attributes.get(CONF_UNIT_SYSTEM) == "metric"
assert sensor.attributes.get(ATTR_DURATION_IN_TRAFFIC) == 13.533333333333333
assert sensor.attributes.get(ATTR_ORIGIN) == ",".join(
[TRUCK_ORIGIN_LATITUDE, TRUCK_ORIGIN_LONGITUDE]
)
assert sensor.attributes.get(ATTR_DESTINATION) == ",".join(
[TRUCK_DESTINATION_LATITUDE, TRUCK_DESTINATION_LONGITUDE]
)
assert sensor.attributes.get(ATTR_ORIGIN_NAME) == ""
assert sensor.attributes.get(ATTR_DESTINATION_NAME) == "Eisenhower Expy E"
assert sensor.attributes.get(CONF_MODE) == TRAVEL_MODE_TRUCK
assert sensor.attributes.get(CONF_TRAFFIC_MODE) is False
assert sensor.attributes.get(ATTR_ICON) == ICON_TRUCK
@pytest.fixture
def requests_mock_credentials_check(requests_mock):
"""Add the url used in the api validation to all requests mock."""
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_CAR, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(
",".join([CAR_ORIGIN_LATITUDE, CAR_ORIGIN_LONGITUDE]),
",".join([CAR_DESTINATION_LATITUDE, CAR_DESTINATION_LONGITUDE]),
modes,
APP_ID,
APP_CODE,
"now",
)
requests_mock.get(
response_url, text=load_fixture("here_travel_time/car_response.json")
)
return requests_mock
@pytest.fixture
def requests_mock_truck_response(requests_mock_credentials_check):
"""Return a requests_mock for truck respones."""
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_TRUCK, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(
",".join([TRUCK_ORIGIN_LATITUDE, TRUCK_ORIGIN_LONGITUDE]),
",".join([TRUCK_DESTINATION_LATITUDE, TRUCK_DESTINATION_LONGITUDE]),
modes,
APP_ID,
APP_CODE,
"now",
)
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/truck_response.json")
)
@pytest.fixture
def requests_mock_car_disabled_response(requests_mock_credentials_check):
"""Return a requests_mock for truck respones."""
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_CAR, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(
",".join([CAR_ORIGIN_LATITUDE, CAR_ORIGIN_LONGITUDE]),
",".join([CAR_DESTINATION_LATITUDE, CAR_DESTINATION_LONGITUDE]),
modes,
APP_ID,
APP_CODE,
"now",
)
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/car_response.json")
)
async def test_car(hass, requests_mock_car_disabled_response):
"""Test that car works."""
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": CAR_ORIGIN_LATITUDE,
"origin_longitude": CAR_ORIGIN_LONGITUDE,
"destination_latitude": CAR_DESTINATION_LATITUDE,
"destination_longitude": CAR_DESTINATION_LONGITUDE,
"app_id": APP_ID,
"app_code": APP_CODE,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert sensor.state == "30"
assert sensor.attributes.get("unit_of_measurement") == UNIT_OF_MEASUREMENT
assert sensor.attributes.get(ATTR_ATTRIBUTION) is None
assert sensor.attributes.get(ATTR_DURATION) == 30.05
assert sensor.attributes.get(ATTR_DISTANCE) == 23.903
assert sensor.attributes.get(ATTR_ROUTE) == (
"US-29 - K St NW; US-29 - Whitehurst Fwy; "
"I-495 N - Capital Beltway; MD-187 S - Old Georgetown Rd"
)
assert sensor.attributes.get(CONF_UNIT_SYSTEM) == "metric"
assert sensor.attributes.get(ATTR_DURATION_IN_TRAFFIC) == 31.016666666666666
assert sensor.attributes.get(ATTR_ORIGIN) == ",".join(
[CAR_ORIGIN_LATITUDE, CAR_ORIGIN_LONGITUDE]
)
assert sensor.attributes.get(ATTR_DESTINATION) == ",".join(
[CAR_DESTINATION_LATITUDE, CAR_DESTINATION_LONGITUDE]
)
assert sensor.attributes.get(ATTR_ORIGIN_NAME) == "22nd St NW"
assert sensor.attributes.get(ATTR_DESTINATION_NAME) == "Service Rd S"
assert sensor.attributes.get(CONF_MODE) == TRAVEL_MODE_CAR
assert sensor.attributes.get(CONF_TRAFFIC_MODE) is False
assert sensor.attributes.get(ATTR_ICON) == ICON_CAR
# Test traffic mode disabled
assert sensor.attributes.get(ATTR_DURATION) != sensor.attributes.get(
ATTR_DURATION_IN_TRAFFIC
)
async def test_traffic_mode_enabled(hass, requests_mock_credentials_check):
"""Test that traffic mode enabled works."""
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_CAR, TRAFFIC_MODE_ENABLED]
response_url = _build_mock_url(
",".join([CAR_ORIGIN_LATITUDE, CAR_ORIGIN_LONGITUDE]),
",".join([CAR_DESTINATION_LATITUDE, CAR_DESTINATION_LONGITUDE]),
modes,
APP_ID,
APP_CODE,
"now",
)
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/car_enabled_response.json")
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": CAR_ORIGIN_LATITUDE,
"origin_longitude": CAR_ORIGIN_LONGITUDE,
"destination_latitude": CAR_DESTINATION_LATITUDE,
"destination_longitude": CAR_DESTINATION_LONGITUDE,
"app_id": APP_ID,
"app_code": APP_CODE,
"traffic_mode": True,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
# Test traffic mode enabled
assert sensor.attributes.get(ATTR_DURATION) != sensor.attributes.get(
ATTR_DURATION_IN_TRAFFIC
)
async def test_imperial(hass, requests_mock_car_disabled_response):
"""Test that imperial units work."""
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": CAR_ORIGIN_LATITUDE,
"origin_longitude": CAR_ORIGIN_LONGITUDE,
"destination_latitude": CAR_DESTINATION_LATITUDE,
"destination_longitude": CAR_DESTINATION_LONGITUDE,
"app_id": APP_ID,
"app_code": APP_CODE,
"unit_system": "imperial",
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert sensor.attributes.get(ATTR_DISTANCE) == 14.852635608048994
async def test_route_mode_shortest(hass, requests_mock_credentials_check):
"""Test that route mode shortest works."""
origin = "38.902981,-77.048338"
destination = "39.042158,-77.119116"
modes = [ROUTE_MODE_SHORTEST, TRAVEL_MODE_CAR, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(origin, destination, modes, APP_ID, APP_CODE, "now")
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/car_shortest_response.json")
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": origin.split(",")[0],
"origin_longitude": origin.split(",")[1],
"destination_latitude": destination.split(",")[0],
"destination_longitude": destination.split(",")[1],
"app_id": APP_ID,
"app_code": APP_CODE,
"route_mode": ROUTE_MODE_SHORTEST,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert sensor.attributes.get(ATTR_DISTANCE) == 18.388
async def test_route_mode_fastest(hass, requests_mock_credentials_check):
"""Test that route mode fastest works."""
origin = "38.902981,-77.048338"
destination = "39.042158,-77.119116"
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_CAR, TRAFFIC_MODE_ENABLED]
response_url = _build_mock_url(origin, destination, modes, APP_ID, APP_CODE, "now")
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/car_enabled_response.json")
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": origin.split(",")[0],
"origin_longitude": origin.split(",")[1],
"destination_latitude": destination.split(",")[0],
"destination_longitude": destination.split(",")[1],
"app_id": APP_ID,
"app_code": APP_CODE,
"traffic_mode": True,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert sensor.attributes.get(ATTR_DISTANCE) == 23.381
async def test_truck(hass, requests_mock_truck_response):
"""Test that truck works."""
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": TRUCK_ORIGIN_LATITUDE,
"origin_longitude": TRUCK_ORIGIN_LONGITUDE,
"destination_latitude": TRUCK_DESTINATION_LATITUDE,
"destination_longitude": TRUCK_DESTINATION_LONGITUDE,
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_TRUCK,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
_assert_truck_sensor(sensor)
async def test_public_transport(hass, requests_mock_credentials_check):
"""Test that publicTransport works."""
origin = "41.9798,-87.8801"
destination = "41.9043,-87.9216"
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_PUBLIC, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(origin, destination, modes, APP_ID, APP_CODE, "now")
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/public_response.json")
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": origin.split(",")[0],
"origin_longitude": origin.split(",")[1],
"destination_latitude": destination.split(",")[0],
"destination_longitude": destination.split(",")[1],
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_PUBLIC,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert sensor.state == "89"
assert sensor.attributes.get("unit_of_measurement") == UNIT_OF_MEASUREMENT
assert sensor.attributes.get(ATTR_ATTRIBUTION) is None
assert sensor.attributes.get(ATTR_DURATION) == 89.16666666666667
assert sensor.attributes.get(ATTR_DISTANCE) == 22.325
assert sensor.attributes.get(ATTR_ROUTE) == (
"332 - Palmer/Schiller; 332 - Cargo Rd./Delta Cargo; " "332 - Palmer/Schiller"
)
assert sensor.attributes.get(CONF_UNIT_SYSTEM) == "metric"
assert sensor.attributes.get(ATTR_DURATION_IN_TRAFFIC) == 89.16666666666667
assert sensor.attributes.get(ATTR_ORIGIN) == origin
assert sensor.attributes.get(ATTR_DESTINATION) == destination
assert sensor.attributes.get(ATTR_ORIGIN_NAME) == "Mannheim Rd"
assert sensor.attributes.get(ATTR_DESTINATION_NAME) == ""
assert sensor.attributes.get(CONF_MODE) == TRAVEL_MODE_PUBLIC
assert sensor.attributes.get(CONF_TRAFFIC_MODE) is False
assert sensor.attributes.get(ATTR_ICON) == ICON_PUBLIC
async def test_public_transport_time_table(hass, requests_mock_credentials_check):
"""Test that publicTransportTimeTable works."""
origin = "41.9798,-87.8801"
destination = "41.9043,-87.9216"
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_PUBLIC_TIME_TABLE, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(origin, destination, modes, APP_ID, APP_CODE, "now")
requests_mock_credentials_check.get(
response_url,
text=load_fixture("here_travel_time/public_time_table_response.json"),
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": origin.split(",")[0],
"origin_longitude": origin.split(",")[1],
"destination_latitude": destination.split(",")[0],
"destination_longitude": destination.split(",")[1],
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_PUBLIC_TIME_TABLE,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert sensor.state == "80"
assert sensor.attributes.get("unit_of_measurement") == UNIT_OF_MEASUREMENT
assert sensor.attributes.get(ATTR_ATTRIBUTION) is None
assert sensor.attributes.get(ATTR_DURATION) == 79.73333333333333
assert sensor.attributes.get(ATTR_DISTANCE) == 14.775
assert sensor.attributes.get(ATTR_ROUTE) == (
"330 - Archer/Harlem (Terminal); 309 - Elmhurst Metra Station"
)
assert sensor.attributes.get(CONF_UNIT_SYSTEM) == "metric"
assert sensor.attributes.get(ATTR_DURATION_IN_TRAFFIC) == 79.73333333333333
assert sensor.attributes.get(ATTR_ORIGIN) == origin
assert sensor.attributes.get(ATTR_DESTINATION) == destination
assert sensor.attributes.get(ATTR_ORIGIN_NAME) == "<NAME>"
assert sensor.attributes.get(ATTR_DESTINATION_NAME) == ""
assert sensor.attributes.get(CONF_MODE) == TRAVEL_MODE_PUBLIC_TIME_TABLE
assert sensor.attributes.get(CONF_TRAFFIC_MODE) is False
assert sensor.attributes.get(ATTR_ICON) == ICON_PUBLIC
async def test_pedestrian(hass, requests_mock_credentials_check):
"""Test that pedestrian works."""
origin = "41.9798,-87.8801"
destination = "41.9043,-87.9216"
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_PEDESTRIAN, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(origin, destination, modes, APP_ID, APP_CODE, "now")
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/pedestrian_response.json")
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": origin.split(",")[0],
"origin_longitude": origin.split(",")[1],
"destination_latitude": destination.split(",")[0],
"destination_longitude": destination.split(",")[1],
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_PEDESTRIAN,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert sensor.state == "211"
assert sensor.attributes.get("unit_of_measurement") == UNIT_OF_MEASUREMENT
assert sensor.attributes.get(ATTR_ATTRIBUTION) is None
assert sensor.attributes.get(ATTR_DURATION) == 210.51666666666668
assert sensor.attributes.get(ATTR_DISTANCE) == 12.533
assert sensor.attributes.get(ATTR_ROUTE) == (
"Mannheim Rd; W Belmont Ave; Cullerton St; E Fullerton Ave; "
"La Porte Ave; E Palmer Ave; N Railroad Ave; W North Ave; "
"E North Ave; E Third St"
)
assert sensor.attributes.get(CONF_UNIT_SYSTEM) == "metric"
assert sensor.attributes.get(ATTR_DURATION_IN_TRAFFIC) == 210.51666666666668
assert sensor.attributes.get(ATTR_ORIGIN) == origin
assert sensor.attributes.get(ATTR_DESTINATION) == destination
assert sensor.attributes.get(ATTR_ORIGIN_NAME) == "Mannheim Rd"
assert sensor.attributes.get(ATTR_DESTINATION_NAME) == ""
assert sensor.attributes.get(CONF_MODE) == TRAVEL_MODE_PEDESTRIAN
assert sensor.attributes.get(CONF_TRAFFIC_MODE) is False
assert sensor.attributes.get(ATTR_ICON) == ICON_PEDESTRIAN
async def test_bicycle(hass, requests_mock_credentials_check):
"""Test that bicycle works."""
origin = "41.9798,-87.8801"
destination = "41.9043,-87.9216"
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_BICYCLE, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(origin, destination, modes, APP_ID, APP_CODE, "now")
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/bike_response.json")
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": origin.split(",")[0],
"origin_longitude": origin.split(",")[1],
"destination_latitude": destination.split(",")[0],
"destination_longitude": destination.split(",")[1],
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_BICYCLE,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert sensor.state == "55"
assert sensor.attributes.get("unit_of_measurement") == UNIT_OF_MEASUREMENT
assert sensor.attributes.get(ATTR_ATTRIBUTION) is None
assert sensor.attributes.get(ATTR_DURATION) == 54.86666666666667
assert sensor.attributes.get(ATTR_DISTANCE) == 12.613
assert sensor.attributes.get(ATTR_ROUTE) == (
"Mannheim Rd; W Belmont Ave; Cullerton St; N Landen Dr; "
"E Fullerton Ave; N Wolf Rd; W North Ave; N Clinton Ave; "
"E Third St; N Caroline Ave"
)
assert sensor.attributes.get(CONF_UNIT_SYSTEM) == "metric"
assert sensor.attributes.get(ATTR_DURATION_IN_TRAFFIC) == 54.86666666666667
assert sensor.attributes.get(ATTR_ORIGIN) == origin
assert sensor.attributes.get(ATTR_DESTINATION) == destination
assert sensor.attributes.get(ATTR_ORIGIN_NAME) == "Mannheim Rd"
assert sensor.attributes.get(ATTR_DESTINATION_NAME) == ""
assert sensor.attributes.get(CONF_MODE) == TRAVEL_MODE_BICYCLE
assert sensor.attributes.get(CONF_TRAFFIC_MODE) is False
assert sensor.attributes.get(ATTR_ICON) == ICON_BICYCLE
async def test_location_zone(hass, requests_mock_truck_response):
"""Test that origin/destination supplied by a zone works."""
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch("homeassistant.util.dt.utcnow", return_value=utcnow):
zone_config = {
"zone": [
{
"name": "Destination",
"latitude": TRUCK_DESTINATION_LATITUDE,
"longitude": TRUCK_DESTINATION_LONGITUDE,
"radius": 250,
"passive": False,
},
{
"name": "Origin",
"latitude": TRUCK_ORIGIN_LATITUDE,
"longitude": TRUCK_ORIGIN_LONGITUDE,
"radius": 250,
"passive": False,
},
]
}
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_entity_id": "zone.origin",
"destination_entity_id": "zone.destination",
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_TRUCK,
}
}
assert await async_setup_component(hass, "zone", zone_config)
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
_assert_truck_sensor(sensor)
# Test that update works more than once
async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
_assert_truck_sensor(sensor)
async def test_location_sensor(hass, requests_mock_truck_response):
"""Test that origin/destination supplied by a sensor works."""
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch("homeassistant.util.dt.utcnow", return_value=utcnow):
hass.states.async_set(
"sensor.origin", ",".join([TRUCK_ORIGIN_LATITUDE, TRUCK_ORIGIN_LONGITUDE])
)
hass.states.async_set(
"sensor.destination",
",".join([TRUCK_DESTINATION_LATITUDE, TRUCK_DESTINATION_LONGITUDE]),
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_entity_id": "sensor.origin",
"destination_entity_id": "sensor.destination",
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_TRUCK,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
_assert_truck_sensor(sensor)
# Test that update works more than once
async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
_assert_truck_sensor(sensor)
async def test_location_person(hass, requests_mock_truck_response):
"""Test that origin/destination supplied by a person works."""
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch("homeassistant.util.dt.utcnow", return_value=utcnow):
hass.states.async_set(
"person.origin",
"unknown",
{
"latitude": float(TRUCK_ORIGIN_LATITUDE),
"longitude": float(TRUCK_ORIGIN_LONGITUDE),
},
)
hass.states.async_set(
"person.destination",
"unknown",
{
"latitude": float(TRUCK_DESTINATION_LATITUDE),
"longitude": float(TRUCK_DESTINATION_LONGITUDE),
},
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_entity_id": "person.origin",
"destination_entity_id": "person.destination",
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_TRUCK,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
_assert_truck_sensor(sensor)
# Test that update works more than once
async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
_assert_truck_sensor(sensor)
async def test_location_device_tracker(hass, requests_mock_truck_response):
"""Test that origin/destination supplied by a device_tracker works."""
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch("homeassistant.util.dt.utcnow", return_value=utcnow):
hass.states.async_set(
"device_tracker.origin",
"unknown",
{
"latitude": float(TRUCK_ORIGIN_LATITUDE),
"longitude": float(TRUCK_ORIGIN_LONGITUDE),
},
)
hass.states.async_set(
"device_tracker.destination",
"unknown",
{
"latitude": float(TRUCK_DESTINATION_LATITUDE),
"longitude": float(TRUCK_DESTINATION_LONGITUDE),
},
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_entity_id": "device_tracker.origin",
"destination_entity_id": "device_tracker.destination",
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_TRUCK,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
_assert_truck_sensor(sensor)
# Test that update works more than once
async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
_assert_truck_sensor(sensor)
async def test_location_device_tracker_added_after_update(
hass, requests_mock_truck_response, caplog
):
"""Test that device_tracker added after first update works."""
caplog.set_level(logging.ERROR)
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch("homeassistant.util.dt.utcnow", return_value=utcnow):
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_entity_id": "device_tracker.origin",
"destination_entity_id": "device_tracker.destination",
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_TRUCK,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert len(caplog.records) == 2
assert "Unable to find entity" in caplog.text
caplog.clear()
# Device tracker appear after first update
hass.states.async_set(
"device_tracker.origin",
"unknown",
{
"latitude": float(TRUCK_ORIGIN_LATITUDE),
"longitude": float(TRUCK_ORIGIN_LONGITUDE),
},
)
hass.states.async_set(
"device_tracker.destination",
"unknown",
{
"latitude": float(TRUCK_DESTINATION_LATITUDE),
"longitude": float(TRUCK_DESTINATION_LONGITUDE),
},
)
# Test that update works more than once
async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
_assert_truck_sensor(sensor)
assert len(caplog.records) == 0
async def test_location_device_tracker_in_zone(
hass, requests_mock_truck_response, caplog
):
"""Test that device_tracker in zone uses device_tracker state works."""
caplog.set_level(logging.DEBUG)
zone_config = {
"zone": [
{
"name": "Origin",
"latitude": TRUCK_ORIGIN_LATITUDE,
"longitude": TRUCK_ORIGIN_LONGITUDE,
"radius": 250,
"passive": False,
}
]
}
assert await async_setup_component(hass, "zone", zone_config)
hass.states.async_set(
"device_tracker.origin", "origin", {"latitude": None, "longitude": None}
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_entity_id": "device_tracker.origin",
"destination_latitude": TRUCK_DESTINATION_LATITUDE,
"destination_longitude": TRUCK_DESTINATION_LONGITUDE,
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_TRUCK,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
_assert_truck_sensor(sensor)
assert ", getting zone location" in caplog.text
async def test_route_not_found(hass, requests_mock_credentials_check, caplog):
"""Test that route not found error is correctly handled."""
caplog.set_level(logging.ERROR)
origin = "52.516,13.3779"
destination = "47.013399,-10.171986"
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_CAR, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(origin, destination, modes, APP_ID, APP_CODE, "now")
requests_mock_credentials_check.get(
response_url,
text=load_fixture("here_travel_time/routing_error_no_route_found.json"),
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": origin.split(",")[0],
"origin_longitude": origin.split(",")[1],
"destination_latitude": destination.split(",")[0],
"destination_longitude": destination.split(",")[1],
"app_id": APP_ID,
"app_code": APP_CODE,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert len(caplog.records) == 1
assert NO_ROUTE_ERROR_MESSAGE in caplog.text
async def test_pattern_origin(hass, caplog):
"""Test that pattern matching the origin works."""
caplog.set_level(logging.ERROR)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": "138.90",
"origin_longitude": "-77.04833",
"destination_latitude": CAR_DESTINATION_LATITUDE,
"destination_longitude": CAR_DESTINATION_LONGITUDE,
"app_id": APP_ID,
"app_code": APP_CODE,
}
}
assert await async_setup_component(hass, DOMAIN, config)
assert len(caplog.records) == 1
assert "invalid latitude" in caplog.text
async def test_pattern_destination(hass, caplog):
"""Test that pattern matching the destination works."""
caplog.set_level(logging.ERROR)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": CAR_ORIGIN_LATITUDE,
"origin_longitude": CAR_ORIGIN_LONGITUDE,
"destination_latitude": "139.0",
"destination_longitude": "-77.1",
"app_id": APP_ID,
"app_code": APP_CODE,
}
}
assert await async_setup_component(hass, DOMAIN, config)
assert len(caplog.records) == 1
assert "invalid latitude" in caplog.text
async def test_invalid_credentials(hass, requests_mock, caplog):
"""Test that invalid credentials error is correctly handled."""
caplog.set_level(logging.ERROR)
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_CAR, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(
",".join([CAR_ORIGIN_LATITUDE, CAR_ORIGIN_LONGITUDE]),
",".join([CAR_DESTINATION_LATITUDE, CAR_DESTINATION_LONGITUDE]),
modes,
APP_ID,
APP_CODE,
"now",
)
requests_mock.get(
response_url,
text=load_fixture("here_travel_time/routing_error_invalid_credentials.json"),
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": CAR_ORIGIN_LATITUDE,
"origin_longitude": CAR_ORIGIN_LONGITUDE,
"destination_latitude": CAR_DESTINATION_LATITUDE,
"destination_longitude": CAR_DESTINATION_LONGITUDE,
"app_id": APP_ID,
"app_code": APP_CODE,
}
}
assert await async_setup_component(hass, DOMAIN, config)
assert len(caplog.records) == 1
assert "Invalid credentials" in caplog.text
async def test_attribution(hass, requests_mock_credentials_check):
"""Test that attributions are correctly displayed."""
origin = "50.037751372637686,14.39233448220898"
destination = "50.07993838201255,14.42582157361062"
modes = [ROUTE_MODE_SHORTEST, TRAVEL_MODE_PUBLIC_TIME_TABLE, TRAFFIC_MODE_ENABLED]
response_url = _build_mock_url(origin, destination, modes, APP_ID, APP_CODE, "now")
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/attribution_response.json")
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": origin.split(",")[0],
"origin_longitude": origin.split(",")[1],
"destination_latitude": destination.split(",")[0],
"destination_longitude": destination.split(",")[1],
"app_id": APP_ID,
"app_code": APP_CODE,
"traffic_mode": True,
"route_mode": ROUTE_MODE_SHORTEST,
"mode": TRAVEL_MODE_PUBLIC_TIME_TABLE,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert (
sensor.attributes.get(ATTR_ATTRIBUTION)
== "With the support of HERE Technologies. All information is provided without warranty of any kind."
)
async def test_pattern_entity_state(hass, requests_mock_truck_response, caplog):
"""Test that pattern matching the state of an entity works."""
caplog.set_level(logging.ERROR)
hass.states.async_set("sensor.origin", "invalid")
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_entity_id": "sensor.origin",
"destination_latitude": TRUCK_DESTINATION_LATITUDE,
"destination_longitude": TRUCK_DESTINATION_LONGITUDE,
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_TRUCK,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert len(caplog.records) == 1
assert "is not a valid set of coordinates" in caplog.text
async def test_pattern_entity_state_with_space(hass, requests_mock_truck_response):
"""Test that pattern matching the state including a space of an entity works."""
hass.states.async_set(
"sensor.origin", ", ".join([TRUCK_ORIGIN_LATITUDE, TRUCK_ORIGIN_LONGITUDE])
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_entity_id": "sensor.origin",
"destination_latitude": TRUCK_DESTINATION_LATITUDE,
"destination_longitude": TRUCK_DESTINATION_LONGITUDE,
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_TRUCK,
}
}
assert await async_setup_component(hass, DOMAIN, config)
async def test_delayed_update(hass, requests_mock_truck_response, caplog):
"""Test that delayed update does not complain about missing entities."""
caplog.set_level(logging.WARNING)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_entity_id": "sensor.origin",
"destination_latitude": TRUCK_DESTINATION_LATITUDE,
"destination_longitude": TRUCK_DESTINATION_LONGITUDE,
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_TRUCK,
}
}
sensor_config = {
"sensor": {
"platform": "template",
"sensors": [
{"template_sensor": {"value_template": "{{states('sensor.origin')}}"}}
],
}
}
assert await async_setup_component(hass, DOMAIN, config)
assert await async_setup_component(hass, "sensor", sensor_config)
hass.states.async_set(
"sensor.origin", ",".join([TRUCK_ORIGIN_LATITUDE, TRUCK_ORIGIN_LONGITUDE])
)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert "Unable to find entity" not in caplog.text
``` |
{
"source": "jooray/bitcoin-20percent-simulator",
"score": 3
} |
#### File: jooray/bitcoin-20percent-simulator/bitcoin-fund.py
```python
import json
import random
import numpy as np
import matplotlib.pyplot as plt
# parameters
payout_percentage_trigger = 0.15 # trigger for payout
payout_percentage = 0.3 # what percentage is the payout
investment_window = 24 # months
invest_every_days = 7 # 28 = monthly investing, 2 = every other day, 7 = weekly
base_currency_monthly_investments = 100 # all the monthly investments combined
# price functions
# just a sample pricing
def get_linear_price(day):
return (day + 1) * 2
# real prices
historical_prices = []
start_day = 0
def load_historical_prices():
global historical_prices, start_day
with open('bitcoin-price-data.json') as data_file:
historical_prices = json.load(data_file)
if start_day == -1:
        start_day = random.randint(0, len(historical_prices) - (investment_window * 28))
del historical_prices[:start_day]
def get_historical_price(day):
global historical_prices
if len(historical_prices)==0:
load_historical_prices()
return historical_prices[day]
def get_bitcoin_price(day):
return get_historical_price(day)
# investment modelling
investments = []
invested_base = 0.0
withdrew_base = 0.0
def reset_investments():
global investments, invested_base, withdrew_base
investments = []
invested_base = 0.0
withdrew_base = 0.0
# invest sum in base currency (usd)
def invest(base_amount, day, record_base = True):
global invested_base, investments
investments.append(
{"day": day,
"base_amount": base_amount,
"bitcoin_amount": base_amount / get_bitcoin_price(day)})
if (record_base):
invested_base = invested_base + base_amount
def get_portfolio_invested_base_value(day=0):
base_value = 0
for investment in investments:
if (day == 0) or (investment['day'] <= day):
base_value = base_value + investment['base_amount']
return base_value
def get_portfolio_current_base_value(day):
base_value = 0
for investment in investments:
base_value = base_value + (investment['bitcoin_amount'] * get_bitcoin_price(day))
return base_value
def get_portfolio_bitcoin_value(day=0):
bitcoin_value = 0
for investment in investments:
if (day == 0) or (investment['day'] <= day):
bitcoin_value = bitcoin_value + investment['bitcoin_amount']
return bitcoin_value
# lazy man's implementation - all the investments will be replaced by one investment
def withdraw_base(base_amount, day):
global withdrew_base, investments
new_base = (get_portfolio_current_base_value(day) - base_amount)
investments = []
invest(new_base, day, False)
withdrew_base = withdrew_base + base_amount
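# Hedged sketch (added for illustration, not part of the original script): a
# tiny self-check of the lazy withdrawal bookkeeping using the linear price
# model. The helper name `_demo_withdraw` is ours and is never called by the
# simulation below.
def _demo_withdraw():
    global historical_prices
    historical_prices = [get_linear_price(d) for d in range(10)]  # prices 2..20
    reset_investments()
    invest(100.0, 0)        # buys 50 BTC at price 2
    withdraw_base(30.0, 4)  # portfolio is worth 500 USD at price 10
    assert withdrew_base == 30.0
    assert abs(get_portfolio_current_base_value(4) - 470.0) < 1e-9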
def print_investment_data(day):
print(" Withdrew: " + str(withdrew_base))
print(" Current value: " + str(get_portfolio_current_base_value(day)))
print(" Withdrawals+current value: " + str(withdrew_base + get_portfolio_current_base_value(day)))
print(" Invested: " + str(invested_base))
g_withdraws = []
g_current_with_withdraws = []
g_just_invest = []
load_historical_prices()
###################################################################################################################
# The range start below controls the first simulated start day; lower it to 0
# to cover the full history, or raise it (e.g. 400 or 500) to zoom in
for i in range(500, len(historical_prices)-(investment_window*28), 1):
start_day = i
load_historical_prices()
###############################################################################################################
#print("Investment strategy with withdrawals:")
reset_investments()
for month in range(investment_window):
for day in range(28):
if day % invest_every_days == 0:
invest((base_currency_monthly_investments*invest_every_days/28), day+(month*28))
if (get_portfolio_current_base_value((month*28+day)) >= (get_portfolio_invested_base_value() * (payout_percentage_trigger+1))):
#withdraw_base(get_portfolio_current_base_value((month*28)+day) * 0.2, (month*28)+day)
withdraw_base(get_portfolio_current_base_value((month*28)+day) * payout_percentage, (month*28)+day)
#print_investment_data(investment_window*28)
g_withdraws.append(withdrew_base)
g_current_with_withdraws.append(withdrew_base + get_portfolio_current_base_value(investment_window*28))
###############################################################################################################
#print("Investment strategy without any withdrawals (just buying bitcoin)")
reset_investments()
for month in range(investment_window):
for day in range(28):
if day % invest_every_days == 0:
invest((base_currency_monthly_investments*invest_every_days/28), day+(month*28))
#print_investment_data(investment_window*28)
g_just_invest.append(get_portfolio_current_base_value(investment_window*28))
bad_start_days = 0
for i in g_just_invest:
if i<invested_base:
bad_start_days = bad_start_days + 1
print('Percentage of bad days to start the just invest strategy: ' + str((bad_start_days * 100) / len(g_just_invest)))
plt.plot(g_current_with_withdraws, 'r')
plt.plot(g_withdraws, 'g')
plt.plot(g_just_invest, 'b')
plt.plot((0,len(g_withdraws)), (invested_base, invested_base), 'y')
plt.xlabel('starting day')
plt.ylabel('money (usd)')
plt.show()
``` |
{
"source": "jooray/lightning",
"score": 3
} |
#### File: tests/plugins/dblog.py
```python
from lightning import Plugin, RpcError
import sqlite3
plugin = Plugin()
plugin.sqlite_pre_init_cmds = []
plugin.initted = False
@plugin.init()
def init(configuration, options, plugin):
if not plugin.get_option('dblog-file'):
raise RpcError("No dblog-file specified")
plugin.conn = sqlite3.connect(plugin.get_option('dblog-file'),
isolation_level=None)
plugin.log("replaying pre-init data:")
for c in plugin.sqlite_pre_init_cmds:
plugin.conn.execute(c)
plugin.log("{}".format(c))
plugin.initted = True
plugin.log("initialized")
@plugin.hook('db_write')
def db_write(plugin, writes):
if not plugin.initted:
plugin.log("deferring {} commands".format(len(writes)))
plugin.sqlite_pre_init_cmds += writes
else:
for c in writes:
plugin.conn.execute(c)
plugin.log("{}".format(c))
return True
plugin.add_option('dblog-file', None, 'The db file to create.')
plugin.run()
```
#### File: tests/plugins/fail_htlcs.py
```python
from lightning import Plugin
plugin = Plugin()
@plugin.hook("htlc_accepted")
def on_htlc_accepted(htlc, onion, plugin):
plugin.log("Failing htlc on purpose")
plugin.log("onion: %r" % (onion))
return {"result": "fail", "failure_code": 16399}
plugin.run()
```
#### File: tests/plugins/reject_odd_funding_amounts.py
```python
from lightning import Plugin, Millisatoshi
plugin = Plugin()
@plugin.hook('openchannel')
def on_openchannel(openchannel, plugin):
print("{} VARS".format(len(openchannel.keys())))
for k in sorted(openchannel.keys()):
print("{}={}".format(k, openchannel[k]))
if Millisatoshi(openchannel['funding_satoshis']).to_satoshi() % 2 == 1:
return {'result': 'reject', 'error_message': "I don't like odd amounts"}
return {'result': 'continue'}
plugin.run()
``` |
{
"source": "joordamn/CellESignal",
"score": 2
} |
#### File: CellESignal/data_process/data_explore.py
```python
import json
import numpy as np
import matplotlib.pyplot as plt
import random
from tqdm import tqdm
import os, sys, shutil
sys.path.append("..")
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
os.chdir(sys.path[-1])
def data_split(data:np.ndarray, loc_list:list, save_path:str, split_len=150, plot=False):
"""根据loc的位置去前后截取raw_data的数据
"""
label = {
"code": "",
"label": 0,
"number of peaks": 0,
"peaks' labels": [],
"borders": [],
"description": "",
"rt":[],
"scan": [],
"intensity": [],
"mz": [],
}
for i, loc in tqdm(enumerate(loc_list[1000:1500])):
        # extract the data slice
        # randomly shift loc back and forth so the peak is not centered in the slice
loc += random.randint(int(-1 * 1/3 * split_len), int(1/3 * split_len))
data_slice = data[loc - split_len: loc + split_len].tolist()
        # fill in the json label content
json_save_name = save_path + "peak_sample_" + str(i).zfill(4)
json_file = json_save_name + ".json"
label["code"] = "data slice NO_" + str(i).zfill(4)
label["intensity"] = data_slice
label["rt"] = [loc - split_len, loc + split_len]
label["mz"] = data_slice
with open(json_file, mode="w", encoding="utf-8") as jf:
json.dump(label, jf)
# plot
if plot:
plt.figure()
plt.plot(data_slice)
fig_save_path = save_path + "/fig/"
if not os.path.exists(fig_save_path):
os.makedirs(fig_save_path)
plt.savefig(fig_save_path + "peak_sample_" + str(i).zfill(4) + ".jpg")
plt.close("all")
if __name__ == "__main__":
    raw_data_file = "./rawData.csv"  # raw data
    raw_peak_loc_file = "./raw_data_loc.txt"  # peak locations in the raw data
save_path = "./peak_data/"
split_len = 50
raw_data = np.genfromtxt(raw_data_file, delimiter=",")
with open(raw_peak_loc_file, mode='r', encoding='utf-8') as f:
lines = f.readlines()
loc = []
for line in lines:
loc.append(int(line))
try:
shutil.rmtree(save_path)
except:
pass
if not os.path.exists(save_path):
os.makedirs(save_path)
data_split(data=raw_data, loc_list=loc, save_path=save_path, split_len=split_len, plot=True)
```
#### File: CellESignal/data_process/data_explore_v2.py
```python
import os, sys, shutil
sys.path.append("..")
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
os.chdir(sys.path[-1])
import json
import numpy as np
import matplotlib.pyplot as plt
import random
from tqdm import tqdm
from scipy.signal import find_peaks
from utils.utils import read_from_txt
def peak_finding(signal_data: np.ndarray):
"""找出峰值点及其坐标
"""
loc, _ = find_peaks(signal_data, prominence=0.00005)
loc = np.squeeze(loc).tolist()
return loc
def data_split(data:np.ndarray, loc_list:list, save_path:str, split_len=150, plot=False):
"""根据loc的位置去前后截取raw_data的数据
"""
label = {
"code": "",
"label": 0,
"number of peaks": 0,
"peaks' labels": [],
"borders": [],
"description": "",
"rt":[],
"scan": [],
"intensity": [],
"mz": [],
}
for i, loc in tqdm(enumerate(loc_list)):
        # extract the data slice
        # randomly shift loc back and forth so the peak is not centered in the slice
loc += random.randint(int(-1 * 1/3 * split_len), int(1/3 * split_len))
data_slice = data[loc - split_len: loc + split_len].tolist()
        # fill in the json label content
json_save_name = save_path + "peak_sample_" + str(i).zfill(4)
json_file = json_save_name + ".json"
label["code"] = "data slice NO_" + str(i).zfill(4)
label["intensity"] = data_slice
label["rt"] = [loc - split_len, loc + split_len]
label["mz"] = data_slice
with open(json_file, mode="w", encoding="utf-8") as jf:
json.dump(label, jf)
# plot
if plot:
plt.figure()
plt.plot(data_slice)
fig_save_path = save_path + "/fig/"
if not os.path.exists(fig_save_path):
os.makedirs(fig_save_path)
plt.savefig(fig_save_path + "peak_sample_" + str(i).zfill(4) + ".jpg")
plt.close("all")
if __name__ == "__main__":
    raw_data_file = r"../data\data_collection_20220115\txt_data\150dB_1V_highspeed.txt"  # raw data
    # raw_peak_loc_file = "./raw_data_loc.txt"  # peak locations in the raw data
save_path = r"../data/data_collection_20220115/peak_data_03/"
split_len = 100
raw_data, _ = read_from_txt(raw_data_file)
raw_data = np.array(raw_data, dtype=np.float32)
loc = peak_finding(raw_data)
print(len(loc))
# plt.figure()
# plt.plot(loc, raw_data[loc], "xr")
# plt.plot(raw_data)
# plt.show()
try:
shutil.rmtree(save_path)
except:
pass
if not os.path.exists(save_path):
os.makedirs(save_path)
data_split(data=raw_data, loc_list=loc, save_path=save_path, split_len=split_len, plot=True)
```
#### File: CellESignal/dataset/dataset.py
```python
import os
import json
from copy import deepcopy
import numpy as np
import torch
from torch.utils.data import Dataset
from scipy.interpolate import interp1d
# to do: Reflection should take a ROI (dict)
class Reflection:
"""
class that just reflects any signal
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, signal):
if np.random.choice([True, False], p=[self.p, 1 - self.p]):
signal = signal[::-1]
return signal
class ROIDataset(Dataset):
"""
A dataset for a training
"""
def __init__(self, path, device, interpolate=False, adaptive_interpolate=False,
length=None, augmentations=None, balanced=False, return_roi_code=False,
model_type="classification"):
"""
:param path: a path to annotated ROIs
:param device: a device where training will occur (GPU / CPU)
:param interpolate: bool, if interpolation is needed
:param adaptive_interpolate: to do: add interpolation to the closest power of 2
:param length: only needed if 'interpolate' is True
        :param augmentations: roi augmentations
:param balanced: bool, noise and peaks are returned 50/50
:param return_roi_code: explicitly return the code of the roi
"""
super().__init__()
self.balanced = balanced
self.device = device
self.data = {0: [], 1: []} # a dict from label2roi
self.interpolate = interpolate
        self.adaptive_interpolate = adaptive_interpolate
self.length = length
self.return_roi_code = return_roi_code
self.model_type = model_type
for file in os.listdir(path):
if file[0] != '.':
with open(os.path.join(path, file)) as json_file:
roi = json.load(json_file)
roi['intensity'] = np.array(roi['intensity'])
roi['borders'] = np.array(roi['borders'])
if self.interpolate:
roi = self._interpolate(roi)
self.data[roi['label']].append(roi)
self.augmentations = [] if augmentations is None else augmentations
def __len__(self):
if self.balanced:
return min(len(self.data[0]), len(self.data[1]))
else:
return len(self.data[0]) + len(self.data[1])
@staticmethod
def _get_mask(roi):
integration_mask = np.zeros_like(roi['intensity'])
if roi['number of peaks'] >= 1:
for b, e in roi['borders']:
integration_mask[int(b):int(e)] = 1
intersection_mask = np.zeros_like(roi['intensity'])
if roi['number of peaks'] >= 2:
for e, b in zip(roi['borders'][:-1, 1], roi['borders'][1:, 0]):
if b - e > 5:
intersection_mask[e + 1:b] = 1
else:
intersection_mask[e - 1:b + 2] = 1
return integration_mask, intersection_mask
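    # Hedged worked example (ours, not from the original repo): for an ROI of
    # length 12 with borders [[0, 2], [8, 10]], _get_mask returns
    #   integration_mask  = [1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0]
    #   intersection_mask = [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0]
    # (the gap 8 - 2 = 6 > 5, so the intersection spans indices e + 1 .. b - 1).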
def _interpolate(self, roi):
roi = deepcopy(roi)
points = len(roi['intensity'])
interpolate = interp1d(np.arange(points), roi['intensity'], kind='linear')
roi['intensity'] = interpolate(np.arange(self.length) / (self.length - 1.) * (points - 1.))
roi['borders'] = np.array(roi['borders'])
roi['borders'] = roi['borders'] * (self.length - 1) // (points - 1)
return roi
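    # Hedged worked example (ours): with length=256 and an ROI of 100 points, a
    # border index of 50 is rescaled to 50 * (256 - 1) // (100 - 1) = 128, so
    # borders keep their relative position on the interpolated grid.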
def __getitem__(self, idx):
if self.balanced:
roi = np.random.choice(self.data[idx % 2])
else:
roi = self.data[0][idx] if idx < len(self.data[0]) else self.data[1][idx - len(self.data[0])]
for aug in self.augmentations:
roi = deepcopy(roi)
roi = aug(roi)
x = roi['intensity']
x = torch.tensor(x, dtype=torch.float32, device=self.device).view(1, -1)
x = x / torch.max(x)
y = torch.tensor(roi['label'], dtype=torch.long, device=self.device)
integration_mask, intersection_mask = self._get_mask(roi)
integration_mask = torch.tensor(integration_mask, dtype=torch.float32, device=self.device)
intersection_mask = torch.tensor(intersection_mask, dtype=torch.float32, device=self.device)
if self.return_roi_code:
original_length = len(roi['mz'])
return x, y, integration_mask, intersection_mask, roi['code'], original_length
if self.model_type == "classification":
return x, y
elif self.model_type == "segment":
return x, integration_mask
else:
raise TypeError("incorrect model type: classification or segment")
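# Hedged usage sketch (added for illustration; the "./peak_data" path and the
# batch size are assumptions, not taken from the original repo).
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    dataset = ROIDataset("./peak_data", device=torch.device("cpu"),
                         interpolate=True, length=256, balanced=True,
                         model_type="classification")
    loader = DataLoader(dataset, batch_size=32, shuffle=True)
    for x, y in loader:  # x: (batch, 1, 256) normalized signals, y: 0/1 labels
        print(x.shape, y.shape)
        break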
```
#### File: CellESignal/tools/infer.py
```python
import json, os, sys
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
from scipy.interpolate import interp1d
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
sys.path.insert(0, parent_path)
from models.cnn_classifier import Classifier
from models.cnn_segmentator import Segmentator
from utils.utils import read_from_txt
class PeakDetector:
def __init__(self, classifier_weight, segmentator_weight, device, interpolate_length=256):
self.device = device
self.interpolate_length = interpolate_length
# load classifier
self.classifier = Classifier().to(self.device)
self.classifier.load_state_dict(torch.load(classifier_weight, map_location=self.device))
self.classifier.eval()
# load segmentor
self.segmentator = Segmentator().to(self.device)
self.segmentator.load_state_dict(torch.load(segmentator_weight, map_location=self.device))
self.segmentator.eval()
def __call__(self, raw_data: np.ndarray):
signal = self._preprocess(raw_data, interpolate=True, length=self.interpolate_length)
# model inference
        class_output = self.classifier(signal)
class_output = class_output.data.cpu().numpy()
# get label
label = np.argmax(class_output)
# peak detected
if label == 1:
seg_output = self.segmentator(signal)
seg_output = seg_output.data.sigmoid().cpu().numpy()
borders = self._get_borders(seg_output[0, 0], interpolation_factor=len(signal[0, 0]) / len(raw_data))
return 1, borders, seg_output[0, 0].tolist()
# no peak detected
else:
return 0, None, None
def _preprocess(self, signal, interpolate=True, length=None):
"""
preprocess the input signal to keep the consistence with training process
"""
if not length:
length = self.interpolate_length
if interpolate:
interpolate = interp1d(np.arange(len(signal)), signal, kind='linear')
signal = interpolate(np.arange(length) / (length - 1) * (len(signal) - 1))
# normalize
signal = torch.tensor(signal / np.max(signal), dtype=torch.float32, device=self.device)
return signal.view(1, 1, -1)
def _get_borders(self, pred_prob, threshold=0.3, interpolation_factor=1, minimum_peak_points=5):
""" post process for the predicted probability and find the peaks
"""
pred_mask = pred_prob > threshold # threshold cut the prediction
borders_roi = []
begin = 0 if pred_mask[0] else -1 # init peak begin point
peak_wide = 1 if pred_mask[0] else 0 # init peak wide
number_of_peaks = 0
for n in range(len(pred_mask) - 1): # loop the mask and analyze the peak
if pred_mask[n + 1] and not pred_mask[n]: # case1: peak begins
begin = n + 1
peak_wide = 1
elif pred_mask[n + 1] and begin != -1: # case2: peak continues
peak_wide += 1
elif not pred_mask[n + 1] and begin != -1: # case3: peak ends
if peak_wide / interpolation_factor > minimum_peak_points:
number_of_peaks += 1
b = int(begin // interpolation_factor)
e = int((n + 2) // interpolation_factor)
borders_roi.append([b, e])
# re-init the begin and peak wide for next peak
begin = -1
peak_wide = 0
# process the non-end peak
        if begin != -1 and peak_wide / interpolation_factor > minimum_peak_points:  # same width test as inside the loop
number_of_peaks += 1
b = int(begin // interpolation_factor)
e = int((n + 2) // interpolation_factor)
borders_roi.append([b, e])
return borders_roi
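# Hedged worked example (ours, not from the original repo): with threshold=0.3,
# interpolation_factor=2.0 and minimum_peak_points=5, a probability mask that
# exceeds the threshold on interpolated indices 20..39 (20 points wide) maps
# back to the raw-signal border [10, 20], since 20 / 2.0 = 10 points > 5.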
def infer_from_json_file(model: PeakDetector, file_path, save_path):
# parse the json file
raw_signals = []
for file in os.listdir(file_path):
if file.endswith('json'):
with open(os.path.join(file_path, file)) as json_file:
roi = json.load(json_file)
raw_signal = roi['intensity'] # signal
code = os.path.splitext(file)[0] # signal title
raw_index = roi['rt'] # signal index in the raw data
raw_signals.append([code, raw_index, raw_signal])
# infer and post process
figure = plt.figure()
for data in tqdm(raw_signals):
signal_code, signal_index, signal_intensity = data
label, borders, _ = model(np.array(signal_intensity))
# plot
figure.clear()
ax = figure.add_subplot(111)
# ax.plot(range(signal_index[0], signal_index[1]), signal_intensity, label=signal_code)
ax.plot(signal_intensity, label=signal_code)
title = "predicted border for {}".format(signal_code)
if borders:
for border in borders:
begin, end = border
ax.fill_between(range(begin, end + 1), y1=signal_intensity[begin:end + 1], y2=min(signal_intensity),
alpha=0.5)
ax.set_title(title)
ax.legend(loc='best')
plt.savefig(save_path + '/{}.jpg'.format(signal_code))
def infer_from_txt_file(model: PeakDetector, file_path, save_path):
full_signal, _ = read_from_txt(file_path)
signalLen = len(full_signal)
startPos = 0
sliceLen = 200
figure = plt.figure()
pbar = tqdm(total=signalLen)
while startPos <= signalLen - sliceLen:
signalSlice = np.array(full_signal[startPos: startPos + sliceLen], dtype=np.float32)
label, borders, _ = model(signalSlice)
if label == 1:
# plot
figure.clear()
signal_code = str(startPos)
ax = figure.add_subplot(111)
ax.plot(signalSlice, label=signal_code)
title = "predicted border for {}".format(signal_code)
if borders:
for border in borders:
begin, end = border
ax.fill_between(range(begin, end + 1), y1=signalSlice[begin:end + 1], y2=min(signalSlice),
alpha=0.5)
ax.set_title(title)
ax.legend(loc='best')
plt.savefig(save_path + '/{}.jpg'.format(signal_code))
startPos += sliceLen
pbar.update(sliceLen)
pbar.close()
if __name__ == "__main__":
# parameters
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
classifierModelPath = "../data/weights/2022_0302/Classifier"
segmentatorModelPath = "../data/weights/2022_0302/Segmentator"
dataRoot = "../data/raw_txt_data/MutiChannel_MDA_ANTIBODY_COATING_2022_02_27/"
    # input file name
fileName = "1_05psi_noGravity_meas_plotter_20220227_185626.txt"
filePath = os.path.join(dataRoot, fileName)
savePath = os.path.join(dataRoot, os.path.splitext(fileName)[0])
if not os.path.exists(savePath):
os.makedirs(savePath)
# init the model
peakDetector = PeakDetector(classifierModelPath, segmentatorModelPath, DEVICE)
# infer from file
# infer_from_json_file(peakDetector, filePath, savePath)
infer_from_txt_file(peakDetector, filePath, savePath)
```
#### File: CellESignal/tools/scheduler.py
```python
from math import pi, cos
from torch.optim.optimizer import Optimizer
class CosineWarmupLr(object):
"""Cosine lr decay function with warmup.
Lr warmup is proposed by `
Accurate, Large Minibatch SGD:Training ImageNet in 1 Hour`
`https://arxiv.org/pdf/1706.02677.pdf`
Cosine decay is proposed by `
Stochastic Gradient Descent with Warm Restarts`
`https://arxiv.org/abs/1608.03983`
Args:
optimizer (Optimizer): optimizer of a model.
batches (int): batches of one epoch.
max_epochs (int): max_epochs to train.
base_lr (float): init lr.
final_lr (float): minimum(final) lr.
warmup_epochs (int): warmup max_epochs before cosine decay.
warmup_init_lr (float): warmup starting lr.
last_iter (int): init iteration.
Attributes:
niters (int): number of iterations of all max_epochs.
warmup_iters (int): number of iterations of all warmup max_epochs.
"""
def __init__(self, optimizer, batches, max_epochs, base_lr, final_lr=0,
warmup_epochs=0, warmup_init_lr=0, last_iter=-1):
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(type(optimizer).__name__))
self.optimizer = optimizer
if last_iter == -1:
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
last_iter = 0
else:
for i, group in enumerate(optimizer.param_groups):
if 'initial_lr' not in group:
raise KeyError("param 'initial_lr' is not specified "
"in param_groups[{}] when resuming an optimizer".format(i))
self.baselr = base_lr
self.learning_rate = base_lr
self.niters = max_epochs * batches
self.targetlr = final_lr
self.warmup_iters = batches * warmup_epochs
self.warmup_init_lr = warmup_init_lr
self.last_iter = last_iter
self.step()
def get_lr(self):
if self.last_iter < self.warmup_iters:
self.learning_rate = self.warmup_init_lr + \
(self.baselr - self.warmup_init_lr) * self.last_iter / self.warmup_iters
else:
self.learning_rate = self.targetlr + (self.baselr - self.targetlr) * \
(1 + cos(pi * (self.last_iter - self.warmup_iters) /
(self.niters - self.warmup_iters))) / 2
def step(self, iteration=None):
"""Update status of lr.
Args:
iteration(int, optional): now training iteration of all max_epochs.
Normally need not to set it manually.
"""
if iteration is None:
iteration = self.last_iter + 1
self.last_iter = iteration
self.get_lr()
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.learning_rate
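# Hedged usage sketch (added for illustration; the model and optimizer setup
# are assumptions, not from the original repo). The scheduler is stepped once
# per batch, so `batches` should equal len(train_loader).
if __name__ == "__main__":
    import torch
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = CosineWarmupLr(optimizer, batches=100, max_epochs=10,
                               base_lr=0.1, final_lr=1e-4,
                               warmup_epochs=1, warmup_init_lr=1e-3)
    for _ in range(100 * 10):
        optimizer.step()
        scheduler.step()
    print(scheduler.learning_rate)  # approaches final_lr after the cosine decay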
```
#### File: CellESignal/utils/utils.py
```python
import time, os, json
import logging
from datetime import datetime
from tqdm import tqdm
import matplotlib.pyplot as plt
plt.switch_backend('agg')
def read_from_txt(file_path):
"""read data from txt file
signal content:
% ...
% ...
float; float
...
border content:
int \t int
int \t int
...
Args:
file_path (str): txt file path
Returns:
data (list): signal data(float) or border data(int)
"""
with open(file_path, mode='r', encoding='utf-8') as f:
flag = ""
data = []
lines = f.readlines()
if lines[0].startswith("%"):
del lines[0:5]
if ";" in lines[5]:
            # signal data is returned in this case
flag = "signal"
for line in lines:
tar = line.split(";")[1]
data.append(float(tar))
else:
            # border data is returned in this case
flag = "border"
for line in lines:
begin, end = line.split("\t")
data.append([int(begin), int(end)])
return data, flag
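# Hedged example (ours): a signal file whose data lines look like
#   "0.001; 0.532"
# yields read_from_txt(path) == ([0.532, ...], "signal"), while a border file
# with lines like "12\t47" yields ([[12, 47], ...], "border").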
def save_and_plot(save_folder:str, raw_signal:list, borders:list, pred_prob:list, timestamp:list, count:int, plot_online=True):
    # TODO: handle saving and plotting in a separate thread so the consumer thread is not blocked
now = datetime.now()
date = now.strftime("%Y_%m_%d")
capture_time = now.strftime("%H_%M_%S_%f")
signal_number = str(count).zfill(5)
signal_code = "signal_" + signal_number + "_" + capture_time
# plot
if plot_online:
figure = plt.figure()
ax = figure.add_subplot(111)
ax.plot(raw_signal, label=signal_code)
title = "signal NO.{}, capture time:{}".format(signal_number, capture_time)
if borders:
for border in borders:
begin, end = border
ax.fill_between(range(begin, end + 1), y1=raw_signal[begin:end + 1], y2=min(raw_signal), alpha=0.5)
ax.set_title(title)
ax.legend(loc='best')
plt.savefig(save_folder + '/{}.jpg'.format(signal_code))
plt.close("all")
# save in json
save_content = {
"signal_number": signal_number,
"signal_code": signal_code,
"raw_signal": raw_signal,
"timestamp": timestamp,
"borders": borders,
"pred_prob": pred_prob,
"capture_time": capture_time,
}
with open(save_folder + "/{}.json".format(signal_code), mode="w", encoding="utf-8") as f:
json.dump(save_content, f, indent=4)
def post_plot(json_folder):
print("start plotting")
figure = plt.figure()
for file in tqdm(os.listdir(json_folder)):
if file.endswith('json'):
with open(os.path.join(json_folder, file), 'r') as json_file:
content = json.load(json_file)
raw_signal = content["raw_signal"]
signal_code = content["signal_code"]
signal_number = content["signal_number"]
borders = content["borders"]
capture_time = content["capture_time"]
figure.clear()
ax = figure.add_subplot(111)
ax.plot(raw_signal, label=signal_code)
title = "signal NO.{}, capture time:{}".format(signal_number, capture_time)
if borders:
for border in borders:
begin, end = border
ax.fill_between(range(begin, end + 1), y1=raw_signal[begin:end + 1], y2=min(raw_signal), alpha=0.5)
ax.set_title(title)
ax.legend(loc='best')
plt.savefig(json_folder + '/{}.jpg'.format(signal_code))
def plot_line(train_x, train_y, valid_x, valid_y, mode, out_dir):
"""绘制训练和验证集的loss曲线/acc曲线
Args:
train_x : epoch num range -> x axis of trian figure
train_y : y axis of train figure
valid_x : epoch num range -> x axis of valid figure
valid_y : y axis of valid figure
mode(str) : 'loss' or 'acc'
out_dir : save path of the figure
"""
plt.plot(train_x, train_y, label='Train')
plt.plot(valid_x, valid_y, label='Valid')
plt.ylabel(str(mode))
plt.xlabel('Epoch')
location = 'upper right' if mode == 'loss' else 'upper left'
plt.legend(loc=location)
    plt.title(mode)
plt.savefig(os.path.join(out_dir, mode + '.png'))
plt.close()
def create_folder(exp_path):
now = datetime.now()
folder_name = now.strftime("%H_%M_%S")
date = now.strftime("%Y_%m_%d")
result_save_folder = os.path.join(exp_path, date, folder_name)
if not os.path.exists(result_save_folder):
os.makedirs(result_save_folder)
return result_save_folder
class Logger(object):
def __init__(self, path_log):
log_name = os.path.basename(path_log)
self.log_name = log_name if log_name else "root"
self.out_path = path_log
log_dir = os.path.dirname(self.out_path)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
def init_logger(self):
logger = logging.getLogger(self.log_name)
logger.setLevel(level=logging.INFO)
        # configure the file handler
file_handler = logging.FileHandler(self.out_path, 'w')
file_handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
        # configure the console handler
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
        # attach the handlers
logger.addHandler(file_handler)
logger.addHandler(console_handler)
return logger
def create_logger(log_root="./log"):
log_dir = create_folder(log_root)
path_log = os.path.join(log_dir, "log.log")
logger = Logger(path_log)
logger = logger.init_logger()
return logger, log_dir
def check_data_dir(path_tmp):
assert os.path.exists(path_tmp), \
"\n\n路径不存在,当前变量中指定的路径是:\n{}\n请检查相对路径的设置,或者文件是否存在".format(os.path.abspath(path_tmp))
if __name__ == "__main__":
exp_path = "./exp"
create_folder(exp_path)
create_logger()
``` |
{
"source": "Joorem/AdventOfCode",
"score": 4
} |
#### File: 2020/day04/day04.py
```python
import re
import sys
def str_to_dict(input_list: list) -> list:
"""
Analyze a list of strings and return a list of dict with all found fields
example:
from:
['eyr:2028 iyr:2016 byr:1995 ecl:oth pid:543685203 hcl:#c0946f hgt:152cm cid:252']
to:
[{'byr': '1995', 'iyr': '2016', 'eyr': '2028', 'hgt': '152cm', 'hcl': '#c0946f', 'ecl': 'oth', 'pid': '543685203', 'cid': '252'}]
"""
fields = ('byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 'cid')
list_of_dict = list()
for info in input_list:
tmp_dict = dict()
for field in fields:
search = re.search(rf"{field}:([a-z0-9#]+)", info)
if search:
tmp_dict[field] = search.group(1)
if len(tmp_dict) >= 7:
list_of_dict.append(tmp_dict)
return list_of_dict
def is_valid(f: dict) -> bool:
""" Compare an f to the policy """
BYR_MIN = 1920
BYR_MAX = 2002
ECL_VAL = ("amb", "blu", "brn", "gry", "grn", "hzl", "oth")
IYR_MIN = 2010
IYR_MAX = 2020
EYR_MIN = 2020
EYR_MAX = 2030
HCL_REG = r"^#[0-9a-f]{6}$"
HGT_CM_MIN = 150
HGT_CM_MAX = 193
HGT_IN_MIN = 59
HGT_IN_MAX = 76
HGT_REG = r"^([0-9]{2,3})(in|cm)$"
PID_REG = r"^[0-9]{9}$"
if not BYR_MIN <= int(f['byr']) <= BYR_MAX:
return False
if not IYR_MIN <= int(f['iyr']) <= IYR_MAX:
return False
if not EYR_MIN <= int(f['eyr']) <= EYR_MAX:
return False
if not re.match(HCL_REG, f['hcl']):
return False
if f['ecl'] not in ECL_VAL:
return False
if not re.match(PID_REG, f['pid']):
return False
hgt = re.match(HGT_REG, f['hgt'])
if hgt:
height = int(hgt.group(1))
unit = hgt.group(2)
if unit == 'in':
if not HGT_IN_MIN <= height <= HGT_IN_MAX:
return False
else:
if not HGT_CM_MIN <= height <= HGT_CM_MAX:
return False
else:
return False
return True
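# Hedged example (ours, not part of the puzzle input): a record such as
#   {'byr': '1995', 'iyr': '2016', 'eyr': '2028', 'hgt': '152cm',
#    'hcl': '#c0946f', 'ecl': 'oth', 'pid': '543685203'}
# satisfies every rule above, so is_valid(...) returns True.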
# read input file
with open(sys.argv[1], 'r') as fd:
input_content = fd.read().split("\n\n")
# init
part1 = 0
part1_list = list()
part2 = 0
# part1
for entry in str_to_dict(input_content):
size_entry = len(entry)
if size_entry == 8 or 'cid' not in entry:
part1 += 1
part1_list.append(entry)
# part2
for entry in part1_list:
if is_valid(entry):
part2 += 1
# done
print(f"part1: {part1}")
print(f"part2: {part2}")
``` |
{
"source": "joosep/genewordclouds",
"score": 2
} |
#### File: joosep/genewordclouds/server.py
```python
from __future__ import print_function
from urlparse import urlparse, parse_qs
from multiprocessing import Lock
from collections import Counter
import SimpleHTTPServer
import SocketServer
import logging
import os
import csv
import sys
import json
from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer
FILES = 'files'
GENE_COLUMN = 'GENE_COLUMN'
DEFAULT_TAG_HEADER = 'DEFAULT_TAG_HEADER'
FILE_DESCRIPTION = 'FILE_DESCRIPTION'
UPLOADER = 'UPLOADER'
ORGANISM = 'ORGANISM'
GENE_ID_TYPE = 'GENE_ID_TYPE'
DEFAULT_GENES = 'DEFAULT_GENES'
HEADERS = 'headers'
HEADERS_DESCRIPTIONS = 'header_descriptions'
resource_lock = Lock()
gene_map = {}
meta_data = {}
file_to_organism_map = {}
def main():
global gene_map, meta_data, file_to_organism_map
interface, port = get_interface_port_from_arguments()
read_files()
directory_observer = start_observing_files()
serve_application(interface, port)
directory_observer.stop()
directory_observer.join()
def serve_application(interface, port):
handler = ServerHandler
httpd = SocketServer.TCPServer((interface, port), handler)
print('Serving at: http://%(interface)s:%(port)s' % dict(interface=interface or 'localhost', port=port))
httpd.serve_forever()
def get_file_name(file_path):
file_name = file_path[file_path.rfind('/') + 1:]
return file_name
def delete_file_data(file_path):
global resource_lock
file_name = get_file_name(file_path)
resource_lock.acquire()
try:
organism = file_to_organism_map[file_name]
del gene_map[file_name]
del meta_data[organism][file_name]
print("removed file: " + file_name)
except KeyError as e:
print_error("Tried to delete", file_name, "when got error:", e)
finally:
resource_lock.release()
def add_file_data(file_path):
file_name = get_file_name(file_path)
add_csv_file_to_maps(file_name)
def change_file_data_name(src_path, dest_path):
global resource_lock
src_file = get_file_name(src_path)
dest_file = get_file_name(dest_path)
resource_lock.acquire()
try:
gene_map[dest_file] = gene_map.pop(src_file)
organism = file_to_organism_map[src_file]
meta_data[organism][dest_file] = meta_data[organism].pop(src_file)
file_to_organism_map[dest_file] = organism
except KeyError as e:
print_error('Tried to change', src_path, 'file name to', dest_path, 'when got error:', e)
finally:
resource_lock.release()
class InputFilesHandler(PatternMatchingEventHandler):
def on_modified(self, event):
file_name = event.src_path
delete_file_data(file_name)
add_file_data(file_name)
def on_deleted(self, event):
delete_file_data(event.src_path)
def on_created(self, event):
add_file_data(event.src_path)
def on_moved(self, event):
change_file_data_name(event.src_path, event.dest_path)
def start_observing_files():
event_handler = InputFilesHandler(patterns=['*.csv', '*.tsv'], ignore_directories=True)
dir_observer = Observer()
dir_observer.schedule(event_handler, path=FILES, recursive=False)
dir_observer.start()
return dir_observer
# reads in csv and creates map of lists, where one list contains all headers for gene
# what is key in the map for this list. List value is one string with all values for one header
def read_files():
csv_files = []
for f in os.listdir(FILES):
if f.endswith('.csv') or f.endswith('.tsv'):
csv_files.append(f)
for csv_file in csv_files:
add_csv_file_to_maps(csv_file)
return
def add_csv_file_to_maps(csv_file):
global resource_lock
global gene_map
global meta_data
global file_to_organism_map
try:
file_gene_map, file_meta_data, organism = get_gene_and_meta_data(csv_file)
resource_lock.acquire()
try:
file_to_organism_map[csv_file] = organism
            if file_gene_map:  # skip files that produced no gene data
gene_map[csv_file] = file_gene_map
if organism in meta_data:
meta_data[organism][csv_file] = file_meta_data
else:
meta_data[organism] = {csv_file: file_meta_data}
finally:
resource_lock.release()
except Exception as e:
print_error("Error when reading file", csv_file, ":", e)
def get_gene_and_meta_data(csv_file):
file_gene_map = {}
file_meta_data = {}
with open(FILES + '/' + csv_file, 'rb') as csv_input:
reader = csv.reader(csv_input, delimiter='\t')
comments, headers = get_comments_and_headers(reader)
properties = comments_to_properties(comments)
organism = properties[ORGANISM]
if has_required_properties(properties):
headers_list, header_loc = get_header_list_and_loc(headers, properties)
file_meta_data = gen_csv_metadata(csv_file, headers_list, properties)
file_gene_map = gen_file_gene_map(reader, headers_list, header_loc, csv_file)
print(csv_file + ' csv loaded')
else:
raise Exception(
ORGANISM + ' or ' + GENE_COLUMN + ' or ' + UPLOADER + ' parameter is missing from ' + csv_file)
return file_gene_map, file_meta_data, organism
def get_comments_and_headers(reader):
reading_comments = True
comments = []
comment = ""
while reading_comments:
comment = reader.next()
if comment[0].startswith("#"):
comments.append('\t'.join(comment))
else:
reading_comments = False
return comments, comment
def comments_to_properties(comments):
prop_dict = dict()
for comment in comments:
prop_def = comment[1:].strip()
if len(prop_def) == 0:
continue
if prop_def[0] in ('!', '#'):
continue
punctuation = [prop_def.find(c) for c in ':= '] + [len(prop_def)]
found = min([pos for pos in punctuation if pos != -1])
name = prop_def[:found].rstrip()
value = prop_def[found:].lstrip(":= ").rstrip()
prop_dict[name] = value
return prop_dict
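# Hedged example (ours): comment lines such as
#   "# ORGANISM: human" and "# GENE_COLUMN = gene_id"
# parse to {'ORGANISM': 'human', 'GENE_COLUMN': 'gene_id'}; the first ':',
# '=' or ' ' after the property name splits the name from the value.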
def has_required_properties(properties):
return GENE_ID_TYPE in properties and ORGANISM in properties and \
GENE_COLUMN in properties and UPLOADER in properties
def get_header_list_and_loc(headers, properties):
headers_list = map(str.strip, headers)
header_loc = headers_list.index(properties[GENE_COLUMN])
del headers_list[header_loc]
return headers_list, header_loc
def gen_csv_metadata(csv_file, headers_list, properties):
csv_meta_data = {ORGANISM: properties[ORGANISM], GENE_ID_TYPE: properties[GENE_ID_TYPE], HEADERS: headers_list,
HEADERS_DESCRIPTIONS: {}, UPLOADER: properties[UPLOADER]}
if FILE_DESCRIPTION in properties:
csv_meta_data[FILE_DESCRIPTION] = properties[FILE_DESCRIPTION]
else:
csv_meta_data[FILE_DESCRIPTION] = csv_file
print_warning('parameter ' + FILE_DESCRIPTION + ' missing for file ' + csv_file)
if DEFAULT_GENES in properties:
csv_meta_data[DEFAULT_GENES] = properties[DEFAULT_GENES]
else:
csv_meta_data[DEFAULT_GENES] = ''
print_warning('parameter ' + DEFAULT_GENES + ' missing for file ' + csv_file)
if DEFAULT_TAG_HEADER in properties:
if properties[DEFAULT_TAG_HEADER] in headers_list:
csv_meta_data[DEFAULT_TAG_HEADER] = properties[DEFAULT_TAG_HEADER]
else:
csv_meta_data[DEFAULT_TAG_HEADER] = headers_list[0]
print_warning('bad parameter ' + DEFAULT_TAG_HEADER + ': ' + properties[DEFAULT_TAG_HEADER] +
' in file ' + csv_file)
else:
csv_meta_data[DEFAULT_TAG_HEADER] = headers_list[0]
print_warning('parameter ' + DEFAULT_TAG_HEADER + ' missing for file ' + csv_file)
for header in headers_list:
if header in properties:
csv_meta_data[HEADERS_DESCRIPTIONS][header] = properties[header]
else:
csv_meta_data[HEADERS_DESCRIPTIONS][header] = header
# print_warning('header "' + header + '" description missing for file ' + csv_file)
return csv_meta_data
def gen_file_gene_map(reader, headers_list, header_loc, file_name):
file_gene_map = {}
for row in reader:
row_list = map(str.strip, row)
if len(row_list) == (len(headers_list) + 1):
row_gene = row_list[header_loc]
del row_list[header_loc]
if row_gene not in file_gene_map:
gene = {}
for i in range(min(len(row_list), len(headers_list))):
gene[headers_list[i]] = row_list[i]
file_gene_map[row_gene] = gene
else:
gene = file_gene_map[row_gene]
for i in range(min(len(row_list), len(headers_list))):
gene[headers_list[i]] += '|' + row_list[i]
elif len(row_list) > 0:
print_warning('in file ' + file_name + ': headers len: ', len(headers_list), ' row len:', len(row_list) - 1,
' first columns:', row_list[0])
return file_gene_map
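# Hedged example (ours; the header name 'TAGS' is an assumption): if two rows
# share the gene "BRCA1" with values "repair" and "cancer" under that header,
# the merged entry is file_gene_map['BRCA1'] == {'TAGS': 'repair|cancer'},
# i.e. duplicate gene rows are concatenated with '|' as the separator.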
def get_text(self):
parameters = parse_qs(urlparse(self.path).query)
    if 'file' not in parameters or 'header' not in parameters or 'genes' not in parameters:
error = 'request must have file, header and genes parameters'
print_error(error)
get_error(self, error)
return
header = parameters['header'][0]
genes = parameters['genes'][0]
file_name = parameters['file'][0]
gene_list = set(genes.split(' '))
text = ''
if file_name not in gene_map:
write_key_error_result(self, "File", file_name)
return
try:
for gene in gene_list:
if gene in gene_map[file_name]:
text += '|' + gene_map[file_name][gene][header]
write_html_headers(self)
self.wfile.write(text)
except KeyError as key_error:
write_key_error_result(self, "Header", key_error)
return
def get_stats_by_genes(self):
parameters = parse_qs(urlparse(self.path).query)
if 'file' not in parameters or 'header' not in parameters or 'genes' not in parameters or 'tag' not in parameters:
error = 'request must have file, header, genes and tag parameters'
print_error(error)
get_error(self, error)
return
header = parameters['header'][0]
genes = parameters['genes'][0]
tag = parameters['tag'][0]
file_name = parameters['file'][0]
gene_list = set(genes.split(' '))
stats = []
if file_name not in gene_map:
write_key_error_result(self, "File", file_name)
return
try:
for gene in gene_list:
if gene in gene_map[file_name]:
gene_text = gene_map[file_name][gene][header].split('|')
count = gene_text.count(tag)
if count > 0:
stats.append({'gene': gene, 'count': count})
stats = sorted(stats, key=lambda k: k['count'], reverse=True)
write_html_headers(self)
self.wfile.write('count of "' + tag + '" per gene')
for stat in stats:
self.wfile.write('\r\n' + stat['gene'] + ': ' + str(stat['count']))
except KeyError as key_error:
write_key_error_result(self, "Header", key_error)
return
def get_stats_by_all_genes(self, output='json'):
parameters = parse_qs(urlparse(self.path).query)
if 'file' not in parameters or 'header' not in parameters or 'genes' not in parameters:
        error = 'request must have file, header and genes parameters'
print_error(error)
get_error(self, error)
return
file_name = parameters['file'][0]
header = parameters['header'][0]
genes = parameters['genes'][0]
gene_list = set(genes.split(' '))
genes_stats = {}
separator = '\t'
if file_name not in gene_map:
write_key_error_result(self, "File", file_name)
return
try:
for gene in gene_list:
if gene in gene_map[file_name]:
gene_text = gene_map[file_name][gene][header].split('|')
counter = Counter(gene_text)
genes_stats[gene] = counter
except KeyError as key_error:
write_key_error_result(self, "Header", key_error)
return
if output == 'json':
write_json_headers(self)
self.wfile.write(json.dumps(genes_stats, sort_keys=True, indent=4))
elif output == 'csv':
write_csv_headers(self)
self.wfile.write('GENE' + separator + 'TAG' + separator + 'COUNT\r\n')
for gene in genes_stats:
for tag in genes_stats[gene]:
self.wfile.write(gene + separator + tag + separator + str(genes_stats[gene][tag]) + "\r\n")
else:
error = 'file extension must be \'json\' or \'csv\'.'
print_error(error)
get_error(self, error)
return
return
def get_error(self, error):
self.send_response(400)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(error)
return
def get_metadata(self):
write_json_headers(self)
self.wfile.write(json.dumps(dict(meta_data)))
return
def write_key_error_result(self, key_type, key_error):
self.send_response(400)
self.send_header('Content-type', 'text/html')
self.end_headers()
    self.wfile.write(key_type + " " + str(key_error) + " not in the database anymore.")
def write_html_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def write_json_headers(self):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
def write_csv_headers(self):
self.send_response(200)
    self.send_header('Content-type', 'text/csv')
self.end_headers()
class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
if self.path == '/metadata':
get_metadata(self)
elif self.path.startswith('/text'):
get_text(self)
elif self.path.startswith('/statsbygenes'):
get_stats_by_genes(self)
elif self.path.startswith('/statsbyallgenes.json'):
get_stats_by_all_genes(self, 'json')
elif self.path.startswith('/statsbyallgenes.csv'):
get_stats_by_all_genes(self, 'csv')
else:
logging.info('GET path: ' + self.path)
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
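# Illustrative requests for the handler above (file/header/gene names are
# hypothetical, not real data):
#   /metadata
#   /text?file=data.csv&header=expr&genes=BRCA1%20TP53
#   /statsbygenes?file=data.csv&header=tags&genes=BRCA1&tag=up
#   /statsbyallgenes.csv?file=data.csv&header=tags&genes=BRCA1%20TP53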
def get_interface_port_from_arguments():
if len(sys.argv) > 2:
port = int(sys.argv[2])
interface = sys.argv[1]
elif len(sys.argv) > 1:
port = int(sys.argv[1])
interface = ''
else:
port = 8000
interface = ''
return interface, port
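# Usage sketch based on the argv parsing above (script name is illustrative):
#   python gene_server.py                  -> all interfaces, port 8000
#   python gene_server.py 8080             -> all interfaces, port 8080
#   python gene_server.py 127.0.0.1 8080   -> bind 127.0.0.1, port 8080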
def print_warning(*objs):
print("WARNING: ", *objs, file=sys.stderr)
def print_error(*objs):
print("ERROR: ", *objs, file=sys.stderr)
if __name__ == '__main__':
main()
``` |
{
"source": "joosephook/ma-gym",
"score": 3
} |
#### File: envs/traffic_junction/traffic_junction.py
```python
import copy
import logging
import random
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
from ..utils.action_space import MultiAgentActionSpace
from ..utils.draw import draw_grid, fill_cell, write_cell_text
from ..utils.observation_space import MultiAgentObservationSpace
logger = logging.getLogger(__name__)
class TrafficJunction(gym.Env):
"""
This consists of a 4-way junction on a 14 × 14 grid. At each time step, "new" cars enter the grid with
probability `p_arrive` from each of the four directions. However, the total number of cars at any given
time is limited to `Nmax`.
Each car occupies a single cell at any given time and is randomly assigned to one of three possible routes
(keeping to the right-hand side of the road). At every time step, a car has two possible actions: gas which advances
it by one cell on its route or brake to stay at its current location. A car will be removed once it reaches its
destination at the edge of the grid.
    Two cars collide if their locations overlap. A collision incurs a reward `r_coll = -10`, but does not affect
    the simulation in any other way. To discourage traffic jams, each car gets a reward of `τ * r_time = -0.01τ`
    at every time step, where `τ` is the number of time steps passed since the car arrived. Therefore, the total
    reward at time t is
        r(t) = C^t * r_coll + \sum_{i=1}^{N^t} \tau_i * r_time
    where C^t is the number of collisions occurring at time t and N^t is the number of cars present. The simulation is
terminated after 'max_steps(default:40)' steps and is classified as a failure if one or more collisions have
occurred.
Each car is represented by one-hot binary vector set {n, l, r}, that encodes its unique ID, current location
and assigned route number respectively. Each agent controlling a car can only observe other cars in its vision
range (a surrounding 3 × 3 neighborhood), though low level communication is allowed in "v1" version of the game.
The state vector s_j for each agent is thus a concatenation of all these vectors, having dimension
(3^2) × (|n| + |l| + |r|).
Reference : Learning Multi-agent Communication with Backpropagation
Url : https://papers.nips.cc/paper/6398-learning-multiagent-communication-with-backpropagation.pdf
For details on various versions, please refer to "wiki"
(https://github.com/koulanurag/ma-gym/wiki/Environments#TrafficJunction)
"""
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self, grid_shape=(14, 14), step_cost=-0.01, n_max=4, collision_reward=-10, arrive_prob=0.5,
full_observable: bool = False, max_steps: int = 100):
        assert 1 <= n_max <= 10, "n_max should be in the range [1, 10]"
assert 0 <= arrive_prob <= 1, "arrive probability should be in range [0,1]"
assert len(grid_shape) == 2, 'only 2-d grids are acceptable'
        assert 1 <= max_steps, "max_steps should be at least 1"
self._grid_shape = grid_shape
self.n_agents = n_max
self._max_steps = max_steps
self._step_count = 0 # environment step counter
self._collision_reward = collision_reward
self._total_episode_reward = None
self._arrive_prob = arrive_prob
self._n_max = n_max
self._step_cost = step_cost
self.curr_cars_count = 0
self._n_routes = 3
self._agent_view_mask = (3, 3)
# entry gates where the cars spawn
# Note: [(7, 0), (13, 7), (6, 13), (0, 6)] for (14 x 14) grid
self._entry_gates = [(self._grid_shape[0] // 2, 0),
(self._grid_shape[0] - 1, self._grid_shape[1] // 2),
(self._grid_shape[0] // 2 - 1, self._grid_shape[1] - 1),
(0, self._grid_shape[1] // 2 - 1)]
# destination places for the cars to reach
# Note: [(7, 13), (0, 7), (6, 0), (13, 6)] for (14 x 14) grid
self._destination = [(self._grid_shape[0] // 2, self._grid_shape[1] - 1),
(0, self._grid_shape[1] // 2),
(self._grid_shape[0] // 2 - 1, 0),
(self._grid_shape[0] - 1, self._grid_shape[1] // 2 - 1)]
# dict{direction_vectors: (turn_right, turn_left)}
# Note: [((7, 6), (7,7))), ((7, 7),(6,7)), ((6,6),(7, 6)), ((6, 7),(6,6))] for (14 x14) grid
self._turning_places = {(0, 1): ((self._grid_shape[0] // 2, self._grid_shape[0] // 2 - 1),
(self._grid_shape[0] // 2, self._grid_shape[0] // 2)),
(-1, 0): ((self._grid_shape[0] // 2, self._grid_shape[0] // 2),
(self._grid_shape[0] // 2 - 1, self._grid_shape[0] // 2)),
(1, 0): ((self._grid_shape[0] // 2 - 1, self._grid_shape[0] // 2 - 1),
(self._grid_shape[0] // 2, self._grid_shape[0] // 2 - 1)),
(0, -1): ((self._grid_shape[0] // 2 - 1, self._grid_shape[0] // 2),
(self._grid_shape[0] // 2 - 1, self._grid_shape[0] // 2 - 1))}
# dict{starting_place: direction_vector}
self._route_vectors = {(self._grid_shape[0] // 2, 0): (0, 1),
(self._grid_shape[0] - 1, self._grid_shape[0] // 2): (-1, 0),
(0, self._grid_shape[0] // 2 - 1): (1, 0),
(self._grid_shape[0] // 2 - 1, self._grid_shape[0] - 1): (0, -1)}
self._agent_turned = [False for _ in range(self.n_agents)] # flag if car changed direction
self._agents_routes = [-1 for _ in range(self.n_agents)] # route each car is following atm
self._agents_direction = [(0, 0) for _ in range(self.n_agents)] # cars are not on the road initially
self._agent_step_count = [0 for _ in range(self.n_agents)] # holds a step counter for each car
self.action_space = MultiAgentActionSpace([spaces.Discrete(2) for _ in range(self.n_agents)])
self.agent_pos = {_: None for _ in range(self.n_agents)}
self._on_the_road = [False for _ in range(self.n_agents)] # flag if car is on the road
self._full_obs = self.__create_grid()
self._base_img = self.__draw_base_img()
self._agent_dones = [None for _ in range(self.n_agents)]
self.viewer = None
self.full_observable = full_observable
# agent id (n_agents, onehot), obs_mask (9), pos (2), route (3)
mask_size = np.prod(self._agent_view_mask)
self._obs_high = np.ones((mask_size * (self.n_agents + self._n_routes + 2))) # 2 is for location
self._obs_low = np.zeros((mask_size * (self.n_agents + self._n_routes + 2))) # 2 is for location
if self.full_observable:
self._obs_high = np.tile(self._obs_high, self.n_agents)
self._obs_low = np.tile(self._obs_low, self.n_agents)
self.observation_space = MultiAgentObservationSpace([spaces.Box(self._obs_low, self._obs_high)
for _ in range(self.n_agents)])
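        # Dimension sketch for the defaults (n_max = 4, 3 routes): each cell
        # vector has length n_agents + 2 + 3 = 9 and the 3x3 mask has 9 cells,
        # so each partial observation is 9 * 9 = 81 floats per agent.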
def action_space_sample(self):
return [agent_action_space.sample() for agent_action_space in self.action_space]
def __init_full_obs(self):
"""
        Initiates the environment: inserts up to |entry_gates| cars. Once the entry gates are filled, the remaining
        agents stay initialized outside the road, waiting to enter.
"""
self._full_obs = self.__create_grid()
shuffled_gates = list(self._route_vectors.keys())
random.shuffle(shuffled_gates)
for agent_i in range(self.n_agents):
if self.curr_cars_count >= len(self._entry_gates):
self.agent_pos[agent_i] = (0, 0) # not yet on the road
else:
pos = shuffled_gates[agent_i]
# gets direction vector for agent_i that spawned in position pos
self._agents_direction[agent_i] = self._route_vectors[pos]
self.agent_pos[agent_i] = pos
self.curr_cars_count += 1
self._on_the_road[agent_i] = True
self._agents_routes[agent_i] = random.randint(1, self._n_routes) # [1,3] (inclusive)
self.__update_agent_view(agent_i)
self.__draw_base_img()
def _is_cell_vacant(self, pos):
return self.is_valid(pos) and (self._full_obs[pos[0]][pos[1]] == PRE_IDS['empty'])
def is_valid(self, pos):
return (0 <= pos[0] < self._grid_shape[0]) and (0 <= pos[1] < self._grid_shape[1])
def __update_agent_view(self, agent_i):
self._full_obs[self.agent_pos[agent_i][0]][self.agent_pos[agent_i][1]] = PRE_IDS['agent'] + str(agent_i + 1)
def __check_collision(self, pos):
"""
        Verifies if a transition to the position pos will result in a collision.
:param pos: position to verify if there is collision
:type pos: tuple
:return: boolean stating true or false
:rtype: bool
"""
return self.is_valid(pos) and (self._full_obs[pos[0]][pos[1]].find(PRE_IDS['agent']) > -1)
def __is_gate_free(self):
"""
Verifies if any spawning gate is free for a car to be placed
:return: list of currently free gates
:rtype: list
"""
free_gates = []
for pos in self._entry_gates:
if pos not in self.agent_pos.values():
free_gates.append(pos)
return free_gates
def __reached_dest(self, agent_i):
"""
Verifies if the agent_i reached a destination place.
:param agent_i: id of the agent
:type agent_i: int
:return: boolean stating true or false
:rtype: bool
"""
pos = self.agent_pos[agent_i]
if pos in self._destination:
self._full_obs[pos[0]][pos[1]] = PRE_IDS['empty']
return True
return False
def get_agent_obs(self):
"""
        Computes the observations for the agents. Each agent receives information about cars in its vision
range (a surrounding 3 × 3 neighborhood),where each car is represented by one-hot binary vector set {n, l, r},
that encodes its unique ID, current location and assigned route number respectively.
The state vector s_j for each agent is thus a concatenation of all these vectors, having dimension
(3^2) × (|n| + |l| + |r|).
:return: list with observations of all agents. the full list has shape (n_agents, (3^2) × (|n| + |l| + |r|))
:rtype: list
"""
agent_no_mask_obs = []
for agent_i in range(self.n_agents):
pos = self.agent_pos[agent_i]
# agent id
_agent_i_obs = [0 for _ in range(self.n_agents)]
_agent_i_obs[agent_i] = 1
# location
            _agent_i_obs += [pos[0] / (self._grid_shape[0] - 1), pos[1] / (self._grid_shape[1] - 1)]  # normalized coordinates
# route
route_agent_i = np.zeros(self._n_routes)
route_agent_i[self._agents_routes[agent_i] - 1] = 1
_agent_i_obs += route_agent_i.tolist()
agent_no_mask_obs.append(_agent_i_obs)
agent_obs = []
for agent_i in range(self.n_agents):
pos = self.agent_pos[agent_i]
mask_view = np.zeros((*self._agent_view_mask, len(agent_no_mask_obs[0])))
for row in range(max(0, pos[0] - 1), min(pos[0] + 1 + 1, self._grid_shape[0])):
for col in range(max(0, pos[1] - 1), min(pos[1] + 1 + 1, self._grid_shape[1])):
if PRE_IDS['agent'] in self._full_obs[row][col]:
_id = int(self._full_obs[row][col].split(PRE_IDS['agent'])[1]) - 1
mask_view[row - (pos[0] - 1), col - (pos[1] - 1), :] = agent_no_mask_obs[_id]
agent_obs.append(mask_view.flatten())
if self.full_observable:
_obs = np.array(agent_obs).flatten().tolist()
agent_obs = [_obs for _ in range(self.n_agents)]
return agent_obs
def __draw_base_img(self):
# create grid and make everything black
img = draw_grid(self._grid_shape[0], self._grid_shape[1], cell_size=CELL_SIZE, fill=WALL_COLOR)
# draw tracks
for i, row in enumerate(self._full_obs):
for j, col in enumerate(row):
if col == PRE_IDS['empty']:
fill_cell(img, (i, j), cell_size=CELL_SIZE, fill=(143, 141, 136), margin=0.05)
elif col == PRE_IDS['wall']:
fill_cell(img, (i, j), cell_size=CELL_SIZE, fill=(242, 227, 167), margin=0.02)
return img
def __create_grid(self):
# create a grid with every cell as wall
_grid = [[PRE_IDS['wall'] for _ in range(self._grid_shape[1])] for _ in range(self._grid_shape[0])]
# draw track by making cells empty :
# horizontal tracks
_grid[self._grid_shape[0] // 2 - 1] = [PRE_IDS['empty'] for _ in range(self._grid_shape[1])]
_grid[self._grid_shape[0] // 2] = [PRE_IDS['empty'] for _ in range(self._grid_shape[1])]
# vertical tracks
for row in range(self._grid_shape[0]):
_grid[row][self._grid_shape[1] // 2 - 1] = PRE_IDS['empty']
_grid[row][self._grid_shape[1] // 2] = PRE_IDS['empty']
return _grid
def step(self, agents_action):
"""
        Performs an action in the environment and steps forward. At each step a new agent may enter the road
        through one of the 4 gates with probability `_arrive_prob`. A `collision_reward` is given to an agent if it
        collides, and every car on the road receives `step_cost * τ` (τ being its own step count) to discourage traffic jams.
:param agents_action: list of actions of all the agents to perform in the environment
:type agents_action: list
:return: agents observations, rewards, if agents are done and additional info
:rtype: tuple
"""
        assert len(agents_action) == self.n_agents, \
            "Invalid action! Expected a list of length {}" \
            " but got length {}".format(self.n_agents, len(agents_action))
assert all([action_i in ACTION_MEANING.keys() for action_i in agents_action]), \
"Invalid action found in the list of sampled actions {}" \
". Valid actions are {}".format(agents_action, ACTION_MEANING.keys())
self._step_count += 1 # global environment step
rewards = [0 for _ in range(self.n_agents)] # initialize rewards array
step_collisions = 0 # counts collisions in this step
# checks if there is a collision; this is done in the __update_agent_pos method
# we still need to check both agent_dones and on_the_road because an agent may not be done
# and have not entered the road yet
for agent_i, action in enumerate(agents_action):
if not self._agent_dones[agent_i] and self._on_the_road[agent_i]:
self._agent_step_count[agent_i] += 1 # agent step count
collision_flag = self.__update_agent_pos(agent_i, action)
if collision_flag:
rewards[agent_i] += self._collision_reward
step_collisions += 1
# gives additional step punishment to avoid jams
                # at every time step, where `τ` is the number of time steps passed since the car arrived.
                # We need to keep track of each car's step count, which multiplies the step cost.
rewards[agent_i] += self._step_cost * self._agent_step_count[agent_i]
self._total_episode_reward[agent_i] += rewards[agent_i]
# checks if destination was reached
            # once a car reaches its destination, it will never enter again in any of the tracks
# Also, if all cars have reached their destination, then we terminate the episode.
if self.__reached_dest(agent_i):
self._agent_dones[agent_i] = True
self.curr_cars_count -= 1
# if max_steps was reached, terminate the episode
if self._step_count >= self._max_steps:
self._agent_dones[agent_i] = True
# adds new car according to the probability _arrive_prob
if random.uniform(0, 1) < self._arrive_prob:
free_gates = self.__is_gate_free()
# if there are agents outside the road and if any gate is free
if not all(self._on_the_road) and free_gates:
# then gets first agent on the list which is not on the road
agent_to_enter = self._on_the_road.index(False)
pos = random.choice(free_gates)
self._agents_direction[agent_to_enter] = self._route_vectors[pos]
self.agent_pos[agent_to_enter] = pos
self.curr_cars_count += 1
self._on_the_road[agent_to_enter] = True
self._agent_turned[agent_to_enter] = False
self._agents_routes[agent_to_enter] = random.randint(1, self._n_routes) # (1, 3)
self.__update_agent_view(agent_to_enter)
return self.get_agent_obs(), rewards, self._agent_dones, {'step_collisions': step_collisions}
def __get_next_direction(self, route, agent_i):
"""
Computes the new direction vector after the cars turn on the junction for route 2 (turn right) and 3 (turn left)
:param route: route that was assigned to the car (1 - fwd, 2 - turn right, 3 - turn left)
:type route: int
:param agent_i: id of the agent
:type agent_i: int
:return: new direction vector following the assigned route
:rtype: tuple
"""
# gets current direction vector
dir_vector = self._agents_direction[agent_i]
sig = (1 if dir_vector[1] != 0 else -1) if route == 2 else (-1 if dir_vector[1] != 0 else 1)
new_dir_vector = (dir_vector[1] * sig, 0) if dir_vector[0] == 0 else (0, dir_vector[0] * sig)
return new_dir_vector
def __update_agent_pos(self, agent_i, move):
"""
        Updates the agent position in the environment. Moves can be 0 (GAS) or 1 (BRAKE). If the move is 1, nothing
        happens and the car remains stopped. If the move is 0, the assigned route is evaluated: route 1 (forward)
        keeps the current direction vector, while the other routes compute a new direction vector and apply the change
        of direction once the junction turning place is reached. After the move is made, the method checks whether it
        resulted in a collision and returns True in that case. The position is only updated if no collision occurred.
:param agent_i: id of the agent
:type agent_i: int
:param move: move picked by the agent_i
:type move: int
:return: bool flag associated to the existence or absence of a collision
:rtype: bool
"""
curr_pos = copy.copy(self.agent_pos[agent_i])
next_pos = None
route = self._agents_routes[agent_i]
if move == 0: # GAS
if route == 1:
next_pos = tuple([curr_pos[i] + self._agents_direction[agent_i][i] for i in range(len(curr_pos))])
else:
turn_pos = self._turning_places[self._agents_direction[agent_i]]
# if the car reached the turning position in the junction for his route and starting gate
if curr_pos == turn_pos[route - 2] and not self._agent_turned[agent_i]:
new_dir_vector = self.__get_next_direction(route, agent_i)
self._agents_direction[agent_i] = new_dir_vector
self._agent_turned[agent_i] = True
next_pos = tuple([curr_pos[i] + new_dir_vector[i] for i in range(len(curr_pos))])
else:
next_pos = tuple([curr_pos[i] + self._agents_direction[agent_i][i] for i in range(len(curr_pos))])
elif move == 1: # BRAKE
pass
else:
raise Exception('Action Not found!')
# if there is a collision
if next_pos is not None and self.__check_collision(next_pos):
return True
# if there is no collision and the next position is free updates agent position
if next_pos is not None and self._is_cell_vacant(next_pos):
self.agent_pos[agent_i] = next_pos
self._full_obs[curr_pos[0]][curr_pos[1]] = PRE_IDS['empty']
self.__update_agent_view(agent_i)
return False
def reset(self):
"""
Resets the environment when a terminal state is reached.
:return: list with the observations of the agents
:rtype: list
"""
self._total_episode_reward = [0 for _ in range(self.n_agents)]
self._step_count = 0
self._agent_step_count = [0 for _ in range(self.n_agents)]
self._agent_dones = [False for _ in range(self.n_agents)]
self._on_the_road = [False for _ in range(self.n_agents)]
self._agent_turned = [False for _ in range(self.n_agents)]
self.curr_cars_count = 0
self.agent_pos = {}
self.__init_full_obs()
return self.get_agent_obs()
def render(self, mode: str = 'human'):
img = copy.copy(self._base_img)
for agent_i in range(self.n_agents):
if not self._agent_dones[agent_i] and self._on_the_road[agent_i]:
fill_cell(img, self.agent_pos[agent_i], cell_size=CELL_SIZE, fill=AGENTS_COLORS[agent_i])
write_cell_text(img, text=str(agent_i + 1), pos=self.agent_pos[agent_i], cell_size=CELL_SIZE,
fill='white', margin=0.3)
img = np.asarray(img)
if mode == 'rgb_array':
return img
elif mode == 'human':
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(img)
return self.viewer.isopen
def seed(self, n: int):
self.np_random, seed1 = seeding.np_random(n)
seed2 = seeding.hash_seed(seed1 + 1) % 2 ** 31
return [seed1, seed2]
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
CELL_SIZE = 30
WALL_COLOR = 'black'
# fixed colors for #agents = n_max <= 10
AGENTS_COLORS = [
"red",
"blue",
"yellow",
"orange",
"black",
"green",
"purple",
"pink",
"brown",
"grey"
]
ACTION_MEANING = {
0: "GAS",
1: "BRAKE",
}
PRE_IDS = {
'wall': 'W',
'empty': '0',
'agent': 'A'
}
``` |
{
"source": "JooseRajamaeki/ICP",
"score": 2
} |
#### File: ICP/SmallTensorflowExample/distribution_lerner.py
```python
import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt
import time
from enum import Enum
import random
#This is a more didactic Python code to do the matching
from icp_matching import matching
#This is a C++ implementation of the matching procedure.
#The example file was built for 64-bit Windows Python version 3.6
#Use the Visual Studio project in folder "PythonModule" to build it yourself.
import _icp_matcher as icp_matcher
marker_size = 1
input_dimension = 6
output_dimension = 2
conditioning_dimension = 0
########### Making the neural network ###########
hidden_dimension = 100
learning_rate = 0.001
init_weight_scale = 0.1
max_gradient_norm = 0.1
every_other = []
for i in range(hidden_dimension):
if i % 2 == 0:
every_other.append(1.0)
else:
every_other.append(-1.0)
comb = tf.constant(every_other, tf.float32)
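# comb alternates +1/-1 across hidden units, i.e. [1., -1., 1., -1., ...];
# below it flips the sign of every other ELU output ("bipolar" outputs).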
x = tf.placeholder(tf.float32, [None, input_dimension])
W0 = tf.Variable(tf.random_normal([input_dimension, hidden_dimension])*init_weight_scale)
b0 = tf.Variable(tf.random_normal([hidden_dimension])*init_weight_scale)
hidden_activation_0 = tf.matmul(x, W0) + b0
hidden_output_0 = tf.nn.elu(hidden_activation_0)
bipolar_hidden_output_0 = hidden_output_0 * comb
W1 = tf.Variable(tf.random_normal([hidden_dimension, hidden_dimension])*init_weight_scale)
b1 = tf.Variable(tf.random_normal([hidden_dimension])*init_weight_scale)
hidden_activation_1 = tf.matmul(bipolar_hidden_output_0, W1) + b1
hidden_output_1 = tf.nn.elu(hidden_activation_1)
bipolar_hidden_output_1 = hidden_output_1 * comb
W_output = tf.Variable(tf.random_normal([hidden_dimension, output_dimension])*init_weight_scale)
b_output = tf.Variable(tf.random_normal([output_dimension])*init_weight_scale)
y = tf.matmul(bipolar_hidden_output_1, W_output) + b_output
y_ = tf.placeholder(tf.float32, [None, output_dimension])
loss = tf.reduce_sum(tf.square(y-y_))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
gvs = optimizer.compute_gradients(loss)
capped_gvs = [(tf.clip_by_value(grad, -max_gradient_norm, max_gradient_norm), var) for grad, var in gvs]
train_op = optimizer.apply_gradients(capped_gvs)
########### Initializing the neural network ###########
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
########### Plotting sample input noise ###########
amount_data = 10000
x_data = np.random.randn(amount_data,input_dimension)
plt.scatter(x_data[:,0], x_data[:,1],s=marker_size)
plt.title('Input noise')
plt.ion()
plt.show()
time.sleep(2.0)
plt.close('all')
plt.ioff()
########### Creating training data ###########
y_data = np.random.rand(amount_data,output_dimension)
y_data[0:round(amount_data/2),0] = y_data[0:round(amount_data/2),0] + 2
y_data[0:round(amount_data/2),1] = y_data[0:round(amount_data/2),1] - 1
'''
y_data[:,0] = y_data[:,0]*7.0
corruption_noise = np.random.randn(amount_data,1) * 0.1
y_data[:,1] = np.sin(y_data[:,0])+corruption_noise[:,0]
'''
########### Defining the training step of the algorithm ###########
def distribution_training_step(true_data,input_noise):
shuffle_idx = np.arange(amount_data)
random.shuffle(shuffle_idx)
true_data = true_data[shuffle_idx,:]
if conditioning_dimension > 0:
input_noise[:,0:conditioning_dimension] = true_data[:,0:conditioning_dimension]
generated_data = sess.run(y, feed_dict={x: input_noise})
if conditioning_dimension > 0:
generated_data[:,0:conditioning_dimension] = true_data[:,0:conditioning_dimension]
##Matching with the didactic Python code
#matched_indexes = matching(true_data,generated_data)
##Matching with the module built from the C++ code
matched_indexes = icp_matcher.alternating_icp_matching(generated_data.tolist(),true_data.tolist())
input_noise = input_noise[matched_indexes,:]
if conditioning_dimension > 0:
input_noise[:,0:conditioning_dimension] = true_data[:,0:conditioning_dimension]
minibatch_size = round(amount_data / 10)
index = np.arange(amount_data)
np.random.shuffle(index)
assert(amount_data % minibatch_size == 0)
while len(index) > 0:
del_index = np.arange(minibatch_size)
train_index = index[del_index]
train_noise = input_noise[train_index,:]
train_data = true_data[train_index,:]
sess.run(train_op, feed_dict={x: train_noise, y_: train_data})
index = np.delete(index,del_index,axis=0)
########### Actual training ###########
for iteration in range(10000):
x_data = np.random.randn(amount_data,input_dimension)
distribution_training_step(y_data,x_data)
print(iteration)
if iteration % 100 == 0:
x_data = np.random.randn(amount_data,input_dimension)
if conditioning_dimension > 0:
x_data[:,0:conditioning_dimension] = y_data[:,0:conditioning_dimension]
predictions = sess.run(y, feed_dict={x: x_data})
if conditioning_dimension > 0:
predictions[:,0:conditioning_dimension] = y_data[:,0:conditioning_dimension]
true_data = plt.scatter(y_data[:,0], y_data[:,1],s=marker_size)
generated_data = plt.scatter(predictions[:,0], predictions[:,1],s=marker_size)
plt.legend([true_data, generated_data], ['True data', 'Generated data'])
plt.savefig('distributions.png', bbox_inches='tight', pad_inches=0)
plt.ion()
plt.show()
time.sleep(2.0)
plt.close('all')
plt.ioff()
``` |
{
"source": "JooshK/FCC-Projects",
"score": 3
} |
#### File: FCC-Projects/Tensorflow Projects/predict_health_costs_with_regression.py
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
try:
    # %tensorflow_version only exists in Colab.
    # %tensorflow_version 2.x
    pass
except Exception:
    pass
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow import feature_column
import tensorflow_docs as tfdocs
import tensorflow_docs.plots
import tensorflow_docs.modeling
# Import data
# The next line is a Colab/Jupyter shell escape; outside a notebook, download the file manually:
# !wget https://cdn.freecodecamp.org/project-data/health-costs/insurance.csv
dataset = pd.read_csv('insurance.csv')
dataset.tail()
#Create test and train datasets
from sklearn.model_selection import train_test_split
train_dataset, test_dataset = train_test_split(dataset, test_size=0.2)
#Create an input pipeline, we have to wrap the dataframes with tf.data
def df_to_dataset(dataframe, shuffle=True, batch_size=32):
dataframe = dataframe.copy()
labels = dataframe.pop('expenses')
ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
if shuffle:
ds = ds.shuffle(buffer_size=len(dataframe))
ds = ds.batch(batch_size)
return ds
train_ds = df_to_dataset(train_dataset)
test_labels = test_dataset['expenses'].values  # keep labels before the dataframe is wrapped
test_dataset = df_to_dataset(test_dataset, shuffle=False)
#Get the feature cols from the categorical and numeric cols
CATEGORICAL_COLUMNS = ['sex', 'smoker', 'region']
NUMERIC_COLUMNS = ['age', 'bmi', 'children']
feature_columns = []
#Numeric
for header in NUMERIC_COLUMNS:
feature_columns.append(feature_column.numeric_column(header))
#Categorical, one hot encoding
for header in CATEGORICAL_COLUMNS:
categorical_column = feature_column.categorical_column_with_vocabulary_list(
header, dataset[header].unique())
indicator_column = feature_column.indicator_column(categorical_column)
feature_columns.append(indicator_column)
print(feature_columns)
#Create a feature layer to input them into the Keras model
feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
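#For example (illustrative), the 'smoker' column with values {'yes', 'no'}
#becomes a 2-dimensional one-hot indicator vector inside this layer.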
#Create, compile and train the model
model = tf.keras.Sequential([
feature_layer,
layers.Dense(128, activation='relu'),
layers.Dense(128, activation='relu'),
layers.Dropout(.1),
layers.Dense(1)
])
model.compile(optimizer='adam',
              loss='mse',
              metrics=['mae'])
model.fit(train_ds,
epochs=10)
# Test model by checking how well the model generalizes using the test set.
loss, mae = model.evaluate(test_dataset)
print("Testing set Mean Abs Error: {:5.2f} expenses".format(mae))
if mae < 3500:
print("You passed the challenge. Great job!")
else:
print("The Mean Abs Error must be less than 3500. Keep trying.")
# Plot predictions.
test_predictions = model.predict(test_dataset).flatten()
a = plt.axes(aspect='equal')
plt.scatter(test_labels, test_predictions)
plt.xlabel('True values (expenses)')
plt.ylabel('Predictions (expenses)')
lims = [0, 50000]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims,lims)
``` |
{
"source": "jO-Osko/adventofcode2015",
"score": 4
} |
#### File: 2015/problems/day13.py
```python
DAY = 13
from collections import defaultdict as dd
from functools import lru_cache
def solve(data, wall=False):
graph = dd(dict)
for line in data:
fro, would, gain, change, *_ , to = line.split()
change = ([-1,1][gain=="gain"]) * int(change)
to = to.strip(".")
graph[fro][to] = graph[fro].setdefault(to,0) + change
graph[to][fro] = graph[to].setdefault(fro,0) + change
if wall:
for j in graph:
graph[j]["wall"] = 0
graph["wall"] = dd(int)
cities = list(graph.keys())
@lru_cache(maxsize=None)
def shortest_path(current, visited, to_go, first):
if to_go == 0:
return graph[current][first]
rtr = float("-inf")
for j in range(len(cities)):
if not visited[j] :
_visited = list(visited)
_visited[j] = 1
rtr = max(rtr, shortest_path(cities[j], tuple(_visited), to_go-1, first) + graph[current][cities[j]])
return rtr
    return shortest_path(cities[0], tuple([1] + [0 for j in range(len(cities) - 1)]), len(cities) - 1, cities[0])
def part1(data):
return solve(data.split("\n"))
def part2(data):
return solve(data.split("\n"), True)
TEST = """Alice would gain 54 happiness units by sitting next to Bob.
Alice would lose 79 happiness units by sitting next to Carol.
Alice would lose 2 happiness units by sitting next to David.
Bob would gain 83 happiness units by sitting next to Alice.
Bob would lose 7 happiness units by sitting next to Carol.
Bob would lose 63 happiness units by sitting next to David.
Carol would lose 62 happiness units by sitting next to Alice.
Carol would gain 60 happiness units by sitting next to Bob.
Carol would gain 55 happiness units by sitting next to David.
David would gain 46 happiness units by sitting next to Alice.
David would lose 7 happiness units by sitting next to Bob.
David would gain 41 happiness units by sitting next to Carol."""
DATA = """Alice would gain 2 happiness units by sitting next to Bob.
Alice would gain 26 happiness units by sitting next to Carol.
Alice would lose 82 happiness units by sitting next to David.
Alice would lose 75 happiness units by sitting next to Eric.
Alice would gain 42 happiness units by sitting next to Frank.
Alice would gain 38 happiness units by sitting next to George.
Alice would gain 39 happiness units by sitting next to Mallory.
Bob would gain 40 happiness units by sitting next to Alice.
Bob would lose 61 happiness units by sitting next to Carol.
Bob would lose 15 happiness units by sitting next to David.
Bob would gain 63 happiness units by sitting next to Eric.
Bob would gain 41 happiness units by sitting next to Frank.
Bob would gain 30 happiness units by sitting next to George.
Bob would gain 87 happiness units by sitting next to Mallory.
Carol would lose 35 happiness units by sitting next to Alice.
Carol would lose 99 happiness units by sitting next to Bob.
Carol would lose 51 happiness units by sitting next to David.
Carol would gain 95 happiness units by sitting next to Eric.
Carol would gain 90 happiness units by sitting next to Frank.
Carol would lose 16 happiness units by sitting next to George.
Carol would gain 94 happiness units by sitting next to Mallory.
David would gain 36 happiness units by sitting next to Alice.
David would lose 18 happiness units by sitting next to Bob.
David would lose 65 happiness units by sitting next to Carol.
David would lose 18 happiness units by sitting next to Eric.
David would lose 22 happiness units by sitting next to Frank.
David would gain 2 happiness units by sitting next to George.
David would gain 42 happiness units by sitting next to Mallory.
Eric would lose 65 happiness units by sitting next to Alice.
Eric would gain 24 happiness units by sitting next to Bob.
Eric would gain 100 happiness units by sitting next to Carol.
Eric would gain 51 happiness units by sitting next to David.
Eric would gain 21 happiness units by sitting next to Frank.
Eric would gain 55 happiness units by sitting next to George.
Eric would lose 44 happiness units by sitting next to Mallory.
Frank would lose 48 happiness units by sitting next to Alice.
Frank would gain 91 happiness units by sitting next to Bob.
Frank would gain 8 happiness units by sitting next to Carol.
Frank would lose 66 happiness units by sitting next to David.
Frank would gain 97 happiness units by sitting next to Eric.
Frank would lose 9 happiness units by sitting next to George.
Frank would lose 92 happiness units by sitting next to Mallory.
George would lose 44 happiness units by sitting next to Alice.
George would lose 25 happiness units by sitting next to Bob.
George would gain 17 happiness units by sitting next to Carol.
George would gain 92 happiness units by sitting next to David.
George would lose 92 happiness units by sitting next to Eric.
George would gain 18 happiness units by sitting next to Frank.
George would gain 97 happiness units by sitting next to Mallory.
Mallory would gain 92 happiness units by sitting next to Alice.
Mallory would lose 96 happiness units by sitting next to Bob.
Mallory would lose 51 happiness units by sitting next to Carol.
Mallory would lose 81 happiness units by sitting next to David.
Mallory would gain 31 happiness units by sitting next to Eric.
Mallory would lose 73 happiness units by sitting next to Frank.
Mallory would lose 89 happiness units by sitting next to George."""
print(part1(DATA))
print(part2(DATA))
```
#### File: 2015/problems/day24.py
```python
DAY = 24
from functools import lru_cache, reduce
from operator import mul
from itertools import combinations
def part1(data):
return solve([int(j) for j in data.split("\n")])
def solve(data, parts=3):
su = sum(data)
part = su//parts
mi = float("inf")
for j in range(1, len(data)):
for comb in combinations(data, j):
if sum(comb) == part:
mi = min(reduce(mul, comb), mi)
if mi < float("inf"):
return mi
def part2(data):
return solve([int(j) for j in data.split("\n")], 4)
DATA = """1
2
3
7
11
13
17
19
23
31
37
41
43
47
53
59
61
67
71
73
79
83
89
97
101
103
107
109
113"""
print(part1(DATA))
print(part2(DATA))
```
#### File: 2015/problems/day6.py
```python
DAY = 6
def part1(data):
table = [[0 for j in range(1000)] for i in range(1000)]
solve_part(table, data.split("\n"))
return sum(map(sum, table))
def part2(data):
table = [[0 for j in range(1000)] for i in range(1000)]
solve_part(table, data.split("\n"), smart=1)
return sum(map(sum, table))
def toggle(x1, x2, y1, y2, table, smart=0):
for x in range(x1,x2+1):
for y in range(y1,y2+1):
if smart:
table[x][y] += 2
else:
table[x][y] = not table[x][y]
def turn_off(x1,x2,y1,y2,table, smart=0):
for x in range(x1,x2+1):
for y in range(y1,y2+1):
if smart:
table[x][y] = max(table[x][y] - 1, 0)
else:
table[x][y] = 0
def turn_on(x1,x2,y1,y2,table, smart=0):
for x in range(x1,x2+1):
for y in range(y1,y2+1):
if smart:
table[x][y] += 1
else:
table[x][y] = 1
def solve_part(table, data, smart=0):
for part in data:
*typ, start, through, stop = part.split()
x1,y1 = map(int, start.split(","))
x2,y2 = map(int, stop.split(","))
if typ == ["toggle"]:
            toggle(x1, x2, y1, y2, table, smart)
elif typ[-1] == "off":
turn_off(x1,x2,y1,y2, table, smart)
else:
turn_on(x1,x2,y1,y2, table, smart)
```
#### File: 2016/python/helper.py
```python
import os
__author__ = "<NAME>"
def get_file(day, mode="r"):
return open("../" + "input" + os.sep + str(day) + ".in", mode=mode)
``` |
{
"source": "jO-Osko/adventofcode",
"score": 3
} |
#### File: 2015/problems/day10.py
```python
DAY = 10
from itertools import groupby
def look_and_say(ma, num="3113322113"):
while ma:
yield num
num = "".join(str(len(list(listt))) + n for n, listt in groupby(num))
ma -= 1
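    # e.g. one pass maps "1211" -> "111221": each run of k equal digits becomes str(k) + digit.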
def part1():
for j in look_and_say(41):
pass
return(len(j))
def part2():
for j in look_and_say(51):
pass
return(len(j))
```
#### File: 2016/python/day6.py
```python
__author__ = "<NAME>"
from hashlib import md5
from helper import get_file
DAY = 6
data = [line.strip() for line in get_file(DAY)]
def part1(data):
from collections import Counter
rtr = ""
for ch in zip(*data):
c = Counter(ch)
rtr += c.most_common(1)[0][0]
return rtr
def part2(data):
from collections import Counter
rtr = ""
for ch in zip(*data):
c = Counter(ch)
rtr += c.most_common(None)[-1][0]
return rtr
print(part1(data))
print(part2(data))
``` |
{
"source": "jO-Osko/FASTENER",
"score": 2
} |
#### File: jO-Osko/FASTENER/fastener.py
```python
import math
import os
import pickle
import time
from dataclasses import dataclass, field
from functools import wraps
import random_utils
from random_utils import shuffle
from typing import Dict, List, Callable, Any, Tuple, Optional, \
Counter as CounterType, Set
import numpy as np
from collections import Counter
from item import Item, Result, Population, flatten_population, FitnessFunction, \
Genes, EvalItem, RandomFlipMutationStrategy, RandomEveryoneWithEveryone, \
IntersectionMating, UnionMating, IntersectionMatingWithInformationGain, \
IntersectionMatingWithWeightedRandomInformationGain, UnevaluatedPopulation, \
MatingStrategy, MutationStrategy, MatingSelectionStrategy
Front = Dict[int, EvalItem]
@dataclass
class LogData:
generation: int
population: Population
front: Front
mated: UnevaluatedPopulation
mutated: UnevaluatedPopulation
cache_counter: CounterType[int]
cache_data: Dict[int, Result]
random_state: Any
timestamp: float = field(init=False)
def __post_init__(self):
self.timestamp = time.time()
@staticmethod
def discard_model(cache_data: Dict[int, Tuple[Result, Any]]) -> Dict[int, Result]:
return {num: item[0] for num, item in cache_data.items()}
def dump_log(self, config: "Config") -> None:
pickle.dump(
self,
open(os.path.join(
config.output_folder,
f"generation_{self.generation}.pickle"),
"wb")
)
@dataclass
class Config:
output_folder: str
random_seed: int
number_of_rounds: int = 1000 # Number of round of genetic algorithm
max_bucket_size: int = 3 # Max number of items in same size bucket
reset_to_pareto_rounds: Optional[int] = 5 # Reset population to Pareto front every n-rounds (False is never)
cache_fitness_function: bool = True
def __post_init__(self) -> None:
if not self.reset_to_pareto_rounds:
self.reset_to_pareto_rounds = self.number_of_rounds
self.output_folder = os.path.join("log", self.output_folder)
random_utils.seed(self.random_seed)
class EntropyOptimizer:
def __init__(self,
model: Any,
train_data: np.array,
train_target: np.array,
evaluator: Callable[
[Any, "Genes", Optional[List[int]]], "Result"],
number_of_genes: int,
mating_selection_strategy: MatingSelectionStrategy,
mutation_strategy: MutationStrategy,
reset_to_front_predicate: Optional[
Callable[[int, Population, Front], bool]] = None,
initial_population: Optional[UnevaluatedPopulation] = None,
initial_genes: Optional[List[List[int]]] = None,
config: Optional[Config] = None) -> None:
self._model = model
self.evaluator = evaluator
self.train_data = train_data
self.train_target = train_target
self.number_of_genes = number_of_genes
if config is None:
config = Config("data", 2020)
self.config = config
os.makedirs(self.config.output_folder, exist_ok=False)
self.mating_selection_strategy = mating_selection_strategy
self.mutation_strategy = mutation_strategy
self.initial_population = initial_population
self.initial_genes = initial_genes
if reset_to_front_predicate is None:
self.reset_to_front_predicate = EntropyOptimizer.default_reset_to_pareto
else:
self.reset_to_front_predicate = reset_to_front_predicate
self.cache_counter: CounterType[int] = Counter()
self.cache_data: Dict[int, Tuple[Result, Any]] = {}
self.population: Population = {}
self.pareto_front: Front = {}
self.fitness_function = self._fitness_function
def train_model(self, genes: "Genes") -> Any:
return self._model().fit(self.train_data[:, genes], self.train_target)
def _fitness_function(self, genes: "Genes") -> "Tuple[Result, Any]":
model = self.train_model(genes)
return self.evaluator(model, genes, None), model
def cached_fitness(self, genes: "Genes") -> "Tuple[Result, Any]":
#print("Trying:", np.where(genes))
number = Item.to_number(genes)
result = self.cache_data.get(number, None)
self.cache_counter[number] += 1
if result is None:
result = self._fitness_function(genes)
self.cache_data[number] = result
else:
#print("Hitted", self.cache_counter)
pass
return result
def purge_front_with_information_gain(self):
new_items = []
for num, item in self.pareto_front.items():
if num == 1: # Can't remove features
continue
new_item = self.purge_item_with_information_gain(item)
assert new_item.size == num - 1
new_items.append(new_item)
better = 0
new = 0
for item in new_items:
if item.size in self.pareto_front:
if item.result > self.pareto_front[item.size].result:
self.pareto_front[item.size] = item
better += 1
else:
self.pareto_front[item.size] = item
new += 1
# print("Purged:", "better:", better, "new:", new)
self.remove_pareto_non_optimal()
def purge_item_with_information_gain(self, item: "Item") -> "EvalItem":
base_result, model = self.fitness_function(item.genes)
on_genes = np.where(item.genes)[0]
changes = [(1.0, -1) for _ in on_genes]
for change_ind, gene_ind in enumerate(on_genes):
sh_result = self.evaluator(model, item.genes, [change_ind])
changes[change_ind] = (base_result.score - sh_result.score, gene_ind)
changes.sort() # Sort them so that first cause the smallest change
# print(base_result)
# print("CHANGES:", changes)
# And therefore seem a good fit for removal
# Maybe some random selection?
genes2 = item.genes.copy()
# Unset the gene with the smallest change
genes2[changes[0][1]] = False
rt_item = Item(genes2, item.generation + 1, None, None).\
evaluate(self.fitness_function)
return rt_item
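    # The method above is a permutation-importance style probe: it shuffles one
    # active feature at a time (via the evaluator's shuffle_indices) and drops
    # the gene whose shuffling changes the score the least.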
@staticmethod
def default_reset_to_pareto(round_num, _population, _front):
return round_num % 3 == 0
def prepare_loop(self) -> None:
if self.config.cache_fitness_function:
self.fitness_function = self.cached_fitness
if self.initial_population:
self.population = self.evaluate_unevaluated(self.initial_population)
if self.initial_genes:
for in_gene in self.initial_genes:
zeros = np.zeros(self.number_of_genes, dtype=bool)
zeros[in_gene] = 1
itm = Item(list(zeros), 0, None, None).evaluate(self.fitness_function)
self.population.setdefault(itm.size, []).append(itm)
assert self.population, "Some sort of initial population must be provided"
self.mating_selection_strategy.use_data_information(self.train_data,
self.train_target)
pickle.dump(
self, open(
os.path.join(self.config.output_folder, "experiment.pickle"),
"wb")
)
def mainloop(self) -> None:
self.prepare_loop()
self.purge_oversize_buckets()
self.update_front_from_population()
for round_n in range(1, self.config.number_of_rounds + 1):
print(f"Round: {round_n}")
mated = self.mating_selection_strategy.process_population(
self.population, round_n
)
mutated = self.mutation_strategy.process_population(mated)
mutated = self.clear_duplicates_after_mating(mutated)
# print(mutated)
# print(len(mutated))
# Evaluate new population
# Do not evaluate "empty" genes
self.population = self.evaluate_unevaluated(mutated)
self.purge_oversize_buckets()
self.update_front_from_population()
bef = len(self.pareto_front)
self.remove_pareto_non_optimal()
aft = len(self.pareto_front)
# print(len(flatten_population(self.population)), bef, aft)
if self.config.reset_to_pareto_rounds and \
round_n % self.config.reset_to_pareto_rounds == 0:
self.purge_front_with_information_gain()
self.reset_population_to_front()
# print("RESETING POPULATION TO FRONT")
log_data = LogData(round_n, self.population, self.pareto_front, mated,
mutated, self.cache_counter,
LogData.discard_model(self.cache_data),
random_utils.get_state())
# Log during evaluation
log_data.dump_log(self.config)
def purge_oversize_buckets(self) -> None:
new_population: Population = {}
for num, items in self.population.items():
new_population[num] = sorted(items, reverse=True)[:self.config.max_bucket_size]
self.population = new_population
def update_front_from_population(self) -> None:
# Assume that population is sorted
for num, items in self.population.items():
if num in self.pareto_front:
if items:
self.pareto_front[num] = \
max(self.pareto_front[num], items[0])
else:
if items:
self.pareto_front[num] = items[0]
@classmethod
def clear_duplicates_after_mating(cls, un_pop: "UnevaluatedPopulation") \
-> "UnevaluatedPopulation":
new_un_pop: UnevaluatedPopulation = {}
for num, items in un_pop.items():
new: List[Item] = []
taken: Set[int] = set()
for item in items:
if item.number not in taken:
taken.add(item.number)
new.append(item)
new_un_pop[num] = new
return new_un_pop
def remove_pareto_non_optimal(self) -> None:
# Make a copy as to not change dict during looping,
# sort them by the number of genes
sorted_items = list(sorted(
self.pareto_front.items(), key=lambda item: item[0]
))
current_peak = sorted_items[0][1]
for j in range(1, len(sorted_items)):
n, item = sorted_items[j]
if current_peak.pareto_better(item):
del self.pareto_front[n]
else:
current_peak = item
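        # Illustrative pruning (assuming pareto_better means "at least as good
        # with fewer genes"): with a front {2: 0.80, 3: 0.78} the 3-gene item
        # is dominated and deleted, while {2: 0.80, 4: 0.85} keeps both.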
def reset_population_to_front(self) -> None:
new_population: Population = {}
# Keep Pareto front in itemized order
for num, item in sorted(self.pareto_front.items(),
key=lambda itm: itm[0]):
new_population[num] = [item]
self.population = new_population
def evaluate_unevaluated(self, un_pop: "UnevaluatedPopulation") \
-> Population:
return {
num: [item.evaluate(self.fitness_function) for item in items
if item.size]
for num, items in un_pop.items()
}
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import f1_score
general_model = DecisionTreeClassifier
from dummy_data import XX_train, XX_test, labels_train, labels_test
def eval_fun(model: Any, genes: "Genes",
shuffle_indices: Optional[List[int]] = None) -> "Result":
test_data = XX_test[:, genes]
if shuffle_indices:
test_data = test_data.copy()
for j in shuffle_indices:
shuffle(test_data[:, j])
pred = model.predict(test_data)
res = Result(f1_score(labels_test, pred, average='weighted'))
return res
def main() -> None:
number_of_genes = XX_train.shape[1]
initial_genes = [
[0]
]
# Select mating strategies
mating = RandomEveryoneWithEveryone(
pool_size=3,
mating_strategy=IntersectionMatingWithWeightedRandomInformationGain())
# Random mutation
mutation = RandomFlipMutationStrategy(1 / number_of_genes)
entropy_optimizer = EntropyOptimizer(
general_model, XX_train, labels_train, eval_fun,
number_of_genes, mating, mutation, initial_genes=initial_genes,
config=Config(output_folder="dummy_data_output", random_seed=2020,
reset_to_pareto_rounds=5)
)
entropy_optimizer.mainloop()
if __name__ == '__main__':
main()
``` |
{
"source": "joosm/visual_kinematics",
"score": 3
} |
#### File: visual_kinematics/examples/hydraulic_robot_trajectory.py
```python
from visual_kinematics.RobotSerial import *
from visual_kinematics.RobotTrajectory import *
import numpy as np
from math import pi
import csv
import robot_kinematics_tools as RKT
from ctypes import cdll, c_int, POINTER, c_double
def get_data_from_csv_20layers(path_to_csv_file):
f = open(path_to_csv_file, 'r', newline = '') #, encoding='utf-8')
csv_reader = csv.reader(f)
temp_data = []
line_no = 0
for line in csv_reader:
if line_no == 0:
line_no += 1
else:
temp_data.append([float(f) for f in line[1:23]])
line_no += 1
print(path_to_csv_file,' ',len(temp_data),' ',line_no-1)
temp_data_array = np.asarray(temp_data, dtype=np.float32)
X = temp_data_array[:,6:12] #x,y,z,rx,ry,rz
Y = temp_data_array[:,0:6] #theta1, ... , theta6
return X, Y
def get_data_from_csv(path_to_csv_file):
f = open(path_to_csv_file, 'r', newline = '') #, encoding='utf-8')
csv_reader = csv.reader(f)
temp_data = []
line_no = 0
for line in csv_reader:
if line_no == 0:
line_no += 1
else:
temp_data.append([float(f) for f in line[1:13]])
line_no += 1
print(path_to_csv_file,' ',len(temp_data),' ',line_no-1)
temp_data_array = np.asarray(temp_data,dtype=np.float32)
X = temp_data_array[:,6:12] #x,y,z,rx,ry,rz
Y = temp_data_array[:,0:6] #theta1, ... , theta6
return X, Y
#data_file = "/home/joosm/Workplace/Hydraulic_robot/hybrid_calibration/training_data_joo/simulated_training_data_theta_quaternion_test_20210930_20layers.csv"
#X,Y = get_data_from_csv_20layers(data_file)
data_file = "/home/joosm/Workplace/Hydraulic_robot/hybrid_calibration/training_data_hyun_simulation/HydRobotLearningPt_csv.csv"
#data_file = "/home/joosm/Workplace/Hydraulic_robot/hybrid_calibration/training_data_joo/simulated_training_data_test.csv"
X,Y = get_data_from_csv(data_file)
#X: x,y,z,rx,ry,rz
#Y: theta1, ... , theta6 -- in degree
X_data_temp = X #[]
T_Base_0 = [[ -0.04626181, -0.99892327, -0.00348464, 1346.71285/1000],
[ 0.99892232, -0.04627428, 0.00358895, -1561.24376/1000],
[ -0.00374633, -0.00331485, 0.99998749, 830.66027/1000],
[ 0. , 0. , 0. , 1. ]]
trGripper = RKT.transform_from_translation_only([0.0, 160.0/1000, 340.0/1000])
T_6_TCP = trGripper
#a,alpha,beta,d,theta
HR_MDH_parameter_nominal = [[ 0, 0, 0, 0, 0], #T_0_1
[245.5, -np.pi/2.0, 0, 0, 0], #T_1_2
[1300, 0, 0, 0, 0], #T_2_3
[-300, np.pi/2.0, 0, 800.5, 0], #T_3_4
                            [ 0, -np.pi/2.0, 0, 0, 0], #T_4_5
[ 0, np.pi/2.0, 0, 457.3, 0]] #T_5_6
d = np.asarray(HR_MDH_parameter_nominal)[:,3]/1000
a = np.asarray(HR_MDH_parameter_nominal)[:,0]/1000
alpha = np.asarray(HR_MDH_parameter_nominal)[:,1]
theta = np.asarray(HR_MDH_parameter_nominal)[:,4]
#a,alpha,beta,d,theta
'''
HR_DH_parameter_nominal = [[245.5, -np.pi/2.0, 0, 0, 0], #T_1_2
[1300, 0, 0, 0, 0], #T_2_3
[-300, np.pi/2.0, 0, 0, 0], #T_3_4
[ 0, -np.pi/2.0, 0, 800.5, 0], #T_4_5
[ 0, np.pi/2.0, 0, 0, 0],
[ 0, 0 , 0, 457.3, 0]] #T_5_6
d = np.asarray(HR_DH_parameter_nominal)[:,3]/1000
a = np.asarray(HR_DH_parameter_nominal)[:,0]/1000
alpha = np.asarray(HR_DH_parameter_nominal)[:,1]
theta = np.asarray(HR_DH_parameter_nominal)[:,4]
'''
print("d ",d)
print("a ",a)
print("alpha: ",alpha)
print("theta: ",theta)
mdh_params = np.array([[d[0],a[0],alpha[0],theta[0]],
[d[1],a[1],alpha[1],theta[1]],
[d[2],a[2],alpha[2],theta[2]],
[d[3],a[3],alpha[3],theta[3]],
[d[4],a[4],alpha[4],theta[4]],
[d[5],a[5],alpha[5],theta[5]]])
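# Note (assumption): dh_type="modified" is taken to follow Craig's modified-DH
# convention, where a row [d, a, alpha, theta] gives the link transform
#   T = Rot_x(alpha) * Trans_x(a) * Rot_z(theta) * Trans_z(d).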
'''
dh_params = np.array([[d[0],a[0],alpha[0],theta[0]],
[d[1],a[1],alpha[1],theta[1]],
[d[2],a[2],alpha[2],theta[2]],
[d[3],a[3],alpha[3],theta[3]],
[d[4],a[4],alpha[4],theta[4]],
[d[5],a[5],alpha[5],theta[5]]])
'''
#IK = cdll.LoadLibrary("/home/joosm/Workplace/cpp_project/hydraulic_robot_kinematics/IK_test_visual_kinematics.so")
IK = cdll.LoadLibrary("/home/joosm/Workplace/cpp_project/hydraulic_robot_kinematics/IK_test.so")
#IK = cdll.LoadLibrary("/home/joosm/Workplace/cpp_project/hydraulic_robot_kinematics/IK_test_hyun.so")
IK.InverseKinematics.restype = POINTER(c_double)
IK.InverseKinematics.argtypes = [POINTER(c_double),POINTER(c_double),POINTER(c_double),POINTER(c_double),POINTER(c_double),POINTER(c_double)]
A = a #0.001*HR_MDH_parameter_nominal[:,0]
#print(A)
#alpha = HR_MDH_parameter_nominal[:,1]
beta = np.asarray(HR_MDH_parameter_nominal)[:,2]
D = d# 0.001*HR_MDH_parameter_nominal[:,3]
#print(D)
#theta = HR_MDH_parameter_nominal[:,4]
#print(len(A))
A = (c_double * len(A))(*A)
alpha = (c_double * len(alpha))(*alpha)
beta = (c_double * len(beta))(*beta)
D = (c_double * len(D))(*D)
theta = (c_double * len(theta))(*theta)
'''
def HR_analytical_IK(dh_params,TCP_target):
#TCP_target_position = T_0_6[0:3,3].flatten() # unit: mm
#TCP_target_orientation = np.rad2deg(RKT.RotationMatrixToRPYeulerAngles(T_0_6[0:3,0:3]))
#print(TCP_target_position, " ", np.rad2deg(TCP_target_orientation))
#TCP_target = np.array([TCP_target_position[0],TCP_target_position[1],TCP_target_position[2],TCP_target_orientation[0],TCP_target_orientation[1],TCP_target_orientation[2] ])
#TCP_target = np.hstack((TCP_target_position,TCP_target_orientation))
#print(TCP_target)
#print(TCP_target.shape)
#TCP_target = (c_double * len(TCP_target))(*TCP_target)
q = IK.InverseKinematics(A,alpha,beta,D,theta,(c_double * len(TCP_target))(*TCP_target))[0:6]
return True, q
'''
def main():
np.set_printoptions(precision=3, suppress=True)
#| d | a | alpha | theta |
#dh_params = np.array([[0.163, 0., 0.5 * pi, 0.],
# [0., 0.632, pi, 0.5 * pi],
# [0., 0.6005, pi, 0.],
# [0.2013, 0., -0.5 * pi, -0.5 * pi],
# [0.1025, 0., 0.5 * pi, 0.],
# [0.094, 0., 0., 0.]])
def HR_analytical_IK(mdh_params,f):
#TCP_target_position = T_0_6[0:3,3].flatten() # unit: mm
#TCP_target_orientation = np.rad2deg(RKT.RotationMatrixToRPYeulerAngles(T_0_6[0:3,0:3]))
#print(TCP_target_position, " ", np.rad2deg(TCP_target_orientation))
#TCP_target = np.array([TCP_target_position[0],TCP_target_position[1],TCP_target_position[2],TCP_target_orientation[0],TCP_target_orientation[1],TCP_target_orientation[2] ])
#TCP_target = np.hstack((TCP_target_position,TCP_target_orientation))
#print(TCP_target)
#print(TCP_target.shape)
#TCP_target = (c_double * 6)(*TCP_target)
#q = IK.InverseKinematics(A,alpha,beta,D,theta,TCP_target)[0:6]
#print(f)
TCP_target_position = f[0:3,3].flatten() # unit: mm
TCP_target_orientation = np.rad2deg(RKT.RotationMatrixToRPYeulerAngles(f[0:3,0:3]))
TCP_target = np.asarray([TCP_target_position[0]*1000, TCP_target_position[1]*1000, TCP_target_position[2]*1000, TCP_target_orientation[0], TCP_target_orientation[1], TCP_target_orientation[2]])
q = IK.InverseKinematics(A,alpha,beta,D,theta,(c_double * 6)(*TCP_target))[0:6]
#print(q)
return True, q
robot = RobotSerial(mdh_params,dh_type="modified",analytical_inv = HR_analytical_IK, plot_xlim=[-1.5, 1.5], plot_ylim=[-1.5, 1.5], plot_zlim=[-0.5, 1.0] )
#robot = RobotSerial(mdh_params,dh_type="modified",plot_xlim=[-1.5, 1.5], plot_ylim=[-1.5, 1.5], plot_zlim=[-0.5, 1.0] )
#robot = RobotSerial(dh_params,dh_type="normal" )
# =====================================
# trajectory
# =====================================
# construct a frame using ZYX euler angle and translation vector
#@staticmethod
#def from_euler_3(euler_3, t_3_1):
# r_3_3 = Rotation.from_euler("ZYX", euler_3, degrees=False).as_matrix()
# return Frame.from_r_3_3(r_3_3, t_3_1)
frames = []
#for i in range(int(len(X)/2)):
for i in range(100,len(X)):
#for i in range(10000):
T_Base_TCP_given = RKT.transform_from_pose(X_data_temp[i]) #T_0_TCP
#print(T_Base_TCP_given)
T_Base_TCP_given[0,3] = T_Base_TCP_given[0,3]/1000
T_Base_TCP_given[1,3] = T_Base_TCP_given[1,3]/1000
T_Base_TCP_given[2,3] = T_Base_TCP_given[2,3]/1000
#print(T_Base_TCP_given)
#print(T_Base_TCP_given)
T_0_TCP = np.dot(np.linalg.inv(T_Base_0),T_Base_TCP_given)
#print(T_temp)
T_0_6 = np.dot(T_0_TCP,np.linalg.inv(T_6_TCP))
TCP_target_position = T_0_6[0:3,3].flatten() # unit: m
TCP_target_orientation = RKT.RotationMatrixToRPYeulerAngles(T_0_6[0:3,0:3])
#print("TCP_target_position: ",TCP_target_position, TCP_target_position[0]," ", TCP_target_position[1]," ", TCP_target_position[2])
#print("TCP_target_orientation: ",TCP_target_orientation)
#frames.append(Frame.from_euler_3(np.array([TCP_target_orientation[0],TCP_target_orientation[1],TCP_target_orientation[2]]), np.array([[TCP_target_position[0]], [TCP_target_position[1]], [TCP_target_position[2]]])))
#print("TCP x,y,z: ", TCP_target_position[0], " ", TCP_target_position[1]," ",TCP_target_position[2])
#if TCP_target_position[0]>=0:
frames.append(Frame.from_euler_3(np.array([TCP_target_orientation[2],TCP_target_orientation[1],TCP_target_orientation[0]]), np.array([[TCP_target_position[0]], [TCP_target_position[1]], [TCP_target_position[2]]])))
#HR_analytical_IK(mdh_params,Frame.from_euler_3(np.array([TCP_target_orientation[2],TCP_target_orientation[1],TCP_target_orientation[0]]), np.array([[TCP_target_position[0]], [TCP_target_position[1]], [TCP_target_position[2]]])))
#frames = [Frame.from_euler_3(np.array([0.5 * pi, 0., pi]), np.array([[0.28127], [0.], [1.13182]])),
# Frame.from_euler_3(np.array([0.25 * pi, 0., 0.75 * pi]), np.array([[0.48127], [0.], [1.13182]])),
# Frame.from_euler_3(np.array([0.5 * pi, 0., pi]), np.array([[0.48127], [0.], [0.63182]]))]
print("frame set built")
trajectory = RobotTrajectory(robot, frames)
print("trajectory built")
trajectory.show(num_segs=100, motion="lin", method="linear") # motion="p2p" or "lin"; method="linear"
print("show end")
if __name__ == "__main__":
main()
```
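Note on the frame construction used in `main()` above: the orientation components are passed reversed (`[2,1,0]`) because, per the `from_euler_3` constructor quoted in the comments, `Frame` is built from intrinsic ZYX Euler angles. A minimal, hedged sketch of that convention (the import path is an assumption about the visual_kinematics package layout):
```python
import numpy as np
from visual_kinematics.Frame import Frame  # import path assumed

rpy = np.array([0.1, 0.2, 0.3])      # roll, pitch, yaw in radians
t = np.array([[0.5], [0.0], [0.8]])  # 3x1 translation column, metres
# from_euler_3 expects ZYX order, i.e. [yaw, pitch, roll]:
f = Frame.from_euler_3(rpy[::-1], t)
```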
#### File: visual_kinematics/visual_kinematics/utility.py
```python
import numpy as np
from math import pi
# ================== constrain angle between -pi and pi
def simplify_angle(angle):
while angle > pi:
angle -= 2 * pi
while angle < -pi:
angle += 2 * pi
return angle
# ================== constrain angles[n, ] between -pi and pi
def simplify_angles(angles):
for i in range(angles.shape[0]):
angles[i] = simplify_angle(angles[i])
return angles
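# Hedged note: with numpy already imported above, an equivalent vectorized
# form wraps a whole array in one call:
#     wrapped = np.mod(angles + pi, 2 * pi) - pi
# Unlike the loops above it maps exactly pi to -pi, so keep the loop version
# if that boundary case matters.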
``` |
{
"source": "joostaafjes/CarND-Behavioral-Cloning-P3",
"score": 3
} |
#### File: joostaafjes/CarND-Behavioral-Cloning-P3/train.py
```python
import csv
import cv2
import numpy as np
import sys
from keras.models import Sequential
from keras.layers import Flatten, Dense, Cropping2D
from keras.layers.core import Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D
import tensorflow as tf
import matplotlib.pyplot as plt
flags = tf.app.flags
FLAGS = flags.FLAGS
root = ''
# command line flags
flags.DEFINE_integer('epochs', 1, "# of epochs")
class Logger(object):
def __init__(self):
self.terminal = sys.stdout
self.log = open("training_results.log", "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
self.flush()
def flush(self):
pass
sys.stdout = Logger()
def get_data_for_ride(ride, track='track1', dir='forw'):
base_path = root + './data/' + track + '/' + dir + '/' + ride + '/'
print('Start reading file for ride ' + ride + '...')
lines = []
with open(base_path + 'driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lines.append(line)
images = []
measurements = []
steering_correction = 0.2
for line in lines:
image_center = cv2.imread(base_path + './IMG/' + line[0].split('/')[-1])
image_left = cv2.imread(base_path + './IMG/' + line[1].split('/')[-1])
image_right = cv2.imread(base_path + './IMG/' + line[2].split('/')[-1])
images.extend([image_center, image_left, image_right])
steering_angle = float(line[3])
measurements.extend([steering_angle, steering_angle + steering_correction, steering_angle - steering_correction])
# augment with flipped image
augmented_images = []
augmented_measurements = []
for image, measurement in zip(images, measurements):
augmented_images.append(image)
augmented_measurements.append(measurement)
augmented_images.append(np.fliplr(image))
augmented_measurements.append(measurement * -1.0)
return augmented_images, augmented_measurements
def train(ride, images, measurements):
X_train = np.array(images)
y_train = np.array(measurements)
print('------------------------')
print('ride:', ride)
print('------------------------')
print('min:', np.min(y_train))
print('max:', np.max(y_train))
print('mean:', np.mean(y_train))
print('median:', np.median(y_train))
model = Sequential()
# input_shape must be declared on the first layer; crop first, then normalize
model.add(Cropping2D(cropping=((60, 25), (0, 0)), input_shape=(160, 320, 3)))
model.add(Lambda(lambda x: (x / 255.0) - 0.5))
# model.add(Flatten())
# model.add(Dense(1))
model.add(Convolution2D(6, 5, 5, activation='relu'))
model.add(MaxPooling2D())
model.add(Convolution2D(6, 5, 5, activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(120))
model.add(Dense(84))
model.add(Dense(1))
#model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
# train model
history_object = model.fit(X_train, y_train, validation_split=0.2, shuffle=True, nb_epoch=FLAGS.epochs, verbose=2)
model.save(root + 'model' + ride + '.h5')
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
print('using gpu?')
#tf.test.gpu_device_name()
print('No of epochs:' + str(FLAGS.epochs))
images01, measurements01 = get_data_for_ride('01')
images02, measurements02 = get_data_for_ride('02')
images04, measurements04 = get_data_for_ride('04', dir='back')
train('01', images01, measurements01)
train('02', images02, measurements02)
train('04', images04, measurements04)
train('0102', images01 + images02, measurements01 + measurements02)
train('010204', images01 + images02 + images04, measurements01 + measurements02 + measurements04)
# history
# 1. only Flatten and Dense: wheels go up and down
# 2. normalisation: still the same
# 3. LeNet alone: more stable, but drifts to the left
# 4. LeNet plus mirror-image augmentation:
``` |
{
"source": "joostaafjes/openpilot",
"score": 2
} |
#### File: lib/tests/test_latcontrol.py
```python
import unittest
from parameterized import parameterized
from cereal import car, log
from selfdrive.car.car_helpers import interfaces
from selfdrive.car.honda.values import CAR as HONDA
from selfdrive.car.toyota.values import CAR as TOYOTA
from selfdrive.car.nissan.values import CAR as NISSAN
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_angle import LatControlAngle
from selfdrive.controls.lib.vehicle_model import VehicleModel
class TestLatControl(unittest.TestCase):
@parameterized.expand([(HONDA.CIVIC, LatControlPID), (TOYOTA.RAV4, LatControlLQR), (TOYOTA.PRIUS, LatControlINDI), (NISSAN.LEAF, LatControlAngle)])
def test_saturation(self, car_name, controller):
CarInterface, CarController, CarState = interfaces[car_name]
CP = CarInterface.get_params(car_name)
CI = CarInterface(CP, CarController, CarState)
VM = VehicleModel(CP)
controller = controller(CP, CI)
CS = car.CarState.new_message()
CS.vEgo = 30
last_actuators = car.CarControl.Actuators.new_message()
params = log.LiveParametersData.new_message()
for _ in range(1000):
_, _, lac_log = controller.update(True, CS, CP, VM, params, last_actuators, 1, 0)
self.assertTrue(lac_log.saturated)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "joostdevries/werkzeug",
"score": 2
} |
#### File: examples/cupoftee/pages.py
```python
import operator
from werkzeug.utils import redirect
from werkzeug.exceptions import NotFound
from cupoftee.application import Page
from cupoftee.utils import unicodecmp
class ServerList(Page):
url_rule = '/'
def order_link(self, name, title):
cls = ''
link = '?order_by=' + name
desc = False
if name == self.order_by:
desc = not self.order_desc
cls = ' class="%s"' % (desc and 'down' or 'up')
if desc:
link += '&dir=desc'
return '<a href="%s"%s>%s</a>' % (link, cls, title)
def process(self):
self.order_by = self.request.args.get('order_by') or 'name'
sort_func = {
'name': lambda x: x,
'map': lambda x: x.map,
'gametype': lambda x: x.gametype,
'players': lambda x: x.player_count,
'progression': lambda x: x.progression,
}.get(self.order_by)
if sort_func is None:
return redirect(self.url_for('serverlist'))
self.servers = self.cup.master.servers.values()
self.servers.sort(key=sort_func)
if self.request.args.get('dir') == 'desc':
self.servers.reverse()
self.order_desc = True
else:
self.order_desc = False
self.players = reduce(lambda a, b: a + b.players, self.servers, [])
self.players.sort(lambda a, b: unicodecmp(a.name, b.name))
class Server(Page):
url_rule = '/server/<id>'
def process(self, id):
try:
self.server = self.cup.master.servers[id]
except KeyError:
raise NotFound()
class Search(Page):
url_rule = '/search'
def process(self):
self.user = self.request.args.get('user')
if self.user:
self.results = []
for server in self.cup.master.servers.itervalues():
for player in server.players:
if player.name == self.user:
self.results.append(server)
class MissingPage(Page):
def get_response(self):
response = super(MissingPage, self).get_response()
response.status_code = 404
return response
```
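This example is written for Python 2: it relies on the builtin `reduce`, `dict.itervalues()`, and cmp-style `sort()` arguments, none of which exist in Python 3. A hedged, self-contained sketch of the same aggregation in Python 3 style (the stand-in classes replace the real server/player objects):
```python
from functools import reduce  # Python 3 moved reduce here

class Player:
    def __init__(self, name):
        self.name = name

class Server:
    def __init__(self, players):
        self.players = players

servers = [Server([Player("bob")]), Server([Player("alice")])]
# concatenate all player lists, then sort with key= instead of a cmp function
players = reduce(lambda acc, s: acc + s.players, servers, [])
players.sort(key=lambda p: p.name.lower())
print([p.name for p in players])
```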
#### File: examples/plnt/views.py
```python
from datetime import datetime, date
from plnt.database import Blog, Entry
from plnt.utils import Pagination, expose, render_template
#: number of items per page
PER_PAGE = 30
@expose('/', defaults={'page': 1})
@expose('/page/<int:page>')
def index(request, page):
"""Show the index page or any an offset of it."""
days = []
days_found = set()
query = Entry.query.order_by(Entry.pub_date.desc())
pagination = Pagination(query, PER_PAGE, page, 'index')
for entry in pagination.entries:
day = date(*entry.pub_date.timetuple()[:3])
if day not in days_found:
days_found.add(day)
days.append({'date': day, 'entries': []})
days[-1]['entries'].append(entry)
return render_template('index.html', days=days, pagination=pagination)
@expose('/about')
def about(request):
"""Show the about page, so that we have another view func ;-)"""
return render_template('about.html')
```
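The index view groups entries into day buckets with a manual `days_found` set. Because the query is already ordered by `pub_date`, the same grouping can be expressed with `itertools.groupby`; a self-contained sketch (the datetimes are illustrative):
```python
from datetime import date, datetime
from itertools import groupby

pub_dates = [datetime(2024, 1, 2, 9), datetime(2024, 1, 2, 17), datetime(2024, 1, 1, 8)]
# groupby only merges adjacent keys, so the input must already be sorted,
# which the view guarantees via order_by(Entry.pub_date.desc())
days = [{'date': day, 'entries': list(group)}
        for day, group in groupby(pub_dates, key=lambda d: date(*d.timetuple()[:3]))]
print([(d['date'], len(d['entries'])) for d in days])
```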
#### File: werkzeug/scripts/make-release.py
```python
import sys
import os
import re
from datetime import datetime, date
from subprocess import Popen, PIPE
_date_clean_re = re.compile(r'(\d+)(st|nd|rd|th)')
def parse_changelog():
with open('CHANGES') as f:
lineiter = iter(f)
for line in lineiter:
match = re.search('^Version\s+(.*)', line.strip())
if match is None:
continue
version = match.group(1).strip()
if lineiter.next().count('-') != len(match.group(0)):
continue
while 1:
change_info = lineiter.next().strip()
if change_info:
break
match = re.search(r'released on (\w+\s+\d+\w+\s+\d+)'
r'(?:, codename (.*))?(?i)', change_info)
if match is None:
continue
datestr, codename = match.groups()
return version, parse_date(datestr), codename
def bump_version(version):
try:
parts = map(int, version.split('.'))
except ValueError:
fail('Current version is not numeric')
parts[-1] += 1
return '.'.join(map(str, parts))
def parse_date(string):
string = _date_clean_re.sub(r'\1', string)
return datetime.strptime(string, '%B %d %Y')
def set_filename_version(filename, version_number, pattern):
changed = []
def inject_version(match):
before, old, after = match.groups()
changed.append(True)
return before + version_number + after
with open(filename) as f:
contents = re.sub(r"^(\s*%s\s*=\s*')(.+?)(')(?sm)" % pattern,
inject_version, f.read())
if not changed:
fail('Could not find %s in %s', pattern, filename)
with open(filename, 'w') as f:
f.write(contents)
def set_init_version(version):
info('Setting __init__.py version to %s', version)
set_filename_version('werkzeug/__init__.py', version, '__version__')
def set_setup_version(version):
info('Setting setup.py version to %s', version)
set_filename_version('setup.py', version, 'version')
def build_and_upload():
Popen([sys.executable, 'setup.py', 'release', 'sdist', 'upload']).wait()
def fail(message, *args):
print >> sys.stderr, 'Error:', message % args
sys.exit(1)
def info(message, *args):
print >> sys.stderr, message % args
def get_git_tags():
return set(Popen(['git', 'tag'], stdout=PIPE).communicate()[0].splitlines())
def git_is_clean():
return Popen(['git', 'diff', '--quiet']).wait() == 0
def make_git_commit(message, *args):
message = message % args
Popen(['git', 'commit', '-am', message]).wait()
def make_git_tag(tag):
info('Tagging "%s"', tag)
Popen(['git', 'tag', tag]).wait()
def main():
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
rv = parse_changelog()
if rv is None:
fail('Could not parse changelog')
version, release_date, codename = rv
dev_version = bump_version(version) + '-dev'
info('Releasing %s (codename %s, release date %s)',
version, codename, release_date.strftime('%d/%m/%Y'))
tags = get_git_tags()
if version in tags:
fail('Version "%s" is already tagged', version)
if release_date.date() != date.today():
fail('Release date is not today (%s != %s)',
release_date.date(), date.today())
if not git_is_clean():
fail('You have uncommitted changes in git')
set_init_version(version)
set_setup_version(version)
make_git_commit('Bump version number to %s', version)
make_git_tag(version)
build_and_upload()
set_init_version(dev_version)
set_setup_version(dev_version)
if __name__ == '__main__':
main()
``` |
{
"source": "Joost-dm/jooster",
"score": 2
} |
#### File: jooster/authorization/models.py
```python
from io import BytesIO
from PIL import Image
from django.contrib.auth.models import AbstractUser
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.mail import send_mail
from django.db import models
from resizeimage import resizeimage
from rest_framework.authtoken.models import Token
from main.settings import USER_SETTINGS
from django.db.models import ObjectDoesNotExist
def generate_avatar_path(obj, filename):
""" Generates an unique path to user's avatar dir according to user's id. """
return 'images/avatars/' + str(obj.id) + '/' + filename
class CustomUser(AbstractUser):
"""Extends base django user model"""
displayed = models.CharField(
max_length=40,
unique=True,
verbose_name='отображаемое имя'
)
avatar = models.ImageField(
default='images/avatars/default_avatar.png',
upload_to=generate_avatar_path,
blank=True,
null=True,
verbose_name='аватар',
)
foreign_avatar_url = models.URLField(
null=True,
blank=True,
verbose_name='аватар из стороних источников'
)
def save(self, *args, **kwargs):
""" User's profile update handler. """
try:
self.avatar_update_handler()
except ObjectDoesNotExist:
send_mail('Новый пользователь!', 'Зарегистрирован новый пользователь: ' + self.displayed,
'<EMAIL>', ['<EMAIL>'], fail_silently=True)
super(CustomUser, self).save(*args, **kwargs)
def avatar_update_handler(self):
""" Downloaded avatar image update handler. """
user = CustomUser.objects.get(id=self.id)
if user.avatar != self.avatar:
self.get_avatar_ext()
self.generate_avatar_name()
self.resize_avatar()
# self.delete_current_avatar()
def get_avatar_ext(self):
""" Parses an avatar image extension. """
try:
user_avatar_ext = self.avatar.name.split('.')[-1]
if user_avatar_ext.upper() == 'JPG':
user_avatar_ext = 'jpeg'
self.user_avatar_ext = user_avatar_ext
except AttributeError:
self.avatar = 'images/avatars/default_avatar.png'
raise ObjectDoesNotExist
def resize_avatar(self):
""" Compresses user's avatar image. New sizes declared at project settings. """
user_avatar = Image.open(self.avatar)
avatar_settings = USER_SETTINGS['USER_AVATAR_SETTINGS']
new_user_avatar = resizeimage.resize_cover(
user_avatar,
[avatar_settings['COMPRESSED_WIDTH'], avatar_settings['COMPRESSED_HEIGHT']]
)
new_user_avatar_io = BytesIO()
new_user_avatar.save(new_user_avatar_io, format=self.user_avatar_ext)
self.avatar = InMemoryUploadedFile(new_user_avatar_io, None, self.avatar.name, 'image/' + self.user_avatar_ext,
new_user_avatar_io.tell(), None)
# For using with local storage
"""
def delete_current_avatar(self):
try:
user = CustomUser.objects.get(id=self.id)
except IntegrityError:
raise ValidationError('Некорректный пользователь.')
storage, path = user.avatar.storage, user.avatar.path
if self.avatar.name in path:
storage.delete(path)"""
def generate_avatar_name(self):
""" Generates an user's avatar image name according project settings."""
avatar_settings = USER_SETTINGS['USER_AVATAR_SETTINGS']
self.avatar.name = avatar_settings['AVATAR_IMAGE_NAME'] + '.' + self.user_avatar_ext
AVATAR_FIELD = 'avatar'
REQUIRED_FIELDS = ['email', 'avatar', 'displayed', 'foreign_avatar_url']
```
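The resize path above can be exercised on its own; a hedged sketch using the same Pillow + python-resize-image calls (the file name and target sizes are illustrative, not the project's settings):
```python
from io import BytesIO
from PIL import Image
from resizeimage import resizeimage

with open('avatar.png', 'rb') as f:  # hypothetical local test image
    img = Image.open(f)
    small = resizeimage.resize_cover(img, [200, 200])  # [width, height] assumed

buf = BytesIO()
small.save(buf, format='PNG')
print(buf.tell(), 'bytes after resizing')  # stream position == bytes written
```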
#### File: jooster/forum/models.py
```python
from django.db import models
from django.db.models import Model
from django.contrib.auth.models import AbstractUser
from authorization.models import CustomUser
from rest_framework.authtoken.models import Token
class Forum(Model):
""" Forum model. """
title = models.CharField(max_length=20, verbose_name='название', unique=True)
description = models.TextField(max_length=10000, verbose_name='описание', blank=True)
pub_date = models.DateTimeField(auto_now_add=True, verbose_name='дата создания')
author = models.ForeignKey(CustomUser, verbose_name='Автор', on_delete=models.CASCADE)
is_private = models.BooleanField(default=False, verbose_name='Приватный')
members = models.ManyToManyField(
CustomUser,
through='ForumMembership',
through_fields=('forum', 'user'),
related_name='ForumMembership'
)
def __str__(self):
return self.title
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
if self.is_private:
ForumMembership.objects.create(forum=self, user=self.author)
class Meta:
verbose_name = 'форум'
verbose_name_plural = 'форумы'
class ForumMembership(Model):
""" Model of membership in private forums. """
forum = models.ForeignKey(Forum, verbose_name='форум', on_delete=models.CASCADE)
user = models.ForeignKey(CustomUser, verbose_name='пользователь', on_delete=models.CASCADE)
class Meta:
unique_together = ['forum', 'user']
verbose_name = 'участники форума'
verbose_name_plural = 'участники форумов'
class Branch(Model):
""" Branch model. """
title = models.CharField(max_length=20, verbose_name='заголовок')
pub_date = models.DateTimeField(auto_now_add=True, verbose_name='дата создания')
author = models.ForeignKey(CustomUser, verbose_name='Автор', on_delete=models.CASCADE)
is_private = models.BooleanField(default=False, verbose_name='Приватный')
parent_forum = models.ForeignKey('Forum', default='1', related_name='children', on_delete=models.CASCADE,
verbose_name='родительский форум')
members = models.ManyToManyField(
CustomUser,
through='BranchMembership',
through_fields=('branch', 'user'),
related_name='BranchMembership'
)
def __str__(self):
return self.title
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
if self.is_private:
BranchMembership.objects.create(branch=self, user=self.author)
class Meta:
verbose_name = 'ветка'
verbose_name_plural = 'ветки'
class BranchMembership(Model):
""" Model of membership in private branches. """
branch = models.ForeignKey(Branch, verbose_name='ветка', on_delete=models.CASCADE)
user = models.ForeignKey(CustomUser, verbose_name='пользователь', on_delete=models.CASCADE)
class Meta:
unique_together = ['branch', 'user']
verbose_name = 'участники ветки'
verbose_name_plural = 'участники веток'
class Thread(Model):
""" Thread model. """
text = models.TextField(max_length=10000, verbose_name='текст')
pub_date = models.DateTimeField(auto_now_add=True, verbose_name='дата публикации')
author = models.ForeignKey(CustomUser, verbose_name='Автор', on_delete=models.CASCADE)
parent_forum = models.ForeignKey('Forum', default='1', related_name='children_threads', on_delete=models.CASCADE,
verbose_name='родительский форум')
parent_branch = models.ForeignKey('Branch', default='1', related_name='children', on_delete=models.CASCADE,
verbose_name='родительская ветка')
likes = models.ManyToManyField(
CustomUser,
through='ThreadLike',
through_fields=('thread', 'user'),
related_name='threadLike'
)
viewers = models.ManyToManyField(
CustomUser,
through='ThreadViewer',
through_fields=('thread', 'user'),
related_name='threadViewer'
)
def __str__(self):
if (len(self.text) >= 30):
return self.text[:30] + '...'
else:
return self.text
class Meta:
verbose_name = 'тема'
verbose_name_plural = 'темы'
class ThreadViewer(Model):
""" Model, created for collecting and counting user's views of the thread. """
thread = models.ForeignKey(Thread, verbose_name='Тема', on_delete=models.CASCADE)
user = models.ForeignKey(CustomUser, verbose_name='Пользователь', on_delete=models.CASCADE)
counter = models.SmallIntegerField(default=0, verbose_name='Счетчик просмотров')
class Meta:
unique_together = ['thread', 'user']
verbose_name = 'просмотр'
verbose_name_plural = 'просмотры'
class ThreadLike(Model):
""" Model for collecting users opinions of the thread. """
thread = models.ForeignKey(Thread, verbose_name='Тема', default='1', on_delete=models.CASCADE)
user = models.ForeignKey(CustomUser, verbose_name='Пользователь', default='1', on_delete=models.CASCADE)
like = models.BooleanField(default=True, verbose_name='Нравится')
class Meta:
unique_together = ['thread', 'user']
verbose_name = 'мнение'
verbose_name_plural = 'мнения'
class Post(Model):
""" Post model. """
text = models.CharField(max_length=10000, verbose_name='текст')
pub_date = models.DateTimeField(auto_now_add=True, verbose_name='дата публикации')
author = models.ForeignKey(CustomUser, verbose_name='Автор', on_delete=models.CASCADE)
parent_thread = models.ForeignKey('Thread', default='1', related_name='children', on_delete=models.CASCADE,
verbose_name='родительский элемент')
parent_forum = models.ForeignKey('Forum', default='1', related_name='children_posts', on_delete=models.CASCADE,
verbose_name='родительский форум')
parent_branch = models.ForeignKey('Branch', default='1', related_name='children_posts', on_delete=models.CASCADE,
verbose_name='родительская ветка')
likes = models.ManyToManyField(
CustomUser,
through='PostLike',
through_fields=('post', 'user'),
related_name='postLike'
)
viewers = models.ManyToManyField(
CustomUser,
through='PostViewer',
through_fields=('post', 'user'),
related_name='viewers'
)
def __str__(self):
if (len(self.text) >= 30):
return self.text[:30] + '...'
else:
return self.text
class Meta:
verbose_name = 'пост'
verbose_name_plural = 'посты'
class PostViewer(Model):
""" Model, created for collecting and counting user's views of the post. """
post = models.ForeignKey(Post, verbose_name='Пост', on_delete=models.CASCADE)
user = models.ForeignKey(CustomUser, verbose_name='Пользователь', on_delete=models.CASCADE)
counter = models.SmallIntegerField(default=0, verbose_name='Счетчик просмотров')
class Meta:
unique_together = ['post', 'user']
verbose_name = 'просмотр'
verbose_name_plural = 'просмотры'
class PostLike(Model):
""" Model for collecting users opinions of the post. """
post = models.ForeignKey(Post, verbose_name='Пост', on_delete=models.CASCADE)
user = models.ForeignKey(CustomUser, verbose_name='Пользователь', on_delete=models.CASCADE)
like = models.BooleanField(default=True, verbose_name='Нравится')
class Meta:
unique_together = ['post', 'user']
verbose_name = 'лайк'
verbose_name_plural = 'лайки'
```
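A hedged usage sketch of the like bookkeeping above, mirroring how the serializers later compute carma (requires a configured Django environment and at least one existing post):
```python
from forum.models import Post, PostLike

post = Post.objects.first()  # assumes the table is not empty
likes = PostLike.objects.filter(post=post, like=True).count()
dislikes = PostLike.objects.filter(post=post, like=False).count()
print('carma:', likes - dislikes)
```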
#### File: jooster/forum/serializers.py
```python
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError
from rest_framework import serializers
from forum.models import Forum, Branch, Thread, Post, PostLike, ThreadLike, ForumMembership, \
BranchMembership, PostViewer, ThreadViewer
from authorization.models import CustomUser
from authorization.serializers import UserDetailSerializer
class ForumMembershipSerializer(serializers.Serializer):
""" Forum membership serializer. """
user = serializers.PrimaryKeyRelatedField(queryset=CustomUser.objects.all())
forum = serializers.PrimaryKeyRelatedField(queryset=Forum.objects.all())
def create(self, validated_data):
user = validated_data['user']
forum = validated_data['forum']
membership = ForumMembership.objects.create(user=user, forum=forum)
return membership
class BranchMembershipSerializer(serializers.Serializer):
""" Branch membership serializer. """
user = serializers.PrimaryKeyRelatedField(queryset=CustomUser.objects.all())
branch = serializers.PrimaryKeyRelatedField(queryset=Branch.objects.all())
def create(self, validated_data):
user = validated_data['user']
branch = validated_data['branch']
membership = BranchMembership.objects.create(user=user, branch=branch)
return membership
class PostLikeSerializer(serializers.Serializer):
""" User's opinion about post serializer. """
user = serializers.HiddenField(default=serializers.CurrentUserDefault())
post = serializers.PrimaryKeyRelatedField(queryset=Post.objects.all())
like = serializers.BooleanField()
def create(self, validated_data):
user = validated_data['user']
post = validated_data['post']
like = validated_data['like']
carma = PostLike.objects.create(user=user, post=post, like=like)
return carma
class ThreadLikeSerializer(serializers.Serializer):
""" User's opinion about thread serializer. """
user = serializers.HiddenField(default=serializers.CurrentUserDefault())
thread = serializers.PrimaryKeyRelatedField(queryset=Thread.objects.all())
like = serializers.BooleanField()
def create(self, validated_data):
user = validated_data['user']
thread = validated_data['thread']
like = validated_data['like']
carma = ThreadLike.objects.create(user=user, thread=thread, like=like)
return carma
class ForumCreateSerializer(serializers.ModelSerializer):
""" Forum creation serializer. """
author = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = Forum
fields = '__all__'
class BranchCreateSerializer(serializers.ModelSerializer):
""" Branch creation serializer. """
author = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = Branch
fields = '__all__'
class ThreadCreateSerializer(serializers.ModelSerializer):
""" Thread creation serializer. """
author = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = Thread
fields = '__all__'
class PostCreateSerializer(serializers.ModelSerializer):
""" Post creation serializer. """
author = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = Post
fields = '__all__'
class PostDetailSerializer(serializers.ModelSerializer):
""" Post details serializer. """
author = UserDetailSerializer(CustomUser)
carma = serializers.SerializerMethodField('total_carma')
users_liked_list = serializers.SerializerMethodField('users_liked')
users_disliked_list = serializers.SerializerMethodField('users_disliked')
def users_liked(self, post):
""" Returns users list with positive opinion of the current post."""
users = CustomUser.objects.filter(postlike__post=post, postlike__like=True)
id_list = []
for user in users:
id_list.append(user.id)
return id_list
def users_disliked(self, post):
""" Returns users list with negative opinion of the current post."""
users = CustomUser.objects.filter(postlike__post=post, postlike__like=False)
id_list = []
for user in users:
id_list.append(user.id)
return id_list
def total_carma(self, post):
""" Returns value of current carma of the post. """
likes = PostLike.objects.filter(post=post, like=True)
dislikes = PostLike.objects.filter(post=post, like=False)
return likes.count() - dislikes.count()
class Meta:
model = Post
fields = ['id', 'author', 'carma', 'users_liked_list', 'users_disliked_list',
'text', 'pub_date', 'parent_forum', 'parent_branch', 'parent_thread', 'viewers']
class ThreadDetailSerializer(serializers.ModelSerializer):
""" Thread details serializer. """
author = UserDetailSerializer(CustomUser)
children_count = serializers.SerializerMethodField('count_children')
unread_count = serializers.SerializerMethodField('check_unread')
carma = serializers.SerializerMethodField('total_carma')
users_liked_list = serializers.SerializerMethodField('users_liked')
users_disliked_list = serializers.SerializerMethodField('users_disliked')
parent_branch_title = serializers.SerializerMethodField('get_parent_branch_title')
def get_parent_branch_title(self, thread):
""" Returns the name of the parent branch of this thread. (It's used for creating back-link
(to the parent branch) from the secondary(thread) window at the interface."""
return thread.parent_branch.title # parent is already loaded; no extra query needed
def users_liked(self, thread):
""" Returns users list with positive opinion of the current thread."""
users = CustomUser.objects.filter(threadlike__thread=thread, threadlike__like=True)
id_list = []
for user in users:
id_list.append(user.id)
return id_list
def users_disliked(self, thread):
""" Returns users list with negative opinion of the current post."""
users = CustomUser.objects.filter(threadlike__thread=thread, threadlike__like=False)
id_list = []
for user in users:
id_list.append(user.id)
return id_list
def total_carma(self, thread):
""" Returns value of current carma of the thread. """
likes = ThreadLike.objects.filter(thread=thread, like=True)
dislikes = ThreadLike.objects.filter(thread=thread, like=False)
return likes.count() - dislikes.count()
def count_children(self, thread):
""" Returns the count of thread's children. """
return thread.children.count()
def check_unread(self, thread):
""" Returns count of the unread posts in the thread. """
posts = thread.children.all()
user = self.context['request'].user
unread_counter = 0
for post in posts:
try:
PostViewer.objects.get(user=user, post=post)
except ObjectDoesNotExist:
unread_counter += 1
return unread_counter
class Meta:
model = Thread
fields = ['id', 'author', 'children_count', 'unread_count', 'carma', 'users_liked_list', 'users_disliked_list',
'text', 'pub_date', 'parent_forum', 'parent_branch', 'viewers', 'parent_branch_title']
class BranchDetailSerializer(serializers.ModelSerializer):
""" Branch detail serializer. """
author = UserDetailSerializer(CustomUser)
children_count = serializers.SerializerMethodField('count_children')
unread_count = serializers.SerializerMethodField('check_unread')
def count_children(self, branch):
""" Returns the count of branch's children. """
return branch.children.count()
def check_unread(self, branch):
""" Returns the count of unread threads in the branch. """
threads = branch.children.all()
user = self.context['request'].user
unread_counter = 0
for thread in threads:
try:
ThreadViewer.objects.get(user=user, thread=thread)
except ObjectDoesNotExist:
unread_counter += 1
return unread_counter
class Meta:
model = Branch
fields = '__all__'
class ForumDetailSerializer(serializers.ModelSerializer):
""" Forum detail serializer. """
author = UserDetailSerializer(CustomUser)
children_count = serializers.SerializerMethodField('count_children')
def count_children(self, forum):
""" Returns the count of the forum's children. """
return forum.children.count()
class Meta:
model = Forum
fields = '__all__'
``` |
{
"source": "JoostGevaert/gimli",
"score": 2
} |
#### File: pygimli/testing/test_PhysicsManagers.py
```python
import sys
import unittest
import numpy as np
import pygimli as pg
from pygimli.physics import VESManager, ERTManager
from pygimli.physics.em import VMDTimeDomainModelling
class TestManagers(unittest.TestCase):
def test_ERT(self, showProgress=False):
dat = pg.getExampleFile('ert/gallery.dat', load=True, verbose=True)
mesh = pg.meshtools.createParaMesh(dat.sensors(), quality=33.4,
paraDX=0.3, paraMaxCellSize=0.5, paraDepth=8)
#with SR
ert = ERTManager(sr=True, useBert=True, verbose=False, debug=False)
mod = ert.invert(dat, mesh=mesh, maxIter=20, lam=10)
np.testing.assert_approx_equal(ert.inv.chi2(), 1.003, significant=3)
#without SR
ert = ERTManager(sr=False, useBert=True, verbose=False, debug=False)
mod = ert.invert(dat, mesh=mesh, maxIter=20, lam=10)
# np.testing.assert_approx_equal(ert.inv.chi2(), 0.9833, significant=3)
def test_TT(self, showProgress=False):
pass
def test_VMD(self, showProgress=False):
t = np.logspace(-5.5, -2.2, 20)
verbose = False
fop = VMDTimeDomainModelling(times=t, txArea=10000.0, rxArea=10000.0,
verbose=verbose)
# [thick[3], res[4]] nLay=4
vmdMgr = pg.frameworks.MethodManager1d(fop)
synthModel = np.array([25., 5., 100., 150., 1., 10., 4.])
ra = vmdMgr.simulate(synthModel)
err = abs(np.log(t)/2) * 0.01
ra *= 1. + pg.math.randn(len(ra)) * err
model = vmdMgr.invert(ra, err, nLayers=4, layerLimits=[2, 500],
maxIter=50,
showProgress=showProgress, verbose=verbose)
np.testing.assert_array_less(vmdMgr.fw.chi2(), 1.5)
if showProgress:
axs = vmdMgr.showResult()
fop.drawModel(ax=axs[0], model=synthModel, label='Synth')
def test_VES(self, showProgress=False):
"""
"""
thicks = [2., 10.]
res = [100., 5., 30]
phi = [0., 20., 0.]
# model fails
# thicks = [2., 6., 10.]
# res = [100., 500., 20., 800.]
# phi = [0., 20., 50., 0]
synthModel = pg.cat(thicks, res)
ab2 = np.logspace(np.log10(1.5), np.log10(100.), 25)
mgr = VESManager(verbose=False, debug=False)
if showProgress:
mgr.verbose = True
fig, axs = pg.plt.subplots(2, 4, figsize=(12,7))
mgr.inv.axs = [axs[0][0], axs[1][0]]
### Test -- basic
ra, err = mgr.simulate(synthModel, ab2=ab2, mn2=1.0, noiseLevel=0.01)
mgr.exportData('synth.ves', ra, err)
mgr.invert(ra, err, nLayers=4, lam=100, layerLimits=False,
showProgress=showProgress)
mgr.fop.drawModel(ax=axs[0][0], model=synthModel, label='Synth')
np.testing.assert_array_less(mgr.fw.chi2(), 1.0)
### Test -- reinit with new parameter count
mgr.inv.axs = [axs[0][1], axs[1][1]]
mgr.invert(ra, err, nLayers=5, layerLimits=False,
showProgress=showProgress)
mgr.fop.drawModel(ax=axs[0][1], model=synthModel, label='Synth')
# axs[0][1].legend()
np.testing.assert_array_less(mgr.inv.inv.chi2(), 1)
### Test -- reinit with new data basis
ab2_2 = np.logspace(np.log10(1.5), np.log10(50.), 10)
ra, err = mgr.simulate(synthModel, ab2=ab2_2, mn2=1.0, noiseLevel=0.01)
mgr.inv.axs = [axs[0][2], axs[1][2]]
mgr.invert(ra, err, nLayers=4, ab2=ab2_2, mn2=1.0, layerLimits=False,
showProgress=showProgress)
mgr.fop.drawModel(ax=axs[0][2], model=synthModel, label='Synth')
# axs[0][2].legend()
np.testing.assert_array_less(mgr.inv.inv.chi2(), 1)
### Test -- reinit with complex resistivies
mgr.complex = True
synthModel = pg.cat(synthModel, phi)
ra, err = mgr.simulate(synthModel, ab2=ab2, mn2=1.0, noiseLevel=0.01)
mgr.inv.axs = [axs[0][3], axs[1][3]]
mgr.invert(ra, err, layerLimits=False,
showProgress=showProgress, maxIter=50)
np.testing.assert_array_less(mgr.inv.inv.chi2(), 2)
if __name__ == '__main__':
if len(sys.argv) > 1:
test = TestManagers()
if sys.argv[1].lower() == 'ves':
test.test_VES(showProgress=True)
elif sys.argv[1].lower() == 'vmd':
test.test_VMD(showProgress=True)
elif sys.argv[1].lower() == 'ert':
test.test_ERT(showProgress=True)
pg.info("test done")
pg.wait()
else:
unittest.main()
``` |
{
"source": "JoostGevaert/hello_python",
"score": 4
} |
#### File: hellopython/1_hello/args_kwargs.py
```python
### *args & **kwargs tutorial | Real Python ###
# This function can only add 2 values
def my_sum(a, b):
return a + b
# This function can sum over many POSITIONAL ARGUMENTS
def my_sum(*args):
result = 0
# Iterating over the Python args TUPLE
for x in args:
result += x
return result
print(my_sum(1, 2, 3))
# This function concatenates a string
def concatenate(**kwargs):
result = ""
# Iterating over the Python kwargs DICTIONARY
for arg in kwargs.values():
result += arg
return result
print(concatenate(a="Real", b="Python", c="Is", d="Great", e="!"))
# Combined function arguments NEED to come in this order:
# 1. Standard arguments
# 2. *args arguments
# 3. **kwargs arguments
# Unpacking Operators * and **
# * and ** are operators that unpack the values from iterable objects.
# * can be used on any iterable
# ** can only be used on mappings (e.g. dictionaries)
my_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
first, *middle, last = my_list
print(my_list, *my_list, sep=' | ')
print(first, middle, last, sep='\n')
# Merging lists and dictionaries with * and **
first_list = [1, 2, 3]
second_list = [4, 5, 6]
merged_list = [*first_list, *second_list]
nested_list = [first_list, second_list]
# Dicts
first_dict = {"A": 1, "B": 2}
second_dict = {"C": 3, "D": 4}
merged_dict = {**first_dict, **second_dict}
print(merged_dict)
# What does the ** operator return?
# ** unpacking is only usable on mappings (e.g. dicts). It expands into
# key/value pairs, so the result can only go where keyword pairs are
# accepted: dict literals and function calls.
# String to list of 1-letter strings
a = [*"RealPython"]
print(a)
```
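As a follow-on to the tutorial above, a short self-contained sketch of the other everyday use of `*args`/`**kwargs`: forwarding arguments unchanged through a wrapper, the standard decorator pattern:
```python
import functools

def logged(func):
    @functools.wraps(func)  # keep the wrapped function's name and docstring
    def wrapper(*args, **kwargs):
        print('calling', func.__name__, 'with', args, kwargs)
        return func(*args, **kwargs)  # forward everything unchanged
    return wrapper

@logged
def add(a, b=0):
    return a + b

print(add(1, b=2))  # prints the call info, then 3
```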
#### File: hellopython/func_programming/d_reduce.py
```python
from typing import Tuple, NamedTuple, Dict, List
from functools import reduce
from collections import defaultdict
import c_map
### 4: reduce function ###
# reduce(function: lambda acc, val: expression, sequence: iterable, initial: value) -> value
def scntst_tot_age(scientists: Tuple[NamedTuple]) -> int:
names_and_ages = c_map.scntst_map(scientists)
# With the reduce function
total_age = reduce(
lambda acc, val: acc + val.age, # inputs (accumulator & age value): expression
names_and_ages, # Iterable
0) # Initial value for accumulator
# The more pythonic way
total_age = sum(x.age for x in names_and_ages)
return total_age
# For more complicated applications the reduce() function IS handy
def reducer(acc, val):
acc[val.field].append(val.first_name + " " + val.last_name)
return acc
def scntsts_by_field(scientists: Tuple[NamedTuple]) -> Dict[str, List]:
scientists_by_field = dict(reduce(
reducer,
scientists,
defaultdict(list)
))
return scientists_by_field
if __name__ == '__main__':
import a_immutable_data as imm
from pprint import pprint
total_age = scntst_tot_age(imm.scntsts_manual())
print(total_age)
scientists_by_field = scntsts_by_field(imm.scntsts_manual())
pprint(scientists_by_field)
print(type(scientists_by_field))
print(type(scientists_by_field['math']))
``` |
{
"source": "joosthooz/arrow-benchmarks-ci",
"score": 2
} |
#### File: arrow-benchmarks-ci/api/runs.py
```python
from flask import request
from flask_restful import Resource
from api.auth import api_access_token_required
from models.run import Run
class Runs(Resource):
@api_access_token_required
def post(self, current_machine, id):
run = Run.first(id=id, machine_name=current_machine.name)
if not run:
return "", 404
run.update(request.get_json())
return "", 201
```
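For context, a hedged sketch of the request a machine agent might issue against this resource; the base URL and route are illustrative placeholders, while the Bearer-token header and the 201/404 responses mirror the code above and the project's test suite:
```python
import requests  # assumed available to the agent

resp = requests.post(
    "https://benchmarks.example.com/runs/<run-id>",          # hypothetical URL/route
    headers={"Authorization": "Bearer <api_access_token>"},  # placeholder token
    json={"status": "finished"},
)
print(resp.status_code)  # 201 on success, 404 for an unknown run
```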
#### File: migrations/versions/4dee912a87fa_initial_schema.py
```python
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "4dee912a87fa"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"benchmark_group_execution",
sa.Column("id", sa.String(), nullable=False),
sa.Column("lang", sa.String(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.Column("options", sa.String(), nullable=True),
sa.Column("flags", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column("benchmarkable_id", sa.String(), nullable=True),
sa.Column("run_id", sa.String(), nullable=True),
sa.Column("run_name", sa.String(), nullable=True),
sa.Column("machine", sa.String(), nullable=True),
sa.Column("process_pid", sa.Integer(), nullable=False),
sa.Column("command", sa.String(), nullable=True),
sa.Column("started_at", sa.DateTime(), nullable=True),
sa.Column("finished_at", sa.DateTime(), nullable=True),
sa.Column("total_run_time", sa.Interval(), nullable=True),
sa.Column("failed", sa.Boolean(), nullable=True),
sa.Column("return_code", sa.Integer(), nullable=True),
sa.Column("stderr", sa.Text(), nullable=True),
sa.Column("total_machine_virtual_memory", sa.BigInteger(), nullable=True),
sa.Column(
"created_at", sa.DateTime(), server_default=sa.text("now()"), nullable=False
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"benchmarkable",
sa.Column("id", sa.String(), nullable=False),
sa.Column("type", sa.String(), nullable=False),
sa.Column("baseline_id", sa.String(), nullable=True),
sa.Column("data", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column("pull_number", sa.Integer(), nullable=True),
sa.Column("reason", sa.String(), nullable=False),
sa.Column(
"created_at", sa.DateTime(), server_default=sa.text("now()"), nullable=False
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"machine",
sa.Column("name", sa.String(), nullable=False),
sa.Column("info", sa.String(), nullable=True),
sa.Column(
"default_filters", postgresql.JSONB(astext_type=sa.Text()), nullable=False
),
sa.Column("supported_filters", postgresql.ARRAY(sa.String()), nullable=False),
sa.Column("supported_langs", postgresql.ARRAY(sa.String()), nullable=False),
sa.Column(
"offline_warning_enabled",
sa.Boolean(),
server_default="false",
nullable=False,
),
sa.Column(
"publish_benchmark_results",
sa.Boolean(),
server_default="false",
nullable=False,
),
sa.Column("hostname", sa.String(), nullable=True),
sa.Column("ip_address", sa.String(), nullable=True),
sa.Column("port", sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint("name"),
)
op.create_table(
"memory_usage",
sa.Column("id", sa.String(), nullable=False),
sa.Column("benchmark_group_execution_id", sa.String(), nullable=False),
sa.Column("process_pid", sa.Integer(), nullable=False),
sa.Column("parent_process_pid", sa.Integer(), nullable=False),
sa.Column("process_name", sa.String(), nullable=False),
sa.Column("process_cmdline", postgresql.ARRAY(sa.String()), nullable=True),
sa.Column("mem_rss_bytes", sa.BigInteger(), nullable=False),
sa.Column("mem_percent", sa.Float(), nullable=True),
sa.Column(
"created_at", sa.DateTime(), server_default=sa.text("now()"), nullable=False
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"notification",
sa.Column("id", sa.String(), nullable=False),
sa.Column("type", sa.String(), nullable=False),
sa.Column("benchmarkable_id", sa.String(), nullable=False),
sa.Column("message", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column("finished_at", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["benchmarkable_id"],
["benchmarkable.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"run",
sa.Column("id", sa.String(), nullable=False),
sa.Column("benchmarkable_id", sa.String(), nullable=False),
sa.Column("machine", sa.String(), nullable=False),
sa.Column("filters", postgresql.JSONB(astext_type=sa.Text()), nullable=False),
sa.Column("reason", sa.String(), nullable=False),
sa.Column("env", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column(
"buildkite_data", postgresql.JSONB(astext_type=sa.Text()), nullable=True
),
sa.Column("status", sa.String(), server_default="created", nullable=False),
sa.Column("skip_reason", sa.String(), nullable=True),
sa.Column(
"created_at", sa.DateTime(), server_default=sa.text("now()"), nullable=False
),
sa.Column("scheduled_at", sa.DateTime(), nullable=True),
sa.Column("finished_at", sa.DateTime(), nullable=True),
sa.Column("total_run_time", sa.Interval(), nullable=True),
sa.Column("context", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column(
"machine_info", postgresql.JSONB(astext_type=sa.Text()), nullable=True
),
sa.Column("conda_packages", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(
["benchmarkable_id"],
["benchmarkable.id"],
),
sa.ForeignKeyConstraint(
["machine"],
["machine.name"],
),
sa.PrimaryKeyConstraint("id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("run")
op.drop_table("notification")
op.drop_table("memory_usage")
op.drop_table("machine")
op.drop_table("benchmarkable")
op.drop_table("benchmark_group_execution")
# ### end Alembic commands ###
```
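A hedged sketch of applying this revision programmatically rather than via the CLI; the config file path is an assumption about the project layout:
```python
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")           # assumed location of the Alembic config
command.upgrade(cfg, "4dee912a87fa")  # revision id defined in this migration
# command.downgrade(cfg, "base")      # would drop the tables again
```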
#### File: arrow-benchmarks-ci/models/benchmark_group_execution.py
```python
import sqlalchemy as s
from sqlalchemy.dialects import postgresql
from db import Base
from models.base import BaseMixin, NotNull, Nullable
from models.run import Run
from utils import UnauthorizedException
class BenchmarkGroupExecution(Base, BaseMixin):
__tablename__ = "benchmark_group_execution"
id = NotNull(s.String, primary_key=True)
lang = NotNull(s.String)
name = NotNull(s.String)
options = Nullable(s.String)
flags = Nullable(postgresql.JSONB)
benchmarkable_id = Nullable(s.String)
run_id = Nullable(s.String)
run_name = Nullable(s.String)
machine = Nullable(s.String)
process_pid = NotNull(s.Integer)
command = Nullable(s.String)
started_at = Nullable(s.DateTime(timezone=False))
finished_at = Nullable(s.DateTime(timezone=False))
total_run_time = Nullable(s.Interval)
failed = Nullable(s.Boolean)
return_code = Nullable(s.Integer)
stderr = Nullable(s.Text)
total_machine_virtual_memory = Nullable(s.BigInteger)
created_at = NotNull(s.DateTime(timezone=False), server_default=s.sql.func.now())
@classmethod
def create(cls, data):
bge = cls.get(data["id"])
if bge:
for attr in [
"finished_at",
"total_run_time",
"failed",
"return_code",
"stderr",
]:
setattr(bge, attr, data[attr])
else:
bge = cls(**data)
bge.save()
@classmethod
def validate_data(cls, current_machine, data):
if not Run.first(id=data["run_id"], machine_name=current_machine.name):
raise UnauthorizedException
```
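A hedged sketch of the upsert behaviour of `create()` above: the first call inserts a row, a second call with the same id only refreshes the completion fields (values are illustrative; requires an app/database context):
```python
from datetime import datetime, timedelta
from models.benchmark_group_execution import BenchmarkGroupExecution

data = {
    "id": "bge-example", "lang": "Python", "name": "file-read",
    "run_id": "run-example", "process_pid": 1234,
    "started_at": datetime.now(), "finished_at": None,
    "total_run_time": None, "failed": None, "return_code": None,
    "stderr": None,
}
BenchmarkGroupExecution.create(data)  # inserts a new row

data.update(finished_at=datetime.now(), total_run_time=timedelta(minutes=3),
            failed=False, return_code=0, stderr="")
BenchmarkGroupExecution.create(data)  # same id: updates the completion fields
```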
#### File: tests/api/test_logs.py
```python
import json
from copy import deepcopy
from datetime import datetime, timedelta
from buildkite.schedule_and_publish.get_commits import get_commits
from models.benchmark_group_execution import BenchmarkGroupExecution
from models.machine import Machine
from models.memory_usage import MemoryUsage
from models.run import Run
from utils import generate_uuid
benchmark_group_execution_id = generate_uuid()
memory_usage_id = generate_uuid()
benchmark_group_execution_data = {
"type": "BenchmarkGroupExecution",
"id": benchmark_group_execution_id,
"lang": "Python",
"name": "file-read",
"options": "options",
"flags": "flags",
"benchmarkable_id": "1",
"run_id": "run_id",
"run_name": "rune_name",
"machine": "machine",
"process_pid": 2,
"command": "command",
"started_at": str(datetime.now() - timedelta(minutes=3)),
"finished_at": str(datetime.now()),
"total_run_time": str(timedelta(minutes=3)),
"failed": True,
"return_code": 137,
"stderr": "stderr",
"total_machine_virtual_memory": 16624467968,
}
memory_usage_data = {
"type": "MemoryUsage",
"id": memory_usage_id,
"benchmark_group_execution_id": benchmark_group_execution_id,
"process_pid": 3,
"parent_process_pid": 2,
"process_name": "R",
"process_cmdline": [
"/var/lib/buildkite-agent/miniconda3/envs/arrow-commit/lib/R/bin/exec/R",
"-e",
'library(arrowbench);~+~run_one(write_file,~+~source="nyctaxi_2010-01",~+~format="feather",~+~compression="lz4",~+~input="data_frame",~+~cpu_count=NULL)',
],
"mem_percent": 0.4413227066347222,
"mem_rss_bytes": 27533,
}
def log_benchmark_group_execution(client, data=None, api_access_token=None):
get_commits()
machine = Machine.first()
if not data:
run = Run.first(machine_name=machine.name)
data = deepcopy(benchmark_group_execution_data)
data["run_id"] = run.id
if not api_access_token:
api_access_token = machine.create_api_access_token()
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_access_token}",
}
return client.post("/logs", data=json.dumps(data), headers=headers)
def log_memory_usage(client, data=None, api_access_token=None):
if not data:
data = deepcopy(memory_usage_data)
if not api_access_token:
machine = Machine.first()
api_access_token = machine.create_api_access_token()
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_access_token}",
}
return client.post("/logs", data=json.dumps(data), headers=headers)
def test_benchmark_group_execution_logs_201(client):
assert not BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
response = log_benchmark_group_execution(client, data=None, api_access_token=None)
assert response.status_code == 201
assert BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
def test_benchmark_group_execution_logs_401_invalid_token(client):
assert not BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
response = log_benchmark_group_execution(
client, data=None, api_access_token="invalid token"
)
assert response.status_code == 401
assert not BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
def test_benchmark_group_execution_logs_401_invalid_run_id(client):
assert not BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
response = log_benchmark_group_execution(
client, data=benchmark_group_execution_data, api_access_token=None
)
assert response.status_code == 401
assert not BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
def test_memory_usage_logs_201(client):
assert not MemoryUsage.get(memory_usage_data["id"])
machine = Machine.first()
api_access_token = machine.create_api_access_token()
log_benchmark_group_execution(client, data=None, api_access_token=api_access_token)
response = log_memory_usage(client, data=None, api_access_token=api_access_token)
assert response.status_code == 201
assert MemoryUsage.get(memory_usage_data["id"])
def test_memory_usage_logs_401_invalid_token(client):
assert not MemoryUsage.get(memory_usage_data["id"])
machine = Machine.first()
api_access_token = machine.create_api_access_token()
log_benchmark_group_execution(client, data=None, api_access_token=api_access_token)
response = log_memory_usage(client, data=None, api_access_token="invalid token")
assert response.status_code == 401
assert not MemoryUsage.get(memory_usage_data["id"])
def test_memory_usage_logs_401_not_existing_benchmark_execution_group(client):
assert not MemoryUsage.get(memory_usage_data["id"])
machine = Machine.first()
api_access_token = machine.create_api_access_token()
log_benchmark_group_execution(client, data=None, api_access_token=api_access_token)
data = deepcopy(memory_usage_data)
data["benchmark_group_execution_id"] = "not_existing_benchmark_execution_group_id"
response = log_memory_usage(client, data=data, api_access_token=api_access_token)
assert response.status_code == 401
assert not MemoryUsage.get(memory_usage_data["id"])
def test_memory_usage_logs_401_unauthorized_machine(client):
assert not MemoryUsage.get(memory_usage_data["id"])
machines = Machine.all()
api_access_token_1 = machines[0].create_api_access_token()
api_access_token_2 = machines[1].create_api_access_token()
log_benchmark_group_execution(
client, data=None, api_access_token=api_access_token_1
)
response = log_memory_usage(client, data=None, api_access_token=api_access_token_2)
assert response.status_code == 401
assert not MemoryUsage.get(memory_usage_data["id"])
```
#### File: buildkite/schedule_and_publish/test_get_commits.py
```python
import json
from buildkite.schedule_and_publish.get_commits import get_commits
from models.benchmarkable import Benchmarkable
from models.machine import Machine
from models.notification import Notification
from models.run import Run
commit_dicts = json.load(open("tests/mocked_integrations/github/get_commits.json"))
def verify_benchmarkable(commit_dict):
benchmarkable = Benchmarkable.get(commit_dict["sha"])
assert benchmarkable
assert benchmarkable.type == "arrow-commit"
assert benchmarkable.data == commit_dict
assert benchmarkable.baseline_id == commit_dict["parents"][-1]["sha"]
assert benchmarkable.reason == "arrow-commit"
assert benchmarkable.pull_number == 10973
def verify_benchmarkable_runs(commit_dict):
benchmarkable_id = commit_dict["sha"]
for machine in Machine.all():
run = Run.first(benchmarkable_id=benchmarkable_id, machine_name=machine.name)
assert run
assert run.filters == machine.default_filters["arrow-commit"]
assert run.reason == "arrow-commit"
assert run.status == "created"
assert not run.finished_at
def verify_benchmarkable_notifications(commit_dict):
benchmarkable_id = commit_dict["sha"]
for _type in ["slack_message", "pull_comment"]:
assert Notification.first(benchmarkable_id=benchmarkable_id, type=_type)
def test_get_commits():
get_commits()
for commit_dict in commit_dicts:
verify_benchmarkable(commit_dict)
verify_benchmarkable_runs(commit_dict)
verify_benchmarkable_notifications(commit_dict)
``` |
{
"source": "joosthooz/smarttimers",
"score": 3
} |
#### File: smarttimers/smarttimers/smarttimer.py
```python
__all__ = [
'TimerStat',
'SmartTimer'
]
import os
import re
import numpy
import cProfile
import time as std_time
from .timer import Timer
from functools import wraps, partial
from .clocks import are_clocks_compatible
from collections import namedtuple, defaultdict
_TimerStat_fields = ('min', 'max', 'total', 'avg',)
TimerStat = namedtuple('TimerStat', _TimerStat_fields)
class SmartTimer:
"""`Timer`_ container to perform time measurements in code blocks.
Args:
name (str, optional): Name of container. Default is *smarttimer*.
kwargs (dict, optional): Map of options to configure the internal
`Timer`_. Default is `Timer`_ defaults.
A :class:`SmartTimer` allows recording elapsed time in an arbitrary
number of code blocks. Specified points in the code are marked as either
the beginning of a block to measure, :meth:`tic`, or as the end of a
measured block, :meth:`toc`. Times are managed internally and ordered
based on :meth:`tic` calls. Times can be queried, operated on, and
written to file.
The following schemes are supported for timing code blocks
* Consecutive: ``tic('A')``, ``toc()``, ..., ``tic('B')``, ``toc()``
* Cascade: ``tic('A')``, ``toc()``, ``toc()``, ...
* Nested: ``tic('A')``, ``tic('B')``, ..., ``toc()``, ``toc()``
* Label-paired: ``tic('A')``, ``tic('B')``, ..., ``toc('A')``,
``toc('B')``
* Mixed: arbitrary combinations of schemes
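Example (a hedged sketch; the label and the measured block are
illustrative)::

    t = SmartTimer('demo')
    t.tic('load')
    data = [x * x for x in range(10**6)]  # block being measured
    t.toc()
    print(t['load'])
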
.. _`namedtuple`:
https://docs.python.org/3.3/library/collections.html#collections.namedtuple
Attributes:
name (str): Name of container. May be used for filename in
:meth:`write_to_file`.
labels (list, str): Label identifiers of completed timed code blocks.
active_labels (list, str): Label identifiers of active code blocks.
seconds (list, float): Elapsed times in seconds for completed code blocks.
minutes (list, float): Elapsed times in minutes for completed code blocks.
times (dict): Map of times elapsed for completed blocks. Keys are the
labels used when invoking :meth:`tic`.
walltime (float): Elapsed time between first and last timings.
"""
DEFAULT_CLOCK_NAME = 'process_time'
_LABELS = ('label', 'seconds', 'minutes', 'rel_percent', 'cum_sec',
'cum_min', 'cum_percent')
def __init__(self, name=None, **kwargs):
self.name = name
self._timer = Timer(label=None, **kwargs) # internal Timer
self._first_tic = None # pointer used to calculate walltime
self._last_tic = self._timer # pointer used to support cascade scheme
self._timers = [] # completed time blocks
self._timer_stack = [] # stack of active time blocks
self._prof = None # profiling object
@property
def labels(self):
return tuple(t.label for t in self._filter_timers())
@property
def active_labels(self):
return tuple(t.label for t in self._timer_stack)
@property
def seconds(self):
return tuple(t.seconds for t in self._filter_timers())
@property
def minutes(self):
return tuple(t.minutes for t in self._filter_timers())
@property
def relative_percent(self):
return tuple(t.relative_percent for t in self._filter_timers())
@property
def cumulative_seconds(self):
return tuple(t.cumulative_seconds for t in self._filter_timers())
@property
def cumulative_minutes(self):
return tuple(t.cumulative_minutes for t in self._filter_timers())
@property
def cumulative_percent(self):
return tuple(t.cumulative_percent for t in self._filter_timers())
@property
def times(self):
times_map = defaultdict(list)
for t in self._filter_timers():
times_map[t.label].append(t.seconds)
return times_map
@property
def clock_name(self):
return self._timer.clock_name
@clock_name.setter
def clock_name(self, clock_name):
if not are_clocks_compatible(self._timer.clock_name, clock_name):
self._timers = list(self._filter_timers())
self._timer_stack = []
self._first_tic = None
self._last_tic = self._timer
self._timer.clock_name = clock_name
@property
def info(self):
return self._timer.info
@property
def walltime(self):
if not any(self._timers):
return 0.
return self._timer.seconds - self._first_tic.seconds
def _filter_timers(self):
return filter(None, self._timers)
def __repr__(self):
return "{cls}(name={name},"\
" timer={timer})"\
.format(cls=type(self).__qualname__,
name=repr(self.name),
timer=repr(self._timer))
def __str__(self):
if not self.labels:
return ""
lw = max(len('label'), max(map(len, self.labels)))
fmt_head = "{:>" + str(lw) + "}" + 6 * " {:>12}" + os.linesep
fmt_data = "{:>" + str(lw) + "}" + 6 * " {:12.4f}" + os.linesep
data = fmt_head.format(*type(self)._LABELS)
for t in self._filter_timers():
data += fmt_data.format(t.label, t.seconds, t.minutes,
t.relative_percent, t.cumulative_seconds,
t.cumulative_minutes, t.cumulative_percent)
return data
def __enter__(self):
self.tic()
return self
def __eq__(self, other):
return NotImplemented
__hash__ = None
def __exit__(self, *args):
self.toc()
def __getitem__(self, key):
value = self.times[key]
return value[0] if len(value) == 1 else value
def _update_cumulative_and_percent(self):
total_seconds = sum(self.seconds)
for i, t in enumerate(self._filter_timers()):
            # Only compute cumulative values for timers not yet processed
            # (marked by the negative sentinel); percentages are always updated
if t.cumulative_seconds < 0. or t.cumulative_minutes < 0.:
t.cumulative_seconds = t.seconds
t.cumulative_minutes = t.minutes
if i > 0:
t_prev = self._timers[i - 1]
t.cumulative_seconds += t_prev.cumulative_seconds
t.cumulative_minutes += t_prev.cumulative_minutes
t.relative_percent = t.seconds / total_seconds
t.cumulative_percent = t.cumulative_seconds / total_seconds
def tic(self, label=None):
"""Start measuring time.
Measure time at the latest moment possible to minimize noise from
internal operations.
Args:
label (str): Label identifier for current code block.
"""
# _last_tic -> timer of most recent tic
self._last_tic = Timer(label=label, clock_name=self._timer.clock_name)
# _first_tic -> timer of first tic
if self._first_tic is None:
self._first_tic = self._last_tic
# Insert Timer into stack, then record time to minimize noise
self._timer_stack.append(self._last_tic)
# Use 'None' as an indicator of active code blocks
self._timers.append(None)
# Measure time
self._last_tic.time()
def toc(self, label=None):
"""Stop measuring time at end of code block.
Args:
label (str): Label identifier for current code block.
Returns:
float: Measured time in seconds.
Raises:
            Exception, KeyError: If there is no matching :meth:`tic`.
"""
# Error if no tic pair (e.g., toc() after instance creation)
# _last_tic -> _timer
if self._last_tic is self._timer:
raise Exception("'toc()' has no matching 'tic()'")
# Measure time at the soonest moment possible to minimize noise from
# internal operations.
self._timer.time()
# Stack is not empty so there is a matching tic
if self._timer_stack:
# Last item or item specified by label
stack_idx = -1
# Label-paired timer.
# Label can be "", so explicitly check against None.
if label is not None:
# Find index of last timer in stack with matching label
for i, t in enumerate(self._timer_stack[::-1]):
if label == t.label:
stack_idx = len(self._timer_stack) - i - 1
break
else:
raise KeyError("'{}' has no matching label".format(label))
# Calculate time elapsed
t_first = self._timer_stack.pop(stack_idx)
t_diff = self._timer - t_first
# Add extra attributes, use a negative sentinel value
t_diff.relative_percent = -1.
t_diff.cumulative_seconds = -1.
t_diff.cumulative_minutes = -1.
t_diff.cumulative_percent = -1.
# Place time in corresponding position
idx = [i for i, v in enumerate(self._timers)
if v is None][stack_idx]
self._timers[idx] = t_diff
# Empty stack, use _last_tic -> timer from most recent tic
else:
t_diff = self._timer - self._last_tic
# Add extra attributes, use a negative sentinel value
t_diff.relative_percent = -1.
t_diff.cumulative_seconds = -1.
t_diff.cumulative_minutes = -1.
t_diff.cumulative_percent = -1.
# Use label.
# Label can be "", so explicitly check against None.
if label is not None:
t_diff.label = label
self._timers.append(t_diff)
# Update cumulative and percent times when all timers have completed
if all(self._timers):
self._update_cumulative_and_percent()
return t_diff.seconds
def print_info(self):
self._timer.print_info()
def remove(self, *keys):
"""Remove time(s) of completed code blocks.
Args:
keys (str): Keys to select times for removal based on the label
used in :meth:`tic`.
"""
for key in keys:
for t in filter(None, self._timers[:]):
if key == t.label:
self._timers.remove(t)
def clear(self):
self._timers = []
self._timer_stack = []
self._timer.clear()
self._first_tic = None
self._last_tic = self._timer
if self._prof:
self._prof.clear()
self._prof = None
def reset(self):
self.name = None
self._timer.reset()
self._timer.clock_name = type(self).DEFAULT_CLOCK_NAME
self.clear()
def dump_times(self, filename=None, mode='w'):
"""Write timing results to a CSV file.
If *filename* is provided, then it will be used as the filename.
Otherwise :attr:`name` is used if non-empty, else the default filename
is used. The suffix and extension *-times.csv* are appended only if
        *filename* does not already have an extension. Using *mode*, the file
        can be overwritten or appended with timing data.
.. _`open`: https://docs.python.org/3/library/functions.html#open
Args:
filename (str, optional): Name of file.
mode (str, optional): Mode flag passed to `open`_. Default is *w*.
"""
if not filename:
if not self.name:
raise ValueError("either provide an explicit filename or set"
" 'name' attribute")
filename = self.name
if not os.path.splitext(filename)[1]:
filename += '-times.csv'
with open(filename, mode) as fd:
fd.write(','.join(type(self)._LABELS))
fd.write('\n')
for t in self._filter_timers():
data = (t.label, t.seconds, t.minutes, t.relative_percent,
t.cumulative_seconds, t.cumulative_minutes,
t.cumulative_percent)
fd.write(','.join((str(datum) for datum in data)))
fd.write('\n')
def stats(self, label=None):
"""Compute total, min, max, and average stats for timings.
Note:
* *label* is compared as a word-bounded expression.
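            * For example, ``stats('loop')`` selects a timer labeled
              ``'main loop'`` (word-bounded match) but not one labeled
              ``'looping'``.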
Args:
            label (str, iterable, None, optional): String (or iterable of
                strings) used to match timer labels. To use *label* as a
                regular expression, pass it as a raw string. If None, all
                completed timings are used.
Returns:
TimerStat, None: Stats in seconds and minutes (`namedtuple`_).
"""
timers = list(self._filter_timers())
# Label can be "", so explicitly check against None
if label is None:
seconds = self.seconds
minutes = self.minutes
selected = timers
else:
# Make strings iterate as strings, not characters
if isinstance(label, str):
label = [label]
seconds = []
minutes = []
selected = []
for ll in label:
for t in timers:
                    if ((ll.isalnum()
                         and re.search(r"\b{}\b".format(ll), t.label))
                            or ll == t.label) and t not in selected:
seconds.append(t.seconds)
minutes.append(t.minutes)
selected.append(t)
if not selected:
return None
total_seconds = sum(seconds)
total_minutes = sum(minutes)
return TimerStat(
min=(min(seconds), min(minutes)),
max=(max(seconds), max(minutes)),
total=(total_seconds, total_minutes),
avg=(total_seconds / len(seconds), total_minutes / len(minutes)))
def asarray(self):
"""Return timing data as a list or numpy array (no labels).
Data is arranged as a transposed view of :meth:`__str__` and
:meth:`to_file` formats.
.. _`numpy.ndarray`: https://www.numpy.org/devdocs/index.html
Returns:
`numpy.ndarray`_, list: Timing data.
"""
return numpy.array([self.seconds, self.minutes, self.relative_percent,
self.cumulative_seconds, self.cumulative_minutes,
self.cumulative_percent])
def pic(self, subcalls=True, builtins=True):
"""Start profiling.
.. _`profile`: https://docs.python.org/3.3/library/profile.html
See `profile`_
"""
self._prof = cProfile.Profile(timer=self._timer.clock,
subcalls=subcalls,
builtins=builtins)
self._prof.enable()
def poc(self):
"""Stop profiling."""
self._prof.disable()
self._prof.create_stats()
self._prof.clear()
def print_profile(self, sort='time'):
self._prof.print_stats(sort)
def get_profile(self):
return self._prof.getstats()
def dump_profile(self, filename=None, mode='w'):
"""Write profiling results to a file.
If *filename* is provided, then it will be used as the filename.
Otherwise :attr:`name` is used if non-empty, else the default filename
        is used. The extension *.prof* is appended only if *filename* does not
        already have an extension. Using *mode*, the file can be overwritten or
        appended with profiling data.
.. _`open`: https://docs.python.org/3/library/functions.html#open
Args:
filename (str, optional): Name of file.
mode (str, optional): Mode flag passed to `open`_. Default is *w*.
"""
if not filename:
if not self.name:
raise ValueError("either provide an explicit filename or set"
" 'name' attribute")
filename = self.name
if not os.path.splitext(filename)[1]:
filename += '.prof'
self._prof.dump_stats(filename)
sleep = std_time.sleep
```
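A minimal usage sketch of the timing schemes described in the `SmartTimer` docstring above (assuming the package is importable as `smarttimers`; the labels and workloads below are illustrative only):
```python
from smarttimers import SmartTimer
t = SmartTimer('example')
# Consecutive scheme: each tic is closed by the following toc
t.tic('setup')
data = list(range(1000))
t.toc()
# Nested scheme: inner blocks are closed first (stack order)
t.tic('outer')
t.tic('inner')
total = sum(data)
t.toc()  # closes 'inner'
t.toc()  # closes 'outer'
# Label-paired scheme: close a specific active block by its label
t.tic('A')
t.tic('B')
t.toc('A')  # closes 'A' even though 'B' was started later
t.toc('B')
print(t)                 # tabular report of all completed blocks
print(t.stats('inner'))  # TimerStat(min=..., max=..., total=..., avg=...)
t.dump_times()           # writes 'example-times.csv'
```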
#### File: smarttimers/tests/test_TimerClassProperties.py
```python
import os
import time
import unittest
from smarttimers import (Timer, TimerDict)
from .utiltest import TestStack
class TimerClassPropertiesTestCase(unittest.TestCase):
def test_TimerDict(self):
td1 = TimerDict()
# Invalid
for value in [0, 0., 'clock', ['clock', time.clock],
('clock', time.clock)]:
with self.subTest(value=value):
with self.assertRaises(TypeError):
td1.update(value)
def test_DefaultClockName(self):
TestStack.push(Timer.DEFAULT_CLOCK_NAME)
# Invalid
for clock_name in [1, 1., ['clock'], ('clock',),
{'DEFAULT_CLOCK_NAME': 'clock'}]:
with self.subTest(clock_name=clock_name):
with self.assertRaises(TypeError):
Timer.DEFAULT_CLOCK_NAME = clock_name
Timer.DEFAULT_CLOCK_NAME = 'clock'
self.assertEqual(Timer.DEFAULT_CLOCK_NAME, 'clock')
Timer.DEFAULT_CLOCK_NAME = TestStack.pop()
def test_Clocks(self):
TestStack.push(Timer.CLOCKS)
# Invalid
for value in [1, 1., 'clock', ['clock'], ('clock',)]:
with self.subTest(value=value):
with self.assertRaises(TypeError):
Timer.CLOCKS = value
# Invalid key, valid value
for keyval in [{1: 'clock'}, {1.: 'clock'}]:
with self.subTest(keyval=keyval):
with self.assertRaises(KeyError):
Timer.CLOCKS.update(keyval)
# Valid key, invalid value
for keyval in [{'clock': 1}, {'clock': 1.}, {'clock': 'clock'}]:
with self.subTest(keyval=keyval):
with self.assertRaises(ValueError):
Timer.CLOCKS.update(keyval)
# Valid
for keyval in [{'clock': time.clock},
TimerDict({'clock': time.clock})]:
Timer.CLOCKS = keyval
with self.subTest(keyval=keyval):
self.assertTrue(keyval.items() == Timer.CLOCKS.items())
Timer.CLOCKS = TestStack.pop()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JoostHuizinga/ea-plotting-scripts",
"score": 2
} |
#### File: JoostHuizinga/ea-plotting-scripts/configure_plots.py
```python
import os
import numpy as np
from typing import List, Dict, Union, Optional
import subprocess as sp
import matplotlib
import matplotlib.transforms
import matplotlib.pyplot as plt
import matplotlib.gridspec as gs
import matplotlib.transforms as tf
import matplotlib.cm as cm
from matplotlib.axes import Axes
from matplotlib.artist import Artist
from matplotlib.figure import Figure
from createPlotUtils import debug_print, get_renderer
from dataclasses import dataclass
import global_options as go
import parse_file as pf
@dataclass
class PlotConfiguration:
plot_id: int
fig: Figure
gridspec_dict: Dict[str, Union[gs.GridSpec, gs.GridSpecFromSubplotSpec]]
subplot_dict: Dict[int, Axes]
extra_artists: List[Artist]
legend_handles: List[Artist]
def latex_available():
with open(os.devnull, "w") as f:
try:
status = sp.call(["latex", "--version"], stdout=f, stderr=f)
except OSError:
status = 1
    return status == 0
def init_params():
# Setup the matplotlib params
preamble = [r'\usepackage[T1]{fontenc}',
r'\usepackage{amsmath}',
r'\usepackage{txfonts}',
r'\usepackage{textcomp}']
matplotlib.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
matplotlib.rc('text.latex', preamble="\n".join(preamble))
params = {'backend': 'pdf',
'axes.labelsize': go.get_int("font_size"),
'font.size': go.get_int("font_size"),
'legend.fontsize': go.get_int("legend_font_size"),
'xtick.labelsize': go.get_int("tick_font_size"),
'ytick.labelsize': go.get_int("tick_font_size"),
'text.usetex': latex_available(),
'figure.dpi': 100,
'savefig.dpi': 100}
matplotlib.rcParams.update(params)
def init_subplot(plot_config: PlotConfiguration, subplot_id, subplot_spec):
fig = plt.figure(plot_config.plot_id)
ax = fig.add_subplot(subplot_spec, label=str(subplot_id))
ax.set_ylim(go.get_float("y_axis_min", plot_config.plot_id, when_not_exist=go.RETURN_FIRST, default=None),
go.get_float("y_axis_max", plot_config.plot_id, when_not_exist=go.RETURN_FIRST, default=None))
ax.set_xlim(go.get_float("x_axis_min", plot_config.plot_id, when_not_exist=go.RETURN_FIRST, default=None),
go.get_float("x_axis_max", plot_config.plot_id, when_not_exist=go.RETURN_FIRST, default=None))
ax.set_ylabel(go.get_str("y_labels", plot_config.plot_id, when_not_exist=go.RETURN_FIRST))
ax.set_xlabel(go.get_str("x_labels", plot_config.plot_id, when_not_exist=go.RETURN_FIRST))
if go.get_bool("title"):
ax.set_title(go.get_str_list(
"titles",
plot_config.plot_id,
when_not_exist=go.RETURN_FIRST
)[subplot_id], fontsize=go.get_int("title_size"))
if go.get_exists("x_ticks"):
ax.set_xticks(go.get_float_list("x_ticks", plot_config.plot_id, when_not_exist=go.RETURN_FIRST))
if go.get_exists("y_ticks"):
ax.set_yticks(go.get_float_list("y_ticks", plot_config.plot_id, when_not_exist=go.RETURN_FIRST))
# ax.set_aspect(1.0)
# ax.apply_aspect()
plot_config.subplot_dict[subplot_id] = ax
return ax
def setup_figure(plot_id: int, gridspec: gs.GridSpec = gs.GridSpec(1, 1)) -> PlotConfiguration:
"""
Sets up a figure based on plot id.
By default, we assume there will only be one sub-figure, which is the main plot.
:param plot_id: The plot id.
:param gridspec: Gridspec layout for if the plot should contain multiple sub-figures.
:return: Returns the plot configuration for this figure.
"""
fig = plt.figure(plot_id, figsize=go.get_float_list("fig_size"))
plot_config = PlotConfiguration(
plot_id=plot_id,
fig=fig,
gridspec_dict={"main": gridspec},
subplot_dict={},
extra_artists=[],
legend_handles=[],
)
return plot_config
def get_plot_ids() -> List[int]:
"""
Currently we assume that the list of file-names holds the ground-truth on the
number of plots we want to create.
:return: A list of plot-ids.
"""
return list(range(len(go.get_indices("file_names"))))
def setup_plot(plot_config: PlotConfiguration, gridspec: Optional[gs.GridSpec] = None):
if gridspec is None:
gridspec = plot_config.gridspec_dict["main"]
init_subplot(plot_config, 0, gridspec[0])
def setup_plots(plot_ids: Optional[List[int]] = None, gridspec=gs.GridSpec(1, 1)):
"""
A setup for the different plots
(both the main plot and the small bar at the bottom).
"""
init_params()
if plot_ids is None:
plot_ids = [0]
plot_configs = []
for plot_id in plot_ids:
plot_configs.append(setup_figure(plot_id, gridspec))
# We assume that the first entry in the gridspec will contain the "main" plot,
# so we initialize it with the parameters we read from the global options.
init_subplot(plot_configs[-1], 0, gridspec[0])
# axis = [init_subplot(plot_id, grid_spec[0]) for i, plot_id in enumerate(plot_ids)]
return plot_configs
class ParseColumns:
def __init__(self, columns: List[int]):
self.data = {col: [] for col in columns}
self.generations: List[int] = []
def __call__(self, split_line: List[str], generation: int):
self.generations.append(generation)
for col in self.data:
self.data[col].append(float(split_line[col]))
def plot_annotations(ax):
for index in go.get_indices("line_from_file"):
line_file = go.get_str("line_from_file", index)
x_column = go.get_int("line_from_file_x_column", index, when_not_exist=go.RETURN_FIRST)
y_column = go.get_int("line_from_file_y_column", index, when_not_exist=go.RETURN_FIRST)
color = go.get_str("line_from_file_color", index, when_not_exist=go.RETURN_FIRST)
linestyle = go.get_str("line_from_file_linestyle", index, when_not_exist=go.RETURN_FIRST)
linewidth = go.get_float("line_from_file_linewidth", index, when_not_exist=go.RETURN_FIRST)
column_parser = ParseColumns([x_column, y_column])
pf.read_file(line_file, column_parser)
ax.plot(column_parser.data[x_column],
column_parser.data[y_column],
color=color,
linestyle=linestyle,
linewidth=linewidth)
def plot_background(ax):
"""
Draw a gradient image based on a provided function.
:param ax: Axes The axes to draw on.
"""
y_min = go.get_float("y_axis_min")
y_max = go.get_float("y_axis_max")
x_max = go.get_float("x_axis_max")
x_min = go.get_float("x_axis_min")
background_func = go.get_str("background")
cmap = go.get_str("background_colormap")
cmap_min = go.get_float("background_colormap_min")
cmap_max = go.get_float("background_colormap_max")
x_res = round(ax.bbox.width)
y_res = round(ax.bbox.height)
image = np.zeros((y_res, x_res), dtype=np.float64)
for x in range(x_res):
for y in range(y_res):
            # Map pixel indices to data coordinates before evaluating the function
            x_val = x_min + (x * (x_max - x_min) / (x_res - 1))
            y_val = y_min + (y * (y_max - y_min) / (y_res - 1))
val = eval(background_func, {}, {"x_val": x_val, "y_val": y_val})
image[y, x] = cmap_min + (cmap_max - cmap_min) * val
interpolation = 'nearest'
im = ax.imshow(image, extent=(x_min, x_max, y_min, y_max),
interpolation=interpolation,
vmin=0, vmax=1, aspect="equal", origin="lower",
cmap=plt.get_cmap(cmap))
return im
def create_color_bar(plot_config):
cmap = go.get_str("color_bar_colormap")
current_box = tf.Bbox.union([ax.get_position() for ax in plot_config.fig.axes])
cax = plot_config.fig.add_axes([
current_box.xmax + go.get_float("color_bar_margin"),
current_box.ymin,
go.get_float("color_bar_width"),
current_box.height
])
cbar = plot_config.fig.colorbar(cm.ScalarMappable(norm=None, cmap=plt.get_cmap(cmap)), cax=cax)
cbar.set_label(
go.get_str("color_bar_label"),
rotation=go.get_float("color_bar_label_rotation"),
fontsize=go.get_float("color_bar_label_font_size"),
labelpad=go.get_float("color_bar_label_pad"),
)
def export_legend(plot_config):
output_dir = go.get_str("output_directory")
ext = "." + go.get_str("type")
out_file_path = output_dir + "/" + go.get_str("file_names", plot_config.plot_id) + "_legend" + ext
# Create a new figure specifically for the legend
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
# Setup the legend as normal, except always in the lower left of the figure
# and without any offset
lgd = _setup_legend(ax, plot_config.legend_handles, "lower left", (0, 0, 1, 1))
# Figure out the size of the legend if it would be rendered, and adjust the
# figure accordingly
renderer = get_renderer(fig)
bbox = lgd.get_window_extent(renderer).transformed(fig.dpi_scale_trans.inverted())
fig.set_size_inches(bbox.width, bbox.height)
# Save the legend to a file
fig.savefig(out_file_path, dpi="figure", bbox_inches=bbox)
def _setup_legend(ax, handles, legend_loc, bbox_to_anchor):
columns = go.get_int("legend_columns")
legend_label_spacing = go.get_float("legend_label_spacing")
legend_column_spacing = go.get_float("legend_column_spacing")
legend_handle_text_pad = go.get_float("legend_handle_text_pad")
debug_print("legend", "location:", legend_loc, "columns:", columns)
lgd = ax.legend(handles=handles,
loc=legend_loc, ncol=columns,
bbox_to_anchor=bbox_to_anchor,
labelspacing=legend_label_spacing,
columnspacing=legend_column_spacing,
handletextpad=legend_handle_text_pad)
return lgd
def setup_legend(plot_config: PlotConfiguration):
fig = plt.figure(plot_config.plot_id)
ax = fig.get_axes()[0]
# if getFloat("box_sep") == 0:
# plt.tight_layout()
legend_loc = go.get_str("legend_loc", plot_config.plot_id, when_not_exist=go.RETURN_FIRST)
if legend_loc != "none":
anchor_x = go.get_float("legend_x_offset")
anchor_y = go.get_float("legend_y_offset")
bbox_to_anchor = (anchor_x, anchor_y, 1, 1)
handles = None
if len(plot_config.legend_handles) > 0:
handles = plot_config.legend_handles
lgd = _setup_legend(ax, handles, legend_loc, bbox_to_anchor)
plot_config.extra_artists.append(lgd)
def write_plot(plot_config: PlotConfiguration):
print("Writing plot " + str(plot_config.plot_id) + " ...")
output_dir = go.get_str("output_directory")
ext = "." + go.get_str("type")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
setup_legend(plot_config)
fig = plt.figure(plot_config.plot_id)
out_file_path = output_dir + "/" + go.get_str("file_names", plot_config.plot_id) + ext
print(f"Writing plot to: {out_file_path}")
# Determine custom bounding box
if go.get_str("bb") == "custom":
fig_size = go.get_float_list("fig_size")
renderer = get_renderer(fig)
# bb = fig.get_window_extent(renderer)
bb = fig.get_tightbbox(renderer)
target_bb = matplotlib.transforms.Bbox.from_bounds(0, 0, fig_size[0], fig_size[1])
trans2 = matplotlib.transforms.BboxTransformTo(target_bb)
trans = fig.transFigure.inverted()
print("Figure size:", fig_size)
print("Original bb box:", bb.get_points())
for artist in plot_config.extra_artists:
other_bb = artist.get_window_extent(renderer)
other_bb = other_bb.transformed(trans)
other_bb = other_bb.transformed(trans2)
print(other_bb.get_points())
bb = matplotlib.transforms.BboxBase.union([bb, other_bb])
target_aspect = fig_size[0] / fig_size[1]
bb_aspect = bb.width / bb.height
print(target_aspect, bb_aspect)
if target_aspect < bb_aspect:
bb = bb.expanded(1, bb_aspect / target_aspect)
else:
bb = bb.expanded(target_aspect / bb_aspect, 1)
bb = bb.padded(0.2)
print("Extended bb box:", bb.get_points())
plt.savefig(out_file_path,
bbox_extra_artists=plot_config.extra_artists, bbox_inches=bb)
elif go.get_str("bb") == "manual":
fig_size = go.get_float_list("fig_size")
renderer = get_renderer(fig)
ext_width = go.get_float("bb_width")
        ext_height = go.get_float("bb_height")
x_offset = go.get_float("bb_x_offset")
y_offset = go.get_float("bb_y_offset")
x_tight_center = go.get_float("bb_x_center_includes_labels")
y_tight_center = go.get_float("bb_y_center_includes_labels")
# Get the transformations that we need
inches_to_pixels = fig.dpi_scale_trans
pixels_to_inches = inches_to_pixels.inverted()
# Get the bounding box of the window
win_bb_in_pixels = fig.get_window_extent(renderer)
# Get the bounding box of the actual figure, including labels
fig_bb_in_inches = fig.get_tightbbox(renderer)
fig_bb_in_pixels = fig_bb_in_inches.transformed(inches_to_pixels)
# Get a new bounding box just as wide as the window, but with the
# center of the figure bounding box
new_bb_in_pixels = win_bb_in_pixels.frozen()
if x_tight_center:
width_ratio = win_bb_in_pixels.width / fig_bb_in_pixels.width
new_bb_in_pixels.x0 = fig_bb_in_pixels.x0
new_bb_in_pixels.x1 = fig_bb_in_pixels.x1
new_bb_in_pixels = new_bb_in_pixels.expanded(width_ratio, 1)
if y_tight_center:
height_ratio = win_bb_in_pixels.height / fig_bb_in_pixels.height
new_bb_in_pixels.y0 = fig_bb_in_pixels.y0
new_bb_in_pixels.y1 = fig_bb_in_pixels.y1
new_bb_in_pixels = new_bb_in_pixels.expanded(1, height_ratio)
# Transform to inch space
bb_in_inches = new_bb_in_pixels.transformed(pixels_to_inches)
# Apply custom transformations
bb_in_inches = bb_in_inches.expanded(
float(ext_width) / float(fig_size[0]),
            float(ext_height) / float(fig_size[1]))
bb_in_inches.y0 += y_offset
bb_in_inches.y1 += y_offset
bb_in_inches.x0 += x_offset
bb_in_inches.x1 += x_offset
plt.savefig(out_file_path,
bbox_extra_artists=plot_config.extra_artists,
bbox_inches=bb_in_inches)
elif go.get_str("bb") == "default":
plt.savefig(out_file_path,
bbox_extra_artists=plot_config.extra_artists)
elif go.get_str("bb") == "tight":
plt.savefig(out_file_path,
bbox_extra_artists=plot_config.extra_artists,
bbox_inches='tight')
else:
raise Exception("Invalid bounding box option.")
print("Writing plot " + str(plot_config.plot_id) + " done.")
def write_plots(plot_configs: List[PlotConfiguration]):
print("Writing plots...")
for plot_config in plot_configs:
write_plot(plot_config)
def def_legend_font_size(): return go.get_int("font_size") - 4
def def_title_font_size(): return go.get_int("font_size") + 4
def def_tick_font_size(): return go.get_int("font_size") - 6
def def_color_bar_label_font_size(): return go.get_float("font_size")
def def_color_bar_colormap(): return go.get_str("background_colormap")
def add_options():
def def_output_dir():
if go.get_exists("config_file"):
return pf.base(go.get_str("config_file")) + "_out"
else:
number = 1
name = "my_plot_" + str(number)
while os.path.exists(name):
number += 1
name = "my_plot_" + str(number)
return name
go.add_option("output_directory", def_output_dir, nargs=1,
help_str="Resulting plots will be put into this directory.")
go.add_option("type", "pdf", nargs=1,
help_str="The file type in which the plot will be written.")
go.add_option("fig_size", [[8, 6]], nargs=2,
help_str="The size of the resulting figure.")
go.add_option("title", True, nargs=1,
help_str="Show the title of the plot.")
# Font settings
go.add_option("font_size", 18, nargs=1,
help_str="The base font-size for the plot "
"(other font-sizes are relative to this one).")
go.add_option("title_size", def_title_font_size, nargs=1,
aliases=["title_font_size"],
help_str="Font size for the title.")
go.add_option("legend_font_size", def_legend_font_size, nargs=1,
help_str="Font size for the legend.")
go.add_option("tick_font_size", def_tick_font_size, nargs=1,
help_str="Font size for the tick-labels.")
# Per plot settings
go.add_option("file_names", "my_plot", aliases=["plot_output"],
help_str="The names of the output files for each plotted column.")
go.add_option("titles", "Unnamed plot", aliases=["plot_title"],
help_str="The titles for each plot.")
go.add_option("x_labels", "Number of Generations", aliases=["plot_x_label"],
help_str="The x labels for each plot.")
go.add_option("y_labels", "Value", aliases=["plot_y_label"],
help_str="The x labels for each plot.")
go.add_option("legend_loc", "best", aliases=["plot_legend_loc"],
help_str="Legend location for each plot.")
go.add_option("y_axis_min", aliases=["plot_y_min"],
help_str="The minimum value for the y axis.")
go.add_option("y_axis_max", aliases=["plot_y_max"],
help_str="The maximum value for the y axis.")
go.add_option("x_axis_max", aliases=["plot_x_max"],
help_str="The minimum value for the x axis.")
go.add_option("x_axis_min", aliases=["plot_x_min"],
help_str="The maximum value for the x axis.")
go.add_option("x_ticks",
help_str="Use the provided strings as labels for the x-ticks.")
go.add_option("y_ticks",
help_str="Use the provided strings as labels for the y-ticks.")
# Legend settings
go.add_option("legend_columns", 1, nargs=1,
help_str="Number of columns for the legend.")
go.add_option("legend_x_offset", 0, nargs=1,
help_str="Allows for fine movement of the legend.")
go.add_option("legend_y_offset", 0, nargs=1,
help_str="Allows for fine movement of the legend.")
go.add_option("legend_label_spacing", 0.5, nargs=1,
help_str="Space between legend labels.")
go.add_option("legend_column_spacing", 2.0, nargs=1,
help_str="Horizontal space between legend labels.")
go.add_option("legend_handle_text_pad", 0.8, nargs=1,
help_str="Horizontal space between legend labels.")
# Bounding box settings
go.add_option("bb", "tight", nargs=1,
help_str="How the bounding box of the image is determined. Options are "
"default (keep aspect ratio and white space), "
"tight (sacrifice aspect ratio to prune white space), "
"manual (specify the bounding box yourself),"
"and custom (keep aspect ratio but prune some white space).")
go.add_option("bb_width", nargs=1,
help_str="The width of the bounding box, in inches.")
go.add_option("bb_height", nargs=1,
help_str="The height of the bounding box, in inches.")
go.add_option("bb_x_offset", 0, nargs=1,
help_str="The x offset of the bounding box, in inches.")
go.add_option("bb_y_offset", 0, nargs=1,
help_str="The y offset of the bounding box, in inches.")
go.add_option("bb_x_center_includes_labels", True, nargs=1,
help_str="If True, take the figure labels into account when horizontally "
"centering the bounding box. If false, ignore the labels when "
"horizontally centering.")
go.add_option("bb_y_center_includes_labels", True, nargs=1,
help_str="If True, take the figure labels into account when vertically "
"centering the bounding box. If false, ignore the labels when "
"vertically centering.")
# Annotations
go.add_option("line_from_file", None,
help_str="")
go.add_option("line_from_file_x_column", 0,
help_str="")
go.add_option("line_from_file_y_column", 1,
help_str="")
go.add_option("line_from_file_color", "#000000",
help_str="")
go.add_option("line_from_file_linestyle", "-",
help_str="")
go.add_option("line_from_file_linewidth", 1,
help_str="")
# Background options
go.add_option("background", None, nargs=1,
help_str="")
go.add_option("background_colormap", "Greys", nargs=1,
help_str="'Accent', 'Accent_r', 'Blues', 'Blues_r', 'BrBG', 'BrBG_r', "
"'BuGn', 'BuGn_r', 'BuPu', 'BuPu_r', 'CMRmap', 'CMRmap_r', "
"'Dark2', 'Dark2_r', 'GnBu', 'GnBu_r', 'Greens', 'Greens_r', "
"'Greys', 'Greys_r', 'OrRd', 'OrRd_r', 'Oranges', 'Oranges_r', "
"'PRGn', 'PRGn_r', 'Paired', 'Paired_r', 'Pastel1', 'Pastel1_r', "
"'Pastel2', 'Pastel2_r', 'PiYG', 'PiYG_r', 'PuBu', 'PuBuGn', "
"'PuBuGn_r', 'PuBu_r', 'PuOr', 'PuOr_r', 'PuRd', 'PuRd_r', "
"'Purples', 'Purples_r', 'RdBu', 'RdBu_r', 'RdGy', 'RdGy_r', "
"'RdPu', 'RdPu_r', 'RdYlBu', 'RdYlBu_r', 'RdYlGn', 'RdYlGn_r', "
"'Reds', 'Reds_r', 'Set1', 'Set1_r', 'Set2', 'Set2_r', 'Set3', "
"'Set3_r', 'Spectral', 'Spectral_r', 'Wistia', 'Wistia_r', 'YlGn', "
"'YlGnBu', 'YlGnBu_r', 'YlGn_r', 'YlOrBr', 'YlOrBr_r', 'YlOrRd', "
"'YlOrRd_r', 'afmhot', 'afmhot_r', 'autumn', 'autumn_r', 'binary', "
"'binary_r', 'bone', 'bone_r', 'brg', 'brg_r', 'bwr', 'bwr_r', "
"'cividis', 'cividis_r', 'cool', 'cool_r', 'coolwarm', 'coolwarm_r', "
"'copper', 'copper_r', 'cubehelix', 'cubehelix_r', 'flag', 'flag_r', "
"'gist_earth', 'gist_earth_r', 'gist_gray', 'gist_gray_r', 'gist_heat', "
"'gist_heat_r', 'gist_ncar', 'gist_ncar_r', 'gist_rainbow', 'gist_rainbow_r', "
"'gist_stern', 'gist_stern_r', 'gist_yarg', 'gist_yarg_r', "
"'gnuplot', 'gnuplot2', 'gnuplot2_r', 'gnuplot_r', 'gray', "
"'gray_r', 'hot', 'hot_r', 'hsv', 'hsv_r', 'inferno', 'inferno_r', "
"'jet', 'jet_r', 'magma', 'magma_r', 'nipy_spectral', 'nipy_spectral_r', "
"'ocean', 'ocean_r', 'pink', 'pink_r', 'plasma', 'plasma_r', 'prism', "
"'prism_r', 'rainbow', 'rainbow_r', 'seismic', 'seismic_r', 'spring', "
"'spring_r', 'summer', 'summer_r', 'tab10', 'tab10_r', 'tab20', 'tab20_r', "
"'tab20b', 'tab20b_r', 'tab20c', 'tab20c_r', 'terrain', 'terrain_r', 'turbo', "
"'turbo_r', 'twilight', 'twilight_r', 'twilight_shifted', 'twilight_shifted_r', "
"'viridis', 'viridis_r', 'winter', 'winter_r'")
go.add_option("background_colormap_min", 0.0, nargs=1,
help_str="")
go.add_option("background_colormap_max", 1.0, nargs=1,
help_str="")
# Color bar options
go.add_option("color_bar_colormap", "Greys", nargs=1,
help_str="The colormap used for the color bar.")
go.add_option("color_bar_margin", 0.005, nargs=1,
help_str="The distance between the main plot and the color bar in a "
"percentage of the overall figure.")
go.add_option("color_bar_width", 0.015, nargs=1,
help_str="The width of the color bar as a percentage of the overall figure.")
go.add_option("color_bar_label", "", nargs=1,
help_str="The label next to the color bar.")
go.add_option("color_bar_label_rotation", 0, nargs=1,
help_str="The width of the color bar as a percentage of the overall figure.")
go.add_option("color_bar_label_font_size", def_color_bar_label_font_size, nargs=1,
help_str="The font size of the color bar label.")
go.add_option("color_bar_label_pad", 0, nargs=1,
help_str="The padding (x-offset of the color bar label.")
```
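A short usage sketch showing how these helpers compose, assuming `global_options` has already registered (via `add_options()`) and parsed its options, as the plotting scripts in this repo do; the plotted data below is illustrative only:
```python
import matplotlib.gridspec as gs
import configure_plots as cp
cp.init_params()  # apply font / LaTeX rc settings from the parsed options
plot_configs = cp.setup_plots(cp.get_plot_ids(), gs.GridSpec(1, 1))
for plot_config in plot_configs:
    ax = plot_config.subplot_dict[0]  # the "main" subplot created by setup_plots
    ax.plot([0, 1, 2], [0, 1, 4], label="demo")
    plot_config.legend_handles.extend(ax.get_lines())
cp.write_plots(plot_configs)  # writes one file per plot, honoring the 'bb' option
```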
#### File: JoostHuizinga/ea-plotting-scripts/createBarplot.py
```python
import os.path
import math
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors
import matplotlib.cm
from scipy.optimize import curve_fit
from scipy.stats import pearsonr
import createPlotUtils as util
import global_options as go
import parse_file as pf
import treatment_list as tl
import configure_plots as cp
# Derived defaults
def def_output_dir(): return pf.base(go.get_str("config_file")) + "_out"
def def_y_data_columns(): return go.get_list("to_plot")
def def_input(): return go.get_list("input_directories")
def def_xticks():
if go.get_bool("one_plot_per_treatment"):
return [go.get_str_list('treatment_names', i) for i in go.get_indices("treatment_names")]
else:
return [[go.get_str('treatment_names', i) for i in go.get_indices("treatment_names")]]
def def_legend_font_size(): return go.get_int("font_size") - 4
def def_title_font_size(): return go.get_int("font_size") + 4
def def_tick_font_size(): return go.get_int("font_size") - 6
###################
##### CLASSES #####
###################
class MedianAndCI:
def __init__(self):
self.median_and_ci = dict()
def __getitem__(self, column):
return self.median_and_ci[column]
def __setitem__(self, column, y_value):
self.median_and_ci[column] = y_value
def keys(self):
return self.median_and_ci.keys()
def add(self, column, x_value, median, ci_min, ci_max, nr_of_items):
if column not in self.median_and_ci:
self.median_and_ci[column] = dict()
self.median_and_ci[column][x_value] = (median, ci_min, ci_max, nr_of_items)
def to_cache(self, cache_file_name):
with open(cache_file_name, 'w') as cache_file:
print("Writing " + cache_file_name + "...")
for column in self.keys():
median_array = self.get_median_array(column)
ci_min_array = self.get_ci_min_array(column)
ci_max_array = self.get_ci_max_array(column)
                nr_of_items_array = self.get_nr_of_items_array(column)
cache_file.write(str(column) + " ")
for i in range(len(median_array)):
cache_file.write(str(median_array[i]) + " ")
cache_file.write(str(ci_min_array[i]) + " ")
                    cache_file.write(str(ci_max_array[i]) + " ")
                    cache_file.write(str(nr_of_items_array[i]) + "\n")
def get_median_array(self, column):
local_list = []
sorted_keys = sorted(self.median_and_ci[column].keys())
for key in sorted_keys:
local_list.append(self.median_and_ci[column][key][0])
return np.array(local_list)
def get_ci_min_array(self, column):
local_list = []
sorted_keys = sorted(self.median_and_ci[column].keys())
for key in sorted_keys:
local_list.append(self.median_and_ci[column][key][1])
return np.array(local_list)
def get_ci_max_array(self, column):
local_list = []
sorted_keys = sorted(self.median_and_ci[column].keys())
for key in sorted_keys:
local_list.append(self.median_and_ci[column][key][2])
return np.array(local_list)
def get_nr_of_items_array(self, column):
local_list = []
sorted_keys = sorted(self.median_and_ci[column].keys())
for key in sorted_keys:
local_list.append(self.median_and_ci[column][key][3])
return np.array(local_list)
class RawData:
def __init__(self):
self.x_data_raw = dict()
self.x_data_binned = dict()
self.y_data = dict()
self.map = dict()
def get_x_data(self, y_data_column):
return self.x_data_binned[y_data_column]
def get_x_data_raw(self, y_data_column):
return self.x_data_raw[y_data_column]
def get_y_data(self, y_data_column):
return self.y_data[y_data_column]
def add(self, y_data_column, x_value, y_value):
bin_greater_than = go.get_any("bin_greater_than")
x_bin_size = go.get_float("x_bin_size")
if y_data_column not in self.x_data_raw:
self.x_data_raw[y_data_column] = list()
if y_data_column not in self.x_data_binned:
self.x_data_binned[y_data_column] = list()
if y_data_column not in self.y_data:
self.y_data[y_data_column] = list()
if y_data_column not in self.map:
self.map[y_data_column] = dict()
self.x_data_raw[y_data_column].append(x_value)
if bin_greater_than is not None:
bin_greater_than = float(bin_greater_than)
if x_value > bin_greater_than:
x_value = bin_greater_than
if x_bin_size > 0:
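            # Bin x_value to the upper edge of its bin; e.g. with x_bin_size=0.5,
            # x_value=1.2 gives bin_nr=ceil(2.4)=3, so the binned x_value is 1.5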
bin_nr = math.ceil(x_value / x_bin_size)
x_value = bin_nr * x_bin_size
self.x_data_binned[y_data_column].append(x_value)
self.y_data[y_data_column].append(y_value)
if x_value not in self.map[y_data_column]:
self.map[y_data_column][x_value] = list()
self.map[y_data_column][x_value].append(y_value)
def get(self, y_data_column, x_value):
return self.map[y_data_column][x_value]
    def merge(self, other):
        for y_data_column in self.map:
            # Use extend (not append) so the other treatment's values are merged
            # element-wise instead of being nested as a single list entry
            self.x_data_raw[y_data_column].extend(other.x_data_raw[y_data_column])
            self.x_data_binned[y_data_column].extend(other.x_data_binned[y_data_column])
            self.y_data[y_data_column].extend(other.y_data[y_data_column])
            for key in other.map[y_data_column]:
                if key in self.map[y_data_column]:
                    self.map[y_data_column][key].extend(other.map[y_data_column][key])
                else:
                    self.map[y_data_column][key] = other.map[y_data_column][key]
class DataSingleTreatment:
def __init__(self, treatment):
self.treatment = treatment
self.raw_data = None
self.median_and_ci = dict()
# self.max_generation = None
self.max_x = None
self.min_x = None
def get_raw_data(self):
if not self.raw_data:
self.init_raw_data()
return self.raw_data
def get_median_and_ci(self):
if not self.median_and_ci:
self.init_median_and_ci()
return self.median_and_ci
def get_max_x(self):
if self.max_x is None:
self.init_raw_data()
return self.max_x
def get_min_x(self):
if self.min_x is None:
self.init_raw_data()
return self.min_x
def init_raw_data(self):
# Read global data
separator = go.get_str("separator")
parse_last_line = go.get_bool("parse_last_line")
generation_based_file = go.get_exists("max_generation")
generation = go.get_int("max_generation")
# Init raw data
self.raw_data = RawData()
for file_name in self.treatment.files:
with open(file_name, 'r') as separated_file:
print("Reading raw data from " + file_name + "...")
                # If the first line of the file is a header line, skip it;
                # otherwise rewind and parse the file from the beginning.
                first_line = separated_file.readline()
                if not pf.is_header_line(first_line):
                    util.debug_print("input", "no header line found, rewinding to start")
                    separated_file.seek(0)
if parse_last_line:
# Parse only the last line of the input files,
# useful to plot the properties of the last generation
# of an evolutionary run.
util.debug_print("input", "parsing last line only")
for line in separated_file:
last_line = line
self._add_raw_data(last_line.split(separator))
elif generation_based_file:
# Parse the file, assuming that the first number on each line indicates the current generation
util.debug_print("input", "parsing as generation based file")
for line in separated_file:
split_line = line.split(separator)
if int(split_line[0]) == generation:
self._add_raw_data(line.split(separator))
else:
# Parse the entire file as raw data without making any assumptions
util.debug_print("input", "parsing as raw data")
for line in separated_file:
self._add_raw_data(line.split(separator))
def _add_raw_data(self, split_line):
# Read global data
read_x_data = go.get_exists("x_data_column")
x_data_column = go.get_int("x_data_column")
y_data_columns = go.get_int_list("y_data_column")
one_plot_per_treatment = go.get_bool("one_plot_per_treatment")
        if read_x_data and x_data_column >= 0:
x_value = float(split_line[x_data_column])
elif one_plot_per_treatment:
x_value = 0
else:
x_value = self.treatment.get_id()
if self.max_x is None or self.max_x < x_value:
self.max_x = x_value
if self.min_x is None or self.min_x > x_value:
self.min_x = x_value
for y_data_column in y_data_columns:
self.raw_data.add(y_data_column, x_value, float(split_line[int(y_data_column)]))
def init_median_and_ci(self):
# Get global data
self.init_median_and_ci_from_data()
def init_median_and_ci_from_data(self):
# Read global data
bootstrap = go.get_bool("bootstrap")
plot_means = go.get_bool("plot_means")
# Initialize empty median and ci
self.median_and_ci = MedianAndCI()
# Calculate median and confidence intervals
for column in self.get_raw_data().map.keys():
for key in self.get_raw_data().map[column].keys():
item = self.get_raw_data().map[column][key]
util.debug_print("input", "calculating median and ci over:", item)
if bootstrap:
if plot_means:
median, ci_min, ci_max = util.calc_stats(item, "mean_and_bootstrap_pivotal")
else:
median, ci_min, ci_max = util.calc_stats(item, "median_and_bootstrap_pivotal")
else:
if plot_means:
median, ci_min, ci_max = util.calc_mean_and_std_error(item)
else:
median, ci_min, ci_max = util.calc_median_and_interquartile_range(item)
util.debug_print("input", "median:", median, "ci:", ci_min, ci_max)
self.median_and_ci.add(column, key, median, ci_min, ci_max, len(item))
def merge(self, other):
self.raw_data = self.get_raw_data()
other_raw_data = other.get_raw_data()
self.raw_data.merge(other_raw_data)
self.median_and_ci = self.get_median_and_ci()
for column in other.get_median_and_ci().keys():
for key in other.get_median_and_ci()[column].keys():
self.median_and_ci[column][key] = other.get_median_and_ci()[column][key]
# self.max_generation = max(self.max_generation, other.max_generation)
self.max_x = max(self.max_x, other.max_x)
self.min_x = min(self.min_x, other.min_x)
class DataOfInterest:
def __init__(self, treatment_list):
self.treatment_list = treatment_list
self.treatment_data = dict()
self.comparison_cache = None
# self.max_generation = None
def get_treatment_list(self):
return self.treatment_list
def get_treatment(self, treatment_id):
return self.treatment_list[treatment_id]
def get_treatment_data(self, treatment):
treatment_id = treatment.get_id()
if treatment_id not in self.treatment_data:
self.treatment_data[treatment_id] = DataSingleTreatment(self.treatment_list[treatment_id])
return self.treatment_data[treatment_id]
def merge_treatment_data(self):
merged_data = DataSingleTreatment(self.treatment_list[0])
for treatment_index in range(1, len(self.treatment_list)):
merged_data.merge(self.get_treatment_data(self.treatment_list[treatment_index]))
return merged_data
######################
# PLOTTING FUNCTIONS #
######################
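# Exponential model used by the optional curve fit below: y = a * exp(-b * x) + c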
def func(x, a, b, c):
return a * np.exp(-b * x) + c
def create_barplot(treatment_list, data_single_treatment, plot_id):
column = go.get_int("y_data_column", plot_id, when_not_exist=go.RETURN_FIRST)
# x_data_column = go.get_int("x_data_column", plot_id, when_not_exist=go.RETURN_FIRST)
# set_y_lim = go.get_exists("y_axis_min") or go.get_exists("y_axis_max")
# y_min = go.get_float("y_axis_min", plot_id, when_not_exist=go.RETURN_FIRST)
# y_max = go.get_float("y_axis_max", plot_id, when_not_exist=go.RETURN_FIRST)
set_x_lim = go.get_exists("x_axis_min") or go.get_exists("x_axis_max")
x_min = go.get_float("x_axis_min", plot_id, when_not_exist=go.RETURN_FIRST, default=None)
x_max = go.get_float("x_axis_max", plot_id, when_not_exist=go.RETURN_FIRST, default=None)
# outputFileName = go.get_str("output", plot_id)
use_color_map = go.get_bool("add_color_map")
perform_linear_fit = go.get_bool("linear_fit")
perform_curve_fit = go.get_bool("curve_fit")
calculate_pearson_correlation = go.get_bool("pearson_correlation")
color_map = go.get_str("color_map")
one_plot_per_treatment = go.get_bool("one_plot_per_treatment")
set_x_labels = go.get_exists("x_tick_labels")
colors_provided = go.get_exists("colors")
if one_plot_per_treatment:
x_labels = go.get_str_list("x_tick_labels", plot_id)
provided_colors = go.get_str_list("colors", plot_id)
else:
x_labels = go.get_str_list("x_tick_labels")
provided_colors = [go.get_str("colors", i) for i in go.get_indices("colors")]
x_bin_size = go.get_float("x_bin_size")
bar_width = go.get_float("bar_width")
bar_align = go.get_str("bar_align")
nr_of_treatments = len(treatment_list)
align_ticks = go.get_bool("align_ticks")
tick_rotation = go.get_float("tick_rotation")
output_dir = go.get_str("output_directory")
nr_of_bars = len(data_single_treatment.get_median_and_ci().get_nr_of_items_array(column))
# Setup plot details
# fig, ax = setup_plot(plot_id)
fig = plt.figure(plot_id)
ax = fig.gca()
# Set defaults
if not one_plot_per_treatment and not set_x_lim:
x_min = -x_bin_size / 2
x_max = (nr_of_treatments - 1) + x_bin_size / 2
set_x_lim = True
elif not set_x_lim:
x_min = min(data_single_treatment.get_median_and_ci()[column].keys()) - x_bin_size / 2
x_max = max(data_single_treatment.get_median_and_ci()[column].keys()) + x_bin_size / 2
set_x_lim = True
# Normal
# if set_y_lim:
# plt.ylim([y_min, y_max])
if set_x_lim:
plt.xlim([x_min, x_max])
plt.xticks(np.arange(x_min + x_bin_size/2, x_max, x_bin_size))
if set_x_labels or align_ticks:
candidate_ticks = sorted(data_single_treatment.get_median_and_ci()[column].keys())
actual_ticks = []
for candidate_tick in candidate_ticks:
if (x_min is None or candidate_tick >= x_min) and (x_max is None or candidate_tick <= x_max):
actual_ticks.append(candidate_tick)
plt.xticks(np.array(actual_ticks))
plt.xticks(rotation=tick_rotation, ha='center')
# ax = plt.gca()
    # help(ax.tick_params)
# ax.tick_params(direction='out', pad=15)
# for tick in ax.xaxis.get_major_ticks():
# print tick.label1.get_text()
# tick.label1.set_text(tick.label1.get_text() + "\n\n\n")
# Zoom
# plt.ylim([0.235, 0.36])
# plt.xlim([0.0, 10.0])
# Set color map
if use_color_map:
util.debug_print("color", "Colormap:", color_map)
# normalize_class = mpl.colors.Normalize()
normalize_class = matplotlib.colors.LogNorm()
bin_size_array = data_single_treatment.get_median_and_ci().get_nr_of_items_array(column)
colorMap = matplotlib.cm.ScalarMappable(norm=normalize_class, cmap=color_map)
colorMap.set_array(bin_size_array)
colors = colorMap.to_rgba(bin_size_array)
color_bar = plt.colorbar(colorMap)
color_bar.set_label("Number of Images")
elif colors_provided:
colors = provided_colors
while len(colors) < nr_of_bars:
colors.append("#000082")
else:
        colors = [(0.0, 0.0, 0.8, 1.0)] * nr_of_bars  # one RGBA tuple per bar
# Create bar plot
x_axis = np.array(sorted(data_single_treatment.get_median_and_ci()[column].keys()))
y_data = data_single_treatment.get_median_and_ci().get_median_array(column)
ci_lower = y_data - data_single_treatment.get_median_and_ci().get_ci_min_array(column)
ci_upper = data_single_treatment.get_median_and_ci().get_ci_max_array(column) - y_data
# if one_plot_per_treatment: x_axis += (bar_width/2)
util.debug_print("data", "x-data:", x_axis)
util.debug_print("data", "y-data:", y_data)
util.debug_print("data", "bar_width:", bar_width)
rects1 = ax.bar(x_axis, y_data, bar_width, color=colors, yerr=[ci_lower, ci_upper], align=bar_align)
plt.axhline(0, color='black')
# Perform linear fit
if perform_linear_fit:
x_data = data_single_treatment.get_raw_data().get_x_data_raw(column)
y_data = data_single_treatment.get_raw_data().get_y_data(column)
z = np.polyfit(x_data, y_data, 1)
p = np.poly1d(z)
max_x = data_single_treatment.get_max_x() + 1
linear_fit = plt.plot([x_min, max_x], [p(x_min), p(max_x)], "k-", label='Linear fit')
# Perform curve fit
    if perform_curve_fit:
        x_data = data_single_treatment.get_raw_data().get_x_data(column)
        y_data = data_single_treatment.get_raw_data().get_y_data(column)
        x_axis_array = np.array(x_data)
        y_axis_array = np.array(y_data)
        popt, pcov = curve_fit(func, x_axis_array, y_axis_array)
        # Define max_x here as well, so the curve fit also works when linear_fit is disabled
        max_x = data_single_treatment.get_max_x() + 1
        x_axis_array_asymp = np.arange(0, max_x, 0.1)
        y_fit = func(x_axis_array_asymp, *popt)
        exponential_fit = plt.plot(x_axis_array_asymp, y_fit, "g-", label='Exponential fit')
# Calculate correlation
    if calculate_pearson_correlation:
        # Fetch the raw data here so the correlation also works when no fit was performed
        x_data = data_single_treatment.get_raw_data().get_x_data_raw(column)
        y_data = data_single_treatment.get_raw_data().get_y_data(column)
        correlation_coefficient, two_tailed_p_value = pearsonr(x_data, y_data)
        print("Correlation coefficient: ", correlation_coefficient, " P-value: ", two_tailed_p_value)
with open(output_dir + '/statistics.txt', 'w') as output_file:
output_file.write("Correlation coefficient: ")
output_file.write(str(correlation_coefficient))
output_file.write(" P-value: ")
output_file.write(str(two_tailed_p_value))
# Setup plot details
if perform_linear_fit or perform_curve_fit:
if go.get_exists("legend_loc", plot_id) and go.get_str("legend_loc", plot_id) != "none":
plt.legend(loc=go.get_str("legend_loc", plot_id))
if set_x_labels:
ax.set_xticklabels(x_labels)
return fig
######################
# CONFIGURE PLOTS #
######################
# def setup_plot(plot_id):
# """A setup for the different plots"""
#
# # Setup the matplotlib params
# preamble = [r'\usepackage[T1]{fontenc}',
# r'\usepackage{amsmath}',
# r'\usepackage{txfonts}',
# r'\usepackage{textcomp}']
# matplotlib.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
# matplotlib.rc('text.latex', preamble=preamble)
# params = {'backend': 'pdf',
# 'axes.labelsize': go.get_int("font_size"),
# 'font.size': go.get_int("font_size"),
# 'legend.fontsize': go.get_int("legend_font_size"),
# 'xtick.labelsize': go.get_int("tick_font_size"),
# 'ytick.labelsize': go.get_int("tick_font_size"),
# 'text.usetex': util.latex_available()}
# matplotlib.rcParams.update(params)
#
# fig, ax = plt.subplots(figsize=go.get_float_list("fig_size"))
# if go.get_exists("y_labels", plot_id):
# ax.set_ylabel(go.get_str("y_labels", plot_id))
# if go.get_exists("x_labels", plot_id):
# ax.set_xlabel(go.get_str("x_labels", plot_id))
# if go.get_bool("title") and go.get_exists("titles", plot_id):
# plt.title(go.get_str("titles", plot_id), fontsize=go.get_int("title_size"))
# return fig, ax
def write_plot(fig, filename):
fig.set_tight_layout(True)
print("Writing plot to:", filename)
fig.savefig(filename)
def add_options():
tl.add_options()
pf.add_options()
cp.add_options()
# Directory settings
# go.add_option("templates", ".*")
# go.add_option("output_directory", def_output_dir, nargs=1)
# General plot settings
# go.add_option("title", True)
# go.add_option("titles", "")
# go.add_option("x_labels")
# go.add_option("y_labels")
# go.add_option("y_axis_min")
# go.add_option("y_axis_max")
# go.add_option("x_axis_min")
# go.add_option("x_axis_max")
go.add_option("to_plot", 1)
# go.add_option("file_names")
go.add_option("max_generation")
go.add_option("parse_last_line", False)
go.add_option("x_data_column")
go.add_option("y_data_column", def_y_data_columns)
# go.add_option("legend_loc", "upper right")
# go.add_option("input_directories")
go.add_option("input", def_input)
go.add_option("output", "")
# go.add_option("colors")
# go.add_option("fig_size", [8, 6])
# go.add_option("separator", " ")
go.add_option("bootstrap", False)
# go.add_option("treatment_names")
go.add_option("x_tick_labels", def_xticks)
go.add_option("align_ticks", False)
go.add_option("linear_fit", False)
go.add_option("curve_fit", False)
go.add_option("pearson_correlation", False)
go.add_option("bin_greater_than", None)
go.add_option("color_map", "jet")
go.add_option("add_color_map", False)
go.add_option("x_bin_size", 1.0)
go.add_option("bar_width", 0.7)
go.add_option("plot_means", False)
go.add_option("bar_align", "center")
go.add_option("one_plot_per_treatment", False)
go.add_option("tick_rotation", 0)
# Font settings
# go.add_option("font_size", 18, nargs=1)
# go.add_option("title_size", def_title_font_size, nargs=1)
# go.add_option("legend_font_size", def_legend_font_size, nargs=1)
# go.add_option("tick_font_size", def_tick_font_size, nargs=1)
def init_options():
go.init_options("Script for creating bar-plots.", "[input [input ...]] [OPTIONS]", "2.0")
add_options()
######################
# PARSE OPTIONS #
######################
def parse_options(command_line_args):
go.parse_global_options(command_line_args)
treatment_list = tl.read_treatments()
# treatment_list = util.TreatmentList()
# for i in range(len(go.get_list("input"))):
# input_dir = go.get_str("input", i)
# treat_name = go.get_str("treatment_names", i)
# treat_name_s = go.get_str("treatment_names_short", i)
# treatment_list.add_treatment(input_dir, treat_name, treat_name_s)
if len(treatment_list) < 1:
print("No treatments provided")
sys.exit(1)
data_of_interest = DataOfInterest(treatment_list)
return treatment_list, data_of_interest
def create_plots(data_of_interest, treatment_list):
cp.init_params()
output_dir = go.get_str("output_directory")
one_plot_per_treatment = go.get_bool("one_plot_per_treatment")
nr_of_columns = len(go.get_list("y_data_column"))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for plot_id in range(nr_of_columns):
if one_plot_per_treatment:
for treatment_nb, treatment in enumerate(treatment_list):
print("Writing plot for treatment:", treatment)
l_plot_id = plot_id * len(treatment_list) + treatment_nb
print("file_names:", go.get_glb("file_names"), "plot_id:", l_plot_id)
plot_config = cp.setup_figure(l_plot_id)
cp.setup_plot(plot_config)
fig = create_barplot(treatment_list, data_of_interest.get_treatment_data(treatment), l_plot_id)
# write_plot(fig, output_dir + "/" + go.get_str("file_names", l_plot_id) + ".pdf")
cp.write_plot(plot_config)
else:
plot_config = cp.setup_figure(plot_id)
cp.setup_plot(plot_config)
fig = create_barplot(treatment_list, data_of_interest.merge_treatment_data(), plot_id)
# write_plot(fig, output_dir + "/" + go.get_str("file_names", plot_id) + ".pdf")
cp.write_plot(plot_config)
def execute_plots(command_line_args):
treatment_list, data_of_interest = parse_options(command_line_args)
# Plot all treatments
create_plots(data_of_interest, treatment_list)
######################
# MAIN #
######################
def main():
init_options()
execute_plots(sys.argv[1:])
# output_dir = go.get_str("output_directory")
# one_plot_per_treatment = go.get_bool("one_plot_per_treatment")
# nr_of_columns = len(go.get_list("y_data_column"))
#
# if not os.path.exists(output_dir):
# os.makedirs(output_dir)
# for plot_id in range(nr_of_columns):
# if one_plot_per_treatment:
# for treatment in treatment_list:
# fig = createBarplot(data_of_interest.get_treatment_data(treatment), plot_id)
# write_plot(fig, output_dir + "/" + go.get_str("file_names", plot_id) + ".pdf")
# else:
# fig = createBarplot(data_of_interest.merge_treatment_data(), plot_id)
# write_plot(fig, output_dir + "/" + go.get_str("file_names", plot_id) + ".pdf")
if __name__ == '__main__':
main()
``` |
{
"source": "JoostJM/nrrdify_suv",
"score": 2
} |
#### File: nrrdify_suv/nrrdify_suv/__init__.py
```python
import datetime
import logging
import nrrdify
from nrrdify import commandline
import SimpleITK as sitk
import pydicom
pydicom.datadict.add_private_dict_entries('Philips PET Private Group', { # {tag: (VR, VM, description) ...}
0x70531000: ('DS', '1', 'SUV Scale Factor'),
0x70531009: ('DS', '1', 'Activity Concentration Scale Factor')
})
logger = logging.getLogger('nrrdify.suv')
def main(args=None):
nrrdify.post_processing = post_processing
commandline.main(args)
def post_processing(im, slices):
global logger
patient_name = str(getattr(slices[0], 'PatientName', '')).split('^')[0]
study_date = getattr(slices[0], 'StudyDate', '19000101')
series_description = getattr(slices[0], 'SeriesDescription', 'Unkn')
series_number = getattr(slices[0], 'SeriesNumber', -1)
radionuclide_sq = getattr(slices[0], 'RadiopharmaceuticalInformationSequence', None)
if radionuclide_sq is None:
logger.warning("Radionuclide information sequence not found in volume (patient %s, studydate %s series %d. %s), skipping...",
patient_name, study_date, series_number, series_description)
return im # probably not PET
sanitychecks = {}
sanity_keys = ('CorrectedImage', 'DecayCorrection', 'Units')
for k in sanity_keys:
sanitychecks[k] = getattr(slices[0], k, None)
if sanitychecks[k] is None:
logger.warning('Missing required sanity check tag "%s" in volume (patient %s, studydate %s series %d. %s), skipping...',
k, patient_name, study_date, series_number, series_description)
return im
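  # Require attenuation- and decay-corrected images, with the decay reference
  # being either the acquisition start time ('START') or the administration
  # time ('ADMIN'); anything else cannot be converted to SUV here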
if not ('ATTN' in sanitychecks['CorrectedImage'] and ('DECAY' in sanitychecks['CorrectedImage'] or 'DECY' in sanitychecks['CorrectedImage']) and
(sanitychecks['DecayCorrection'] == 'START' or sanitychecks['DecayCorrection'] == 'ADMIN')):
logger.warning('Sanity checks failed for volume (patient %s, studydate %s series %d. %s), skipping...',
patient_name, study_date, series_number, series_description)
return im
if 0x70531000 in slices[0]:
if sanitychecks['Units'] == 'BQML':
SUV_conversion_factor = slices[0][0x70531000].value
CNT_conversion_factor = slices[0][0x70531009].value
SUV_constant = SUV_conversion_factor / CNT_conversion_factor
logger.info('Patient %s, studydate %s series %d. %s: Applying SUV conversion (SUV conversion constant %g, SUV conversion factor %g, parsed from (7053, 1000) / CNT conversion factor %g, parsed from (7053, 1009)',
patient_name, study_date, series_number, series_description, SUV_constant, SUV_conversion_factor, CNT_conversion_factor)
elif sanitychecks['Units'] == 'CNTS':
SUV_constant = slices[0][0x70531000].value
logger.info('Patient %s, studydate %s series %d. %s: Applying SUV conversion (SUV conversion constant %g, parsed from (7053, 1000))',
patient_name, study_date, series_number, series_description, SUV_constant)
else:
logger.warning('Expecting unit to be BQML or CNTS, skipping...')
return im
else:
if sanitychecks['Units'] != 'BQML':
logger.warning('Expecting unit to be BQML, skipping...')
return im
required_tags = {}
required_base_keys = ('SeriesTime', 'PatientWeight')
required_seq_keys = ('RadionuclideHalfLife', 'RadionuclideTotalDose', 'RadiopharmaceuticalStartTime')
for k in required_base_keys:
required_tags[k] = getattr(slices[0], k, None)
if required_tags[k] is None:
logger.warning('Missing required tag "%s" in volume (patient %s, studydate %s series %d. %s), skipping...',
k, patient_name, study_date, series_number, series_description)
return im
for k in required_seq_keys:
required_tags[k] = getattr(radionuclide_sq[0], k, None)
if required_tags[k] is None:
logger.warning('Missing required tag "%s" in volume (patient %s, studydate %s series %d. %s), skipping...',
k, patient_name, study_date, series_number, series_description)
return im
# Force cast to float
injected_dose = float(required_tags['RadionuclideTotalDose'])
bodyweight = float(required_tags['PatientWeight'])
half_life = float(required_tags['RadionuclideHalfLife'])
if sanitychecks['DecayCorrection'] == 'START': # images are decay-corrected to image acquisition time (additional correction for interval administration-acquisition is needed)
# Convert times to datetime and compute difference
series_time = datetime.datetime.strptime(required_tags['SeriesTime'], '%H%M%S')
admin_time = datetime.datetime.strptime(required_tags['RadiopharmaceuticalStartTime'], '%H%M%S')
decay_time = (series_time - admin_time).total_seconds()
# Compute total dose at acquisition start time
decayed_dose = injected_dose * (2 ** (-decay_time / half_life))
else: # images are decay-corrected to administration time (so no additional correction needed)
decayed_dose = injected_dose
# Compute the SUV conversion factor
SUV_constant = bodyweight * 1000 / decayed_dose
logger.info('Patient %s, studydate %s series %d. %s: Applying SUV conversion (SUV conversion constant %g, injected dose (at acquisition start time) %g, body weight %g)',
patient_name, study_date, series_number, series_description, SUV_constant, decayed_dose, bodyweight)
im = sitk.Cast(im, sitk.sitkFloat32)
im *= SUV_constant
return im
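# Worked sketch (editor's note, hypothetical F-18 numbers; not from the
# original module): with injected_dose = 370e6 Bq, half_life = 6588 s and a
# 3600 s interval between administration and acquisition start,
#   decayed_dose = 370e6 * (2 ** (-3600 / 6588))  # ~2.53e8 Bq
#   SUV_constant = 75.0 * 1000 / decayed_dose     # ~2.96e-4 for a 75 kg patient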
if __name__ == '__main__':
main()
``` |
{
"source": "JoostJM/remote_kernel",
"score": 2
} |
#### File: remote_kernel/remote_kernel/ssh_client.py
```python
import logging
import os
import re
import paramiko
from sshtunnel import SSHTunnelForwarder
logger = logging.getLogger('remote_kernel.ssh_client')
try:
from . import dialog
except ImportError as e:
logger.warning('Could not import GUI module!\n\t' + str(e))
dialog = None
class ParamikoClient(paramiko.SSHClient):
# Regex pattern to parse out ssh connection arguments of format [username@]host[:port]
host_pattern = re.compile(r'((?P<user>[^@]+)@)?(?P<host>[^:]+)(:(?P<port>\d+))?')
def __init__(self, hostkeys='~/.ssh/known_hosts'):
super(ParamikoClient, self).__init__()
self.host = None
self.port = paramiko.config.SSH_PORT
self.username = None
self.private_key = None
self.load_host_keys(os.path.expanduser(hostkeys))
self._jump_host = None
self.tunnels = []
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def connect_override(self, host, pkey=None, jump_host=None, use_jump_pkey=True):
"""
Alternative function to connect to an SSH client. Provides an override to paramiko.SSHClient.connect, with
fewer arguments.
:param host: SSH host to connect to. Expected format: [username@]host[:port]
:param pkey: paramiko.RSAKey or string pointing to ssh key to use for authentication
:param jump_host: Optional (list or tuple of) string (format same as `host`) or instance of ParamikoClient connected
to the jump server. When an item in the list is a ParamikoClient, all subsequent items are ignored.
:param use_jump_pkey: If True and jump_host is not None, re-use the jump_host.private_key.
If successful, pkey is ignored.
:return: None
"""
if jump_host is not None:
# First check if jump host is a list/tuple or a single item
if isinstance(jump_host, (tuple, list)):
# Get the last item, as the first real connection is made at the bottom of this recursive function
if len(jump_host) > 0:
jump_client = jump_host[-1]
next_jump = jump_host[:-1]
else:
jump_client = None
next_jump = None
else:
jump_client = jump_host
next_jump = None
# set the jump_host, connect to it if the item is just the host address
if jump_client is None:
pass
elif isinstance(jump_client, ParamikoClient):
self._jump_host = jump_client
elif isinstance(jump_client, str):
self._jump_host = ParamikoClient().connect_override(jump_client, pkey, next_jump, use_jump_pkey)
else:
raise ValueError("Jump host items should either be ParamikoClient or string, found type %s" % type(jump_client))
self.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Parse out the connection string
host_match = self.host_pattern.fullmatch(host)
if host_match is None:
raise ValueError('Host string "%s" is invalid. Should match [username@]host[:port]' % host)
host_dict = host_match.groupdict()
self.host = host_dict['host']
if host_dict['port'] is not None:
self.port = int(host_dict['port'])
self.username = host_dict['user']
if self.username is None:
if dialog is None:
raise ValueError('username is required, but password dialog does not work!')
self.username = dialog.PromptDialog(prompt='Connecting to\n%s:%i\nUsername:' % (self.host, self.port),
title="Username?").showDialog()
assert self.username is not None and self.username != ''
# Set up the authentication variables
pwd = None
if self._jump_host is not None and use_jump_pkey and self._jump_host.private_key is not None:
self.private_key = self._jump_host.private_key
elif pkey is not None:
if isinstance(pkey, paramiko.RSAKey):
self.private_key = pkey
elif isinstance(pkey, str):
try:
self.private_key = paramiko.RSAKey.from_private_key_file(os.path.expanduser(pkey))
except paramiko.PasswordRequiredException:
if dialog is None:
raise ValueError('Provided key requires password, but password dialog does not work!')
pwd = dialog.PwdDialog(prompt='Loading SSH Key:\n%s\nRSA passphrase' % pkey,
title="RSA Passphrase").showDialog()
self.private_key = paramiko.RSAKey.from_private_key_file(os.path.expanduser(pkey), pwd)
elif dialog is None:
raise ValueError('Cannot start client without private key when password dialog does not work.')
else:
pwd = dialog.PwdDialog(prompt='Connecting to\n%s@%s:%i\nPassword:' % (self.username, self.host, self.port),
title='Password').showDialog()
jump_channel = None
if self._jump_host is not None:
src_addr = (self._jump_host.host, self._jump_host.port)
dest_addr = (self.host, self.port)
jump_transport = self._jump_host.get_transport()
jump_channel = jump_transport.open_channel('direct-tcpip', dest_addr=dest_addr, src_addr=src_addr)
self.connect(self.host, self.port, self.username, pwd, self.private_key, sock=jump_channel)
return self
def close(self):
# Clean up SSH connection
for tunnel in self.tunnels:
tunnel.close()
self.tunnels = None
super(ParamikoClient, self).close()
if self._jump_host is not None:
self._jump_host.close()
self._jump_host = None
def create_forwarding_tunnel(self, local_bind_addresses, remote_bind_addresses):
# Set up the tunnel. Though we pass the target host, port, user and dummy password, these are not used.
# This is done to make sure the initialization does not fail (does type checking on the connection args)
# Instead, we manually set the transport we get from the existing connection.
# This prevents the tunnel from trying to open up a new connection
# Suppress log output from sshtunnel
ssh_logger = logging.getLogger('ssh_tunnel')
ssh_logger.addHandler(logging.NullHandler())
tunnel = SSHTunnelForwarder((self.host, self.port),
ssh_username=self.username, ssh_password='<PASSWORD>',
local_bind_addresses=local_bind_addresses,
remote_bind_addresses=remote_bind_addresses,
logger=ssh_logger)
tunnel._transport = self.get_transport()
self.tunnels.append(tunnel)
return tunnel
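# Usage sketch (editor's note, hypothetical host and key path; not part of
# the original module):
#   with ParamikoClient() as client:
#       client.connect_override('alice@jump.example.com:22', pkey='~/.ssh/id_rsa')
#       tunnel = client.create_forwarding_tunnel([('127.0.0.1', 8888)],
#                                                [('127.0.0.1', 8888)])
#       tunnel.start()  # forwards localhost:8888 over the existing transport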
``` |
{
"source": "JoostJM/torchsample",
"score": 3
} |
#### File: torchsample/transforms/affine3d_transforms.py
```python
import math
import random
import torch as th
from ..utils import th_affine3d, th_random_choice
class RandomAffine3D(object):
def __init__(self,
rotation_range=None,
translation_range=None,
shear_range=None,
zoom_range=None,
interp='trilinear',
lazy=False):
"""
Perform an affine transform with various sub-transforms, using
only one interpolation and without having to instantiate each
sub-transform individually.
Arguments
---------
rotation_range : one integer or float
image will be rotated randomly between (-degrees, degrees)
translation_range : float or 3-tuple of float between [0, 1)
first value:
fractional bounds of total depth to shift image
image will be depth shifted between
(-depth_range * depth_dimension, depth_range * depth_dimension)
second value:
fractional bounds of total width to shift image
Image will be vertically shifted between
(-width_range * width_dimension, width_range * width_dimension)
third value:
fractional bounds of total height to shift image
image will be horizontally shifted between
(-height_range * height_dimension, height_range * height_dimension)
shear_range : float
image will be sheared randomly between (-degrees, degrees)
zoom_range : list/tuple with two floats between [0, infinity).
first float should be less than the second
lower and upper bounds on percent zoom.
Anything less than 1.0 will zoom in on the image,
anything greater than 1.0 will zoom out on the image.
e.g. (0.7, 1.0) will only zoom in,
(1.0, 1.4) will only zoom out,
(0.7, 1.4) will randomly zoom in or out
interp : string in {'trilinear', 'nearest'} or list of strings
type of interpolation to use. You can provide a different
type of interpolation for each input, e.g. if you have two
inputs then you can say `interp=['trilinear','nearest']`
"""
self.transforms = []
if rotation_range is not None:
rotation_tform = RandomRotate3D(rotation_range, lazy=True)
self.transforms.append(rotation_tform)
if translation_range is not None:
translation_tform = RandomTranslate3D(translation_range, lazy=True)
self.transforms.append(translation_tform)
if shear_range is not None:
shear_tform = RandomShear3D(shear_range, lazy=True)
self.transforms.append(shear_tform)
if zoom_range is not None:
zoom_tform = RandomZoom3D(zoom_range, lazy=True)
self.transforms.append(zoom_tform)
self.interp = interp
self.lazy = lazy
if len(self.transforms) == 0:
raise Exception('Must give at least one transform parameter')
def __call__(self, *inputs):
# collect all of the lazily returned tform matrices
tform_matrix = self.transforms[0](inputs[0])
for tform in self.transforms[1:]:
tform_matrix = tform_matrix.mm(tform(inputs[0]))
self.tform_matrix = tform_matrix
if self.lazy:
return tform_matrix
else:
outputs = Affine3D(tform_matrix,
interp=self.interp)(*inputs)
return outputs
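# Composition note (editor's sketch): each lazy sub-transform returns a 4x4
# homogeneous matrix, so the chain collapses into one matrix product, e.g.
#   M = rotate.mm(translate).mm(zoom)
# which Affine3D then applies with a single interpolation pass.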
class Affine3D(object):
def __init__(self,
tform_matrix,
interp='trilinear'):
"""
Perform an affine transform with various sub-transforms, using
only one interpolation and without having to instantiate each
sub-transform individually.
Arguments
---------
tform_matrix : a 3x3 or 3x4 matrix
affine transformation matrix to apply
interp : string in {'trilinear', 'nearest'} or list of strings
type of interpolation to use. You can provide a different
type of interpolation for each input, e.g. if you have two
inputs then you can say `interp=['trilinear','nearest']`
"""
self.tform_matrix = tform_matrix
self.interp = interp
def __call__(self, *inputs):
if not isinstance(self.interp, (tuple,list)):
interp = [self.interp]*len(inputs)
else:
interp = self.interp
outputs = []
for idx, _input in enumerate(inputs):
input_tf = th_affine3d(_input,
self.tform_matrix,
mode=interp[idx])
outputs.append(input_tf)
return outputs if idx >= 1 else outputs[0]
class Affine3DCompose(object):
def __init__(self,
transforms,
interp='trilinear'):
"""
Apply a collection of explicit affine transforms to an input image,
and to a target image if necessary
Arguments
---------
transforms : list or tuple
each element in the list/tuple should be an affine transform.
currently supported transforms:
- Rotate3D()
- Translate3D()
- Shear3D()
- Zoom3D()
interp : string in {'trilinear', 'nearest'} or list of strings
type of interpolation to use. You can provide a different
type of interpolation for each input, e.g. if you have two
inputs then you can say `interp=['trilinear','nearest']`
"""
self.transforms = transforms
self.interp = interp
# set transforms to lazy so they only return the tform matrix
for t in self.transforms:
t.lazy = True
def __call__(self, *inputs):
# collect all of the lazily returned tform matrices
tform_matrix = self.transforms[0](inputs[0])
for tform in self.transforms[1:]:
tform_matrix = tform_matrix.mm(tform(inputs[0]))
if not isinstance(self.interp, (tuple,list)):
interp = [self.interp]*len(inputs)
else:
interp = self.interp
outputs = []
for idx, _input in enumerate(inputs):
input_tf = th_affine3d(_input,
tform_matrix,
mode=interp[idx])
outputs.append(input_tf)
return outputs if idx >= 1 else outputs[0]
class RandomRotate3D(object):
def __init__(self,
rotation_range,
axis=0,
interp='trilinear',
lazy=False):
"""
Randomly rotate an image between (-degrees, degrees). If the image
has multiple channels, the same rotation will be applied to each channel.
Arguments
---------
rotation_range : integer or float
image will be rotated between (-degrees, degrees) degrees
axis: integer in (0, 1, 2)
axis (z, y, x) for rotation. This axis will be fixed.
interp : string in {'trilinear', 'nearest'} or list of strings
type of interpolation to use. You can provide a different
type of interpolation for each input, e.g. if you have two
inputs then you can say `interp=['trilinear','nearest']`
lazy : boolean
if true, only create the affine transform matrix and return that
if false, perform the transform on the tensor and return the tensor
"""
self.rotation_range = rotation_range
self.axis = axis
self.interp = interp
self.lazy = lazy
def __call__(self, *inputs):
degree = random.uniform(-self.rotation_range, self.rotation_range)
if self.lazy:
return Rotate3D(degree, axis=self.axis, lazy=True)(inputs[0])
else:
outputs = Rotate3D(degree, axis=self.axis,
interp=self.interp)(*inputs)
return outputs
class RandomChoiceRotate3D(object):
def __init__(self,
values,
axis=0,
p=None,
interp='trilinear',
lazy=False):
"""
Randomly rotate an image from a list of values. If the image
has multiple channels, the same rotation will be applied to each channel.
Arguments
---------
values : a list or tuple
the values from which the rotation value will be sampled
axis: integer in (0, 1, 2)
axis (z, y, x) for rotation. This axis will be fixed.
p : a list or tuple the same length as `values`
the probabilities of sampling any given value. Must sum to 1.
interp : string in {'trilinear', 'nearest'} or list of strings
type of interpolation to use. You can provide a different
type of interpolation for each input, e.g. if you have two
inputs then you can say `interp=['trilinear','nearest']`
lazy : boolean
if true, only create the affine transform matrix and return that
if false, perform the transform on the tensor and return the tensor
"""
if isinstance(values, (list, tuple)):
values = th.FloatTensor(values)
self.values = values
self.axis = axis
if p is None:
p = th.ones(len(values)) / len(values)
else:
if abs(1.0-sum(p)) > 1e-3:
raise ValueError('Probs must sum to 1')
self.p = p
self.interp = interp
self.lazy = lazy
def __call__(self, *inputs):
degree = th_random_choice(self.values, p=self.p)
if self.lazy:
return Rotate3D(degree, axis=self.axis, lazy=True)(inputs[0])
else:
outputs = Rotate3D(degree, axis=self.axis,
interp=self.interp)(*inputs)
return outputs
class Rotate3D(object):
def __init__(self,
value,
axis=0,
interp='trilinear',
lazy=False):
"""
Rotate an image by a fixed angle. If the image
has multiple channels, the same rotation will be applied to each channel.
Arguments
---------
value : integer or float
image will be rotated by this many degrees
axis: integer in (0, 1, 2)
axis (z, y, x) for rotation. This axis will be fixed.
interp : string in {'trilinear', 'nearest'} or list of strings
type of interpolation to use. You can provide a different
type of interpolation for each input, e.g. if you have two
inputs then you can say `interp=['trilinear','nearest']`
lazy : boolean
if true, only create the affine transform matrix and return that
if false, perform the transform on the tensor and return the tensor
"""
self.value = value
self.axis = axis
self.interp = interp
self.lazy = lazy
def __call__(self, *inputs):
if not isinstance(self.interp, (tuple,list)):
interp = [self.interp]*len(inputs)
else:
interp = self.interp
theta = math.pi / 180 * self.value
if self.axis == 0:
rotation_matrix = th.FloatTensor([[1, 0, 0, 0],
[0, math.cos(theta), -math.sin(theta), 0],
[0, math.sin(theta), math.cos(theta), 0],
[0, 0, 0, 1]])
elif self.axis == 1:
rotation_matrix = th.FloatTensor([[math.cos(theta), 0, math.sin(theta), 0],
[0, 1, 0, 0],
[-math.sin(theta), 0, math.cos(theta), 0],
[0, 0, 0, 1]])
elif self.axis == 2:
rotation_matrix = th.FloatTensor([[math.cos(theta), -math.sin(theta), 0, 0],
[math.sin(theta), math.cos(theta), 0, 0],
[ 0, 0, 1, 0],
[ 0, 0, 0, 1]])
else:
raise ValueError('axis out of range [0-2]')
if self.lazy:
return rotation_matrix
else:
outputs = []
for idx, _input in enumerate(inputs):
input_tf = th_affine3d(_input,
rotation_matrix,
mode=interp[idx],
center=True)
outputs.append(input_tf)
return outputs if idx >= 1 else outputs[0]
class RandomTranslate3D(object):
def __init__(self,
translation_range,
interp='trilinear',
lazy=False):
"""
Randomly translate an image some fraction of total height and/or
some fraction of total width. If the image has multiple channels,
the same translation will be applied to each channel. Assumes CDWH
ordering.
Arguments
---------
translation_range : float or 3-tuple of float between [0, 1)
first value:
fractional bounds of total depth to shift image
image will be depth shifted between
(-depth_range * depth_dimension, depth_range * depth_dimension)
second value:
fractional bounds of total width to shift image
Image will be vertically shifted between
(-width_range * width_dimension, width_range * width_dimension)
third value:
fractional bounds of total height to shift image
image will be horizontally shifted between
(-height_range * height_dimension, height_range * height_dimension)
interp : string in {'trilinear', 'nearest'} or list of strings
type of interpolation to use. You can provide a different
type of interpolation for each input, e.g. if you have two
inputs then you can say `interp=['trilinear','nearest']`
lazy : boolean
if true, only create the affine transform matrix and return that
if false, perform the transform on the tensor and return the tensor
"""
if isinstance(translation_range, float):
translation_range = (translation_range, translation_range, translation_range)
self.depth_range = translation_range[0]
self.width_range = translation_range[1]
self.height_range = translation_range[2]
self.interp = interp
self.lazy = lazy
def __call__(self, *inputs):
# height shift
random_height = random.uniform(-self.height_range, self.height_range)
# width shift
random_width = random.uniform(-self.width_range, self.width_range)
# depth shift
random_depth = random.uniform(-self.depth_range, self.depth_range)
if self.lazy:
return Translate3D([random_depth, random_width, random_height],
lazy=True)(inputs[0])
else:
outputs = Translate3D([random_depth, random_width, random_height],
interp=self.interp)(*inputs)
return outputs
class RandomChoiceTranslate3D(object):
def __init__(self,
values,
p=None,
interp='trilinear',
lazy=False):
"""
Randomly translate an image some fraction of total height and/or
some fraction of total width from a list of potential values.
If the image has multiple channels,
the same translation will be applied to each channel.
Arguments
---------
values : a list or tuple
the values from which the translation value will be sampled
p : a list or tuple the same length as `values`
the probabilities of sampling any given value. Must sum to 1.
interp : string in {'trilinear', 'nearest'} or list of strings
type of interpolation to use. You can provide a different
type of interpolation for each input, e.g. if you have two
inputs then you can say `interp=['trilinear','nearest']`
lazy : boolean
if true, only create the affine transform matrix and return that
if false, perform the transform on the tensor and return the tensor
"""
if isinstance(values, (list, tuple)):
values = th.FloatTensor(values)
self.values = values
if p is None:
p = th.ones(len(values)) / len(values)
else:
if abs(1.0-sum(p)) > 1e-3:
raise ValueError('Probs must sum to 1')
self.p = p
self.interp = interp
self.lazy = lazy
def __call__(self, *inputs):
random_height = th_random_choice(self.values, p=self.p)
random_width = th_random_choice(self.values, p=self.p)
random_depth = th_random_choice(self.values, p=self.p)
if self.lazy:
return Translate3D([random_depth, random_width, random_height],
lazy=True)(inputs[0])
else:
outputs = Translate3D([random_depth, random_width, random_height],
interp=self.interp)(*inputs)
return outputs
class Translate3D(object):
def __init__(self,
value,
interp='trilinear',
lazy=False):
"""
Arguments
---------
value : float or 3-tuple of float
if a single value, the depth, vertical and horizontal translations
will each be this value * the corresponding dimension. Thus, value
should be a fraction of total size, with range (-1, 1)
interp : string in {'trilinear', 'nearest'} or list of strings
type of interpolation to use. You can provide a different
type of interpolation for each input, e.g. if you have two
inputs then you can say `interp=['trilinear','nearest']`
"""
if not isinstance(value, (tuple,list)):
value = (value, value, value)
if value[0] > 1 or value[0] < -1:
raise ValueError('Translation must be between -1 and 1')
if value[1] > 1 or value[1] < -1:
raise ValueError('Translation must be between -1 and 1')
if value[2] > 1 or value[2] < -1:
raise ValueError('Translation must be between -1 and 1')
self.depth_range = value[0]
self.width_range = value[1]
self.height_range = value[2]
self.interp = interp
self.lazy = lazy
def __call__(self, *inputs):
if not isinstance(self.interp, (tuple,list)):
interp = [self.interp]*len(inputs)
else:
interp = self.interp
tz = self.depth_range * inputs[0].size(1)
ty = self.width_range * inputs[0].size(2)
tx = self.height_range * inputs[0].size(3)
translation_matrix = th.FloatTensor([[1, 0, 0, tz],
[0, 1, 0, ty],
[0, 0, 1, tx],
[0, 0, 0, 1]])
if self.lazy:
return translation_matrix
else:
outputs = []
for idx, _input in enumerate(inputs):
input_tf = th_affine3d(_input,
translation_matrix,
mode=interp[idx],
center=True)
outputs.append(input_tf)
return outputs if idx >= 1 else outputs[0]
class RandomShear3D(object):
def __init__(self,
shear_range,
interp='trilinear',
lazy=False):
"""
Randomly shear an image, with the shear angle sampled from (-shear_range, shear_range) degrees
Arguments
---------
shear_range : float
degree bounds on the shear transform (converted to radians internally)
interp : string in {'trilinear', 'nearest'} or list of strings
type of interpolation to use. You can provide a different
type of interpolation for each input, e.g. if you have two
inputs then you can say `interp=['trilinear','nearest']`
lazy : boolean
if false, perform the transform on the tensor and return the tensor
if true, only create the affine transform matrix and return that
"""
self.shear_range = shear_range
self.interp = interp
self.lazy = lazy
def __call__(self, *inputs):
shear_x = random.uniform(-self.shear_range, self.shear_range)
shear_y = random.uniform(-self.shear_range, self.shear_range)
if self.lazy:
return Shear3D([shear_x, shear_y],
lazy=True)(inputs[0])
else:
outputs = Shear3D([shear_x, shear_y],
interp=self.interp)(*inputs)
return outputs
class RandomChoiceShear3D(object):
def __init__(self,
values,
p=None,
interp='trilinear',
lazy=False):
"""
Randomly shear an image with a value sampled from a list of values.
Arguments
---------
values : a list or tuple
the values from which the rotation value will be sampled
p : a list or tuple the same length as `values`
the probabilities of sampling any given value. Must sum to 1.
interp : string in {'trilinear', 'nearest'} or list of strings
type of interpolation to use. You can provide a different
type of interpolation for each input, e.g. if you have two
inputs then you can say `interp=['trilinear','nearest']`
lazy : boolean
if false, perform the transform on the tensor and return the tensor
if true, only create the affine transform matrix and return that
"""
if isinstance(values, (list, tuple)):
values = th.FloatTensor(values)
self.values = values
if p is None:
p = th.ones(len(values)) / len(values)
else:
if abs(1.0-sum(p)) > 1e-3:
raise ValueError('Probs must sum to 1')
self.p = p
self.interp = interp
self.lazy = lazy
def __call__(self, *inputs):
shear_x = th_random_choice(self.values, p=self.p)
shear_y = th_random_choice(self.values, p=self.p)
if self.lazy:
return Shear3D([shear_x, shear_y],
lazy=True)(inputs[0])
else:
outputs = Shear3D([shear_x, shear_y],
interp=self.interp)(*inputs)
return outputs
class Shear3D(object):
def __init__(self,
value,
interp='trilinear',
lazy=False):
if isinstance(value, (list, tuple)):
self.value = value
else:
self.value = (value, 0)
self.interp = interp
self.lazy = lazy
def __call__(self, *inputs):
if not isinstance(self.interp, (tuple,list)):
interp = [self.interp]*len(inputs)
else:
interp = self.interp
theta_x = (math.pi * self.value[0]) / 180
theta_y = (math.pi * self.value[1]) / 180
shear_matrix = th.FloatTensor([[1, 0, 0, 0],
[0, math.cos(theta_x), math.sin(theta_y), 0],
[0, -math.sin(theta_x), math.cos(theta_y), 0],
[0, 0, 0, 1]])
if self.lazy:
return shear_matrix
else:
outputs = []
for idx, _input in enumerate(inputs):
input_tf = th_affine3d(_input,
shear_matrix,
mode=interp[idx],
center=True)
outputs.append(input_tf)
return outputs if idx >= 1 else outputs[0]
class RandomZoom3D(object):
def __init__(self,
zoom_range,
interp='trilinear',
lazy=False):
"""
Randomly zoom in and/or out on an image
Arguments
---------
zoom_range : tuple or list with 2 values, both between (0, infinity)
lower and upper bounds on percent zoom.
Anything less than 1.0 will zoom in on the image,
anything greater than 1.0 will zoom out on the image.
e.g. (0.7, 1.0) will only zoom in,
(1.0, 1.4) will only zoom out,
(0.7, 1.4) will randomly zoom in or out
interp : string in {'trilinear', 'nearest'} or list of strings
type of interpolation to use. You can provide a different
type of interpolation for each input, e.g. if you have two
inputs then you can say `interp=['trilinear','nearest']`
lazy : boolean
if false, perform the transform on the tensor and return the tensor
if true, only create the affine transform matrix and return that
"""
if not isinstance(zoom_range, list) and not isinstance(zoom_range, tuple):
raise ValueError('zoom_range must be tuple or list with 2 values')
self.zoom_range = zoom_range
self.interp = interp
self.lazy = lazy
def __call__(self, *inputs):
zx = random.uniform(self.zoom_range[0], self.zoom_range[1])
zy = random.uniform(self.zoom_range[0], self.zoom_range[1])
zz = random.uniform(self.zoom_range[0], self.zoom_range[1])
if self.lazy:
return Zoom3D([zz, zy, zx], lazy=True)(inputs[0])
else:
outputs = Zoom3D([zz, zy, zx],
interp=self.interp)(*inputs)
return outputs
class RandomChoiceZoom3D(object):
def __init__(self,
values,
p=None,
interp='trilinear',
lazy=False):
"""
Randomly zoom in and/or out on an image with a value sampled from
a list of values
Arguments
---------
values : a list or tuple
the values from which the applied zoom value will be sampled
p : a list or tuple the same length as `values`
the probabilities of sampling any given value. Must sum to 1.
interp : string in {'trilinear', 'nearest'} or list of strings
type of interpolation to use. You can provide a different
type of interpolation for each input, e.g. if you have two
inputs then you can say `interp=['trilinear','nearest']`
lazy : boolean
if false, perform the transform on the tensor and return the tensor
if true, only create the affine transform matrix and return that
"""
if isinstance(values, (list, tuple)):
values = th.FloatTensor(values)
self.values = values
if p is None:
p = th.ones(len(values)) / len(values)
else:
if abs(1.0-sum(p)) > 1e-3:
raise ValueError('Probs must sum to 1')
self.p = p
self.interp = interp
self.lazy = lazy
def __call__(self, *inputs):
zx = th_random_choice(self.values, p=self.p)
zy = th_random_choice(self.values, p=self.p)
zz = th_random_choice(self.values, p=self.p)
if self.lazy:
return Zoom3D([zz, zy, zx], lazy=True)(inputs[0])
else:
outputs = Zoom3D([zz, zy, zx],
interp=self.interp)(*inputs)
return outputs
class Zoom3D(object):
def __init__(self,
value,
interp='trilinear',
lazy=False):
"""
Arguments
---------
value : float
Fractional zoom.
=1 : no zoom
>1 : zoom-in (value-1)%
<1 : zoom-out (1-value)%
interp : string in {'trilinear', 'nearest'} or list of strings
type of interpolation to use. You can provide a different
type of interpolation for each input, e.g. if you have two
inputs then you can say `interp=['trilinear','nearest']`
lazy: boolean
if true, only create the affine transform matrix and return that
if false, perform the transform on the tensor and return the tensor
"""
if not isinstance(value, (tuple,list)):
value = (value, value, value)
self.value = value
self.interp = interp
self.lazy = lazy
def __call__(self, *inputs):
if not isinstance(self.interp, (tuple,list)):
interp = [self.interp]*len(inputs)
else:
interp = self.interp
zz, zy, zx = self.value
zoom_matrix = th.FloatTensor([[zz, 0, 0, 0],
[0, zy, 0, 0],
[0, 0, zx, 0],
[0, 0, 0, 1]])
if self.lazy:
return zoom_matrix
else:
outputs = []
for idx, _input in enumerate(inputs):
input_tf = th_affine3d(_input,
zoom_matrix,
mode=interp[idx],
center=True)
outputs.append(input_tf)
return outputs if idx >= 1 else outputs[0]
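# Usage sketch (editor's note, assumes a CDWH float tensor):
#   vol = th.rand(1, 16, 64, 64)
#   aug = RandomAffine3D(rotation_range=15, translation_range=0.1,
#                        zoom_range=(0.9, 1.1))
#   vol_tf = aug(vol)  # the sampled matrix would be shared across extra inputs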
```
#### File: torchsample/transforms/medic_transforms.py
```python
import numpy as np
import torch as th
'''
Transforms specific to biomedical images
'''
class NormalizeMedicPercentile(object):
"""
Given min_val: float and max_val: float,
will normalize each channel of the th.*Tensor to
the provided min and max values.
Works by calculating :
a = (max'-min')/(max-min)
b = max' - a * max
new_value = a * value + b
where min' & max' are given values,
and min & max are observed min/max for each channel
"""
def __init__(self,
min_val=0.0,
max_val=1.0,
perc_threshold=(1.0, 95.0),
norm_flag=True):
"""
Normalize a tensor between a min and max value
:param min_val: (float) lower bound of normalized tensor
:param max_val: (float) upper bound of normalized tensor
:param perc_threshold: (float, float) percentile of image intensities used for scaling
:param norm_flag: [bool] list of flags for normalisation
"""
self.min_val = min_val
self.max_val = max_val
self.perc_threshold = perc_threshold
self.norm_flag = norm_flag
def __call__(self, *inputs):
# prepare the normalisation flag
if isinstance(self.norm_flag, bool):
norm_flag = [self.norm_flag] * len(inputs)
else:
norm_flag = self.norm_flag
outputs = []
for idx, _input in enumerate(inputs):
if norm_flag[idx]:
# determine the percentiles and threshold the outliers
_min_val, _max_val = np.percentile(_input.numpy(), self.perc_threshold)
_input[th.le(_input, _min_val)] = _min_val
_input[th.ge(_input, _max_val)] = _max_val
# scale the intensity values
a = (self.max_val - self.min_val) / (_max_val - _min_val)
b = self.max_val - a * _max_val
_input = _input.mul(a).add(b)
outputs.append(_input)
return outputs if idx >= 1 else outputs[0]
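# Worked sketch (editor's note): with min_val=0, max_val=1 and observed
# percentiles (_min_val, _max_val) = (10, 90):
#   a = (1 - 0) / (90 - 10) = 0.0125
#   b = 1 - 0.0125 * 90     = -0.125
# so a clipped intensity of 50 maps to 0.0125 * 50 - 0.125 = 0.5.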
class NormalizeMedic(object):
"""
Normalises given slice/volume to zero mean
and unit standard deviation.
"""
def __init__(self,
norm_flag=True):
"""
:param norm_flag: [bool] list of flags for normalisation
"""
self.norm_flag = norm_flag
def __call__(self, *inputs):
# prepare the normalisation flag
if isinstance(self.norm_flag, bool):
norm_flag = [self.norm_flag] * len(inputs)
else:
norm_flag = self.norm_flag
outputs = []
for idx, _input in enumerate(inputs):
if norm_flag[idx]:
# subtract the mean intensity value
mean_val = np.mean(_input.numpy().flatten())
_input = _input.add(-1.0 * mean_val)
# scale the intensity values to be unit norm
std_val = np.std(_input.numpy().flatten())
_input = _input.div(float(std_val))
outputs.append(_input)
return outputs if idx >= 1 else outputs[0]
``` |
{
"source": "joostkremers/pymal",
"score": 3
} |
#### File: joostkremers/pymal/pymal.py
```python
import readline # so input() uses editable input
import sys
# Local imports
import reader
import printer
import mal_types as mal
import mal_env as menv
import core
repl_env = None
def READ(line):
return reader.read_str(line)
def EVAL(ast, env):
while True:
if ast is None: # comments
return None
if type(ast) is mal.Error:
return ast
elif type(ast) is not mal.List:
return eval_ast(ast, env)
else: # if ast is a list
if len(ast) == 0: # if ast is the empty list, just return it
return ast
# perform macro expansion
ast = macroexpand(ast, env)
if type(ast) is not mal.List:
return eval_ast(ast, env)
# apply
if type(ast[0]) is mal.Symbol:
symbol = ast[0].name
# Special forms
if symbol == "def!":
return mal_def(env, ast[1:])
elif symbol == "defmacro!":
return mal_defmacro(env, ast[1:])
elif symbol == "try*":
catch = ast[2]
if not (catch[0].name == "catch*"):
return mal.Error("TryError",
"Failing 'catch*' clause")
A = EVAL(ast[1], env)
if type(A) is mal.Error:
# The error is wrapped in a HandledError instance, so
# that evaluation is not halted.
A = mal.HandledError(A)
B = catch[1]
C = catch[2]
env = menv.MalEnv(outer=env, binds=[B], exprs=[A])
ast = C
continue
else:
return A
elif symbol == "let*":
ast, env = mal_let(env, ast[1], ast[2])
continue
elif symbol == "do":
evalled = eval_ast(mal.List(ast[1:-1]), env)
if type(evalled) is mal.Error:
return evalled
ast = ast[-1]
continue
elif symbol == "if":
ast = mal_if(env, ast[1:])
continue
elif symbol == "fn*":
return mal_fn(env, ast[1], ast[2])
elif symbol == "quote":
return ast[1]
elif symbol == "quasiquote":
ast = mal_quasiquote(ast[1])
continue
elif symbol == "macroexpand":
return macroexpand(ast[1], env)
# If the list does not start with a symbol or if the symbol is not a
# special form, we evaluate and apply:
evalled = eval_ast(ast, env)
if type(evalled) is mal.Error:
return evalled
elif type(evalled[0]) is mal.Builtin:
return evalled[0].fn(*evalled[1:])
elif type(evalled[0]) is mal.Function:
ast = evalled[0].ast
env = menv.MalEnv(outer=evalled[0].env,
binds=evalled[0].params,
exprs=evalled[1:])
continue
else:
return mal.Error("ApplyError",
"'{}' is not callable".format(evalled[0]))
# Special forms
def mal_def(environment, ast):
if len(ast) != 2:
return mal.Error("ArgError",
"'def!' requires 2 arguments, "
"received {}".format(len(ast)))
symbol = ast[0]
value = ast[1]
evalled = EVAL(value, environment)
if type(evalled) is not mal.Error:
environment.set(symbol.name, evalled)
return evalled
def mal_defmacro(environment, ast):
if len(ast) != 2:
return mal.Error("ArgError",
"'defmacro!' requires 2 arguments, "
"received {}".format(len(ast)))
symbol = ast[0]
value = ast[1]
evalled = EVAL(value, environment)
if type(evalled) is mal.Function:
evalled.is_macro = True
if type(evalled) is not mal.Error:
environment.set(symbol.name, evalled)
return evalled
def mal_let(environment, bindings, body):
if not isinstance(bindings, (mal.List, mal.Vector)):
return (mal.Error("LetError", "Invalid bind form"), None)
if (len(bindings) % 2 != 0):
return (mal.Error("LetError", "Insufficient bind forms"), None)
new_env = menv.MalEnv(outer=environment)
for i in range(0, len(bindings), 2):
if type(bindings[i]) is not mal.Symbol:
return (mal.Error("LetError", "Attempt to bind to non-symbol"),
None)
evalled = EVAL(bindings[i + 1], new_env)
if type(evalled) is mal.Error:
return (evalled, None)
new_env.set(bindings[i].name, evalled)
return (body, new_env)
def mal_if(environment, args):
if len(args) < 2:
return mal.Error("ArgError",
"'if' requires 2-3 arguments, "
"received {}".format(len(args)))
condition = EVAL(args[0], environment)
if type(condition) is mal.Error:
return condition
if not (condition == mal.NIL or condition == mal.Boolean(False)):
return args[1]
else:
if len(args) == 3:
return args[2]
else:
return mal.NIL
def mal_fn(environment, syms, body):
if '&' in syms:
if syms.index('&') != len(syms) - 2:
return mal.Error("BindsError", "Illegal binds list")
def mal_closure(*params):
new_env = menv.MalEnv(outer=environment, binds=syms, exprs=params)
return EVAL(body, new_env)
return mal.Function(mal_closure, syms, body, environment)
def is_pair(arg):
"""Return True if ARG is a non-empty list or vector."""
if isinstance(arg, list) and len(arg) > 0:
return True
else:
return False
def mal_quasiquote(ast):
# not a list (or empty list)
if not is_pair(ast):
return mal.List((mal.Symbol("quote"), ast))
# unquote
elif type(ast[0]) is mal.Symbol and ast[0].name == "unquote":
return ast[1]
# splice-unquote
elif (is_pair(ast[0]) and
type(ast[0][0]) is mal.Symbol and
ast[0][0].name == "splice-unquote"):
first = mal.Symbol("concat")
second = ast[0][1]
rest = mal_quasiquote(mal.List(ast[1:]))
return mal.List((first, second, rest))
# otherwise
else:
first = mal.Symbol("cons")
second = mal_quasiquote(ast[0])
rest = mal_quasiquote(mal.List(ast[1:]))
return mal.List((first, second, rest))
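# Expansion sketch (editor's note, derived from the branches above):
#   `(1 ~a 3)  -> (cons (quote 1) (cons a (cons (quote 3) (quote ()))))
#   `(1 ~@b)   -> (cons (quote 1) (concat b (quote ())))
# EVAL then evaluates the resulting cons/concat forms like any other list.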
def is_macro_call(ast, env):
if type(ast) is not mal.List:
return False
if type(ast[0]) is not mal.Symbol:
return False
fn = env.get(ast[0].name)
if type(fn) is mal.Function:
return fn.is_macro
else:
return False
def macroexpand(ast, env):
while is_macro_call(ast, env):
fn = env.get(ast[0].name)
ast = fn.fn(*ast[1:])
return ast
def PRINT(data):
return printer.pr_str(data, print_readably=True)
def eval_ast(ast, env):
if type(ast) is mal.Symbol:
return env.get(ast.name)
elif type(ast) is mal.List:
res = []
for elem in ast:
val = EVAL(elem, env)
if type(val) is mal.Error:
return val
res.append(val)
return mal.List(res)
elif type(ast) is mal.Vector:
res = []
for elem in ast:
val = EVAL(elem, env)
if type(val) is mal.Error:
return val
res.append(val)
return mal.Vector(res)
elif type(ast) is mal.Hash:
res = {}
for key, val in ast.items():
newval = EVAL(val, env)
if type(newval) is mal.Error:
return newval
res[key] = newval
return mal.Hash(res)
else:
return ast
# These builtins are defined here and not in core.py because they call EVAL:
def mal_eval(ast):
global repl_env
return EVAL(ast, repl_env)
def mal_swap(atom, fn, *args):
global repl_env
if type(atom) is not mal.Atom:
return mal.Error("TypeError",
"Expected atom, received {}".format(type(atom)))
evalled = fn.fn(atom.value, *args)
atom.set(evalled)
return evalled
def rep(line, env):
ast = READ(line)
result = EVAL(ast, env)
return PRINT(result)
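# Usage sketch (editor's note, mirroring the test setup later in this corpus;
# assumes core.ns and a fresh environment):
#   env = menv.MalEnv()
#   for sym in core.ns:
#       env.set(sym, core.ns[sym])
#   rep('(+ 1 2)', env)   # -> '3'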
def Mal(args=[]):
global repl_env
repl_env = menv.MalEnv()
for sym in core.ns:
repl_env.set(sym, core.ns[sym])
# Add eval and swap! to repl_env:
repl_env.set("eval", mal.Builtin(mal_eval))
repl_env.set("swap!", mal.Builtin(mal_swap))
# Add the command line arguments to repl_env:
repl_env.set("*ARGV*", mal.List(args[1:]))
# Add *host-language*:
repl_env.set("*host-language*", "Python3")
# Add a 'load-file' function:
rep("(def! load-file (fn* (f)"
" (eval (read-string (str \"(do \" (slurp f) \")\")))))", repl_env)
# Load Mal core
rep('(load-file "prelude.mal")', repl_env)
if len(args) >= 1:
rep('(load-file "{}")'.format(args[0]), repl_env)
return
rep("(println (str \"Mal [\" *host-language* \"]\"))", repl_env)
while True:
try:
line = input("user> ")
except EOFError:
print("\n\nBye")
break
if line == "quit":
print("\n\nBye")
break
result = rep(line, repl_env)
print(result)
if __name__ == '__main__':
Mal(sys.argv[1:])
```
#### File: joostkremers/pymal/reader.py
```python
import re
import mal_types as mal
class Reader:
"""A Reader object for storing a list of tokens and an index into that list.
"""
def __init__(self, tokens):
self.tokens = tokens
self.position = 0
def next(self):
"""Return the current token and increment the index.
If the list of tokens has been exhausted, return the empty string.
"""
token = self.peek()
self.position += 1
return token
def peek(self):
"""Return the current token.
If the list of tokens has been exhausted, return the empty string.
"""
if self.position >= len(self.tokens):
return ''
else:
return self.tokens[self.position]
reader_macros = {"'": "quote",
"`": "quasiquote",
"~": "unquote",
"~@": "splice-unquote",
"@": "deref"}
def read_str(input_str):
"""Convert INPUT_STR into a Mal object."""
tokens = tokenize(input_str)
mal_object = read_form(Reader(tokens))
return mal_object
def tokenize(input_str):
"""Tokenize INPUT_STR.
Return a list of tokens."""
token_regexp = (r'[\s,]*'
r'(~@|'
r'[\[\]{}()\'`~^@]|'
r'"(?:\\.|[^\\"])*"'
r'|;.*|'
r'[^\s\[\]{}(\'"`,;)]*)')
tokens = re.findall(token_regexp, input_str)
# The re.findall() call adds an empty match to the end of the list. I'm not
# sure how to remove this other than by checking for it explicitly. We also
# filter out comments at this point.
return [token for token in tokens if token != '' and token[0] != ';']
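# Example (editor's sketch): tokenize('(+ 1 (* 2 3)) ; note')
# returns ['(', '+', '1', '(', '*', '2', '3', ')', ')'] -- whitespace and
# commas are skipped, and the trailing comment token is filtered out above.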
def read_form(form):
token = form.next()
if token in ['(', '[', '{']:
return read_sequence(form, token)
elif token == '^': # with-meta reader macro
return apply_with_meta_macro(form)
elif token in reader_macros:
return apply_reader_macro(form, token)
elif token == '':
return None
else:
return read_atom(token)
def read_sequence(form, token):
"""Read a sequence from FORM.
This function reads list, vectors and hash tables.
"""
res = []
end_token = {'(': ')', '[': ']', '{': '}'}[token]
while True:
token = form.peek()
if token == end_token: # We've found the end of the list.
break
if token == '': # We've reached the end of FORM.
return mal.Error("ParenError", "Missing closing parenthesis")
next_form = read_form(form)
if type(next_form) is mal.Error:
return next_form
res.append(next_form)
# Now we need to move past the end token
form.next()
if end_token == ')':
return mal.List(res)
elif end_token == '}':
return create_hash(res)
else:
return mal.Vector(res)
def create_hash(items):
"""Create a hash table from ITEMS."""
# Hash tables in Mal can have strings or keywords as keys. mal.Keyword objects
# are hashable, so there's no need to use a rare Unicode character as prefix in
# order to distinguish them from strings, as suggested in the mal_guide.
if (len(items) % 2) != 0:
return mal.Error("HashError", "Insufficient number of items")
res = {}
for i in range(0, len(items), 2):
key = items[i]
if not isinstance(key, (str, mal.Keyword)):
return mal.Error("HashError",
"Cannot hash on {}".format(type(key)))
value = items[i + 1]
res[key] = value
return mal.Hash(res)
def apply_with_meta_macro(form):
data = read_form(form)
if type(data) is mal.Error:
return data
obj = read_form(form)
if type(obj) is mal.Error:
return obj
return mal.List([mal.Symbol('with-meta'), obj, data])
def apply_reader_macro(form, token):
next_form = read_form(form)
if type(next_form) is mal.Error:
return next_form
replacement = mal.Symbol(reader_macros[token])
return mal.List([replacement, next_form])
def read_atom(token):
# integers
if re.match(r'\A-?[0-9]+\Z', token):
return int(token)
# strings
if re.match(r'\A"(.*)"\Z', token):
string = token[1:-1]
string = string.replace(r'\"', '"')
string = string.replace(r'\n', '\n')
string = string.replace(r'\\', '\\')
return string
# keywords
if re.match(r'\A:.*\Z', token):
return mal.Keyword(token)
# boolean
if token == "true":
return mal.Boolean(True)
if token == "false":
return mal.Boolean(False)
# nil
if token == "nil":
return mal.NIL
# symbols
if re.match(r"[^\s\[\]{}('\"`,;)]*", token):
return mal.Symbol(token)
# Found nothing parsable. (Shouldn't really happen, since symbols are a
# catch-all already.)
return mal.Error("ParseError", "Could not parse token: '{}'".
format(token))
def main():
form = '(def (fn a (b c)) (print (+ a b)))'
print(read_str(form))
if __name__ == '__main__':
main()
```
#### File: pymal/tests/tests_step5.py
```python
import unittest
import pymal
import core
import mal_env as menv
from eval_assert import EvalAssert
class TestStep5(unittest.TestCase, EvalAssert):
def setUp(self):
self.env = menv.MalEnv()
for sym in core.ns:
self.env.set(sym, core.ns[sym])
def test_tco(self): # 34
pymal.rep('(def! sum2 (fn* (n acc)'
' (if (= n 0)'
' acc'
' (sum2 (- n 1) (+ n acc)))))', self.env)
self.assertEval('(sum2 10 0)', self.env, '55')
pymal.rep('(def! res2 nil)', self.env)
pymal.rep('(def! res2 (sum2 10000 0))', self.env)
self.assertEval('res2', self.env, '50005000')
def test_multiple_recursive_tco(self): # 35
pymal.rep('(def! foo (fn* (n)'
' (if (= n 0)'
' 0'
' (bar (- n 1)))))', self.env)
pymal.rep('(def! bar (fn* (n)'
' (if (= n 0)'
' 0'
' (foo (- n 1)))))', self.env)
self.assertEval('(foo 10000)', self.env, '0')
def test_do_under_tco(self): # 36
self.assertEval('(do (do 1 2))', self.env, '2')
def test_vector_params(self): # 37
pymal.rep('(def! g (fn* [] 78))', self.env)
self.assertEval('(g)', self.env, '78')
pymal.rep('(def! g (fn* [a] (+ a 78)))', self.env)
self.assertEval('(g 3)', self.env, '81')
```
#### File: pymal/tests/tests_step7.py
```python
import unittest
import pymal
import core
import mal_env as menv
from eval_assert import EvalAssert
class TestStep7(unittest.TestCase, EvalAssert):
def setUp(self):
self.env = menv.MalEnv()
for sym in core.ns:
self.env.set(sym, core.ns[sym])
def test_cons(self): # 47
self.assertEval('(cons 1 (list))', self.env, '(1)')
self.assertEval('(cons 1 (list 2))', self.env, '(1 2)')
self.assertEval('(cons 1 (list 2 3))', self.env, '(1 2 3)')
self.assertEval('(cons (list 1) (list 2 3))', self.env, '((1) 2 3)')
pymal.rep('(def! a (list 2 3))', self.env)
self.assertEval('(cons 1 a)', self.env, '(1 2 3)')
self.assertEval('a', self.env, '(2 3)')
def test_concat(self): # 48
self.assertEval('(concat)', self.env, '()')
self.assertEval('(concat (list 1 2))', self.env, '(1 2)')
self.assertEval('(concat (list 1 2) (list 3 4))', self.env,
'(1 2 3 4)')
self.assertEval('(concat (list 1 2) (list 3 4) (list 5 6))',
self.env, '(1 2 3 4 5 6)')
self.assertEval('(concat (concat))', self.env, '()')
pymal.rep('(def! a (list 1 2))', self.env)
pymal.rep('(def! b (list 3 4))', self.env)
self.assertEval('(concat a b (list 5 6))', self.env, '(1 2 3 4 5 6)')
self.assertEval('a', self.env, '(1 2)')
self.assertEval('b', self.env, '(3 4)')
def test_regular_quote(self): # 49
self.assertEval("(quote 7)", self.env, '7')
self.assertEval("'7", self.env, '7')
self.assertEval("(quote (1 2 3))", self.env, '(1 2 3)')
self.assertEval("'(1 2 3)", self.env, '(1 2 3)')
self.assertEval("(quote (1 2 (3 4)))", self.env, '(1 2 (3 4))')
self.assertEval("'(1 2 (3 4))", self.env, '(1 2 (3 4))')
def test_quasiquote(self): # 50
self.assertEval('(quasiquote 7)', self.env, "7")
self.assertEval('`7', self.env, "7")
self.assertEval('(quasiquote (1 2 3))', self.env, "(1 2 3)")
self.assertEval('`(1 2 3)', self.env, "(1 2 3)")
self.assertEval('(quasiquote (1 2 (3 4)))', self.env, "(1 2 (3 4))")
self.assertEval('`(1 2 (3 4))', self.env, "(1 2 (3 4))")
self.assertEval('(quasiquote (nil))', self.env, "(nil)")
self.assertEval('`(nil)', self.env, "(nil)")
def test_unquote(self): # 51
self.assertEval("`~7", self.env, '7')
self.assertEval("(def! a 8)", self.env, '8')
self.assertEval("`a", self.env, 'a')
self.assertEval("`~a", self.env, '8')
self.assertEval("`(1 a 3)", self.env, '(1 a 3)')
self.assertEval("`(1 ~a 3)", self.env, '(1 8 3)')
self.assertEval('(def! b \'(1 "b" "d"))', self.env, '(1 "b" "d")')
self.assertEval("`(1 b 3)", self.env, '(1 b 3)')
self.assertEval("`(1 ~b 3)", self.env, '(1 (1 "b" "d") 3)')
def test_splice_unquote(self): # 52
self.assertEval('(def! c \'(1 "b" "d"))', self.env, '(1 "b" "d")')
self.assertEval('`(1 c 3)', self.env, '(1 c 3)')
self.assertEval('`(1 ~@c 3)', self.env, '(1 1 "b" "d" 3)')
def test_symbol_equality(self): # 53
self.assertEval('(= \'abc \'abc)', self.env, 'true')
self.assertEval('(= \'abc \'abcd)', self.env, 'false')
self.assertEval('(= \'abc "abc")', self.env, 'false')
self.assertEval('(= "abc" \'abc)', self.env, 'false')
self.assertEval('(= "abc" (str \'abc))', self.env, 'true')
self.assertEval('(= \'abc nil)', self.env, 'false')
self.assertEval('(= nil \'abc)', self.env, 'false')
def test_cons_with_vectors(self): # 54
self.assertEval('(cons [1] [2 3])', self.env, '([1] 2 3)')
self.assertEval('(cons 1 [2 3])', self.env, '(1 2 3)')
self.assertEval('(concat [1 2] (list 3 4) [5 6])', self.env,
'(1 2 3 4 5 6)')
def test_unquote_with_vectors(self): # 55
self.assertEval('(def! a 8)', self.env, '8')
self.assertEval('`[1 a 3]', self.env, '(1 a 3)')
def test_splice_unquote_with_vectors(self): # 56
self.assertEval('(def! c \'(1 "b" "d"))', self.env, '(1 "b" "d")')
self.assertEval("`[1 ~@c 3]", self.env, '(1 1 "b" "d" 3)')
```
#### File: pymal/tests/tests_step9.py
```python
import unittest
from io import StringIO
from contextlib import redirect_stdout
import pymal
import mal_types as mal
import core
import mal_env as menv
from eval_assert import EvalAssert
class TestStep9(unittest.TestCase, EvalAssert):
def setUp(self):
self.env = menv.MalEnv()
for sym in core.ns:
self.env.set(sym, core.ns[sym])
# Add 'eval' and 'swap!' functions
self.env.set("eval", mal.Builtin(pymal.mal_eval))
self.env.set("swap!", mal.Builtin(pymal.mal_swap))
# set repl_env for 'eval'
pymal.repl_env = self.env
# Add 'load-file' and use it to load the prelude
pymal.rep('(def! load-file (fn* (f)'
' (eval'
' (read-string (str "(do " (slurp f) ")")))))',
self.env)
pymal.rep('(load-file "prelude.mal")', self.env)
def test_try_catch(self): # 69
self.assertEval('(try* 123 (catch* e 456))', self.env, '123')
f = StringIO()
with redirect_stdout(f):
res = pymal.rep('(try* (abc 1 2)'
' (catch* exc (prn "exc is:" exc)))', self.env)
self.assertEqual(res, 'nil')
self.assertEqual(f.getvalue(),
'"exc is:" Symbol value is void: \'abc\'\n')
g = StringIO()
with redirect_stdout(g):
res = pymal.rep('(try* (throw (list 1 2 3))'
' (catch* exc (do (prn "err:" exc) 7)))',
self.env)
self.assertEqual(res, '7')
self.assertEqual(g.getvalue(),
'"err:" (1 2 3)\n')
h = StringIO()
with redirect_stdout(h):
res = pymal.rep('(try* (throw "my exception")'
' (catch* exc (do (prn "err:" exc) 7)))',
self.env)
self.assertEqual(res, '7')
self.assertEqual(h.getvalue(),
'"err:" my exception\n')
def test_throw_is_function(self): # 70
self.assertEval('(try* (map throw (list 7)) (catch* exc exc))',
self.env, '7')
def test_builin_functions(self): # 71
self.assertEval("(symbol? 'abc)", self.env, 'true')
self.assertEval('(symbol? "abc")', self.env, 'false')
self.assertEval('(nil? nil)', self.env, 'true')
self.assertEval('(nil? true)', self.env, 'false')
self.assertEval('(true? true)', self.env, 'true')
self.assertEval('(true? false)', self.env, 'false')
self.assertEval('(true? true?)', self.env, 'false')
self.assertEval('(false? false)', self.env, 'true')
self.assertEval('(false? true)', self.env, 'false')
def test_apply_with_core_functions(self): # 72
self.assertEval('(apply + (list 2 3))', self.env, '5')
self.assertEval('(apply + 4 (list 5))', self.env, '9')
f = StringIO()
with redirect_stdout(f):
res = pymal.rep('(apply prn (list 1 2 "3" (list)))', self.env)
self.assertEqual(res, 'nil')
self.assertEqual(f.getvalue(), '1 2 "3" ()\n')
g = StringIO()
with redirect_stdout(g):
res = pymal.rep('(apply prn 1 2 (list "3" (list)))', self.env)
self.assertEqual(res, 'nil')
self.assertEqual(g.getvalue(), '1 2 "3" ()\n')
def test_apply_with_user_functions(self): # 73
self.assertEval('(apply (fn* (a b) (+ a b)) (list 2 3))',
self.env, '5')
self.assertEval('(apply (fn* (a b) (+ a b)) 4 (list 5))',
self.env, '9')
def test_map_function(self): # 74
pymal.rep('(def! nums (list 1 2 3))', self.env)
pymal.rep('(def! double (fn* (a) (* 2 a)))', self.env)
self.assertEval('(double 3)', self.env, '6')
self.assertEval('(map double nums) ', self.env, '(2 4 6)')
self.assertEval('(map (fn* (x)'
' (symbol? x))'
' (list 1 (symbol "two") "three"))',
self.env, '(false true false)')
def test_symbol_and_keyword_functions(self): # 75
self.assertEval('(symbol? :abc)', self.env, 'false')
self.assertEval("(symbol? 'abc)", self.env, 'true')
self.assertEval('(symbol? "abc")', self.env, 'false')
self.assertEval('(symbol? (symbol "abc"))', self.env, 'true')
self.assertEval('(keyword? :abc)', self.env, 'true')
self.assertEval("(keyword? 'abc)", self.env, 'false')
self.assertEval('(keyword? "abc")', self.env, 'false')
self.assertEval('(keyword? "")', self.env, 'false')
self.assertEval('(keyword? (keyword "abc"))', self.env, 'true')
self.assertEval('(symbol "abc")', self.env, 'abc')
self.assertEval('(keyword :abc)', self.env, ':abc')
self.assertEval('(keyword "abc")', self.env, ':abc')
def test_sequentialp(self): # 76
self.assertEval('(sequential? (list 1 2 3))', self.env, 'true')
self.assertEval('(sequential? [15])', self.env, 'true')
self.assertEval('(sequential? sequential?)', self.env, 'false')
self.assertEval('(sequential? nil)', self.env, 'false')
self.assertEval('(sequential? "abc")', self.env, 'false')
def test_apply_core_functions_with_vector(self): # 77
self.assertEval('(apply + 4 [5])', self.env, '9')
f = StringIO()
with redirect_stdout(f):
res = pymal.rep('(apply prn 1 2 ["3" 4])', self.env)
self.assertEqual(res, 'nil')
self.assertEqual(f.getvalue(), '1 2 "3" 4\n')
def test_apply_user_functions_with_vector(self): # 78
self.assertEval('(apply (fn* (a b) (+ a b)) [2 3])', self.env, '5')
self.assertEval('(apply (fn* (a b) (+ a b)) 4 [5])', self.env, '9')
def test_map_with_vector(self): # 79
self.assertEval('(map (fn* (a) (* 2 a)) [1 2 3])', self.env, '(2 4 6)')
def test_vector_functions(self): # 80
self.assertEval('(vector? [10 11])', self.env, 'true')
self.assertEval("(vector? '(12 13))", self.env, 'false')
self.assertEval('(vector 3 4 5)', self.env, '[3 4 5]')
self.assertEval('(map? {})', self.env, 'true')
self.assertEval("(map? '())", self.env, 'false')
self.assertEval('(map? [])', self.env, 'false')
self.assertEval("(map? 'abc)", self.env, 'false')
self.assertEval('(map? :abc)', self.env, 'false')
def test_hash_maps(self): # 81
self.assertEval('(hash-map "a" 1)', self.env, '{"a" 1}')
self.assertEval('{"a" 1}', self.env, '{"a" 1}')
self.assertEval('(assoc {} "a" 1)', self.env, '{"a" 1}')
self.assertEval('(get (assoc (assoc {"a" 1 } "b" 2) "c" 3) "a")',
self.env, '1')
self.assertEval('(def! hm1 (hash-map))', self.env, '{}')
self.assertEval('(map? hm1)', self.env, 'true')
self.assertEval('(map? 1)', self.env, 'false')
self.assertEval('(map? "abc")', self.env, 'false')
self.assertEval('(get nil "a")', self.env, 'nil')
self.assertEval('(get hm1 "a")', self.env, 'nil')
self.assertEval('(contains? hm1 "a")', self.env, 'false')
self.assertEval('(def! hm2 (assoc hm1 "a" 1))', self.env, '{"a" 1}')
self.assertEval('(get hm1 "a")', self.env, 'nil')
self.assertEval('(contains? hm1 "a")', self.env, 'false')
self.assertEval('(get hm2 "a")', self.env, '1')
self.assertEval('(contains? hm2 "a")', self.env, 'true')
# TODO: fix. Clojure returns nil but this breaks mal impl
self.assertEval('(keys hm1)', self.env, '()')
self.assertEval('(keys hm2)', self.env, '("a")')
# TODO: fix. Clojure returns nil but this breaks mal impl
self.assertEval('(vals hm1)', self.env, '()')
self.assertEval('(vals hm2)', self.env, '(1)')
self.assertEval('(count (keys (assoc hm2 "b" 2 "c" 3)))',
self.env, '3')
pymal.rep('(def! hm3 (assoc hm2 "b" 2))', self.env)
self.assertEval('(count (keys hm3))', self.env, '2')
self.assertEval('(count (vals hm3))', self.env, '2')
self.assertEval('(dissoc hm3 "a")', self.env, '{"b" 2}')
self.assertEval('(dissoc hm3 "a" "b")', self.env, '{}')
self.assertEval('(dissoc hm3 "a" "b" "c")', self.env, '{}')
self.assertEval('(count (keys hm3))', self.env, '2')
def test_keywords_as_hash_keys(self): # 82
self.assertEval('(get {:abc 123} :abc)', self.env, '123')
self.assertEval('(contains? {:abc 123} :abc)', self.env, 'true')
self.assertEval('(contains? {:abcd 123} :abc)', self.env, 'false')
self.assertEval('(assoc {} :bcd 234)', self.env, '{:bcd 234}')
self.assertEval('(dissoc {:cde 345 :fgh 456} :cde)',
self.env, '{:fgh 456}')
self.assertEval('(keyword? (nth (keys {:abc 123 :def 456}) 0))',
self.env, 'true')
self.assertEval('(keyword? (nth (keys {":abc" 123 ":def" 456}) 0))',
self.env, 'false')
self.assertEval('(keyword? (nth (vals {"a" :abc "b" :def}) 0))',
self.env, 'true')
def test_nil_as_hash_value(self): # 83
self.assertEval('(contains? {:abc nil} :abc)', self.env, 'true')
self.assertEval('(assoc {} :bcd nil)', self.env, '{:bcd nil}')
self.assertEval('(dissoc {:cde nil :fgh 456} :cde)',
self.env, '{:fgh 456}')
def test_equality_of_hash_maps(self): # 84
self.assertEval('(= {} {})', self.env, 'true')
self.assertEval('(= {:a 11 :b 22} (hash-map :b 22 :a 11))',
self.env, 'true')
self.assertEval('(= {:a 11 :b [22 33]} (hash-map :b [22 33] :a 11))',
self.env, 'true')
self.assertEval('(= {:a 11 :b {:c 33}} (hash-map :b {:c 33} :a 11))',
self.env, 'true')
self.assertEval('(= {:a 11 :b 22} (hash-map :b 23 :a 11))',
self.env, 'false')
self.assertEval('(= {:a 11 :b 22} (hash-map :a 11))',
self.env, 'false')
self.assertEval('(= {:a [11 22]} {:a (list 11 22)})',
self.env, 'true')
self.assertEval('(= {:a 11 :b 22} (list :a 11 :b 22))',
self.env, 'false')
self.assertEval('(= {} [])', self.env, 'false')
self.assertEval('(= [] {})', self.env, 'false')
def test_additional_str_and_pr_str(self): # 85
self.assertEval('(str "A" {:abc "val"} "Z")',
self.env, '"A{:abc val}Z"')
self.assertEval('(str true "." false "." nil "." :keyw "." \'symb)',
self.env, '"true.false.nil.:keyw.symb"')
self.assertEval('(pr-str "A" {:abc "val"} "Z")',
self.env, r'"\"A\" {:abc \"val\"} \"Z\""')
self.assertEval('(pr-str true "." false "." nil "." :keyw "." \'symb)',
self.env,
r'"true \".\" false \".\" nil \".\" :keyw \".\" symb"')
pymal.rep('(def! s (str {:abc "val1" :def "val2"}))', self.env)
self.assertEval('(or (= s "{:abc val1 :def val2}")'
' (= s "{:def val2 :abc val1}"))', self.env, 'true')
pymal.rep('(def! p (pr-str {:abc "val1" :def "val2"}))', self.env)
self.assertEval(r'(or (= p "{:abc \"val1\" :def \"val2\"}")'
r' (= p "{:def \"val2\" :abc \"val1\"}"))',
self.env, 'true')
``` |
{
"source": "joostrijneveld/bogrod",
"score": 3
} |
#### File: bogrod/banking/asn_sync.py
```python
import csv
import io
from decimal import Decimal
import requests
from bs4 import BeautifulSoup
from django.utils.timezone import datetime
from banking.models import Account, Transaction
def login(username, password):
session = requests.Session()
url = 'https://www.asnbank.nl/onlinebankieren/secure/loginparticulier.html'
src = session.get(url).text
soup = BeautifulSoup(src, 'lxml')
token = soup.find('input', {'name': 'ibp.integrity.token'})['value']
session.integrity_token = token
payload = {
'orgurl': '',
'ibp.integrity.token': session.integrity_token,
'j_username': username,
'j_password': password,
'action_sendWithDigicode': 'Inloggen',
}
url = 'https://www.asnbank.nl/onlinebankieren/secure/j_security_check'
response = session.post(url, data=payload)
return session, response
def logout(session):
url = ("https://www.asnbank.nl"
"/onlinebankieren/secure/logout/logoutConfirm.html")
response = session.get(url)
return session, response
def import_accounts(session):
url = ("https://www.asnbank.nl/onlinebankieren"
"/homepage/secure/homepage/homepage.html")
response = session.get(url)
soup = BeautifulSoup(response.text, 'lxml')
for table in soup.find_all('table'):
account_type = table.find('th').text
if 'Betalen' in account_type:
account_type = 'checking'
elif 'Sparen' in account_type:
account_type = 'savings'
elif 'Beleggen' in account_type:
account_type = 'investment'
continue # TODO handle this properly; has IBAN uniqueness issues
else:
account_type = 'other'
for tr in table.find('tbody').find_all('tr'):
try:
iban = tr['account']
except KeyError:
continue
account, cr = Account.objects.get_or_create(iban=iban)
account.account_type = account_type
account.save()
return session, response
def import_transactions(session, account, since=None, until=None):
url = ("https://www.asnbank.nl/onlinebankieren"
"/bankieren/secure/transacties/transactieoverzicht.html"
"?accountNr={}").format(account.iban)
src = session.get(url).text
soup = BeautifulSoup(src, 'lxml')
# TODO this can probably be minimized; this was copied verbatim
payload = {
'ibp.integrity.token':
soup.find('input', {'name': 'ibp.integrity.token'})['value'],
'formId': 'transactionForm',
"pageName": "betalen",
"accountNr": account.iban,
"tabstop_sl_accountNr_rekening": "",
"range": "LAST_MONTH",
"creditAccount": "",
"nameSecondParty": "",
"reference": "",
"description": "",
"dateStart": "",
"dateEnd": "",
"amountLow": "",
"amountLowCents": "",
"amountHigh": "",
"amountHighCents": "",
"searchDebetCredit":
soup.find('input', {'name': 'searchDebetCredit'})['value'],
"accountNumber": "",
"downloadDateStart": "",
"downloadDateEnd": "",
"downloadtype": "ALLE_TRANSACTIES", # TODO use since and until here
"cbValue_downloadFilter": "cbExists",
"filetype": "CSVIBAN",
"action_downloadTransactions": "Start+downloaden",
"action": "Print+deze+transactie+(pdf)",
"action": "Print+deze+transactie+(pdf)",
"action": "Print+deze+transactie+(pdf)",
"action": "Print+deze+transactie+(pdf)",
"action": "Print+deze+transactie+(pdf)",
"action": "Print+deze+transactie+(pdf)",
"action": "Print+deze+transactie+(pdf)",
"action": "Print+deze+transactie+(pdf)",
"action": "Print+deze+transactie+(pdf)",
"action": "Print+deze+transactie+(pdf)",
"action": "Print+deze+transactie+(pdf)",
"action": "Print+deze+transactie+(pdf)",
"action": "Print+deze+transactie+(pdf)",
"action": "Print+deze+transactie+(pdf)",
"action": "Print+deze+transactie+(pdf)",
"pagingSize": (soup.find('select', {'name': 'pagingSize'})
.find_all('option', selected=True)[0]['value']),
"transactionOffset": "",
"receivings": soup.find('input', {'name': 'receivings'})['value'],
"expenses": soup.find('input', {'name': 'expenses'})['value'],
"showSearch": "",
"sequenceNr": "",
"journalDate": "",
"action": "",
}
response = session.post(url, data=payload)
csvreader = csv.reader(io.StringIO(response.text), delimiter=',')
for row in csvreader:
try:
t = Transaction.objects.get(
journal_date=datetime.strptime(row[11], '%d-%m-%Y'),
sequence_number=int(row[15]),
)
except Transaction.DoesNotExist:
t = Transaction(
journal_date=datetime.strptime(row[11], '%d-%m-%Y'),
sequence_number=int(row[15]),
)
t.booking_date = datetime.strptime(row[0], '%d-%m-%Y')
t.account, cr = Account.objects.get_or_create(iban=row[1])
t.counter_account, cr = Account.objects.get_or_create(iban=row[2])
t.counter_name = row[3]
t.account_currency = row[7]
t.balance_before = Decimal(row[8])
t.mutation_currency = row[9]
t.mutation_value = Decimal(row[10])
t.value_date = datetime.strptime(row[12], '%d-%m-%Y')
t.internal_code = int(row[13])
t.global_code = row[14]
t.reference = row[16]
t.description = row[17]
t.statement_number = int(row[18])
t.save()
return session, response
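# End-to-end usage sketch (credentials and ORM queries below are
# placeholders, not part of the original source):
#
#   session, _ = login('username', 'password')
#   import_accounts(session)
#   for account in Account.objects.all():
#       import_transactions(session, account)
#   logout(session)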
``` |
{
"source": "joostrijneveld/dominosa",
"score": 3
} |
#### File: joostrijneveld/dominosa/dominosa.py
```python
from itertools import combinations_with_replacement as cwr
from collections import Counter
board = [[2, 3, 2, 2, 1],
[1, 1, 0, 2, 3],
[0, 3, 3, 1, 0],
[0, 3, 1, 0, 2]]
board = [[5, 5, 0, 4, 5, 5, 1],
[5, 0, 3, 4, 1, 1, 4],
[1, 0, 3, 1, 2, 5, 1],
[2, 4, 4, 3, 2, 2, 1],
[0, 2, 4, 2, 4, 0, 0],
[3, 2, 3, 5, 0, 3, 3]]
# board = [[2, 1, 5, 0, 5, 2, 1, 5, 6],
# [5, 1, 7, 2, 0, 0, 5, 3, 6],
# [1, 5, 6, 0, 4, 3, 1, 4, 7],
# [1, 5, 4, 2, 2, 3, 0, 0, 6],
# [2, 4, 3, 7, 3, 2, 4, 5, 2],
# [1, 7, 1, 4, 4, 7, 3, 7, 7],
# [1, 6, 3, 5, 0, 7, 6, 0, 0],
# [4, 6, 2, 4, 6, 3, 5, 3, 7]]
board = list(zip(*board)) # makes coordinates more intuitive
N = max([x for row in board for x in row]) + 1
class Pair(object):
def __init__(self, v, x, y, horizontal=False):
self.v = v
self.x = x
self.y = y
self.horizontal = horizontal
def __eq__(self, other):
if isinstance(other, type(self.v)):
return self.v == other
else:
return self.v == other.v
def __hash__(self):
return self.v.__hash__()
def __repr__(self):
return "({},{})-{}-{}".format(self.x, self.y, self.horizontal, self.v)
def possible_pairs(board):
for x in range(N+1):
for y in range(N):
if x > 0:
yield Pair(frozenset([board[x][y], board[x-1][y]]), x, y, True)
if y > 0:
yield Pair(frozenset([board[x][y], board[x][y-1]]), x, y)
def find_all_xy(pairs, x, y):
for p in list(pairs): # copy the list to prevent modification errors
if p.x == x and p.y == y:
yield p
elif p.horizontal and p.x-1 == x and p.y == y:
yield p
elif not p.horizontal and p.x == x and p.y-1 == y:
yield p
def remove_all_overlapping(pairs, pair):
def remove_all_xy(pairs, x, y):
for p in find_all_xy(pairs, x, y):
# because of __eq__ trickery, we cannot use the regular list.remove
for i, el in enumerate(pairs):
if el is p:
del pairs[i]
remove_all_xy(pairs, pair.x, pair.y)
if pair.horizontal:
remove_all_xy(pairs, pair.x-1, pair.y)
else:
remove_all_xy(pairs, pair.x, pair.y-1)
def remove_eq_except(all_pairs, pair, except_pairs):
    # rebuild in place; deleting from a list while enumerating it skips
    # the element that shifts into the freed slot
    all_pairs[:] = [p for p in all_pairs
                    if any(p is x for x in except_pairs) or p != pair]
def pretty_print(board, found_pairs):
output = [['.'] * (2*N + 1) for x in range(2*N + 3)]
for p in found_pairs:
output[2*p.x+1][2*p.y+1] = board[p.x][p.y]
output[2*p.x+2][2*p.y+1] = '|'
output[2*p.x+1][2*p.y+2] = '-'
if p.horizontal:
output[2*p.x-1][2*p.y+1] = board[p.x-1][p.y]
output[2*p.x][2*p.y+1] = ' '
for i in [-1, 1]:
output[2*p.x+i][2*p.y] = '-'
output[2*p.x+i][2*p.y+2] = '-'
output[2*p.x-2][2*p.y+1] = '|'
else:
output[2*p.x+1][2*p.y-1] = board[p.x][p.y-1]
output[2*p.x+1][2*p.y] = ' '
for i in [-1, 1]:
output[2*p.x][2*p.y-i] = '|'
output[2*p.x+2][2*p.y-i] = '|'
output[2*p.x+1][2*p.y-2] = '-'
for row in list(zip(*output)):
for s in row:
print(s, end=' ')
print()
def solve(board):
boardpairs = list(possible_pairs(board))
found_pairs = set([])
while boardpairs:
# check for unique pairs
for p, c in Counter(boardpairs).items():
if c == 1:
found_pairs.add(p)
remove_all_overlapping(boardpairs, p)
boardpairs = [x for x in boardpairs if x not in found_pairs]
for x in range(N+1):
for y in range(N):
# for all unused fields
if len(list(find_all_xy(found_pairs, x, y))) == 0:
# test if there is only one alternative
pairs = list(find_all_xy(boardpairs, x, y))
if len(pairs) == 1:
found_pairs.add(pairs[0])
remove_all_overlapping(boardpairs, pairs[0])
# or if all of these alternatives are identical
elif len(set(pairs)) == 1:
# remove other occurrences of this pair
remove_eq_except(boardpairs, pairs[0], pairs)
boardpairs = [x for x in boardpairs if x not in found_pairs]
return found_pairs
pretty_print(board, solve(board))
``` |
{
"source": "joostrijneveld/eetvoorkeur",
"score": 2
} |
#### File: joostrijneveld/eetvoorkeur/app.py
```python
from flask import Flask
from flask import render_template
from flask_socketio import SocketIO, emit
from hashlib import sha256
import sys
app = Flask(__name__)
app.config['SECRET_KEY'] = 'replaceme'
app.config['ADMIN_URL'] = '/admin'
app.config['DEBUG'] = True
# Replace the above secrets and specify other overrides here, or alternatively,
# create a config.py file that has a configure(app) function that adds these.
try:
import config
config.configure(app)
except ImportError:
pass
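# A matching config.py could look like this (sketch; not part of the
# repository):
#
#   def configure(app):
#       app.config['SECRET_KEY'] = 'replace-with-something-random'
#       app.config['ADMIN_URL'] = '/my-hidden-admin'
#       app.config['DEBUG'] = False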
socketio = SocketIO(app)
admin_secret = app.config['SECRET_KEY'] + "ADMIN_SECRET"
app.config['ADMIN_SECRET'] = sha256(admin_secret.encode('utf-8')).hexdigest()
# eetvoorkeur relies completely on a run-time state. This means that the state
# is reset whenever the app is restarted. Future versions might rely on a
# database of some kind, but for now, this was the easiest prototype.
state = {"step": 1,
"options": [{'name': '<NAME>', 'votes': 0},
{'name': '<NAME>', 'votes': 0},
{'name': 'Lotus', 'votes': 0},
],
"deadlines": ["16:00", "17:00", "18:15"],
}
@app.route('/')
def index(admin=False):
return render_template('index.html', admin=admin, state=state)
@app.route(app.config['ADMIN_URL'])
def admin():
return index(admin=app.config['ADMIN_SECRET'])
@socketio.on('state update')
def update_state(message):
if ('admin_secret' not in message or
message['admin_secret'] != app.config['ADMIN_SECRET']):
return
if state['step'] == 0 and 'deadlines' in message:
state['step'] = 1
state['deadlines'] = message['deadlines']
emit('state change', state, broadcast=True)
@socketio.on('vote')
def vote(message):
if 'option' in message and message['option'] < len(state['options']):
state['options'][message['option']]['votes'] += 1
emit('state change', state, broadcast=True)
@socketio.on('new option')
def new_option(message):
if ('newoption' in message and
message['newoption'] not in [x['name'] for x in state['options']]):
state['options'].append({'name': message['newoption'], 'votes': 0})
emit('state change', state, broadcast=True)
socketio.run(app, debug=True)
``` |
{
"source": "joostrijneveld/merkletreetraversal",
"score": 3
} |
#### File: joostrijneveld/merkletreetraversal/common.py
```python
import struct
from hashlib import sha256
from collections import namedtuple
from functools import wraps
Node = namedtuple('Node', ['h', 'v'])
cost = 0
def countcost(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
global cost
cost += 1
return fn(*args, **kwargs)
return wrapper
@countcost
def leafcalc(j):
return sha256(struct.pack("I", j)).digest()
@countcost
def g(v):
return sha256(v).digest()
def recursive_hash(h, i=0):
"""Computes the root node of a hashtree naively."""
if h == 0:
return leafcalc(i)
    return g(recursive_hash(h - 1, i) + recursive_hash(h - 1, i + 2 ** (h - 1)))
def treehash(h):
"""Computes the root node using treehash."""
stack = []
for j in range(2 ** h):
node1 = Node(h=0, v=leafcalc(j))
while stack and stack[-1].h == node1.h:
node2 = stack.pop()
node1 = Node(h=node1.h+1, v=g(node2.v + node1.v))
stack.append(node1)
return stack.pop()
def end_of_tree(idx, tree_h, level):
return ((idx + 1) & ((1 << ((level+1)*tree_h)) - 1)) == 0
def compute_root(H, idx, authpath):
"""Computes the root node of the tree from leaf idx using the auth path."""
v = leafcalc(idx)
for authnode in authpath:
if idx & 1:
v = g(authnode.v + v)
else:
v = g(v + authnode.v)
idx >>= 1
return v
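# Sanity check (illustrative, not part of the original module): treehash
# and the naive recursion must agree, and compute_root rebuilds the root
# for leaf 0 from an authentication path of right-hand siblings.
if __name__ == '__main__':
    h = 3
    assert treehash(h).v == recursive_hash(h)
    authpath = [Node(h=i, v=recursive_hash(i, 2 ** i)) for i in range(h)]
    assert compute_root(h, 0, authpath) == recursive_hash(h)
    print('ok; hash calls so far: %d' % cost)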
``` |
{
"source": "joostrijneveld/rss-to-ical-flask",
"score": 3
} |
#### File: joostrijneveld/rss-to-ical-flask/rss-to-ical.py
```python
from flask import Flask
import icalendar
import feedparser
import pytz
import datetime
import os
import sys
app = Flask(__name__)
if not os.environ.get('RSS_FEED_URL'):
    sys.exit(1)
@app.route("/")
def get_feed():
feed = feedparser.parse(os.environ['RSS_FEED_URL'])
cal = icalendar.Calendar()
cal.add('prodid', '-//rss-to-ical-flask//')
cal.add('version', '2.0')
for post in feed.entries:
event = icalendar.Event()
event.add('dtstart', datetime.datetime(*post.published_parsed[:6],
tzinfo=pytz.timezone("GMT")))
event.add('dtend', datetime.datetime(*post.published_parsed[:6],
tzinfo=pytz.timezone("GMT")) +
datetime.timedelta(hours=2))
event.add('dtstamp', datetime.datetime(*post.published_parsed[:6],
tzinfo=pytz.timezone("GMT")))
event.add('summary', post.title)
event.add('url', post.link)
cal.add_component(event)
return cal.to_ical()
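# Example local run (assumed invocation):
#   RSS_FEED_URL=https://example.com/feed.xml FLASK_APP=rss-to-ical.py flask run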
``` |
{
"source": "joostrijneveld/sequencefinder",
"score": 4
} |
#### File: joostrijneveld/sequencefinder/sequencefinding.py
```python
"""Find the constant difference hidden in a sequence of integers.

Usage:
    sequencefinding.py NUMBERS...
"""
import docopt
def differences(a):
return [t - s for s, t in zip(a, a[1:])]
def constant_difference(a):
if len(a) <= 1:
return None
if all(a[1] - a[0] == x for x in differences(a)):
return a[1] - a[0]
else:
return None
def linear_difference(a):
depth = 1
while len(a) > 2:
cdiff = constant_difference(a)
if cdiff is not None:
return cdiff, depth
a = differences(a)
depth += 1
return None
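# Worked example (illustrative): for the input 2 5 10 17 26 the first
# difference layer is [3, 5, 7, 9] and the second is [2, 2, 2], so
# linear_difference returns (2, 2): a constant difference of 2 at depth 2.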
args = docopt.docopt(__doc__)
numbers = list(map(int, args['NUMBERS']))
lin_diff = linear_difference(numbers)
if lin_diff is not None:
print("Found a linear difference of {} on layer {}.".format(*lin_diff))
else:
print("No sequence found.")
``` |
{
"source": "JoostScheffer/vim-pyref",
"score": 3
} |
#### File: misc/pyref/spider.py
```python
import os
import re
import sys
import time
import urllib
DEBUG = False
indexfile = os.path.expanduser('~/.vim/misc/pyref_index')
scriptname = os.path.split(sys.argv[0])[1]
def message(text, *args):
text = '%s: ' + text + '\n'
text %= (scriptname,) + args
sys.stderr.write(text)
def verbose(text, *args):
if DEBUG:
message(text, *args)
def error(text, *args):
message(text, *args)
sys.exit(1)
# Make sure the Beautiful Soup HTML parser is available.
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
error("""You'll need to install the Beautiful Soup HTML parser. If you're running
Debian/Ubuntu try the following: sudo apt-get install python-beautifulsoup""")
# Make sure the user provided a location to spider.
if len(sys.argv) < 2:
error("Please provide the URL to spider as a command line argument.")
# Validate/munge the location so it points to an index.html page.
root = sys.argv[1].replace('file://', '')
if not root.startswith('http://'):
root = os.path.realpath(root)
if os.path.isdir(root):
page = os.path.join(root, 'index.html')
    if os.path.isfile(page):
root = page
else:
error("Failed to determine index page in %r!", root)
elif not os.path.isfile(root):
error("The location %r doesn't seem to exist!", root)
root = 'file://' + root
first_page = root
root = os.path.split(root)[0]
# If the index file already exists, read it so we can merge the results.
anchors = {}
if os.path.isfile(indexfile):
message("Reading existing entries from %s", indexfile)
handle = open(indexfile)
nfiltered = 0
for line in handle:
anchor, target = line.strip().split('\t')
if target.startswith(root):
nfiltered += 1
else:
anchors[anchor] = target
handle.close()
message("Read %i and filtered %i entries", len(anchors), nfiltered)
# Start from the given location and collect anchors from all related pages.
queued_pages = [first_page]
visited_pages = {}
while queued_pages:
location = queued_pages.pop()
# Fetch the selected page.
try:
verbose("Fetching %r", location)
handle = urllib.urlopen(location)
contents = handle.read()
handle.close()
if not location.startswith('file://'):
# Rate limit fetching of remote pages.
time.sleep(1)
  except Exception:
verbose("Failed to fetch %r!", location)
continue
# Mark the current page as visited so we don't fetch it again.
visited_pages[location] = True
# Parse the page's HTML to extract links and anchors.
verbose("Parsing %r", location)
tagsoup = BeautifulSoup(contents)
npages = 0
for tag in tagsoup.findAll('a', href=True):
target = tag['href']
# Strip anchors and ignore anchor-only links.
target = re.sub('#.*$', '', target)
if target:
# Convert the link target to an absolute, canonical URL?
if not re.match(r'^\w+://', target):
target = os.path.join(os.path.split(location)[0], target)
scheme, target = target.split('://')
target = scheme + '://' + os.path.normpath(target)
# Ignore links pointing outside the root URL and don't process any page more than once.
if target.startswith(root) and target not in visited_pages and target not in queued_pages:
queued_pages.append(target)
npages += 1
nidents = 0
for tag in tagsoup.findAll(True, id=True):
anchor = tag['id']
if anchor not in anchors:
anchors[anchor] = '%s#%s' % (location, anchor)
nidents += 1
else:
verbose("Ignoring identifier %r duplicate target %r!", anchor, location)
message("Extracted %i related pages, %i anchors from %r..", npages, nidents, location)
message("Scanned %i pages, extracted %i anchors", len(visited_pages), len(anchors))
# Write the tab delimited list of (keyword, URL) pairs to the index file.
message("Writing index file %r", indexfile)
handle = open(indexfile, 'w')
bytes_written = 0
for anchor in sorted(anchors.keys()):
line = '%s\t%s\n' % (anchor, anchors[anchor])
handle.write(line)
bytes_written += len(line)
handle.close()
message("Done, wrote %i KB to %r", bytes_written / 1024, indexfile)
# vim: ts=2 sw=2 et
``` |
{
"source": "joostsijm/python_supremacy1914",
"score": 3
} |
#### File: python_supremacy1914/supremacy1914_wrapper/wrapper.py
```python
import time
import json
import requests
class Supremacy():
"""The supremacy class allow easy asses to the Supremacy 1914 API"""
game_id = None
url = None
debug = 0
default_params = {
"@c": "ultshared.action.UltUpdateGameStateAction",
"playerID": 0,
"userAuth": "<PASSWORD>",
"tstamp": int(time.time())
}
headers = {
"Host": "xgs8.c.bytro.com",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:57.0) " +
"Gecko/20100101 Firefox/57.0",
"Accept": "text/plain, */*; q=0.01",
"Accept-Language": "en-US,en;q=0.5",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Origin": "https://www.supremacy1914.nl",
"DNT": "1",
"Connection": "keep-alive",
"Pragma": "no-cache",
"Cache-Control": "no-cache"
}
def __init__(self, game_id, url=None, debug=None):
"""Initialize api"""
self.game_id = game_id
self.url = url if url else "http://xgs1.c.bytro.com"
self.default_params["gameID"] = game_id
if debug and isinstance(debug, int):
self.debug = debug
def all(self):
"""Return all information"""
return self._request()
def game(self):
"""Return game information"""
return self._request(12)
def coalitions(self):
"""Return coalition list and members"""
result = self._request(2)
return result["teams"] if "teams" in result else None
def players(self):
"""Return list of players"""
return self._request(1)
def market(self):
"""Return market prices"""
return self._request(4)
def score(self, day):
"""Return score of specified day"""
return self._request(2, day)
def relations(self):
"""Return list of relations between people"""
return self._request(5)
def _request(self, state_type=None, day=None):
"""Make request to the server"""
        # per-request copy so stateType/option don't leak between calls
        params = dict(self.default_params)
if state_type is not None:
params["stateType"] = state_type
if day is not None:
params["option"] = day
request = requests.post(self.url, headers=self.headers, json=params)
response = json.loads(request.text)
if self.debug >= 2:
print_json(response)
if "@c" in response["result"] and \
response["result"]["@c"] == "ultshared.rpc.UltSwitchServerException":
if "newHostName" in response["result"]:
new_url = "http://%s" % response["result"]["newHostName"]
if self.debug >= 1:
print("new host: %s for %s" % (new_url, self.game_id))
raise ServerChangeError(new_url)
if self.debug >= 1:
print("Game %s does not exist" % self.game_id)
raise GameDoesNotExistError("Game %s is not found" % self.game_id)
return response["result"]
class GameDoesNotExistError(Exception):
"""Raise when game does not exist"""
class ServerChangeError(Exception):
"""Raise when server has changed"""
def print_json(json_text):
"""Print data to console"""
print(json.dumps(json_text, sort_keys=True, indent=4))
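# Usage sketch for the server-change mechanism (game id is a placeholder);
# the exception's string value is the new host announced by the server:
#
#   api = Supremacy(12345)
#   while True:
#       try:
#           players = api.players()
#       except ServerChangeError as exc:
#           api.url = str(exc)  # retry against the announced host
#           continue
#       break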
``` |
{
"source": "joostsijm/rival_regions_wrapper",
"score": 3
} |
#### File: rival_regions_wrapper/wrapper/conference.py
```python
from rival_regions_wrapper import LOGGER, api
from rival_regions_wrapper.wrapper.abstract_wrapper import AbstractWrapper
class Conference(AbstractWrapper):
"""Wrapper class for confernce"""
def __init__(self, middleware, conference_id):
AbstractWrapper.__init__(self, middleware)
self.conference_id = conference_id
def message(self, message):
"""Send message to conference"""
LOGGER.info(
'"%s": CONF "%s": start send message',
self.middleware.username,
self.conference_id,
)
api.conference_message(self.middleware, self.conference_id, message)
def notification(self, message, sound):
"""Send notification to conference"""
LOGGER.info(
'"%s": CONF: %s notification',
self.middleware.username,
self.conference_id,
)
return api.conference_notification(
self.middleware, self.conference_id, message, sound
)
def change_title(self, title):
"""Change title of conference"""
LOGGER.info(
'"%s": CONF: %s change title: %s',
self.middleware.username,
self.conference_id,
title,
)
return api.conference_change_title(
self.middleware, self.conference_id, title
)
```
#### File: rival_regions_wrapper/wrapper/market.py
```python
import re
from bs4 import BeautifulSoup
from rival_regions_wrapper import util
from rival_regions_wrapper.wrapper.abstract_wrapper import AbstractWrapper
class Market(AbstractWrapper):
"""Wrapper class for profile"""
def info(self, resource):
"""Get profile"""
if isinstance(resource, str) and resource in util.ITEM_KEYS:
resource = util.ITEM_KEYS[resource]
path = "storage/listed/{}".format(resource)
response = self.middleware.get(path)
soup = BeautifulSoup(response, "html.parser")
offers_tree = soup.find_all(class_="list_link")
offers = []
for offer_tree in offers_tree:
offers.append(
{
"player_id": int(
re.sub(
r"^.*\/",
"",
offer_tree.select_one(".results_date")["action"],
)
),
"player_name": offer_tree.select_one(
".results_date"
).string,
"price": int(
float(offer_tree.select(".list_level")[1]["rat"])
* 100
),
"amount": int(
offer_tree.select_one(".list_level.imp.small")["rat"]
),
}
)
return offers
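# Shape of one returned offer (illustrative values; 'price' is in
# hundredths, since the scraped rate is multiplied by 100):
#   {'player_id': 12345, 'player_name': 'SomePlayer',
#    'price': 150, 'amount': 2000}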
```
#### File: rival_regions_wrapper/wrapper/overview.py
```python
from bs4 import BeautifulSoup
from rival_regions_wrapper.wrapper.abstract_wrapper import AbstractWrapper
from rival_regions_wrapper.wrapper.perks import Perks
class Overview(AbstractWrapper):
"""Wrapper class for perks"""
def info(self):
"""Get overview"""
path = "main/content"
response = self.middleware.get(path)
soup = BeautifulSoup(response, "html.parser")
perks = Perks.info_parse(soup)
auto_war = soup.select_one(".war_index_war span.pointer:nth-child(4)")
if auto_war and auto_war.has_attr("action"):
auto_war = auto_war["action"].replace("war/details/", "")
else:
auto_war = None
overview = {
"perks": perks,
"war": {
"auto_war": auto_war,
},
}
return overview
def status(self):
"""Get current status"""
path = "main"
response = self.middleware.get(path)
soup = BeautifulSoup(response, "html.parser")
profile_url = soup.select_one("#header_my_avatar")["action"]
party_url = soup.select_one("#party_menu_members")["action"]
stats = {
"profile_id": int(profile_url.replace("slide/profile/", "")),
"party_id": int(party_url.replace("listed/party/", "")),
"gold": int(soup.select_one("#g").text.replace(".", "")),
"money": int(soup.select_one("#m").text.replace(".", "")),
"level": int(soup.select_one("#exp_level").text),
"exp": int(soup.select_one("#exp_points").text),
}
return stats
``` |
{
"source": "joostsijm/ssg",
"score": 2
} |
#### File: modules/backend/app.py
```python
import os
import shutil
from flask_login import login_required
from flask_menu import register_menu
from flask import render_template, request, redirect, url_for, flash, Blueprint
from app.models import Page, File, User
BLUEPRINT = Blueprint(
'backend',
__name__,
template_folder='templates'
)
BASE_PATH = 'app/modules/static/pages/'
@BLUEPRINT.route('/')
@register_menu(BLUEPRINT, 'index', 'Home')
@login_required
def index():
"""Show homepage"""
pages = Page.query.filter(Page.parent_id == None).all()
files = File.query.all()
users = User.query.all()
return render_template(
'site/index.j2',
pages=pages,
files=files,
users=users
)
@BLUEPRINT.route('/render')
@register_menu(BLUEPRINT, 'render', 'Render')
@login_required
def render():
"""Render pages to file"""
pages = Page.query.filter(Page.parent_id == None).all()
menu = []
for page in pages:
if page.title != 'index':
menu.append(generate_menu(page))
path_base = 'app/modules/static/pages/'
path_public = path_base + "public"
path_private = path_base + "private"
if os.path.exists(path_public):
shutil.rmtree(path_public)
os.makedirs(path_public)
if os.path.exists(path_private):
shutil.rmtree(path_private)
os.makedirs(path_private)
for page in pages:
generate_directory('', page)
for page in pages:
render_page('', page, menu, False)
flash('Successfully rendered pages.', 'success')
return redirect(request.referrer, code=302)
def generate_menu(page):
"""Generate menu based on pages"""
menu_item = {}
menu_item['title'] = page.title
menu_item['url'] = page.path()
menu_item['private'] = page.private
if page.children.count():
menu_item['children'] = []
for child_page in page.children:
menu_item['children'].append(generate_menu(child_page))
return menu_item
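# For a page "About" with one child page "Team", generate_menu returns
# roughly (illustrative values):
#   {'title': 'About', 'url': '...', 'private': False,
#    'children': [{'title': 'Team', 'url': '...', 'private': False}]}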
def generate_directory(path, page):
"""Generate directories for pages"""
if page.children.count():
parent_path = path + page.url() + '/'
public_path = BASE_PATH + 'public/' + path + page.url()
private_path = BASE_PATH + 'private/' + path + page.url()
if not os.path.exists(public_path):
os.makedirs(public_path)
if not os.path.exists(private_path):
os.makedirs(private_path)
for child_page in page.children:
generate_directory(parent_path, child_page)
def render_page(path, page, menu, private):
"""Function for page generation, recursive"""
if page.private:
private = True
if page.children.count():
parent_path = path + page.url() + '/'
for child_page in page.children:
render_page(parent_path, child_page, menu, private)
path += page.url()
private_path = '%s%s/%s.html' % (BASE_PATH, 'private', path)
with open(private_path, 'w') as file:
rendered_page = render_template(
'public/private.j2',
page=page,
menu=menu
)
file.write(rendered_page)
if not private:
public_path = '%s%s/%s.html' % (BASE_PATH, 'public', path)
with open(public_path, 'w') as file:
rendered_page = render_template(
'public/public.j2',
page=page,
menu=menu
)
file.write(rendered_page)
```
#### File: migrations/versions/476b167aef80_initial_migration.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('registration_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('name')
)
op.create_table('page',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(), nullable=False),
sa.Column('datetime', sa.DateTime(), nullable=True),
sa.Column('source', sa.String(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('page')
op.drop_table('user')
``` |
{
"source": "joostsijm/supremapy",
"score": 3
} |
#### File: supremapy/app/flaskr.py
```python
import requests
from subprocess import call
from datetime import datetime, timedelta
from flask import render_template, jsonify, request, redirect, url_for, flash
from flask_breadcrumbs import register_breadcrumb
from flask_menu import register_menu
from flask_login import login_required, login_user, logout_user, current_user
from sqlalchemy.sql.expression import false, true
from app import app, login_manager, webhook, db
from app.models import Game, User, Player, Relation, Resource, Price
from app.util.job import Job, MarketJob
from app.util import sync
@login_manager.user_loader
def load_user(user_id):
"""Return user"""
return User.query.get(user_id)
@register_breadcrumb(app, '.login', 'Login')
@app.route("/login", methods=["GET", "POST"])
def login():
"""Handle login page and data"""
if request.method == 'POST':
email = request.form['email']
password = request.form['password']
user = User.query.filter(User.email == email).first()
if user is not None:
if user.check_password(password):
login_user(user, remember=True)
flash('You were successfully logged in.', 'success')
if request.args.get("next") is not None:
return redirect(request.args.get("next"))
return redirect(url_for('index'))
else:
flash('Incorrect password.', 'danger')
else:
flash('User not found.', 'danger')
return redirect(url_for('login'))
else:
return render_template('user/login.html')
@app.route("/register", methods=["POST"])
def register():
"""Register a new user"""
if request.method != "POST":
return redirect(url_for('login'))
if "name" not in request.form or not request.form['name']:
flash('Fill in the name.', 'warning')
return render_template('user/login.html')
if "email" not in request.form or not request.form['email']:
flash('Fill in the email.', 'warning')
return render_template('user/login.html', name=request.form['name'])
if "password" not in request.form or not request.form['password']:
flash('Fill in the password.', 'warning')
return render_template(
'user/login.html',
name=request.form['name'],
email=request.form['email']
)
user = User.query.filter(User.name == request.form['name']).first()
if user is None:
flash('Name not found.', 'warning')
return render_template(
'user/login.html',
name=request.form['name'],
email=request.form['email']
)
if user.email is not None:
flash('User already taken.', 'warning')
return render_template(
'user/login.html',
name=request.form['name'],
email=request.form['email']
)
user.email = request.form['email']
user.password = request.form['password']
db.session.commit()
login_user(user, remember=True)
flash('Succesfully registered account "%s".' % (user.name), 'success')
if request.args.get("next") is not None:
return redirect(request.args.get("next"))
else:
return redirect(url_for('index'))
@app.route("/logout")
@login_required
def logout():
"""Logout function for users"""
logout_user()
flash('succesfully logged out.', 'success')
return redirect(url_for('login'))
@app.route('/')
@register_menu(app, '.', 'Home')
@register_breadcrumb(app, '.', 'Home')
def index():
"""Show homepage"""
if current_user.is_authenticated:
games = current_user.players.filter(
Player.game.has(Game.end_of_game == false())
).order_by(
Player.game_id.desc()
).all()
else:
games = None
return render_template(
'site/index.html',
games=games
)
@app.route('/games')
@register_menu(app, 'games', 'Games')
@register_breadcrumb(app, '.games', 'Games')
def game_index():
"""Return game index"""
games = Game.query.all()
return render_template('game/index.html', games=games)
def game_overview_dlc(*args, **kwargs):
"""Generate dynamic_list for games"""
game_id = request.view_args['game_id']
game = Game.query.filter(Game.game_id == game_id).first()
return [{'text': game.game_id, 'url': game.url}]
@app.route('/game/<int:game_id>')
@register_breadcrumb(app, '.games.game_id', '',
dynamic_list_constructor=game_overview_dlc)
def game_overview(game_id):
"""Show game overview"""
game_id = int(game_id)
game = Game.query.filter(Game.game_id == game_id).first()
players = game.active_players()
return render_template('game/overview.html', game=game, players=players)
@app.route('/game/<int:game_id>/players')
@register_breadcrumb(app, '.games.game_id', '',
dynamic_list_constructor=game_overview_dlc)
def game_players(game_id):
"""Show game overview"""
game_id = int(game_id)
game = Game.query.filter(Game.game_id == game_id).first()
players = game.all_players()
return render_template('game/players.html', game=game, players=players)
@app.route('/game/<int:game_id>/relations')
@register_breadcrumb(app, '.games.game_id', '',
dynamic_list_constructor=game_overview_dlc)
def game_relations(game_id):
"""Show game relations"""
game_id = int(game_id)
game = Game.query.filter(Game.game_id == game_id).first()
players = game.active_players()
return render_template('game/relations.html', game=game, players=players)
@app.route('/game/<int:game_id>/market')
@register_breadcrumb(app, '.games.game_id', '',
dynamic_list_constructor=game_overview_dlc)
def game_market(game_id):
"""Show game market"""
game_id = int(game_id)
game = Game.query.filter(Game.game_id == game_id).first()
market_job = MarketJob(game)
return render_template('game/market.html', game=game, market_job=market_job)
@app.route('/game/<int:game_id>/config')
@register_breadcrumb(app, '.games.game_id', '',
dynamic_list_constructor=game_overview_dlc)
def game_config(game_id):
"""Show game config"""
game_id = int(game_id)
game = Game.query.filter(Game.game_id == game_id).first()
return render_template('game/config.html', game=game)
@app.route('/api/game/<int:game_id>/config', methods=['POST'])
def api_game_config(game_id):
"""Save game config"""
game_id = int(game_id)
game = Game.query.filter(Game.game_id == game_id).first()
    game.track_game = request.form.get('track_game') == 'on'
    game.track_score = request.form.get('track_score') == 'on'
    game.track_players = request.form.get('track_players') == 'on'
    game.track_relations = request.form.get('track_relations') == 'on'
    game.track_market = request.form.get('track_market') == 'on'
    game.track_coalitions = request.form.get('track_coalitions') == 'on'
MarketJob(game).check()
Job(game).check()
db.session.commit()
return redirect(request.referrer, code=302)
@app.route('/api/game/<int:game_id>/score/<string:score_type>')
def api_game_score(game_id, score_type):
"""Returns list days with players"""
game_id = int(game_id)
game = Game.query.filter(Game.game_id == game_id).first()
day_dict = {}
for day in game.days:
if day.day not in day_dict:
day_dict[day.day] = {}
day_dict[day.day]["day"] = day.day
day_dict[day.day][day.player.name] = day.points
day_list = []
for day in day_dict:
day_list.append(day_dict[day])
player_list = []
if score_type == "players":
players = game.players.filter(Player.user_id != None).all()
elif score_type == "active":
three_days_ago = datetime.now() - timedelta(days=3)
players = game.players.filter(Player.last_login >= three_days_ago).all()
else:
players = game.players
for player in players:
player_list.append({
"title": player.nation_name,
"valueField": player.name,
"lineColor": player.primary_color,
})
score = {
"days": day_list,
"players": player_list,
}
return jsonify(score)
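# Response shape (illustrative names and values): each day entry is keyed
# by player.name, while each series maps that key to a display title:
#   {"days": [{"day": 1, "player1": 321, "player2": 280}, ...],
#    "players": [{"title": "France", "valueField": "player1",
#                 "lineColor": "#aabbcc"}, ...]}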
@app.route('/api/game/<int:game_id>/relations/<relation_type>')
def api_game_relations(game_id, relation_type):
"""Returns list of players with relationships"""
game_id = int(game_id)
game = Game.query.filter(Game.game_id == game_id).first()
player_list = []
for player in game.players:
native_relations = player.native_relations.filter(Relation.end_day == None).filter(Relation.status == relation_type)
foreign_relations = player.foreign_relations.filter(Relation.end_day == None).filter(Relation.status == relation_type)
if native_relations.count() or foreign_relations.count():
relation_list = []
for relation in native_relations.all():
relation_list.append(relation.player_foreign.nation_name)
player_list.append({
"name": player.nation_name,
"imports": relation_list,
})
return jsonify(player_list)
@app.route('/api/game/<int:game_id>/force_relations')
def api_game_force_relations(game_id):
"""Returns list of players with relationships"""
game_id = int(game_id)
game = Game.query.filter(Game.game_id == game_id).first()
relation_list = []
for player in game.players:
for relation in player.native_relations.filter(Relation.end_day == None):
relation_list.append({
"source": relation.player_native.nation_name,
"target": relation.player_foreign.nation_name,
"type": relation.status_formatted,
})
return jsonify(relation_list)
@app.route('/api/game/<int:game_id>/edge_relations')
def api_game_edge_relations(game_id):
"""Returns list of players with relationships"""
game_id = int(game_id)
game = Game.query.filter(Game.game_id == game_id).first()
player_list = []
for player in game.players.order_by(Player.nation_name).all():
war = []
right_of_way = []
share_map = []
share_info = []
native_relations = player.native_relations.filter(Relation.end_day == None)
foreign_relations = player.foreign_relations.filter(Relation.end_day == None)
if native_relations.count() or foreign_relations.count():
for relation in native_relations.all():
if relation.status == -2:
war.append(relation.player_foreign.nation_name)
elif relation.status == 3:
right_of_way.append(relation.player_foreign.nation_name)
elif relation.status == 4:
share_map.append(relation.player_foreign.nation_name)
elif relation.status == 6:
share_info.append(relation.player_foreign.nation_name)
player_list.append({
"name": player.nation_name,
"wars": war,
"right_of_ways": right_of_way,
"share_maps": share_map,
"share_infos": share_info,
})
return jsonify(player_list)
@app.route('/api/game/<int:game_id>/market/<string:resource_type>')
@app.route('/api/game/<int:game_id>/market', defaults={'resource_type': None})
def api_market(game_id, resource_type):
"""Returns list of markets with prices"""
game_id = int(game_id)
game = Game.query.filter(Game.game_id == game_id).first()
market_dict = {}
resources = {
"grain": 0,
"fish": 1,
"iron": 2,
"wood": 3,
"coal": 4,
"oil": 5,
"gas": 6
}
resource_id = resources.get(resource_type, None)
for market in game.markets:
dict_ = {}
# prices = market.prices
if resource_id is None:
prices = market.prices.all()
else:
prices = market.prices.filter(Price.resource_id == resource_id).all()
for price in prices:
name = price.resource.name
if price.buy:
name = "sell_%s" % name
else:
name = "buy_%s" % name
dict_[name] = str(price.value)
dict_["date"] = market.datetime.strftime('%m-%d %H:%M')
market_dict[market.datetime] = dict_
market_list = []
for market in market_dict:
market_list.append(market_dict[market])
resource_list = []
if resource_id is None:
resources = Resource.query.all()
else:
resources = Resource.query.filter(Resource.id == resource_id).all()
for resource in resources:
resource_list.append({
"title": "buy %s" % resource.name,
"valueField": "buy_%s" % resource.name,
"lineColor": resource.color,
"price_type": "buy",
})
resource_list.append({
"title": "sell %s" % resource.name,
"valueField": "sell_%s" % resource.name,
"lineColor": resource.color,
"price_type": "sell",
})
market_prices = {
"markets": market_list,
"resources": resource_list,
}
return jsonify(market_prices)
@app.route('/api/game/sync', methods=['POST'])
def api_sync_game():
"""Update game in the database"""
game_id = request.form.get('game_id')
sync_type = request.form.get('sync_type')
game = Game.query.filter(Game.game_id == game_id).first()
try:
if game is not None:
if sync_type == 'score':
sync.update_score(game)
elif sync_type == 'relations':
sync.update_relations(game)
elif sync_type == 'players':
sync.update_players(game)
elif sync_type == 'market':
sync.update_market(game)
elif sync_type == 'game':
sync.update_game(game)
else:
game = sync.new_game(game_id)
sync.update_players(game)
except sync.GameDoesNotExistError:
flash('Game %s doesn\'t exist anymore.' % game_id, 'danger')
except requests.exceptions.ConnectionError:
flash('Supremacy server connection error.', 'warning')
if "games" in request.referrer:
return redirect(url_for("game_overview", game_id=game_id), code=302)
return redirect(request.referrer, code=302)
@app.route('/users')
@register_menu(app, 'users', 'Users')
@register_breadcrumb(app, '.users', 'Users')
def user_index():
"""Return user index"""
users = User.query.all()
return render_template('user/index.html', users=users)
def user_overview_dlc(*args, **kwargs):
"""Generate dynamic_list for user"""
site_id = request.view_args['site_id']
user = User.query.filter(User.site_id == site_id).first()
return [{'text': user.name, 'url': user.url}]
@app.route('/user/<int:site_id>')
@register_breadcrumb(app, '.users.site_id', '',
dynamic_list_constructor=user_overview_dlc)
def user_overview(site_id):
"""Show user overview"""
site_id = int(site_id)
user = User.query.filter(User.site_id == site_id).first()
return render_template('user/overview.html', user=user)
@app.route('/user_claim', methods=['POST'])
def user_claim():
if "name" in request.form:
return render_template(
'user/login.html',
name=request.form['name'],
)
return redirect(url_for('login'))
@app.errorhandler(404)
def page_not_found(error):
    """Render the 404 page"""
    return render_template('site/404.html'), 404
@app.errorhandler(500)
def internal_server_error(error):
    """Render the 500 page"""
    return render_template('site/500.html'), 500
@webhook.hook()
@app.route('/deploy/<int:data>')
def on_push(data):
    """Deploy hook: pull the latest code and rebuild assets"""
    call(["git", "pull"])
call(["yarn"])
call(["touch", "flask.wsgi"])
call(["yarn", "gulp"])
return jsonify(True)
```
#### File: app/util/sync.py
```python
import json
from datetime import datetime
from sqlalchemy.sql import and_
from supremacy1914_wrapper import Supremacy, ServerChangeError, GameDoesNotExistError
from app import db
from app.models import Game, Map, Player, User, Relation, Day, SyncLog, Market, Order, Price
# with open('reference/output4.json') as file:
# result = json.load(file)
def server_change_handler(func):
"""Add catch for exception"""
def wrapper(game):
print("Running %s function" % func.__name__)
log = SyncLog()
log.function = func.__name__
log.game_id = game.id
db.session.add(log)
db.session.commit()
try:
func(game)
except ServerChangeError as exception:
game.game_host = str(exception)
db.session.commit()
func(game)
except GameDoesNotExistError:
game.end_of_game = True
game.end_at = datetime.now()
db.session.commit()
log.succes = True
db.session.commit()
return wrapper
@server_change_handler
def update_score(game):
"""Update result to current day"""
supremacy = Supremacy(game.game_id, game.game_host)
current_day = game.day
for day_index in range(game.last_day, current_day):
day_index += 1
result = supremacy.score(day_index)
ranking = result["ranking"]["ranking"]
ranking.pop(0)
player_id = 0
for score in ranking:
player_id += 1
if score >= 20:
player = game.players.filter(Player.player_id == player_id).first()
day = player.days.filter(Day.day == day_index).first()
if day is None:
day = Day()
day.day = day_index
day.points = score
day.game_id = game.id
day.player_id = player.id
db.session.add(day)
db.session.commit()
def new_game(game_id):
"""Save new game results to database"""
game = Game()
game.game_id = game_id
game.game_host = 'https://xgs8.c.bytro.com/'
supremacy = Supremacy(game.game_id, game.game_host)
while True:
try:
result = supremacy.game()
except ServerChangeError as exception:
new_server = str(exception)
game.game_host = new_server
supremacy.url = new_server
continue
break
_update_game(game, result)
game.start_at = datetime.fromtimestamp(result["startOfGame"])
game.password = result["password"]
game.scenario = result["scenarioID"]
game.ranked = result["ranked"]
game.gold_round = result["goldRound"]
game.ai_level = result["aiLevel"]
game.country_selection = result["countrySelection"]
game.time_scale = result["timeScale"]
# game.team_setting = result["teamSettings"]
game.victory_points = result["victoryPoints"]
game.research_days_offset = result["researchDaysOffset"]
if "researchTimeScale" in result:
game.research_time_scale = result["researchTimeScale"]
else:
game.research_time_scale = 1.0
game.team_victory_points = result["teamVictoryPoints"]
game_map = Map.query.filter(Map.map_id == result["mapID"]).first()
if game_map is None:
game_map = Map()
game_map.map_id = result["mapID"]
game_map.name = result["mapID"]
game_map.slots = result["openSlots"] + result["numberOfPlayers"]
db.session.add(game_map)
db.session.commit()
game.map_id = game_map.id
db.session.add(game)
db.session.commit()
return game
@server_change_handler
def update_game(game):
"""Update game to database"""
supremacy = Supremacy(game.game_id, game.game_host)
result = supremacy.game()
_update_game(game, result)
db.session.commit()
return game
def _update_game(game, result):
"""Update game stats that change"""
game.number_of_players = result["numberOfPlayers"] - result["openSlots"]
game.end_of_game = result["endOfGame"]
game.day_of_game = result["dayOfGame"]
game.next_day_time = datetime.fromtimestamp(
result["nextDayTime"] / 1000
)
return game
@server_change_handler
def update_players(game):
"""Update players to database"""
supremacy = Supremacy(game.game_id, game.game_host)
result = supremacy.players()
result = result["players"]
for player_id in result:
player_data = result[player_id]
if "playerID" in player_data:
player_id = int(player_data["playerID"])
if player_id > 0:
player = Player.query.filter(
and_(
Player.game_id == game.id,
Player.player_id == player_id
)
).first()
if player is None:
player = Player()
player.start_day = game.last_day
player.nation_name = player_data["nationName"]
player.primary_color = player_data["primaryColor"]
player.secondary_color = player_data["secondaryColor"]
player.game_id = game.id
player.player_id = player_id
if "userName" in player_data and not player.user_id:
user = User.query.filter(
User.name == player_data["userName"]
).first()
if user is None:
user = User()
user.site_id = player_data["siteUserID"]
user.name = player_data["userName"]
db.session.add(user)
db.session.commit()
player.user_id = user.id
player.title = player_data["title"]
player.name = player_data["name"]
player.flag_image_id = player_data["flagImageID"]
player.player_image_id = player_data["playerImageID"]
player.computer_player = player_data["computerPlayer"]
player.native_computer = player_data["nativeComputer"]
player.defeated = player_data["defeated"]
if player_data["lastLogin"] != 0:
player.last_login = datetime.fromtimestamp(
player_data["lastLogin"] / 1000
)
db.session.add(player)
db.session.commit()
@server_change_handler
def update_relations(game):
"""Get the relations"""
supremacy = Supremacy(game.game_id, game.game_host)
result = supremacy.relations()
result = result["relations"]["neighborRelations"]
game.relations.update({Relation.end_day: game.last_day})
for native_id in result:
relations = result[native_id]
for foreign_id in relations:
if foreign_id != native_id:
relation_status = relations[foreign_id]
native_player = game.players.filter(
Player.player_id == native_id
).first()
foreign_player = game.players.filter(
Player.player_id == foreign_id
).first()
relation = game.relations.filter(and_(
Relation.player_native_id == native_player.id,
Relation.player_foreign_id == foreign_player.id
)).order_by(Relation.start_day.desc()).first()
if relation is None:
relation = Relation()
relation.game_id = game.id
relation.player_native_id = native_player.id
relation.player_foreign_id = foreign_player.id
relation.start_day = game.day
relation.status = relation_status
db.session.add(relation)
elif relation_status == relation.status:
relation.end_day = None
db.session.commit()
@server_change_handler
def update_coalitions(game):
"""Get game coalitions"""
@server_change_handler
def update_market(game):
"""Get market prices"""
supremacy = Supremacy(game.game_id, game.game_host)
result = supremacy.market()
orders = result["asks"][1] + result["bids"][1]
market = Market()
market.game_id = game.id
market.datetime = datetime.now()
db.session.add(market)
prices = {}
for resource in orders:
if resource[1]:
lowest_order = resource[1][0]
price = Price()
price.value = lowest_order["limit"]
price.buy = lowest_order["buy"]
price.resource_id = lowest_order["resourceType"]
market.prices.append(price)
prices[price.resource_id] = price
for order_json in resource[1]:
player = game.players.filter(Player.player_id == order_json["playerID"]).first()
order = Order()
order.order_id = order_json["orderID"]
order.amount = order_json["amount"]
order.buy = order_json["buy"]
order.limit = order_json["limit"]
order.resource_id = order_json["resourceType"]
market.orders.append(order)
if player is not None:
player.orders.append(order)
db.session.add(order)
db.session.commit()
prev_market = market.previous
if prev_market:
prev_prices = prev_market.price_list
if prev_prices:
for resource, price in prices.items():
if prev_prices[resource]:
price.previous_id = prev_prices[resource].id
for resource, price in prices.items():
prev_price = price.previous
if prev_price:
prev_prev_price = prev_price.previous
if prev_prev_price:
if prev_prev_price.value == prev_price.value and \
prev_price.value == price.value:
price.previous_id = prev_prev_price.id
db.session.commit()
db.session.delete(prev_price)
db.session.commit()
def print_json(json_text):
"""Print data to console"""
print(json.dumps(json_text, sort_keys=True, indent=4))
if __name__ == "__main__":
update_score.__module__ = "sync"
```
#### File: migrations/versions/66ea45aadc78_add_track_settings.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '66ea45aadc78'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('sp_games', sa.Column('track_coalitions', sa.Boolean(), nullable=True))
op.add_column('sp_games', sa.Column('track_game', sa.Boolean(), nullable=True))
op.add_column('sp_games', sa.Column('track_market', sa.Boolean(), nullable=True))
op.add_column('sp_games', sa.Column('track_players', sa.Boolean(), nullable=True))
op.add_column('sp_games', sa.Column('track_relations', sa.Boolean(), nullable=True))
op.add_column('sp_games', sa.Column('track_score', sa.Boolean(), nullable=True))
op.drop_column('sp_games', 'fetch_at')
op.drop_column('sp_games', 'last_result_time')
def downgrade():
op.add_column('sp_games', sa.Column('last_result_time', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
op.add_column('sp_games', sa.Column('fetch_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
op.drop_column('sp_games', 'track_score')
op.drop_column('sp_games', 'track_relations')
op.drop_column('sp_games', 'track_players')
op.drop_column('sp_games', 'track_market')
op.drop_column('sp_games', 'track_game')
op.drop_column('sp_games', 'track_coalitions')
``` |
{
"source": "JoostvanPinxten/ConstraintPuzzler",
"score": 2
} |
#### File: gui/puzzlemodel/puzzletreemodel.py
```python
from PySide import QtGui, QtCore
from PySide.QtCore import Qt
import sys
import gui.icons.icons_rc
ParentRole = QtCore.Qt.UserRole + 1
class ConstraintPuzzleModel(QtCore.QAbstractItemModel):
def __init__(self, parent=None):
super(ConstraintPuzzleModel, self).__init__(parent)
self.root = ConstraintPuzzleRootItem()
def rowCount(self, parent = QtCore.QModelIndex()):
parentItem = self.getItem(parent)
return parentItem.childCount()
def getItem(self, index):
if index.isValid():
item = index.internalPointer()
if item:
return item
return self.root
def columnCount(self, parent):
return 2
def index(self, row, column, parent):
if parent.isValid() and parent.column() != 0:
return QtCore.QModelIndex()
parentItem = self.getItem(parent)
childItem = parentItem.child(row)
if childItem:
return self.createIndex(row, column, childItem)
else:
return QtCore.QModelIndex()
def parent(self, index):
# the index must be valid
if not index.isValid():
return QtCore.QModelIndex()
childItem = self.getItem(index)
parentItem = childItem.parent()
if parentItem == self.root:
return QtCore.QModelIndex()
return self.createIndex(parentItem.childNumber(), 0, parentItem)
def data(self, index, role = Qt.DisplayRole):
if not index.isValid():
return None
if role == Qt.DisplayRole:
item = self.getItem(index)
if (not item):
return self.tr("not set")
else:
return item.data(index.column())
elif role == Qt.DecorationRole:
            if index.column() != 0:
return None
item = self.getItem(index)
if (not item):
return None
else:
return item.getIcon()
else:
return None
def headerData(self, section, orientation, role = QtCore.Qt.DisplayRole):
sections = [self.tr("Name"), self.tr("Type")]
if role == QtCore.Qt.DisplayRole:
return sections[section]
else:
return QtCore.QAbstractItemModel.headerData(self, section, orientation, role)
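# Minimal wiring sketch (assumed usage; the puzzle object and widget names
# are placeholders). Proxy items attach themselves to their parent, so
# building the tree before setModel keeps the view consistent:
#
#   model = ConstraintPuzzleModel()
#   puzzle_item = PuzzleProxyItem(puzzle, model.root)
#   view = QtGui.QTreeView()
#   view.setModel(model)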
class ProxyItem(QtCore.QObject):
def __init__(self, parent=None):
super(ProxyItem, self).__init__()
self.parentItem = parent
self.childItems = []
        if parent is not None:
parent.addChild(self)
def parent(self):
return self.parentItem
def childCount(self):
return len(self.childItems)
def children(self):
return list(self.childItems)
def child(self, row):
try:
return self.childItems[row]
except IndexError:
#print row
pass
def childNumber(self):
if self.parentItem != None:
return self.parentItem.childItems.index(self)
return 0
def addChild(self, child):
self.childItems.append(child)
def data(self, column):
if(column == 0):
return self.getName()
elif(column == 1):
return self.getType()
def getName(self):
raise NotImplementedError
def getType(self):
raise NotImplementedError
def getIcon(self):
return QtGui.QColor(255,255,255)
def setParent(self, parent):
parent.addChild(self)
self.parentItem = parent
def getItem(self):
raise NotImplementedError
class ConstraintPuzzleRootItem(ProxyItem):
def __init__(self):
super(ConstraintPuzzleRootItem, self).__init__()
def getName(self):
return self.tr("Root")
def getType(self):
return self.tr("Root")
class PuzzleProxyItem(ProxyItem):
def __init__(self, puzzle, parent=None):
super(PuzzleProxyItem, self).__init__(parent)
self.puzzle = puzzle
def getName(self):
return self.puzzle.name
def setName(self, name):
self.puzzle.name = name
def getType(self):
return self.tr("Puzzle")
class GridProxyItem(ProxyItem):
def __init__(self, grid, parent):
super(GridProxyItem, self).__init__(parent)
self.grid = grid
def getName(self):
return self.tr("Grid")
def getType(self):
return self.tr("Grid")
def getIcon(self):
return QtGui.QIcon(":/icons/gridIcon")
def getCells(self):
s = set()
[s.add(c) for c in self.grid.getCells()]
return s
def getPuzzle(self):
return self.parent()
def getItem(self):
return self.grid
class CellProxyItem(ProxyItem):
def __init__(self, cell, parent):
super(CellProxyItem, self).__init__(parent)
self.cell = cell
def getName(self):
if(self.cell.getName):
return self.cell.getName()
return self.tr("Cell")
def getType(self):
return self.tr("Cell")
def getIcon(self):
return QtGui.QIcon(":/icons/cellIcon")
def getCells(self):
s = set()
s.add(self.cell)
return s
def getItem(self):
return self.cell
class ReferencedCellProxyItem(ProxyItem):
def __init__(self, cell, parent):
super(ReferencedCellProxyItem, self).__init__(parent)
self.cell = cell
def getName(self):
# getName is a bound method and therefore always truthy; call it so the
# "Cell" fallback can actually trigger when no name is set
name = self.cell.getName()
if name:
return name
return self.tr("Cell")
def getType(self):
return self.tr("Cell")
def getIcon(self):
return QtGui.QIcon(":/icons/cellIcon")
def getCells(self):
s = set()
s.add(self.cell)
return s
def getItem(self):
return self.cell
class ConstraintGroupProxyItem(ProxyItem):
def __init__(self, cg, parent):
super(ConstraintGroupProxyItem, self).__init__(parent)
self.constraintGroup = cg
def getName(self):
# getName is a bound method and therefore always truthy; call it so the
# fallback can actually trigger when no name is set
name = self.constraintGroup.getName()
if name:
return name
return "<not set>"
def getType(self):
return self.tr("Constraint Group")
def getCells(self):
return set(self.constraintGroup.getCells())
def getItem(self):
return self.constraintGroup
class ConstraintProxyItem(ProxyItem):
def __init__(self, constraint, parent):
super(ConstraintProxyItem, self).__init__(parent)
self.constraint = constraint
def getName(self):
return "Untitled"
def getType(self):
return self.tr(self.constraint.getType())
def getIcon(self):
return QtGui.QIcon(":/icons/constraintIcon")
def getCells(self):
return self.parent().getCells()
def getItem(self):
return self.constraint
```
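A minimal, hypothetical sketch of the parent/child wiring above (it assumes PySide is importable; `ConstraintPuzzleRootItem` stands in for both nodes only because it is the only concrete subclass that wraps no puzzle object):
```python
# Hypothetical sketch of the ProxyItem parent/child wiring above.
root = ConstraintPuzzleRootItem()
child = ConstraintPuzzleRootItem()  # stand-in: needs no wrapped puzzle object
child.setParent(root)               # registers child in root.childItems
assert root.childCount() == 1
assert root.child(0) is child
assert child.childNumber() == 0     # the row index the model feeds to createIndex()
```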
#### File: gui/puzzlerepresentation/valuetriangle.py
```python
from PySide import QtGui, QtCore
from math import *
def centerTextItem(text):
form = QtGui.QTextBlockFormat()
form.setAlignment(QtCore.Qt.AlignCenter)
cursor = text.textCursor()
cursor.select(QtGui.QTextCursor.Document)
cursor.mergeBlockFormat(form)
cursor.clearSelection()
class ValueTriangle(QtGui.QGraphicsPolygonItem):
TOPLEFT=0
TOPRIGHT=1
BOTTOMRIGHT=2
BOTTOMLEFT=3
def __init__(self, constraint, cellSize, position, alignment=None, parent=None):
super(ValueTriangle, self).__init__(
QtGui.QPolygon(), parent)
self.setPos(position*cellSize)
self.constraint = constraint
self.position = position
self.cellSize = cellSize
if ( alignment == None):
self.alignment = ValueTriangle.TOPRIGHT
else:
self.alignment = alignment
# combine a horizontal flag with a vertical flag: e.g. AlignLeft with AlignTop to create a layout thingy
self.setCacheMode(QtGui.QGraphicsItem.DeviceCoordinateCache)
self.instantiateRepresentation()
self.updateRepresentation()
self.setPen(QtGui.QPen(QtCore.Qt.white))
self.surrounding = QtGui.QGraphicsRectItem(QtCore.QRect(0,0, self.cellSize, self.cellSize), self)
self.surrounding.setBrush(QtCore.Qt.transparent)
def mousePressEvent(self, event):
# TODO: be able to set the value?
# delegate to the actual base class (QGraphicsPolygonItem, not QGraphicsRectItem)
return super(ValueTriangle, self).mousePressEvent(event)
def instantiateRepresentation(self):
# add a big text item to show the set value, hidden by default
self.valueTextItem = QtGui.QGraphicsTextItem(str(self.constraint.getTotalValue()))
self.valueTextItem.setParentItem(self)
self.valueTextItem.setPos(0, self.cellSize/6)
f = QtGui.QFont("Sans serif", self.cellSize/4 ,0)
self.valueTextItem.setDefaultTextColor(QtCore.Qt.white)
self.valueTextItem.setFont(f)
self.valueTextItem.setTextWidth(self.cellSize)
self.setBrush(QtCore.Qt.black)
# align to center of cell
centerTextItem(self.valueTextItem)
self.valueTextItem.setOpacity(1)
def updateRepresentation(self):
# TODO: add the two other cases
if(self.alignment == ValueTriangle.TOPRIGHT):
self.setPolygon(QtGui.QPolygon([QtCore.QPoint(0,0),QtCore.QPoint(self.cellSize,self.cellSize),QtCore.QPoint(self.cellSize,0)]))
self.valueTextItem.setPos(self.cellSize/6,0)
centerTextItem(self.valueTextItem)
else:
self.setPolygon(QtGui.QPolygon([QtCore.QPoint(0,0),QtCore.QPoint(self.cellSize,self.cellSize),QtCore.QPoint(0,self.cellSize)]))
#self.valueTextItem.setPos(0, self.cellSize/6)
self.valueTextItem.setPos(-self.cellSize/6,self.cellSize/2)
centerTextItem(self.valueTextItem)
```
#### File: gui/ui/mainWindowUi.py
```python
from PySide import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(858, 646)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.puzzleGraphicsView = QtGui.QGraphicsView(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(50)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.puzzleGraphicsView.sizePolicy().hasHeightForWidth())
self.puzzleGraphicsView.setSizePolicy(sizePolicy)
self.puzzleGraphicsView.setObjectName("puzzleGraphicsView")
self.horizontalLayout.addWidget(self.puzzleGraphicsView)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 858, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.puzzleStructureDockWidget = QtGui.QDockWidget(MainWindow)
self.puzzleStructureDockWidget.setFeatures(QtGui.QDockWidget.DockWidgetFloatable|QtGui.QDockWidget.DockWidgetMovable)
self.puzzleStructureDockWidget.setObjectName("puzzleStructureDockWidget")
self.dockWidgetContents = QtGui.QWidget()
self.dockWidgetContents.setObjectName("dockWidgetContents")
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.dockWidgetContents)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.puzzleTreeView = QtGui.QTreeView(self.dockWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(150)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.puzzleTreeView.sizePolicy().hasHeightForWidth())
self.puzzleTreeView.setSizePolicy(sizePolicy)
self.puzzleTreeView.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.puzzleTreeView.setObjectName("puzzleTreeView")
self.verticalLayout_2.addWidget(self.puzzleTreeView)
self.solverGroupBox = QtGui.QGroupBox(self.dockWidgetContents)
self.solverGroupBox.setObjectName("solverGroupBox")
self.verticalLayout_3 = QtGui.QVBoxLayout(self.solverGroupBox)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.solvePushButton = QtGui.QPushButton(self.solverGroupBox)
self.solvePushButton.setObjectName("solvePushButton")
self.verticalLayout_3.addWidget(self.solvePushButton)
self.verticalLayout_2.addWidget(self.solverGroupBox)
self.horizontalLayout_2.addLayout(self.verticalLayout_2)
self.puzzleStructureDockWidget.setWidget(self.dockWidgetContents)
MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(1), self.puzzleStructureDockWidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
self.puzzleStructureDockWidget.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Puzzle structure", None, QtGui.QApplication.UnicodeUTF8))
self.solverGroupBox.setTitle(QtGui.QApplication.translate("MainWindow", "Solver steps", None, QtGui.QApplication.UnicodeUTF8))
self.solvePushButton.setText(QtGui.QApplication.translate("MainWindow", "Solve", None, QtGui.QApplication.UnicodeUTF8))
```
#### File: solver/tests/solverTests.py
```python
import unittest
from utility.puzzlefactory import PuzzleFactory
from solver.Solver import Solver
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testNakedPair(self):
sudoku = PuzzleFactory.createEmptySudoku()
#initialize a grid that contains a naked pair
sudoku.grid.setConstraintGridValue(3,1,7)
sudoku.grid.setConstraintGridValue(4,1,4)
sudoku.grid.setConstraintGridValue(6,2,7)
sudoku.grid.setConstraintGridValue(7,2,4)
sudoku.grid.setConstraintGridValue(0,3,7)
sudoku.grid.setConstraintGridValue(0,4,4)
# now the second and third cells must contain the possible values [4, 7]: these are
# the only two cells that can hold those values, and those are also their only candidates
c1_0 = sudoku.grid.getCellFromSquareGrid(1,0)
c2_0 = sudoku.grid.getCellFromSquareGrid(2,0)
solver = Solver(sudoku)
solver.solve()
print(sudoku.grid)
self.assertEqual(c1_0.getPossibleValues(), set([4,7]), "Did not correctly identify naked pair")
self.assertEqual(c2_0.getPossibleValues(), set([4,7]), "Did not correctly identify naked pair")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
#### File: ConstraintPuzzler/structure/puzzle.py
```python
from math import *
import constraints
from structure.cell import Cell, PositionedCell
from gui.puzzlemodel.puzzletreemodel import PuzzleProxyItem
from structure.item import Item
from structure.grid import Grid
class Puzzle(Item):
""" Puzzle is the top-level item which holds the constraint groups and grid """
def __init__(self, name, values):
self.values = list(values)
self.constraintGroups = []
self.item = PuzzleProxyItem(self)
self.name = name
self.grid = Grid(self.values, self)
def getGrid(self):
return self.grid
def getValues(self):
return list(self.values)
def getItem(self):
return self.item
def getParentItem(self):
return self.parent
def setParentItem(self, parent):
self.item.setParent(parent)
def addConstraintGroup(self, name):
cg = constraints.ConstraintGroup(self, name)
self.constraintGroups.append(cg)
return cg
def getConstraintGroups(self):
return self.constraintGroups
def getNumberOfOpenCells(self):
# open cells are the ones that do not hold a value yet
nr = 0
for c in self.grid.cells:
if not c.hasValue():
nr += 1
return nr
``` |
{
"source": "joostvanzwieten/nutils",
"score": 2
} |
#### File: nutils/devtools/_log_gha.py
```python
from typing import Any
def debug(*args: Any) -> None:
print(*args)
info = debug
def warning(*args: Any) -> None:
for line in ' '.join(map(str, args)).split('\n'):
print('::warning ::{}'.format(line))
def error(*args: Any) -> None:
for line in ' '.join(map(str, args)).split('\n'):
print('::error ::{}'.format(line))
def set_output(key: str, value: str) -> None:
print('::set-output name={}::{}'.format(key, value))
print('OUTPUT: {}={}'.format(key, value))
``` |
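The helpers in `_log_gha.py` just emit GitHub Actions workflow commands on stdout; a quick sketch of the line format, called as if from inside the module (the values are made up):
```python
# Sketch of the emitted workflow-command lines (values are hypothetical).
warning('disk is low\ncleaning caches')
# ::warning ::disk is low
# ::warning ::cleaning caches
set_output('wheel', 'dist/example.whl')
# ::set-output name=wheel::dist/example.whl
# OUTPUT: wheel=dist/example.whl
```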
{
"source": "joostvanzwieten/treelog",
"score": 2
} |
#### File: treelog/treelog/_io.py
```python
import os, contextlib, random, functools, typing, types, sys
supports_fd = os.supports_dir_fd >= {os.open, os.link, os.unlink}
_devnull = os.open(os.devnull, os.O_WRONLY)
_opener = lambda path, flags: os.dup(_devnull)
devnull = functools.partial(open, os.devnull, opener=_opener)
class directory:
'''Directory with support for dir_fd.'''
def __init__(self, path: str) -> None:
os.makedirs(path, exist_ok=True)
if supports_fd:
# convert to file descriptor
self._fd = os.open(path, flags=os.O_RDONLY) # type: typing.Optional[int]
self._path = None # type: typing.Optional[str]
else:
self._fd = None
self._path = path
self._rng = randomnames()
def _join(self, name: str) -> str:
return name if self._path is None else os.path.join(self._path, name)
def open(self, filename: str, mode: str, *, encoding: typing.Optional[str] = None, umask: int = 0o666) -> typing.IO[typing.Any]:
if mode not in ('w', 'wb'):
raise ValueError('invalid mode: {!r}'.format(mode))
return open(self._join(filename), mode+'+', encoding=encoding, opener=lambda name, flags: os.open(name, flags|os.O_CREAT|os.O_EXCL, mode=umask, dir_fd=self._fd))
def openfirstunused(self, filenames: typing.Iterable[str], mode: str, *, encoding: typing.Optional[str] = None, umask: int = 0o666) -> typing.Tuple[typing.IO[typing.Any], str]:
for filename in filenames:
try:
return self.open(filename, mode, encoding=encoding, umask=umask), filename
except FileExistsError:
pass
raise ValueError('all filenames are in use')
@contextlib.contextmanager
def temp(self, mode: str) -> typing.Generator[typing.IO[typing.Any], None, None]:
# open outside the try block so a failure here cannot reach the finally
# clause with `f` unbound
f, name = self.openfirstunused(self._rng, mode)
try:
with f:
yield f
finally:
os.unlink(f.name, dir_fd=self._fd)
def link(self, src: typing.IO[typing.Any], dst: str) -> None:
os.link(src.name, self._join(dst), src_dir_fd=self._fd, dst_dir_fd=self._fd)
def linkfirstunused(self, src: typing.IO[typing.Any], dsts: typing.Iterable[str]) -> str:
for dst in dsts:
try:
self.link(src, dst)
except FileExistsError:
pass
else:
return dst
raise ValueError('all destinations are in use')
def __del__(self) -> None:
if os and os.close and self._fd is not None:
os.close(self._fd)
def sequence(filename: str) -> typing.Generator[str, None, None]:
'''Generate file names a.b, a-1.b, a-2.b, etc.'''
yield filename
splitext = os.path.splitext(filename)
i = 1
while True:
yield '-{}'.format(i).join(splitext)
i += 1
def randomnames(characters: str = 'abcdefghijklmnopqrstuvwxyz0123456789_', length: int = 8) -> typing.Generator[str, None, None]:
rng = random.Random()
while True:
yield ''.join(rng.choice(characters) for dummy in range(length))
def set_ansi_console() -> None:
if sys.platform == "win32":
import platform
if platform.version() < '10.':
raise RuntimeError('ANSI console mode requires Windows 10 or higher, detected {}'.format(platform.version()))
import ctypes
handle = ctypes.windll.kernel32.GetStdHandle(-11) # https://docs.microsoft.com/en-us/windows/console/getstdhandle
mode = ctypes.c_uint32() # https://docs.microsoft.com/en-us/windows/desktop/WinProg/windows-data-types#lpdword
ctypes.windll.kernel32.GetConsoleMode(handle, ctypes.byref(mode)) # https://docs.microsoft.com/en-us/windows/console/getconsolemode
mode.value |= 4 # add ENABLE_VIRTUAL_TERMINAL_PROCESSING
ctypes.windll.kernel32.SetConsoleMode(handle, mode) # https://docs.microsoft.com/en-us/windows/console/setconsolemode
# vim:sw=2:sts=2:et
```
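A short, hypothetical usage sketch of the helpers above. Note that `directory.open` passes `O_CREAT|O_EXCL`, so opening an existing name raises `FileExistsError`, which is exactly what `openfirstunused` catches before moving on to the next candidate:
```python
# Hypothetical usage; 'out' is an example directory name.
names = sequence('log.html')
print(next(names), next(names), next(names))  # log.html log-1.html log-2.html
d = directory('out')
with d.open('log.html', 'w') as f:  # O_CREAT|O_EXCL: fails if the name exists
    f.write('<html/>')
with d.temp('w') as tmp:            # randomly named, unlinked again on exit
    tmp.write('scratch')
```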
#### File: treelog/treelog/iter.py
```python
import itertools, functools, warnings, inspect, typing, types
from . import proto
T = typing.TypeVar('T')
T0 = typing.TypeVar('T0')
T1 = typing.TypeVar('T1')
T2 = typing.TypeVar('T2')
T3 = typing.TypeVar('T3')
T4 = typing.TypeVar('T4')
T5 = typing.TypeVar('T5')
T6 = typing.TypeVar('T6')
T7 = typing.TypeVar('T7')
T8 = typing.TypeVar('T8')
T9 = typing.TypeVar('T9')
class wrap(typing.Generic[T]):
'''Wrap iterable in consecutive title contexts.
The wrapped iterable is identical to the original, except that prior to every
next item a new log context is opened taken from the ``titles`` iterable. The
wrapped object should be entered before use in order to ensure that this
context is properly closed in case the iterator is prematurely abandoned.'''
def __init__(self, titles: typing.Union[typing.Iterable[str], typing.Generator[str, T, None]], iterable: typing.Iterable[T]) -> None:
self._titles = iter(titles)
self._iterable = iter(iterable)
self._log = None # type: typing.Optional[proto.Log]
self._warn = False
def __enter__(self) -> typing.Iterator[T]:
if self._log is not None:
raise Exception('iter.wrap is not reentrant')
from . import current
self._log = current
self._log.pushcontext(next(self._titles))
return iter(self)
def __iter__(self) -> typing.Generator[T, None, None]:
if self._log is not None:
cansend = inspect.isgenerator(self._titles)
for value in self._iterable:
self._log.recontext(typing.cast(typing.Generator[str, T, None], self._titles).send(value) if cansend else next(self._titles))
yield value
else:
with self:
self._warn = True
yield from self
def __exit__(self, exctype: typing.Optional[typing.Type[BaseException]], excvalue: typing.Optional[BaseException], tb: typing.Optional[types.TracebackType]) -> None:
if self._log is None:
raise Exception('iter.wrap has not yet been entered')
if self._warn and exctype is GeneratorExit:
warnings.warn('unclosed iter.wrap', ResourceWarning)
self._log.popcontext()
self._log = None
@typing.overload
def plain(title: str, __arg0: typing.Iterable[T0]) -> wrap[T0]: ...
@typing.overload
def plain(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1]) -> wrap[typing.Tuple[T0, T1]]: ...
@typing.overload
def plain(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2]) -> wrap[typing.Tuple[T0, T1, T2]]: ...
@typing.overload
def plain(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3]) -> wrap[typing.Tuple[T0, T1, T2, T3]]: ...
@typing.overload
def plain(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4]) -> wrap[typing.Tuple[T0, T1, T2, T3, T4]]: ...
@typing.overload
def plain(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], __arg5: typing.Iterable[T5]) -> wrap[typing.Tuple[T0, T1, T2, T3, T4, T5]]: ...
@typing.overload
def plain(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], __arg5: typing.Iterable[T5], __arg6: typing.Iterable[T6]) -> wrap[typing.Tuple[T0, T1, T2, T3, T4, T5, T6]]: ...
@typing.overload
def plain(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], __arg5: typing.Iterable[T5], __arg6: typing.Iterable[T6], __arg7: typing.Iterable[T7]) -> wrap[typing.Tuple[T0, T1, T2, T3, T4, T5, T6, T7]]: ...
@typing.overload
def plain(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], __arg5: typing.Iterable[T5], __arg6: typing.Iterable[T6], __arg7: typing.Iterable[T7], __arg8: typing.Iterable[T8]) -> wrap[typing.Tuple[T0, T1, T2, T3, T4, T5, T6, T7, T8]]: ...
@typing.overload
def plain(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], __arg5: typing.Iterable[T5], __arg6: typing.Iterable[T6], __arg7: typing.Iterable[T7], __arg8: typing.Iterable[T8], __arg9: typing.Iterable[T9]) -> wrap[typing.Tuple[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]]: ...
@typing.overload
def plain(title: str, *args: typing.Any) -> wrap[typing.Any]: ...
def plain(title: str, *args: typing.Any) -> wrap[typing.Any]:
'''Wrap arguments in simple enumerated contexts.
Example: my context 1, my context 2, etc.
'''
titles = map((_escape(title) + ' {}').format, itertools.count())
return wrap(titles, zip(*args) if len(args) > 1 else args[0])
@typing.overload
def fraction(title: str, __arg0: typing.Iterable[T0], *, length: typing.Optional[int] = ...) -> wrap[T0]: ...
@typing.overload
def fraction(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1]]: ...
@typing.overload
def fraction(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2]]: ...
@typing.overload
def fraction(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2, T3]]: ...
@typing.overload
def fraction(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2, T3, T4]]: ...
@typing.overload
def fraction(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], __arg5: typing.Iterable[T5], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2, T3, T4, T5]]: ...
@typing.overload
def fraction(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], __arg5: typing.Iterable[T5], __arg6: typing.Iterable[T6], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2, T3, T4, T5, T6]]: ...
@typing.overload
def fraction(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], __arg5: typing.Iterable[T5], __arg6: typing.Iterable[T6], __arg7: typing.Iterable[T7], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2, T3, T4, T5, T6, T7]]: ...
@typing.overload
def fraction(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], __arg5: typing.Iterable[T5], __arg6: typing.Iterable[T6], __arg7: typing.Iterable[T7], __arg8: typing.Iterable[T8], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2, T3, T4, T5, T6, T7, T8]]: ...
@typing.overload
def fraction(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], __arg5: typing.Iterable[T5], __arg6: typing.Iterable[T6], __arg7: typing.Iterable[T7], __arg8: typing.Iterable[T8], __arg9: typing.Iterable[T9], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]]: ...
@typing.overload
def fraction(title: str, *args: typing.Any, length: typing.Optional[int] = ...) -> wrap[typing.Any]: ...
def fraction(title: str, *args: typing.Any, length: typing.Optional[int] = None) -> wrap[typing.Any]:
'''Wrap arguments in enumerated contexts with length.
Example: my context 1/5, my context 2/5, etc.
'''
if length is None:
length = min(len(arg) for arg in args)
titles = map((_escape(title) + ' {}/' + str(length)).format, itertools.count())
return wrap(titles, zip(*args) if len(args) > 1 else args[0])
@typing.overload
def percentage(title: str, __arg0: typing.Iterable[T0], *, length: typing.Optional[int] = ...) -> wrap[T0]: ...
@typing.overload
def percentage(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1]]: ...
@typing.overload
def percentage(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2]]: ...
@typing.overload
def percentage(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2, T3]]: ...
@typing.overload
def percentage(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2, T3, T4]]: ...
@typing.overload
def percentage(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], __arg5: typing.Iterable[T5], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2, T3, T4, T5]]: ...
@typing.overload
def percentage(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], __arg5: typing.Iterable[T5], __arg6: typing.Iterable[T6], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2, T3, T4, T5, T6]]: ...
@typing.overload
def percentage(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], __arg5: typing.Iterable[T5], __arg6: typing.Iterable[T6], __arg7: typing.Iterable[T7], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2, T3, T4, T5, T6, T7]]: ...
@typing.overload
def percentage(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], __arg5: typing.Iterable[T5], __arg6: typing.Iterable[T6], __arg7: typing.Iterable[T7], __arg8: typing.Iterable[T8], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2, T3, T4, T5, T6, T7, T8]]: ...
@typing.overload
def percentage(title: str, __arg0: typing.Iterable[T0], __arg1: typing.Iterable[T1], __arg2: typing.Iterable[T2], __arg3: typing.Iterable[T3], __arg4: typing.Iterable[T4], __arg5: typing.Iterable[T5], __arg6: typing.Iterable[T6], __arg7: typing.Iterable[T7], __arg8: typing.Iterable[T8], __arg9: typing.Iterable[T9], *, length: typing.Optional[int]= ...) -> wrap[typing.Tuple[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]]: ...
@typing.overload
def percentage(title: str, *args: typing.Any, length: typing.Optional[int] = ...) -> wrap[typing.Any]: ...
def percentage(title: str, *args: typing.Any, length: typing.Optional[int] = None) -> wrap[typing.Any]:
'''Wrap arguments in contexts with percentage counter.
Example: my context 5%, my context 10%, etc.
'''
if length is None:
length = min(len(arg) for arg in args)
if length:
titles = map((_escape(title) + ' {:.0f}%').format, itertools.count(step=100/length)) # type: typing.Iterable[str]
else:
titles = (title + ' 100%',)  # single-element tuple: one context for the empty case
return wrap(titles, zip(*args) if len(args) > 1 else args[0])
def _escape(s: str) -> str:
return s.replace('{', '{{').replace('}', '}}')
``` |
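A usage sketch of the wrappers above, assuming the default `treelog.current` logger is active; each iteration retitles the current log context (via `recontext`) instead of nesting new ones:
```python
# Usage sketch; assumes the default treelog.current logger is active.
import treelog
for i in treelog.iter.fraction('item', range(4)):
    treelog.info(str(i))      # logged under contexts titled like "item 1/4"
for a, b in treelog.iter.plain('pair', 'xy', [0, 1]):
    treelog.info(a + str(b))  # several iterables are zipped together
```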
{
"source": "JoostvDoorn/pywren",
"score": 2
} |
#### File: pywren/pywren/ec2standalone.py
```python
import base64
import logging
import os
import time
import datetime
import boto3
import pywren
logger = logging.getLogger(__name__)
def b64s(string):
"""
Base-64 encode a string and return a string
"""
return base64.b64encode(string.encode('utf-8')).decode('ascii')
def sd(filename):
"""
get the file in the standalone dir
"""
return os.path.join(pywren.SOURCE_DIR,
'ec2_standalone_files', filename)
def create_instance_profile(instance_profile_name):
iam = boto3.resource('iam')
#iam.create_instance_profile(InstanceProfileName=INSTANCE_PROFILE_NAME)
iam.InstanceProfile(instance_profile_name)
#instance_profile.add_role(RoleName='pywren_exec_role_refactor8')
def launch_instances(number, tgt_ami, aws_region, my_aws_key, instance_type,
instance_name, instance_profile_name, sqs_queue_name,
default_volume_size=100,
max_idle_time=60, idle_terminate_granularity=600,
pywren_git_branch='master',
spot_price=None,
availability_zone=None,
fast_io=False,
parallelism=1,
pywren_git_commit=None):
logger.info("launching {} {} instances in {} (zone {}) ".format(number,
instance_type,
aws_region,
availability_zone))
if fast_io:
BlockDeviceMappings = [
{
'DeviceName': '/dev/xvda',
'Ebs': {
'VolumeSize': default_volume_size,
'DeleteOnTermination': True,
'VolumeType': 'gp2',
#'Iops' : 10000,
},
},
]
else:
BlockDeviceMappings = None
template_file = sd('ec2standalone.cloudinit.template')
with open(template_file, 'r') as f:
user_data = f.read()
with open(sd('supervisord.init'), 'r') as f:
supervisord_init_script = f.read()
supervisord_init_script_64 = b64s(supervisord_init_script)
with open(sd('supervisord.conf'), 'r') as f:
supervisord_conf = f.read()
logger.info("Running with idle_terminate_granularity={}".format(idle_terminate_granularity))
supervisord_conf = supervisord_conf.format(
run_dir="/tmp/pywren.runner",
sqs_queue_name=sqs_queue_name,
aws_region=aws_region,
max_idle_time=max_idle_time,
idle_terminate_granularity=idle_terminate_granularity,
num_procs=parallelism)
supervisord_conf_64 = b64s(supervisord_conf)
with open(sd("cloudwatch-agent.config"), 'r') as f:
cloud_agent_conf = f.read()
cloud_agent_conf_64 = b64s(cloud_agent_conf)
if pywren_git_commit is not None:
# use a git commit
git_checkout_string = str(pywren_git_commit)
else:
git_checkout_string = " {}".format(pywren_git_branch)
user_data = user_data.format(supervisord_init_script=supervisord_init_script_64,
supervisord_conf=supervisord_conf_64,
git_checkout_string=git_checkout_string,
aws_region=aws_region,
cloud_agent_conf=cloud_agent_conf_64)
# FIXME debug
open("/tmp/user_data", 'w').write(user_data)
iam = boto3.resource('iam')
instance_profile = iam.InstanceProfile(instance_profile_name)
instance_profile_dict = {'Name' : instance_profile.name}
instances = _create_instances(number, aws_region,
spot_price, ami=tgt_ami,
key_name=my_aws_key,
instance_type=instance_type,
block_device_mappings=BlockDeviceMappings,
security_group_ids=[],
ebs_optimized=True,
instance_profile=instance_profile_dict,
availability_zone=availability_zone,
user_data=user_data) ###FIXME DEBUG DEBUG
# FIXME there's a race condition where we could end up with two
# instances with the same name but that's ok
existing_instance_names = [a[0] for a in list_instances(aws_region,
instance_name)]
new_instances_with_names = []
def generate_unique_instance_name():
inst_pos = 0
while True:
name_string = "{}-{}".format(instance_name, inst_pos)
if (name_string not in [a[0] for a in new_instances_with_names]) and \
(name_string not in existing_instance_names):
return name_string
inst_pos += 1
for inst in instances:
unique_instance_name = generate_unique_instance_name()
logger.info("setting instance name to {}".format(unique_instance_name))
inst.reload()
inst.create_tags(
Resources=[
inst.instance_id
],
Tags=[
{
'Key': 'Name',
'Value': unique_instance_name
},
]
)
new_instances_with_names.append((unique_instance_name, inst))
for inst in instances:
inst.wait_until_running()
return new_instances_with_names
def _create_instances(num_instances,
region,
spot_price,
ami,
key_name,
instance_type,
block_device_mappings,
security_group_ids,
ebs_optimized,
instance_profile,
availability_zone,
user_data):
''' Function graciously borrowed from Flintrock ec2 wrapper
https://raw.githubusercontent.com/nchammas/flintrock/00cce5fe9d9f741f5999fddf2c7931d2cb1bdbe8/flintrock/ec2.py
'''
ec2 = boto3.resource(service_name='ec2', region_name=region)
spot_requests = []
try:
if spot_price is not None:
if spot_price > 0:
print("Requesting {c} spot instances at a max price of ${p}...".format(
c=num_instances, p=spot_price))
else:
print("Requesting {c} spot instances at the on-demand price...".format(
c=num_instances))
client = ec2.meta.client
LaunchSpecification = {
'ImageId': ami,
'InstanceType': instance_type,
'SecurityGroupIds': security_group_ids,
'EbsOptimized': ebs_optimized,
'IamInstanceProfile' : instance_profile,
'UserData' : b64s(user_data)}
if availability_zone is not None:
LaunchSpecification['Placement'] = {"AvailabilityZone":availability_zone}
if block_device_mappings is not None:
LaunchSpecification['BlockDeviceMappings'] = block_device_mappings
if key_name is not None:
LaunchSpecification['KeyName'] = key_name
if spot_price > 0:
spot_requests = client.request_spot_instances(
SpotPrice=str(spot_price),
InstanceCount=num_instances,
LaunchSpecification=LaunchSpecification)['SpotInstanceRequests']
else:
spot_requests = client.request_spot_instances(
InstanceCount=num_instances,
LaunchSpecification=LaunchSpecification)['SpotInstanceRequests']
request_ids = [r['SpotInstanceRequestId'] for r in spot_requests]
pending_request_ids = request_ids
time.sleep(5)
while pending_request_ids:
spot_requests = client.describe_spot_instance_requests(
SpotInstanceRequestIds=request_ids)['SpotInstanceRequests']
failed_requests = [r for r in spot_requests if r['State'] == 'failed']
if failed_requests:
failure_reasons = {r['Status']['Code'] for r in failed_requests}
raise Exception(
"The spot request failed for the following reason{s}: {reasons}"
.format(
s='' if len(failure_reasons) == 1 else 's',
reasons=', '.join(failure_reasons)))
pending_request_ids = [
r['SpotInstanceRequestId'] for r in spot_requests
if r['State'] == 'open']
if pending_request_ids:
print("{grant} of {req} instances granted. Waiting...".format(
grant=num_instances - len(pending_request_ids),
req=num_instances))
time.sleep(30)
print("All {c} instances granted.".format(c=num_instances))
cluster_instances = list(
ec2.instances.filter(
Filters=[
{'Name': 'instance-id', 'Values': [r['InstanceId'] for r in spot_requests]}
]))
else:
# Move this to flintrock.py?
print("Launching {c} instance{s}...".format(
c=num_instances,
s='' if num_instances == 1 else 's'))
# TODO: If an exception is raised in here, some instances may be
# left stranded.
LaunchSpecification = {
"MinCount" : num_instances,
"MaxCount" : num_instances,
"ImageId" : ami,
"InstanceType" : instance_type,
"SecurityGroupIds" : security_group_ids,
"EbsOptimized" : ebs_optimized,
"IamInstanceProfile" : instance_profile,
"InstanceInitiatedShutdownBehavior" : 'terminate',
"UserData" : user_data}
if block_device_mappings is not None:
LaunchSpecification['BlockDeviceMappings'] = block_device_mappings
if key_name is not None:
LaunchSpecification['KeyName'] = key_name
cluster_instances = ec2.create_instances(**LaunchSpecification)
time.sleep(10) # AWS metadata eventual consistency tax.
return cluster_instances
except (Exception, KeyboardInterrupt) as e:
if not isinstance(e, KeyboardInterrupt):
print(e)
if spot_requests:
request_ids = [r['SpotInstanceRequestId'] for r in spot_requests]
if any([r['State'] != 'active' for r in spot_requests]):
print("Canceling spot instance requests...")
client.cancel_spot_instance_requests(
SpotInstanceRequestIds=request_ids)
# Make sure we have the latest information on any launched spot instances.
spot_requests = client.describe_spot_instance_requests(
SpotInstanceRequestIds=request_ids)['SpotInstanceRequests']
instance_ids = [
r['InstanceId'] for r in spot_requests
if 'InstanceId' in r]
if instance_ids:
cluster_instances = list(
ec2.instances.filter(
Filters=[
{'Name': 'instance-id', 'Values': instance_ids}
]))
raise Exception("Launch failure")
def tags_to_dict(d):
if d is None:
return {}
return {a['Key'] : a['Value'] for a in d}
def list_instances(aws_region, instance_name):
"""
List all running instances whose Name tag contains the given instance_name.
Returns [(name, instance_object)]
"""
ec2 = boto3.resource('ec2', region_name=aws_region)
insts = []
for i in ec2.instances.all():
if i.state['Name'] == 'running':
d = tags_to_dict(i.tags)
if 'Name' in d and instance_name in d['Name']:
insts.append((d['Name'], i))
return insts
def terminate_instances(instance_list):
"""
# FIXME delete individuals
"""
for instance_name, instance_obj in instance_list:
logger.debug('Terminating instance %s', instance_name)
instance_obj.terminate()
def prettyprint_instances(inst_list):
for instance_name, instance_obj in inst_list:
print(instance_name, instance_obj.public_dns_name)
def prettyprint_instance_uptimes(inst_list):
for instance_name, instance_obj in inst_list:
launch_time = instance_obj.launch_time
delta = str(datetime.datetime.now(launch_time.tzinfo) - launch_time).split('.')[0]
print(instance_name, delta)
```
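Quick, self-contained checks of the two pure helpers above; everything else in this module needs live AWS resources:
```python
# Self-contained checks of the pure helpers above; no AWS access required.
assert b64s('hello') == 'aGVsbG8='
assert tags_to_dict(None) == {}
assert tags_to_dict([{'Key': 'Name', 'Value': 'pywren-standalone-0'}]) == \
    {'Name': 'pywren-standalone-0'}
```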
#### File: pywren/jobrunner/jobrunner.py
```python
from __future__ import print_function
import os
import base64
import shutil
import json
import sys
import time
import boto3
from botocore.vendored.requests.packages.urllib3.exceptions import ReadTimeoutError
from six.moves import cPickle as pickle
from tblib import pickling_support
pickling_support.install()
BACKOFF = 1
MAX_TRIES = 5
def b64str_to_bytes(str_data):
str_ascii = str_data.encode('ascii')
byte_data = base64.b64decode(str_ascii)
return byte_data
# initial output file in case job fails
output_dict = {'result' : None,
'success' : False}
pickled_output = pickle.dumps(output_dict)
jobrunner_config_filename = sys.argv[1]
jobrunner_config = json.load(open(jobrunner_config_filename, 'r'))
# FIXME someday switch to storage handler
# download the func data into memory
s3_client = boto3.client("s3")
func_bucket = jobrunner_config['func_bucket']
func_key = jobrunner_config['func_key']
data_bucket = jobrunner_config['data_bucket']
data_key = jobrunner_config['data_key']
data_byte_range = jobrunner_config['data_byte_range']
output_bucket = jobrunner_config['output_bucket']
output_key = jobrunner_config['output_key']
## Jobrunner stats are fieldname float
jobrunner_stats_filename = jobrunner_config['stats_filename']
# open the stats filename
stats_fid = open(jobrunner_stats_filename, 'w')
def write_stat(stat, val):
stats_fid.write("{} {:f}\n".format(stat, val))
stats_fid.flush()
def get_object_with_backoff(client, bucket, key, tries=MAX_TRIES, backoff=BACKOFF, **extra_args):
for num_tries in range(tries):
try:
return client.get_object(Bucket=bucket, Key=key, **extra_args)
except ReadTimeoutError:
if num_tries == tries - 1:
# out of retries: re-raise instead of returning an unbound name
raise
time.sleep(backoff)
backoff *= 2
try:
func_download_time_t1 = time.time()
func_obj_stream = get_object_with_backoff(s3_client, bucket=func_bucket, key=func_key)
loaded_func_all = pickle.loads(func_obj_stream['Body'].read())
func_download_time_t2 = time.time()
write_stat('func_download_time',
func_download_time_t2-func_download_time_t1)
# save modules, before we unpickle actual function
PYTHON_MODULE_PATH = jobrunner_config['python_module_path']
shutil.rmtree(PYTHON_MODULE_PATH, True) # delete old modules
os.mkdir(PYTHON_MODULE_PATH)
sys.path.append(PYTHON_MODULE_PATH)
for m_filename, m_data in loaded_func_all['module_data'].items():
m_path = os.path.dirname(m_filename)
if len(m_path) > 0 and m_path[0] == "/":
m_path = m_path[1:]
to_make = os.path.join(PYTHON_MODULE_PATH, m_path)
try:
os.makedirs(to_make)
except OSError as e:
if e.errno == 17:
pass
else:
raise e
full_filename = os.path.join(to_make, os.path.basename(m_filename))
#print "creating", full_filename
with open(full_filename, 'wb') as fid:
fid.write(b64str_to_bytes(m_data))
# logger.info("Finished wrting {} module files".format(len(d['module_data'])))
# logger.debug(subprocess.check_output("find {}".format(PYTHON_MODULE_PATH), shell=True))
# logger.debug(subprocess.check_output("find {}".format(os.getcwd()), shell=True))
# now unpickle function; it will expect modules to be there
loaded_func = pickle.loads(loaded_func_all['func'])
extra_get_args = {}
if data_byte_range is not None:
range_str = 'bytes={}-{}'.format(*data_byte_range)
extra_get_args['Range'] = range_str
data_download_time_t1 = time.time()
data_obj_stream = get_object_with_backoff(s3_client, bucket=data_bucket,
key=data_key,
**extra_get_args)
# FIXME make this streaming
loaded_data = pickle.loads(data_obj_stream['Body'].read())
data_download_time_t2 = time.time()
write_stat('data_download_time',
data_download_time_t2-data_download_time_t1)
#print("loaded")
y = loaded_func(loaded_data)
#print("success")
output_dict = {'result' : y,
'success' : True,
'sys.path' : sys.path}
pickled_output = pickle.dumps(output_dict)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
#traceback.print_tb(exc_traceback)
# Shockingly often, modules like subprocess don't properly
# call the base Exception.__init__, which results in them
# being unpickleable. As a result, we wrap this in a try/except block
# and handle the exceptions more carefully if any part of this
# save / test-reload round-trip fails
try:
pickled_output = pickle.dumps({'result' : e,
'exc_type' : exc_type,
'exc_value' : exc_value,
'exc_traceback' : exc_traceback,
'sys.path' : sys.path,
'success' : False})
# this is just to make sure they can be unpickled
pickle.loads(pickled_output)
except Exception as pickle_exception:
pickled_output = pickle.dumps({'result' : str(e),
'exc_type' : str(exc_type),
'exc_value' : str(exc_value),
'exc_traceback' : exc_traceback,
'exc_traceback_str' : str(exc_traceback),
'sys.path' : sys.path,
'pickle_fail' : True,
'pickle_exception' : pickle_exception,
'success' : False})
finally:
output_upload_timestamp_t1 = time.time()
s3_client.put_object(Body=pickled_output,
Bucket=output_bucket,
Key=output_key)
output_upload_timestamp_t2 = time.time()
write_stat("output_upload_time", output_upload_timestamp_t2 - output_upload_timestamp_t1) # pylint: disable=line-too-long
```
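With the defaults above (`BACKOFF = 1`, `MAX_TRIES = 5`), the retry delays in `get_object_with_backoff` grow as 1, 2, 4, 8 seconds. A stripped-down sketch of the same retry shape, with the operation injected so it can be exercised without S3:
```python
# Generic sketch of the retry/backoff pattern used above (no S3 required).
import time

def with_backoff(op, tries=5, backoff=1, retry_on=(Exception,)):
    for attempt in range(tries):
        try:
            return op()
        except retry_on:
            if attempt == tries - 1:
                raise              # out of retries: surface the last error
            time.sleep(backoff)
            backoff *= 2           # exponential backoff: 1, 2, 4, 8, ...
```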
#### File: pywren/tests/extmoduleutf8.py
```python
from __future__ import print_function
TEST_STR = "ᚠᛇᚻ᛫ᛒᛦᚦ᛫ᚠᚱᚩᚠᚢᚱ᛫ᚠᛁᚱᚪ᛫ᚷᛖᚻᚹ"
def unicode_str(x):
return TEST_STR
def foo_add(x):
return x+1
# For some reason I don't understand, this character is in anaconda/lib/python2.7/email/message.py and causes all sorts of headaches. Including it here as a safety check.
```
#### File: pywren/tests/test_logs.py
```python
import logging
import subprocess
import time
import unittest
import uuid
import boto3
import numpy as np
import pytest
from flaky import flaky
import pywren
class CloudwatchLogTest(unittest.TestCase):
"""
Simple test to see if we can get any logs
"""
def setUp(self):
self.wrenexec = pywren.default_executor()
@pytest.mark.skip(reason="This test is way too noisy")
def test_simple(self):
def sum_list(x):
return np.sum(x)
x = np.arange(10)
fut = self.wrenexec.call_async(sum_list, x)
res = fut.result()
self.assertEqual(res, np.sum(x))
time.sleep(10) # wait for logs to propagate
logs = self.wrenexec.get_logs(fut, True)
assert len(logs) >= 3 # make sure we have start, end, report
```
#### File: pywren/tests/test_util.py
```python
import unittest
import pytest
import pywren
import pywren.wrenutil
class S3HashingTest(unittest.TestCase):
def test_s3_split(self):
good_s3_url = "s3://bucket_name/and/the/key"
bucket, key = pywren.wrenutil.split_s3_url(good_s3_url)
self.assertEqual(bucket, "bucket_name")
self.assertEqual(key, "and/the/key")
with pytest.raises(ValueError):
bad_s3_url = "notS3://foo/bar"
bucket, key = pywren.wrenutil.split_s3_url(bad_s3_url)
def test_version():
"""
test that __version__ exists
"""
assert pywren.__version__ is not None
``` |
{
"source": "JoostVisser/ml-assignment2",
"score": 3
} |
#### File: ml-assignment2/mglearn/plot_2d_separator.py
```python
import numpy as np
import matplotlib.pyplot as plt
from .plot_helpers import cm2, cm3, discrete_scatter
def plot_2d_classification(classifier, X, fill=False, ax=None, eps=None, alpha=1, cm=cm3):
# multiclass
if eps is None:
eps = X.std() / 2.
if ax is None:
ax = plt.gca()
x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps
y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps
xx = np.linspace(x_min, x_max, 1000)
yy = np.linspace(y_min, y_max, 1000)
X1, X2 = np.meshgrid(xx, yy)
X_grid = np.c_[X1.ravel(), X2.ravel()]
decision_values = classifier.predict(X_grid)
ax.imshow(decision_values.reshape(X1.shape), extent=(x_min, x_max,
y_min, y_max),
aspect='auto', origin='lower', alpha=alpha, cmap=cm)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
def plot_2d_scores(classifier, X, ax=None, eps=None, alpha=1, cm="viridis", function=None):
# binary with fill
if eps is None:
eps = X.std() / 2.
if ax is None:
ax = plt.gca()
x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps
y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps
xx = np.linspace(x_min, x_max, 100)
yy = np.linspace(y_min, y_max, 100)
X1, X2 = np.meshgrid(xx, yy)
X_grid = np.c_[X1.ravel(), X2.ravel()]
if function is None:
function = getattr(classifier, "decision_function", getattr(classifier, "predict_proba"))
else:
function = getattr(classifier, function)
decision_values = function(X_grid)
if decision_values.ndim > 1 and decision_values.shape[1] > 1:
# predict_proba
decision_values = decision_values[:, 1]
grr = ax.imshow(decision_values.reshape(X1.shape),
extent=(x_min, x_max, y_min, y_max), aspect='auto',
origin='lower', alpha=alpha, cmap=cm)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
return grr
def plot_2d_separator(classifier, X, fill=False, ax=None, eps=None, alpha=1,
cm=cm2, linewidth=None, threshold=None, linestyle="solid"):
# binary?
if eps is None:
eps = X.std() / 2.
if ax is None:
ax = plt.gca()
x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps
y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps
xx = np.linspace(x_min, x_max, 100)
yy = np.linspace(y_min, y_max, 100)
X1, X2 = np.meshgrid(xx, yy)
X_grid = np.c_[X1.ravel(), X2.ravel()]
try:
decision_values = classifier.decision_function(X_grid)
levels = [0] if threshold is None else [threshold]
fill_levels = [decision_values.min()] + levels + [decision_values.max()]
except AttributeError:
# no decision_function
decision_values = classifier.predict_proba(X_grid)[:, 1]
levels = [.5] if threshold is None else [threshold]
fill_levels = [0] + levels + [1]
if fill:
ax.contourf(X1, X2, decision_values.reshape(X1.shape),
levels=fill_levels, alpha=alpha, cmap=cm)
else:
ax.contour(X1, X2, decision_values.reshape(X1.shape), levels=levels,
colors="black", alpha=alpha, linewidths=linewidth,
linestyles=linestyle, zorder=5)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
if __name__ == '__main__':
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
X, y = make_blobs(centers=2, random_state=42)
clf = LogisticRegression().fit(X, y)
plot_2d_separator(clf, X, fill=True)
discrete_scatter(X[:, 0], X[:, 1], y)
plt.show()
``` |
{
"source": "Jootoi/imneversorry",
"score": 3
} |
#### File: Jootoi/imneversorry/tarot.py
```python
from os import listdir
from random import shuffle, randint
from PIL import Image
from tempfile import NamedTemporaryFile
from telegram import Update
from telegram.ext import CallbackContext
import db
import re
class Tarot:
def __init__(self):
self.card_data = db.readSelitykset()
def getCommands(self):
return dict()
def get_reading(self, amount):
# cards in resources folder
cards = listdir("resources/tarot")
# magic shuffling
shuffle(cards)
reading = []
for i in range(amount):
# how 2 reverse a queue
reading.append(cards.pop())
# return the tempfile with the image
return(self.make_image(reading))
def make_image(self, reading):
reading_image = Image.new('RGB', (250 * len(reading), 429))
for i in range(len(reading)):
# 1-in-11 chance of drawing the card reversed (randint bounds are inclusive)
if randint(0,10) == 0:
card_image = Image.open("resources/tarot/" + reading[i])
image_flipped = card_image.transpose(Image.FLIP_TOP_BOTTOM)
reading_image.paste(im=image_flipped, box=(250 * i, 0))
#normal card
else:
reading_image.paste(im=Image.open("resources/tarot/" + reading[i]), box=(250 * i, 0))
# do NamedTempFile because Linux and Windows require completely different methods for this
# the old Win method of making a non-delete file and then deleting it borks on Linux
# this will bork on Windows but who cares
fp = NamedTemporaryFile()
fp.seek(0)
reading_image.save(fp, 'jpeg', quality=75)
return(fp)
def explain_card(self, text):
explanations_to_return = ""
for datum in self.card_data:
name = datum[0]
lname = name.lower()
if lname in text:
if "reversed " + lname in text or "ylösalaisin " + lname in text or lname + " reversed" in text or lname + " ylösalaisin" in text:
rev_exp = datum[2]
explanations_to_return += "Reversed " + name + ": " + rev_exp + "\n\n"
continue
explanation = datum[1]
explanations_to_return += name + ": " + explanation + "\n\n"
return explanations_to_return
def getTarot(self, update: Update, context: CallbackContext):
try:
size = int(update.message.text.lower().split(' ')[1])
except ValueError :
context.bot.sendMessage(chat_id=update.message.chat_id, text=":--D")
return
if size < 1 or size > 78:
context.bot.sendMessage(chat_id=update.message.chat_id, text=":--D")
return
image_file = self.get_reading(size)
image_file.seek(0)
if size > 10:
context.bot.sendDocument(chat_id=update.message.chat_id, document=open(image_file.name, 'rb'))
else:
context.bot.send_photo(chat_id=update.message.chat_id, photo=open(image_file.name, 'rb'))
image_file.close()
def getReading(self, update: Update, context: CallbackContext):
message = self.explain_card(update.message.text.lower())
if message != "":
context.bot.sendMessage(chat_id=update.message.chat_id, text=message)
def messageHandler(self, update: Update, context: CallbackContext):
msg = update.message
if msg.text is not None:
if re.match(r'^/tarot [0-9]+(?!\S)', msg.text.lower()):
self.getTarot(update, context)
elif "selitä" in msg.text.lower() or "selitys" in msg.text.lower():
self.getReading(update, context)
``` |
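A quick sanity check of the `/tarot` command regex used in `messageHandler` above; the `(?!\S)` lookahead rejects any non-space character glued to the number:
```python
# Sanity checks for the /tarot command regex used in messageHandler above.
import re
pattern = r'^/tarot [0-9]+(?!\S)'
assert re.match(pattern, '/tarot 3')
assert re.match(pattern, '/tarot 10 of something')
assert not re.match(pattern, '/tarot three')
assert not re.match(pattern, '/tarot 3x')  # lookahead rejects the trailing 'x'
```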
{
"source": "jootuom/multicmd",
"score": 3
} |
#### File: jootuom/multicmd/multicmd.py
```python
from tkinter import Menu, filedialog, messagebox
import tkinter.ttk
from jsonconfig import JSONConfig
import multiprocessing
import subprocess
def parsefile(fn=None):
with open(fn, "r", encoding="utf-8") as tf:
for num, line in enumerate(tf):
yield (num,) + tuple(line.rstrip().split("\t"))
def worker(idn, cmd):
rv = subprocess.call(cmd,
stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
shell=True
)
return (idn, rv)
class Settings(JSONConfig):
def reset(self):
self.store = {"commands": []}
class GUI(tkinter.Frame):
def cb_success(self, result):
self.progress.step()
self.cmdlist.set(result[0], column="Result", value=result[1])
def cb_error(self, error):
self.progress.step()
print(error)
def start(self, event=None):
cmdline = self.cmdline.get()
procs = int(self.proccount.get())
self.pool = multiprocessing.Pool(processes=procs)
for entry in self.cmdlist.get_children():
item = self.cmdlist.item(entry)
idn = item.get("text")
values = item.get("values")
# Skip rows that have a Result
if values[-1] != "":
self.progress.step()
continue
# If the cmdline is bad
try:
cmd = cmdline.format(*values)
except IndexError as e:
messagebox.showerror("Bad commandline", "Bad commandline")
break
self.pool.apply_async(
worker,
(idn, cmd,),
callback=self.cb_success,
error_callback=self.cb_error
)
self.pool.close()
def stop(self, event=None):
self.pool.terminate()
self.progress["value"] = 0
def browse(self, event=None):
fn = filedialog.askopenfilename()
if not fn: return
# Clear old items
curitems = self.cmdlist.get_children()
if curitems: self.cmdlist.delete(*curitems)
entries = parsefile(fn)
headers = next(entries)[1:]
self.cmdlist["columns"] = headers + ("Result",)
for header in headers:
self.cmdlist.heading(header, text=header)
self.cmdlist.column(header, stretch=False, minwidth=10, width=100)
for entry in entries:
# Skip the id number but add an empty Result
cols = entry[1:] + ("",)
self.cmdlist.insert("", "end", text=entry[0], iid=entry[0], values=cols)
self.cmdlist.heading("Result", text="Result")
self.cmdlist.column("Result", stretch=True, minwidth=10)
self.progress["value"] = 0
self.progress["maximum"] = len(self.cmdlist.get_children())
def exit(self, event=None):
self.quit()
def save(self, event=None):
cmdline = self.cmdline.get()
Settings["commands"] += [cmdline]
self.cmdline["values"] = Settings["commands"]
def forget(self, event=None):
cmdline = self.cmdline.get()
if cmdline in Settings["commands"]:
Settings["commands"].remove(cmdline)
Settings.save()
self.cmdline["values"] = Settings["commands"]
self.cmdline.set("")
def reset(self, event=None):
for entry in self.cmdlist.get_children():
self.cmdlist.set(entry, column="Result", value="")
def prune(self, event=None):
for entry in self.cmdlist.get_children():
item = self.cmdlist.item(entry)
if item.get("values")[-1] == 0:
self.cmdlist.delete(entry)
else:
self.cmdlist.set(entry, column="Result", value="")
def __init__(self, master=None):
tkinter.ttk.Frame.__init__(self, master)
self.master = master
self.master.geometry("500x315")
self.master.minsize(500, 315)
self.master.title("MultiCMD")
menubar = Menu(self.master)
filemenu = Menu(menubar, tearoff=0)
cmdmenu = Menu(menubar, tearoff=0)
resmenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="Open...", accelerator="Ctrl+O", command=self.browse)
filemenu.add_command(label="Quit", accelerator="Ctrl+Q", command=self.quit)
cmdmenu.add_command(label="Save", accelerator="Ctrl+S", command=self.save)
cmdmenu.add_command(label="Forget", accelerator="Ctrl+F", command=self.forget)
resmenu.add_command(label="Reset", accelerator="Ctrl+E", command=self.reset)
resmenu.add_command(label="Prune", accelerator="Ctrl+R", command=self.prune)
menubar.add_cascade(label="File", menu=filemenu)
menubar.add_cascade(label="Cmdline", menu=cmdmenu)
menubar.add_cascade(label="Results", menu=resmenu)
self.master.config(menu=menubar)
# Top row
topframe = tkinter.ttk.Frame(self)
self.cmdline = tkinter.ttk.Combobox(topframe, values=Settings["commands"])
self.proccount = tkinter.Spinbox(topframe, from_=1, to=100)
# Mid row
midframe = tkinter.ttk.Frame(self)
self.cmdlist = tkinter.ttk.Treeview(midframe)
self.cmdlist["columns"] = ("Result",)
self.cmdlist.heading("#0", text="#")
self.cmdlist.column("#0", stretch=False, minwidth=10, width=50)
self.cmdlist.heading("Result", text="Result")
self.cmdlist.column("Result", stretch=True, minwidth=10)
yscroller = tkinter.ttk.Scrollbar(midframe, orient="vertical", command=self.cmdlist.yview)
self.cmdlist.configure(yscroll=yscroller.set)
self.progress = tkinter.ttk.Progressbar(self)
# Bottom row
startbutton = tkinter.ttk.Button(self, text="Start", command=self.start)
stopbutton = tkinter.ttk.Button(self, text="Stop", command=self.stop)
openbutton = tkinter.ttk.Button(self, text="Open...", command=self.browse)
# Pack widgets
self.pack(expand=True, fill="both")
topframe.pack(expand=False, fill="x", padx=3, pady=3)
self.cmdline.pack(expand=True, fill="x", side="left")
self.proccount.pack(side="right")
midframe.pack(expand=True, fill="both", padx=3)
self.cmdlist.pack(expand=True, fill="both", side="left")
yscroller.pack(fill="y", side="right")
self.progress.pack(expand=False, fill="x", padx=3,pady=3)
startbutton.pack(side="left", padx=3,pady=3)
stopbutton.pack(side="left", padx=3, pady=3)
openbutton.pack(side="right", padx=3, pady=3)
# Keybindings
self.bind_all("<Control-o>", self.browse)
self.bind_all("<Control-q>", self.exit)
self.bind_all("<Control-s>", self.save)
self.bind_all("<Control-f>", self.forget)
self.bind_all("<Control-e>", self.reset)
self.bind_all("<Control-r>", self.prune)
self.bind_all("<Control-Return>", self.start)
self.bind_all("<Control-BackSpace>", self.stop)
self.master.mainloop()
if __name__ == "__main__":
multiprocessing.freeze_support()
Settings = Settings("multicmd-settings.json")
root = tkinter.Tk()
app = GUI(master=root)
``` |
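A sketch of the tab-separated input `parsefile` above expects: the first row provides the column headers, and each following row's fields fill the `{0}`, `{1}`, … placeholders of the command template (the file name and contents here are hypothetical):
```python
# Hypothetical hosts.txt (tab-separated):
#   Host<TAB>Port
#   server1<TAB>22
#   server2<TAB>80
rows = parsefile('hosts.txt')
print(next(rows))  # (0, 'Host', 'Port')   <- header row, becomes treeview columns
print(next(rows))  # (1, 'server1', '22')  <- fills e.g. "ping -c 1 {0}"
```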
{
"source": "joouha/euporie-binder",
"score": 2
} |
#### File: euporie-binder/euporie_binder/__init__.py
```python
__version__ = "0.1.0"
from .app import EuporieBinderApp
# This is needed for jupyter server to know how to load the extension
def _jupyter_server_extension_points():
return [{"module": __name__, "app": EuporieBinderApp}]
# This is required for classic notebook compatibility
def load_jupyter_server_extension(serverapp):
extension = EuporieBinderApp()
extension.serverapp = serverapp
extension.load_config_file()
extension.update_config(serverapp.config)
extension.parse_command_line(serverapp.extra_args)
extension.initialize()
``` |
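A minimal check of the hook above; it assumes the package is imported under its installed name, so `__name__` resolves to `euporie_binder`:
```python
# Minimal check of the extension-point hook (assumes the installed package name).
import euporie_binder
assert euporie_binder._jupyter_server_extension_points() == [
    {"module": "euporie_binder", "app": euporie_binder.EuporieBinderApp}
]
```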
{
"source": "joouha/euporie",
"score": 2
} |
#### File: euporie/app/tui.py
```python
from __future__ import annotations
import logging
from pathlib import Path
from typing import TYPE_CHECKING, cast
from prompt_toolkit.completion import PathCompleter
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.filters import Condition
from prompt_toolkit.formatted_text import (
HTML,
AnyFormattedText,
fragment_list_to_text,
to_formatted_text,
)
from prompt_toolkit.layout import (
ConditionalContainer,
DynamicContainer,
Float,
HSplit,
VSplit,
Window,
WindowAlign,
)
from prompt_toolkit.layout.controls import FormattedTextControl
from prompt_toolkit.layout.dimension import Dimension
from prompt_toolkit.layout.menus import CompletionsMenu
from prompt_toolkit.widgets import Button, Dialog, Label, TextArea
from euporie import __app_name__, __copyright__, __logo__, __strapline__, __version__
from euporie.app.base import EuporieApp
from euporie.box import Pattern
from euporie.components.menu import MenuContainer
from euporie.components.menu.contents import load_menu_items
from euporie.config import config
from euporie.keys import KeyBindingsInfo
from euporie.log import LogView
from euporie.notebook import TuiNotebook
from euporie.text import FormattedTextArea
if TYPE_CHECKING:
from asyncio import AbstractEventLoop
from typing import Any, Callable, Literal, Mapping, Optional
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.completion import Completer
from prompt_toolkit.formatted_text import StyleAndTextTuples
from prompt_toolkit.layout.containers import AnyContainer
from euporie.cell import InteractiveCell
from euporie.tab import Tab
log = logging.getLogger(__name__)
class TuiApp(EuporieApp):
"""A text user interface euporie application."""
def __init__(self, **kwargs: "Any") -> "None":
"""Create a new euporie text user interface application instance."""
self.notebook_class = TuiNotebook
super().__init__(
full_screen=True,
mouse_support=True,
editing_mode=self.get_edit_mode(),
**kwargs,
)
# Ensure an opened tab is focused
if self.tab:
self.pre_run_callables.append(self.tab.focus)
def format_title(self) -> "StyleAndTextTuples":
"""Formats the tab's title for display in the top right of the app."""
if self.tab:
return [("bold class:status.field", f" {self.tab.title} ")]
else:
return []
def format_status(self, part: "Literal['left', 'right']") -> "StyleAndTextTuples":
"""Formats the fields in the statusbar generated by the current tab.
Args:
part: ``'left'`` to return the fields on the left side of the statusbar,
and ``'right'`` to return the fields on the right
Returns:
A list of style and text tuples for display in the statusbar
"""
if self.tab:
entries = self.tab.statusbar_fields()
else:
entries = (
[HTML("Press <b>Ctrl+n</b> to start a new notebook")],
[HTML("Press <b>Ctrl+q</b> to quit")],
)
output: "StyleAndTextTuples" = []
# Show selected menu description if set
if part == "left" and self.root_container.status_text:
output.append(
("class:status.field", f" {self.root_container.status_text} ")
)
# Show the tab's status fields
else:
for field in entries[0 if part == "left" else 1]:
if field:
if isinstance(field, tuple):
ft = [field]
else:
ft = to_formatted_text(field, style="class:status.field")
output += [
("class:status.field", " "),
*ft,
("class:status.field", " "),
("class:status", " "),
]
if output:
output.pop()
return output
def load_container(self) -> "AnyContainer":
"""Builds the main application layout."""
self.logo = Window(
FormattedTextControl(
[("", f" {__logo__} ")],
focusable=True,
show_cursor=False,
style="class:menu-bar,logo",
),
height=1,
dont_extend_width=True,
)
self.title_bar = ConditionalContainer(
Window(
content=FormattedTextControl(
self.format_title, focusable=True, show_cursor=False
),
height=1,
style="class:menu.item",
dont_extend_width=True,
align=WindowAlign.RIGHT,
),
filter=self.has_tab,
)
tabs = DynamicContainer(self.tab_container)
status_bar = ConditionalContainer(
content=VSplit(
[
Window(
FormattedTextControl(lambda: self.format_status(part="left")),
style="class:status",
),
Window(
FormattedTextControl(lambda: self.format_status(part="right")),
style="class:status.right",
align=WindowAlign.RIGHT,
),
],
height=1,
),
filter=Condition(lambda: config.show_status_bar),
)
body = HSplit([tabs, status_bar], style="class:body")
self.root_container = MenuContainer(
body=body,
menu_items=load_menu_items(), # type: ignore
floats=[
Float(
xcursor=True,
ycursor=True,
content=CompletionsMenu(max_height=16, scroll_offset=1),
)
],
left=[self.logo],
right=[self.title_bar],
)
return self.root_container
def tab_container(self) -> "AnyContainer":
"""Returns a container with all opened tabs.
Returns:
A vertical split containing the opened tab containers.
"""
if self.tabs:
return VSplit(
self.tabs,
padding=1,
padding_char=" ",
padding_style="class:chrome",
)
else:
return Pattern()
def dialog(
self,
title: "AnyFormattedText",
body: "AnyContainer",
buttons: "dict[str, Optional[Callable]]",
to_focus: "Optional[AnyContainer]" = None,
) -> None:
"""Display a modal dialog above the application.
Returns focus to the previously selected control when closed.
Args:
title: The title of the dialog. Can be formatted text.
body: The container to use as the main body of the dialog.
buttons: A dictionary mapping text to display as dialog buttons to
callbacks to run when the button is clicked. If the callback is
`None`, the dialog will be closed without running a callback.
to_focus: The control to focus when the dialog is displayed.
"""
focused = self.layout.current_control
def _make_handler(cb: "Optional[Callable]" = None) -> "Callable":
def inner() -> "None":
assert isinstance(self.root_container.floats, list)
self.root_container.floats.remove(dialog)
if focused in self.layout.find_all_controls():
try:
self.layout.focus(focused)
except ValueError:
pass
if callable(cb):
cb()
return inner
# kb = KeyBindingsInfo()
# kb.add("escape")(lambda event: _make_handler())
button_widgets = []
for text, cb in buttons.items():
handler = _make_handler(cb)
button_widgets.append(
Button(text, handler, left_symbol="[", right_symbol="]")
)
# kb.add(text[:1].lower())(lambda event: handler)
dialog = Float(
Dialog(
title=title,
body=body,
buttons=button_widgets,
modal=True,
with_background=True,
)
)
assert isinstance(self.root_container.floats, list)
self.root_container.floats.insert(0, dialog)
if to_focus is None:
to_focus = button_widgets[0]
self.layout.focus(to_focus)
self.invalidate()
def ask_new_file(self) -> "None":
"""Prompts the user to name a file."""
return self.ask_file(
validate=False,
completer=PathCompleter(),
)
def ask_open_file(self) -> "None":
"""Prompts the user to open a file."""
self.ask_file(
completer=PathCompleter(),
)
def ask_file(
self,
default: "str" = "",
validate: "bool" = True,
error: "Optional[str]" = None,
completer: "Completer" = None,
) -> None:
"""Display a dialog asking for file name input.
Args:
default: The default filename to display in the text entry box
validate: Whether to disallow files which do not exist
error: An optional error message to display below the file name
completer: The completer to use for the input field
"""
def _open_cb() -> None:
path = Path(filepath.text)
if not validate or path.expanduser().exists():
self.open_file(path)
else:
self.ask_file(
default=filepath.text,
validate=validate,
error="File not found",
completer=completer,
)
def _accept_text(buf: "Buffer") -> "bool":
"""Accepts the text in the file input field and focuses the next field."""
self.layout.focus_next()
buf.complete_state = None
return True
filepath = TextArea(
text=default,
multiline=False,
completer=completer,
accept_handler=_accept_text,
)
root_contents: "list[AnyContainer]" = [
Label("Enter file name:"),
filepath,
]
if error:
root_contents.append(Label(error, style="red"))
self.dialog(
title="Select file",
body=HSplit(root_contents),
buttons={
"OK": _open_cb,
"Cancel": None,
},
to_focus=filepath,
)
def help_keys(self) -> None:
"""Displays details of registered key-bindings in a dialog."""
key_details = KeyBindingsInfo.to_formatted_text()
max_line_width = max(
[len(line) for line in fragment_list_to_text(key_details).split("\n")]
)
body = FormattedTextArea(
formatted_text=key_details,
multiline=True,
focusable=True,
wrap_lines=False,
width=Dimension(preferred=max_line_width + 2),
scrollbar=True,
)
self.dialog(
title="Keyboard Shortcuts",
body=body,
buttons={"OK": None},
)
def help_logs(self) -> None:
"""Displays a dialog with logs."""
for tab in self.tabs:
if isinstance(tab, LogView):
break
else:
tab = LogView()
self.tabs.append(tab)
self.layout.focus(tab)
def help_about(self) -> None:
"""Displays an about dialog."""
self.dialog(
title="About",
body=Window(
FormattedTextControl(
[
("class:logo", __logo__),
("", " "),
("bold", __app_name__),
("", f"Version {__version__}\n\n".rjust(27, " ")),
("", __strapline__),
("", "\n"),
("class:hr", "─" * 34 + "\n\n"),
("", __copyright__),
]
),
dont_extend_height=True,
),
buttons={"OK": None},
)
def _handle_exception(
self, loop: "AbstractEventLoop", context: "dict[str, Any]"
) -> "None":
exception = context.get("exception")
# Log observed exceptions to the log
log.exception("An unhandled exception occured", exc_info=exception)
# Also display a dialog to the user
self.dialog(
title="Error",
body=Window(
FormattedTextControl(
[
("bold", "An error occured:\n\n"),
("", exception.__repr__()),
]
)
),
buttons={"OK": None},
)
def exit(self, **kwargs: "Any") -> "None":
"""Check for unsaved files before closing.
Creates a chain of close file commands, where the callback for each triggers
the closure of the next. The closing process can be cancelled anywhere along
the chain.
Args:
**kwargs: Unused key word arguments
"""
really_close = super().exit
if self.tabs:
def final_cb() -> "None":
"""Really exit after the last tab in the chain is closed."""
self.cleanup_closed_tab(self.tabs[0])
really_close()
def create_cb(
close_tab: "Tab", cleanup_tab: "Tab", cb: "Callable"
) -> "Callable":
"""Generate a tab close chaining callbacks.
Cleans up after the previously closed tab, and requests to close the
next tab in the chain.
Args:
close_tab: The tab to close
cleanup_tab: The previously closed tab to cleanup
cb: The callback to call when work is complete
Returns:
A callback function which cleans up `cleanup_tab` and closes
`close_tab`.
"""
def inner() -> None:
self.cleanup_closed_tab(cleanup_tab)
close_tab.close(cb=cb)
return inner
cb = final_cb
for close_tab, cleanup_tab in zip(self.tabs, self.tabs[1:]):
cb = create_cb(close_tab, cleanup_tab, cb)
self.tabs[-1].close(cb)
else:
really_close()
def set_edit_mode(self, mode: "EditingMode") -> "None":
"""Sets the keybindings for editing mode.
Args:
mode: One of default, vi, or emacs
"""
config.edit_mode = str(mode)
self.editing_mode = self.get_edit_mode()
log.debug("Editing mode set to: %s", self.editing_mode)
def get_edit_mode(self) -> "EditingMode":
"""Returns the editing mode enum defined in the configuration."""
return cast(
EditingMode,
{
"micro": "MICRO",
"vi": EditingMode.VI,
"emacs": EditingMode.EMACS,
}.get(str(config.edit_mode), "micro"),
)
def tab_op(
self,
operation: "str",
tab: "Optional[Tab]" = None,
args: "Optional[list[Any]]" = None,
kwargs: "Optional[Mapping[str, Any]]" = None,
) -> None:
"""Call a method on the current tab if it exits.
Args:
operation: The name of the function to attempt to call.
tab: The instance of the tab to use. If `None`, the currently selected tab
will be used.
args: List of parameter arguments to pass to the function
kwargs: Mapping of keyword arguments to pass to the function
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
if tab is None:
tab = self.tab
if tab and hasattr(tab, operation):
func = getattr(self.tab, operation)
if callable(func):
func(*args, **kwargs)
@property
def notebook(self) -> "Optional[TuiNotebook]":
"""Return the currently active notebook."""
if isinstance(self.tab, TuiNotebook):
return self.tab
return None
@property
def cell(self) -> "Optional[InteractiveCell]":
"""Return the currently active cell."""
if isinstance(self.tab, TuiNotebook):
return self.tab.cell
return None
```
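The callback chain built in `TuiApp.exit` above is easier to see in isolation. Below is a minimal standalone sketch of the same back-to-front chaining idea; `FakeTab` and `close_all` are hypothetical stand-ins for euporie's `Tab` and the app, not part of the library.

```python
from typing import Callable, List

class FakeTab:
    """Hypothetical stand-in for a euporie Tab with a close(cb=...) API."""

    def __init__(self, name: str) -> None:
        self.name = name

    def close(self, cb: Callable) -> None:
        # A real tab might first prompt to save, then invoke the callback.
        print(f"closing {self.name}")
        cb()

def close_all(tabs: List[FakeTab], really_close: Callable) -> None:
    """Close tabs back-to-front via chained callbacks, like TuiApp.exit."""

    def final_cb() -> None:
        print(f"cleaning up {tabs[0].name}")
        really_close()

    def create_cb(close_tab: FakeTab, cleanup_tab: FakeTab, cb: Callable) -> Callable:
        def inner() -> None:
            print(f"cleaning up {cleanup_tab.name}")
            close_tab.close(cb=cb)
        return inner

    cb = final_cb
    for close_tab, cleanup_tab in zip(tabs, tabs[1:]):
        cb = create_cb(close_tab, cleanup_tab, cb)
    tabs[-1].close(cb)

close_all([FakeTab("a"), FakeTab("b"), FakeTab("c")], lambda: print("exit"))
# Closes c, b, a in turn, cleaning up each before touching the next, and
# only calls the exit function once the whole chain completes.
```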
#### File: euporie/commands/notebook.py
```python
import logging
from prompt_toolkit.filters import buffer_has_focus
from euporie.app.current import get_tui_app as get_app
from euporie.commands.registry import add
from euporie.filters import notebook_has_focus
log = logging.getLogger(__name__)
@add(
keys="c-s",
filter=notebook_has_focus,
group="Notebook",
)
def save_notebook() -> "None":
"""Save the current notebook."""
nb = get_app().notebook
if nb is not None:
nb.save()
@add(
filter=notebook_has_focus,
group="Cell",
)
def run_all_cells() -> "None":
"""Run or render all the cells in the current notebook."""
nb = get_app().notebook
if nb is not None:
nb.run_all()
@add(
keys="a",
filter=notebook_has_focus & ~buffer_has_focus,
group="Notebook",
)
def add_cell_above() -> "None":
"""Add a new cell above the current."""
nb = get_app().notebook
if nb is not None:
nb.add_cell_above()
@add(
keys="b",
filter=notebook_has_focus & ~buffer_has_focus,
group="Notebook",
)
def add_cell_below() -> "None":
"""Add a new cell below the current."""
nb = get_app().notebook
if nb is not None:
nb.add_cell_below()
@add(
keys=("d", "d"),
filter=notebook_has_focus & ~buffer_has_focus,
group="Notebook",
)
def delete_cell() -> "None":
"""Delete the current cell."""
nb = get_app().notebook
if nb is not None:
nb.delete()
@add(
keys="x",
filter=notebook_has_focus & ~buffer_has_focus,
group="Notebook",
)
def cut_cell() -> "None":
"""Cut the current cell."""
nb = get_app().notebook
if nb is not None:
nb.cut()
@add(
keys="c",
filter=notebook_has_focus & ~buffer_has_focus,
group="Notebook",
)
def copy_cell() -> "None":
"""Copy the current cell."""
nb = get_app().notebook
if nb is not None:
nb.copy()
@add(
keys="v",
filter=notebook_has_focus & ~buffer_has_focus,
group="Notebook",
)
def paste_cell() -> "None":
"""Paste the last copied cell."""
nb = get_app().notebook
if nb is not None:
nb.paste()
@add(
keys=("I", "I"),
filter=notebook_has_focus & ~buffer_has_focus,
group="Notebook",
)
def interrupt_kernel() -> "None":
"""Interrupt the notebook's kernel."""
nb = get_app().notebook
if nb is not None:
nb.interrupt_kernel()
@add(
keys=("0", "0"),
filter=notebook_has_focus & ~buffer_has_focus,
group="Notebook",
)
def restart_kernel() -> "None":
"""Restart the notebook's kernel."""
nb = get_app().notebook
if nb is not None:
nb.restart_kernel()
@add(
filter=notebook_has_focus & ~buffer_has_focus,
group="Notebook",
)
def change_kernel() -> "None":
"""Change the notebook's kernel."""
nb = get_app().notebook
if nb is not None:
nb.change_kernel()
@add(
keys="[",
filter=notebook_has_focus & ~buffer_has_focus,
group="Notebook",
)
@add(
keys="<scroll-up>",
filter=notebook_has_focus,
group="Notebook",
)
def scroll_up() -> "None":
"""Scroll the page up a line."""
nb = get_app().notebook
if nb is not None:
nb.page.scroll(1)
@add(
keys="]",
filter=notebook_has_focus & ~buffer_has_focus,
group="Notebook",
)
@add(
keys="<scroll-down>",
filter=notebook_has_focus,
group="Notebook",
)
def scroll_down() -> "None":
"""Scroll the page down a line."""
nb = get_app().notebook
if nb is not None:
nb.page.scroll(-1)
@add(
keys="{",
filter=notebook_has_focus & ~buffer_has_focus,
group="Notebook",
)
def scroll_up_5_lines() -> "None":
"""Scroll the page up 5 lines."""
nb = get_app().notebook
if nb is not None:
nb.page.scroll(5)
@add(
keys="}",
filter=notebook_has_focus & ~buffer_has_focus,
group="Notebook",
)
def scroll_down_5_lines() -> "None":
"""Scroll the page down 5 lines."""
nb = get_app().notebook
if nb is not None:
nb.page.scroll(-5)
@add(
keys=["home", "c-up"],
group="Notebook",
filter=notebook_has_focus & ~buffer_has_focus,
)
def first_child() -> "None":
"""Select the first cell in the notebook."""
nb = get_app().notebook
if nb is not None:
nb.page.selected_index = 0
@add(
keys="pageup",
group="Notebook",
filter=notebook_has_focus & ~buffer_has_focus,
)
def select_5th_previous_cell() -> "None":
"""Go up 5 cells."""
nb = get_app().notebook
if nb is not None:
nb.page.selected_index -= 5
@add(
keys=["up", "k"],
group="Notebook",
filter=notebook_has_focus & ~buffer_has_focus,
)
def select_previous_cell() -> "None":
"""Go up one cell."""
nb = get_app().notebook
if nb is not None:
nb.page.selected_index -= 1
@add(
keys=["down", "j"],
group="Navigation",
filter=notebook_has_focus & ~buffer_has_focus,
)
def next_child() -> "None":
"""Select the next cell."""
nb = get_app().notebook
if nb is not None:
nb.page.selected_index += 1
@add(
keys="pagedown",
group="Notebook",
filter=~buffer_has_focus,
)
def select_5th_next_cell() -> "None":
"""Go down 5 cells."""
nb = get_app().notebook
if nb is not None:
nb.page.selected_index += 5
@add(
keys=["end", "c-down"],
group="Notebook",
filter=notebook_has_focus & ~buffer_has_focus,
)
def select_last_cell() -> "None":
"""Select the last cell in the notebook."""
nb = get_app().notebook
if nb is not None:
nb.page.selected_index = len(list(nb.page.children))
```
#### File: euporie/commands/registry.py
```python
from typing import TYPE_CHECKING
from euporie.commands.base import Command
if TYPE_CHECKING:
from typing import Any, Callable, Dict
commands: "Dict[str, Command]" = {}
def add(**kwargs: "Any") -> "Callable":
"""Adds a command to the centralized command system."""
def decorator(handler: "Callable") -> "Callable":
cmd = Command(handler, **kwargs)
commands[cmd.name] = cmd
return handler
return decorator
def get(name: "str") -> "Command":
"""Get a command from the centralized command system by name."""
try:
return commands[name]
except KeyError as e:
raise KeyError("Unknown command: %r" % name) from e
```
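A self-contained sketch of the same decorator-registry pattern follows. The `Command` class here is a hypothetical stand-in; euporie's real `Command` lives in `euporie.commands.base`, and command names appear to be the hyphenated handler names (e.g. `run_all_cells` is looked up as `"run-all-cells"` in `contents.py` below), which this sketch assumes.

```python
from typing import Any, Callable, Dict

class Command:
    """Hypothetical minimal stand-in for euporie.commands.base.Command."""

    def __init__(self, handler: Callable, **kwargs: Any) -> None:
        self.handler = handler
        self.name = handler.__name__.replace("_", "-")
        self.options = kwargs  # keys, filter, group, ...

commands: Dict[str, Command] = {}

def add(**kwargs: Any) -> Callable:
    """Register a handler in the command dict via a decorator."""
    def decorator(handler: Callable) -> Callable:
        cmd = Command(handler, **kwargs)
        commands[cmd.name] = cmd
        return handler
    return decorator

@add(keys="c-s", group="Notebook")
def save_notebook() -> None:
    print("saved")

commands["save-notebook"].handler()  # prints: saved
```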
#### File: components/menu/contents.py
```python
from pygments.styles import get_all_styles # type: ignore
from euporie.commands.registry import get
from euporie.components.menu.item import MenuItem
from euporie.config import config
def load_menu_items() -> "list[MenuItem]":
"""Loads the list of menu items to display in the menu."""
separator = MenuItem(separator=True)
return [
MenuItem(
"File",
children=[
get("new-notebook").menu,
get("open-file").menu,
separator,
get("save-notebook").menu,
get("close-file").menu,
separator,
get("quit").menu,
],
),
MenuItem(
"Edit",
children=[
get("cut-cell").menu,
get("copy-cell").menu,
get("paste-cell").menu,
],
),
MenuItem(
"Run",
children=[
get("run-cell").menu,
get("run-all-cells").menu,
],
),
MenuItem(
"Kernel",
children=[
get("interrupt-kernel").menu,
get("restart-kernel").menu,
get("change-kernel").menu,
],
),
MenuItem(
"Settings",
children=[
MenuItem(
"Editor key bindings",
children=[
get(f"set-edit-mode-{choice}").menu
for choice in config.choices("edit_mode")
],
),
separator,
MenuItem(
"Color scheme",
children=[
get(f"set-color-scheme-{choice}").menu
for choice in config.choices("color_scheme")
],
),
MenuItem(
"Syntax Theme",
children=[
get(f"set-syntax-theme-{choice}").menu
for choice in sorted(get_all_styles())
],
),
get("switch-background-pattern").menu,
get("show-cell-borders").menu,
separator,
get("use-full-width").menu,
get("show-line-numbers").menu,
get("show-status-bar").menu,
separator,
get("autocomplete").menu,
get("autosuggest").menu,
get("run-after-external-edit").menu,
],
),
MenuItem(
"Help",
children=[
get("keyboard-shortcuts").menu,
get("view-logs").menu,
separator,
get("about").menu,
],
),
]
```
#### File: euporie/euporie/control.py
```python
from __future__ import annotations
from typing import Any, Dict, Optional, Type
from prompt_toolkit.cache import SimpleCache
from prompt_toolkit.layout.controls import GetLinePrefixCallable, UIContent, UIControl
from euporie.render import (
DataRenderer,
HTMLRenderer,
ImageRenderer,
RichRenderer,
SVGRenderer,
)
from euporie.text import ANSI
__all__ = [
"Control",
"RichControl",
"HTMLControl",
"BaseImageControl",
"ImageControl",
"SVGControl",
]
class Control(UIControl):
"""Base class for rich cell output.
    Will re-render its output when the display is resized. Output is generated by
`Control.renderer`, and is cached per output size.
"""
renderer: "Type[DataRenderer]"
def __init__(self, data: "Any", render_args: "Optional[Dict]" = None) -> "None":
"""Initalize the control.
Args:
data: Raw cell output data
render_args: Additional keyword arguments to pass to the renderer.
"""
self.data = data
if render_args is None:
render_args = {}
self.render_args = render_args
self.renderer_instance: "Optional[DataRenderer]" = None
self.rendered_lines: "list" = []
self._format_cache: SimpleCache = SimpleCache(maxsize=20)
self._content_cache: SimpleCache = SimpleCache(maxsize=20)
def preferred_height(
self,
width: "int",
max_available_height: "int",
wrap_lines: "bool",
get_line_prefix: Optional[GetLinePrefixCallable],
) -> "int":
"""Returns the number of lines in the rendered content."""
if not self.rendered_lines:
self.rendered_lines = self._format_cache.get(
(width, max_available_height),
lambda: self.render(width, max_available_height),
)
return len(self.rendered_lines)
def create_content(self, width: "int", height: "int") -> "UIContent":
"""Generates rendered output at a given size.
Args:
width: The desired output width
height: The desired output height
Returns:
`UIContent` for the given output size.
"""
self.rendered_lines = self._format_cache.get(
(width,),
lambda: self.render(width, height),
)
def get_content() -> Optional[UIContent]:
if self.rendered_lines is not None:
return UIContent(
get_line=lambda i: ANSI(
self.rendered_lines[i]
).__pt_formatted_text__(),
line_count=len(self.rendered_lines),
)
else:
return None
return self._content_cache.get((width,), get_content)
def render(self, width: "int", height: "int") -> "list[str]":
"""Calls the renderer."""
if self.renderer_instance is None:
self.renderer_instance = self.renderer.select()
result = self.renderer_instance.render(
self.data, width=width, height=height, render_args=self.render_args
)
rendered_lines = result.rstrip().split("\n")
return rendered_lines
class RichControl(Control):
"""Control for rich renderables."""
renderer = RichRenderer
class HTMLControl(Control):
"""Control for rendered HTML."""
renderer = HTMLRenderer
class BaseImageControl(Control):
"""Base class for image controls."""
def create_content(self, width: "int", height: "int") -> "UIContent":
"""Additionally cache rendered content by cell obscurity status."""
cell_obscured = self.render_args["cell"].obscured()
self.rendered_lines: "list" = self._format_cache.get(
(cell_obscured, width),
lambda: self.render(width, height),
)
def get_content() -> UIContent:
return UIContent(
get_line=lambda i: ANSI(self.rendered_lines[i]).__pt_formatted_text__(),
line_count=len(self.rendered_lines),
)
return self._content_cache.get((cell_obscured, width), get_content)
class ImageControl(BaseImageControl):
"""Control for rendered raster images."""
renderer = ImageRenderer
class SVGControl(BaseImageControl):
"""Class for rendered SVG iamges."""
renderer = SVGRenderer
```
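The per-size render caching in `Control` relies on prompt_toolkit's `SimpleCache`, which re-runs the getter only on a cache miss. A small sketch of that behaviour, assuming prompt_toolkit is installed (`render` here is a made-up placeholder for the expensive renderer call):

```python
from typing import List

from prompt_toolkit.cache import SimpleCache

render_cache: SimpleCache = SimpleCache(maxsize=20)

def render(width: int) -> List[str]:
    print(f"rendering at width={width}")  # only runs on a cache miss
    return ["x" * width]

render_cache.get((80,), lambda: render(80))  # miss: renders
render_cache.get((80,), lambda: render(80))  # hit: returns cached lines
render_cache.get((40,), lambda: render(40))  # new width key: renders again
```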
#### File: euporie/key_binding/micro_state.py
```python
from enum import Enum
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import List, Optional
from prompt_toolkit.key_binding.key_processor import KeyPress
__all__ = ["InputMode", "MicroState"]
class InputMode(str, Enum):
"""Enum to define edit mode state types."""
value: "str"
INSERT = "insert"
REPLACE = "replace"
class MicroState:
"""Mutable class to hold Micro specific state."""
def __init__(self) -> "None":
"""Initiates the editing mode state."""
self.macro: "Optional[List[KeyPress]]" = []
self.current_recording: "Optional[List[KeyPress]]" = None
self.input_mode: "InputMode" = InputMode.INSERT
def reset(self) -> "None":
"""Reset the editing mode state."""
self.input_mode = InputMode.INSERT
self.current_recording = None
@property
def is_recording(self) -> "bool":
"""Tell whether we are recording a macro."""
return self.current_recording is not None
def start_macro(self) -> "None":
"""Start recording a macro."""
self.current_recording = []
def end_macro(self) -> "None":
"""End recording a macro."""
self.macro = self.current_recording
self.current_recording = None
```
#### File: euporie/euporie/style.py
```python
from functools import lru_cache
from typing import TYPE_CHECKING
from prompt_toolkit.styles import DEFAULT_ATTRS, AdjustBrightnessStyleTransformation
if TYPE_CHECKING:
from typing import Any
@lru_cache
def color_series(n: "int" = 6, interval: "float" = 0.05, **kwargs: "Any") -> "dict":
"""Create a series of dimmed colours."""
series: "dict[str, list]" = {key: [] for key in kwargs.keys()}
for i in range(n):
tr = AdjustBrightnessStyleTransformation(
min_brightness=interval * i, max_brightness=1 - (interval * i)
)
for name, color in kwargs.items():
series[name].append(
"#{}".format(
tr.transform_attrs(
DEFAULT_ATTRS._replace(color=color.lstrip("#"))
).color
)
)
return series
```
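One step of the series above, in isolation: each iteration narrows the brightness range a little further and re-encodes the transformed colour as hex. This sketch assumes prompt_toolkit is installed; the colour value is arbitrary.

```python
from prompt_toolkit.styles import DEFAULT_ATTRS, AdjustBrightnessStyleTransformation

# Equivalent to the i=1 step of color_series with interval=0.05.
tr = AdjustBrightnessStyleTransformation(min_brightness=0.05, max_brightness=0.95)
attrs = tr.transform_attrs(DEFAULT_ATTRS._replace(color="ff8800"))
print(f"#{attrs.color}")  # a slightly dimmed variant of #ff8800
```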
#### File: euporie/euporie/text.py
```python
from __future__ import annotations
import re
from typing import TYPE_CHECKING
from prompt_toolkit.formatted_text import ANSI as PTANSI
from prompt_toolkit.formatted_text import (
fragment_list_to_text,
split_lines,
to_formatted_text,
)
from prompt_toolkit.layout.margins import ScrollbarMargin
from prompt_toolkit.layout.processors import DynamicProcessor, Processor, Transformation
from prompt_toolkit.widgets import TextArea
if TYPE_CHECKING:
from typing import Any, Generator
from prompt_toolkit.formatted_text import StyleAndTextTuples
from prompt_toolkit.layout.processors import TransformationInput
__all__ = ["FormatTextProcessor", "FormattedTextArea", "ANSI"]
class FormatTextProcessor(Processor):
"""Applies formatted text to a TextArea."""
def __init__(self, formatted_text: "StyleAndTextTuples"):
"""Initiate the processor.
Args:
formatted_text: The text in a buffer but with formatting applied.
"""
self.formatted_text = formatted_text
super().__init__()
def apply_transformation(
self, transformation_input: "TransformationInput"
) -> "Transformation":
"""Apply text formatting to a line in a buffer."""
if not hasattr(self, "formatted_lines"):
self.formatted_lines = list(split_lines(self.formatted_text))
lineno = transformation_input.lineno
max_lineno = len(self.formatted_lines) - 1
if lineno > max_lineno:
lineno = max_lineno
line = self.formatted_lines[lineno]
return Transformation(line)
class FormattedTextArea(TextArea):
"""Applies formatted text to a TextArea."""
def __init__(
self, formatted_text: "StyleAndTextTuples", *args: "Any", **kwargs: "Any"
):
"""Initialise a `FormattedTextArea` instance.
Args:
formatted_text: A list of `(style, text)` tuples to display.
*args: Arguments to pass to `prompt_toolkit.widgets.TextArea`.
**kwargs: Key-word arguments to pass to `prompt_toolkit.widgets.TextArea`.
"""
input_processors = kwargs.pop("input_processors", [])
input_processors.append(DynamicProcessor(self.get_processor))
# The following is not type checked due to a currently open mypy bug
# https://github.com/python/mypy/issues/6799
super().__init__(
*args,
input_processors=input_processors,
**kwargs,
) # type: ignore
# Set the formatted text to display
self.formatted_text: "StyleAndTextTuples" = formatted_text
for margin in self.window.right_margins:
if isinstance(margin, ScrollbarMargin):
margin.up_arrow_symbol = "▲"
margin.down_arrow_symbol = "▼"
def get_processor(self) -> "FormatTextProcessor":
"""Generate a processor for the formatted text."""
return FormatTextProcessor(self.formatted_text)
@property
def formatted_text(self) -> "StyleAndTextTuples":
"""The formatted text."""
return self._formatted_text
@formatted_text.setter
def formatted_text(self, value: "StyleAndTextTuples") -> None:
"""Sets the formatted text."""
self._formatted_text = to_formatted_text(value)
self.text = fragment_list_to_text(value)
class ANSI(PTANSI):
"""Converts ANSI text into formatted text, preserving all control sequences."""
def __init__(self, value: "str") -> None:
"""Initiate the ANSI processor instance.
This replaces carriage returns to emulate terminal output.
Args:
value: The ANSI string to process.
"""
# Replace windows style newlines
value = value.replace("\r\n", "\n")
# Remove anything before a carriage return if there is something after it to
# emulate a carriage return in the output
value = re.sub("^.*\\r(?!\\n)", "", value, 0, re.MULTILINE)
super().__init__(value)
def _parse_corot(self) -> Generator[None, str, None]:
"""Coroutine that parses the ANSI escape sequences.
        This is a modified version of the ANSI parser from prompt_toolkit which
        retains all CSI escape sequences.
Yields:
Accepts characters from a string.
"""
style = ""
formatted_text = self._formatted_text
while True:
char = yield
sequence = char
# Everything between \001 and \002 should become a ZeroWidthEscape.
if char == "\001":
sequence = ""
while char != "\002":
char = yield
if char == "\002":
formatted_text.append(("[ZeroWidthEscape]", sequence))
break
else:
sequence += char
continue
# Check for backspace
elif char == "\x08":
# TODO - remove last character from last non-ZeroWidthEscape fragment
formatted_text.pop()
continue
elif char in ("\x1b", "\x9b"):
# Got a CSI sequence, try to compile a control sequence
char = yield
# Check for sixels
if char == "P":
                    # Got a DEC code
sequence += char
# We expect "p1;p2;p3;q" + sixel data + "\x1b\"
char = yield
while char != "\x1b":
sequence += char
char = yield
sequence += char
char = yield
if ord(char) == 0x5C:
sequence += char
formatted_text.append(("[ZeroWidthEscape]", sequence))
# char = yield
continue
# Check for hyperlinks
elif char == "]":
sequence += char
char = yield
if char == "8":
sequence += char
char = yield
if char == ";":
sequence += char
char = yield
while True:
sequence += char
if sequence[-2:] == "\x1b\\":
break
char = yield
formatted_text.append(("[ZeroWidthEscape]", sequence))
continue
elif (char == "[" and sequence == "\x1b") or sequence == "\x9b":
if sequence == "\x1b":
sequence += char
char = yield
# Next are any number (including none) of "parameter bytes"
params = []
current = ""
while 0x30 <= ord(char) <= 0x3F:
# Parse list of integer parameters
sequence += char
if char.isdigit():
current += char
else:
params.append(min(int(current or 0), 9999))
if char == ";":
current = ""
char = yield
if current:
params.append(min(int(current or 0), 9999))
# then any number of "intermediate bytes"
while 0x20 <= ord(char) <= 0x2F:
sequence += char
char = yield
# finally by a single "final byte"
if 0x40 <= ord(char) <= 0x7E:
sequence += char
# Check if that escape sequence was a style:
if char == "m":
self._select_graphic_rendition(params)
style = self._create_style_string()
# Otherwise print a zero-width control sequence
else:
formatted_text.append(("[ZeroWidthEscape]", sequence))
continue
formatted_text.append((style, sequence))
``` |
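The carriage-return substitution in `ANSI.__init__` can be exercised on its own: it keeps only the text after the final `\r` on each line, emulating how a terminal overwrites progress-bar output. A standalone, runnable demo:

```python
import re

value = "Downloading:  10%\rDownloading: 100%\ndone\n"
value = value.replace("\r\n", "\n")  # normalize Windows-style newlines first
value = re.sub("^.*\\r(?!\\n)", "", value, 0, re.MULTILINE)
print(value)
# Downloading: 100%
# done
```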
{
"source": "joouha/ranger_tmux",
"score": 3
} |
#### File: ranger_tmux/ranger_tmux/dropdown.py
```python
from ranger.api.commands import Command
from ranger_tmux.__main__ import tmux_keybindings
from . import util
SETTINGS = {
"tmux_dropdown_percent": {"type": int, "default": 60},
"tmux_dropdown_animate": {"type": bool, "default": True},
"tmux_dropdown_duration": {"type": int, "default": 100},
}
class install_tmux_dropdown_shortcut(Command):
def execute(self):
def callback(answer):
if answer == "y":
tmux_cmds = tmux_keybindings(install=True)
# Add shortcut to current session
for cmd in tmux_cmds:
util.tmux(*cmd)
util.tmux("display", "Tmux shortcut for drop-down ranger installed")
self.fm.ui.console.ask(
"Are you sure you want to install the drop-down ranger key-binding in"
" ~/.tmux.conf? (y/n)",
callback,
"yn",
)
def init(fm, *args):
fm.execute_console(
'map xh eval fm.execute_console("install_tmux_dropdown_shortcut")'
)
```
#### File: ranger_tmux/ranger_tmux/splits.py
```python
def init(fm, *args):
"""Extra tmux key-bindings to splt tmux windows."""
fm.execute_console("map x- shell tmux split-window -v -c %d")
fm.execute_console("map x| shell tmux split-window -h -c %d")
``` |
{
"source": "joov/movidius",
"score": 2
} |
#### File: joov/movidius/backend.py
```python
def backend_factory(backend_id):
backend_index = int(backend_id)
if backend_index == 0:
print('Using Keras/Tensorflow backend.')
return _KerasBackend()
elif backend_index == 1:
print('Using Movidius NCS backend.')
return _YoloV2NCS_Backend()
elif backend_index == 9:
print('Using fake backend.')
return _FakeBackend()
class _KerasBackend(object):
def __init__(self):
from vendor.keras_yolo3.yolo import YOLO
self.yolo = YOLO()
def __del__(self):
self.yolo.close_session()
def process_frame(self, frame):
import numpy as np
from PIL import Image
image = Image.fromarray(frame)
image = self.yolo.detect_image(image)
return np.asarray(image)
def detect(self, detect_type):
return detect_type in self.yolo.get_predicted_classes()
class _YoloV2NCS_Backend(object):
def __init__(self):
from vendor.YoloV2NCS.detectionExample.ObjectWrapper import ObjectWrapper
self.wrapper = ObjectWrapper('vendor/YoloV2NCS/graph')
self.results = []
def process_frame(self, frame):
from vendor.YoloV2NCS.detectionExample.Visualize import Visualize
self.results = self.wrapper.Detect(frame)
return Visualize(frame, self.results)
def detect(self, detect_type):
return any(x.name == detect_type for x in self.results)
class _FakeBackend(object):
def __init__(self):
pass
def process_frame(self, frame):
return frame
def detect(self, detect_type):
import random
return random.randint(0, 1000) < 50
```
#### File: joov/movidius/run.py
```python
import argparse
import cv2
import signal
import util.environ_check
from util.camera import camera_factory
from util.window import Window
from util.sense_hat import Hat
from backend import backend_factory
from detection_fsm import DetectionFSM
class Cleanup(object):
def __init__(self):
self._stop_now = False
signal.signal(signal.SIGINT, self._handler)
signal.signal(signal.SIGTERM, self._handler)
@property
def stop_now(self):
return self._stop_now
def _handler(self, signum, frame):
self._stop_now = True
print('Signal %d' % signum)
def main(camera_id, backend_id, show_fps, detect_type, use_led,
play_speech):
backend = backend_factory(backend_id)
camera = camera_factory(camera_id)
camera.open()
window = Window(show_fps)
window.open()
hat = None
if use_led:
hat = Hat()
hat.open()
cleanup = Cleanup()
detection_fsm = DetectionFSM(play_speech)
while not cleanup.stop_now:
ret, frame = camera.read()
if not ret:
break
detected = detection_fsm.update_status(detect_type and backend.detect(detect_type))
window.show_frame(backend.process_frame(frame),
detect_type if detected else None)
if hat:
hat.update_led(detected)
key = cv2.waitKey(5) & 0xFF
if key == ord('q') or key == 27: # Pressing 'q' or ESC.
break
if hat:
hat.close()
window.close()
camera.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Show camera preview")
# -1 selects default camera.
parser.add_argument('-c', '--camera', dest='camera_id', default='-1',
help='Set camera id.')
parser.add_argument('-b', '--backend', dest='backend_id', default='0',
help='Set backend id.')
parser.add_argument('-f', '--fps', dest='show_fps', default=False,
action='store_true', help='Show FPS info.')
parser.add_argument('-d', '--detect', dest='detect_type', default=None,
help='Set object type to detect.')
parser.add_argument('-l', '--led', dest='use_led', default=False,
action='store_true', help='Use Raspberry Pi Sense HAT Led.')
parser.add_argument('-s', '--speech', dest='play_speech', default=False,
action='store_true', help='Play speech on detection')
args = parser.parse_args()
main(args.camera_id, args.backend_id, args.show_fps, args.detect_type, args.use_led,
args.play_speech)
```
#### File: movidius/util/text.py
```python
import cv2
def draw_text_in_box(frame, box_top, box_right, box_bottom, box_left,
font_face, font_scale, font_thickness,
text, text_color, fill_color):
cv2.rectangle(frame, (box_left, box_top), (box_right, box_bottom),
fill_color, -1)
text_size, baseline = cv2.getTextSize(text, font_face, font_scale, font_thickness)
text_width, text_height = text_size[0], text_size[1]
text_x = int(box_left + (box_right - box_left - text_width) / 2)
text_y = int(box_top + (box_bottom - box_top - text_height) / 2) + text_height
cv2.putText(frame, text, (text_x, text_y), font_face, font_scale,
text_color, font_thickness)
``` |
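A usage sketch for `draw_text_in_box`: it fills the box with a solid colour and centres the label inside it. This assumes OpenCV and NumPy are installed and that `util.text` is importable from the repo root; the frame size, colours, and output filename are arbitrary.

```python
import cv2
import numpy as np

from util.text import draw_text_in_box

frame = np.zeros((240, 320, 3), dtype=np.uint8)  # blank 320x240 BGR frame
draw_text_in_box(
    frame,
    box_top=10, box_right=310, box_bottom=50, box_left=10,
    font_face=cv2.FONT_HERSHEY_SIMPLEX, font_scale=0.6, font_thickness=1,
    text="person", text_color=(255, 255, 255), fill_color=(0, 128, 0),
)
cv2.imwrite("label.png", frame)  # white "person" centred on a green box
```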
{
"source": "Joovvhan/dcase2020_task1_baseline",
"score": 3
} |
#### File: Joovvhan/dcase2020_task1_baseline/model_size_calculation.py
```python
import numpy
import dcase_util
def get_keras_model_size(keras_model, verbose=True, ui=None, excluded_layers=None):
"""Calculate keras model size (non-zero parameters on disk)
Parameters
----------
keras_model : keras.models.Model
keras model for the size calculation
verbose : bool
Print layer by layer information
Default value True
ui : dcase_util.ui.FancyLogger or dcase_util.ui.FancyPrinter
Print handler
Default value None
excluded_layers : list
List of layers to be excluded from the calculation
Default value [keras.layers.normalization.BatchNormalization, kapre.time_frequency.Melspectrogram]
Returns
-------
nothing
"""
parameters_count = 0
parameters_count_nonzero = 0
parameters_bytes = 0
parameters_bytes_nonzero = 0
if verbose and ui is None:
# Initialize print handler
ui = dcase_util.ui.ui.FancyPrinter()
if excluded_layers is None:
# Populate excluded_layers list
excluded_layers = []
try:
import keras
except ImportError:
raise ImportError('Unable to import keras module. You can install it with `pip install keras`.')
excluded_layers.append(
keras.layers.normalization.BatchNormalization
)
# Include kapre layers only if kapre is installed
try:
import kapre
excluded_layers.append(
kapre.time_frequency.Melspectrogram
)
except ImportError:
pass
if verbose:
# Set up printing
ui.row_reset()
ui.row(
'Name', 'Param', 'NZ Param', 'Size', 'NZ Size',
widths=[30, 12, 12, 30, 30],
types=['str', 'int', 'int', 'str', 'str'],
separators=[True, False, True, False]
)
ui.row_sep()
for l in keras_model.layers:
# Loop layer by layer
current_parameters_count = 0
current_parameters_count_nonzero = 0
current_parameters_bytes = 0
current_parameters_bytes_nonzero = 0
weights = l.get_weights()
for w in weights:
current_parameters_count += numpy.prod(w.shape)
current_parameters_count_nonzero += numpy.count_nonzero(w.flatten())
if w.dtype in ['single', 'float32', 'int32', 'uint32']:
bytes = 32 / 8
elif w.dtype in ['float16', 'int16', 'uint16']:
bytes = 16 / 8
elif w.dtype in ['double', 'float64', 'int64', 'uint64']:
bytes = 64 / 8
elif w.dtype in ['int8', 'uint8']:
                bytes = 8 / 8  # int8/uint8 weights take one byte each
else:
print('UNKNOWN TYPE', w.dtype)
current_parameters_bytes += numpy.prod(w.shape) * bytes
current_parameters_bytes_nonzero += numpy.count_nonzero(w.flatten()) * bytes
if l.__class__ not in excluded_layers:
parameters_count += current_parameters_count
parameters_count_nonzero += current_parameters_count_nonzero
parameters_bytes += current_parameters_bytes
parameters_bytes_nonzero += current_parameters_bytes_nonzero
if verbose:
ui.row(
l.name,
current_parameters_count,
current_parameters_count_nonzero,
dcase_util.utils.get_byte_string(current_parameters_bytes, show_bytes=False),
dcase_util.utils.get_byte_string(current_parameters_bytes_nonzero, show_bytes=False),
)
if verbose:
ui.row_sep()
ui.row(
'Total',
parameters_count,
parameters_count_nonzero,
dcase_util.utils.get_byte_string(parameters_bytes, show_bytes=True),
dcase_util.utils.get_byte_string(parameters_bytes_nonzero, show_bytes=True),
)
ui.line()
return {
'parameters': {
'all': {
'count': parameters_count,
'bytes': parameters_bytes
},
'non_zero': {
'count': parameters_count_nonzero,
'bytes': parameters_bytes_nonzero
}
}
}
``` |
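A usage sketch for `get_keras_model_size` on a tiny model, assuming a standalone Keras 2.x and `dcase_util` are installed and the script runs from the repo root. The layer sizes here are arbitrary.

```python
from keras.layers import Dense
from keras.models import Sequential

from model_size_calculation import get_keras_model_size

model = Sequential([Dense(10, input_shape=(4,)), Dense(2)])
stats = get_keras_model_size(model, verbose=False)
print(stats["parameters"]["all"]["count"])       # total parameter count
print(stats["parameters"]["non_zero"]["bytes"])  # non-zero size in bytes
```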
{
"source": "joowani/colorpedia",
"score": 3
} |
#### File: colorpedia/colorpedia/inputs.py
```python
import re
from typing import Optional, Union
from colorpedia.exceptions import InputValueError
from colorpedia.hexcodes import HEX_REGEX
def validate_indent_width(value: int) -> int:
if type(value) == int and 0 <= value <= 8:
return value
raise InputValueError("indent width", "an integer between 0 and 8")
def validate_boolean_flag(value: Optional[bool]) -> Optional[bool]:
if value is True or value is False or value is None:
return value
raise InputValueError("boolean flag", "True, False or no value")
def validate_shades_count(value: Union[bool, int]) -> int:
if (type(value) in (bool, int)) and 0 <= value <= 100:
return value
raise InputValueError("shades count", "an integer between 0 and 100")
def validate_editor(value: Optional[str]) -> Optional[str]:
if value is None or (type(value) == str and len(value) > 0 and " " not in value):
return value
raise InputValueError("editor", "a shell-executable command without whitespaces")
def validate_rgb_value(value: int) -> int:
if type(value) == int and 0 <= value <= 255:
return value
raise InputValueError("RGB value", "an integer between 0 and 255")
def normalize_degree_angle(value: Union[float, int]) -> float:
if (type(value) in (float, int)) and 0 <= value <= 360:
return value / 360
raise InputValueError("degree angle", "a float between 0.0 and 360.0")
def normalize_hex_code(value: Union[int, str]) -> str:
if isinstance(value, str) and re.search(HEX_REGEX, value):
return value if len(value) == 6 else "".join(c * 2 for c in value)
raise InputValueError("hex code", f"a string matching {HEX_REGEX}")
def normalize_percent_value(value: Union[float, int]) -> float:
if (type(value) in (float, int)) and 0 <= value <= 100:
return value / 100
raise InputValueError("percent value", "a float between 0.0 and 100.0")
```
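Expected behaviour of the normalizers above, assuming colorpedia is installed (the error message format matches `test_input_error` below):

```python
from colorpedia.exceptions import InputValueError
from colorpedia.inputs import normalize_hex_code, normalize_percent_value

print(normalize_hex_code("09c"))    # "0099cc" (3-digit codes are doubled up)
print(normalize_percent_value(50))  # 0.5
try:
    normalize_percent_value(150)
except InputValueError as err:
    print(err)  # Bad percent value (expecting a float between 0.0 and 100.0)
```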
#### File: colorpedia/tests/test_exceptions.py
```python
from colorpedia.exceptions import (
ConfigFileError,
ConfigKeyError,
ConfigValueError,
InputValueError,
)
def test_config_file_error() -> None:
error = ConfigFileError("A")
assert str(error) == "A"
error = ConfigFileError("A", FileNotFoundError(1, "B"))
assert str(error) == "A: B (errno: 1)"
error = ConfigFileError("A", ValueError("B"))
assert str(error) == "A: B"
def test_config_key_error() -> None:
error = ConfigKeyError("A")
assert str(error) == 'Bad configuration key "A"'
def test_config_value_error() -> None:
error = ConfigValueError("A", "B")
assert str(error) == 'Bad value for configuration key "A" (expecting B)'
def test_input_error() -> None:
error = InputValueError("A", "B")
assert str(error) == "Bad A (expecting B)"
``` |
{
"source": "joowani/dtags",
"score": 2
} |
#### File: dtags/commands/activate.py
```python
import sys
from typing import List, Optional
from dtags.commons import dtags_command, get_argparser
USAGE = "dtags-activate {bash,fish,zsh}"
BASH_ACTIVATE_SCRIPT = """
unalias tag > /dev/null 2>&1
unalias untag > /dev/null 2>&1
unalias tags > /dev/null 2>&1
unalias d > /dev/null 2>&1
unalias run > /dev/null 2>&1
d() {
if [[ $# -eq 1 ]] && [[ -d $1 ]]
then
cd "${1}"
elif [[ $# -eq 1 ]] && [[ $1 = - ]]
then
cd -
else
dtags-d "$@"
if [[ -f ~/.dtags/destination ]]
then
cd "$(cat ~/.dtags/destination)"
rm -f ~/.dtags/destination
fi
fi
}
_dtags_elem_not_in () {
local e match="$1"
shift
for e; do [[ "$e" == "$match" ]] && return 1; done
return 0
}
_dtags_d() {
declare CWORD="${COMP_WORDS[COMP_CWORD]}"
if [[ -f ~/.dtags/completion ]]
then
COMPREPLY+=($(compgen -W "$(cat ~/.dtags/completion)" -- "${CWORD}"))
fi
if _dtags_elem_not_in "-t" "${COMP_WORDS[@]}"
then
COMPREPLY+=($(compgen -W "-t" -- "${CWORD}"))
fi
if [[ ${COMP_CWORD} -eq 1 ]]
then
COMPREPLY+=($(compgen -W "-h --help -v --version" -- "${CWORD}"))
fi
}
_dtags_tag() {
declare CWORD="${COMP_WORDS[COMP_CWORD]}"
if [[ -f ~/.dtags/completion ]]
then
COMPREPLY+=($(compgen -W "$(cat ~/.dtags/completion)" -- "${CWORD}"))
fi
if _dtags_elem_not_in "-t" "${COMP_WORDS[@]}"
then
COMPREPLY+=($(compgen -W "-t" -- "${CWORD}"))
fi
COMPREPLY+=($(compgen -W "-y --yes -r --replace" -- "${CWORD}"))
if [[ ${COMP_CWORD} -eq 1 ]]
then
COMPREPLY+=($(compgen -W "-h --help -v --version" -- "${CWORD}"))
fi
}
_dtags_untag() {
declare CWORD="${COMP_WORDS[COMP_CWORD]}"
if [[ -f ~/.dtags/completion ]]
then
COMPREPLY+=($(compgen -W "$(cat ~/.dtags/completion)" -- "${CWORD}"))
fi
if _dtags_elem_not_in "-t" "${COMP_WORDS[@]}"
then
COMPREPLY+=($(compgen -W "-t" -- "${CWORD}"))
fi
COMPREPLY+=($(compgen -W "-y --yes" -- "${CWORD}"))
if [[ ${COMP_CWORD} -eq 1 ]]
then
COMPREPLY+=($(compgen -W "-h --help -v --version" -- "${CWORD}"))
fi
}
_dtags_tags() {
declare CWORD="${COMP_WORDS[COMP_CWORD]}"
if [[ -f ~/.dtags/completion ]]
then
COMPREPLY+=($(compgen -W "$(cat ~/.dtags/completion)" -- "${CWORD}"))
fi
COMPREPLY+=($(compgen -W "-j --json -r --reverse -y --yes" -- "${CWORD}"))
COMPREPLY+=($(compgen -W "-c --clean -p --purge -t" -- "${CWORD}"))
if [[ ${COMP_CWORD} -eq 1 ]]
then
COMPREPLY+=($(compgen -W "-h --help -v --version" -- "${CWORD}"))
fi
}
_dtags_run() {
declare CWORD="${COMP_WORDS[COMP_CWORD]}"
if [[ -f ~/.dtags/completion ]]
then
COMPREPLY+=($(compgen -W "$(cat ~/.dtags/completion)" -- "${CWORD}"))
fi
if _dtags_elem_not_in "-c" "${COMP_WORDS[@]}"
then
COMPREPLY+=($(compgen -W "-c" -- "${CWORD}"))
fi
if [[ ${COMP_CWORD} -eq 1 ]]
then
COMPREPLY+=($(compgen -W "-h --help -v --version" -- "${CWORD}"))
fi
}
complete -d -F _dtags_tag tag
complete -d -F _dtags_untag untag
complete -F _dtags_tags tags
complete -d -F _dtags_d d
complete -d -F _dtags_run run
"""
ZSH_ACTIVATE_SCRIPT = BASH_ACTIVATE_SCRIPT
FISH_ACTIVATE_SCRIPT = """
functions -e tag > /dev/null 2>&1
functions -e untag > /dev/null 2>&1
functions -e tags > /dev/null 2>&1
functions -e d > /dev/null 2>&1
functions -e run > /dev/null 2>&1
function d
if [ (count $argv) -eq 1 ]
if test -d $argv[1]
cd $argv[1]
return 0
else if [ $argv[1] = "-" ]
cd -
return 0
end
end
dtags-d $argv
if test -f ~/.dtags/destination
cd (cat ~/.dtags/destination)
rm -f ~/.dtags/destination
end
end
function __dtags_cond_no_args
set cmd (commandline -opc)
if [ (count $cmd) -eq 1 ]
return 0
else
return 1
end
end
function __dtags_complete_tags
if test -f ~/.dtags/completion
string split ' ' (cat ~/.dtags/completion)
end
end
complete -c tag -a '(__dtags_complete_tags)' -d 'Tag'
complete -c tag -a '(__fish_complete_directories)'
complete -c tag -n '__dtags_cond_no_args' -s h -l help -d 'Flag'
complete -c tag -n '__dtags_cond_no_args' -s v -l version -d 'Flag'
complete -c tag -s t -d 'Flag'
complete -c tag -s y -l yes -d 'Flag'
complete -c tag -s r -l replace -d 'Flag'
complete -c untag -a '(__dtags_complete_tags)' -d 'Tag'
complete -c untag -a '(__fish_complete_directories)'
complete -c untag -n '__dtags_cond_no_args' -s h -l help -d 'Flag'
complete -c untag -n '__dtags_cond_no_args' -s v -l version -d 'Flag'
complete -c untag -s t -d 'Flag'
complete -c untag -s y -l yes -d 'Flag'
complete -c tags -a '(__dtags_complete_tags)' -d 'Tag'
complete -c tags -n '__dtags_cond_no_args' -s h -l help -d 'Flag'
complete -c tags -n '__dtags_cond_no_args' -s v -l version -d 'Flag'
complete -c tags -s t -d 'Flag'
complete -c tags -s j -l json -d 'Flag'
complete -c tags -s c -l clean -d 'Flag'
complete -c tags -s p -l purge -d 'Flag'
complete -c tags -s r -l reverse -d 'Flag'
complete -c tags -s y -l yes -d 'Flag'
complete -c d -a '(__dtags_complete_tags)' -d 'Tag'
complete -c d -a '(__fish_complete_directories)'
complete -c d -n '__dtags_cond_no_args' -s h -l help -d 'Flag'
complete -c d -n '__dtags_cond_no_args' -s v -l version -d 'Flag'
complete -c d -s t -l tag -d 'Flag'
complete -c run -a '(__dtags_complete_tags)' -d 'Tag'
complete -c run -a '(__fish_complete_directories)'
complete -c run -n '__dtags_cond_no_args' -s h -l help -d 'Flag'
complete -c run -n '__dtags_cond_no_args' -s v -l version -d 'Flag'
complete -c run -s c -l cmd -d 'Flag'
"""
@dtags_command
def execute(args: Optional[List[str]] = None) -> None:
parser = get_argparser(
prog="dtags-activate",
desc="Activate dtags",
usage=USAGE,
)
parser.add_argument(
"shell",
nargs="?",
choices=("bash", "fish", "zsh"),
const="bash",
default="bash",
help="Name of the shell",
)
parsed_args = parser.parse_args(sys.argv[1:] if args is None else args)
if parsed_args.shell == "bash":
print(BASH_ACTIVATE_SCRIPT)
elif parsed_args.shell == "fish":
print(FISH_ACTIVATE_SCRIPT)
else:
print(ZSH_ACTIVATE_SCRIPT)
```
#### File: dtags/commands/d.py
```python
import sys
from pathlib import Path
from typing import List, Optional
from dtags import style
from dtags.commons import dtags_command, get_argparser
from dtags.exceptions import DtagsError
from dtags.files import load_config_file, save_destination_file
USAGE = "d [-t] DEST"
DESCRIPTION = f"""
Change directory by path or tag.
Tag names are automatically slugified (e.g. "foo bar" to "foo-bar").
Paths take precedence over tags on name collisions.
examples:
# change directory by path
{style.command("d /home/user/foo")}
# change directory by tag
{style.command("d my-tag")}
# use -t/--tag to always interpret the argument as a tag
{style.command("d -t foo")}
"""
@dtags_command
def execute(args: Optional[List[str]] = None) -> None:
parser = get_argparser(prog="d", desc=DESCRIPTION, usage=USAGE)
parser.add_argument(
"destination",
metavar="DEST",
help="directory path or tag",
)
parser.add_argument(
"-t",
"--tag",
action="store_true",
dest="tag",
help="assume the argument is a tag",
)
parsed_args = parser.parse_args(sys.argv[1:] if args is None else args)
if parsed_args.destination:
change_directory(parsed_args.destination, parsed_args.tag)
def change_directory(dest: str, is_tag: bool = False) -> None:
config = load_config_file()
tag_config = config["tags"]
dirpaths = {dirpath for dirpath, tags in tag_config.items() if dest in tags}
if not is_tag:
path = Path(dest).expanduser()
if path.is_dir():
dirpaths.add(path.resolve())
if not dirpaths:
raise DtagsError(f"Invalid destination: {dest}")
elif len(dirpaths) == 1:
save_destination_file(dirpaths.pop())
else: # pragma: no cover
save_destination_file(prompt_user_selection(sorted(dirpaths)))
def prompt_user_selection(dirpaths: List[Path]) -> Path: # pragma: no cover
for index, dirpath in enumerate(dirpaths, start=1):
print(f"{index}. {style.path(dirpath)}")
while True:
print(f"\nSelect directory [1 - {len(dirpaths)}]: ", end="")
try:
return dirpaths[int(input()) - 1]
except (ValueError, IndexError):
print(f"Please select an integer from 1 to {len(dirpaths)}")
```
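The Python side never changes the shell's working directory itself; it only writes the resolved path to `~/.dtags/destination`, which the `d()` wrapper from `dtags-activate` then `cd`s into and deletes. A sketch of that handoff, assuming dtags is installed (note this touches `~/.dtags` on disk):

```python
from dtags.commands.d import change_directory
from dtags.files import DEST_FILE, get_file_path

change_directory("/tmp", is_tag=False)       # resolves a path or a tag
print(get_file_path(DEST_FILE).read_text())  # e.g. /tmp
# The d() shell function now runs: cd "$(cat ~/.dtags/destination)"
# and removes the file afterwards.
```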
#### File: dtags/commands/tag.py
```python
import sys
from pathlib import Path
from typing import List, Optional, Set, Tuple
from dtags import style
from dtags.commons import (
dtags_command,
get_argparser,
normalize_dirs,
normalize_tag,
normalize_tags,
prompt_user,
)
from dtags.files import load_config_file, save_config_file
USAGE = "tag [-y] [-r] DIR [DIR ...] -t TAG [TAG ...]"
DESCRIPTION = f"""
Tag directories.
Tag names are automatically slugified (e.g. "foo bar" to "foo-bar").
If no tags are specified, directory basenames are used instead.
examples:
# tag ~/Foo_Bar with "foo-bar" (slugified)
{style.command("tag ~/Foo_Bar")}
# tag ~/foo and ~/bar with "work" and "app" (many-to-many)
{style.command("tag ~/foo ~/bar -t work app")}
# replace existing tags with -r/--replace
{style.command("tag -r ~/foo -t work")}
# skip confirmation prompts with -y/--yes
{style.command("tag -y ~/foo -t work app")}
"""
@dtags_command
def execute(args: Optional[List[str]] = None) -> None:
parser = get_argparser(prog="tag", desc=DESCRIPTION, usage=USAGE)
parser.add_argument(
"-y",
"--yes",
action="store_true",
dest="yes",
help="assume yes to prompts",
)
parser.add_argument(
"-r",
"--replace",
action="store_true",
dest="replace",
help="replace existing tags",
)
parser.add_argument(
"dirs",
metavar="DIR",
nargs="+",
help="directories or tags",
)
parser.add_argument(
"-t",
dest="tags",
metavar="TAG",
nargs="+",
help="tag names",
)
parsed_args = parser.parse_args(sys.argv[1:] if args is None else args)
tag_directories(
dirs=parsed_args.dirs,
tags=parsed_args.tags,
replace=parsed_args.replace,
skip_prompts=parsed_args.yes,
)
def tag_directories(
dirs: Optional[List[str]] = None,
tags: Optional[List[str]] = None,
replace: bool = False,
skip_prompts: bool = True,
) -> None:
config = load_config_file()
tag_config = config["tags"]
norm_dirs = normalize_dirs(dirs)
norm_tags = normalize_tags(tags)
diffs: List[Tuple[Path, Set[str], Set[str]]] = []
for dirpath in sorted(norm_dirs):
cur_tags = tag_config.get(dirpath, set())
new_tags = norm_tags or {normalize_tag(dirpath.name)}
add_tags = new_tags - cur_tags
del_tags = (cur_tags - new_tags) if replace else set()
if add_tags or del_tags:
diffs.append((dirpath, add_tags, del_tags))
tag_config[dirpath] = cur_tags.union(add_tags) - del_tags
if not diffs:
print("Nothing to do")
else:
for dirpath, add_tags, del_tags in diffs:
print(style.diff(dirpath, add_tags, del_tags))
if skip_prompts or prompt_user():
save_config_file(config)
print("Tags saved successfully")
```
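The diff computed in `tag_directories` is plain set arithmetic; the `-r/--replace` flag only controls whether existing tags can be dropped. A standalone illustration:

```python
cur_tags = {"work", "old"}
new_tags = {"work", "app"}

add_tags = new_tags - cur_tags  # {'app'}
del_tags = cur_tags - new_tags  # {'old'} with -r/--replace, else set()

print(cur_tags.union(add_tags) - del_tags)  # {'work', 'app'} (replace)
print(cur_tags.union(add_tags) - set())     # {'work', 'app', 'old'} (default)
```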
#### File: dtags/dtags/files.py
```python
import json
from pathlib import Path
from typing import Dict, Set
from dtags.commons import normalize_tags
from dtags.exceptions import DtagsError
CONFIG_ROOT = ".dtags"
CONFIG_FILE = "config.json"
COMP_FILE = "completion" # used for tag name completion
DEST_FILE = "destination" # used for d command
ConfigType = Dict[str, Dict[Path, Set[str]]]
def get_file_path(filename: str) -> Path:
return Path.home() / CONFIG_ROOT / filename
def get_new_config() -> ConfigType:
return {"tags": {}}
def load_config_file() -> ConfigType:
config_file_path = get_file_path(CONFIG_FILE)
try:
with open(config_file_path, "r") as fp:
config_data = json.load(fp)
except FileNotFoundError:
new_data = get_new_config()
save_config_file(new_data)
return new_data
    except ValueError as err:  # pragma: no cover
raise DtagsError(f"Bad data in {config_file_path.as_posix()}: {err}")
else:
tag_config = config_data["tags"]
return {
"tags": {
Path(dirpath): normalize_tags(tags)
for dirpath, tags in tag_config.items()
}
}
def save_config_file(config: ConfigType) -> None:
config_file_path = get_file_path(CONFIG_FILE)
config_file_path.parent.mkdir(mode=0o755, exist_ok=True)
config_data = {
"tags": {
dirpath.as_posix(): sorted(tags)
for dirpath, tags in config["tags"].items()
if len(tags) > 0
}
}
with open(config_file_path, "w") as fp:
json.dump(config_data, fp, sort_keys=True, indent=2)
save_completion_file(config)
def save_completion_file(config: ConfigType) -> None:
all_tags: Set[str] = set()
for tags in config["tags"].values():
all_tags.update(tags)
with open(get_file_path(COMP_FILE), "w") as fp:
fp.write(" ".join(all_tags))
def save_destination_file(dirpath: Path) -> None:
with open(get_file_path(DEST_FILE), "w") as fp:
fp.write(dirpath.as_posix())
```
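How the in-memory config maps onto `~/.dtags/config.json`: `Path` keys become POSIX strings and tag sets become sorted lists, mirroring the dict comprehension in `save_config_file`. A standalone round-trip sketch:

```python
import json
from pathlib import Path

config = {"tags": {Path("/home/user/foo"): {"work", "app"}}}
config_data = {
    "tags": {
        dirpath.as_posix(): sorted(tags)
        for dirpath, tags in config["tags"].items()
        if len(tags) > 0
    }
}
print(json.dumps(config_data, sort_keys=True, indent=2))
# {"tags": {"/home/user/foo": ["app", "work"]}}
```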
#### File: dtags/tests/helpers.py
```python
import re
from typing import List
from dtags.files import COMP_FILE, DEST_FILE, get_file_path
ANSI_ESCAPE = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
def clean_str(value: str) -> str:
return ANSI_ESCAPE.sub("", value)
def load_completion() -> List[str]:
with open(get_file_path(COMP_FILE)) as fp:
return sorted(tag for tag in fp.read().split(" ") if tag)
def load_destination() -> str:
with open(get_file_path(DEST_FILE)) as fp:
return fp.read().strip()
``` |
{
"source": "joowlim/pytorch-3dunet",
"score": 2
} |
#### File: joowlim/pytorch-3dunet/predict.py
```python
import os
import h5py
import numpy as np
import torch
from datasets.hdf5 import get_test_loaders
from unet3d import utils
from unet3d.config import load_config
from unet3d.model_191127 import get_model
from unet3d.metrics import get_evaluation_metric
logger = utils.get_logger('UNet3DPredictor')
def predict_in_memory(model, data_loader, output_file, config):
"""
    Compute prediction masks by applying the model on the given dataset
Args:
model (Unet3D): trained 3D UNet model used for prediction
data_loader (torch.utils.data.DataLoader): input data loader
output_file (str): path to the output H5 file
config (dict): global config dict
    Returns:
        None. The normalized prediction maps are saved to the output H5 file.
    """
def _volume_shape(dataset):
# TODO: support multiple internal datasets
raw = dataset.raws[0]
if raw.ndim == 3:
return raw.shape
else:
return raw.shape[1:]
out_channels = config['model'].get('out_channels')
if out_channels is None:
out_channels = config['model']['dt_out_channels']
prediction_channel = config.get('prediction_channel', None)
if prediction_channel is not None:
logger.info(f"Using only channel '{prediction_channel}' from the network output")
device = config['device']
output_heads = config['model'].get('output_heads', 1)
logger.info(f'Running prediction on {len(data_loader)} patches...')
    # dimensionality of the output (CxDxHxW)
volume_shape = _volume_shape(data_loader.dataset)
if prediction_channel is None:
prediction_maps_shape = (out_channels,) + volume_shape
else:
# single channel prediction map
prediction_maps_shape = (1,) + volume_shape
logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')
# initialize the output prediction arrays
prediction_maps = [np.zeros(prediction_maps_shape, dtype='float32') for _ in range(output_heads)]
# initialize normalization mask in order to average out probabilities of overlapping patches
normalization_masks = [np.zeros(prediction_maps_shape, dtype='float32') for _ in range(output_heads)]
# Sets the module in evaluation mode explicitly, otherwise the final Softmax/Sigmoid won't be applied!
model.eval()
# Run predictions on the entire input dataset
with torch.no_grad():
for patch, index in data_loader:
logger.info(f'Predicting slice:{index}')
# save patch index: (C,D,H,W)
if prediction_channel is None:
channel_slice = slice(0, out_channels)
else:
channel_slice = slice(0, 1)
index = (channel_slice,) + tuple(index)
# send patch to device
patch = patch.to(device)
# forward pass
predictions, _ = model(patch)
# wrap predictions into a list if there is only one output head from the network
if output_heads == 1:
predictions = [predictions]
for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps,
normalization_masks):
# squeeze batch dimension and convert back to numpy array
prediction = prediction.squeeze(dim=0).cpu().numpy()
if prediction_channel is not None:
# use only the 'prediction_channel'
logger.info(f"Using channel '{prediction_channel}'...")
prediction = np.expand_dims(prediction[prediction_channel], axis=0)
# unpad in order to avoid block artifacts in the output probability maps
u_prediction, u_index = utils.unpad(prediction, index, volume_shape)
# accumulate probabilities into the output prediction array
prediction_map[u_index] += u_prediction
# count voxel visits for normalization
normalization_mask[u_index] += 1
# save probability maps
prediction_datasets = _get_dataset_names(config, output_heads, prefix='predictions')
##<< [ SC : Evaluate result ]
with h5py.File(output_file, 'w') as f:
for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks,
prediction_datasets):
#print('prediction map shape:',prediction_map.shape)
prediction_map = prediction_map / normalization_mask
logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
f.create_dataset(prediction_dataset, data=prediction_map, compression="gzip")
# exit(0)
def predict(model, data_loader, output_file, config):
"""
    Compute prediction masks by applying the model on the given dataset.
The predictions are saved in the output H5 file on a patch by patch basis.
If your dataset fits into memory use predict_in_memory() which is much faster.
Args:
model (Unet3D): trained 3D UNet model used for prediction
data_loader (torch.utils.data.DataLoader): input data loader
output_file (str): path to the output H5 file
config (dict): global config dict
"""
def _volume_shape(dataset):
# TODO: support multiple internal datasets
raw = dataset.raws[0]
if raw.ndim == 3:
return raw.shape
else:
return raw.shape[1:]
out_channels = config['model'].get('out_channels')
if out_channels is None:
out_channels = config['model']['dt_out_channels']
prediction_channel = config.get('prediction_channel', None)
if prediction_channel is not None:
logger.info(f"Using only channel '{prediction_channel}' from the network output")
device = config['device']
output_heads = config['model'].get('output_heads', 1)
logger.info(f'Running prediction on {len(data_loader)} patches...')
    # dimensionality of the output (CxDxHxW)
volume_shape = _volume_shape(data_loader.dataset)
if prediction_channel is None:
prediction_maps_shape = (out_channels,) + volume_shape
else:
# single channel prediction map
prediction_maps_shape = (1,) + volume_shape
logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')
with h5py.File(output_file, 'w') as f:
# allocate datasets for probability maps
prediction_datasets = _get_dataset_names(config, output_heads, prefix='predictions')
prediction_maps = [
f.create_dataset(dataset_name, shape=prediction_maps_shape, dtype='float32', chunks=True,
compression='gzip')
for dataset_name in prediction_datasets]
# allocate datasets for normalization masks
normalization_datasets = _get_dataset_names(config, output_heads, prefix='normalization')
normalization_masks = [
f.create_dataset(dataset_name, shape=prediction_maps_shape, dtype='uint8', chunks=True,
compression='gzip')
for dataset_name in normalization_datasets]
# Sets the module in evaluation mode explicitly, otherwise the final Softmax/Sigmoid won't be applied!
model.eval()
# Run predictions on the entire input dataset
with torch.no_grad():
for patch, index in data_loader:
logger.info(f'Predicting slice:{index}')
# save patch index: (C,D,H,W)
if prediction_channel is None:
channel_slice = slice(0, out_channels)
else:
channel_slice = slice(0, 1)
index = (channel_slice,) + tuple(index)
# send patch to device
patch = patch.to(device)
# forward pass
                predictions, _ = model(patch)  # the second output is unused during inference
# wrap predictions into a list if there is only one output head from the network
if output_heads == 1:
predictions = [predictions]
for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps,
normalization_masks):
# squeeze batch dimension and convert back to numpy array
prediction = prediction.squeeze(dim=0).cpu().numpy()
if prediction_channel is not None:
# use only the 'prediction_channel'
logger.info(f"Using channel '{prediction_channel}'...")
prediction = np.expand_dims(prediction[prediction_channel], axis=0)
# unpad in order to avoid block artifacts in the output probability maps
u_prediction, u_index = utils.unpad(prediction, index, volume_shape)
# accumulate probabilities into the output prediction array
prediction_map[u_index] += u_prediction
# count voxel visits for normalization
normalization_mask[u_index] += 1
# normalize the prediction_maps inside the H5
for prediction_map, normalization_mask, prediction_dataset, normalization_dataset in zip(prediction_maps,
normalization_masks,
prediction_datasets,
normalization_datasets):
# TODO: iterate block by block
# split the volume into 4 parts and load each into the memory separately
logger.info(f'Normalizing {prediction_dataset}...')
z, y, x = volume_shape
mid_x = x // 2
mid_y = y // 2
prediction_map[:, :, 0:mid_y, 0:mid_x] /= normalization_mask[:, :, 0:mid_y, 0:mid_x]
prediction_map[:, :, mid_y:, 0:mid_x] /= normalization_mask[:, :, mid_y:, 0:mid_x]
prediction_map[:, :, 0:mid_y, mid_x:] /= normalization_mask[:, :, 0:mid_y, mid_x:]
prediction_map[:, :, mid_y:, mid_x:] /= normalization_mask[:, :, mid_y:, mid_x:]
logger.info(f'Deleting {normalization_dataset}...')
del f[normalization_dataset]
##<< [ SC : add result path to specify the path to save result files ]
def _get_output_file(dataset, result_path='', suffix='_predictions'):
    if not os.path.exists(result_path):
        os.makedirs(result_path)
    basename = os.path.basename(dataset.file_path)
    path = os.path.join(result_path, basename)
return f'{os.path.splitext(path)[0]}{suffix}.h5'
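# Worked example (hypothetical paths): if the dataset was loaded from
# '/data/sample.h5', then _get_output_file(dataset, result_path='results')
# returns 'results/sample_predictions.h5'.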
def _get_dataset_names(config, number_of_datasets, prefix='predictions'):
dataset_names = config.get('dest_dataset_name')
if dataset_names is not None:
if isinstance(dataset_names, str):
return [dataset_names]
else:
return dataset_names
else:
if number_of_datasets == 1:
return [prefix]
else:
return [f'{prefix}{i}' for i in range(number_of_datasets)]
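# Worked examples: with no 'dest_dataset_name' in the config,
# _get_dataset_names(config, 1) returns ['predictions'] and
# _get_dataset_names(config, 2) returns ['predictions0', 'predictions1'].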
def main():
# Load configuration
config = load_config()
# Create the model
model = get_model(config)
# Load model state
model_path = config['model_path']
logger.info(f'Loading model from {model_path}...')
# print("Model's state_dict:")
# for param_tensor in model.state_dict():
# print(param_tensor, "\t", model.state_dict()[param_tensor].size())
# # print ("model state dict : ", torch.load(model_path)['model_state_dict'])
utils.load_checkpoint(model_path, model)
logger.info(f"Sending the model to '{config['device']}'")
model = model.to(config['device'])
logger.info('Loading HDF5 datasets...')
store_predictions_in_memory = config.get('store_predictions_in_memory', True)
if store_predictions_in_memory:
        logger.info('Predictions will be stored in memory. Make sure you have enough RAM for your dataset.')
for test_loader in get_test_loaders(config):
logger.info(f"Processing '{test_loader.dataset.file_path}'...")
output_file = _get_output_file(test_loader.dataset, result_path=config['datasets']['output_path'][0])
# run the model prediction on the entire dataset and save to the 'output_file' H5
if store_predictions_in_memory:
predict_in_memory(model, test_loader, output_file, config)
else:
predict(model, test_loader, output_file, config)
if __name__ == '__main__':
main()
``` |
{
"source": "joowon-dm-snu/manta",
"score": 3
} |
#### File: manta_client/base/config.py
```python
from typing import Any, Dict, Tuple # , Union
# from pathlib import Path
import manta_client.util as util
# TODO: (kjw) need to think depth. ex) config.param1.param2 or config.param1.param2.params3...
class Config(object):
"""Config
Config object is used to save all hyperparams.
Config can be over-written with 2 stages.
- project config
- user control
Examples:
Basic
```python
mc.config.param = 0
```
From ArgumentParser
```python
parser = argparse.ArgumentParser()
parser.add_argument('--something', type=int, default=123)
args = parser.parse_args()
mc.config.something = 0
mc.config.update(args)
```
From yaml
```python
mc.config.update_yaml(yaml_path)
```
Input by initiation phase
```python
mc.init(config={'param1': 1, 'param2': 2})
```
"""
def __init__(self) -> None:
object.__setattr__(self, "_items", dict())
object.__setattr__(self, "_callback", None)
object.__setattr__(self, "_settings", None)
self._load_defaults()
def _load_defaults(self):
conf_dict = util.read_config_yaml(path="config_defaults.yaml")
self.update(conf_dict)
    def _assert_dict_values(self, v: Any) -> bool:
        """Config will be sent to the server in JSON format;
        all values should be JSON-serializable to avoid unintended changes.
        """
        return True
# TODO: add documentations
def _sanitize(self, k: str, v: Any) -> Tuple:
k = k.rstrip("_|-")
v = util.json_value_sanitize(v)
return k, v
# TODO: add documentations
def _sanitize_dict(self, config_dict: Dict) -> Dict:
sanitized = {}
self._assert_dict_values(config_dict)
for k, v in config_dict.items():
k, v = self._sanitize(k, v)
if isinstance(v, Dict):
sanitized[k] = self._sanitize_dict(v)
else:
sanitized[k] = v
return sanitized
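    # Illustrative sketch (assumes util.json_value_sanitize leaves plain ints and
    # strings unchanged): _sanitize_dict({'lr_': 0.1, 'opt': {'name-': 'adam'}})
    # would yield {'lr': 0.1, 'opt': {'name': 'adam'}} after stripping trailing '_|-'.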
def __setitem__(self, k, v):
self._assert_dict_values(v)
k, v = self._sanitize(k, v)
self._items[k] = v
def __setattr__(self, k, v):
return self.__setitem__(k, v)
def update(self, param):
if isinstance(param, str):
data = util.read_config_yaml(path=param)
else:
# TODO: (kjw): try-except usage
data = util.to_dict(param)
data = self._sanitize_dict(data)
self._items.update(data)
def __getitem__(self, k):
return self._items[k]
def __getattr__(self, k):
return self.__getitem__(k)
def __contains__(self, k):
return k in self._items
def keys(self):
return [k for k in self._items.keys() if not k.startswith("_")]
def values(self):
return [v for k, v in self._items.items() if not k.startswith("_")]
def items(self):
return [(k, v) for k, v in self._items.items() if not k.startswith("_")]
def set_callback(self, fn):
self._callback = fn
def get(self, *args):
return self._items.get(*args)
def as_dict(self):
return self._items
```
#### File: manta_client/cli/entry.py
```python
import click
import manta_client as mc
class RunGroup(click.Group):
def get_command(self, ctx, cmd_name):
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
return None
@click.command(cls=RunGroup, invoke_without_command=True)
@click.version_option(version=mc.__version__)
@click.pass_context
def cli(ctx):
if ctx.invoked_subcommand is None:
click.echo(ctx.get_help())
```
#### File: sdk/internal/history.py
```python
import time
class History(object):
def __init__(self, experiment):
self._experiment = experiment
self._step = 0
self._data = dict()
self._callback = None
self._start_time = time.time()
def __len__(self):
return len(self._data)
def __getitem__(self, __name: str):
return self._data[__name]
def _row_update(self, data):
self._data.update(data)
self.flush()
def set_callback(self, cb):
# TODO: check callback gets arguments for row
self._callback = cb
def flush(self):
if len(self._data) > 0:
self._data["_step"] = self._step
self._data["_runtime"] = int(self._data.get("_runtime", time.time() - self._start_time))
if self._callback:
self._callback(row=self._data)
self._data = dict()
if __name__ == "__main__":
import manta_client as mc
exp = mc.init()
exp.log({})
```
#### File: sdk/internal/process.py
```python
from ..interface.interface import Interface
def manta_internal():
pass
```
#### File: manta_client/sdk/manta_experiment.py
```python
import atexit
import time
from typing import Any, Dict, Optional, Sequence, Type
import manta_client as mc
from manta_client import Settings
from manta_client.base.packet import ExperimentPacket
from .internal import ( # noqa: F401
alarm,
artifact,
console,
history,
meta,
stats,
summary,
)
class ProcessController(object):
def __init__(self) -> None:
pass
def start(self):
pass
def stop(self):
pass
def join(self):
pass
EXPERIMENT_PREFIX = "experiment_"
class Experiment(object):
def __init__(
self, settings: Settings = None, config: Optional[Dict[str, Any]] = None, meta: Optional[Dict[str, Any]] = None
) -> None:
self._settings = settings
self._settings.update_times()
self._config = config
self._meta = meta
# by set functions
self._backend = None
self._observers = None
# by property & setters from settings
self._entity = None
self._project = None
self._id = settings.experiment_id
self._name = None
self._memo = None
self._tags = None
self._group = None
self._job_type = None
self._start_time = time.time() # TODO: history start time? settings start time?
# initiated at on_start
self.history = None
self.summary = None
self.console = None
self._controller = None
self._setup_from_settings(settings)
def _setup_from_settings(self, settings):
"""TODO: Need to decide keep tracking value changes at settings instance or at experiment object
if settings object is frozen, need to keep them in here
"""
for k, v in settings.__dict__.items():
try:
k = k.replace(EXPERIMENT_PREFIX, "")
setattr(self, f"_{k}", v)
except KeyError:
pass
def _setup_from_packet(self, pkt: ExperimentPacket) -> None:
self._packet = pkt
self._entity = pkt.entity
self._project = pkt.project
# TODO: add config, meta, history ...
def _setup_packet_offline(self, pkt: ExperimentPacket) -> None:
self._packet = pkt
def _as_packet(self, pkt: ExperimentPacket) -> None:
# TODO: iterate experiment properties for copying
for k, v in self.__dict__.items():
# TODO: Add try/except
pkt[k] = v
def set_api(self, api):
# TODO: merge into set_backend?
self._api = api
def set_backend(self, backend):
self._backend = backend
def set_observers(self, observer):
self._observers = observer
def _history_callback(self, row):
if self._backend and self._backend.interface:
self._backend.interface.publish_history(row)
def _console_callback(self, name, data):
if not data:
return
if self._backend and self._backend.interface:
self._backend.interface.publish_console(_stream=name, lines=data)
@property
def entity(self) -> str:
return self._entity
@property
def project(self) -> str:
return self._project
@property
def id(self) -> str:
return self._id
@property
def path(self) -> str:
return "/".join([self.entity, self.project, self.id])
@property
def config(self) -> Dict[str, Any]:
return self._config
@property
def meta(self) -> Dict[str, Any]:
return self._meta
@property
def dir(self) -> str:
return self._name
@property
def name(self) -> str:
return self._settings.experiment_name
@property
def memo(self) -> str:
return self._memo
@memo.setter
def memo(self, memo: str) -> None:
self._memo = memo
# TODO: notify to server memo is changed
@property
def group(self) -> str:
return self._group
@property
def job_type(self) -> str:
return self._job_type
@property
def tags(self) -> str:
return self._tags
@tags.setter
def tags(self, tags: Sequence) -> None:
self._tags = tuple(tags)
@property
def mode(self) -> str:
return self._mode
@property
def start_time(self) -> int:
return self._start_time
def on_init(self):
# TODO: log codes. do it on meta
# self._save_codes()
# TODO: show exp info
self._display()
        mc.util.mkdir(self._settings.experiment_dir)
self.history = history.History(self)
self.history.set_callback(self._history_callback)
# TODO: init summary
self.summary = None
def on_start(self):
self._controller_start()
self._console_start()
# TODO: code location can be changed
if not self._settings._disable_stats:
self._stats_start()
if not self._settings._disable_meta:
self._meta_start()
atexit.register(lambda: self.cleanup())
def on_finish(self):
"""
closing all process, threads
"""
if self._controller:
self._controller.stop()
self.history.flush()
self._console_stop()
# TODO: polling for all data be uploaded
# TODO: show final summary
#
if self._backend:
print("start backend cleanup")
self._backend.cleanup()
if self._controller:
self._controller.join()
def on_exit(self):
"""
show summarized messages, url, summary, artifacts ...
"""
pass
def _save_code(self):
# TODO: Do this on meta save?
pass
def _display(self):
# TODO: show experiment information
pass
def _stats_start(self):
self._stats = stats.SystemStats(interface=self._backend.interface)
self._stats.start()
def _meta_start(self):
self._meta = meta.Meta()
self._meta.start()
def _controller_start(self):
self._controller = ProcessController()
self._controller.start()
def _console_start(self):
# sync option = REDIRECT, WRAP, OFF
self.console = console.ConsoleSync(self)
self.console.set_callback(self._console_callback)
self.console.sync(option="wrap")
def _console_stop(self):
self.console.stop()
def log(self, data: Dict[str, Any]):
self.history._row_update(data)
def save(self):
pass
def alarm(self):
pass
def use_artifact(self):
pass
def log_artifact(self):
pass
def finish(self):
pass
def cleanup(self, exitcode: int = None):
# TODO: pre-checks?
# TODO: exitcodes?
self._exitcode = exitcode
# TODO: try - except
self.on_finish()
self.on_exit()
    def __exit__(
        self,
        exc_type: Type[BaseException],
        exc_value: BaseException,
        traceback: Any,
    ) -> bool:
        exitcode = 0 if exc_type is None else 1
        # finish() takes no arguments; cleanup() accepts the exit code
        self.cleanup(exitcode)
        return exc_type is None
```
#### File: manta_client/sdk/manta_login.py
```python
from manta_client import Settings
from manta_client.api import MantaAPI
class _MantaLogin(object):
def __init__(self, silent: bool = False) -> None:
self._api_key = None
self._silent = silent
def display(self):
pass
def setup(self, kwargs):
pass
def validate_api_key(self, key):
pass
def prompt_api_key(self):
"""request user to put api key on terminal"""
pass
def login(self):
pass
def login(
api_key: str = None, base_url: str = None, settings: Settings = None, api: MantaAPI = None, silent: bool = False
):
kwargs = dict(locals())
silent = kwargs.pop("silent")
_login = _MantaLogin(silent=silent)
_login.setup(kwargs)
logged_in = _login.login()
key = kwargs.get("key")
_login.validate_api_key(key)
if logged_in:
return logged_in
if key is None:
_login.prompt_api_key()
return _login._api_key or False
``` |
{
"source": "Joozlum/SketchyMaths",
"score": 4
} |
#### File: sketchymaths/sketchymathmethods/logic.py
```python
def logic(*args, **kwargs): # More complicated example of custom method. Allows for adding logic gates.
"""
Simple logic gate construct that can take any number of inputs
:param args: first arg is name of gate, all following args are input values
:param kwargs: true=true_condition(default=1) false=false_condition(default=0)
:return: boolean
"""
true = 1
if 'true' in kwargs:
true = kwargs['true']
false = 0
if 'false' in kwargs:
false = kwargs['false']
gate_types = ['AND', 'OR', 'NOT', 'NAND', 'NOR', 'XOR', 'XNOR']
# args[0] is evaluated to find the name of the gate
gate_type = str(args[0])
gate_type = gate_type.upper()
if gate_type not in gate_types:
return "gate not recognized"
if gate_type == 'AND':
for arg in args[1:]: # tests all args excluding the first, as it is the gate name
if arg != true:
return False
return True
if gate_type == 'OR':
for arg in args[1:]:
if arg == true:
return True
return False
if gate_type == 'NOT': # since a NOT gate only takes one argument, any extra will be ignored
for arg in args[1:]:
if arg == true:
return False
else:
return True
if gate_type == 'NAND':
for arg in args[1:]:
if arg == false:
return True
return False
if gate_type == 'NOR':
for arg in args[1:]:
if arg == true:
return False
return True
if gate_type == 'XOR':
x = None
for arg in args[1:]:
if x is None:
if arg == true:
x = True
if arg == false:
x = False
if arg == true:
if x is False:
return True
if arg == false:
if x is True:
return True
return False
if gate_type == 'XNOR':
x = None
for arg in args[1:]:
if x is None:
if arg == true:
x = True
if arg == false:
x = False
if arg == true:
if x is False:
return False
if arg == false:
if x is True:
return False
return True
def filter_logic(test, true_result, false_result):  # Very basic function to complement logic
"""
Function to take in a bool and return a custom value for true or false
:param test: bool
:param true_result:
:param false_result:
:return:
"""
if test:
return true_result
else:
return false_result
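# Minimal usage sketch (assumed inputs, not part of the original module):
if __name__ == '__main__':
    print(logic('AND', 1, 1, 1))  # True: every input equals the true condition
    print(logic('XOR', 1, 0))  # True: the two inputs differ
    print(filter_logic(logic('NAND', 1, 1), 'off', 'on'))  # 'on': NAND of all-true inputs is False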
```
#### File: sketchymaths/sketchymathsapp/evaluateequation.py
```python
from sketchymaths.sketchymathmethods import sketchy_dict
def evaluate_equation_text(equation_text: str):
if '#' in equation_text:
return equation_text, ''
if 'Self Reference' in equation_text:
return equation_text, 'Infinite Loop happening here!'
try:
result = eval(equation_text, {'__builtins__': None}, sketchy_dict)
error_message = ''
except ArithmeticError as e:
result = equation_text
error_message = e
except SyntaxError:
result = equation_text
error_message = None
except EOFError as e:
result = equation_text
error_message = e
except Exception as e:
result = equation_text
error_message = e
return result, error_message
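# Illustrative calls (assuming sketchy_dict exposes the custom math helpers):
# evaluate_equation_text('2 + 3 * 4') -> (14, '')
# evaluate_equation_text('2 +') -> ('2 +', None) via the SyntaxError branch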
```
#### File: SketchyMaths/tests/test_sketchymathsapp.py
```python
import unittest
from unittest import TestCase
from sketchymaths.sketchymathsapp import SketchyMathsApp
class TestSketchymathsApp(unittest.TestCase):
"""TestCase for SketchymathsApp.
"""
def setUp(self):
self.app = SketchyMathsApp()
def test_name(self):
self.assertEqual(self.app.name, 'sketchymaths')
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
class TestSketchyMathsApp(TestCase):
def test_build(self):
self.fail()
``` |
{
"source": "jop611/chips_circuits",
"score": 4
} |
#### File: code/helpers/helpers.py
```python
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
def trace(paths, destination):
"""
    Traces a path from destination back to origin.
    Input:
        paths; dictionary mapping each coordinate to its predecessor.
        destination; tuple of x-, y-, z-coordinates.
    Return:
        List containing tuples of x-, y-, z-coordinates.
"""
coordinates = destination
path = [coordinates]
# iterate over keys in dictionary until the path is traced
while coordinates in paths:
coordinates = paths[coordinates]
path.append(coordinates)
path.reverse()
return path
def matlib_convert(path):
"""
Convert tuples of x-, y-, z-coordinates to x-, y-, z-coordinate lists for visualisation via matplotlib
Input:
path; list containing tuples of x-, y-, z-coordinates.
Return:
Tuple of x-, y-, z-coordinate lists.
"""
x_list = []
y_list = []
z_list = []
for coordinate in path:
x_list.append(coordinate[0])
y_list.append(coordinate[1])
z_list.append(coordinate[2])
return (x_list, y_list, z_list)
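# Small combined example (assumed data): a predecessor dict traced back from
# (1, 1, 0), then converted into coordinate lists for matplotlib.
# paths = {(1, 1, 0): (1, 0, 0), (1, 0, 0): (0, 0, 0)}
# trace(paths, (1, 1, 0)) -> [(0, 0, 0), (1, 0, 0), (1, 1, 0)]
# matlib_convert(trace(paths, (1, 1, 0))) -> ([0, 1, 1], [0, 0, 1], [0, 0, 0])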
def plot(x_gates, y_gates, z_gates, boundaries, paths, count_wires):
"""
Plot gates and connections in a 3D grid.
Input:
x_gates; list of x-coordinates of all gates.
y_gates; list of y-coordinates of all gates.
z_gates; list of z-coordinates of all gates.
boundaries; tuple of x-, y-, z-coordinates.
paths; dictionary containing all paths between gates.
count_wires; integer.
Return:
None
"""
# create figure with correct axes
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plt.xticks(np.arange(0, boundaries[1][0] + 1, 1))
plt.yticks(np.arange(0, boundaries[1][1] + 1, 1))
plt.title(f"Total wire length: {count_wires}")
ax.set_xlim3d(0, boundaries[1][0], 1)
ax.set_ylim3d(0, boundaries[1][1], 1)
ax.set_zlim3d(0, 7)
# plot all gates
for m, zlow, zhigh in [('s', 0, 7)]:
x = x_gates
y = y_gates
z = z_gates
ax.scatter(x, y, z, marker=m)
# plot all connections
for connection in paths:
ax.plot(paths[connection][0], paths[connection][1], paths[connection][2], '-')
# axis names
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
plt.show()
```
#### File: jop611/chips_circuits/main.py
```python
from code.classes.netlist import Netlist
from code.algorithms.breadthfirst import BreadthFirst
from code.algorithms.a_star import A_Star
from code.algorithms.hillclimber import HillClimber
def main():
algorithm = input(f"\nChoose algorithm to perform\n"
"***************************\n\n"
"Options:\n"
"A: A* algorithm\n"
"B: Breadth-first search algorithm\n"
"C: Hillclimber algorithm on previously found solution\n").upper()
# choice of print can be either 1 or 2
print_nr = input(f"\nChoose a print to use (1/2): ")
# for print 1, options are 0-3. netlist 0 is a simple netlist for testing purposes.
# for print 2, options are 4-6.
netlist_nr = input("Choose a netlist to solve (0-6): ")
# a_star is the main algorithm that is guaranteed to solve netlist 0-5 in a matter of minutes.
# no solution is currently known for netlist 6.
if algorithm == "A":
a_star = A_Star(print_nr, netlist_nr)
a_star.run()
# breadth first search suffices for netlist 0 but is not recommended for more complex netlists
elif algorithm == "B":
bfs = BreadthFirst(print_nr, netlist_nr)
bfs.run()
# hillclimber algorithm can be used on obtained results. it requires the correct length of the result to be improved as input.
# options: 409 for netlist 0, 709 for netlist 1, 1047 for netlist 2,
# 1219 for netlist 3, 1460 for netlist 4, 1610 for netlist 5.
elif algorithm == "C":
length = input("Length of solution to perform hillclimber on: ")
hillclimber = HillClimber(print_nr, netlist_nr, length)
hillclimber.run()
if __name__ == "__main__":
main()
``` |
{
"source": "jopagel/regression-analysis",
"score": 3
} |
#### File: jopagel/regression-analysis/test.py
```python
import unittest
import pandas as pd
from regressionmodels.model import Regression
X = pd.DataFrame([2, 3, 4])
y = pd.DataFrame([6, 9, 12])
X_test = pd.DataFrame([3,4,5])
class MyTestCase(unittest.TestCase):
def setUp(self):
self.linearregression = Regression()
self.polynomialregression = Regression(deg=2)
def test_linear_fit(self):
self.assertEqual(self.linearregression.fit(X, y)[1], 3.0)
def test_polynomial_fit(self):
self.assertEqual(self.polynomialregression.fit(X, y)[0], 0.0)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "joparram/openDataCleaner",
"score": 2
} |
#### File: backend/app/plugin_loader.py
```python
import app.plugins
import importlib
import pkgutil
import sys
def iter_namespace(ns_pkg):
return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".")
def load_plugins():
for _, name, _ in iter_namespace(app.plugins):
print(name)
importlib.import_module(name)
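# Typical usage at application startup (assuming app.plugins is a namespace
# package whose modules self-register on import):
# load_plugins()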
```
#### File: app/templates/processor.py
```python
from ..api import _v1
from pathlib import Path
from app.error import Error
import pandas as pd
from app.components._data import dataframeHandler
import numpy as np
from sklearn.impute import KNNImputer
from sklearn import preprocessing
# ** ALL CONTENT COMMENTED BETWEEN ASTERISKS MUST BE EDITED **
# ** Set the plugin id inside the internal api, it must be unique **
pluginId = "processorPluginExample"
# ** Set the plugin name; it must equal the class name, and the variable must be pluginName **
pluginName = "ProcessorPluginExample"
# ** Set the plugin description **
pluginDescription = "Plugin description"
# ** Name of the plugin in the interface **
pluginInterfaceName = "Procesar..."
# ** List of implemented actions with their parameters. It will be rendered in the UI forms. **
Actions = [ _v1.Action(
name="exampleAction",
description="example action",
params=[
_v1.Param(name="exampleSelect", kind="select", options=["option1", "option2", "option3"]),
_v1.Param(name="exampleNumber", kind="number"),
_v1.Param(name="exampleString", kind="string"),
_v1.Param(name="exampleFile", kind="file"),
]),
_v1.Action(
name="exampleAction",
description="example action",
params=[
_v1.Param(name="exampleSelect", kind="select", options=["option1", "option2", "option3"]),
_v1.Param(name="exampleNumber", kind="number"),
_v1.Param(name="exampleString", kind="string"),
_v1.Param(name="exampleFile", kind="file"),
])
]
class ProcessorPluginExample:
def __init__(self):
# ** Actions dict must be updated with new actions **
self.actions = {
"default": self.exampleActionHandler,
"exampleAction": self.exampleActionHandler,
}
self.pagination = {
"startRow": None,
"endRow": None,
}
def exampleActionHandler(self, request):
df = dataframeHandler.getDataframe()
column = request.form.get('column')
axis = request.form.get('axis')
# ** HERE YOUR CODE FOR EXAMPLE ACTION HANDLER OF THIS PLUGIN **
        # modify df here; it will be saved through the dataframeHandler class in the
        # local cache and then returned in the response
# Obtain the params from the request
exampleSelect = request.form.get('exampleSelect')
exampleNumber = request.form.get('exampleNumber')
exampleString = request.form.get('exampleString')
exampleFile = request.files['exampleFile']
# do something like print params
print("exampleSelect: ", exampleSelect)
print("exampleNumber: ", exampleNumber)
print("exampleString: ", exampleString)
print("exampleFile: ", exampleFile)
# always save the dataframe in the local cache
dataframeHandler.saveDataframe(df)
    # ** add new handlers for additional actions and then place them in the actions dict **
# Don't change this method if is not necessary
def _updatePagination (self, request: any):
startRowParam = request.args.get('startRow')
endRowParam = request.args.get('endRow')
self.pagination["startRow"] = None if startRowParam is None else int(startRowParam)
self.pagination["endRow"]= None if endRowParam is None else int(endRowParam)
# Don't change this method if is not necessary
def __call__(self, request: any):
print("ProcessorPluginExample called")
self._updatePagination(request)
action = request.args.get("action")
if action is None:
self.actions["default"](request)
elif action not in self.actions:
            raise Error('Unknown action: {}'.format(action))
else:
self.actions[action](request)
return dataframeHandler.getAllData(self.pagination)
# Don't change that if is not necessary
component = _v1.ProcessorPlugin(name=pluginName, description=pluginDescription, interfacename=pluginInterfaceName, actions=Actions, handler_class=eval(pluginName))
_v1.register_processor_plugin(component)
```
#### File: app/utils/encoders.py
```python
import dataclasses, json
class _dataclassJsonEncoder(json.JSONEncoder):
    def default(self, o):
        if dataclasses.is_dataclass(o):
            return dataclasses.asdict(o)
        try:
            # json.JSONEncoder.default raises TypeError for unsupported objects
            return super().default(o)
        except TypeError:
            return f"<<non-serializable: {type(o).__name__}>>"
def dataclassToJson(dc):
return json.loads(json.dumps(dc, cls=_dataclassJsonEncoder))
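# Quick demo (hypothetical dataclass, not part of the original module):
# @dataclasses.dataclass
# class Point:
#     x: int
#     y: int
# dataclassToJson(Point(1, 2)) -> {'x': 1, 'y': 2}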
``` |
{
"source": "jopasserat/he-transformer",
"score": 3
} |
#### File: examples/MNIST-Cryptonets/common.py
```python
import tensorflow as tf
def conv2d_stride_2_valid(x, W, name=None):
"""returns a 2d convolution layer with stride 2, valid pooling"""
return tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding='VALID')
def avg_pool_3x3_same_size(x):
"""3x3 avg_pool using same padding, keeping original feature map size"""
return tf.nn.avg_pool(
x, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')
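# Output-size arithmetic for the stride-2 VALID convolution above (assuming a
# 28x28 MNIST input and a 5x5 kernel): floor((28 - 5) / 2) + 1 = 12, so each
# feature map is 12x12; the Cryptonets graphs then pad one row and one column
# to reach the 13x13 maps mentioned in their comments.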
```
#### File: examples/MNIST-Cryptonets/test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
import numpy as np
import itertools
import glob
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import common
import ngraph_bridge
import os
FLAGS = None
def cryptonets_test_squashed(x):
"""Constructs test network for Cryptonets using saved weights.
Assumes linear layers have been squashed."""
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
with tf.name_scope('reshape'):
x_image = tf.reshape(x, [-1, 28, 28, 1])
# First conv layer: maps one grayscale image to 5 feature maps of 13 x 13
with tf.name_scope('conv1'):
W_conv1 = tf.constant(
np.loadtxt('W_conv1.txt', dtype=np.float32).reshape([5, 5, 1, 5]))
h_conv1_no_pad = tf.square(
common.conv2d_stride_2_valid(x_image, W_conv1))
paddings = tf.constant([[0, 0], [0, 1], [0, 1], [0, 0]],
name='pad_const')
h_conv1 = tf.pad(h_conv1_no_pad, paddings)
with tf.name_scope('squash'):
W_squash = tf.constant(
np.loadtxt("W_squash.txt",
dtype=np.float32).reshape([5 * 13 * 13, 100]))
with tf.name_scope('fc1'):
h_pool2_flat = tf.reshape(h_conv1, [-1, 5 * 13 * 13])
h_fc1 = tf.matmul(h_pool2_flat, W_squash)
# h_fc1 = tf.Print(h_fc1, [h_fc1], summarize=200, message="After dot\n")
h_fc1 = tf.square(h_fc1)
# Map the 100 features to 10 classes, one for each digit
with tf.name_scope('fc2'):
W_fc2 = tf.constant(
np.loadtxt('W_fc2.txt', dtype=np.float32).reshape([100, 10]))
y_conv = tf.matmul(h_fc1, W_fc2)
y_conv = tf.Print(y_conv, [y_conv], summarize=100, message="Result\n")
return y_conv
def cryptonets_test_original(x):
"""Constructs test network for Cryptonets using saved weights"""
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images
# are grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
with tf.name_scope('reshape'):
x_image = tf.reshape(x, [-1, 28, 28, 1])
# First conv layer - maps one grayscale image to 5 feature maps of 13 x 13
with tf.name_scope('conv1'):
W_conv1 = tf.constant(
np.loadtxt('W_conv1.txt', dtype=np.float32).reshape([5, 5, 1, 5]))
h_conv1_no_pad = tf.square(
common.conv2d_stride_2_valid(x_image, W_conv1))
paddings = tf.constant([[0, 0], [0, 1], [0, 1], [0, 0]],
name='pad_const')
h_conv1 = tf.pad(h_conv1_no_pad, paddings)
# Pooling layer
with tf.name_scope('pool1'):
h_pool1 = common.avg_pool_3x3_same_size(h_conv1) # To 5 x 13 x 13
# Second convolution
with tf.name_scope('conv2'):
W_conv2 = tf.constant(
np.loadtxt('W_conv2.txt', dtype=np.float32).reshape([5, 5, 5, 50]))
h_conv2 = common.conv2d_stride_2_valid(h_pool1, W_conv2)
# Second pooling layer.
with tf.name_scope('pool2'):
h_pool2 = common.avg_pool_3x3_same_size(h_conv2)
# Fully connected layer 1
# Input: N x 5 x 5 x 50
# Output: N x 100
with tf.name_scope('fc1'):
W_fc1 = tf.constant(
np.loadtxt('W_fc1.txt',
dtype=np.float32).reshape([5 * 5 * 50, 100]))
h_pool2_flat = tf.reshape(h_pool2, [-1, 5 * 5 * 50])
h_fc1 = tf.square(tf.matmul(h_pool2_flat, W_fc1))
# Map the 100 features to 10 classes, one for each digit
with tf.name_scope('fc2'):
W_fc2 = tf.constant(
np.loadtxt('W_fc2.txt', dtype=np.float32).reshape([100, 10]))
y_conv = tf.matmul(h_fc1, W_fc2)
return y_conv
def test_mnist_cnn(FLAGS, network):
# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
# Build the graph for the deep net
if network == 'orig':
y_conv = cryptonets_test_original(x)
else:
y_conv = cryptonets_test_squashed(x)
with tf.Session() as sess:
start_time = time.time()
x_test = mnist.test.images[:FLAGS.batch_size]
y_test = mnist.test.labels[:FLAGS.batch_size]
# Run model
y_conv_val = y_conv.eval(feed_dict={x: x_test, y_: y_test})
        elapsed_time = time.time() - start_time
        print("total time(s)", elapsed_time)
x_test_batch = mnist.test.images[:FLAGS.batch_size]
y_test_batch = mnist.test.labels[:FLAGS.batch_size]
x_test = mnist.test.images
y_test = mnist.test.labels
y_label_batch = np.argmax(y_test_batch, 1)
if FLAGS.save_batch:
x_test_batch.tofile("x_test_" + str(FLAGS.batch_size) + ".bin")
y_label_batch.astype('float32').tofile("y_label_" +
str(FLAGS.batch_size) + ".bin")
correct_prediction = np.equal(np.argmax(y_conv_val, 1), y_label_batch)
error_count = np.size(correct_prediction) - np.sum(correct_prediction)
test_accuracy = np.mean(correct_prediction)
print('Error count', error_count, 'of', FLAGS.batch_size, 'elements.')
print('Accuracy with ' + network + ': %g ' % test_accuracy)
# Rename serialized graph
try:
serialized_graphs = glob.glob("tf_function_ngraph*.json")
if os.environ.get('NGRAPH_ENABLE_SERIALIZE',
'') == "1" and len(serialized_graphs) == 1:
src_path = serialized_graphs[0]
dst_path = "mnist_cryptonets_batch_%s.json" % (FLAGS.batch_size, )
print("Moving", src_path, "to", dst_path)
os.rename(src_path, dst_path)
except:
print("Renaming serialized graph not successful")
def main(_):
# Disable mnist dataset deprecation warning
tf.logging.set_verbosity(tf.logging.ERROR)
# Test using the original graph
# test_mnist_cnn(FLAGS, 'orig')
# Test using squashed graph
test_mnist_cnn(FLAGS, 'squash')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_dir',
type=str,
default='/tmp/tensorflow/mnist/input_data',
help='Directory where input data is stored')
parser.add_argument('--batch_size', type=int, default=1, help='Batch size')
parser.add_argument(
'--test_image_count',
type=int,
default=None,
help="Number of test images to evaluate on")
parser.add_argument(
'--save_batch',
type=bool,
default=False,
help='Whether or not to save the test image and label.')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
``` |
{
"source": "jopdorp/classical-polyphony-rnn",
"score": 3
} |
#### File: interfaces/midi/midi_interaction.py
```python
import abc
import threading
import time
# internal imports
import tensorflow as tf
import magenta
from magenta.protobuf import generator_pb2
from magenta.protobuf import music_pb2
class MidiInteractionException(Exception):
"""Base class for exceptions in this module."""
pass
def adjust_sequence_times(sequence, delta_time):
"""Adjusts note and total NoteSequence times by `delta_time`."""
retimed_sequence = music_pb2.NoteSequence()
retimed_sequence.CopyFrom(sequence)
for note in retimed_sequence.notes:
note.start_time += delta_time
note.end_time += delta_time
retimed_sequence.total_time += delta_time
return retimed_sequence
class MidiInteraction(threading.Thread):
"""Base class for handling interaction between MIDI and SequenceGenerator.
  Child classes will provide the "main loop" of an interactive session between
a MidiHub used for MIDI I/O and sequences generated by a SequenceGenerator in
their `run` methods.
Should be started by calling `start` to launch in a separate thread.
Args:
midi_hub: The MidiHub to use for MIDI I/O.
sequence_generators: A collection of SequenceGenerator objects.
    qpm: The quarters per minute to use for this interaction. May be overridden
by control changes sent to `tempo_control_number`.
generator_select_control_number: An optional MIDI control number whose
value to use for selection a sequence generator from the collection.
Must be provided if `sequence_generators` contains multiple
SequenceGenerators.
tempo_control_number: An optional MIDI control number whose value to use to
determine the qpm for this interaction. On receipt of a control change,
the qpm will be set to 60 more than the control change value.
temperature_control_number: The optional control change number to use for
controlling generation softmax temperature.
Raises:
ValueError: If `generator_select_control_number` is None and
`sequence_generators` contains multiple SequenceGenerators.
"""
  __metaclass__ = abc.ABCMeta
# Base QPM when set by a tempo control change.
_BASE_QPM = 60
def __init__(self,
midi_hub,
sequence_generators,
qpm,
generator_select_control_number=None,
tempo_control_number=None,
temperature_control_number=None):
if generator_select_control_number is None and len(sequence_generators) > 1:
raise ValueError(
'`generator_select_control_number` cannot be None if there are '
'multiple SequenceGenerators.')
self._midi_hub = midi_hub
self._sequence_generators = sequence_generators
self._default_qpm = qpm
self._generator_select_control_number = generator_select_control_number
self._tempo_control_number = tempo_control_number
self._temperature_control_number = temperature_control_number
# A signal to tell the main loop when to stop.
self._stop_signal = threading.Event()
super(MidiInteraction, self).__init__()
@property
def _sequence_generator(self):
"""Returns the SequenceGenerator selected by the current control value."""
if len(self._sequence_generators) == 1:
return self._sequence_generators[0]
val = self._midi_hub.control_value(self._generator_select_control_number)
val = 0 if val is None else val
return self._sequence_generators[val % len(self._sequence_generators)]
@property
def _qpm(self):
"""Returns the qpm based on the current tempo control value."""
val = self._midi_hub.control_value(self._tempo_control_number)
return self._default_qpm if val is None else val + self._BASE_QPM
@property
def _temperature(self, min_temp=0.1, max_temp=2.0, default=1.0):
"""Returns the temperature based on the current control value.
Linearly interpolates between `min_temp` and `max_temp`.
Args:
min_temp: The minimum temperature, which will be returned when value is 0.
max_temp: The maximum temperature, which will be returned when value is
127.
default: The temperature to return if control value is None.
Returns:
A float temperature value based on the 8-bit MIDI control value.
"""
val = self._midi_hub.control_value(self._temperature_control_number)
if val is None:
return default
return min_temp + (val / 127.) * (max_temp - min_temp)
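  # Worked example of the interpolation above: a control value of 64 maps to
  # 0.1 + (64 / 127.) * (2.0 - 0.1) ~= 1.06. Note that because `_temperature`
  # is a property, the keyword arguments always take their default values.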
@abc.abstractmethod
def run(self):
"""The main loop for the interaction.
Must exit shortly after `self._stop_signal` is set.
"""
pass
def stop(self):
"""Stops the main loop, and blocks until the interaction is stopped."""
self._stop_signal.set()
self.join()
class CallAndResponseMidiInteraction(MidiInteraction):
"""Implementation of a MidiInteraction for interactive "call and response".
Alternates between receiving input from the MidiHub ("call") and playing
generated sequences ("response"). During the call stage, the input is captured
and used to generate the response, which is then played back during the
response stage.
The call phrase is started when notes are received and ended by an external
signal (`end_call_signal`) or after receiving no note events for a full tick.
The response phrase is immediately generated and played. Its length is
optionally determined by a control value set for
`response_ticks_control_number` or by the length of the call.
Args:
midi_hub: The MidiHub to use for MIDI I/O.
sequence_generators: A collection of SequenceGenerator objects.
    qpm: The quarters per minute to use for this interaction. May be overridden
by control changes sent to `tempo_control_number`.
generator_select_control_number: An optional MIDI control number whose
      value to use for selecting a sequence generator from the collection.
Must be provided if `sequence_generators` contains multiple
SequenceGenerators.
clock_signal: An optional midi_hub.MidiSignal to use as a clock. Each tick
period should have the same duration. No other assumptions are made
about the duration, but is typically equivalent to a bar length. Either
      this or `tick_duration` must be specified.
tick_duration: An optional float specifying the duration of a tick period in
seconds. No assumptions are made about the duration, but is typically
equivalent to a bar length. Either this or `clock_signal` must be
specified.
end_call_signal: The optional midi_hub.MidiSignal to use as a signal to stop
the call phrase at the end of the current tick.
panic_signal: The optional midi_hub.MidiSignal to use as a signal to end
all open notes and clear the playback sequence.
mutate_signal: The optional midi_hub.MidiSignal to use as a signal to
generate a new response sequence using the current response as the
input.
allow_overlap: A boolean specifying whether to allow the call to overlap
with the response.
enable_metronome: A boolean specifying whether to enable the metronome.
min_listen_ticks_control_number: The optional control change number to use
for controlling the minimum call phrase length in clock ticks.
max_listen_ticks_control_number: The optional control change number to use
for controlling the maximum call phrase length in clock ticks. Call
phrases will automatically be ended and responses generated when this
length is reached.
response_ticks_control_number: The optional control change number to use for
controlling the length of the response in clock ticks.
tempo_control_number: An optional MIDI control number whose value to use to
determine the qpm for this interaction. On receipt of a control change,
the qpm will be set to 60 more than the control change value.
temperature_control_number: The optional control change number to use for
controlling generation softmax temperature.
loop_control_number: The optional control change number to use for
determining whether the response should be looped. Looping is enabled
when the value is 127 and disabled otherwise.
    state_control_number: The optional control change number to use for sending
state update control changes. The values are 0 for `IDLE`, 1 for
`LISTENING`, and 2 for `RESPONDING`.
Raises:
ValueError: If exactly one of `clock_signal` or `tick_duration` is not
specified.
"""
class State(object):
"""Class holding state value representations."""
IDLE = 0
LISTENING = 1
RESPONDING = 2
_STATE_NAMES = {
IDLE: 'Idle', LISTENING: 'Listening', RESPONDING: 'Responding'}
@classmethod
def to_string(cls, state):
return cls._STATE_NAMES[state]
def __init__(self,
midi_hub,
sequence_generators,
qpm,
generator_select_control_number,
clock_signal=None,
tick_duration=None,
end_call_signal=None,
panic_signal=None,
mutate_signal=None,
allow_overlap=False,
enable_metronome=False,
min_listen_ticks_control_number=None,
max_listen_ticks_control_number=None,
response_ticks_control_number=None,
tempo_control_number=None,
temperature_control_number=None,
loop_control_number=None,
state_control_number=None):
super(CallAndResponseMidiInteraction, self).__init__(
midi_hub, sequence_generators, qpm, generator_select_control_number,
tempo_control_number, temperature_control_number)
if [clock_signal, tick_duration].count(None) != 1:
raise ValueError(
'Exactly one of `clock_signal` or `tick_duration` must be specified.')
self._clock_signal = clock_signal
self._tick_duration = tick_duration
self._end_call_signal = end_call_signal
self._panic_signal = panic_signal
self._mutate_signal = mutate_signal
self._allow_overlap = allow_overlap
self._enable_metronome = enable_metronome
self._min_listen_ticks_control_number = min_listen_ticks_control_number
self._max_listen_ticks_control_number = max_listen_ticks_control_number
self._response_ticks_control_number = response_ticks_control_number
self._loop_control_number = loop_control_number
self._state_control_number = state_control_number
# Event for signalling when to end a call.
self._end_call = threading.Event()
# Event for signalling when to flush playback sequence.
self._panic = threading.Event()
    # Event for signalling when to mutate the response.
self._mutate = threading.Event()
def _update_state(self, state):
"""Logs and sends a control change with the state."""
if self._state_control_number is not None:
self._midi_hub.send_control_change(self._state_control_number, state)
tf.logging.info('State: %s', self.State.to_string(state))
def _end_call_callback(self, unused_captured_seq):
"""Method to use as a callback for setting the end call signal."""
self._end_call.set()
tf.logging.info('End call signal received.')
def _panic_callback(self, unused_captured_seq):
"""Method to use as a callback for setting the panic signal."""
self._panic.set()
tf.logging.info('Panic signal received.')
def _mutate_callback(self, unused_captured_seq):
"""Method to use as a callback for setting the mutate signal."""
self._mutate.set()
tf.logging.info('Mutate signal received.')
@property
def _min_listen_ticks(self):
"""Returns the min listen ticks based on the current control value."""
val = self._midi_hub.control_value(
self._min_listen_ticks_control_number)
return 0 if val is None else val
@property
def _max_listen_ticks(self):
"""Returns the max listen ticks based on the current control value."""
val = self._midi_hub.control_value(
self._max_listen_ticks_control_number)
return float('inf') if not val else val
@property
def _should_loop(self):
return (self._loop_control_number and
self._midi_hub.control_value(self._loop_control_number) == 127)
def _generate(self, input_sequence, zero_time, response_start_time,
response_end_time):
"""Generates a response sequence with the currently-selected generator.
Args:
input_sequence: The NoteSequence to use as a generation seed.
zero_time: The float time in seconds to treat as the start of the input.
response_start_time: The float time in seconds for the start of
generation.
response_end_time: The float time in seconds for the end of generation.
Returns:
The generated NoteSequence.
"""
# Generation is simplified if we always start at 0 time.
response_start_time -= zero_time
response_end_time -= zero_time
generator_options = generator_pb2.GeneratorOptions()
generator_options.input_sections.add(
start_time=0,
end_time=response_start_time)
generator_options.generate_sections.add(
start_time=response_start_time,
end_time=response_end_time)
# Get current temperature setting.
generator_options.args['temperature'].float_value = self._temperature
# Generate response.
tf.logging.info(
"Generating sequence using '%s' generator.",
self._sequence_generator.details.id)
tf.logging.debug('Generator Details: %s',
self._sequence_generator.details)
tf.logging.debug('Bundle Details: %s',
self._sequence_generator.bundle_details)
tf.logging.debug('Generator Options: %s', generator_options)
response_sequence = self._sequence_generator.generate(
adjust_sequence_times(input_sequence, -zero_time), generator_options)
response_sequence = magenta.music.trim_note_sequence(
response_sequence, response_start_time, response_end_time)
return adjust_sequence_times(response_sequence, zero_time)
def run(self):
"""The main loop for a real-time call and response interaction."""
start_time = time.time()
self._captor = self._midi_hub.start_capture(self._qpm, start_time)
if not self._clock_signal and self._enable_metronome:
self._midi_hub.start_metronome(self._qpm, start_time)
# Set callback for end call signal.
if self._end_call_signal is not None:
self._captor.register_callback(self._end_call_callback,
signal=self._end_call_signal)
if self._panic_signal is not None:
self._captor.register_callback(self._panic_callback,
signal=self._panic_signal)
if self._mutate_signal is not None:
self._captor.register_callback(self._mutate_callback,
signal=self._mutate_signal)
# Keep track of the end of the previous tick time.
last_tick_time = time.time()
# Keep track of the duration of a listen state.
listen_ticks = 0
# Start with an empty response sequence.
response_sequence = music_pb2.NoteSequence()
response_start_time = 0
response_duration = 0
player = self._midi_hub.start_playback(
response_sequence, allow_updates=True)
# Enter loop at each clock tick.
for captured_sequence in self._captor.iterate(signal=self._clock_signal,
period=self._tick_duration):
if self._stop_signal.is_set():
break
if self._panic.is_set():
response_sequence = music_pb2.NoteSequence()
player.update_sequence(response_sequence)
self._panic.clear()
tick_time = captured_sequence.total_time
# Set to current QPM, since it might have changed.
if self._enable_metronome:
self._midi_hub.start_metronome(self._qpm, tick_time)
captured_sequence.tempos[0].qpm = self._qpm
tick_duration = tick_time - last_tick_time
last_end_time = (max(note.end_time for note in captured_sequence.notes)
if captured_sequence.notes else 0.0)
# True iff there was no input captured during the last tick.
silent_tick = last_end_time <= last_tick_time
if not silent_tick:
listen_ticks += 1
if not captured_sequence.notes:
# Reset captured sequence since we are still idling.
if response_sequence.total_time <= tick_time:
self._update_state(self.State.IDLE)
if self._captor.start_time < tick_time:
self._captor.start_time = tick_time
self._end_call.clear()
listen_ticks = 0
elif (self._end_call.is_set() or
silent_tick or
listen_ticks >= self._max_listen_ticks):
if listen_ticks < self._min_listen_ticks:
tf.logging.info(
'Input too short (%d vs %d). Skipping.',
listen_ticks,
self._min_listen_ticks)
self._captor.start_time = tick_time
else:
# Create response and start playback.
self._update_state(self.State.RESPONDING)
capture_start_time = self._captor.start_time
if silent_tick:
# Move the sequence forward one tick in time.
captured_sequence = adjust_sequence_times(
captured_sequence, tick_duration)
captured_sequence.total_time = tick_time
capture_start_time += tick_duration
# Compute duration of response.
num_ticks = self._midi_hub.control_value(
self._response_ticks_control_number)
if num_ticks:
response_duration = num_ticks * tick_duration
else:
# Use capture duration.
response_duration = tick_time - capture_start_time
response_start_time = tick_time
response_end_time = response_start_time + response_duration
response_sequence = self._generate(
captured_sequence,
capture_start_time,
response_start_time,
response_end_time)
# If it took too long to generate, push response to next tick.
if (time.time() - response_start_time) >= tick_duration / 4:
push_ticks = (
(time.time() - response_start_time) // tick_duration + 1)
response_start_time += push_ticks * tick_duration
response_end_time += push_ticks * tick_duration
response_sequence = adjust_sequence_times(
response_sequence, push_ticks * tick_duration)
tf.logging.warn(
'Response too late. Pushing back %d ticks.', push_ticks)
# Start response playback. Specify the start_time to avoid stripping
# initial events due to generation lag.
player.update_sequence(
response_sequence, start_time=response_start_time)
# Optionally capture during playback.
if self._allow_overlap:
self._captor.start_time = response_start_time
else:
self._captor.start_time = response_end_time
# Clear end signal and reset listen_ticks.
self._end_call.clear()
listen_ticks = 0
else:
# Continue listening.
self._update_state(self.State.LISTENING)
# Potentially loop or mutate previous response.
if (response_sequence.total_time <= tick_time and
(self._should_loop or self._mutate.is_set())):
if self._mutate.is_set():
response_sequence = self._generate(
response_sequence,
response_start_time,
response_end_time,
response_end_time + response_duration)
self._mutate.clear()
response_start_time = response_end_time
response_sequence = adjust_sequence_times(
response_sequence, tick_time - response_start_time)
response_start_time = tick_time
player.update_sequence(
response_sequence, start_time=tick_time)
last_tick_time = tick_time
player.stop()
def stop(self):
self._stop_signal.set()
self._captor.stop()
self._midi_hub.stop_metronome()
super(CallAndResponseMidiInteraction, self).stop()
``` |
{
"source": "jopereira/horus-tracer",
"score": 2
} |
#### File: core/bpf/program.py
```python
from bcc import BPF
import logging
syscall_regex = "^[s]y[s]_"
class BpfProgram():
def __init__(self, text):
self._contents = text
self._bpf = None
self._probes = None
self._perf_buffer_size = 64 * 1024
def bpf_instance(self):
return self._bpf
def prepare(self):
assert self._bpf is None
self._bpf = BPF(text=self._contents)
def attach_probes(self):
self._attach_socket_probes()
self._attach_process_probes()
self._bpf.attach_tracepoint(tp="sched:sched_process_fork", fn_name="on_fork")
self._bpf.attach_tracepoint(tp="sched:sched_process_exit", fn_name="on_exit")
def detach_probes(self):
self._bpf.detach_tracepoint(tp="sched:sched_process_fork")
self._bpf.detach_tracepoint(tp="sched:sched_process_exit")
self._bpf.cleanup()
def filter_pid(self, pid):
assert isinstance(pid, int)
logging.info('Filtering events from PID [' + str(pid) + ']')
self._contents = self._contents.replace(
'//PID_FILTER//', str(pid))
def filter_comm(self, comm):
logging.info('Filtering events from COMM [' + str(comm) + ']')
if comm is None:
filter = 'NULL'
else:
filter = '"' + comm + '"'
self._contents = self._contents.replace(
'//COMM_FILTER//', filter)
def open_event_buffer(self, name, handler):
self._bpf[name].open_perf_buffer(handler, page_cnt=self._perf_buffer_size)
def _attach_probes_set(self, probes):
for event, (entry, exit) in probes.items():
            if event.startswith('re_'):
                event = event[3:]
                if entry is not None:
                    self._bpf.attach_kprobe(event_re=event, fn_name=entry)
                if exit is not None:
                    self._bpf.attach_kretprobe(event_re=event, fn_name=exit, maxactive=100)
            else:
                if entry is not None:
                    self._bpf.attach_kprobe(event=event, fn_name=entry)
                if exit is not None:
                    self._bpf.attach_kretprobe(event=event, fn_name=exit, maxactive=100)
def _attach_process_probes(self):
self._attach_probes_set(self.get_probes()['process'])
def _attach_socket_probes(self):
self._attach_probes_set(self.get_probes()['socket'])
def get_probes(self):
if self._probes is not None:
return self._probes
socket_probes = {}
socket_probes['tcp_connect'] = ('entry__tcp_connect', 'exit__tcp_connect')
socket_probes['inet_csk_accept'] = (None, 'exit__inet_csk_accept')
socket_probes['sock_sendmsg'] = ('entry__sock_sendmsg', 'exit__sock_sendmsg')
socket_probes['kernel_sendpage'] = ('entry__sock_sendmsg', 'exit__sock_sendmsg')
socket_probes['sock_recvmsg'] = ('entry__sock_recvmsg', 'exit__sock_recvmsg')
syscall_probes = {}
# Prefix with 're' to indicate it is a regex.
syscall_probes['wake_up_new_task'] = ('entry__wake_up_new_task', None)
syscall_probes['do_wait'] = (None, 'exit__do_wait')
syscall_probes['re_' + syscall_regex + 'fsync'] = (None, 'exit__sys_fsync')
self._probes = {'socket': socket_probes, 'process': syscall_probes}
return self._probes
```
#### File: core/events/base_event.py
```python
import ctypes
import struct
import socket
import time
TASK_COMM_LEN = 16 # linux/sched.h
class EventType():
SOCKET_SEND = 8
SOCKET_RECEIVE = 9
SOCKET_CONNECT = 11
SOCKET_ACCEPT = 12
PROCESS_CREATE = 1
PROCESS_START = 2
PROCESS_END = 3
PROCESS_JOIN = 4
FSYNC = 13
@staticmethod
def is_socket(type):
return type == EventType.SOCKET_SEND or \
type == EventType.SOCKET_RECEIVE or \
type == EventType.SOCKET_CONNECT or \
type == EventType.SOCKET_ACCEPT
@staticmethod
def is_process(type):
return type == EventType.PROCESS_CREATE or \
type == EventType.PROCESS_START or \
type == EventType.PROCESS_END or \
type == EventType.PROCESS_JOIN or \
type == EventType.FSYNC
class ExtraData(ctypes.Union):
_fields_ = [
("bytes", ctypes.c_uint),
("child_pid", ctypes.c_uint),
]
class SocketData(ctypes.Structure):
_fields_ = [
("sport", ctypes.c_ushort),
("dport", ctypes.c_ushort),
("saddr", (ctypes.c_ulonglong * 2)),
("daddr", (ctypes.c_ulonglong * 2)),
("family", ctypes.c_ushort),
("type", ctypes.c_ushort),
]
class EventData(ctypes.Structure):
_fields_ = [
("type", ctypes.c_uint),
("pid", ctypes.c_uint),
("tgid", ctypes.c_uint),
("ktime", ctypes.c_ulonglong),
("comm", ctypes.c_char * TASK_COMM_LEN),
("socket", SocketData),
("extra", ExtraData)
]
class Event(object):
hostname = socket.getfqdn()
event_counter = 0
def __init__(self, pid, tgid, comm, timestamp=None, host=None):
self._id = Event._get_next_event_id()
self._timestamp = timestamp
self._host = host
if self._timestamp is None:
self._timestamp = int(round(time.time() * 1000))
if self._host is None:
self._host = Event.hostname
self._tid = pid
self._pid = tgid
self._comm = comm
self._ktime = None
self._data = {}
def is_process(self):
return not self.is_thread()
    def is_thread(self):
        # A task is a (secondary) thread when its kernel thread id (tid)
        # differs from its thread-group id (pid).
        return self._tid != self._pid
def get_thread_id(self):
return self._generate_thread_id(self._tid, self._host)
def to_json(self):
return NotImplemented
def to_bytes(self):
return NotImplemented
def _generate_thread_id(self, pid, host):
return '{}@{}'.format(pid, host)
@staticmethod
def _get_next_event_id():
Event.event_counter += 1
return Event.hostname + str(Event.event_counter)
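if __name__ == "__main__":
    # Illustrative usage (example added for clarity): thread ids render as
    # "<tid>@<host>", and a task whose tid equals its pid is a plain process.
    event = Event(pid=1234, tgid=1234, comm="bash")
    print(event.get_thread_id())  # e.g. "1234@myhost"
    print(event.is_process())     # True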
```
#### File: events/handling/base_handler.py
```python
class BaseHandler(object):
def boot(self):
pass
def handle(self, cpu, data, size):
raise NotImplementedError
def shutdown(self):
pass
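class PrintHandler(BaseHandler):
    """Illustrative subclass (example added for clarity, not part of the
    project): BCC delivers perf-buffer records as (cpu, data, size); a handler
    typically casts `data` to the ctypes structure the BPF program emits,
    here the EventData structure defined in base_event.py."""
    def handle(self, cpu, data, size):
        import ctypes
        from falcon.core.events.base_event import EventData
        event = ctypes.cast(data, ctypes.POINTER(EventData)).contents
        print(event.type, event.pid, event.comm)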
```
#### File: handling/writers/kafka_writer.py
```python
import os
import logging
import hashlib
import json
from kafka import SimpleClient as KafkaClient
from confluent_kafka import Producer
from falcon.core.events.base_event import EventType
class KafkaWriter:
def __init__(self, servers):
self._servers = servers
self._topics = ['events']
self._client = None
self._partitions_count = {}
def open(self):
self._boot_topics()
self._producer = Producer({'bootstrap.servers': self._servers})
def write(self, event):
self._producer.poll(0)
        # Asynchronously produce a message; the delivery report callback will
        # be triggered (from poll or flush) when the message has been
        # successfully delivered or has failed permanently.
topic = self.topic_for_event(event)
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
logging.debug('Delivering event type [{}] to {} [partition:{}]'.format(
event._type, topic['name'], topic['partition']))
        self._producer.produce(topic['name'], bytes(event.to_bytes()), partition=topic['partition'], callback=KafkaWriter.delivery_report)
def close(self):
self._producer.flush()
self._client.close()
def topic_for_event(self, event):
topic = self._topics[0]
key = event._host
return {
'name': topic,
            'partition': int(hashlib.sha512(key.encode('utf-8')).hexdigest(), 16) % self._partitions_count[topic]
}
def _boot_topics(self):
self._client = KafkaClient(self._servers)
for topic in self._topics:
if not self._client.has_metadata_for_topic(topic):
raise IOError('Kafka topic ['+topic+'] was not found.')
topic_partitions_count = len(
self._client.get_partition_ids_for_topic(topic))
if topic_partitions_count == 0:
raise IOError('Kafka topic ['+topic+'] does not have any partition.')
self._partitions_count[topic] = topic_partitions_count
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
logging.debug('Booted topics and partitions: ' + json.dumps(self._partitions_count))
@staticmethod
def delivery_report(err, msg):
if err is not None:
logging.error('Event delivery failed: {}'.format(err))
elif logging.getLogger().getEffectiveLevel() == logging.DEBUG:
logging.debug('Event delivered to {} [partition:{}]'.format(
msg.topic(), msg.partition()))
```
#### File: events/types/socket_events.py
```python
from falcon.core.events.base_event import Event, EventType
import ujson as json
import socket
import struct
import logging
import falcon.core.protocol.fbs.FalconEvent as FlatFalconEvent
import falcon.core.protocol.fbs.EventData as FlatEventData
import falcon.core.protocol.fbs.SocketEvent as FlatSocketEvent
import falcon.core.protocol.fbs.SocketAccept as FlatSocketAccept
import falcon.core.protocol.fbs.SocketConnect as FlatSocketConnect
import falcon.core.protocol.fbs.SocketSend as FlatSocketSend
import falcon.core.protocol.fbs.SocketReceive as FlatSocketReceive
import flatbuffers
class SocketEvent(Event):
def __init__(self, pid, tgid, comm, sport, dport, saddr, daddr, family, timestamp=None, host=None):
self._sport = sport
self._dport = dport
self._saddr = saddr
self._daddr = daddr
self._family = family
self._socket_type = socket.SOCK_STREAM
self._socket_from = None
self._socket_to = None
self._socket_id = None
self._fill_socket_data()
super(SocketEvent, self).__init__(pid, tgid, comm, timestamp, host)
def _fill_socket_data(self):
sock_saddr = self._saddr
sock_daddr = self._daddr
# Handle IPv4 sockets
if self._family == socket.AF_INET:
sock_saddr = self._saddr[1]
sock_daddr = self._daddr[1]
sock_from = socket.inet_ntop(socket.AF_INET, struct.pack("I", sock_saddr))
sock_to = socket.inet_ntop(socket.AF_INET, struct.pack("I", sock_daddr))
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
logging.debug('ipv4 saddr: {} -> {}'.format(hex(sock_saddr), sock_from))
logging.debug('ipv4 daddr: {} -> {}'.format(hex(sock_daddr), sock_to))
# Handle IPv6 sockets
elif self._family == socket.AF_INET6:
# Handle IPv4-mapped IPv6 source socket addresses
if self._saddr[0] == 0x0 and self._saddr[1] & 0xffff0000 == 0xffff0000:
sock_saddr = self._saddr[1] >> 32
sock_from = socket.inet_ntop(socket.AF_INET, struct.pack("I", sock_saddr))
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
logging.debug('ipv4-mapped saddr: {} -> {}'.format(hex(sock_saddr), sock_from))
else:
sock_from = socket.inet_ntop(socket.AF_INET6, self._saddr)
# Handle IPv4-mapped IPv6 destination socket addresses
if self._daddr[0] == 0x0 and self._daddr[1] & 0xffff0000 == 0xffff0000:
# Convert IPv4-mapped destination IPv6 address to IPv4
sock_daddr = self._daddr[1] >> 32
sock_to = socket.inet_ntop(socket.AF_INET, struct.pack("I", sock_daddr))
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                    logging.debug('ipv4-mapped daddr: {} -> {}'.format(hex(sock_daddr), sock_to))
else:
sock_to = socket.inet_ntop(socket.AF_INET6, self._daddr)
else:
raise ValueError(
'Undefined socket family: {}'.format(self._family))
# Generate socket id
self._socket_from = sock_from
self._socket_to = sock_to
        self._socket_id = SocketEvent._generate_socket_id(
            sock_saddr, sock_from, sock_daddr, sock_to, self._sport, self._dport)
@staticmethod
def _generate_socket_id(addr1, addr1_str, addr2, addr2_str, port1, port2):
socket_id = None
if addr1 < addr2:
socket_id = "%s:%d-%s:%d" % (addr1_str, port1, addr2_str, port2)
elif addr2 < addr1:
socket_id = "%s:%d-%s:%d" % (addr2_str, port2, addr1_str, port1)
else:
if port1 < port2:
socket_id = "%s:%d-%s:%d" % (addr1_str, port1, addr2_str, port2)
else:
socket_id = "%s:%d-%s:%d" % (addr2_str, port2, addr1_str, port1)
return socket_id
class SocketConnect(SocketEvent):
def __init__(self, pid, tgid, comm, sport, dport, saddr, daddr, family, timestamp=None, host=None):
self._type = EventType.SOCKET_CONNECT
super(SocketConnect, self).__init__(pid, tgid, comm, sport, dport, saddr, daddr, family, timestamp, host)
def to_json(self):
return json.dumps({
"type": self._type,
"timestamp": self._timestamp,
"thread": self.get_thread_id(),
"socket": self._socket_id,
"socket_type": "TCP" if self._socket_type == socket.SOCK_STREAM else "UDP",
"src": self._socket_from,
"src_port": self._sport,
"dst": self._socket_to,
"dst_port": self._dport,
"data": {
"host": self._host,
"comm": self._comm,
}
})
def to_bytes(self):
builder = flatbuffers.Builder(0)
id_field = builder.CreateString(self._id)
comm_field = builder.CreateString(self._comm)
host_field = builder.CreateString(self._host)
extra_data_field = builder.CreateString(json.dumps(self._data))
socket_from_field = builder.CreateString(self._socket_from)
socket_to_field = builder.CreateString(self._socket_to)
socket_id_field = builder.CreateString(self._socket_id)
# Create SocketConnect event
FlatSocketConnect.SocketConnectStart(builder)
event_data = FlatSocketConnect.SocketConnectEnd(builder)
# Create SocketEvent event
FlatSocketEvent.SocketEventStart(builder)
FlatSocketEvent.SocketEventAddSourcePort(builder, self._sport)
FlatSocketEvent.SocketEventAddDestinationPort(builder, self._dport)
FlatSocketEvent.SocketEventAddSocketFamily(builder, self._family)
FlatSocketEvent.SocketEventAddSocketType(builder, self._socket_type)
FlatSocketEvent.SocketEventAddSocketFrom(builder, socket_from_field)
FlatSocketEvent.SocketEventAddSocketTo(builder, socket_to_field)
FlatSocketEvent.SocketEventAddSocketId(builder, socket_id_field)
FlatSocketEvent.SocketEventAddEvent(builder, event_data)
socket_event_data = FlatSocketEvent.SocketEventEnd(builder)
# Create FalconEvent
FlatFalconEvent.FalconEventStart(builder)
FlatFalconEvent.FalconEventAddId(builder, id_field)
FlatFalconEvent.FalconEventAddUserTime(builder, self._timestamp)
FlatFalconEvent.FalconEventAddKernelTime(builder, self._ktime)
FlatFalconEvent.FalconEventAddType(builder, self._type)
FlatFalconEvent.FalconEventAddPid(builder, self._pid)
FlatFalconEvent.FalconEventAddTid(builder, self._tid)
FlatFalconEvent.FalconEventAddComm(builder, comm_field)
FlatFalconEvent.FalconEventAddHost(builder, host_field)
FlatFalconEvent.FalconEventAddEventType(builder, FlatEventData.EventData().SocketEvent)
FlatFalconEvent.FalconEventAddEvent(builder, socket_event_data)
FlatFalconEvent.FalconEventAddExtraData(builder, extra_data_field)
builder.Finish(FlatFalconEvent.FalconEventEnd(builder))
return builder.Output()
class SocketAccept(SocketEvent):
def __init__(self, pid, tgid, comm, sport, dport, saddr, daddr, family, timestamp=None, host=None):
self._type = EventType.SOCKET_ACCEPT
super(SocketAccept, self).__init__(pid, tgid, comm, sport, dport, saddr, daddr, family, timestamp, host)
def to_json(self):
return json.dumps({
"type": self._type,
"timestamp": self._timestamp,
"thread": self.get_thread_id(),
"socket": self._socket_id,
"socket_type": "TCP" if self._socket_type == socket.SOCK_STREAM else "UDP",
"src": self._socket_from,
"src_port": self._sport,
"dst": self._socket_to,
"dst_port": self._dport,
"data": {
"host": self._host,
"comm": self._comm,
}
})
def to_bytes(self):
builder = flatbuffers.Builder(0)
id_field = builder.CreateString(self._id)
comm_field = builder.CreateString(self._comm)
host_field = builder.CreateString(self._host)
extra_data_field = builder.CreateString(json.dumps(self._data))
socket_from_field = builder.CreateString(self._socket_from)
socket_to_field = builder.CreateString(self._socket_to)
socket_id_field = builder.CreateString(self._socket_id)
# Create SocketAccept event
FlatSocketAccept.SocketAcceptStart(builder)
event_data = FlatSocketAccept.SocketAcceptEnd(builder)
# Create SocketEvent event
FlatSocketEvent.SocketEventStart(builder)
FlatSocketEvent.SocketEventAddSourcePort(builder, self._sport)
FlatSocketEvent.SocketEventAddDestinationPort(builder, self._dport)
FlatSocketEvent.SocketEventAddSocketFamily(builder, self._family)
FlatSocketEvent.SocketEventAddSocketType(builder, self._socket_type)
FlatSocketEvent.SocketEventAddSocketFrom(builder, socket_from_field)
FlatSocketEvent.SocketEventAddSocketTo(builder, socket_to_field)
FlatSocketEvent.SocketEventAddSocketId(builder, socket_id_field)
FlatSocketEvent.SocketEventAddEvent(builder, event_data)
socket_event_data = FlatSocketEvent.SocketEventEnd(builder)
# Create FalconEvent
FlatFalconEvent.FalconEventStart(builder)
FlatFalconEvent.FalconEventAddId(builder, id_field)
FlatFalconEvent.FalconEventAddUserTime(builder, self._timestamp)
FlatFalconEvent.FalconEventAddKernelTime(builder, self._ktime)
FlatFalconEvent.FalconEventAddType(builder, self._type)
FlatFalconEvent.FalconEventAddPid(builder, self._pid)
FlatFalconEvent.FalconEventAddTid(builder, self._tid)
FlatFalconEvent.FalconEventAddComm(builder, comm_field)
FlatFalconEvent.FalconEventAddHost(builder, host_field)
FlatFalconEvent.FalconEventAddEventType(builder, FlatEventData.EventData().SocketEvent)
FlatFalconEvent.FalconEventAddEvent(builder, socket_event_data)
FlatFalconEvent.FalconEventAddExtraData(builder, extra_data_field)
builder.Finish(FlatFalconEvent.FalconEventEnd(builder))
return builder.Output()
class SocketSend(SocketEvent):
def __init__(self, pid, tgid, comm, sport, dport, saddr, daddr, family, size, timestamp=None, host=None):
self._type = EventType.SOCKET_SEND
self._size = size
super(SocketSend, self).__init__(pid, tgid, comm, sport, dport, saddr, daddr, family, timestamp, host)
def to_json(self):
return json.dumps({
"type": self._type,
"timestamp": self._timestamp,
"thread": self.get_thread_id(),
"socket": self._socket_id,
"socket_type": "TCP" if self._socket_type == socket.SOCK_STREAM else "UDP",
"src": self._socket_from,
"src_port": self._sport,
"dst": self._socket_to,
"dst_port": self._dport,
"size": self._size,
"data": {
"host": self._host,
"comm": self._comm,
}
})
def to_bytes(self):
builder = flatbuffers.Builder(0)
id_field = builder.CreateString(self._id)
comm_field = builder.CreateString(self._comm)
host_field = builder.CreateString(self._host)
extra_data_field = builder.CreateString(json.dumps(self._data))
socket_from_field = builder.CreateString(self._socket_from)
socket_to_field = builder.CreateString(self._socket_to)
socket_id_field = builder.CreateString(self._socket_id)
# Create SocketSend event
FlatSocketSend.SocketSendStart(builder)
FlatSocketSend.SocketSendAddSize(builder, self._size)
event_data = FlatSocketSend.SocketSendEnd(builder)
# Create SocketEvent event
FlatSocketEvent.SocketEventStart(builder)
FlatSocketEvent.SocketEventAddSourcePort(builder, self._sport)
FlatSocketEvent.SocketEventAddDestinationPort(builder, self._dport)
FlatSocketEvent.SocketEventAddSocketFamily(builder, self._family)
FlatSocketEvent.SocketEventAddSocketType(builder, self._socket_type)
FlatSocketEvent.SocketEventAddSocketFrom(builder, socket_from_field)
FlatSocketEvent.SocketEventAddSocketTo(builder, socket_to_field)
FlatSocketEvent.SocketEventAddSocketId(builder, socket_id_field)
FlatSocketEvent.SocketEventAddEvent(builder, event_data)
socket_event_data = FlatSocketEvent.SocketEventEnd(builder)
# Create FalconEvent
FlatFalconEvent.FalconEventStart(builder)
FlatFalconEvent.FalconEventAddId(builder, id_field)
FlatFalconEvent.FalconEventAddUserTime(builder, self._timestamp)
FlatFalconEvent.FalconEventAddKernelTime(builder, self._ktime)
FlatFalconEvent.FalconEventAddType(builder, self._type)
FlatFalconEvent.FalconEventAddPid(builder, self._pid)
FlatFalconEvent.FalconEventAddTid(builder, self._tid)
FlatFalconEvent.FalconEventAddComm(builder, comm_field)
FlatFalconEvent.FalconEventAddHost(builder, host_field)
FlatFalconEvent.FalconEventAddEventType(builder, FlatEventData.EventData().SocketEvent)
FlatFalconEvent.FalconEventAddEvent(builder, socket_event_data)
FlatFalconEvent.FalconEventAddExtraData(builder, extra_data_field)
builder.Finish(FlatFalconEvent.FalconEventEnd(builder))
return builder.Output()
class SocketReceive(SocketEvent):
def __init__(self, pid, tgid, comm, sport, dport, saddr, daddr, family, size, timestamp=None, host=None):
self._type = EventType.SOCKET_RECEIVE
self._size = size
super(SocketReceive, self).__init__(pid, tgid, comm, sport, dport, saddr, daddr, family, timestamp, host)
def to_json(self):
return json.dumps({
"type": self._type,
"timestamp": self._timestamp,
"thread": self.get_thread_id(),
"socket": self._socket_id,
"socket_type": "TCP" if self._socket_type == socket.SOCK_STREAM else "UDP",
"src": self._socket_from,
"src_port": self._sport,
"dst": self._socket_to,
"dst_port": self._dport,
"size": self._size,
"data": {
"host": self._host,
"comm": self._comm,
}
})
def to_bytes(self):
builder = flatbuffers.Builder(0)
id_field = builder.CreateString(self._id)
comm_field = builder.CreateString(self._comm)
host_field = builder.CreateString(self._host)
extra_data_field = builder.CreateString(json.dumps(self._data))
socket_from_field = builder.CreateString(self._socket_from)
socket_to_field = builder.CreateString(self._socket_to)
socket_id_field = builder.CreateString(self._socket_id)
# Create SocketReceive event
FlatSocketReceive.SocketReceiveStart(builder)
FlatSocketReceive.SocketReceiveAddSize(builder, self._size)
event_data = FlatSocketReceive.SocketReceiveEnd(builder)
# Create SocketEvent event
FlatSocketEvent.SocketEventStart(builder)
FlatSocketEvent.SocketEventAddSourcePort(builder, self._sport)
FlatSocketEvent.SocketEventAddDestinationPort(builder, self._dport)
FlatSocketEvent.SocketEventAddSocketFamily(builder, self._family)
FlatSocketEvent.SocketEventAddSocketType(builder, self._socket_type)
FlatSocketEvent.SocketEventAddSocketFrom(builder, socket_from_field)
FlatSocketEvent.SocketEventAddSocketTo(builder, socket_to_field)
FlatSocketEvent.SocketEventAddSocketId(builder, socket_id_field)
FlatSocketEvent.SocketEventAddEvent(builder, event_data)
socket_event_data = FlatSocketEvent.SocketEventEnd(builder)
# Create FalconEvent
FlatFalconEvent.FalconEventStart(builder)
FlatFalconEvent.FalconEventAddId(builder, id_field)
FlatFalconEvent.FalconEventAddUserTime(builder, self._timestamp)
FlatFalconEvent.FalconEventAddKernelTime(builder, self._ktime)
FlatFalconEvent.FalconEventAddType(builder, self._type)
FlatFalconEvent.FalconEventAddPid(builder, self._pid)
FlatFalconEvent.FalconEventAddTid(builder, self._tid)
FlatFalconEvent.FalconEventAddComm(builder, comm_field)
FlatFalconEvent.FalconEventAddHost(builder, host_field)
FlatFalconEvent.FalconEventAddEventType(builder, FlatEventData.EventData().SocketEvent)
FlatFalconEvent.FalconEventAddEvent(builder, socket_event_data)
FlatFalconEvent.FalconEventAddExtraData(builder, extra_data_field)
builder.Finish(FlatFalconEvent.FalconEventEnd(builder))
return builder.Output()
```
#### File: falcon/util/file.py
```python
import os
def check_pid(pid):
""" Check For the existence of a unix pid. """
if pid is None:
return False
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
def clean_pid(file):
    try:
        os.remove(file)
    except OSError:
        pass
def write_pid(file):
with open(file, 'w') as f:
pid = str(os.getpid())
f.write(pid)
def read_pid(file):
    try:
        with open(file, 'r') as f:
            return int(f.readline())
    except (IOError, OSError, ValueError):
        return None
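if __name__ == "__main__":
    # Illustrative round trip (example added for clarity; the pidfile path is
    # hypothetical): write this process's pid, verify it is alive, clean up.
    write_pid("/tmp/falcon-demo.pid")
    assert check_pid(read_pid("/tmp/falcon-demo.pid"))
    clean_pid("/tmp/falcon-demo.pid")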
```
#### File: falcon/util/net.py
```python
import ctypes
import ctypes.util
libc = ctypes.CDLL(ctypes.util.find_library('c'))
# Get network device's name
def if_indextoname(index):
    if not isinstance(index, int):
        raise TypeError('Index must be an integer.')
    libc.if_indextoname.argtypes = [ctypes.c_uint32, ctypes.c_char_p]
    libc.if_indextoname.restype = ctypes.c_char_p
    ifname = ctypes.create_string_buffer(32)
    ifname = libc.if_indextoname(index, ifname)
    if not ifname:
        raise RuntimeError("Invalid network interface index.")
    return ifname
# Generate socket id
def to_socket_id(addr1, addr1_str, addr2, addr2_str, port1, port2):
socket_id = None
if addr1 < addr2:
socket_id = "%s:%d-%s:%d" % (addr1_str, port1, addr2_str, port2)
elif addr2 < addr1:
socket_id = "%s:%d-%s:%d" % (addr2_str, port2, addr1_str, port1)
else:
if port1 < port2:
socket_id = "%s:%d-%s:%d" % (addr1_str, port1, addr2_str, port2)
else:
socket_id = "%s:%d-%s:%d" % (addr2_str, port2, addr1_str, port1)
return socket_id
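if __name__ == "__main__":
    # Illustrative check (example added for clarity): both directions of the
    # same connection map to a single canonical id.
    a = to_socket_id(0x0100007F, "127.0.0.1", 0x0200007F, "127.0.0.2", 5000, 80)
    b = to_socket_id(0x0200007F, "127.0.0.2", 0x0100007F, "127.0.0.1", 80, 5000)
    assert a == b == "127.0.0.1:5000-127.0.0.2:80"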
``` |
{
"source": "jopesy/django-form-designer-ai",
"score": 3
} |
#### File: django-form-designer-ai/form_designer/email.py
```python
import re
import django
from django.core.mail import EmailMessage
from django.utils.encoding import force_text
from form_designer.utils import string_template_replace
DJANGO_18 = django.VERSION[:2] >= (1, 8)
def _template_replace_list(input_str, context_dict):
"""
Split the input string by commas or semicolons, then template-replace.
Falsy input values yield empty lists.
:param input_str: Comma-or-semicolon-separated list of values
:type input_str: str|None
:param context_dict: The context for template replacement
:return: List of strings
:rtype: list[str]
"""
if not input_str:
return []
return [
string_template_replace(email, context_dict)
for email
in re.compile(r'\s*[,;]+\s*').split(force_text(input_str))
]
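# For example (illustrative): "a@x.com; b@x.com,c@x.com" splits into
# ['a@x.com', 'b@x.com', 'c@x.com'] before per-address template replacement.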
def build_form_mail(form_definition, form, files=None):
"""
Build a form-submission email based on the given form definition and associated submitted form
:param form_definition: Form definition object
:param form: The freshly submitted form
:param files: Associated files
:return: Django email message
"""
if not files:
files = []
form_data = form_definition.get_form_data(form)
message = form_definition.compile_message(form_data)
context_dict = form_definition.get_form_data_context(form_data)
mail_to = _template_replace_list(form_definition.mail_to, context_dict)
if form_definition.mail_from:
from_email = string_template_replace(form_definition.mail_from, context_dict)
else:
from_email = None
reply_to = _template_replace_list(form_definition.mail_reply_to, context_dict)
mail_subject = string_template_replace(
(form_definition.mail_subject or form_definition.title),
context_dict
)
kwargs = {
'subject': mail_subject,
'body': message,
'from_email': from_email,
'to': mail_to,
}
    if DJANGO_18:  # the reply_to kwarg is only supported in Django 1.8+...
kwargs['reply_to'] = reply_to
message = EmailMessage(**kwargs)
if not DJANGO_18: # so do it manually when not on Django 1.8
message.extra_headers['Reply-To'] = ', '.join(map(force_text, reply_to))
if form_definition.is_template_html:
message.content_subtype = "html"
if form_definition.mail_uploaded_files:
for file_path in files:
message.attach_file(file_path)
return message
``` |
{
"source": "jopetty/ml-research-kit",
"score": 2
} |
#### File: jopetty/ml-research-kit/run.py
```python
import dotenv
import hydra
from omegaconf import DictConfig
dotenv.load_dotenv(override=True)
@hydra.main(config_path="conf/", config_name="config.yaml")
def main(cfg: DictConfig):
from src.eval import eval
from src.train import train
from src.utils import utils
if cfg.get("print_config"):
utils.print_config(cfg, resolve=True)
if cfg.mode == "train":
return train(cfg)
elif cfg.mode == "eval":
return eval(cfg)
else:
raise ValueError(f"Unknown mode '{cfg.mode}'")
if __name__ == "__main__":
main()
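# Typical invocation (illustrative; assumes conf/config.yaml defines these keys):
#   python run.py mode=train print_config=true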
``` |
{
"source": "jopetty/transd-dev",
"score": 2
} |
#### File: jopetty/transd-dev/run.py
```python
import dotenv
import hydra
from omegaconf import DictConfig
dotenv.load_dotenv(override=True)
@hydra.main(config_path="conf/", config_name="config.yaml")
def main(config: DictConfig):
from src.eval import eval
from src.train import train
from src.utils import utils
if config.get("print_config"):
utils.print_config(config, resolve=True)
if config.mode.name == "train":
return train(config)
elif config.mode.name == "eval":
return eval(config)
else:
raise ValueError(f"Unknown mode '{config.mode}'")
if __name__ == "__main__":
main()
```
#### File: models/modules/rnn_decoder.py
```python
import random
from typing import Dict
import torch
from torch import Tensor, nn
from torch.nn import functional as F
class RNNDecoder(nn.Module):
@property
def max_gen_length(self) -> int:
return self.hparams["dec_max_gen_length"]
@property
def EOS_idx(self) -> int:
return self.hparams["dec_EOS_idx"]
def __init__(self, hparams: dict) -> None:
super().__init__()
self.hparams = hparams
self.embedding = nn.Embedding(
hparams["dec_vocab_size"], hparams["dec_embedding_size"]
)
self.unit = nn.RNN(
hparams["dec_embedding_size"],
hparams["dec_hidden_size"],
num_layers=hparams["dec_num_layers"],
batch_first=True,
)
self.output = nn.Linear(hparams["dec_hidden_size"], hparams["dec_vocab_size"])
    def forward_step(self, step_input: Dict[str, Tensor]) -> Dict[str, Tensor]:
        # Re-add the leading dimension if the tensor arrived squeezed (rank 2).
        no_squeeze = lambda a: a.unsqueeze(0) if a.dim() == 2 else a
# print("Step Input")
# for key in step_input:
# print(f"{key}: {step_input[key].shape}")
h = no_squeeze(step_input["h"])
unit_input = no_squeeze(F.relu(self.embedding(step_input["x"])))
_, state = self.unit(unit_input, h)
y = self.output(no_squeeze(state[-1, :, :]))
# print(f"h: {h.shape}")
# print(f"unit_input: {unit_input.shape}")
# print(f"unk: {unk.shape}")
# print(f"state: {state.shape}")
# print(f"state[-1]: {state[-1].shape}")
# print(f"y: {y.shape}")
return {"y": y, "h": state}
def get_step_input(self, dec_input: Dict[str, Tensor]) -> Dict[str, Tensor]:
if "h" in dec_input:
h = dec_input["h"]
elif "encoder_last_state" in dec_input:
h = torch.transpose(dec_input["encoder_last_state"], 0, 1)
else:
raise ValueError(
f"You must provide a hidden input in dec_input '{dec_input}'"
)
if "x" in dec_input:
x = dec_input["x"]
elif "transform" in dec_input:
# print("No x found")
# print(dec_input["transform"][:, 1:-1].shape)
x = dec_input["transform"][:, 1:-1]
else:
raise ValueError(
f"You must provide a step input in dec_input '{dec_input}'"
)
step_input = {"x": x, "h": h}
if "encoder_output" in dec_input:
step_input["encoder_output"] = dec_input["encoder_output"]
return step_input
def forward(self, dec_input: Dict[str, Tensor], tf_ratio) -> Dict[str, Tensor]:
is_teacher_forcing = random.random() < tf_ratio
batch_size: int = dec_input["encoder_output"].shape[0]
hidden_size: int = self.output.in_features
vocab_size: int = self.output.out_features
gen_length = (
dec_input["target"][0].shape[0]
if is_teacher_forcing
else self.max_gen_length
)
dec_step_input = self.get_step_input(dec_input)
has_finished = torch.zeros(batch_size, dtype=torch.bool)
dec_output = torch.zeros(gen_length, batch_size, vocab_size)
dec_hidden = torch.zeros(gen_length, batch_size, hidden_size)
for i in range(gen_length):
# print(f"STEP {i} (tf={is_teacher_forcing})")
step_result = self.forward_step(dec_step_input)
step_prediction = step_result["y"].argmax(dim=-1)
# for key in step_result:
# print(f"step_result[{key}]: {step_result[key].shape}")
# print("dec_hidden: ", dec_hidden.shape)
dec_output[i] = step_result["y"]
            dec_hidden[i] = step_result["h"][-1]  # keep the top layer's hidden state
has_finished[step_prediction == self.EOS_idx] = True
if all(has_finished):
break
else:
x = dec_input["target"][:, i] if is_teacher_forcing else step_prediction
step_result["x"] = x.unsqueeze(-1)
step_result["encoder_output"] = dec_input["encoder_output"]
dec_step_input = self.get_step_input(step_result)
output = {
"logits": torch.transpose(dec_output, 0, 1),
"predictions": torch.transpose(dec_output, 0, 1).argmax(dim=-1),
"decoder_hiddens": dec_hidden,
}
return output
```
#### File: models/modules/rnn_encoder.py
```python
from typing import Dict
import torch
from torch import Tensor, nn
class RNNEncoder(nn.Module):
def __init__(self, hparams: dict):
super().__init__()
self.model = nn.Sequential(
nn.Embedding(hparams["enc_vocab_size"], hparams["enc_embedding_size"]),
nn.RNN(
hparams["enc_embedding_size"],
hparams["enc_hidden_size"],
num_layers=hparams["enc_num_layers"],
batch_first=True,
),
)
def forward(self, enc_input: Dict[str, Tensor]):
outputs, last_state = self.model(enc_input["source"])
enc_output = {
"encoder_output": outputs,
"encoder_last_state": torch.transpose(last_state, 0, 1),
}
return enc_output
```
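A quick smoke test of the encoder's contract (an illustrative sketch; the hyperparameter values, shapes and import path here are assumptions, not taken from the project's configs):
```python
import torch
from src.models.modules.rnn_encoder import RNNEncoder  # assumed module path
hparams = {"enc_vocab_size": 10, "enc_embedding_size": 8,
           "enc_hidden_size": 16, "enc_num_layers": 1}
encoder = RNNEncoder(hparams)
out = encoder({"source": torch.randint(0, 10, (4, 7))})  # batch=4, seq_len=7
print(out["encoder_output"].shape)      # torch.Size([4, 7, 16])
print(out["encoder_last_state"].shape)  # torch.Size([4, 1, 16])
```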
#### File: src/utils/utils.py
```python
import logging
from typing import List, Sequence
import pytorch_lightning as pl
import rich.syntax
import rich.tree
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import seed_everything
from pytorch_lightning.utilities import rank_zero_only
def set_all_seeds(seed: int, workers: bool = True):
seed_everything(seed=seed, workers=workers)
def get_logger(name=__name__) -> logging.Logger:
logger = logging.getLogger(name)
for level in (
"debug",
"info",
"warning",
"error",
"exception",
"fatal",
"critical",
):
setattr(logger, level, rank_zero_only(getattr(logger, level)))
return logger
@rank_zero_only
def print_config(
config: DictConfig,
fields: Sequence[str] = (
"trainer",
"model",
"datamodule",
"callbacks",
"logger",
"test_after_training",
"seed",
"name",
),
resolve: bool = True,
) -> None:
tree = rich.tree.Tree("CONFIG")
for field in fields:
branch = tree.add(field)
config_section = config.get(field)
branch_content = str(config_section)
if isinstance(config_section, DictConfig):
branch_content = OmegaConf.to_yaml(config_section, resolve=resolve)
branch.add(
rich.syntax.Syntax(
branch_content, "yaml", theme="default", background_color="default"
)
)
rich.print(tree)
with open("config_tree.log", "w") as fp:
rich.print(tree, file=fp)
@rank_zero_only
def log_hyperparameters(
config: DictConfig,
model: pl.LightningModule,
datamodule: pl.LightningDataModule,
trainer: pl.Trainer,
callbacks: List[pl.Callback],
logger: List[pl.loggers.LightningLoggerBase],
):
hparams = {}
hparams["trainer"] = config["trainer"]
hparams["model"] = config["model"]
hparams["datamodule"] = config["datamodule"]
if "seed" in config:
hparams["seed"] = config["seed"]
if "callbacks" in config:
hparams["callbacks"] = config["callbacks"]
hparams["model/params/total"] = sum(p.numel() for p in model.parameters())
hparams["model/params/trainable"] = sum(
p.numel() for p in model.parameters() if not p.requires_grad
)
trainer.logger.log_hyperparams(hparams)
def finish(
config: DictConfig,
model: pl.LightningModule,
datamodule: pl.LightningDataModule,
trainer: pl.Trainer,
callbacks: List[pl.Callback],
logger: List[pl.loggers.LightningLoggerBase],
):
for lg in logger:
if isinstance(lg, pl.loggers.wandb.WandbLogger):
import wandb
wandb.finish()
``` |
{
"source": "JoPfeiff/nlp-data-loading-framework-",
"score": 3
} |
#### File: nlp-data-loading-framework-/embeddings/polyglot_embeddings.py
```python
import os.path
import numpy as np
from embeddings import Embeddings
from data_loading.data_utils import pickle_call
POLYGLOT_NAMES = ['Polyglot']
"""
THIS IS A CHILD OF EMBEDDINGS!
THIS SCRIPT SHOULD BE USED TO INITIALIZE A NEW EMBEDDING DATASET
"""
class PolyglotEmbeddings(Embeddings):
def __init__(self):
"""
        This class loads the Polyglot embedding data
"""
# Load the super class
super(PolyglotEmbeddings, self).__init__()
# Name of the embedding dataset
self.name = 'Polyglot'
path = '../data/embeddings/polyglot-en.pkl'
if not os.path.isfile(path):
path = 'data/embeddings/polyglot-en.pkl'
if not os.path.isfile(path):
raise Exception(
"please load Polyglot Embeddings from http://bit.ly/19bSoAS and store in data/embeddings/")
self.path = path
self.poly_data = pickle_call(self.path)
def load_top_k(self, K, preload=False):
"""
        Loading strategy: only load the top K embeddings, assuming they are
        ordered by frequency.
        :param K: number of most frequent embeddings to retrieve
        :return: embeddings matrix as numpy
"""
vocab_list = self.poly_data[0]
embeddings = self.poly_data[1]
        K = min(K, len(embeddings))
for i in range(K):
self.add_term(vocab_list[i], preload=preload)
special_embeddings = self.add_special_embeddings(len(embeddings[0]), preload=preload)
if special_embeddings != []:
embeddings = embeddings + special_embeddings
return embeddings[:K]
def get_name(self):
return self.name
```
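A minimal usage sketch (illustrative; assumes the pickle has been downloaded as the constructor requires, and that the module path matches the file layout above):
```python
from embeddings.polyglot_embeddings import PolyglotEmbeddings  # assumed path
emb = PolyglotEmbeddings()
matrix = emb.load_top_k(10000)  # vectors for the 10k most frequent terms
print(emb.get_name(), len(matrix))
```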
#### File: JoPfeiff/nlp-data-loading-framework-/testing.py
```python
import unittest
from data_loading.data_loader import DataLoader
from data_loading.data_utils import pickle_call
import numpy as np
class DataLoadingTest(unittest.TestCase):
def test_generator_data_length_fast_text_SNLI_in_dict(self):
dl = DataLoader(embeddings_initial='FastText-Crawl', embedding_loading='in_dict')
# dl = DataLoader(embeddings_initial='FastText', embedding_loading='load_dict',
# embedding_params={'first_time_emb_load': False})
dl.load('data/pickles/')
dl.get_all_and_dump('data/pickles/')
gen = dl.get_generator(drop_last=False, initialize=True)
tr = dl.get_train_data()
nr_data_points = 0
        # Short check that the number of data points yielded equals the size
        # of the training set. TL;DR: yes, it does.
while True:
            data, batch = next(gen)
if data is None:
break
nr_data_points += len(data)
self.assertEqual(nr_data_points, len(tr))
def test_generator_data_length_Polyglot_SNLI_in_dict(self):
dl = DataLoader(embeddings_initial='Polyglot', embedding_loading='in_dict')
# dl = DataLoader(embeddings_initial='FastText', embedding_loading='load_dict',
# embedding_params={'first_time_emb_load': False})
dl.load('data/pickles/')
dl.get_all_and_dump('data/pickles/')
gen = dl.get_generator(drop_last=False, initialize=True)
tr = dl.get_train_data()
nr_data_points = 0
        # Short check that the number of data points yielded equals the size
        # of the training set. TL;DR: yes, it does.
while True:
            data, batch = next(gen)
if data is None:
break
nr_data_points += len(data)
self.assertEqual(nr_data_points, len(tr))
def test_loaded_polyglot_embeddings(self):
data = pickle_call('data/embeddings/polyglot-en.pkl')
dl = DataLoader(embeddings_initial='Polyglot', embedding_loading='in_dict')
dl.load('data/pickles/')
dl.get_all_and_dump('data/pickles/')
all_true = None
for i in range(len(data[0])):
term = data[0][i]
embedding = data[1][i]
if term in dl.embedding.vocab_dict:
position = dl.embedding.vocab_dict[term]
stored_embedding = dl.embedding.embeddings.weight[position].data.numpy()
if all_true is None:
all_true = np.array_equal(embedding, stored_embedding)
else:
all_true = all_true and np.array_equal(embedding, stored_embedding)
self.assertTrue(all_true)
if __name__ == "__main__":
    unittest.main()
``` |
{
"source": "jophex/SHOPPY-ADMIN",
"score": 2
} |
#### File: jophex/SHOPPY-ADMIN/main.py
```python
from kivy import utils
import datetime
from kivy.core.window import Window
from kivy.properties import StringProperty, NumericProperty
from kivy.uix.image import AsyncImage
from kivymd.app import MDApp
from kivymd.uix.card import MDCard
from kivymd.uix.label import MDLabel
from kivy import Config
import os
Config.set('graphics', 'multisamples', '0')
os.environ['KIVY_GL_BACKEND'] = 'angle_sdl2'
if utils.platform != 'android':
Window.size = (360, 640)
class Foods(MDCard):
pass
class Products(MDCard):
pass
class Labels(MDLabel):
pass
class MainApp(MDApp):
# ------------------- SIZE----------------------- #
size_x = NumericProperty(0)
size_y = NumericProperty(0)
# ------------- MAIN VARIABLES ---------------------#
orders = StringProperty('')
oders2 = NumericProperty(1)
times = str(datetime.datetime.now())
timess = times.split('.')
products_screen = StringProperty(' ')
def test(self):
for i in range(self.oders2):
card = Foods()
scroll = self.root.ids.Orders
self.orders = f'{i}'
self.product_name = f'product {i}'
self.product_price = f'price {i}/tsh'
self.time = f'{self.timess[0]}'
self.company_name = i * 'aa'
            card.md_bg_color = 8 / 255, 18 / 255, 115 / 255, 1  # RGB channels normalised to 0-1
card.add_widget(AsyncImage(source='images/test.png'))
card.add_widget(Labels(text=self.product_name, halign='center'))
card.add_widget(Labels(text=self.product_price, halign='center'))
card.add_widget(Labels(text=self.time, halign='center'))
card.add_widget(Labels(text=self.company_name, halign='center'))
card.add_widget(Labels(text=self.orders, halign='center'))
card.id = f'product{i}'
scroll.add_widget(card)
def product(self):
for i in range(1):
products = Products(on_release=self.details)
scroll = self.root.ids.products
self.product_names = f'name {i}'
self.price_products = f'price {i}'
products.add_widget(AsyncImage(source='images/test.png'))
products.add_widget(Labels(text=self.product_names, halign='center'))
products.add_widget(Labels(text=self.price_products, halign='center'))
products.id = f'product{i}'
scroll.add_widget(products)
def details(self, instance):
sm = self.root
sm.current = "products_details"
self.products_screen = instance.id
def build(self):
self.theme_cls.theme_style = "Light"
self.theme_cls.primary_palette = "LightGreen"
        self.theme_cls.accent_palette = "Brown"
self.title = 'Shoppy Admin'
self.size_x, self.size_y = Window.size
MainApp().run()
``` |
{
"source": "jophinep/python-design-patterns",
"score": 4
} |
#### File: jophinep/python-design-patterns/flyweight.py
```python
from functools import wraps
def flyweight(cls):
"""
flyweight class decorator method
This is an python implementation of flyweight design pattern
Ref: https://www.geeksforgeeks.org/flyweight-design-pattern/
"""
instances = {}
kwd_mark = object()
@wraps(cls)
def getinstance(*args, **kwargs):
"""
Wapper method
"""
key = (cls.__name__,) + args + (kwd_mark,) + tuple(sorted(kwargs.items()))
hash_key = hash(key)
if hash_key not in instances:
instances[hash_key] = cls(*args, **kwargs)
return instances[hash_key]
return getinstance
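if __name__ == "__main__":
    # Illustrative check (example added for clarity): equal constructor
    # arguments share one instance; a different key yields a new instance.
    @flyweight
    class Glyph:
        def __init__(self, char, font="mono"):
            self.char = char
            self.font = font
    assert Glyph("x") is Glyph("x")
    assert Glyph("x") is not Glyph("x", font="serif")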
``` |
{
"source": "Jophish/aiohttp-client-cache",
"score": 3
} |
#### File: aiohttp_client_cache/backends/gridfs.py
```python
import pickle
from typing import Iterable, Optional
from gridfs import GridFS
from pymongo import MongoClient
from aiohttp_client_cache.backends import BaseCache, CacheBackend, ResponseOrKey
from aiohttp_client_cache.backends.mongo import MongoDBCache
from aiohttp_client_cache.forge_utils import extend_signature
class GridFSBackend(CacheBackend):
"""An async-compatible interface for caching objects in MongoDB GridFS.
Use this if you need to support documents greater than 16MB.
Args:
connection: Optional client object to use instead of creating a new one
See :py:class:`.CacheBackend` for additional args.
"""
@extend_signature(CacheBackend.__init__)
def __init__(self, cache_name: str = 'http-cache', connection: MongoClient = None, **kwargs):
super().__init__(cache_name=cache_name, **kwargs)
self.responses = GridFSCache(cache_name, connection)
self.keys_map = MongoDBCache(cache_name, 'http_redirects', self.responses.connection)
# TODO: Incomplete/untested
# TODO: Fully async implementation. Current implementation uses blocking operations.
# Methods are currently defined as async only for compatibility with BaseCache API.
class GridFSCache(BaseCache):
"""A dictionary-like interface for MongoDB GridFS
Args:
db_name: database name (be careful with production databases)
connection: MongoDB connection instance to use instead of creating a new one
"""
def __init__(self, db_name, connection: MongoClient = None):
self.connection = connection or MongoClient()
self.db = self.connection[db_name]
self.fs = GridFS(self.db)
# TODO
async def contains(self, key: str) -> bool:
raise NotImplementedError
async def clear(self):
self.db['fs.files'].drop()
self.db['fs.chunks'].drop()
async def delete(self, key: str):
res = self.fs.find_one({'_id': key})
if res is not None:
self.fs.delete(res._id)
async def keys(self) -> Iterable[str]:
return [d._id for d in self.fs.find()]
async def read(self, key: str) -> Optional[ResponseOrKey]:
result = self.fs.find_one({'_id': key})
if result is None:
raise KeyError
return pickle.loads(bytes(result.read()))
async def size(self) -> int:
return self.db['fs.files'].count()
# TODO
async def values(self) -> Iterable[ResponseOrKey]:
raise NotImplementedError
async def write(self, key: str, item: ResponseOrKey):
await self.delete(key)
self.fs.put(pickle.dumps(item, protocol=-1), **{'_id': key})
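if __name__ == "__main__":
    import asyncio
    async def _demo():
        # Illustrative round trip (example added for clarity; assumes a
        # MongoDB server is reachable on the default localhost port).
        cache = GridFSCache("demo-db")
        await cache.write("key", b"value")
        print(await cache.read("key"))  # b'value'
        await cache.delete("key")
    asyncio.run(_demo())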
```
#### File: aiohttp-client-cache/aiohttp_client_cache/forge_utils.py
```python
from typing import Callable, Iterable
import forge
def extend_signature(template_func: Callable) -> Callable:
"""Copy another function's signature, and extend it with the wrapped function's signature"""
def wrapper(target_func: Callable):
revision = get_combined_revision([template_func, target_func])
return revision(target_func)
return wrapper
def get_combined_revision(functions: Iterable[Callable]) -> forge.Revision:
"""Combine the parameters of all revisions into a single revision"""
params = {}
for func in functions:
params.update(forge.copy(func).signature.parameters)
return forge.sign(*params.values())
```
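A sketch of how the decorator composes signatures (illustrative; both functions below are hypothetical, and the `**kwargs` on the target mirrors how the backends in this package absorb the template's parameters):
```python
import inspect
from aiohttp_client_cache.forge_utils import extend_signature
def template(cache_name: str = 'http-cache', expire_after: int = -1):
    """Hypothetical template whose parameters should be inherited."""
@extend_signature(template)
def target(connection=None, **kwargs):
    """Hypothetical target; its signature gains template's parameters."""
print(inspect.signature(target))
# Expected: (cache_name: str = 'http-cache', expire_after: int = -1, connection=None, **kwargs)
```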
#### File: aiohttp-client-cache/aiohttp_client_cache/response.py
```python
import json
from datetime import datetime
from http.cookies import SimpleCookie
from typing import Any, Dict, Iterable, Mapping, Optional, Union
import attr
from aiohttp import ClientResponse, ClientResponseError
from aiohttp.client_reqrep import ContentDisposition
from aiohttp.typedefs import StrOrURL
# CachedResponse attributes to not copy directly from ClientResponse
EXCLUDE_ATTRS = {
'_body',
'created_at',
'encoding',
'history',
'is_expired',
'request_info',
}
JsonResponse = Optional[Dict[str, Any]]
@attr.s(auto_attribs=True, slots=True)
class RequestInfo:
"""A picklable version of aiohttp.client_reqrep.RequestInfo"""
url: str
method: str
headers: dict
real_url: str
@classmethod
def from_object(cls, request_info):
return cls(
url=str(request_info.url),
method=request_info.method,
headers=dict(request_info.headers),
real_url=str(request_info.real_url),
)
@attr.s(slots=True)
class CachedResponse:
"""A dataclass containing cached response information. Will mostly behave the same as a
:py:class:`aiohttp.ClientResponse` that has been read.
"""
method: str = attr.ib()
reason: str = attr.ib()
status: int = attr.ib()
url: StrOrURL = attr.ib()
version: str = attr.ib()
_body: Any = attr.ib(default=None)
content_disposition: ContentDisposition = attr.ib(default=None)
cookies: SimpleCookie = attr.ib(default=None)
created_at: datetime = attr.ib(factory=datetime.utcnow)
encoding: str = attr.ib(default=None)
headers: Mapping = attr.ib(factory=dict)
history: Iterable = attr.ib(factory=tuple)
is_expired: bool = attr.ib(default=False)
request_info: RequestInfo = attr.ib(default=None)
@classmethod
async def from_client_response(cls, client_response: ClientResponse):
# Response may not have been read yet, if fetched by something other than CachedSession
if not client_response._released:
await client_response.read()
# Copy most attributes over as is
copy_attrs = set(attr.fields_dict(cls).keys()) - EXCLUDE_ATTRS
response = cls(**{k: getattr(client_response, k) for k in copy_attrs})
# Set some remaining attributes individually
response._body = client_response._body
response.headers = dict(client_response.headers)
# The encoding may be unset even if the response has been read
try:
response.encoding = client_response.get_encoding()
except RuntimeError:
pass
response.request_info = RequestInfo.from_object(client_response.request_info)
response.url = str(client_response.url)
if client_response.history:
response.history = (
*[await cls.from_client_response(r) for r in client_response.history],
)
return response
@property
def ok(self) -> bool:
"""Returns ``True`` if ``status`` is less than ``400``, ``False`` if not"""
try:
self.raise_for_status()
return True
except ClientResponseError:
return False
def get_encoding(self):
return self.encoding
async def json(self, encoding: Optional[str] = None, **kwargs) -> Optional[Dict[str, Any]]:
"""Read and decode JSON response"""
stripped = self._body.strip()
if not stripped:
return None
return json.loads(stripped.decode(encoding or self.encoding))
def raise_for_status(self) -> None:
if self.status >= 400:
raise ClientResponseError(
self.request_info, # type: ignore # These types are interchangeable
tuple(),
status=self.status,
message=self.reason,
headers=self.headers,
)
def read(self):
"""No-op function for compatibility with ClientResponse"""
def release(self):
"""No-op function for compatibility with ClientResponse"""
async def text(self, encoding: Optional[str] = None, errors: str = "strict") -> str:
"""Read response payload and decode"""
return self._body.decode(encoding or self.encoding, errors=errors)
AnyResponse = Union[ClientResponse, CachedResponse]
``` |
{
"source": "Jophish/sqlfluff",
"score": 2
} |
#### File: sqlfluff/core/linter.py
```python
import os
import time
import logging
import traceback
from typing import (
Any,
Dict,
Generator,
Iterable,
Iterator,
List,
NamedTuple,
Optional,
Tuple,
Union,
cast,
overload,
)
from typing_extensions import Literal
from benchit import BenchIt
import pathspec
from sqlfluff.core.errors import (
SQLBaseError,
SQLLexError,
SQLLintError,
SQLParseError,
CheckTuple,
)
from sqlfluff.core.parser import Lexer, Parser
from sqlfluff.core.string_helpers import findall
from sqlfluff.core.templaters import TemplatedFile
from sqlfluff.core.rules import get_ruleset
from sqlfluff.core.config import FluffConfig, ConfigLoader
# Classes needed only for type checking
from sqlfluff.core.parser.segments.base import BaseSegment, FixPatch
from sqlfluff.core.parser.segments.meta import MetaSegment
from sqlfluff.core.parser.segments.raw import RawSegment
from sqlfluff.core.rules.base import BaseCrawler
# Instantiate the linter logger
linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter")
class RuleTuple(NamedTuple):
"""Rule Tuple object for describing rules."""
code: str
description: str
class ProtoFile(NamedTuple):
"""Proto object to be inherited by LintedFile."""
path: str
violations: list
time_dict: dict
tree: Any
ignore_mask: list
class ParsedString(NamedTuple):
"""An object to store the result of parsing a string."""
tree: Optional[BaseSegment]
violations: List[SQLBaseError]
time_dict: dict
templated_file: TemplatedFile
config: FluffConfig
class EnrichedFixPatch(NamedTuple):
"""An edit patch for a source file."""
source_slice: slice
templated_slice: slice
fixed_raw: str
# The patch type, functions mostly for debugging and explanation
# than for function. It allows traceability of *why* this patch was
# generated.
patch_type: str
templated_str: str
source_str: str
def dedupe_tuple(self):
"""Generate a tuple of this fix for deduping."""
return (self.source_slice, self.fixed_raw)
class LintedFile(NamedTuple):
"""A class to store the idea of a linted file."""
path: str
violations: list
time_dict: dict
tree: Optional[BaseSegment]
ignore_mask: list
templated_file: TemplatedFile
def check_tuples(self) -> List[CheckTuple]:
"""Make a list of check_tuples.
This assumes that all the violations found are
linting violations (and therefore implement `check_tuple()`).
        If any of them is not, this function raises it instead.
"""
vs: List[CheckTuple] = []
v: SQLLintError
for v in self.get_violations():
if hasattr(v, "check_tuple"):
vs.append(v.check_tuple())
else:
raise v
return vs
def get_violations(
self,
rules: Optional[Union[str, Tuple[str, ...]]] = None,
types: Optional[Union[Any, Iterable[Any]]] = None,
filter_ignore: bool = True,
fixable: bool = None,
) -> list:
"""Get a list of violations, respecting filters and ignore options.
        Optionally filtered by rule code, violation type, fixability and ignore status.
"""
violations = self.violations
# Filter types
if types:
try:
types = tuple(types)
except TypeError:
types = (types,)
violations = [v for v in violations if isinstance(v, types)]
# Filter rules
if rules:
if isinstance(rules, str):
rules = (rules,)
else:
rules = tuple(rules)
violations = [v for v in violations if v.rule_code() in rules]
# Filter fixable
if fixable is not None:
# Assume that fixable is true or false if not None
violations = [v for v in violations if v.fixable is fixable]
# Filter ignorable violations
if filter_ignore:
violations = [v for v in violations if not v.ignore]
# Ignore any rules in the ignore mask
if self.ignore_mask:
for line_no, rules in self.ignore_mask:
violations = [
v
for v in violations
if not (
v.line_no() == line_no
and (rules is None or v.rule_code() in rules)
)
]
return violations
def num_violations(self, **kwargs) -> int:
"""Count the number of violations.
        Accepts the same filters as `get_violations`.
"""
violations = self.get_violations(**kwargs)
return len(violations)
def is_clean(self) -> bool:
"""Return True if there are no ignorable violations."""
return not any(self.get_violations(filter_ignore=True))
def fix_string(self) -> Tuple[Any, bool]:
"""Obtain the changes to a path as a string.
We use the source mapping features of TemplatedFile
to generate a list of "patches" which cover the non
templated parts of the file and refer back to the locations
in the original file.
NB: This is MUCH FASTER than the original approach
using difflib in pre 0.4.0.
There is an important distinction here between Slices and
Segments. A Slice is a portion of a file which is determined
by the templater based on which portions of the source file
are templated or not, and therefore before Lexing and so is
completely dialect agnostic. A Segment is determined by the
Lexer from portions of strings after templating.
"""
bencher = BenchIt()
bencher("fix_string: start")
linter_logger.debug("Original Tree: %r", self.templated_file.templated_str)
linter_logger.debug("Fixed Tree: %r", self.tree.raw) # type: ignore
# The sliced file is contiguous in the TEMPLATED space.
# NB: It has gaps and repeats in the source space.
# It's also not the FIXED file either.
linter_logger.debug("### Templated File.")
for idx, file_slice in enumerate(self.templated_file.sliced_file):
t_str = self.templated_file.templated_str[file_slice.templated_slice]
s_str = self.templated_file.source_str[file_slice.source_slice]
if t_str == s_str:
linter_logger.debug(
" File slice: %s %r [invariant]", idx, file_slice
)
else:
linter_logger.debug(" File slice: %s %r", idx, file_slice)
linter_logger.debug(" \t\t\ttemplated: %r\tsource: %r", t_str, s_str)
original_source = self.templated_file.source_str
# Make sure no patches overlap and divide up the source file into slices.
# Any Template tags in the source file are off limits.
source_only_slices = self.templated_file.source_only_slices()
linter_logger.debug("Source-only slices: %s", source_only_slices)
# Iterate patches, filtering and translating as we go:
linter_logger.debug("### Beginning Patch Iteration.")
filtered_source_patches = []
dedupe_buffer = []
# We use enumerate so that we get an index for each patch. This is entirely
# so when debugging logs we can find a given patch again!
patch: Union[EnrichedFixPatch, FixPatch]
for idx, patch in enumerate(
self.tree.iter_patches(templated_str=self.templated_file.templated_str) # type: ignore
):
linter_logger.debug(" %s Yielded patch: %s", idx, patch)
# This next bit is ALL FOR LOGGING AND DEBUGGING
if patch.templated_slice.start >= 10:
pre_hint = self.templated_file.templated_str[
patch.templated_slice.start - 10 : patch.templated_slice.start
]
else:
pre_hint = self.templated_file.templated_str[
: patch.templated_slice.start
]
if patch.templated_slice.stop + 10 < len(self.templated_file.templated_str):
post_hint = self.templated_file.templated_str[
patch.templated_slice.stop : patch.templated_slice.stop + 10
]
else:
post_hint = self.templated_file.templated_str[
patch.templated_slice.stop :
]
linter_logger.debug(
" Templated Hint: ...%r <> %r...", pre_hint, post_hint
)
# Attempt to convert to source space.
try:
source_slice = self.templated_file.templated_slice_to_source_slice(
patch.templated_slice,
)
except ValueError:
linter_logger.info(
" - Skipping. Source space Value Error. i.e. attempted insertion within templated section."
)
# If we try and slice within a templated section, then we may fail
# in which case, we should skip this patch.
continue
# Check for duplicates
dedupe_tuple = (source_slice, patch.fixed_raw)
if dedupe_tuple in dedupe_buffer:
linter_logger.info(
" - Skipping. Source space Duplicate: %s", dedupe_tuple
)
continue
# We now evaluate patches in the source-space for whether they overlap
# or disrupt any templated sections.
# The intent here is that unless explicitly stated, a fix should never
# disrupt a templated section.
# NOTE: We rely here on the patches being sorted.
# TODO: Implement a mechanism for doing templated section fixes. For
# now it's just not allowed.
# Get the affected raw slices.
local_raw_slices = self.templated_file.raw_slices_spanning_source_slice(
source_slice
)
local_type_list = [slc.slice_type for slc in local_raw_slices]
enriched_patch = EnrichedFixPatch(
source_slice=source_slice,
templated_slice=patch.templated_slice,
patch_type=patch.patch_type,
fixed_raw=patch.fixed_raw,
templated_str=self.templated_file.templated_str[patch.templated_slice],
source_str=self.templated_file.source_str[source_slice],
)
# Deal with the easy case of only literals
if set(local_type_list) == {"literal"}:
linter_logger.info(
" * Keeping patch on literal-only section: %s", enriched_patch
)
filtered_source_patches.append(enriched_patch)
dedupe_buffer.append(enriched_patch.dedupe_tuple())
# Is it a zero length patch.
elif (
enriched_patch.source_slice.start == enriched_patch.source_slice.stop
and enriched_patch.source_slice.start == local_raw_slices[0].source_idx
):
linter_logger.info(
" * Keeping insertion patch on slice boundary: %s",
enriched_patch,
)
filtered_source_patches.append(enriched_patch)
dedupe_buffer.append(enriched_patch.dedupe_tuple())
# If it's ONLY templated then we should skip it.
elif "literal" not in local_type_list:
linter_logger.info(
" - Skipping patch over templated section: %s", enriched_patch
)
# If we span more than two slices then we should just skip it. Too Hard.
elif len(local_raw_slices) > 2:
linter_logger.info(
" - Skipping patch over more than two raw slices: %s",
enriched_patch,
)
# If it's an insertion (i.e. the string in the pre-fix template is '') then we
# won't be able to place it, so skip.
elif not enriched_patch.templated_str:
linter_logger.info(
" - Skipping insertion patch in templated section: %s",
enriched_patch,
)
# If the string from the templated version isn't in the source, then we can't fix it.
elif enriched_patch.templated_str not in enriched_patch.source_str:
linter_logger.info(
" - Skipping edit patch on templated content: %s",
enriched_patch,
)
else:
# Identify all the places the string appears in the source content.
positions = list(
findall(enriched_patch.templated_str, enriched_patch.source_str)
)
if len(positions) != 1:
linter_logger.debug(
" - Skipping edit patch on non-unique templated content: %s",
enriched_patch,
)
continue
                # We have a single occurrence of the thing we want to patch. This
# means we can use its position to place our patch.
new_source_slice = slice(
enriched_patch.source_slice.start + positions[0],
enriched_patch.source_slice.start
+ positions[0]
+ len(enriched_patch.templated_str),
)
enriched_patch = EnrichedFixPatch(
source_slice=new_source_slice,
templated_slice=enriched_patch.templated_slice,
patch_type=enriched_patch.patch_type,
fixed_raw=enriched_patch.fixed_raw,
templated_str=enriched_patch.templated_str,
source_str=enriched_patch.source_str,
)
linter_logger.debug(
" * Keeping Tricky Case. Positions: %s, New Slice: %s, Patch: %s",
positions,
new_source_slice,
enriched_patch,
)
filtered_source_patches.append(enriched_patch)
dedupe_buffer.append(enriched_patch.dedupe_tuple())
continue
# Sort the patches before building up the file.
filtered_source_patches = sorted(
filtered_source_patches, key=lambda x: x.source_slice.start
)
# We now slice up the file using the patches and any source only slices.
# This gives us regions to apply changes to.
slice_buff = []
source_idx = 0
for patch in filtered_source_patches:
# Are there templated slices at or before the start of this patch?
while (
source_only_slices
and source_only_slices[0].source_idx < patch.source_slice.start
):
next_so_slice = source_only_slices.pop(0).source_slice()
# Add a pre-slice before the next templated slices if needed.
if next_so_slice.start > source_idx:
slice_buff.append(slice(source_idx, next_so_slice.start))
# Add the templated slice.
slice_buff.append(next_so_slice)
source_idx = next_so_slice.stop
# Is there a gap between current position and this patch?
if patch.source_slice.start > source_idx:
# Add a slice up to this patch.
slice_buff.append(slice(source_idx, patch.source_slice.start))
# Is this patch covering an area we've already covered?
if patch.source_slice.start < source_idx:
linter_logger.info(
"Skipping overlapping patch at Index %s, Patch: %s",
source_idx,
patch,
)
# Ignore the patch for now...
continue
# Add this patch.
slice_buff.append(patch.source_slice)
source_idx = patch.source_slice.stop
# Add a tail slice.
if source_idx < len(self.templated_file.source_str):
slice_buff.append(slice(source_idx, len(self.templated_file.source_str)))
linter_logger.debug("Final slice buffer: %s", slice_buff)
# Iterate through the patches, building up the new string.
str_buff = ""
for source_slice in slice_buff:
# Is it one in the patch buffer:
for patch in filtered_source_patches:
if patch.source_slice == source_slice:
# Use the patched version
linter_logger.debug(
"%-30s %s %r > %r",
"Appending {} Patch:".format(patch.patch_type),
patch.source_slice,
patch.source_str,
patch.fixed_raw,
)
str_buff += patch.fixed_raw
break
else:
# Use the raw string
linter_logger.debug(
"Appending Raw: %s %r",
source_slice,
self.templated_file.source_str[source_slice],
)
str_buff += self.templated_file.source_str[source_slice]
bencher("fix_string: Fixing loop done")
# The success metric here is whether anything ACTUALLY changed.
return str_buff, str_buff != original_source
def persist_tree(self, suffix: str = "") -> bool:
"""Persist changes to the given path."""
write_buff, success = self.fix_string()
if success:
fname = self.path
            # If there is a suffix specified, then use it.
if suffix:
root, ext = os.path.splitext(fname)
fname = root + suffix + ext
# Actually write the file.
with open(fname, "w") as f:
f.write(write_buff)
return success
class LintedPath:
"""A class to store the idea of a collection of linted files at a single start path."""
def __init__(self, path: str) -> None:
self.files: List[LintedFile] = []
self.path: str = path
def add(self, file: LintedFile) -> None:
"""Add a file to this path."""
self.files.append(file)
@overload
def check_tuples(self, by_path: Literal[False]) -> List[CheckTuple]:
"""Return a List of CheckTuples when by_path is False."""
...
@overload
def check_tuples(self, by_path: Literal[True]) -> Dict[str, List[CheckTuple]]:
"""Return a Dict of paths and CheckTuples when by_path is True."""
...
@overload
def check_tuples(self, by_path: bool = False):
"""Default overload method."""
...
def check_tuples(self, by_path=False):
"""Compress all the tuples into one list.
NB: This is a little crude, as you can't tell which
file the violations are from. Good for testing though.
For more control set the `by_path` argument to true.
"""
if by_path:
return {file.path: file.check_tuples() for file in self.files}
else:
tuple_buffer: List[CheckTuple] = []
for file in self.files:
tuple_buffer += file.check_tuples()
return tuple_buffer
def num_violations(self, **kwargs) -> int:
"""Count the number of violations in the path."""
return sum(file.num_violations(**kwargs) for file in self.files)
def get_violations(self, **kwargs) -> list:
"""Return a list of violations in the path."""
buff: list = []
for file in self.files:
buff += file.get_violations(**kwargs)
return buff
def violation_dict(self, **kwargs) -> Dict[str, list]:
"""Return a dict of violations by file path."""
return {file.path: file.get_violations(**kwargs) for file in self.files}
def stats(self) -> Dict[str, int]:
"""Return a dict containing linting stats about this path."""
return dict(
files=len(self.files),
clean=sum(file.is_clean() for file in self.files),
unclean=sum(not file.is_clean() for file in self.files),
violations=sum(file.num_violations() for file in self.files),
)
def persist_changes(
self, formatter: Any = None, fixed_file_suffix: str = "", **kwargs
) -> Dict[str, Union[bool, str]]:
"""Persist changes to files in the given path.
This also logs the output as we go using the formatter if present.
"""
# Run all the fixes for all the files and return a dict
buffer: Dict[str, Union[bool, str]] = {}
for file in self.files:
if file.num_violations(fixable=True, **kwargs) > 0:
buffer[file.path] = file.persist_tree(suffix=fixed_file_suffix)
result = buffer[file.path]
else:
buffer[file.path] = True
result = "SKIP"
if formatter:
formatter.dispatch_persist_filename(filename=file.path, result=result)
return buffer
@property
def tree(self) -> Optional[BaseSegment]:
"""A convenience method for when there is only one file and we want the tree."""
if len(self.files) > 1:
raise ValueError(
".tree() cannot be called when a LintedPath contains more than one file."
)
return self.files[0].tree
class LintingResult:
"""A class to represent the result of a linting operation.
Notably this might be a collection of paths, all with multiple
potential files within them.
"""
def __init__(self) -> None:
self.paths: List[LintedPath] = []
@staticmethod
def sum_dicts(d1: Dict[str, Any], d2: Dict[str, Any]) -> Dict[str, Any]:
"""Take the keys of two dictionaries and add them."""
keys = set(d1.keys()) | set(d2.keys())
return {key: d1.get(key, 0) + d2.get(key, 0) for key in keys}
@staticmethod
def combine_dicts(*d: dict) -> dict:
"""Take any set of dictionaries and combine them."""
dict_buffer: dict = {}
for dct in d:
dict_buffer.update(dct)
return dict_buffer
def add(self, path: LintedPath) -> None:
"""Add a new `LintedPath` to this result."""
self.paths.append(path)
@overload
def check_tuples(self, by_path: Literal[False]) -> List[CheckTuple]:
"""Return a List of CheckTuples when by_path is False."""
...
@overload
def check_tuples(
self, by_path: Literal[True]
) -> Dict[LintedPath, List[CheckTuple]]:
"""Return a Dict of LintedPath and CheckTuples when by_path is True."""
...
@overload
def check_tuples(self, by_path: bool = False):
"""Default overload method."""
...
def check_tuples(self, by_path=False):
"""Fetch all check_tuples from all contained `LintedPath` objects.
Args:
by_path (:obj:`bool`, optional): When False, all the check_tuples
are aggregated into one flat list. When True, we return a `dict`
of paths, each with its own list of check_tuples. Defaults to False.
"""
if by_path:
buff: Dict[LintedPath, List[CheckTuple]] = {}
for path in self.paths:
buff.update(path.check_tuples(by_path=by_path))
return buff
else:
tuple_buffer: List[CheckTuple] = []
for path in self.paths:
tuple_buffer += path.check_tuples()
return tuple_buffer
def num_violations(self, **kwargs) -> int:
"""Count the number of violations in the result."""
return sum(path.num_violations(**kwargs) for path in self.paths)
def get_violations(self, **kwargs):
"""Return a list of violations in the result."""
buff = []
for path in self.paths:
buff += path.get_violations(**kwargs)
return buff
def violation_dict(self, **kwargs):
"""Return a dict of paths and violations."""
return self.combine_dicts(path.violation_dict(**kwargs) for path in self.paths)
def stats(self) -> Dict[str, Any]:
"""Return a stats dictionary of this result."""
all_stats: Dict[str, Any] = dict(files=0, clean=0, unclean=0, violations=0)
for path in self.paths:
all_stats = self.sum_dicts(path.stats(), all_stats)
if all_stats["files"] > 0:
all_stats["avg per file"] = (
all_stats["violations"] * 1.0 / all_stats["files"]
)
all_stats["unclean rate"] = all_stats["unclean"] * 1.0 / all_stats["files"]
else:
all_stats["avg per file"] = 0
all_stats["unclean rate"] = 0
all_stats["clean files"] = all_stats["clean"]
all_stats["unclean files"] = all_stats["unclean"]
all_stats["exit code"] = 65 if all_stats["violations"] > 0 else 0
all_stats["status"] = "FAIL" if all_stats["violations"] > 0 else "PASS"
return all_stats
def as_records(self) -> List[dict]:
"""Return the result as a list of dictionaries.
Each record contains a key specifying the filepath, and a list of violations. This
method is useful for serialization as all objects will be builtin python types
(ints, strs).
"""
return [
{
"filepath": path,
"violations": sorted(
# Sort violations by line and then position
[v.get_info_dict() for v in violations],
# The tuple allows sorting by line number, then position, then code
key=lambda v: (v["line_no"], v["line_pos"], v["code"]),
),
}
for lintedpath in self.paths
for path, violations in lintedpath.violation_dict().items()
if violations
]
def persist_changes(self, formatter, **kwargs) -> dict:
"""Run all the fixes for all the files and return a dict."""
return self.combine_dicts(
*[
path.persist_changes(formatter=formatter, **kwargs)
for path in self.paths
]
)
@property
def tree(self) -> Optional[BaseSegment]:
"""A convenience method for when there is only one file and we want the tree."""
if len(self.paths) > 1:
raise ValueError(
".tree() cannot be called when a LintingResult contains more than one path."
)
return self.paths[0].tree
class Linter:
"""The interface class to interact with the linter."""
def __init__(
self,
config: Optional[FluffConfig] = None,
formatter: Any = None,
dialect: Optional[str] = None,
rules: Optional[Union[str, List[str]]] = None,
user_rules: Optional[Union[str, List[str]]] = None,
) -> None:
# Store the config object
self.config = FluffConfig.from_kwargs(
config=config, dialect=dialect, rules=rules
)
# Get the dialect and templater
self.dialect = self.config.get("dialect_obj")
self.templater = self.config.get("templater_obj")
# Store the formatter for output
self.formatter = formatter
# Store references to user rule classes
self.user_rules = user_rules or []
def get_ruleset(self, config: Optional[FluffConfig] = None) -> List[BaseCrawler]:
"""Get hold of a set of rules."""
rs = get_ruleset()
# Register any user rules
for rule in self.user_rules:
rs.register(rule)
cfg = config or self.config
return rs.get_rulelist(config=cfg)
def rule_tuples(self) -> List[RuleTuple]:
"""A simple pass through to access the rule tuples of the rule set."""
rs = self.get_ruleset()
return [RuleTuple(rule.code, rule.description) for rule in rs]
def parse_string(
self,
in_str: str,
fname: Optional[str] = None,
recurse: bool = True,
config: Optional[FluffConfig] = None,
) -> ParsedString:
"""Parse a string.
Returns:
`ParsedString` of (`parsed`, `violations`, `time_dict`, `templated_file`).
`parsed` is a segment structure representing the parsed file. If
parsing fails due to an unrecoverable violation then we will
return None.
`violations` is a :obj:`list` of violations so far, which will either be
templating, lexing or parsing violations at this stage.
`time_dict` is a :obj:`dict` containing timings for how long each step
took in the process.
`templated_file` is a :obj:`TemplatedFile` containing the details
of the templated file.
"""
violations = []
t0 = time.monotonic()
bencher = BenchIt() # starts the timer
if fname:
short_fname: Optional[str] = fname.replace("\\", "/").split("/")[-1]
else:
# this handles the potential case of a null fname
short_fname = fname
bencher("Starting parse_string for {0!r}".format(short_fname))
# Dispatch the output for the parse header (including the config diff)
if self.formatter:
self.formatter.dispatch_parse_header(fname, self.config, config)
# Just use the local config from here:
config = config or self.config
# Scan the raw file for config commands.
for raw_line in in_str.splitlines():
if raw_line.startswith("-- sqlfluff"):
# Found an in-file config command
config.process_inline_config(raw_line)
linter_logger.info("TEMPLATING RAW [%s] (%s)", self.templater.name, fname)
templated_file, templater_violations = self.templater.process(
in_str=in_str, fname=fname, config=config
)
violations += templater_violations
# Detect the case of a catastrophic templater fail. In this case
# we don't continue. We'll just bow out now.
if not templated_file:
linter_logger.info("TEMPLATING FAILED: %s", templater_violations)
tokens = None
t1 = time.monotonic()
bencher("Templating {0!r}".format(short_fname))
if templated_file:
linter_logger.info("LEXING RAW (%s)", fname)
# Get the lexer
lexer = Lexer(config=config)
# Lex the file and log any problems
try:
tokens, lex_vs = lexer.lex(templated_file)
# We might just get the violations as a list
violations += lex_vs
except SQLLexError as err:
linter_logger.info("LEXING FAILED! (%s): %s", fname, err)
violations.append(err)
tokens = None
else:
tokens = None
if tokens:
linter_logger.info("Lexed tokens: %s", [seg.raw for seg in tokens])
else:
linter_logger.info("NO LEXED TOKENS!")
if tokens:
# Check that we've got sensible indentation from the lexer.
# We might need to suppress if it's a complicated file.
templating_blocks_indent = config.get(
"template_blocks_indent", "indentation"
)
if isinstance(templating_blocks_indent, str):
force_block_indent = templating_blocks_indent.lower().strip() == "force"
else:
force_block_indent = False
templating_blocks_indent = bool(templating_blocks_indent)
# If we're forcing it through we don't check.
if templating_blocks_indent and not force_block_indent:
indent_balance = sum(
getattr(elem, "indent_val", 0)
for elem in cast(Tuple[BaseSegment, ...], tokens)
)
if indent_balance != 0:
linter_logger.warning(
"Indent balance test failed for %r. Template indents will not be linted for this file.",
fname,
)
# Don't enable the templating blocks.
templating_blocks_indent = False
# Disable the linting of L003 on templated tokens.
config.set_value(["rules", "L003", "lint_templated_tokens"], False)
# The file will have been lexed without config, so check all indents
# are enabled.
new_tokens = []
for token in cast(Tuple[BaseSegment, ...], tokens):
if token.is_meta:
token = cast(MetaSegment, token)
if token.indent_val != 0:
# Don't allow it if we're not linting templating block indents.
if not templating_blocks_indent:
continue
# Don't allow it if it's not configured to function.
elif not token.is_enabled(
indent_config=config.get_section("indentation")
):
continue
new_tokens.append(token)
# Swap the buffers
tokens = new_tokens # type: ignore
t2 = time.monotonic()
bencher("Lexing {0!r}".format(short_fname))
linter_logger.info("PARSING (%s)", fname)
parser = Parser(config=config)
# Parse the file and log any problems
if tokens:
try:
parsed: Optional[BaseSegment] = parser.parse(tokens, recurse=recurse)
except SQLParseError as err:
linter_logger.info("PARSING FAILED! (%s): %s", fname, err)
violations.append(err)
parsed = None
if parsed:
linter_logger.info("\n###\n#\n# {0}\n#\n###".format("Parsed Tree:"))
linter_logger.info("\n" + parsed.stringify())
# We may succeed parsing, but still have unparsable segments. Extract them here.
for unparsable in parsed.iter_unparsables():
# No exception has been raised explicitly, but we still create one here
# so that we can use the common interface
violations.append(
SQLParseError(
"Found unparsable section: {0!r}".format(
unparsable.raw
if len(unparsable.raw) < 40
else unparsable.raw[:40] + "..."
),
segment=unparsable,
)
)
linter_logger.info("Found unparsable segment...")
linter_logger.info(unparsable.stringify())
else:
parsed = None
t3 = time.monotonic()
time_dict = {"templating": t1 - t0, "lexing": t2 - t1, "parsing": t3 - t2}
bencher("Finish parsing {0!r}".format(short_fname))
return ParsedString(parsed, violations, time_dict, templated_file, config)
@staticmethod
def extract_ignore_from_comment(comment: RawSegment):
"""Extract ignore mask entries from a comment segment."""
# Also trim any whitespace afterward
comment_content = comment.raw_trimmed().strip()
if comment_content.startswith("noqa"):
# This is an ignore identifier
comment_remainder = comment_content[4:]
if comment_remainder:
if not comment_remainder.startswith(":"):
return SQLParseError(
"Malformed 'noqa' section. Expected 'noqa: <rule>[,...]'",
segment=comment,
)
comment_remainder = comment_remainder[1:]
rules = [r.strip() for r in comment_remainder.split(",")]
return (comment.pos_marker.line_no, tuple(rules))
else:
return (comment.pos_marker.line_no, None)
return None
@staticmethod
def _warn_unfixable(code: str):
linter_logger.warning(
f"One fix for {code} not applied, it would re-cause the same error."
)
def lint_fix(
self, tree: BaseSegment, config: Optional[FluffConfig] = None, fix: bool = False
) -> Tuple[BaseSegment, List[SQLLintError]]:
"""Lint and optionally fix a tree object."""
config = config or self.config
# Keep track of the linting errors
all_linting_errors = []
# A placeholder for the fixes we had on the previous loop
last_fixes = None
# Keep a set of previous versions to catch infinite loops.
previous_versions = {tree.raw}
# If we are fixing then we want to loop up to the runaway_limit, otherwise just once for linting.
loop_limit = config.get("runaway_limit") if fix else 1
for loop in range(loop_limit):
changed = False
for crawler in self.get_ruleset(config=config):
# fixes should be a dict {} with keys edit, delete, create
# delete is just a list of segments to delete
# edit and create are list of tuples. The first element is the
# "anchor", the segment to look for either to edit or to insert BEFORE.
# The second is the element to insert or create.
linting_errors, _, fixes, _ = crawler.crawl(
tree, dialect=config.get("dialect_obj")
)
all_linting_errors += linting_errors
if fix and fixes:
linter_logger.info(f"Applying Fixes: {fixes}")
# Do some sanity checks on the fixes before applying.
if fixes == last_fixes:
self._warn_unfixable(crawler.code)
else:
last_fixes = fixes
new_tree, _ = tree.apply_fixes(fixes)
# Check for infinite loops
if new_tree.raw not in previous_versions:
# We've not seen this version of the file so far. Continue.
tree = new_tree
previous_versions.add(tree.raw)
changed = True
continue
else:
# Applying these fixes took us back to a state which we've
# seen before. Abort.
self._warn_unfixable(crawler.code)
if loop == 0:
# Keep track of initial errors for reporting.
initial_linting_errors = all_linting_errors.copy()
if fix and not changed:
# We did not change the file. Either the file is clean (no fixes), or
# any fixes which are present will take us back to a previous state.
linter_logger.info(
f"Fix loop complete. Stability achieved after {loop}/{loop_limit} loops."
)
break
if fix and loop + 1 == loop_limit:
linter_logger.warning(f"Loop limit on fixes reached [{loop_limit}].")
if config.get("ignore_templated_areas", default=True):
initial_linting_errors = self.remove_templated_errors(
initial_linting_errors
)
return tree, initial_linting_errors
def remove_templated_errors(
self, linting_errors: List[SQLLintError]
) -> List[SQLLintError]:
"""Filter a list of lint errors, removing those which only occur in templated slices."""
# Filter out any linting errors in templated sections if relevant.
linting_errors = list(
filter(
lambda e: getattr(e.segment.pos_marker, "is_literal", True),
linting_errors,
)
)
return linting_errors
def fix(
self, tree: BaseSegment, config: Optional[FluffConfig] = None
) -> Tuple[BaseSegment, List[SQLLintError]]:
"""Return the fixed tree and violations from lintfix when we're fixing."""
fixed_tree, violations = self.lint_fix(tree, config, fix=True)
return fixed_tree, violations
def lint(
self, tree: BaseSegment, config: Optional[FluffConfig] = None
) -> List[SQLLintError]:
"""Return just the violations from lintfix when we're only linting."""
_, violations = self.lint_fix(tree, config, fix=False)
return violations
def lint_string(
self,
in_str: str = "",
fname: str = "<string input>",
fix: bool = False,
config: Optional[FluffConfig] = None,
) -> LintedFile:
"""Lint a string.
Returns:
:obj:`LintedFile`: an object representing that linted file.
"""
# Sort out config, defaulting to the built in config if no override
config = config or self.config
# Using the new parser, read the file object.
parsed = self.parse_string(in_str=in_str, fname=fname, config=config)
time_dict = parsed.time_dict
vs = parsed.violations
tree = parsed.tree
# Look for comment segments which might indicate lines to ignore.
ignore_buff = []
if tree:
for comment in tree.recursive_crawl("comment"):
if comment.name == "inline_comment":
ignore_entry = self.extract_ignore_from_comment(comment)
if isinstance(ignore_entry, SQLParseError):
vs.append(ignore_entry)
elif ignore_entry:
ignore_buff.append(ignore_entry)
if ignore_buff:
linter_logger.info("Parsed noqa directives from file: %r", ignore_buff)
if tree:
t0 = time.monotonic()
linter_logger.info("LINTING (%s)", fname)
if fix:
tree, initial_linting_errors = self.fix(tree, config=config)
else:
initial_linting_errors = self.lint(tree, config=config)
# Update the timing dict
t1 = time.monotonic()
time_dict["linting"] = t1 - t0
# We're only going to return the *initial* errors, rather
# than any generated during the fixing cycle.
vs += initial_linting_errors
# We process the ignore config here if appropriate
if config:
for violation in vs:
violation.ignore_if_in(config.get("ignore"))
linted_file = LintedFile(
fname,
vs,
time_dict,
tree,
ignore_mask=ignore_buff,
templated_file=parsed.templated_file,
)
# This is the main command line output from linting.
if self.formatter:
self.formatter.dispatch_file_violations(
fname, linted_file, only_fixable=fix
)
# Safety flag for unset dialects
if config.get("dialect") == "ansi" and linted_file.get_violations(
fixable=True if fix else None, types=SQLParseError
):
if self.formatter:
self.formatter.dispatch_dialect_warning()
return linted_file
def paths_from_path(
self,
path: str,
ignore_file_name: str = ".sqlfluffignore",
ignore_non_existent_files: bool = False,
ignore_files: bool = True,
working_path: str = os.getcwd(),
) -> List[str]:
"""Return a set of sql file paths from a potentially more ambiguous path string.
Here we also deal with the .sqlfluffignore file if present.
When a path to a file to be linted is explicitly passed
we look for ignore files in all directories that are parents of the file,
up to the current directory.
If the current directory is not a parent of the file we only
look for an ignore file in the direct parent of the file.
"""
if not os.path.exists(path):
if ignore_non_existent_files:
return []
else:
raise IOError("Specified path does not exist")
# Files referred to exactly are also ignored if
# matched, but we warn the users when that happens
is_exact_file = not os.path.isdir(path)
if is_exact_file:
# When the exact file to lint is passed, we
# fill path_walk with an input that follows
# the structure of `os.walk`:
# (root, directories, files)
dirpath = os.path.dirname(path)
files = [os.path.basename(path)]
ignore_file_paths = ConfigLoader.find_ignore_config_files(
path=path, working_path=working_path, ignore_file_name=ignore_file_name
)
# Add paths that could contain "ignore files"
# to the path_walk list
path_walk_ignore_file = [
(
os.path.dirname(ignore_file_path),
None,
# Only one possible file, since we only
# have one "ignore file name"
[os.path.basename(ignore_file_path)],
)
for ignore_file_path in ignore_file_paths
]
path_walk: Union[
Iterator[Tuple[str, List[str], List[str]]],
List[Tuple[str, None, List[str]]],
] = [(dirpath, None, files)] + path_walk_ignore_file
else:
path_walk = os.walk(path)
# If it's a directory then expand the path!
buffer = []
ignore_set = set()
for dirpath, _, filenames in path_walk:
for fname in filenames:
fpath = os.path.join(dirpath, fname)
# Handle potential .sqlfluffignore files
if ignore_files and fname == ignore_file_name:
with open(fpath, "r") as fh:
spec = pathspec.PathSpec.from_lines("gitwildmatch", fh)
matches = spec.match_tree(dirpath)
for m in matches:
ignore_path = os.path.join(dirpath, m)
ignore_set.add(os.path.abspath(ignore_path))
# We don't need to process the ignore file any further
continue
# We won't purge files *here* because there's an edge case
# that the ignore file is processed after the sql file.
# Scan for remaining files
for ext in self.config.get("sql_file_exts", default=".sql").split(","):
# is it a sql file?
if fname.endswith(ext):
buffer.append(fpath)
if not ignore_files:
return sorted(buffer)
# Check the buffer for ignore items and normalise the rest.
filtered_buffer = []
for fpath in buffer:
if os.path.abspath(fpath) not in ignore_set:
filtered_buffer.append(os.path.normpath(fpath))
elif is_exact_file:
linter_logger.warning(
"Exact file path %s was given but "
"it was ignored by a %s pattern, "
"re-run with `--disregard-sqlfluffignores` to "
"skip %s"
% (
path,
ignore_file_name,
ignore_file_name,
)
)
# Return
return sorted(filtered_buffer)
def lint_string_wrapped(
self, string: str, fname: str = "<string input>", fix: bool = False
) -> LintingResult:
"""Lint strings directly."""
result = LintingResult()
linted_path = LintedPath(fname)
linted_path.add(self.lint_string(string, fname=fname, fix=fix))
result.add(linted_path)
return result
def lint_path(
self,
path: str,
fix: bool = False,
ignore_non_existent_files: bool = False,
ignore_files: bool = True,
) -> LintedPath:
"""Lint a path."""
linted_path = LintedPath(path)
if self.formatter:
self.formatter.dispatch_path(path)
for fname in self.paths_from_path(
path,
ignore_non_existent_files=ignore_non_existent_files,
ignore_files=ignore_files,
):
config = self.config.make_child_from_path(fname)
# Handle unicode issues gracefully
with open(
fname, "r", encoding="utf8", errors="backslashreplace"
) as target_file:
try:
linted_path.add(
self.lint_string(
target_file.read(), fname=fname, fix=fix, config=config
)
)
except IOError as e: # IOErrors caught in commands.py, so still raise it
raise (e)
except Exception:
linter_logger.warning(
f"""Unable to lint {fname} due to an internal error. \
Please report this as an issue with your query's contents and stacktrace below!
To hide this warning, add the failing file to .sqlfluffignore
{traceback.format_exc()}""",
)
return linted_path
def lint_paths(
self,
paths: Tuple[str, ...],
fix: bool = False,
ignore_non_existent_files: bool = False,
ignore_files: bool = True,
) -> LintingResult:
"""Lint an iterable of paths."""
# If no paths specified - assume local
if len(paths) == 0:
paths = (os.getcwd(),)
# Set up the result to hold what we get back
result = LintingResult()
for path in paths:
# Iterate through files recursively in the specified directory (if it's a directory)
# or read the file directly if it's not
result.add(
self.lint_path(
path,
fix=fix,
ignore_non_existent_files=ignore_non_existent_files,
ignore_files=ignore_files,
)
)
return result
def parse_path(
self, path: str, recurse: bool = True
) -> Generator[ParsedString, None, None]:
"""Parse a path of sql files.
NB: This a generator which will yield the result of each file
within the path iteratively.
"""
for fname in self.paths_from_path(path):
if self.formatter:
self.formatter.dispatch_path(path)
config = self.config.make_child_from_path(fname)
# Handle unicode issues gracefully
with open(
fname, "r", encoding="utf8", errors="backslashreplace"
) as target_file:
yield self.parse_string(
target_file.read(), fname=fname, recurse=recurse, config=config
)
```
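The `fix_string` routine above reconstructs the source by sorting non-overlapping patches and slicing the original string around them. A self-contained sketch of that slice-buffer idea, using an illustrative `Patch` type rather than sqlfluff's actual classes:
```python
# A minimal sketch of the slice-buffer technique used by fix_string above.
# The Patch type and example values are illustrative assumptions, not
# sqlfluff's real API.
from typing import NamedTuple

class Patch(NamedTuple):
    source_slice: slice  # region of the original string to replace
    fixed_raw: str       # replacement text

def apply_patches(source: str, patches: list) -> str:
    # Sort patches by position, then slice the source into regions.
    patches = sorted(patches, key=lambda p: p.source_slice.start)
    buff = []
    idx = 0
    for patch in patches:
        if patch.source_slice.start < idx:
            continue  # skip overlapping patches, as the linter does
        buff.append(source[idx:patch.source_slice.start])  # untouched prefix
        buff.append(patch.fixed_raw)                       # patched region
        idx = patch.source_slice.stop
    buff.append(source[idx:])                              # untouched tail
    return "".join(buff)

print(apply_patches("select  1", [Patch(slice(6, 8), " ")]))  # "select 1"
```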
#### File: rules/std/L007.py
```python
from sqlfluff.core.rules.base import BaseCrawler, LintResult
class Rule_L007(BaseCrawler):
"""Operators near newlines should be after, not before the newline.
| **Anti-pattern**
| The • character represents a space.
| In this example, the operator '+' should not be at the end of the second line.
.. code-block:: sql
SELECT
a +
b
FROM foo
| **Best practice**
| Place the operator after the newline.
.. code-block:: sql
SELECT
a
+ b
FROM foo
"""
def _eval(self, segment, memory, parent_stack, **kwargs):
"""Operators near newlines should be after, not before the newline.
We use the memory to keep track of whitespace up to now, and
whether the last code segment was an operator or not.
Anchor is our signal as to whether there's a problem.
We only trigger if we have an operator FOLLOWED BY a newline
before the next meaningful code segment.
"""
anchor = None
# The parent stack tells us whether we're in an expression or not.
if parent_stack and parent_stack[-1].is_type("expression"):
if segment.is_code:
# This is code, what kind?
if segment.is_type("binary_operator", "comparison_operator"):
# We only trigger if the last was an operator, not if this is.
pass
elif memory["last_code"] and memory["last_code"].is_type(
"binary_operator", "comparison_operator"
):
# It's not an operator, but the last code was. Now check to see
# there is a newline between us and the last operator.
for s in memory["since_code"]:
if s.name == "newline":
anchor = memory["last_code"]
# TODO: Work out a nice fix for this.
# Prepare memory for later
memory["last_code"] = segment
memory["since_code"] = []
else:
# This isn't a code segment...
# Prepare memory for later
memory["since_code"].append(segment)
else:
# Reset the memory if we're not in an expression
memory = {"last_code": None, "since_code": []}
# Anchor is our signal as to whether there's a problem
if anchor:
return LintResult(anchor=anchor, memory=memory)
else:
return LintResult(memory=memory)
```
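Rule L007 above works by remembering the last code segment and buffering everything seen since it. A toy re-implementation of that memory pattern over flat `(kind, raw)` token pairs; the token kinds here are illustrative, not sqlfluff segment types:
```python
# Toy scan mirroring L007's memory pattern; illustrative only.
def find_trailing_operators(tokens):
    """Yield operators followed by a newline before the next code token."""
    last_code = None  # the rule's memory["last_code"]
    since_code = []   # the rule's memory["since_code"]
    for kind, raw in tokens:
        if kind in ("operator", "code"):
            # Trigger only when THIS token is not an operator, the LAST
            # code token was, and a newline sits between the two.
            if (kind != "operator" and last_code is not None
                    and last_code[0] == "operator"
                    and any(k == "newline" for k, _ in since_code)):
                yield last_code
            last_code = (kind, raw)
            since_code = []
        else:
            since_code.append((kind, raw))

tokens = [("code", "a"), ("operator", "+"), ("newline", "\n"), ("code", "b")]
print(list(find_trailing_operators(tokens)))  # [('operator', '+')]
```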
#### File: sqlfluff/test/test_testing.py
```python
from _pytest.outcomes import Failed, Skipped
import pytest
from sqlfluff.testing.rules import (
assert_rule_fail_in_sql,
assert_rule_pass_in_sql,
rules__test_helper,
RuleTestCase,
)
def test_assert_rule_fail_in_sql_handle_parse_error():
"""Util assert_rule_fail_in_sql should handle parse errors."""
with pytest.raises(Failed) as failed_test:
assert_rule_fail_in_sql(code="L000", sql="select from")
failed_test.match("Found the following parse errors in test case:")
def test_assert_rule_fail_in_sql_should_fail_queries_that_unexpectedly_pass():
"""Util assert_rule_fail_in_sql should fail tests when a query passes rules that it violates."""
with pytest.raises(Failed) as failed_test:
assert_rule_fail_in_sql(code="L001", sql="select 1")
failed_test.match("No L001 failures found in query which should fail")
def test_assert_rule_pass_in_sql_should_handle_parse_error():
"""Util assert_rule_pass_in_sql should handle parse errors."""
with pytest.raises(Failed) as failed_test:
assert_rule_pass_in_sql(code="L001", sql="select from")
failed_test.match("Found unparsable section:")
def test_assert_rule_pass_in_sql_should_fail_when_there_are_violations():
"""Util assert_rule_pass_in_sql should fail when there are violations."""
with pytest.raises(Failed) as failed_test:
assert_rule_pass_in_sql(code="L005", sql="select a , b from t")
failed_test.match("Found L005 failures in query which should pass")
def test_rules__test_helper_skipped_when_test_case_skipped():
"""Util rules__test_helper should skip the test when test case is "skipped"."""
rule_test_case = RuleTestCase(skip="Skip this one for now")
with pytest.raises(Skipped) as skipped_test:
rules__test_helper(rule_test_case)
skipped_test.match("Skip this one for now")
``` |
{
"source": "joph/Machine-Learning-Workshop",
"score": 3
} |
#### File: code/modules/functions_pattern_recognition.py
```python
from PIL import Image
import numpy as np
from skimage import transform
from matplotlib.pyplot import imshow
import pandas as pd
import os
from shutil import copyfile
from keras import backend as K
from keras import models
from keras.preprocessing import image
from pathlib import Path
import cv2
def load(filename):
np_image = Image.open(filename)
#imshow(np.asarray(np_image))
np_image = np.array(np_image).astype('float32')/255
np_image = transform.resize(np_image, (256, 256, 3))
np_image = np.expand_dims(np_image, axis=0)
return np_image
def load_show_image(file):
img_path = file
img = image.load_img(img_path, target_size=(256, 256))
imshow(img)
def check_image(file, model):
#K.clear_session()
#model = models.load_model("models/simple-model-027-0.988224-1.000000.h5")
img = image.load_img(file, target_size=(256, 256))
#imshow(img)
# `x` is a float32 Numpy array of shape (256, 256, 3)
x = image.img_to_array(img)
# We add a dimension to transform our array into a "batch"
# of size (1, 256, 256, 3)
x = np.expand_dims(x, axis=0)
# Finally we preprocess the batch
# (simple rescaling to the [0, 1] range)
x /= 255
#return()
print(model.predict(x))
m_out = model.output[:,0]
# This is the output feature map of the `conv2d_4` layer,
# the last convolutional layer in this model
last_conv_layer = model.get_layer('conv2d_4')
#last_conv_layer = model.get_layer("vgg16").get_layer('block5_conv3')
# This is the gradient of the predicted class score with regard to
# the output feature map of `conv2d_4`
grads = K.gradients(m_out, last_conv_layer.output)[0]
# This is a vector with one entry per feature-map channel, where each entry
# is the mean intensity of the gradient over a specific feature map channel
pooled_grads = K.mean(grads, axis=(0, 1, 2))
# This function allows us to access the values of the quantities we just defined:
# `pooled_grads` and the output feature map of `conv2d_4`,
# given a sample image
iterate = K.function([model.input], [pooled_grads, last_conv_layer.output[0]])
# These are the values of these two quantities, as Numpy arrays,
# given our sample image
pooled_grads_value, conv_layer_output_value = iterate([x])
# We multiply each channel in the feature map array
# by "how important this channel is" with regard to the predicted class
for i in range(pooled_grads_value.shape[0]):  # all channels, not a hard-coded 127
conv_layer_output_value[:, :, i] *= pooled_grads_value[i]
# The channel-wise mean of the resulting feature map
# is our heatmap of class activation
heatmap = np.mean(conv_layer_output_value, axis=-1)
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)
#plt.matshow(heatmap)
#plt.show()
# We use cv2 to load the original image
img = cv2.imread(file)
cv2_img = np.array(img)
# Convert BGR (as loaded by cv2.imread) to RGB; note cv2_img is unused below
cv2_img = cv2_img[:, :, ::-1].copy()
# imshow(img)
# We resize the heatmap to have the same size as the original image
#heatmap_ = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
heatmap_ = heatmap
#heatmap_ = heatmap
#img = cv2.resize(img, (heatmap.shape[1], heatmap.shape[0]))
# imshow(heatmap_)
# We convert the heatmap to RGB
heatmap_ = np.uint8(255 * heatmap_)
# imshow(heatmap_)
# We apply the heatmap to the original image
heatmap_ = cv2.applyColorMap(heatmap_, cv2.COLORMAP_JET)
#imshow(heatmap_)
heatmap_ = cv2.resize(heatmap_, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_AREA)
superimposed_img = np.clip(heatmap_ * 0.7 + img, 0, 255).astype("uint8")
imshow(superimposed_img)
#image.save_img("presentations/figures/"+filename, superimposed_img)
# Save the image to disk
#cv2.imwrite('/Users/fchollet/Downloads/elephant_cam.jpg', superimposed_img)
``` |
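`check_image` above is a Grad-CAM-style class-activation overlay. A minimal usage sketch under stated assumptions: the image path is a hypothetical placeholder, the model path only appears commented out in the source, and the import path is inferred from the file location. Since the function uses `K.gradients`, it needs a TF1-style graph session (eager execution disabled):
```python
# Hypothetical usage sketch; paths and import location are assumptions.
from keras import models
from modules.functions_pattern_recognition import check_image, load_show_image

model = models.load_model("models/simple-model-027-0.988224-1.000000.h5")
load_show_image("data/sample.jpg")     # preview the 256x256-resized input
check_image("data/sample.jpg", model)  # print prediction, draw the CAM overlay
```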
{
"source": "jophy/fasttld",
"score": 3
} |
#### File: fasttld/fasttld/FastTLDExtract.py
```python
import re
import socket
import idna
from fasttld.psl import getPublicSuffixList, update
IP_RE = re.compile(
r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}"
r"([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
)
# Characters valid in scheme names ("." and "-" kept at the end of the
# class so they are not read as a character range)
SCHEME_RE = re.compile(r"^[A-Za-z0-9+.-]+://")
def looks_like_ip(maybe_ip):
"""Does the given str look like an IP address?"""
try:
socket.inet_aton(maybe_ip)
return True
except socket.error: # for Python 2 compatibility
pass
except (AttributeError, UnicodeError):
if IP_RE.match(maybe_ip):
return True
return False
def check_numeric(maybe_numeric):
try:
int(maybe_numeric)
except ValueError:
return False
return True
class FastTLDExtract(object):
def __init__(self, exclude_private_suffix=False, file_path=""):
self.trie = self._trie_construct(exclude_private_suffix, file_path)
def update(self, *args, **kwargs):
update(*args, **kwargs)
def nested_dict(self, dic, keys):
"""
Insert one suffix (as a list of reversed labels) into a nested-dict trie.
The idea of this function is based on https://stackoverflow.com/questions/13687924
:param dic: the trie (nested dict) to insert into, modified in place
:param keys: reversed labels of one suffix, e.g. ["uk", "co", "blogspot"]
:return: None
"""
end = False
for key in keys[:-1]:
dic_bk = dic
if key not in dic:
dic[key] = {}
dic = dic[key]
if isinstance(dic, bool):
end = True
dic = dic_bk
dic[keys[-2]] = {"_END": True, keys[-1]: True}
if not end:
dic[keys[-1]] = True
def _trie_construct(self, exclude_private_suffix, file_path=""):
"""
This function builds a trie structure based on the Mozilla Public Suffix List.
To construct it, the labels of every suffix are reversed before insertion.
For example, www.google.com -> com.google.www
:return: a trie dict
"""
tld_trie = {}
PublicSuffixList, PrivateSuffixList, AllSuffixList = getPublicSuffixList(file_path)
SuffixList = PublicSuffixList if exclude_private_suffix else AllSuffixList
for suffix in SuffixList:
if "." in suffix:
sp = suffix.split(".")
sp.reverse()
self.nested_dict(tld_trie, sp)
else:
tld_trie[suffix] = {"_END": True}
for key, val in tld_trie.items():
if len(val) == 1 and "_END" in val:
tld_trie[key] = True
return tld_trie
def __call__(self, *args, **kwargs):
return self.extract(*args, **kwargs)
def extract(self, raw_url, subdomain=True, format=False):
"""
Extract suffix and subdomain from a Domain.
:param raw_url: input URL or hostname
:param subdomain: Whether to extract the subdomain. Disabling it saves roughly 10% of the run time.
:param format: Whether to normalise raw_url first (see format()).
:return: Tuple(scheme, userinfo, subdomain, domain, suffix, port, path, domain_name)
>>> FastTLDExtract().extract('www.google.com.hk', subdomain=True)
('', '', 'www', 'google', 'com.hk', '', '', 'google.com.hk')
>>> FastTLDExtract().extract('127.0.0.1', subdomain=True)
('', '', '', '127.0.0.1', '', '', '', '127.0.0.1')
"""
ret_scheme = ret_userinfo = ret_subdomain = ret_domain = ""
ret_suffix = ret_port = ret_path = ret_domain_name = ""
if format:
raw_url = self.format(raw_url)
# Borrowed from tldextract library (https://github.com/john-kurkowski/tldextract)
# Use regex to strip raw_url of scheme subcomponent and anything after host subcomponent
# Reference: https://en.wikipedia.org/wiki/Uniform_Resource_Identifier#Syntax
netloc_with_scheme = raw_url.strip(". \n\t\r\uFEFF") # \u200b\u200c\u200d
netloc = SCHEME_RE.sub("", netloc_with_scheme)
ret_scheme = netloc_with_scheme[:len(netloc_with_scheme)-len(netloc)]
after_host = ""
# Extract URL userinfo
try:
at_idx = netloc.index("@")
except ValueError:
pass
else:
ret_userinfo = netloc[:at_idx]
netloc = netloc[at_idx+1:]
# Separate URL host from subcomponents thereafter
try:
host_end_index = next(i for i, c in enumerate(netloc) if c in {':', '/', '?', '&', '#'})
except StopIteration:
pass
else:
after_host = netloc[host_end_index:]
netloc = netloc[:host_end_index]
# Extract port and path, if any
if len(after_host):
try:
path_start_index = after_host.index("/")
except ValueError:
path_start_index = -1
invalid_port = False
if after_host[0] == ':':
if path_start_index == -1:
maybe_port = after_host[1:]
else:
maybe_port = after_host[1:path_start_index]
if not(check_numeric(maybe_port) and 0 <= int(maybe_port) <= 65535):
invalid_port = True
else:
ret_port = maybe_port
if not invalid_port and path_start_index != -1 and path_start_index != len(after_host):
ret_path = after_host[path_start_index+1:]
# Determine if raw_url is an IP address
if len(netloc) != 0 and looks_like_ip(netloc):
return (ret_scheme, ret_userinfo, "", netloc, "", ret_port, ret_path, netloc)
labels = netloc.split(".")
labels.reverse()
node = self.trie # define the root node
suffix = []
for label in labels:
if node is True: # or alternatively if type(node) is not dict:
# This node is an end node.
ret_domain = label
break
# This node has sub-nodes and maybe an end-node.
# eg. cn -> (cn, gov.cn)
if "_END" in node:
# check if there is a sub node
# eg. gov.cn
if label in node:
suffix.append(label)
node = node[label]
continue
if "*" in node:
# check if there is a sub node
# eg. www.ck
if ("!%s" % label) in node:
ret_domain = label
else:
suffix.append(label)
break
# check a TLD in PSL
if label in node:
suffix.append(label)
node = node[label]
else:
break
suffix.reverse()
len_suffix = len(suffix)
len_labels = len(labels)
ret_suffix = ".".join(suffix)
if 0 < len_suffix < len_labels:
ret_domain = labels[len_suffix]
if subdomain:
if len_suffix + 1 < len_labels:
ret_subdomain = netloc[: -(len(ret_domain) + len(ret_suffix) + 2)]
if ret_domain and ret_suffix:
ret_domain_name = "%s.%s" % (ret_domain, ret_suffix)
return (ret_scheme, ret_userinfo, ret_subdomain, ret_domain,
ret_suffix, ret_port, ret_path, ret_domain_name)
def format(self, raw_url):
"""
Apply simple normalisation rules to the input string,
e.g. lower-casing and punycode transformation.
Todo:
    1. URL parser to extract the domain.
    2. idna domain parser.
:param raw_url: input URL or hostname
:return: normalised string
"""
# idna_url = idna.encode(raw_url.strip().lower()).decode()
# input_ = urlparse.urlparse(idna_url).netloc
# if '//' in input_:
# _, _, input_ = input_.rpartition('//')
# if '/' in input_:
# input_, _, _ = input_.lpartition('//')
# return input_
# Punycode costs too much time! Make sure you really need it.
return idna.encode(raw_url.strip().lower()).decode()
```
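A usage sketch of the extractor, consistent with the `extract` docstring and the tests in the next file; the URL is illustrative:
```python
from fasttld import FastTLDExtract

t = FastTLDExtract(exclude_private_suffix=False)
print(t.extract("https://user@www.google.com.hk:8080/a/b?c=1"))
# ('https://', 'user', 'www', 'google', 'com.hk', '8080', 'a/b?c=1', 'google.com.hk')
```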
#### File: fasttld/tests/maintest.py
```python
import unittest
from fasttld import FastTLDExtract
all_suffix = FastTLDExtract(exclude_private_suffix=False)
no_private_suffix = FastTLDExtract(exclude_private_suffix=True)
class FastTLDExtractCase(unittest.TestCase):
def test_all_suffix_trie(self):
trie = all_suffix.trie
self.assertEqual(trie["cn"]["com"], True)
self.assertEqual("blogspot" in trie["uk"]["co"], True)
self.assertEqual("*" in trie["uk"], False)
self.assertEqual("_END" in trie["cn"], True)
self.assertEqual(trie["ck"]["*"], True)
self.assertEqual(trie["ck"]["!www"], True)
self.assertEqual(trie["ir"]["xn--mgba3a4f16a"], True)
# private domain test
self.assertEqual(trie["com"]["appspot"], True)
self.assertEqual(trie["ee"]["com"]["blogspot"], True)
self.assertEqual(trie["com"]["0emm"]["*"], True)
def test_idn_suffix_trie(self):
trie = all_suffix.trie
self.assertEqual(trie["香港"]["公司"], True)
self.assertEqual(trie["新加坡"], True)
def test_no_private_domain_trie(self):
trie = no_private_suffix.trie
self.assertEqual(trie["cn"]["com"], True)
self.assertEqual(trie["uk"]["co"], True)
# private domain test
self.assertEqual(trie["com"], True) # *.0emm.com or the domains like hk.com, cn.com ,etc.
self.assertEqual("no-ip.biz" in trie, False)
self.assertEqual("github" in trie["io"], False)
def test_no_private_suffix_extract(self):
self.assertEqual(
no_private_suffix.extract("www.myownblog.blogspot.ca"),
("", "", "www.myownblog", "blogspot", "ca", "", "", "blogspot.ca"),
)
self.assertEqual(
no_private_suffix.extract("192.168.1.1.no-ip.co.uk"),
("", "", "192.168.1.1", "no-ip", "co.uk", "", "", "no-ip.co.uk"),
)
def test_private_suffix_extract(self):
self.assertEqual(
all_suffix.extract("www.myownblog.blogspot.ca"),
("", "", "www", "myownblog", "blogspot.ca", "", "", "myownblog.blogspot.ca"),
)
self.assertEqual(
all_suffix.extract("192.168.1.1.no-ip.co.uk"),
("", "", "192.168.1", "1", "no-ip.co.uk", "", "", "1.no-ip.co.uk"),
)
def test_all_extract(self):
todo = [
"www.google.co.uk",
"ditu.baidu.com.cn",
"global.prod.fastly.net",
"www.global.prod.fastly.net",
"map.global.prod.fastly.net",
"www.map.global.prod.fastly.net",
]
assert_list = [
("", "", "www", "google", "co.uk", "", "", "google.co.uk"),
("", "", "ditu", "baidu", "com.cn", "", "", "baidu.com.cn"),
("", "", "", "", "global.prod.fastly.net", "", "", ""),
("", "", "", "www", "global.prod.fastly.net", "", "", "www.global.prod.fastly.net"),
("", "", "", "map", "global.prod.fastly.net", "", "", "map.global.prod.fastly.net"),
("", "", "www", "map", "global.prod.fastly.net", "", "", "map.global.prod.fastly.net"),
]
for t, a in zip(todo, assert_list):
self.assertEqual(all_suffix.extract(t), a)
def test_wildcard(self):
todo = [
"ck",
"www.ck",
"news.www.ck",
"big.news.www.ck",
"abc.ck",
"123.abc.ck",
"foo.123.abc.ck",
]
assert_list = [
("", "", "", "", "ck", "", "", ""),
("", "", "", "www", "ck", "", "", "www.ck"),
("", "", "news", "www", "ck", "", "", "www.ck"),
("", "", "big.news", "www", "ck", "", "", "www.ck"),
("", "", "", "", "abc.ck", "", "", ""),
("", "", "", "123", "abc.ck", "", "", "123.abc.ck"),
("", "", "foo", "123", "abc.ck", "", "", "123.abc.ck"),
]
for t, a in zip(todo, assert_list):
self.assertEqual(all_suffix.extract(t), a)
def test_not_tld(self):
self.assertEqual(all_suffix.extract("www.abc.noexists"), ("", "", "", "", "", "", "", ""))
self.assertEqual(no_private_suffix.extract("www.abc.noexists"),
("", "", "", "", "", "", "", ""))
def test_only_dot_tld(self):
self.assertEqual(all_suffix.extract(".com"), ("", "", "", "", "com", "", "", ""))
self.assertEqual(no_private_suffix.extract(".com"), ("", "", "", "", "com", "", "", ""))
def test_one_rule(self):
self.assertEqual(all_suffix.extract("domain.biz"),
("", "", "", "domain", "biz", "", "", "domain.biz"))
self.assertEqual(
no_private_suffix.extract("domain.biz"),
("", "", "", "domain", "biz", "", "", "domain.biz")
)
def test_only_one_wildcard(self):
self.assertEqual(all_suffix.extract("mm"), ("", "", "", "", "mm", "", "", ""))
self.assertEqual(all_suffix.extract("c.mm"), ("", "", "", "", "c.mm", "", "", ""))
self.assertEqual(all_suffix.extract("b.c.mm"), ("", "", "", "b", "c.mm", "", "", "b.c.mm"))
self.assertEqual(no_private_suffix.extract("mm"), ("", "", "", "", "mm", "", "", ""))
self.assertEqual(no_private_suffix.extract("c.mm"), ("", "", "", "", "c.mm", "", "", ""))
self.assertEqual(no_private_suffix.extract("b.c.mm"),
("", "", "", "b", "c.mm", "", "", "b.c.mm"))
def test_us_k12(self):
# k12.ak.us is a public TLD
self.assertEqual(all_suffix.extract("ak.us"), ("", "", "", "", "ak.us", "", "", ""))
self.assertEqual(
all_suffix.extract("test.k12.ak.us"),
("", "", "", "test", "k12.ak.us", "", "", "test.k12.ak.us")
)
self.assertEqual(
all_suffix.extract("www.test.k12.ak.us"),
("", "", "www", "test", "k12.ak.us", "", "", "test.k12.ak.us")
)
self.assertEqual(no_private_suffix.extract("ak.us"),
("", "", "", "", "ak.us", "", "", ""))
self.assertEqual(
no_private_suffix.extract("test.k12.ak.us"),
("", "", "", "test", "k12.ak.us", "", "", "test.k12.ak.us")
)
self.assertEqual(
no_private_suffix.extract("www.test.k12.ak.us"),
("", "", "www", "test", "k12.ak.us", "", "", "test.k12.ak.us"),
)
def test_idn(self):
self.assertEqual(all_suffix.extract("食狮.com.cn"),
("", "", "", "食狮", "com.cn", "", "", "食狮.com.cn"))
self.assertEqual(no_private_suffix.extract("食狮.com.cn"),
("", "", "", "食狮", "com.cn", "", "", "食狮.com.cn"))
def test_punycode(self):
self.assertEqual(
all_suffix.extract("xn--85x722f.com.cn"),
("", "", "", "xn--85x722f", "com.cn", "", "", "xn--85x722f.com.cn"),
)
self.assertEqual(
no_private_suffix.extract("xn--85x722f.com.cn"),
("", "", "", "xn--85x722f", "com.cn", "", "", "xn--85x722f.com.cn"),
)
def test_scheme_port_path(self):
# no_private_suffix
no_private_suffix_asserts = [
("https://", "", "", "blogspot", "com", "", "", "blogspot.com"),
("https://", "", "google", "blogspot", "com", "", "", "blogspot.com"),
("https://", "", "google", "blogspot", "com", "8080", "", "blogspot.com"),
("ftp://", "", "google", "blogspot", "com", "8080",
"a/long/path?query=42things", "blogspot.com"),
("ftp://", "", "", "blogspot", "com", "8080",
"a/long/path?query=42things", "blogspot.com"),
("https://", "", "", "blogspot", "com", "8080", "", "blogspot.com"),
]
self.assertEqual(
no_private_suffix.extract("https://blogspot.com"), no_private_suffix_asserts[0]
)
self.assertEqual(
no_private_suffix.extract("https://blogspot.com", subdomain=False),
no_private_suffix_asserts[0],
)
self.assertEqual(
no_private_suffix.extract("https://google.blogspot.com"), no_private_suffix_asserts[1]
)
self.assertEqual(
no_private_suffix.extract("https://google.blogspot.com", subdomain=False),
no_private_suffix_asserts[0],
)
self.assertEqual(
no_private_suffix.extract("https://google.blogspot.com:8080"),
no_private_suffix_asserts[2],
)
self.assertEqual(
no_private_suffix.extract("https://google.blogspot.com:8080", subdomain=False),
no_private_suffix_asserts[5],
)
self.assertEqual(
no_private_suffix.extract(
"ftp://google.blogspot.com:8080/a/long/path?query=42things"
),
no_private_suffix_asserts[3],
)
self.assertEqual(
no_private_suffix.extract(
"ftp://google.blogspot.com:8080/a/long/path?query=42things", subdomain=False
),
no_private_suffix_asserts[4],
)
# all_suffix
all_suffix_asserts = [
("https://", "", "abc", "google", "blogspot.com", "", "", "google.blogspot.com"),
("https://", "", "", "google", "blogspot.com", "", "", "google.blogspot.com"),
("ftp://", "", "abc", "google", "blogspot.com", "8080",
"a/long/path?query=42things", "google.blogspot.com"),
("ftp://", "", "", "google", "blogspot.com", "8080",
"a/long/path?query=42things", "google.blogspot.com"),
("https://", "", "abc", "google", "blogspot.com", "8080",
"", "google.blogspot.com"),
("https://", "", "", "google", "blogspot.com", "8080",
"", "google.blogspot.com"),
]
self.assertEqual(
all_suffix.extract("https://abc.google.blogspot.com"), all_suffix_asserts[0]
)
self.assertEqual(
all_suffix.extract("https://abc.google.blogspot.com", subdomain=False),
all_suffix_asserts[1],
)
self.assertEqual(
all_suffix.extract("https://abc.google.blogspot.com"), all_suffix_asserts[0]
)
self.assertEqual(
all_suffix.extract("https://abc.google.blogspot.com", subdomain=False),
all_suffix_asserts[1],
)
self.assertEqual(
all_suffix.extract("https://abc.google.blogspot.com:8080"), all_suffix_asserts[4]
)
self.assertEqual(
all_suffix.extract("https://abc.google.blogspot.com:8080", subdomain=False),
all_suffix_asserts[5],
)
self.assertEqual(
all_suffix.extract("ftp://abc.google.blogspot.com:8080" "/a/long/path?query=42things"),
all_suffix_asserts[2],
)
self.assertEqual(
all_suffix.extract(
"ftp://abc.google.blogspot.com:8080" "/a/long/path?query=42things", subdomain=False
),
all_suffix_asserts[3],
)
def test_nested_dict(self):
d = {}
all_suffix.nested_dict(d, keys=["ac"])
all_suffix.nested_dict(d, keys=["ac", "com"])
all_suffix.nested_dict(d, keys=["ac", "edu"])
all_suffix.nested_dict(d, keys=["ac", "gov"])
all_suffix.nested_dict(d, keys=["ac", "net"])
all_suffix.nested_dict(d, keys=["ac", "mil"])
all_suffix.nested_dict(d, keys=["ac", "org"])
all_suffix.nested_dict(d, keys=["ck", "*"])
all_suffix.nested_dict(d, keys=["ck", "!www"])
self.assertDictEqual(
d,
{
"ac": {
"_END": True,
"com": True,
"edu": True,
"gov": True,
"net": True,
"mil": True,
"org": True,
},
"ck": {"*": True, "!www": True},
},
)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jopijuco/local",
"score": 3
} |
#### File: local/model/picture.py
```python
from PIL import Image, ImageOps
from constants import *
class Picture:
def __init__(self, id, name, thumbnail):
self.id = id
self.name = name
self.thumbnail = thumbnail
def create_thumbnail(self):
img = Image.open("static/"+self.name)
img_thumbnail = ImageOps.fit(img, (IMG_THUMBNAIL_SIZE, IMG_THUMBNAIL_SIZE), centering=(1.0, 0.0))
destname = 'static/thumbnail_'+self.name
img_thumbnail.save(destname)
self.thumbnail = img_thumbnail
def name_thumbnail(self):
self.thumbnail = "thumbnail_"+self.name
``` |
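A minimal usage sketch of the `Picture` class; "holiday.jpg" is a hypothetical file that must already exist under the hard-coded `static/` directory, and the import path is inferred from the file location:
```python
# Hypothetical usage; file name and import path are assumptions.
from model.picture import Picture

pic = Picture(id=1, name="holiday.jpg", thumbnail=None)
pic.create_thumbnail()  # writes static/thumbnail_holiday.jpg, keeps the PIL image
pic.name_thumbnail()    # resets pic.thumbnail to the string "thumbnail_holiday.jpg"
```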
{
"source": "jopineda/Clair",
"score": 2
} |
#### File: Clair/clair/learning_rate_finder.py
```python
import sys
import logging
import random
import numpy as np
import pandas as pd
from os.path import abspath
from time import time
from argparse import ArgumentParser
from threading import Thread
import clair.evaluate as evaluate
from clair.model import Clair
import clair.utils as utils
from clair.task.main import GT21, GENOTYPE, VARIANT_LENGTH_1, VARIANT_LENGTH_2
import shared.param as param
logging.basicConfig(format='%(message)s', level=logging.INFO)
def accuracy(y_pred, y_true):
gt21, genotype, indel_length_1, indel_length_2 = y_pred
batch_size = len(gt21) + 0.0
gt21_TP = 0
genotype_TP = 0
indel1_TP = 0
indel2_TP = 0
for gt21_prediction, gt21_true_label in zip(
gt21,
y_true[:, GT21.y_start_index:GT21.y_end_index]
):
true_label_index = np.argmax(gt21_true_label)
predict_label_index = np.argmax(gt21_prediction)
if true_label_index == predict_label_index:
gt21_TP += 1
for genotype_prediction, true_genotype_label in zip(
genotype,
y_true[:, GENOTYPE.y_start_index:GENOTYPE.y_end_index]
):
true_label_index = np.argmax(true_genotype_label)
predict_label_index = np.argmax(genotype_prediction)
if true_label_index == predict_label_index:
genotype_TP += 1
for indel_length_prediction_1, true_indel_length_label_1, indel_length_prediction_2, true_indel_length_label_2 in zip(
indel_length_1,
y_true[:, VARIANT_LENGTH_1.y_start_index:VARIANT_LENGTH_1.y_end_index],
indel_length_2,
y_true[:, VARIANT_LENGTH_2.y_start_index:VARIANT_LENGTH_2.y_end_index]
):
true_label_index_1 = np.argmax(true_indel_length_label_1)
true_label_index_2 = np.argmax(true_indel_length_label_2)
predict_label_index_1 = np.argmax(indel_length_prediction_1)
predict_label_index_2 = np.argmax(indel_length_prediction_2)
if true_label_index_1 > true_label_index_2:
true_label_index_1, true_label_index_2 = true_label_index_2, true_label_index_1
if predict_label_index_1 > predict_label_index_2:
predict_label_index_1, predict_label_index_2 = predict_label_index_2, predict_label_index_1
if true_label_index_1 == predict_label_index_1:
indel1_TP += 1
if true_label_index_2 == predict_label_index_2:
indel2_TP += 1
gt21_acc = gt21_TP / batch_size
genotype_acc = genotype_TP / batch_size
indel1_acc = indel1_TP / batch_size
indel2_acc = indel2_TP / batch_size
acc = (gt21_acc + genotype_acc + indel1_acc + indel2_acc) / 4
return acc
def lr_finder(lr_accuracy):
df = pd.DataFrame(lr_accuracy, columns=["lr", "accuracy", "loss"])
df['diff'] = df['accuracy'].diff()
df = df.dropna().reset_index(drop=True)
minimum_lr = df[df['diff'] == max(df['diff'])]['lr'].sort_values(ascending=False).item()
maximum_lr = df[df['diff'] == min(df['diff'])]['lr'].sort_values(ascending=True).item()
if minimum_lr > maximum_lr:
minimum_lr, maximum_lr = maximum_lr, minimum_lr
return minimum_lr, maximum_lr, df
def shuffle_first_n_items(array, n):
if len(array) <= n:
np.random.shuffle(array)
return array
# pylint: disable=unbalanced-tuple-unpacking
a1, a2 = np.split(array, [n])
np.random.shuffle(a1)
return np.append(a1, a2)
def train_model(m, training_config):
learning_rate = param.min_lr
l2_regularization_lambda = training_config.l2_regularization_lambda
output_file_path_prefix = training_config.output_file_path_prefix
summary_writer = training_config.summary_writer
model_initalization_file_path = training_config.model_initalization_file_path
dataset_info = training_config.dataset_info
dataset_size = dataset_info.dataset_size
training_losses = []
validation_losses = []
lr_accuracy = []
if model_initalization_file_path is not None:
m.restore_parameters(abspath(model_initalization_file_path))
logging.info("[INFO] Start training...")
logging.info("[INFO] Learning rate: %.2e" % m.set_learning_rate(learning_rate))
logging.info("[INFO] L2 regularization lambda: %.2e" % m.set_l2_regularization_lambda(l2_regularization_lambda))
# Model Constants
training_start_time = time()
no_of_training_examples = (
dataset_info.no_of_training_examples_from_train_binary or int(dataset_size * param.trainingDatasetPercentage)
)
no_of_validation_examples = dataset_info.dataset_size - no_of_training_examples
no_of_blosc_blocks = utils.no_of_blosc_blocks_from(
dataset_info=dataset_info,
no_of_training_examples=no_of_training_examples,
blosc_block_size=param.bloscBlockSize
)
no_of_training_blosc_blocks = int(no_of_training_examples / param.bloscBlockSize)
tensor_block_index_list = np.arange(no_of_blosc_blocks, dtype=int)
total_numbers_of_iterations = np.ceil(no_of_training_examples / param.trainBatchSize) + 1
step_size = param.stepsizeConstant * total_numbers_of_iterations
# Initialize variables
epoch_count = 1
if model_initalization_file_path is not None:
epoch_count = int(model_initalization_file_path[-param.parameterOutputPlaceHolder:])+1
global_step = 0
mini_batches_loaded = []
def load_mini_batch(data_index, blosc_index, first_blosc_block_data_index, tensor_block_index_list):
mini_batch = utils.new_mini_batch(
data_index=data_index,
blosc_start_index=blosc_index,
first_blosc_block_data_index=first_blosc_block_data_index,
no_of_training_examples=no_of_training_examples,
no_of_blosc_blocks=no_of_blosc_blocks,
dataset_info=dataset_info,
tensor_block_index_list=tensor_block_index_list,
)
_, _, next_first_blosc_block_data_index, next_blosc_start_index = mini_batch
if next_first_blosc_block_data_index < 0 or next_blosc_start_index < 0:
return
mini_batches_loaded.append(mini_batch)
while epoch_count <= param.lr_finder_max_epoch:
# init variables for processing one epoch
epoch_start_time = time()
training_loss_sum = 0
validation_loss_sum = 0
data_index = 0
blosc_index = 0
first_blosc_block_data_index = 0
x_batch, y_batch = None, None
gt21_loss_sum = 0
genotype_loss_sum = 0
indel_length_loss_sum_1 = 0
indel_length_loss_sum_2 = 0
l2_loss_sum = 0
while True:
is_with_batch_data = x_batch is not None and y_batch is not None
is_training = is_with_batch_data and data_index < no_of_training_examples
is_validation = is_with_batch_data and not is_training
thread_pool = []
if is_training:
thread_pool.append(Thread(target=m.train, args=(x_batch, y_batch)))
elif is_validation:
thread_pool.append(Thread(target=m.validate, args=(x_batch, y_batch)))
thread_pool.append(
Thread(
target=load_mini_batch,
args=(data_index, blosc_index, first_blosc_block_data_index, tensor_block_index_list)
)
)
for t in thread_pool:
t.start()
for t in thread_pool:
t.join()
# add training loss or validation loss
if is_training:
training_loss_sum += m.training_loss_on_one_batch
batch_acc = accuracy(y_pred=m.prediction, y_true=y_batch)
lr_accuracy.append((learning_rate, batch_acc, m.training_loss_on_one_batch))
if summary_writer is not None:
summary = m.training_summary_on_one_batch
summary_writer.add_summary(summary, epoch_count)
elif is_validation:
validation_loss_sum += m.validation_loss_on_one_batch
gt21_loss_sum += m.gt21_loss
genotype_loss_sum += m.genotype_loss
indel_length_loss_sum_1 += m.indel_length_loss_1
indel_length_loss_sum_2 += m.indel_length_loss_2
l2_loss_sum += m.l2_loss
if is_with_batch_data:
data_index += np.shape(x_batch)[0]
have_next_mini_batch = len(mini_batches_loaded) > 0
is_processed_a_mini_batch = len(thread_pool) > 0
if have_next_mini_batch:
x_batch, y_batch, first_blosc_block_data_index, blosc_index = mini_batches_loaded.pop(0)
learning_rate, global_step, _max_learning_rate = m.clr(
global_step, step_size, param.max_lr, "tri"
)
if not have_next_mini_batch and not is_processed_a_mini_batch:
break
logging.info(
" ".join([str(epoch_count), "Training loss:", str(training_loss_sum/no_of_training_examples)])
)
logging.info(
"\t".join([
"{} Validation loss (Total/Base/Genotype/Indel_1_2):".format(epoch_count),
str(validation_loss_sum/no_of_validation_examples),
str(gt21_loss_sum/no_of_validation_examples),
str(genotype_loss_sum/no_of_validation_examples),
str(indel_length_loss_sum_1/no_of_validation_examples),
str(indel_length_loss_sum_2/no_of_validation_examples)
])
)
logging.info("[INFO] Epoch time elapsed: %.2f s" % (time() - epoch_start_time))
training_losses.append((training_loss_sum, epoch_count))
validation_losses.append((validation_loss_sum, epoch_count))
# Output the model
if output_file_path_prefix is not None:
parameter_output_path = "%s-%%0%dd" % (output_file_path_prefix, param.parameterOutputPlaceHolder)
m.save_parameters(abspath(parameter_output_path % epoch_count))
# variables update per epoch
epoch_count += 1
minimum_lr, maximum_lr, df = lr_finder(lr_accuracy)
logging.info("[INFO] min_lr: %g, max_lr: %g" % (minimum_lr, maximum_lr))
df.to_csv("lr_finder.txt", sep=',', index=False)
# shuffle data on each epoch
tensor_block_index_list = shuffle_first_n_items(tensor_block_index_list, no_of_training_blosc_blocks)
logging.info("[INFO] Shuffled: " + ' '.join(
[str(x) for x in np.append(tensor_block_index_list[:5], tensor_block_index_list[-5:])]
))
logging.info("[INFO] Training time elapsed: %.2f s" % (time() - training_start_time))
return training_losses, validation_losses
if __name__ == "__main__":
random.seed(param.RANDOM_SEED)
np.random.seed(param.RANDOM_SEED)
parser = ArgumentParser(description="Learning rate finder")
# binary file path
parser.add_argument('--bin_fn', type=str, default=None,
help="Binary tensor input generated by tensor2Bin.py, tensor_fn, var_fn and bed_fn will be ignored")
# tensor file path
parser.add_argument('--tensor_fn', type=str, default="vartensors", help="Tensor input")
# variant file path
parser.add_argument('--var_fn', type=str, default="truthvars", help="Truth variants list input")
# bed file path
parser.add_argument('--bed_fn', type=str, default=None,
help="High confident genome regions input in the BED format")
# checkpoint file path
parser.add_argument('--chkpnt_fn', type=str, default=None,
help="Input a checkpoint for testing or continue training")
# learning rate, with default value stated in param
parser.add_argument('--learning_rate', type=float, default=param.initialLearningRate,
help="Set the initial learning rate, default: %(default)s")
# l2 regularization
parser.add_argument('--lambd', type=float, default=param.l2RegularizationLambda,
help="Set the l2 regularization lambda, default: %(default)s")
    # output checkpoint file path prefix
parser.add_argument('--ochk_prefix', type=str, default=None,
help="Prefix for checkpoint outputs at each learning rate change, REQUIRED")
parser.add_argument('--olog_dir', type=str, default=None,
help="Directory for tensorboard log outputs, optional")
args = parser.parse_args()
if len(sys.argv[1:]) == 0:
parser.print_help()
sys.exit(1)
# initialize
logging.info("[INFO] Initializing")
utils.setup_environment()
m = Clair()
m.init()
dataset_info = utils.dataset_info_from(
binary_file_path=args.bin_fn,
tensor_file_path=args.tensor_fn,
variant_file_path=args.var_fn,
bed_file_path=args.bed_fn
)
training_config = utils.TrainingConfig(
dataset_info=dataset_info,
learning_rate=args.learning_rate,
l2_regularization_lambda=args.lambd,
output_file_path_prefix=args.ochk_prefix,
model_initalization_file_path=args.chkpnt_fn,
        summary_writer=m.get_summary_file_writer(args.olog_dir) if args.olog_dir is not None else None,
)
_training_losses, validation_losses = train_model(m, training_config)
# show the parameter set with the smallest validation loss
validation_losses.sort()
best_validation_epoch = validation_losses[0][1]
logging.info("[INFO] Best validation loss at epoch: %d" % best_validation_epoch)
# load best validation model and evaluate it
model_file_path = "%s-%%0%dd" % (training_config.output_file_path_prefix, param.parameterOutputPlaceHolder)
best_validation_model_file_path = model_file_path % best_validation_epoch
m.restore_parameters(abspath(best_validation_model_file_path))
evaluate.evaluate_model(m, dataset_info)
```
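The script above records `(learning_rate, accuracy, loss)` triples and delegates the choice of the learning-rate range to `lr_finder`, whose implementation is not shown here. Below is a minimal sketch of how such a range can be derived from the recorded losses: smooth the loss curve, cut it off where it diverges, and take the steepest-descent point as the minimum rate. The function name, smoothing window and divergence factor are assumptions, not Clair's actual implementation, and the sketch assumes several records with distinct, positive learning rates.
```python
import numpy as np
import pandas as pd


def pick_lr_range(lr_accuracy, smooth_window=5, divergence_factor=4.0):
    """Pick (min_lr, max_lr) from (lr, accuracy, loss) records.

    Hypothetical stand-in for the `lr_finder` used above: max_lr is the
    last rate before the smoothed loss diverges, min_lr is the rate at
    the steepest descent of the smoothed loss.
    """
    df = pd.DataFrame(lr_accuracy, columns=["lr", "accuracy", "loss"])
    df = df.sort_values("lr").reset_index(drop=True)
    df["smooth_loss"] = df["loss"].rolling(smooth_window, min_periods=1).mean()
    best = df["smooth_loss"].min()
    # Stop where the smoothed loss explodes past `divergence_factor` x best.
    diverged = df.index[df["smooth_loss"] > divergence_factor * best]
    last = diverged[0] if len(diverged) else len(df) - 1
    usable = df.iloc[:last + 1]
    # Steepest descent of the smoothed loss with respect to log(lr).
    grad = np.gradient(usable["smooth_loss"].values, np.log(usable["lr"].values))
    min_lr = float(usable["lr"].iloc[int(np.argmin(grad))])
    max_lr = float(usable["lr"].iloc[-1])
    return min_lr, max_lr, df
```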
#### File: clair/post_processing/overlap_variant.py
```python
from sys import stdin, stderr
from collections import namedtuple
Variant = namedtuple('Variant', [
'chromosome',
'position',
'reference_base',
'alternate_base',
'alternate_base_multi',
'quality_score',
'genotype',
'depth',
'allele_frequency',
])
VariantIntervals = namedtuple('VariantIntervals', [
'snp_interval',
'deletion_interval',
'insertion_intervals',
])
EMPTY_INTERVAL = (-1, -1)
DEBUG_OVERLAPPED_VARIANT = False
def maximum_deletion_length_of(variant):
return len(variant.reference_base) - min(
len(variant.alternate_base),
1024 if variant.alternate_base_multi is None else len(variant.alternate_base_multi),
)
def snp_interval_from(variant):
# need to handle the case like [ACGT]Del / [ACGT]Ins
is_snp = (
len(variant.reference_base) == len(variant.alternate_base) or
(
False if variant.alternate_base_multi is None else len(
variant.reference_base) == len(variant.alternate_base_multi)
)
)
return EMPTY_INTERVAL if not is_snp else (variant.position - 1, variant.position)
def deletion_interval_from(variant):
maximum_deletion_length = maximum_deletion_length_of(variant)
is_deletion = maximum_deletion_length > 0
return EMPTY_INTERVAL if not is_deletion else (variant.position - 1, variant.position + maximum_deletion_length)
def insertion_intervals_from(variant):
insertion_intervals = []
if len(variant.alternate_base) > len(variant.reference_base):
insertion_intervals.append(
(
variant.position - 1,
variant.position + len(variant.alternate_base) - len(variant.reference_base)
)
)
else:
insertion_intervals.append(EMPTY_INTERVAL)
if (
variant.alternate_base_multi is not None and
len(variant.alternate_base_multi) > len(variant.reference_base)
):
insertion_intervals.append(
(
variant.position - 1,
variant.position + len(variant.alternate_base_multi) - len(variant.reference_base)
)
)
else:
insertion_intervals.append(EMPTY_INTERVAL)
return insertion_intervals
# All intervals are assumed to be zero-based, [start, end) half-open intervals.
def variant_intervals_from(variant):
return VariantIntervals(
snp_interval=snp_interval_from(variant),
deletion_interval=deletion_interval_from(variant),
insertion_intervals=insertion_intervals_from(variant),
)
def is_two_intervals_overlap(interval1, interval2):
if interval1 is EMPTY_INTERVAL or interval2 is EMPTY_INTERVAL:
return False
begin1, end1 = interval1
begin2, _ = interval2
# return begin1 <= begin2 <= end1 or begin2 <= end1 <= end2
return begin1 <= begin2 < end1
def is_two_intervals_overlap_for_ins_snp(insertion_interval, snp_interval):
if insertion_interval is EMPTY_INTERVAL or snp_interval is EMPTY_INTERVAL:
return False
insert_begin, insert_end = insertion_interval
_, snp_end = snp_interval
return insert_end - insert_begin == 2 and insert_end == snp_end
# For insertion-interval overlaps, the current implementation requires the same ending position.
def is_two_intervals_overlap_for_ins_ins(interval1, interval2):
if interval1 is EMPTY_INTERVAL or interval2 is EMPTY_INTERVAL:
return False
_, end1 = interval1
_, end2 = interval2
return end1 == end2
def is_two_variants_overlap(variant1, variant2):
if variant1.chromosome != variant2.chromosome:
return False
if variant1.position > variant2.position:
return is_two_variants_overlap(variant2, variant1)
intervals_1 = variant_intervals_from(variant1)
intervals_2 = variant_intervals_from(variant2)
# return (
# is_two_intervals_overlap(intervals_1.deletion_interval, intervals_2.snp_interval) or
# is_two_intervals_overlap(intervals_1.deletion_interval, intervals_2.deletion_interval) or
# is_two_intervals_overlap_for_ins_snp(intervals_1.insertion_intervals[0], intervals_2.snp_interval) or
# is_two_intervals_overlap_for_ins_snp(intervals_1.insertion_intervals[1], intervals_2.snp_interval) or
# is_two_intervals_overlap_for_ins_ins(intervals_1.insertion_intervals[0], intervals_2.insertion_intervals[0]) or
# is_two_intervals_overlap_for_ins_ins(intervals_1.insertion_intervals[0], intervals_2.insertion_intervals[1]) or
# is_two_intervals_overlap_for_ins_ins(intervals_1.insertion_intervals[1], intervals_2.insertion_intervals[0]) or
# is_two_intervals_overlap_for_ins_ins(intervals_1.insertion_intervals[1], intervals_2.insertion_intervals[1])
# )
# return (
# is_two_intervals_overlap(intervals_1.deletion_interval, intervals_2.snp_interval) or
# is_two_intervals_overlap(intervals_1.deletion_interval, intervals_2.deletion_interval) or
# is_two_intervals_overlap_for_ins_snp(intervals_1.insertion_intervals[0], intervals_2.snp_interval) or
# is_two_intervals_overlap_for_ins_snp(intervals_1.insertion_intervals[1], intervals_2.snp_interval)
# )
return (
is_two_intervals_overlap(intervals_1.deletion_interval, intervals_2.snp_interval) or
is_two_intervals_overlap(intervals_1.deletion_interval, intervals_2.deletion_interval)
)
def variant_from(variant_row):
if variant_row[0] == "#":
return
columns = str(variant_row).split("\t")
chromosome = columns[0]
position = int(columns[1])
reference_base = columns[3]
alternates = columns[4].split(",")
alternate_base = alternates[0]
alternate_base_multi = None if len(alternates) == 1 else alternates[1]
quality_score = int(float(columns[5]))
last_column = columns[-1]
last_columns = last_column.split(":")
genotype = last_columns[0]
depth = last_columns[2]
allele_frequency = last_columns[3]
return Variant(
chromosome=chromosome,
position=position,
reference_base=reference_base,
alternate_base=alternate_base,
alternate_base_multi=alternate_base_multi,
quality_score=quality_score,
genotype=genotype,
depth=depth,
allele_frequency=allele_frequency,
)
def variant_row_from(variant):
alternates = ",".join(
[variant.alternate_base] +
([] if variant.alternate_base_multi is None else [variant.alternate_base_multi])
)
quality_score_str = str(variant.quality_score)
last_column = ":".join([
variant.genotype,
quality_score_str,
variant.depth,
variant.allele_frequency,
])
return "\t".join([
variant.chromosome,
str(variant.position),
".",
variant.reference_base,
alternates,
str(variant.quality_score),
".",
".",
"GT:GQ:DP:AF",
last_column,
])
def header_and_variant_rows_from_stdin():
header_rows = []
variant_rows = []
for row in stdin.readlines():
if row[0] == "#":
header_rows.append(row[:-1])
else:
variant_rows.append(row[:-1])
return header_rows, variant_rows
def variant_to_output_for(variant1, variant2):
# return variant1 if variant1.quality_score > variant2.quality_score else variant2
score1 = variant1.quality_score
score2 = variant2.quality_score
# score1 = variant1.quality_score * float(variant1.allele_frequency)
# score2 = variant2.quality_score * float(variant2.allele_frequency)
return variant1 if score1 > score2 else variant2
def filter_variants_with(variants):
filtered_variants = []
overlapped_variants_count = 0
for variant in variants:
if len(filtered_variants) == 0:
filtered_variants.append(variant)
continue
last_variant = filtered_variants[-1]
if not is_two_variants_overlap(last_variant, variant):
filtered_variants.append(variant)
continue
if DEBUG_OVERLAPPED_VARIANT:
overlapped_variants_count += 1
print("\n[INFO] variants overlapped.", file=stderr)
print(variant_row_from(last_variant), file=stderr)
print(variant_row_from(variant), file=stderr)
# variant_to_append = last_variant if last_variant.quality_score >= variant.quality_score else variant
variant_to_append = variant_to_output_for(last_variant, variant)
if variant_to_append != last_variant:
filtered_variants.pop()
filtered_variants.append(variant)
if DEBUG_OVERLAPPED_VARIANT:
print("[INFO] {} variants overlapped.".format(overlapped_variants_count), file=stderr)
return filtered_variants
def output(header_rows, variants):
for header_row in header_rows:
print(header_row)
for variant in variants:
print(variant_row_from(variant))
def main():
header_rows, variant_rows = header_and_variant_rows_from_stdin()
variants = [variant_from(variant_row) for variant_row in variant_rows]
filtered_variants = filter_variants_with(variants)
output(header_rows, filtered_variants)
if __name__ == "__main__":
main()
```
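A hedged usage sketch of the overlap test above, assuming the file is importable as `overlap_variant`: a 3 bp deletion and a SNP that falls inside the deleted interval are flagged as overlapping regardless of argument order.
```python
from overlap_variant import Variant, is_two_variants_overlap  # the module above

# A 3 bp deletion at chr1:100 spans the interval (99, 103); a SNP at
# chr1:102 lies inside it, so the pair is reported as overlapping.
deletion = Variant(chromosome="chr1", position=100, reference_base="ACGT",
                   alternate_base="A", alternate_base_multi=None,
                   quality_score=30, genotype="0/1", depth="20",
                   allele_frequency="0.5")
snp = Variant(chromosome="chr1", position=102, reference_base="G",
              alternate_base="T", alternate_base_multi=None,
              quality_score=25, genotype="0/1", depth="18",
              allele_frequency="0.4")

print(is_two_variants_overlap(deletion, snp))  # True
print(is_two_variants_overlap(snp, deletion))  # order-independent: True
```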
#### File: Clair/dataPrepScripts/CombineBins.py
```python
import os
import pickle
from random import shuffle
from argparse import ArgumentParser
from collections import namedtuple
Data = namedtuple('Data', ['x', 'y', 'pos', 'total'])
def process_command():
parser = ArgumentParser(description="Combine small bins into a large bin.")
parser.add_argument(
'--src', type=str, default=os.path.join(os.curdir, "all_bins"),
help="Path to directory that stores small bins. (default: %(default)s)"
)
parser.add_argument(
'--dst', type=str, default=os.curdir,
help="Path of the output folder. (default: %(default)s)"
)
parser.add_argument(
'--bin_name', type=str, default="tensor.bin",
help="Name of the large bin. (default: %(default)s)"
)
    parser.add_argument(
        # NOTE: argparse's type=bool treats any non-empty string as True,
        # so a store_true flag is used instead.
        '--shuffle_data', action='store_true',
        help="Shuffle the data after all of it has been loaded. (default: %(default)s)"
    )
return parser.parse_args()
def load_data_from_one_file_path(file_path):
X = []
Y = []
pos = []
total = 0
with open(file_path, "rb") as f:
total = int(pickle.load(f))
X = pickle.load(f)
Y = pickle.load(f)
pos = pickle.load(f)
return Data(x=X, y=Y, pos=pos, total=total)
def load_data_from(directory_path, need_shuffle_file_paths=False):
X = []
Y = []
pos = []
total = 0
file_paths = os.listdir(directory_path)
file_paths.sort()
if need_shuffle_file_paths:
shuffle(file_paths)
absolute_file_paths = []
for file_path in file_paths:
absolute_file_paths.append(os.path.abspath(os.path.join(directory_path, file_path)))
for absolute_file_path in absolute_file_paths:
data = load_data_from_one_file_path(absolute_file_path)
total += data.total
X += data.x
Y += data.y
pos += data.pos
print("[INFO] Data loaded: {}".format(absolute_file_path))
return Data(x=X, y=Y, pos=pos, total=total)
def pickle_dump(obj, file):
return pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)
def output_data(dst, data):
print("[INFO] Output: {}".format(os.path.abspath(dst)))
with open(dst, "wb") as f:
pickle_dump(data.total, f)
pickle_dump(data.x, f)
pickle_dump(data.y, f)
pickle_dump(data.pos, f)
def main():
args = process_command()
data = load_data_from(
directory_path=args.src,
need_shuffle_file_paths=args.shuffle_data
)
output_data(
dst=os.path.join(args.dst, args.bin_name),
data=data
)
if __name__ == "__main__":
main()
```
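A minimal round-trip sketch for the combiner above, assuming the file is importable as `CombineBins` (in the repo it is normally run as a script). It writes two toy bins in the `(total, X, Y, pos)` pickle layout that `load_data_from_one_file_path` expects, combines them, and re-serializes the result; all file names here are made up.
```python
import os
import pickle
import tempfile

# Write two toy bins in the (total, X, Y, pos) pickle layout expected above.
src = tempfile.mkdtemp()
for name, xs in [("bin_0", [1, 2]), ("bin_1", [3])]:
    with open(os.path.join(src, name), "wb") as f:
        for obj in (len(xs), xs, [x * 10 for x in xs], [(0, x) for x in xs]):
            pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)

from CombineBins import load_data_from, output_data

data = load_data_from(src)
assert data.total == 3 and data.x == [1, 2, 3]
output_data(dst=os.path.join(src, "tensor.bin"), data=data)
```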
#### File: Clair/dataPrepScripts/GetTruth.py
```python
import sys
import shlex
from subprocess import PIPE
from argparse import ArgumentParser
from collections import namedtuple
from shared.utils import file_path_from, executable_command_string_from, subprocess_popen
VariantInfo = namedtuple('VariantInfo', ['chromosome', 'position', 'reference', 'alternate', 'genotype_1', 'genotype_2'])
class TruthStdout(object):
def __init__(self, handle):
self.stdin = handle
def __del__(self):
self.stdin.close()
def GetBase(chromosome, position, ref_fn):
fp = subprocess_popen(shlex.split("samtools faidx %s %s:%s-%s" % (ref_fn, chromosome, position, position)))
for line in fp.stdout:
if line[0] == ">":
continue
else:
return line.strip()
def GetLineFromInfo(variant_info):
return (" ".join(variant_info) + "\n")
def GetInfosFromVar(variant_info, ref_fn):
chromosome, position, reference, alternate, genotype_1, genotype_2 = variant_info
if "*" not in alternate:
return [variant_info]
else:
if ref_fn is None:
sys.exit("Please provide a reference file correspond to the vcf.")
try:
alternate_list = alternate.split(",")
        except Exception as e:
            print(e, file=sys.stderr)
            sys.exit("Exception occurred when getting a true variant, exiting ...")
if alternate_list[1] == "*":
alternate_list[0], alternate_list[1] = alternate_list[1], alternate[0]
lines = []
for alt in alternate_list:
if alt == "*":
new_pos = str(int(position)-1)
new_alt = GetBase(chromosome, new_pos, ref_fn)
new_ref = new_alt + reference[0]
lines.append(VariantInfo(chromosome, new_pos, new_ref, new_alt, "0", "1"))
else:
lines.append(VariantInfo(chromosome, position, reference, alt, "0", "1"))
return lines
def MergeInfos(info_1, info_2):
if "," in info_1.reference or "," in info_1.alternate:
return info_1
if info_1.reference == info_2.reference:
if info_1.alternate == info_2.alternate:
return info_1
else:
new_alternate = "{},{}".format(info_1.alternate, info_2.alternate)
return VariantInfo(info_1.chromosome, info_1.position, info_1.reference, new_alternate, "1", "2")
else:
if len(info_1.alternate) > len(info_2.alternate):
info_1, info_2 = info_2, info_1
new_ref = info_2.reference
new_alternate = "{},{}".format(info_1.alternate + info_2.reference[len(info_1.reference)-len(info_2.reference):], info_2.alternate)
return VariantInfo(info_1.chromosome, info_1.position, new_ref, new_alternate, "1", "2")
def OutputVariant(args):
var_fn = args.var_fn
vcf_fn = args.vcf_fn
ref_fn = args.ref_fn
ctg_name = args.ctgName
ctg_start = args.ctgStart
ctg_end = args.ctgEnd
if args.var_fn != "PIPE":
var_fpo = open(var_fn, "wb")
var_fp = subprocess_popen(shlex.split("gzip -c"), stdin=PIPE, stdout=var_fpo)
else:
var_fp = TruthStdout(sys.stdout)
is_ctg_region_provided = ctg_start is not None and ctg_end is not None
if (
is_ctg_region_provided and
file_path_from("%s.tbi" % (vcf_fn)) is not None and
executable_command_string_from("tabix") is not None
):
vcf_fp = subprocess_popen(shlex.split("tabix -f -p vcf %s %s:%s-%s" % (vcf_fn, ctg_name, ctg_start, ctg_end)))
else:
vcf_fp = subprocess_popen(shlex.split("gzip -fdc %s" % (vcf_fn)))
buffer_line = None
buffer_line_pos = -1
for row in vcf_fp.stdout:
columns = row.strip().split()
if columns[0][0] == "#":
continue
# position in vcf is 1-based
chromosome, position = columns[0], columns[1]
if chromosome != ctg_name:
continue
if is_ctg_region_provided and not (ctg_start <= int(position) <= ctg_end):
continue
reference, alternate, last_column = columns[3], columns[4], columns[-1]
# normal GetTruth
genotype = last_column.split(":")[0].replace("/", "|").replace(".", "0").split("|")
genotype_1, genotype_2 = genotype
# 1000 Genome GetTruth (format problem) (no genotype is given)
# genotype_1, genotype_2 = "1", "1"
# if alternate.find(',') >= 0:
# genotype_1, genotype_2 = "1", "2"
if int(genotype_1) > int(genotype_2):
genotype_1, genotype_2 = genotype_2, genotype_1
info_line = VariantInfo(chromosome, position, reference, alternate, genotype_1, genotype_2)
for info in GetInfosFromVar(info_line, ref_fn):
if int(info.position) == buffer_line_pos:
buffer_line = MergeInfos(buffer_line, info)
else:
                if buffer_line is not None:
var_fp.stdin.write(GetLineFromInfo(buffer_line))
buffer_line = info
buffer_line_pos = int(buffer_line.position)
var_fp.stdin.write(GetLineFromInfo(buffer_line))
vcf_fp.stdout.close()
vcf_fp.wait()
if args.var_fn != "PIPE":
var_fp.stdin.close()
var_fp.wait()
var_fpo.close()
def main():
parser = ArgumentParser(description="Extract variant type and allele from a Truth dataset")
parser.add_argument('--vcf_fn', type=str, default="input.vcf",
help="Truth vcf file input, default: %(default)s")
parser.add_argument('--var_fn', type=str, default="PIPE",
help="Truth variants output, use PIPE for standard output, default: %(default)s")
parser.add_argument('--ref_fn', type=str, default=None,
help="Reference file input, must be provided if the vcf contains '*' in ALT field.")
parser.add_argument('--ctgName', type=str, default="chr17",
help="The name of sequence to be processed, default: %(default)s")
parser.add_argument('--ctgStart', type=int, default=None,
help="The 1-based starting position of the sequence to be processed")
parser.add_argument('--ctgEnd', type=int, default=None,
help="The 1-based inclusive ending position of the sequence to be processed")
args = parser.parse_args()
if len(sys.argv[1:]) == 0:
parser.print_help()
sys.exit(1)
OutputVariant(args)
if __name__ == "__main__":
main()
``` |
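A small illustration of how `MergeInfos` folds two truth records at the same position into one multi-allelic record, assuming the module (and its `shared.utils` dependency) is importable as `GetTruth`.
```python
from GetTruth import VariantInfo, MergeInfos  # the module above

# Two records at the same site with the same REF but different ALTs are
# merged into one heterozygous multi-allelic record with genotype 1/2.
a = VariantInfo("chr17", "100", "A", "C", "0", "1")
b = VariantInfo("chr17", "100", "A", "G", "0", "1")
merged = MergeInfos(a, b)
print(merged.alternate)                     # "C,G"
print(merged.genotype_1, merged.genotype_2) # "1" "2"
```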
{
"source": "joplin-vieweb/joplin_vieweb",
"score": 2
} |
#### File: joplin_vieweb/joplin_vieweb/hyperlink_preview_cache.py
```python
import hyperlink_preview as HLP
import threading
from collections import OrderedDict
_hlp_cache = OrderedDict()
_hlp_cache_lock = threading.Lock()
_HLP_DICT_MAX_SIZE = 50
class _HlpCache:
def __init__(self) -> None:
self.data_get = threading.Event()
self.hlp: HLP.HyperLinkPreview = None
def get_hyperlink_preview(link_url):
is_new = True
with _hlp_cache_lock:
try:
hlp_cache = _hlp_cache[link_url]
is_new = False
        except KeyError:
# target link is not in cache: we compute it
hlp_cache = _HlpCache()
_hlp_cache[link_url] = hlp_cache
if len(_hlp_cache) > _HLP_DICT_MAX_SIZE:
_hlp_cache.popitem(last=False)
    # The following code runs outside the "with _hlp_cache_lock" block to release the lock ASAP.
if is_new:
try:
hlp_cache.hlp = HLP.HyperLinkPreview(url=link_url)
        except Exception:
            # Unblock any thread already waiting on this entry before
            # dropping it from the cache.
            hlp_cache.hlp = None
            hlp_cache.data_get.set()
            del _hlp_cache[link_url]
            return None
hlp_cache.data_get.set()
else:
hlp_cache.data_get.wait()
    if hlp_cache.hlp is None or not hlp_cache.hlp.is_valid:
        return None
return hlp_cache.hlp.get_data(wait_for_imgs=False)
def get_hyperlink_preview_image(link_url):
with _hlp_cache_lock:
try:
hlp_cache = _hlp_cache[link_url]
        except KeyError:
return None # unexpected... but not a big issue.
return hlp_cache.hlp.get_data(wait_for_imgs = True)
```
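A hedged concurrency sketch for the cache above, assuming the module is importable as shown: two threads request the same URL, only the first computes the preview, and the second blocks on the entry's `data_get` event until the result is ready. The URL is a placeholder and the call needs network access.
```python
import threading
from joplin_vieweb import hyperlink_preview_cache as hpc  # the module above

results = []

def fetch():
    # Both threads hit the same URL; only the first computes the preview,
    # the second waits on the cache entry's `data_get` event.
    results.append(hpc.get_hyperlink_preview("https://example.com"))

threads = [threading.Thread(target=fetch) for _ in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(results)
```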
#### File: joplin_vieweb/joplin_vieweb/joplin_x_api.py
```python
import requests
import logging
class Api():
def __init__(self, url: str) -> None:
self.url = url
if not self.url.endswith("/"):
self.url = self.url + "/"
def _request(self, method: str, path: str, data = None) -> requests.models.Response:
password_safe_data = None
if data is not None:
            password_safe_data = {key: value if "password" not in key.lower() else "*****" for key, value in data.items()}
logging.debug(f"joplin-x-api: {method} request: path={path}, data={password_safe_data}")
try:
response: requests.models.Response = getattr(requests, method)(
f"{self.url}{path}",
json=data
)
logging.debug(f"joplin-x-api: response {response.text}")
response.raise_for_status()
except requests.exceptions.HTTPError as err:
err.args = err.args + (response.text,)
raise
return response
def get_conf(self):
return self._request("get", "joplin/config").json()
def set_conf(self, config_data):
return self._request("post", "joplin/config", data=config_data)
def test_conf(self, config_data):
return self._request("post", "joplin/config/test", data=config_data)
def start_synch(self):
self._request("post", "joplin/synch/")
def get_synch(self):
return self._request("get", "joplin/synch").json()
```
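A usage sketch of the `Api` wrapper above, with the server URL as a placeholder; the endpoints are simply the ones wrapped by the class, not a documented contract.
```python
from joplin_vieweb.joplin_x_api import Api  # the module above

api = Api("http://localhost:3000")  # placeholder URL for a joplin-x-api server
config = api.get_conf()             # GET joplin/config
api.start_synch()                   # POST joplin/synch/
print(api.get_synch())              # poll synchronisation status
```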
#### File: management/commands/initadmin.py
```python
import logging
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    help = 'Add an admin/admin account if no user exists.'
def handle(self, *args, **options):
User = get_user_model()
users = User.objects.all()
if users.count() == 0:
username = "admin"
email = "<EMAIL>"
password = '<PASSWORD>'
logging.info('Creating account for %s (%s)' % (username, email))
User.objects.create_superuser(username, email, password)
else:
            logging.info('Admin accounts can only be initialized if no accounts exist')
``` |
{
"source": "jopo666/HistoPrep",
"score": 2
} |
#### File: HistoPrep/histoprep/_cutter.py
```python
import os
from os.path import dirname, join, basename, exists
from typing import List, Tuple, Callable, Union
import itertools
import numpy as np
import pandas as pd
from tqdm import tqdm
from PIL import Image, ImageDraw
from openslide import OpenSlide
from ._functional import (
get_thumbnail,
get_downsamples,
try_thresholds,
resize
)
from .preprocess.functional import preprocess, tissue_mask
from ._czi_reader import OpenSlideCzi
from .helpers._utils import (
remove_extension,
remove_images,
multiprocess_map
)
from ._logger import logger
__all__ = [
'Cutter',
'TMACutter'
]
class Cutter(object):
"""
Cut tiles from histological images.
    This class detects tissue on the slide and cuts tiles of the desired
    width from the image.
Args:
slide_path (str): Path to the slide image. All formats that are
supported by openslide can be used.
width (int): Tile width.
overlap (float, optional): Overlap between neighbouring tiles. Defaults
to 0.0.
threshold (int or float, optional): Threshold value for tissue
detection. Defaults to 1.1.
If threshold is an integer between [1, 255]:
This value will be used as an threshold for tissue detection.
Different thresholds can be easily searched with the
Cutter.try_thresholds() function.
If threshold is a float:
In this case, Otsu's binarization is used and the found
threshold is multiplied by `threshold` as Otsu isn't optimal
for histological images.
downsample (int, optional): Downsample used for the thumbnail.
When a lower downsample is used, the thumbnail-based background
detection is more accurate but slower. Good results are achieved
with downsample=16. Defaults to 16.
max_background (float, optional): Maximum amount of background allowed
for a tile. Due to the thumbnail-based background detection, tiles
with higher background percentage may pass through but rarely the
other way around. Defaults to 0.999.
create_thumbnail (bool, optional): Create a thumbnail if downsample is
not available. Defaults to False.
thumbnail_path (str, optional): Load a created thumbnail from a file.
Defaults to None.
Raises:
IOError: slide_path not found.
ValueError: downsample is not available and create_thumbnail=False.
IOError: thumbnail_path not found.
Example::
import histoprep as hp
cutter = hp.Cutter(slide_path='path/to/slide', width=512, overlap=0.2)
metadata = cutter.save('/path/to/output_dir')
"""
def __init__(
self,
slide_path: str,
width: int,
overlap: float = 0.0,
threshold: Union[int, float] = 1.1,
downsample: int = 16,
max_background: float = 0.999,
create_thumbnail: bool = False,
thumbnail_path: str = None):
super().__init__()
# Define slide reader.
if not exists(slide_path):
raise IOError(f'{slide_path} not found.')
if slide_path.endswith('czi'):
logger.warning(
"Support for czi-files is in alpha phase! If "
"you run into errors, please submit an issue to "
"https://github.com/jopo666/HistoPrep/issues"
)
self.reader = OpenSlideCzi(slide_path)
self._czi = True
else:
self.reader = OpenSlide(slide_path)
self._czi = False
        # Assign basic attributes that the user can see/check.
self.slide_path = slide_path
self.slide_name = remove_extension(basename(slide_path))
self.dimensions = self.reader.dimensions
self.downsample = downsample
self.width = width
self.overlap = overlap
self.threshold = threshold
self.max_background = max_background
self.all_coordinates = self._get_all_coordinates()
# Filter coordinates.
if thumbnail_path is not None:
if not exists(thumbnail_path):
raise IOError(f'{thumbnail_path} not found.')
self.thumbnail = Image.open(thumbnail_path).convert('RGB')
else:
self.thumbnail = get_thumbnail(
slide_path=self.slide_path,
downsample=self.downsample,
create_thumbnail=create_thumbnail
)
if self.thumbnail is None:
# Downsample not available.
raise ValueError(
f'Thumbnail not available for downsample {self.downsample}. '
'Please set create_thumbnail=True or select downsample from\n'
f'{self._downsamples()}'
)
self.threshold, self._tissue_mask = tissue_mask(
image=self.thumbnail,
threshold=self.threshold,
return_threshold=True
)
self.filtered_coordinates = self._filter_coordinates()
# Annotate thumbnail
self._annotate()
def __repr__(self):
return self.__class__.__name__ + '()'
def __len__(self):
return len(self.filtered_coordinates)
def available_downsamples(self):
"""
Returns available downsamples for the slide.
"""
print(self._downsamples())
def _downsamples(self):
string = 'Downsample Dimensions'
if self._czi:
d = {1: self.dimensions}
else:
d = get_downsamples(self.slide_path)
for item, val in d.items():
string += f'\n{str(item).ljust(12)}{val}'
return string
def summary(self):
"""Returns a summary of the cutting process."""
print(self._summary())
def _summary(self):
return (
f"{self.slide_name}"
f"\n Tile width: {self.width}"
f"\n Tile overlap: {self.overlap}"
f"\n Threshold: {self.threshold}"
f"\n Max background: {self.max_background}"
f"\n Thumbnail downsample: {self.downsample}"
f"\n Total number of tiles: {len(self.all_coordinates)}"
f"\n After background filtering: {len(self.filtered_coordinates)}"
)
def get_annotated_thumbnail(self,
max_pixels: int = 1_000_000) -> Image.Image:
"""
        Returns a Pillow Image of the annotated thumbnail for inspection.
        Args:
            max_pixels (int, optional): Downsample the image until it
                has fewer than max_pixels pixels. Defaults to 1_000_000.
Returns:
Image.Image: Annotated thumbnail.
"""
return resize(self._annotated_thumbnail, max_pixels)
def get_thumbnail(self, max_pixels: int = 1_000_000) -> Image.Image:
"""
        Returns a Pillow Image of the thumbnail for inspection.
        Args:
            max_pixels (int, optional): Downsample the image until it
                has fewer than max_pixels pixels. Defaults to 1_000_000.
Returns:
Image.Image: Thumbnail.
"""
return resize(self.thumbnail, max_pixels)
def get_tissue_mask(self, max_pixels: int = 1_000_000) -> Image.Image:
"""
        Returns a Pillow Image of the tissue mask for inspection.
        Args:
            max_pixels (int, optional): Downsample the image until it
                has fewer than max_pixels pixels. Defaults to 1_000_000.
Returns:
Image.Image: Tissue mask.
"""
mask = self._tissue_mask
# Flip for a nicer image
mask = 1 - mask
mask = mask/mask.max()*255
mask = Image.fromarray(mask.astype(np.uint8))
return resize(mask, max_pixels)
def _prepare_directories(self, output_dir: str) -> None:
out_dir = join(output_dir, self.slide_name)
# Save paths.
self._meta_path = join(out_dir, 'metadata.csv')
self._thumb_path = join(out_dir, f'thumbnail_{self.downsample}.jpeg')
self._annotated_path = join(out_dir, 'thumbnail_annotated.jpeg')
self._param_path = join(out_dir, 'parameters.p')
self._summary_path = join(out_dir, 'summary.txt')
self._image_dir = join(out_dir, 'tiles')
# Make dirs.
os.makedirs(out_dir, exist_ok=True)
os.makedirs(self._image_dir, exist_ok=True)
def _annotate(self) -> None:
# Draw tiles to the thumbnail.
self._annotated_thumbnail = self.thumbnail.copy()
annotated = ImageDraw.Draw(self._annotated_thumbnail)
w = h = int(self.width/self.downsample)
for (x, y), __ in self.filtered_coordinates:
x_d = round(x/self.downsample)
y_d = round(y/self.downsample)
annotated.rectangle([x_d, y_d, x_d+w, y_d+h],
outline='red', width=4)
def try_thresholds(
self,
thresholds: List[int] = [250, 240, 230,
220, 200, 190, 180, 170, 160, 150, 140],
max_pixels: int = 1_000_000
) -> Image.Image:
"""
Try out different thresholds for tissue detection.
        The function prepares tissue masks with the given thresholds and
        combines them into one summary image.
Args:
thresholds (List[int], optional): Thresholds to try. Defaults to
[250, 240, 230, 220, 200, 190, 180, 170, 160, 150, 140].
            max_pixels (int, optional): Downsample the image until it
                has fewer than max_pixels pixels. Defaults to 1_000_000.
        Returns:
            Image.Image: Summary image of the tissue masks for each threshold.
"""
        # Honour the documented max_pixels parameter.
        return resize(
            try_thresholds(thumbnail=self.thumbnail, thresholds=thresholds),
            max_pixels
        )
def save(
self,
output_dir: str,
overwrite: bool = False,
image_format: str = 'jpeg',
quality: int = 95,
custom_preprocess: Callable[[Image.Image], dict] = None
) -> pd.DataFrame:
"""
Save tile images and metadata.
The function saves all the detected tiles in the desired format. When
        the actual image is loaded into memory, basic preprocessing metrics are
computed and added to metadata for preprocessing.
Args:
output_dir (str): Parent directory for all output.
            overwrite (bool, optional): This will **remove** all saved images,
                the thumbnail and the metadata, and save the images again.
                Defaults to False.
image_format (str, optional): Format can be jpeg or png. Defaults
to 'jpeg'.
quality (int, optional): For jpeg compression. Defaults to 95.
custom_preprocess (Callable[[Image.Image], dict], optional): This is
intended for users that want to define their own preprocessing
function. The function must take a Pillow image as an input and
return a dictionary of desired metrics. Defaults to None.
Raises:
ValueError: Invalid image format.
Returns:
pd.DataFrame: Metadata.
"""
allowed_formats = ['jpeg', 'png']
if image_format not in allowed_formats:
raise ValueError(
'Image format {} not allowed. Select from {}'.format(
image_format, allowed_formats
))
self._prepare_directories(output_dir)
# Check if slide has been cut before.
if exists(self._meta_path) and not overwrite:
            logger.warning('Slide has already been cut!')
return pd.read_csv(self._meta_path)
elif exists(self._meta_path) and overwrite:
# Remove all previous files.
os.remove(self._annotated_path)
os.remove(self._meta_path)
remove_images(self._image_dir)
# Save both thumbnails.
self.thumbnail.save(self._thumb_path, quality=95)
self._annotated_thumbnail.save(self._annotated_path, quality=95)
# Save used parameters. NOTE: Can't remember where I would need these...
# self._save_parameters()
# Save text summary.
with open(self._summary_path, "w") as f:
f.write(self._summary())
        # Collect the arguments for the saving function so it can be parallelized.
func_args = {
'slide_path': self.slide_path,
'slide_name': self.slide_name,
'image_dir': self._image_dir,
'width': self.width,
'threshold': self.threshold,
'image_format': image_format,
'quality': quality,
'custom_preprocess': custom_preprocess,
}
# Multiprocessing to speed things up!
metadata = multiprocess_map(
func=save_tile,
lst=self.filtered_coordinates,
func_args=func_args,
desc=self.slide_name,
)
metadata = list(filter(lambda x: x is not None, metadata))
if len(metadata) == 0:
logger.warning(f'No tiles saved from slide {self.slide_path}!')
return
# Save metadata.
self.metadata = pd.DataFrame(metadata)
self.metadata.to_csv(self._meta_path, index=False)
return self.metadata
def _get_all_coordinates(self):
"""Return tile coordinates over the whole slide."""
x = [0]
y = [0]
overlap_px = int(self.width*self.overlap)
while x[-1] < self.dimensions[0]:
x.append(x[-1] + self.width - overlap_px)
x = x[:-1]
while y[-1] < self.dimensions[1]:
y.append(y[-1] + self.width - overlap_px)
y = y[:-1]
coordinates = list(itertools.product(x, y))
return coordinates
def _filter_coordinates(self):
"""Filter out coordinates with too much background."""
filtered = []
width_d = np.ceil(self.width/self.downsample).astype(int)
for x, y in self.all_coordinates:
y_d = int(y/self.downsample)
x_d = int(x/self.downsample)
mask = self._tissue_mask[y_d:y_d+width_d, x_d:x_d+width_d]
if mask.size == 0:
continue
bg_perc = 1 - mask.sum()/mask.size
if bg_perc < self.max_background:
filtered.append(((x, y), bg_perc))
return filtered
def save_tile(
coords: Tuple[int, int, float],
slide_path: str,
slide_name: str,
image_dir: str,
width: int,
threshold: int,
image_format: str,
quality: int,
custom_preprocess: Callable[[Image.Image], dict] = None
) -> dict:
"""Saves a tile and returns metadata (parallizable)."""
# Load slide as it can't be pickled...
if slide_path.endswith('czi'):
reader = OpenSlideCzi(slide_path)
else:
reader = OpenSlide(slide_path)
(x, y), bg_estimate = coords
# Prepare filename.
filepath = join(image_dir, f'x-{x}_y-{y}')
if image_format == 'png':
filepath = filepath + '.png'
else:
filepath = filepath + '.jpeg'
# Collect basic metadata.
metadata = {
'path': filepath,
'slide_name': slide_name,
'x': x,
'y': y,
'width': width,
'background_estimate': bg_estimate
}
# Load image.
try:
image = reader.read_region((x, y), 0, (width, width)).convert('RGB')
except:
        # Parts of the slide may be corrupt or unreadable; skip such tiles.
return
# Update metadata with preprocessing metrics.
metadata.update(preprocess(image=image, threshold=threshold))
# Add custom metrics.
if custom_preprocess is not None:
metadata.update(custom_preprocess(image))
# Save image.
if not exists(filepath):
image.save(filepath, quality=quality)
return metadata
```
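The `custom_preprocess` hook of `save()` takes a Pillow image and returns a dict of extra metrics that are merged into `metadata.csv`. A sketch of such a hook computing per-channel means; the metric names and paths below are made up.
```python
import numpy as np
from PIL import Image
import histoprep as hp


def mean_rgb(image: Image.Image) -> dict:
    """Extra per-tile metrics merged into metadata.csv by save()."""
    arr = np.array(image.convert("RGB"), dtype=np.float32)
    return {
        "mean_red": float(arr[..., 0].mean()),
        "mean_green": float(arr[..., 1].mean()),
        "mean_blue": float(arr[..., 2].mean()),
    }


# Paths are placeholders.
cutter = hp.Cutter(slide_path="path/to/slide", width=512, overlap=0.2)
metadata = cutter.save("path/to/output_dir", custom_preprocess=mean_rgb)
```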
#### File: HistoPrep/histoprep/_logger.py
```python
import io
import logging
import time
import tqdm
__all__ = [
'logger',
'progress_bar',
]
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLORS = {
'WARNING': RED,
'INFO': BLUE,
'DEBUG': CYAN,
'CRITICAL': YELLOW,
'ERROR': RED
}
BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}'
TQDM_OUTPUT = '_OUTPUT_TQDM_'
TQDM_END = '_END_TQDM_'
def formatter_message(message, use_color=True):
if use_color:
message = message.replace(
"$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
class ColoredFormatter(logging.Formatter):
def __init__(self, msg, use_color=True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
levelname = record.levelname
msg = record.msg
if self.use_color and levelname in COLORS:
# Background is 40, foreground 30.
levelname_color = COLOR_SEQ % (
30 + COLORS[levelname]) + levelname + RESET_SEQ
record.levelname = levelname_color
return logging.Formatter.format(self, record)
class TqdmLoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
FORMAT = ("[$BOLD%(levelname)s$RESET] %(message)s ")
TQDM_FORMAT = " %(message)s"
COLOR_FORMAT = formatter_message(FORMAT, True)
COLOR_FORMAT_TQDM = formatter_message(TQDM_FORMAT, True)
self.formatter = ColoredFormatter(COLOR_FORMAT)
self.tqdm = ColoredFormatter(COLOR_FORMAT_TQDM)
self.last_tqdm_output = None
self.max_tqdm_len = -1
def emit(self, record):
try:
if TQDM_OUTPUT in record.msg:
record.msg = record.msg.replace(TQDM_OUTPUT, "")
msg = self.tqdm.format(record)
end = "\r"
self.last_tqdm_output = msg
self.max_tqdm_len = max(self.max_tqdm_len, len(msg))
elif TQDM_END in record.msg:
record.msg = record.msg.replace(TQDM_END, "")
msg = self.tqdm.format(record)
end = "\n"
else:
msg = self.format(record)
end = "\n"
tqdm.tqdm.write(msg, end=end)
self.flush()
except Exception:
self.handleError(record)
class TqdmToLogger(io.StringIO):
"""Sends tqdm output to logger"""
logger = None
level = None
buf = ''
def __init__(self, logger, level=None):
super(TqdmToLogger, self).__init__()
self.logger = logger
self.level = level or logging.INFO
def write(self, buf):
self.buf = buf.strip('\r\n\t ')
def flush(self):
self.logger.log(self.level, self.buf)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(TqdmLoggingHandler())
def progress_bar(iterable, **kwargs):
"""Wraps tqdm.tqdm and passes everything nicely through logger."""
out = TqdmToLogger(logger, level=logging.INFO)
desc = kwargs.get('desc', 'progress')
kwargs['desc'] = TQDM_OUTPUT + desc
for x in tqdm.tqdm(iterable, file=out, bar_format=BAR_FORMAT, **kwargs):
yield x
logger.info(TQDM_END)
return
```
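A short sketch of `progress_bar` in use (note that `histoprep._logger` is a private module, so this import is an assumption): tqdm output is routed through the logger, so log records emitted inside the loop do not mangle the bar.
```python
import time
from histoprep._logger import logger, progress_bar  # the module above

for i in progress_bar(range(5), desc="demo"):
    time.sleep(0.1)
    if i == 2:
        # Interleaved log records go through the same handler as the bar.
        logger.info("halfway there")
```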
#### File: preprocess/functional/_utils.py
```python
from typing import Union, List, Dict, Any, Tuple
import numpy as np
import cv2
from PIL import Image
__all__ = [
'tissue_mask',
'HSV_quantiles',
'RGB_quantiles',
'data_loss',
'laplacian_variance',
'sharpness',
'preprocess',
'sliding_window',
'PIL_to_array',
'array_to_PIL',
'mask_to_PIL',
]
def PIL_to_array(image: Image.Image) -> np.ndarray:
"""
Convert Pillow image to numpy array.
Args:
image (Image.Image): Input image.
Raises:
TypeError: Invalid input type for ``image``.
Returns:
np.ndarray: ``image`` as an numpy array.
"""
if isinstance(image, Image.Image):
if image.mode != 'RGB':
image = image.convert('RGB')
return np.array(image)
else:
        raise TypeError('Expected {} not {}.'.format(
            Image.Image, type(image)))
def array_to_PIL(image: np.ndarray) -> Image.Image:
"""
    Convert numpy array to Pillow image.
Args:
image (np.ndarray): Input image.
Raises:
TypeError: Invalid input type for ``image``.
Returns:
Image.Image: ``image`` as a Pillow image.
"""
if isinstance(image, np.ndarray):
return Image.fromarray(image.astype(np.uint8))
else:
        raise TypeError('Expected {} not {}.'.format(np.ndarray, type(image)))
""""""
def mask_to_PIL(mask: np.ndarray) -> Image.Image:
"""
Normalize a numpy mask between 0 and 255 and convert to PIL image.
Args:
mask (np.ndarray): Mask as an numpy array.
Raises:
TypeError: Invalid mask type.
Returns:
Image.Image: Mask image.
"""
if isinstance(mask, np.ndarray):
# Normalize between 0-255.
if mask.max() != 0:
mask = (mask/mask.max()) * 255
return Image.fromarray(mask.astype(np.uint8))
else:
        raise TypeError('Expected {} not {}.'.format(np.ndarray, type(mask)))
def tissue_mask(
image: Union[np.ndarray, Image.Image],
threshold: Union[int, float] = 1.1,
blur_kernel: Tuple[int, int] = (5, 5),
blur_iterations: int = 1,
return_threshold: bool = False) -> np.ndarray:
"""
Generate a tissue mask for image.
Two methods are implemented.
Otsu's binarization (threshold is a float):
Otsu's method is used to find an optimal threshold by minimizing the
weighted within-class variance. Due to this, a relatively low
threshold for tissue detection is often selected and actual tissue
is misclassified as background. Binarization is also forced even for
tiles with only background, causing the detection of non-existent
tissue. For this reason the found threshold is then multiplied by the
`threshold` input.
    Fixed thresholding (threshold is an integer):
        A user-supplied threshold often performs better than Otsu's method.
        This branch is used automatically when an integer threshold is given.
Args:
image (Union[np.ndarray, Image.Image]): Input image.
        threshold (int or float, optional): Threshold for tissue detection (see
            the method explanation above). Defaults to 1.1.
blur_kernel (Tuple[int, int], optional): Kernel to be used in Gaussian
Blur. Set to None to disable. Defaults to (5, 5).
blur_iterations (int, optional): How many iterations to blur with
kernel. Defaults to 1.
return_threshold (bool, optional): Whether to return the used threshold
in the case of Otsu's method. Defaults to False.
Raises:
TypeError: Invalid type for ``image`` or ``threshold``.
Returns:
np.ndarray: tissue mask.
"""
if isinstance(image, Image.Image):
if image.mode != 'RGB':
image = image.convert('RGB')
image = np.array(image, dtype=np.uint8)
elif isinstance(image, np.ndarray):
image = image.astype(np.uint8)
else:
        raise TypeError('Expected {} or {} not {}.'.format(
np.ndarray, Image.Image, type(image)
))
# Turn RGB to GRAY
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Blur if asked.
if blur_kernel is not None:
gray = cv2.GaussianBlur(gray, blur_kernel, blur_iterations)
    # Then do the thresholding.
    if threshold is None:
        # Fall back to the documented default: Otsu with a 1.1 multiplier.
        threshold = 1.1
    if isinstance(threshold, float):
        # `thresh` must be numeric even though Otsu's method ignores it.
        thresh, __ = cv2.threshold(src=gray, thresh=0, maxval=1,
                                   type=cv2.THRESH_BINARY + cv2.THRESH_OTSU)
threshold = round(thresh * threshold)
if threshold > gray.max():
threshold = gray.max() - 1
else:
try:
threshold = int(threshold)
        except (TypeError, ValueError):
            raise TypeError(f'Expected {int} not {type(threshold)}.')
    # NOTE: cv2.ADAPTIVE_THRESH_GAUSSIAN_C has the same integer value as
    # cv2.THRESH_BINARY_INV, so this is effectively an inverse binary
    # threshold: pixels darker than `threshold` are marked as tissue (1).
    thresh, mask = cv2.threshold(
        src=gray,
        thresh=threshold,
        maxval=1,
        type=cv2.ADAPTIVE_THRESH_GAUSSIAN_C
    )
if return_threshold:
return int(thresh), mask
else:
return mask
def RGB_quantiles(
image: Union[np.ndarray, Image.Image],
quantiles: List[float] = [.01, .05, 0.1,
0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
gray: np.ndarray = None,
mask: np.ndarray = None,
resize: int = None,
threshold: int = None) -> Dict[int, int]:
"""
Measure color channel quantiles.
Useful in the detection of misclassified tissue and artifacts.
Args:
image (Union[np.ndarray, Image.Image]): Input image.
quantiles (List[float], optional): Quantiles to be collected. Defaults
to [.01, .05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99].
gray (np.ndarray, optional): Grayscale image of the input image. Will be
generated if not defined. Defaults to None.
mask (np.ndarray, optional): Tissue mask for the input image.
Will be generated if not defined. Defaults to None.
resize (int, optional): Resize the image to resize x resize. The
function can become really slow with large images as we have to sort
every pixel in the image. In these situations just use this
option. Defaults to None.
threshold (int, optional): For tissue_mask() function. Ignored if mask
is defined. Defaults to None.
Raises:
TypeError: Invalid type for ``image``.
Returns:
Dict[int, int]: Dictionary of color channel quantiles.
"""
if isinstance(image, Image.Image):
if image.mode != 'RGB':
image = image.convert('RGB')
image = np.array(image, dtype=np.uint8)
elif isinstance(image, np.ndarray):
image = image.astype(np.uint8)
else:
        raise TypeError('Expected {} or {} not {}.'.format(
np.ndarray, Image.Image, type(image)
))
if resize is not None:
        image = cv2.resize(image, (resize, resize), interpolation=cv2.INTER_LANCZOS4)
if mask is None:
mask = tissue_mask(image, threshold=threshold)
    elif mask.shape != image.shape[:2]:
        # Nearest-neighbour keeps the mask binary after resizing.
        mask = cv2.resize(mask, (resize, resize), interpolation=cv2.INTER_NEAREST)
if mask.sum() == 0:
# No tissue, return empty dict
return {}
# Collect channels and sort.
red = np.sort(image[mask == 1, 0])
green = np.sort(image[mask == 1, 1])
blue = np.sort(image[mask == 1, 2])
# Collect quantiles.
red = [np.quantile(red, q) for q in quantiles]
green = [np.quantile(green, q) for q in quantiles]
blue = [np.quantile(blue, q) for q in quantiles]
keys = (
[f'red_{x}' for x in quantiles] +
[f'green_{x}' for x in quantiles] +
[f'blue_{x}' for x in quantiles]
)
results = dict(zip(keys, red + green + blue))
return results
"""HSV channel quantiles.
Useful in the detection of misclassified tissue and artifacts.
Arguments:
image:
Input image.
mask:
Tissue mask for the input image. Will be generated if not defined.
quantiles:
The quantiles of hue, sat and value values for tissue areas.
resize:
Resize the image to resize x resize. The function can become
really slow with large images, in these situations just use this
option.
threshold:
For tissue_mask() function. Ignored if mask is defined.
Return:
dict: A dictionary of the quantiles of hue, sat and value values for
tissue areas.
"""
def HSV_quantiles(
image: Union[np.ndarray, Image.Image],
quantiles: List[float] = [.01, .05, 0.1,
0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
mask: np.ndarray = None,
resize: int = None,
threshold: int = None) -> Dict[int, int]:
"""
    Measure HSV channel quantiles.
Useful in the detection of misclassified tissue and artifacts.
Args:
image (Union[np.ndarray, Image.Image]): Input image.
quantiles (List[float], optional): Quantiles to be collected. Defaults
to [.01, .05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99].
mask (np.ndarray, optional): Tissue mask for the input image.
Will be generated if not defined. Defaults to None.
resize (int, optional): Resize the image to resize x resize. The
function can become really slow with large images as we have to sort
every pixel in the image. In these situations just use this
option. Defaults to None.
threshold (int, optional): For tissue_mask() function. Ignored if mask
is defined. Defaults to None.
Raises:
TypeError: Invalid type for ``image``.
Returns:
Dict[int, int]: Dictionary of HSV channels quantiles.
"""
if isinstance(image, Image.Image):
if image.mode != 'RGB':
image = image.convert('RGB')
image = np.array(image, dtype=np.uint8)
elif isinstance(image, np.ndarray):
image = image.astype(np.uint8)
else:
        raise TypeError('Expected {} or {} not {}.'.format(
np.ndarray, Image.Image, type(image)
))
if resize is not None:
        image = cv2.resize(image, (resize, resize), interpolation=cv2.INTER_LANCZOS4)
if mask is None:
mask = tissue_mask(image, threshold=threshold)
    elif mask.shape != image.shape[:2]:
        # Nearest-neighbour keeps the mask binary after resizing.
        mask = cv2.resize(mask, (resize, resize), interpolation=cv2.INTER_NEAREST)
if mask.sum() == 0:
# No tissue, return empty dict
return {}
# Collect channels and sort.
HSV = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
hue = np.sort(HSV[mask == 1, 0])
sat = np.sort(HSV[mask == 1, 1])
val = np.sort(HSV[mask == 1, 2])
# Collect quantiles.
hue = [np.quantile(hue, q) for q in quantiles]
sat = [np.quantile(sat, q) for q in quantiles]
val = [np.quantile(val, q) for q in quantiles]
keys = (
[f'hue_{x}' for x in quantiles] +
[f'sat_{x}' for x in quantiles] +
[f'val_{x}' for x in quantiles]
)
results = dict(zip(keys, hue + sat + val))
return results
def data_loss(image: Union[np.ndarray, Image.Image]) -> Dict[float, float]:
"""
Detect data loss.
Calculates the percentage of completely white and black pixels.
Args:
image (Union[np.ndarray, Image.Image]): Input image.
Raises:
TypeError: Invalid type for ``image``.
Returns:
Dict[float, float]: Percentages of completely black (0) and white (255)
pixels.
"""
if isinstance(image, Image.Image):
if image.mode != 'RGB':
image = image.convert('RGB')
image = np.array(image, dtype=np.uint8)
elif isinstance(image, np.ndarray):
image = image.astype(np.uint8)
else:
        raise TypeError('Expected {} or {} not {}.'.format(
np.ndarray, Image.Image, type(image)
))
if len(image.shape) > 2:
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
else:
gray = image
return {
'black_pixels': gray[gray == 0].size/gray.size,
'white_pixels': gray[gray == 255].size/gray.size
}
def laplacian_variance(image: Union[np.ndarray, Image.Image]):
"""
Return the laplacian variance of the image.
Args:
image (Union[np.ndarray, Image.Image]): input image.
Raises:
        TypeError: Invalid type for ``image``.
    Returns:
        float: Variance of the Laplacian, used as a sharpness proxy.
    """
if isinstance(image, Image.Image):
if image.mode != 'RGB':
image = image.convert('RGB')
image = np.array(image, dtype=np.uint8)
elif isinstance(image, np.ndarray):
image = image.astype(np.uint8)
else:
        raise TypeError('Expected {} or {} not {}.'.format(
np.ndarray, Image.Image, type(image)
))
# Laplacian variance is defined for greyscale images.
if len(image.shape) > 2:
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
else:
gray = image
return cv2.Laplacian(gray, cv2.CV_32F).var()
def sharpness(
image: Union[np.ndarray, Image.Image],
divider: int = 2,
reduction: Union[str, List[str]] = 'max') -> dict:
"""
Sharpness detection with Laplacian variance.
Divides the image into 9, 25, 49, ... tiles with 50% overlap based on
``divider``, calculates the Laplacian sharpness for each tile and returns
the value based on ``reduction``.
Args:
image (Union[np.ndarray, Image.Image]): Input image.
divider (int, optional): Divider argument for the ``sliding_window()``
function. Defaults to 2.
reduction (Union[str, List[str]], optional): Reduction method(s) for the
Laplacian variance values for each window. Defaults to 'max'.
Raises:
TypeError: Invalid type for ``image``.
Returns:
dict: Laplacian variance values.
"""
if isinstance(image, Image.Image):
if image.mode != 'RGB':
image = image.convert('RGB')
image = np.array(image, dtype=np.uint8)
elif isinstance(image, np.ndarray):
image = image.astype(np.uint8)
else:
        raise TypeError('Expected {} or {} not {}.'.format(
np.ndarray, Image.Image, type(image)
))
# Laplacian variance is defined for greyscale images.
if len(image.shape) > 2:
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
else:
gray = image
    values = []
    # Compute per-window Laplacian variance on the greyscale image,
    # forwarding the requested divider.
    for window in sliding_window(gray, divider=divider):
        values.append(cv2.Laplacian(window, cv2.CV_32F).var())
# Then reduction(s).
if isinstance(reduction, str):
reduction = [reduction]
results = dict(zip(
['sharpness_'+method for method in reduction],
[reduce_values(values, method) for method in reduction]
))
return results
def reduce_values(values: list, method: str):
"""Reduce values of values with given method."""
allowed_methods = ['max', 'median', 'mean', 'min']
if method not in allowed_methods:
        raise ValueError('Reduction {} not recognised. Select from {}.'.format(
            method, allowed_methods
        ))
if method == 'max':
return np.max(values)
elif method == 'median':
return np.median(values)
elif method == 'mean':
return np.mean(values)
elif method == 'min':
return np.min(values)
"""
Sliding window with 0.5 overlap.
:param image: Input image.
:type image: Union[np.ndarray, Image.Image]
:param divider: Window size is defined as min(height,width)/divider, where
divider defaults to 2. For square images, divider values will produce:
1: original image
2: 3x3=9 windows
3: 5x5=25 windows
4: 7x7=49 windows
:type divider: int, optional
:raises TypeError: If image is in a wrong format.
:return: List of window images.
:rtype: List[np.ndarray]
"""
def sliding_window(
image: Union[np.ndarray, Image.Image],
divider: int = 2) -> List[np.ndarray]:
"""
Sliding window with 0.5 overlap.
Args:
image (Union[np.ndarray, Image.Image]): Input image.
divider (int, optional): Window size is defined as
min(height,width)/divider, where divider defaults to 2. For square
images, divider values will produce:
1: original image
2: 3x3=9 windows
3: 5x5=25 windows
4: 7x7=49 windows.
Raises:
TypeError: Invalid type for ``image``.
Returns:
List[np.ndarray]: List of window images.
"""
if isinstance(image, Image.Image):
image = np.array(image, dtype=np.uint8)
elif isinstance(image, np.ndarray):
image = image.astype(np.uint8)
else:
        raise TypeError('Expected {} or {} not {}.'.format(
np.ndarray, Image.Image, type(image)
))
if divider < 2:
return [image]
w = int(min(x/divider for x in image.shape[:2]))
rows, cols = [int(x/(w/2)-1) for x in image.shape[:2]]
windows = []
    for row in range(rows):
        for col in range(cols):
            # Step by half a window width to get the documented 50% overlap;
            # this matches the window count computed above.
            r = int(row * w / 2)
            c = int(col * w / 2)
            windows.append(image[r:r + w, c:c + w])
return windows
def preprocess(
image: Union[np.ndarray, Image.Image],
threshold: int = None,
resize: int = 64,
quantiles: List[float] = [.01, .05, 0.1,
0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
reduction: Union[str, List[str]] = ['max', 'median', 'mean', 'min']
) -> dict:
"""
Basic preprocessing metrics for a histological image.
Args:
image (Union[np.ndarray, Image.Image]): Input image.
        threshold (int, optional): Threshold for tissue detection. If not
            defined, Otsu's binarization will be used, which may fail for
            images with data loss or only background. Defaults to None.
        resize (int, optional): Side length the image is shrunk to before
            computing the quantile metrics. Defaults to 64.
quantiles (List[float], optional): For HSV_quantiles() and RGB_quantiles
functions. Defaults to
[.01, .05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99].
reduction (Union[str, List[str]], optional): Reduction methods for
sharpness() function. Defaults to ['max', 'median', 'mean', 'min'].
Raises:
TypeError: Invalid type for ``image``.
Returns:
dict: Dictionary of basic preprocessing metrics.
"""
if isinstance(image, Image.Image):
if image.mode != 'RGB':
image = image.convert('RGB')
image = np.array(image, dtype=np.uint8)
elif isinstance(image, np.ndarray):
image = image.astype(np.uint8)
else:
        raise TypeError('Expected {} or {} not {}.'.format(
np.ndarray, Image.Image, type(image)
))
    # Initialize the results dict and helpers.
results = {}
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
mask = tissue_mask(image, threshold=threshold)
# Background percentage.
results['background'] = (mask == 0).sum()/mask.size
# Sharpness.
results.update(sharpness(gray, reduction=reduction))
# Data loss.
results.update(data_loss(gray))
# Artifacts.
    small_img = cv2.resize(image, (resize, resize), interpolation=cv2.INTER_LANCZOS4)
    # Nearest-neighbour keeps the mask binary after resizing.
    small_mask = cv2.resize(mask, (resize, resize), interpolation=cv2.INTER_NEAREST)
results.update(HSV_quantiles(
small_img, mask=small_mask, quantiles=quantiles))
results.update(RGB_quantiles(
small_img, mask=small_mask, quantiles=quantiles))
return results
``` |
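A self-contained sketch exercising `tissue_mask` and `preprocess` on a synthetic tile. The import path assumes the package re-exports these helpers as `histoprep.preprocess.functional`, as the import in `_cutter.py` above suggests; a white background with a darker square should yield roughly a quarter tissue.
```python
import numpy as np
from histoprep.preprocess.functional import preprocess, tissue_mask

# Synthetic tile: white background with a darker "tissue" square.
tile = np.full((256, 256, 3), 255, dtype=np.uint8)
tile[64:192, 64:192] = (180, 120, 160)

mask = tissue_mask(tile, threshold=200)
print("tissue fraction:", mask.mean())  # about 0.25

metrics = preprocess(tile, threshold=200)
print(sorted(metrics)[:5])  # background, channel quantiles, data loss, ...
```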
{
"source": "jopohl/slf",
"score": 3
} |
#### File: jopohl/slf/download.py
```python
from __future__ import print_function
import os
import sys
import tarfile
import tempfile
import zipfile
try:
# noinspection PyCompatibility
from urllib.request import urlopen
except ImportError: # Python 2 legacy
# noinspection PyCompatibility,PyUnresolvedReferences
from urllib2 import urlopen
def download_to_tmp(url):
data = urlopen(url).read()
suffix = ".tar.gz" if url.endswith(".tar.gz") else ".zip" if url.endswith(".zip") else ""
fd, filename = tempfile.mkstemp(suffix=suffix)
with open(filename, "wb") as f:
f.write(data)
os.close(fd)
return filename
def extract(filename):
if filename.endswith(".tar.gz"):
tar = tarfile.open(filename, "r:gz")
tar.extractall()
tar.close()
elif filename.endswith(".zip"):
with zipfile.ZipFile(filename, 'r') as f:
f.extractall()
if __name__ == '__main__':
if sys.platform.startswith("linux"):
url = "https://github.com/jopohl/slf/releases/latest/download/slf-linux-amd64.tar.gz"
elif sys.platform.startswith("win32"):
url = "https://github.com/jopohl/slf/releases/latest/download/slf-windows-amd64.zip"
elif sys.platform.startswith("darwin"):
url = "https://github.com/jopohl/slf/releases/latest/download/slf-darwin-amd64.tar.gz"
else:
print("OS {} not supported".format(sys.platform))
sys.exit(1)
fname = download_to_tmp(url)
extract(fname)
os.remove(fname)
``` |
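One caveat with the script above: `tarfile.extractall` on an untrusted archive can write outside the working directory via `../` members. A hedged guard sketch (not part of the original script) that refuses such members before extracting:
```python
import os
import tarfile


def safe_extractall(tar: tarfile.TarFile, path: str = ".") -> None:
    """Refuse members that would escape `path` (a common extractall guard)."""
    base = os.path.realpath(path)
    for member in tar.getmembers():
        target = os.path.realpath(os.path.join(path, member.name))
        if not (target == base or target.startswith(base + os.sep)):
            raise ValueError("blocked path traversal: %s" % member.name)
    tar.extractall(path)
```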
{
"source": "jopoku/pytile2uni",
"score": 2
} |
#### File: pytile2uni/tests/test_client.py
```python
import json
import aiohttp
import pytest
from pytile import Client
from pytile.errors import RequestError
from .const import TILE_CLIENT_UUID, TILE_EMAIL, TILE_PASSWORD, TILE_USER_UUID
from .fixtures import * # noqa
# pylint: disable=protected-access
@pytest.mark.asyncio
async def test_create(event_loop):
"""Test the creation of a client."""
async with aiohttp.ClientSession(loop=event_loop) as websession:
client = Client(TILE_EMAIL, TILE_PASSWORD, websession)
assert client.client_uuid != TILE_CLIENT_UUID
@pytest.mark.asyncio
async def test_create_existing(event_loop):
"""Test the creation of a client with an existing client UUID."""
async with aiohttp.ClientSession(loop=event_loop) as websession:
client = Client(
TILE_EMAIL, TILE_PASSWORD, websession, client_uuid=TILE_CLIENT_UUID
)
assert client.client_uuid == TILE_CLIENT_UUID
@pytest.mark.asyncio
async def test_async_init(
aresponses, event_loop, fixture_create_client, fixture_create_session
):
"""Test initializing a client with a Tile session."""
aresponses.add(
"production.tile-api.com",
f"/api/v1/clients/{TILE_CLIENT_UUID}",
"put",
aresponses.Response(text=json.dumps(fixture_create_client), status=200),
)
aresponses.add(
"production.tile-api.com",
f"/api/v1/clients/{TILE_CLIENT_UUID}/sessions",
"post",
aresponses.Response(text=json.dumps(fixture_create_session), status=200),
)
async with aiohttp.ClientSession(loop=event_loop) as websession:
client = Client(
TILE_EMAIL, TILE_PASSWORD, websession, client_uuid=TILE_CLIENT_UUID
)
await client.async_init()
assert client.client_uuid == TILE_CLIENT_UUID
assert client.user_uuid == TILE_USER_UUID
@pytest.mark.asyncio
async def test_bad_endpoint(aresponses, event_loop):
"""Test that an exception is raised on a bad endpoint."""
aresponses.add(
"production.tile-api.com",
"/api/v1/bad_endpoint",
"get",
aresponses.Response(text="", status=404),
)
with pytest.raises(RequestError):
async with aiohttp.ClientSession(loop=event_loop) as websession:
client = Client(
TILE_EMAIL, TILE_PASSWORD, websession, client_uuid=TILE_CLIENT_UUID
)
await client.request("get", "bad_endpoint")
``` |
{
"source": "jopperm/circt",
"score": 2
} |
#### File: src/pycde/instance.py
```python
from __future__ import annotations
from typing import Union
from pycde.devicedb import PhysLocation, PrimitiveDB, PlacementDB
from .appid import AppID
from circt.dialects import hw, msft
import mlir.ir as ir
# TODO: bug: holds an Operation* without releasing it. Use a level of
# indirection.
class Instance:
"""Represents a _specific_ instance, unique in a design. This is in contrast
to a module instantiation within another module."""
import pycde.system as system
def __init__(self,
module: type,
instOp: msft.InstanceOp,
parent: Instance,
sys: system.System,
primdb: PrimitiveDB = None):
assert module is not None
self.module = module
self.instOp = instOp
self.parent = parent
if parent is None:
self.placedb = PlacementDB(sys._get_circt_mod(module), primdb)
assert isinstance(sys, Instance.system.System)
self.sys = sys
@property
def path(self) -> list[Instance]:
if self.parent is None:
return []
return self.parent.path + [self]
@property
def root_module(self) -> hw.HWModuleOp:
if self.parent is None:
return self.module
return self.parent.root_module
@property
def root_instance(self) -> Instance:
if self.parent is None:
return self
return self.parent.root_instance
@property
def path_attr(self) -> msft.RootedInstancePathAttr:
return msft.RootedInstancePathAttr.get(
ir.FlatSymbolRefAttr.get(self.sys._get_module_symbol(self.root_module)),
[x.name_attr for x in self.path[:-1]])
@property
def name(self):
return ir.StringAttr(self.instOp.sym_name).value
@property
def name_attr(self):
return ir.StringAttr(self.instOp.sym_name)
@property
def is_root(self):
return self.parent is None
@property
def appid(self):
return AppID(*[i.name for i in self.path])
def __repr__(self):
path_names = map(lambda i: i.name, self.path)
return "<instance: [" + ", ".join(path_names) + "]>"
def walk(self, callback):
"""Descend the instance hierarchy, calling back on each instance."""
circt_mod = self.sys._get_circt_mod(self.module)
if isinstance(circt_mod, msft.MSFTModuleExternOp):
return
for op in circt_mod.entry_block:
if not isinstance(op, msft.InstanceOp):
continue
assert "moduleName" in op.attributes
tgt_modname = ir.FlatSymbolRefAttr(op.attributes["moduleName"]).value
tgt_mod = self.sys._get_symbol_module(tgt_modname).modcls
assert tgt_mod is not None
inst = Instance(tgt_mod, op, self, self.sys)
callback(inst)
inst.walk(callback)
def _attach_attribute(self, attr_key: str, attr: ir.Attribute):
if isinstance(attr, PhysLocation):
assert attr_key.startswith("loc:")
attr = attr._loc
db = self.root_instance.placedb._db
rc = db.add_placement(attr, self.path_attr, attr_key[4:],
self.instOp.operation)
if not rc:
raise ValueError("Failed to place")
if attr_key not in self.instOp.attributes:
cases = []
else:
existing_attr = self.instOp.attributes[attr_key]
try:
inst_switch = msft.SwitchInstanceAttr(existing_attr)
cases = inst_switch.cases
except TypeError:
raise ValueError(
f"Existing attribute ({existing_attr}) is not msft.switch.inst.")
cases.append((self.path_attr, attr))
self.instOp.attributes[attr_key] = msft.SwitchInstanceAttr.get(cases)
def place(self,
subpath: Union[str, list[str]],
devtype: msft.PrimitiveType,
x: int,
y: int,
num: int = 0):
loc = msft.PhysLocationAttr.get(devtype, x, y, num)
if isinstance(subpath, list):
subpath = "|".join(subpath)
self._attach_attribute(f"loc:{subpath}", loc)
``` |
{
"source": "joppevos/airflow",
"score": 2
} |
#### File: airflow_breeze/utils/run_utils.py
```python
import contextlib
import os
import shlex
import stat
import subprocess
import sys
from distutils.version import StrictVersion
from functools import lru_cache
from pathlib import Path
from typing import List, Mapping, Optional, Union
from airflow_breeze.utils.console import get_console
from airflow_breeze.utils.path_utils import AIRFLOW_SOURCES_ROOT
def run_command(
cmd: List[str],
*,
check: bool = True,
verbose: bool = False,
dry_run: bool = False,
no_output_dump_on_exception: bool = False,
env: Optional[Mapping[str, str]] = None,
cwd: Optional[Path] = None,
input: Optional[str] = None,
**kwargs,
) -> Union[subprocess.CompletedProcess, subprocess.CalledProcessError]:
"""
    Runs a command passed as a list of strings with some extra functionality over Popen
    (kwargs accepted by Popen can be used here even if not explicitly specified).
    It prints diagnostics when requested and can also "dry_run" the commands, printing them
    rather than actually executing them.
An important factor for having this command running tool is to be able (in verbose mode) to directly
copy&paste the verbose output and run the command manually - including all the environment variables
needed to run the command.
:param cmd: command to run
    :param check: whether to check the return code and raise on failure (same as subprocess.run)
:param verbose: print commands when running
:param dry_run: do not execute "the" command - just print what would happen
:param no_output_dump_on_exception: whether to suppress printing logs from output when command fails
:param env: mapping of environment variables to set for the run command
:param cwd: working directory to set for the command
:param input: input string to pass to stdin of the process
    :param kwargs: kwargs passed to Popen
"""
workdir: str = str(cwd) if cwd else os.getcwd()
if verbose or dry_run:
command_to_print = ' '.join(shlex.quote(c) for c in cmd)
# if we pass environment variables to execute, then
env_to_print = ' '.join(f'{key}="{val}"' for (key, val) in env.items()) if env else ''
if env_to_print:
env_to_print += ' '
get_console().print(f"\n[info]Working directory {workdir} [/]\n")
# Soft wrap allows to copy&paste and run resulting output as it has no hard EOL
get_console().print(f"\n[info]{env_to_print}{command_to_print}[/]\n", soft_wrap=True)
if dry_run:
return subprocess.CompletedProcess(cmd, returncode=0)
try:
cmd_env = os.environ.copy()
if env:
cmd_env.update(env)
return subprocess.run(cmd, input=input, check=check, env=cmd_env, cwd=workdir, **kwargs)
except subprocess.CalledProcessError as ex:
if not no_output_dump_on_exception:
if ex.stdout:
get_console().print(
"[info]========================= OUTPUT start ============================[/]"
)
get_console().print(ex.stdout)
get_console().print(
"[info]========================= OUTPUT end ==============================[/]"
)
if ex.stderr:
get_console().print(
"[error]========================= STDERR start ============================[/]"
)
get_console().print(ex.stderr)
get_console().print(
"[error]========================= STDERR end ==============================[/]"
)
if check:
raise
return ex
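# Usage sketch (illustrative, not part of the original module):
#
#   result = run_command(["git", "status"], verbose=True,
#                        capture_output=True, text=True)
#   if result.returncode == 0:
#       print(result.stdout)
#
# With dry_run=True the command is only printed and a CompletedProcess with
# returncode 0 is returned, so calling code needs no special casing.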
def assert_pre_commit_installed(verbose: bool):
"""
Check if pre-commit is installed in the right version.
:param verbose: print commands when running
    :return: True if pre-commit is installed in the right version.
"""
# Local import to make autocomplete work
import yaml
pre_commit_config = yaml.safe_load((AIRFLOW_SOURCES_ROOT / ".pre-commit-config.yaml").read_text())
min_pre_commit_version = pre_commit_config["minimum_pre_commit_version"]
python_executable = sys.executable
get_console().print(f"[info]Checking pre-commit installed for {python_executable}[/]")
command_result = run_command(
[python_executable, "-m", "pre_commit", "--version"],
verbose=verbose,
capture_output=True,
text=True,
check=False,
)
if command_result.returncode == 0:
if command_result.stdout:
pre_commit_version = command_result.stdout.split(" ")[-1].strip()
if StrictVersion(pre_commit_version) >= StrictVersion(min_pre_commit_version):
get_console().print(
f"\n[success]Package pre_commit is installed. "
f"Good version {pre_commit_version} (>= {min_pre_commit_version})[/]\n"
)
else:
get_console().print(
f"\n[error]Package name pre_commit version is wrong. It should be"
f"aat least {min_pre_commit_version} and is {pre_commit_version}.[/]\n\n"
)
sys.exit(1)
else:
get_console().print(
"\n[warning]Could not determine version of pre-commit. " "You might need to update it![/]\n"
)
else:
get_console().print("\n[error]Error checking for pre-commit-installation:[/]\n")
get_console().print(command_result.stderr)
get_console().print("\nMake sure to run:\n breeze self-upgrade\n\n")
sys.exit(1)
def get_filesystem_type(filepath):
"""
Determine the type of filesystem used - we might want to use different parameters if tmpfs is used.
:param filepath: path to check
:return: type of filesystem
"""
# We import it locally so that click autocomplete works
import psutil
root_type = "unknown"
for part in psutil.disk_partitions():
if part.mountpoint == '/':
root_type = part.fstype
continue
if filepath.startswith(part.mountpoint):
return part.fstype
return root_type
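# Example (illustrative; the result depends on the host):
#
#   get_filesystem_type("/tmp")   # -> e.g. "tmpfs" on many Linux systems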
def instruct_build_image(python: str):
"""Print instructions to the user that they should build the image"""
    get_console().print(f'[warning]\nThe CI image for python version {python} may be outdated[/]\n')
    # Use the rich console so the [info] markup is rendered rather than printed verbatim.
    get_console().print(
        f"\n[info]Please run at the earliest convenience:[/]\n\nbreeze build-image --python {python}\n\n"
    )
@contextlib.contextmanager
def working_directory(source_path: Path):
"""
    Temporarily change the working directory (the equivalent of pushd/popd in a bash script).
    https://stackoverflow.com/a/42441759/3101838
    :param source_path: directory to switch into while the context is active
"""
prev_cwd = Path.cwd()
os.chdir(source_path)
try:
yield
finally:
os.chdir(prev_cwd)
def change_file_permission(file_to_fix: Path):
"""Update file permissions to not be group-writeable. Needed to solve cache invalidation problems."""
if file_to_fix.exists():
current = stat.S_IMODE(os.stat(file_to_fix).st_mode)
new = current & ~stat.S_IWGRP & ~stat.S_IWOTH # Removes group/other write permission
os.chmod(file_to_fix, new)
def change_directory_permission(directory_to_fix: Path):
"""Update directory permissions to not be group-writeable. Needed to solve cache invalidation problems."""
if directory_to_fix.exists():
current = stat.S_IMODE(os.stat(directory_to_fix).st_mode)
new = current & ~stat.S_IWGRP & ~stat.S_IWOTH # Removes group/other write permission
new = (
new | stat.S_IXGRP | stat.S_IXOTH
) # Add group/other execute permission (to be able to list directories)
os.chmod(directory_to_fix, new)
@working_directory(AIRFLOW_SOURCES_ROOT)
def fix_group_permissions(verbose: bool):
"""Fixes permissions of all the files and directories that have group-write access."""
if verbose:
get_console().print("[info]Fixing group permissions[/]")
files_to_fix_result = run_command(['git', 'ls-files', './'], capture_output=True, text=True)
if files_to_fix_result.returncode == 0:
files_to_fix = files_to_fix_result.stdout.strip().split('\n')
for file_to_fix in files_to_fix:
change_file_permission(Path(file_to_fix))
directories_to_fix_result = run_command(
['git', 'ls-tree', '-r', '-d', '--name-only', 'HEAD'], capture_output=True, text=True
)
if directories_to_fix_result.returncode == 0:
directories_to_fix = directories_to_fix_result.stdout.strip().split('\n')
for directory_to_fix in directories_to_fix:
change_directory_permission(Path(directory_to_fix))
def is_repo_rebased(repo: str, branch: str):
"""Returns True if the local branch contains latest remote SHA (i.e. if it is rebased)"""
# We import it locally so that click autocomplete works
import requests
gh_url = f"https://api.github.com/repos/{repo}/commits/{branch}"
headers_dict = {"Accept": "application/vnd.github.VERSION.sha"}
latest_sha = requests.get(gh_url, headers=headers_dict).text.strip()
rebased = False
command_result = run_command(['git', 'log', '--format=format:%H'], capture_output=True, text=True)
output = command_result.stdout.strip().splitlines() if command_result is not None else "missing"
if latest_sha in output:
rebased = True
return rebased
def check_if_buildx_plugin_installed(verbose: bool) -> bool:
"""
Checks if buildx plugin is locally available.
:param verbose: print commands when running
    :return: True if the buildx plugin is installed.
"""
is_buildx_available = False
check_buildx = ['docker', 'buildx', 'version']
    docker_buildx_version_result = run_command(
        check_buildx,
        verbose=verbose,
        no_output_dump_on_exception=True,
        capture_output=True,
        text=True,
        check=False,  # a missing plugin should yield False, not an exception
    )
if (
docker_buildx_version_result
and docker_buildx_version_result.returncode == 0
and docker_buildx_version_result.stdout != ''
):
is_buildx_available = True
return is_buildx_available
def prepare_build_command(prepare_buildx_cache: bool, verbose: bool) -> List[str]:
"""
Prepare build command for docker build. Depending on whether we have buildx plugin installed or not,
and whether we run cache preparation, there might be different results:
* if buildx plugin is installed - `docker buildx` command is returned - using regular or cache builder
depending on whether we build regular image or cache
* if no buildx plugin is installed, and we do not prepare cache, regular docker `build` command is used.
* if no buildx plugin is installed, and we prepare cache - we fail. Cache can only be done with buildx
:param prepare_buildx_cache: whether we are preparing buildx cache.
:param verbose: print commands when running
:return: command to use as docker build command
"""
build_command_param = []
is_buildx_available = check_if_buildx_plugin_installed(verbose=verbose)
if is_buildx_available:
if prepare_buildx_cache:
build_command_param.extend(["buildx", "build", "--builder", "airflow_cache", "--progress=tty"])
cmd = ['docker', 'buildx', 'inspect', 'airflow_cache']
            buildx_command_result = run_command(cmd, verbose=True, text=True, check=False)
if buildx_command_result and buildx_command_result.returncode != 0:
next_cmd = ['docker', 'buildx', 'create', '--name', 'airflow_cache']
run_command(next_cmd, verbose=True, text=True, check=False)
else:
build_command_param.extend(["buildx", "build", "--builder", "default", "--progress=tty"])
else:
if prepare_buildx_cache:
get_console().print(
'\n[error] Buildx cli plugin is not available and you need it to prepare buildx cache. \n'
)
get_console().print(
'[error] Please install it following https://docs.docker.com/buildx/working-with-buildx/ \n'
)
sys.exit(1)
build_command_param.append("build")
return build_command_param
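# Illustrative composition (not from the original module): the returned
# fragment is spliced into a full docker invocation by the caller, e.g.
#
#   build_params = prepare_build_command(prepare_buildx_cache=False, verbose=True)
#   run_command(["docker", *build_params, "-t", "my-image", "."])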
@lru_cache(maxsize=None)
def commit_sha():
"""Returns commit SHA of current repo. Cached for various usages."""
command_result = run_command(['git', 'rev-parse', 'HEAD'], capture_output=True, text=True, check=False)
if command_result.stdout:
return command_result.stdout.strip()
else:
return "COMMIT_SHA_NOT_FOUND"
def filter_out_none(**kwargs) -> dict:
"""Filters out all None values from parameters passed."""
for key in list(kwargs):
if kwargs[key] is None:
kwargs.pop(key)
return kwargs
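# Example (illustrative):
#
#   filter_out_none(a=1, b=None, c="x")   # -> {'a': 1, 'c': 'x'}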
``` |
{
"source": "joppevos/maya_to_houdini",
"score": 2
} |
#### File: joppevos/maya_to_houdini/Maya_Exporter.py
```python
import maya.cmds as cmds
import json
import os
import maya.mel as mel
def list_all_lamps():
lamps = cmds.ls(selection=True)
    if not lamps:
        cmds.confirmDialog(title='Confirm', message='Please select any Light')
        return []  # return an empty list so callers do not iterate over None
    return lamps
def attribute_generator(attributes, lamps):
    lamp_dict = [
        {attr: cmds.getAttr('{}.{}'.format(lamp, attr)) for attr in attributes}
        for lamp in lamps
    ]
filepath = cmds.file(q=True, sn=True)
filename = os.path.basename(filepath)
raw_name, extension = os.path.splitext(filename)
for dicts, name in zip(lamp_dict, lamps):
dicts['name'] = name
dicts['filename'] = raw_name
return lamp_dict
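# Illustrative result shape (values invented): one dict per selected lamp, e.g.
#   [{'intensity': 1.0, 'translate': [(0.0, 5.0, 0.0)], 'name': 'keyLight1',
#     'filename': 'shot010_lighting'}, ...]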
def ask_filepath_location():
basicFilter = "*.json"
filepath = cmds.fileDialog2(fileFilter=basicFilter, dialogStyle=2)
return filepath
def write_attributes(*args):
""" Write out the attributes in json and fbx"""
attrdict = write_json()
    filepaths = ask_filepath_location()
    if not filepaths:
        return  # user cancelled the file dialog
    filename = ''.join(filepaths)
    with open(filename, 'w') as f:
        f.write(attrdict)
write_fbx(filename)
cmds.confirmDialog(title='LightExporter', message='Lights have been exported')
def write_fbx(filename):
path = os.path.dirname(filename)
fbxpath = '{}/'.format(path) + 'scene' + '.fbx'
    mel.eval('FBXExportBakeComplexAnimation -v true;')  # enable baked animation (-q only queries the flag)
mel.eval('FBXExport -f "{}" -s'.format(fbxpath)) # remove -s to export all
def world_duplicater(*arg):
""" bake lamps to world space and remove from parent"""
lamps = cmds.ls(selection=True)
bakelist = []
for lamp in lamps:
par = cmds.listRelatives(lamp, parent=True)
        if not par:
            continue
        duplicated_lamps = cmds.duplicate(lamp, name=lamp + '_bakedToWorld', rc=True, rr=True)
        children = cmds.listRelatives(duplicated_lamps, c=True, pa=True)[1:]
        for child in children:
            cmds.delete(child)
        tobake = cmds.parent(duplicated_lamps, w=True)
        bakelist.append(tobake)
        cmds.parentConstraint(lamp, tobake, mo=False)
        cmds.scaleConstraint(lamp, tobake, mo=False)
# get Start and End Frame of Time Slider
startframe = cmds.playbackOptions(q=True, minTime=True)
endframe = cmds.playbackOptions(q=True, maxTime=True)
for i in bakelist:
cmds.bakeResults(i, t=(startframe, endframe))
cmds.delete(i[0] + '*Constraint*')
cmds.confirmDialog(title='Duplicater', message='Baked and duplicated child lights to worldscale')
def write_json():
attributes = ['scale', 'rotate', 'translate', 'intensity', 'color', 'affectsDiffuse', 'affectsSpecular',
'areaVisibleInRender', 'areaBidirectional', 'volumeRayContributionScale', 'exposure', 'areaShape']
attr = json.dumps(attribute_generator(attributes, list_all_lamps()))
return attr
def launch_interface():
""" menu to start function with buttons"""
cmds.window(width=250, title='Light Exporter')
cmds.columnLayout(adjustableColumn=True)
cmds.button(label='Step1. Bake and duplicate selected lights', command=world_duplicater)
cmds.button(label='Step2. Export selected lights', command=write_attributes)
cmds.showWindow()
if __name__ == '__main__':
launch_interface()
``` |
{
"source": "joppev/Project_D2",
"score": 3
} |
#### File: opstartData/Python_scripts/Sensoren.py
```python
import RPi.GPIO as GPIO
import time
import MySQLdb
from datetime import datetime
tijd = datetime.now()
gemeten_afstand = 0
kade_naam = ""
kade_vrij = True
database = MySQLdb.connect(
host="192.168.3.11", # your host, usually localhost
user="wodran", # your username
passwd="<PASSWORD>", # your password
db="project2D") # name of the data base
mycursor = database.cursor()
#GPIO Mode (BOARD / BCM)
GPIO.setmode(GPIO.BCM)
#set GPIO Pins
GPIO_TRIGGER1 = 18
GPIO_TRIGGER2 = 17
GPIO_ECHO1 = 24
GPIO_ECHO2 = 27
#set GPIO direction (IN / OUT)
GPIO.setup(GPIO_TRIGGER1, GPIO.OUT)
GPIO.setup(GPIO_TRIGGER2, GPIO.OUT)
GPIO.setup(GPIO_ECHO1, GPIO.IN)
GPIO.setup(GPIO_ECHO2, GPIO.IN)
#set state variables
teller1 = 0
teller2 = 0
kade_vrij1 = True
kade_vrij2 = True
def distance1():
# set Trigger to HIGH
GPIO.output(GPIO_TRIGGER1, True)
# set Trigger after 0.01ms to LOW
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER1, False)
StartTime1 = time.time()
StopTime1 = time.time()
# save StartTime
while GPIO.input(GPIO_ECHO1) == 0:
StartTime1 = time.time()
# save time of arrival
while GPIO.input(GPIO_ECHO1) == 1:
StopTime1 = time.time()
# time difference between start and arrival
TimeElapsed1 = StopTime1 - StartTime1
# multiply with the sonic speed (34300 cm/s)
# and divide by 2, because there and back
distance1 = (TimeElapsed1 * 34300) / 2
return distance1
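# Worked example: an echo round trip of 0.006 s gives
# (0.006 s * 34300 cm/s) / 2 = 102.9 cm to the reflecting surface.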
def distance2():
# set Trigger to HIGH
GPIO.output(GPIO_TRIGGER2, True)
# set Trigger after 0.01ms to LOW
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER2, False)
StartTime2 = time.time()
StopTime2 = time.time()
# save StartTime
while GPIO.input(GPIO_ECHO2) == 0:
StartTime2 = time.time()
# save time of arrival
while GPIO.input(GPIO_ECHO2) == 1:
StopTime2 = time.time()
# time difference between start and arrival
TimeElapsed2 = StopTime2 - StartTime2
# multiply with the sonic speed (34300 cm/s)
# and divide by 2, because there and back
distance2 = (TimeElapsed2 * 34300) / 2
return distance2
if __name__ == '__main__':
try:
while True:
dist1 = distance1()
print ("Gemeten afstand sensor 1 = %.1f cm" % dist1)
if dist1 < 100:
teller1 = teller1 + 1
else:
teller1 = 0
if teller1 > 10:
kade_vrij1 = True
else:
kade_vrij1 = False
kade_vrij = kade_vrij1
gemeten_afstand = dist1
kade_naam = "kade1"
print (kade_naam)
print (gemeten_afstand)
print (kade_vrij)
print (tijd)
sql = "INSERT INTO sensordatas (kadeNaam, afstand, kadeVrij, tijdstip) VALUES (%s, %s, %s, %s)"
val = (kade_naam, gemeten_afstand, kade_vrij, tijd)
mycursor.execute(sql, val)
database.commit()
print(mycursor.rowcount, "Waardes toegevoegd!")
time.sleep(1)
            dist2 = distance2()
print ("Gemeten afstand sensor 3 = %.1f cm" % dist2)
if dist2 < 100:
teller2 = teller2 + 1
else:
teller2 = 0
if teller2 > 10:
kade_vrij2 = True
else:
kade_vrij2 = False
kade_vrij = kade_vrij2
gemeten_afstand = dist2
kade_naam = "kade3"
print (kade_naam)
print (gemeten_afstand)
print (kade_vrij)
print (tijd)
sql = "INSERT INTO sensordatas (kadeNaam, afstand, kadeVrij, tijdstip) VALUES (%s, %s, %s, %s)"
val = (kade_naam, gemeten_afstand, kade_vrij, tijd)
mycursor.execute(sql, val)
database.commit()
print(mycursor.rowcount, "Waardes toegevoegd!")
time.sleep(5)
# Reset by pressing CTRL + C
except KeyboardInterrupt:
print("Meting gestopt door User")
database.close()
GPIO.cleanup()
``` |
{
"source": "joppich/flask.aws-api",
"score": 3
} |
#### File: flask.aws-api/app/auth.py
```python
import os
from uuid import uuid1
from flask_httpauth import HTTPTokenAuth
from flask import g
auth = HTTPTokenAuth(scheme='Token')
access_dict = {os.environ.get('APP_AUTH_TOKEN', '<PASSWORD>'): str(uuid1())}
@auth.verify_token
def verify_token(token):
if token in access_dict:
g.current_user = access_dict[token]
return True
else:
return False
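# Illustrative client call (the endpoint path is an assumption):
#
#   curl -H "Authorization: Token $APP_AUTH_TOKEN" https://example.com/api/resource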
``` |
{
"source": "joppich/flask.jsonstore",
"score": 2
} |
#### File: flask.jsonstore/app/__init__.py
```python
import os
from flask import Flask, json
from sqlalchemy_utils import database_exists
def insert_sample_data():
import requests
from app.models import Document
url = 'https://api.github.com/users/moby/repos'
sample_data = json.loads(requests.get(url).content.decode('utf-8'))
for x in sample_data:
d = Document(doc=x)
d.create(d)
def create_app():
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('APP_DB_URI')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['DEBUG'] = os.environ.get('APP_DEBUG', False)
from app.models import db
db.init_app(app)
with app.app_context():
try:
db.create_all()
        except Exception:
            # Tables may already exist; swallowing the error keeps repeated
            # startups from failing.
            pass
from .api import bp as api_blueprint
app.register_blueprint(api_blueprint)
return app
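# Minimal run sketch (assumes APP_DB_URI is set in the environment):
#
#   app = create_app()
#   app.run()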
``` |