# Quick Script to execute spark-submit commands
#!/usr/bin/env python
import requests
from HTMLParser import HTMLParser

from config import HTTP_HEADERS


def new_or_revised(pub_id):
    resp = requests.get(
        'http://eprint.iacr.org/eprint-bin/versions.pl?entry=' + pub_id,
        headers=HTTP_HEADERS
    )
    if resp.status_code != 200:
        # try again
        resp = requests.get(
            'http://eprint.iacr.org/eprint-bin/versions.pl?entry=' + pub_id,
            headers=HTTP_HEADERS
        )
        if resp.status_code != 200:
            raise Exception(
                'new_or_revised request (' + pub_id + ') error: ' +
                str(resp.status_code) + '\n\n' + resp.text)
    if resp.text.count('posted') > 1:
        return 'revised'
    else:
        return 'new'


class EPrintParser(HTMLParser):

    def __init__(self):
        HTMLParser.__init__(self)
        self.in_main_content = False
        self.data_type = None
        self.entry = None
        self.list_entries = []

    def feed(self, data):
        HTMLParser.feed(self, data)
        return self.list_entries

    def handle_starttag(self, tag, attrs):
        if tag == 'dl':
            self.in_main_content = True
            return
        if not self.in_main_content:
            return
        if tag == 'dt':
            # a new <dt> starts a new entry; flush the previous one first
            if self.entry:
                self.list_entries.append(self.entry)
            self.entry = dict()
        elif tag == 'a':
            self.data_type = 'link'
        elif tag == 'b':
            self.data_type = 'title'
        elif tag == 'em':
            self.data_type = 'authors'

    def handle_endtag(self, tag):
        if tag == 'dl':
            self.in_main_content = False
            if self.entry:
                self.list_entries.append(self.entry)
                self.entry = None
            assert self.data_type is None
        elif tag in ('a', 'em', 'b'):
            self.data_type = None

    def handle_data(self, data):
        if not self.in_main_content:
            return
        if data in ('PDF', 'PS', 'PS.GZ') and self.data_type == 'link':
            self.entry['update_type'] = \
                new_or_revised(self.entry['pub_id'])
            return
        elif 'withdrawn' in data and self.data_type is None:
            self.entry['update_type'] = 'withdrawn'
            return
        if self.data_type == 'link':
            self.entry['pub_id'] = data
        elif self.data_type:
            if self.data_type in self.entry:
                self.entry[self.data_type] += data
            else:
                self.entry[self.data_type] = data

    def handle_charref(self, data):
        data = '&#' + data + ';'
        if self.data_type:
            if self.data_type in self.entry:
                self.entry[self.data_type] += HTMLParser().unescape(data)
            else:
                self.entry[self.data_type] = HTMLParser().unescape(data)


if __name__ == '__main__':
    req = requests.get(
        'http://eprint.iacr.org/eprint-bin/search.pl?last=7&title=1')
    my_parser = EPrintParser()
    entries = my_parser.feed(req.text)
    entry = entries[0]
    print type(entry['authors'])
    from pprint import pprint
    pprint(entry)
    print entry['authors']
    pprint(entries)
"""Tests for Kamereon models.""" from typing import cast from tests import get_response_content from renault_api.kamereon import models from renault_api.kamereon import schemas FIXTURE_PATH = "tests/fixtures/kamereon/vehicle_data" TEST_UPDATE = { "id": 1, "tuesday": {"startTime": "T12:00Z", "duration": 15}, } def test_for_json() -> None: """Test for updating charge settings.""" response: models.KamereonVehicleDataResponse = get_response_content( f"{FIXTURE_PATH}/charging-settings.json", schemas.KamereonVehicleDataResponseSchema, ) response.raise_for_error_code() vehicle_data = cast( models.KamereonVehicleChargingSettingsData, response.get_attributes(schemas.KamereonVehicleChargingSettingsDataSchema), ) # Check that for_json returns the same as the original data for_json = { "schedules": list(schedule.for_json() for schedule in vehicle_data.schedules) } assert for_json == { "schedules": [ { "id": 1, "activated": True, "monday": {"startTime": "T12:00Z", "duration": 15}, "tuesday": {"startTime": "T04:30Z", "duration": 420}, "wednesday": {"startTime": "T22:30Z", "duration": 420}, "thursday": {"startTime": "T22:00Z", "duration": 420}, "friday": {"startTime": "T12:15Z", "duration": 15}, "saturday": {"startTime": "T12:30Z", "duration": 30}, "sunday": {"startTime": "T12:45Z", "duration": 45}, } ] } # Check that for_json returns the same as the original data for_json = { "schedules": list(schedule.for_json() for schedule in vehicle_data.schedules) } assert for_json == { "schedules": [ { "id": 1, "activated": True, "monday": {"startTime": "T12:00Z", "duration": 15}, "tuesday": {"startTime": "T04:30Z", "duration": 420}, "wednesday": {"startTime": "T22:30Z", "duration": 420}, "thursday": {"startTime": "T22:00Z", "duration": 420}, "friday": {"startTime": "T12:15Z", "duration": 15}, "saturday": {"startTime": "T12:30Z", "duration": 30}, "sunday": {"startTime": "T12:45Z", "duration": 45}, } ] } vehicle_data.update(TEST_UPDATE) assert vehicle_data.schedules[0].tuesday.startTime == "T12:00Z" assert vehicle_data.schedules[0].tuesday.duration == 15 for_json = { "schedules": list(schedule.for_json() for schedule in vehicle_data.schedules) } assert for_json == { "schedules": [ { "id": 1, "activated": True, "monday": {"startTime": "T12:00Z", "duration": 15}, "tuesday": {"startTime": "T12:00Z", "duration": 15}, "wednesday": {"startTime": "T22:30Z", "duration": 420}, "thursday": {"startTime": "T22:00Z", "duration": 420}, "friday": {"startTime": "T12:15Z", "duration": 15}, "saturday": {"startTime": "T12:30Z", "duration": 30}, "sunday": {"startTime": "T12:45Z", "duration": 45}, } ] }
''' Copyright (c) Project TemperStat. All rights reserved. by Aditya Borgaonkar & Sahil Gothoskar, 2020. https://github.com/adityaborgaonkar https://github.com/SahilGothoskar ''' import csv import smtplib from email.mime.multipart import MIMEMultipart from email.mime.base import MIMEBase from email.mime.text import MIMEText from email.utils import COMMASPACE from email import encoders from datetime import datetime from flask import render_template, url_for, request import Adafruit_DHT pin = 17 sensor = Adafruit_DHT.DHT11 from flask import Flask app = Flask(__name__) data_temp = [] data_hum = [] data_time = [] @app.route('/mail', methods = ['POST', 'GET']) def mail(): # [email protected] # [email protected] SUBJECT = 'IOT Project :: TemperStat Readings' FILENAME = 'readings.csv' FILEPATH = 'readings.csv' MY_EMAIL = '[email protected]' MY_PASSWORD = 'temperstat20' TO_EMAIL = request.form['email'] toemail = TO_EMAIL SMTP_SERVER = 'smtp.gmail.com' SMTP_PORT = 587 msg = MIMEMultipart() msg['From'] = MY_EMAIL msg['To'] = COMMASPACE.join([TO_EMAIL]) msg['Subject'] = SUBJECT part = MIMEBase('application', "octet-stream") part.set_payload(open(FILEPATH, "rb").read()) encoders.encode_base64(part) part.add_header('Content-Disposition', 'attachment', filename=FILENAME) msg.attach(part) smtpObj = smtplib.SMTP(SMTP_SERVER, SMTP_PORT) smtpObj.ehlo() smtpObj.starttls() smtpObj.login(MY_EMAIL, MY_PASSWORD) smtpObj.sendmail(MY_EMAIL, TO_EMAIL, msg.as_string()) smtpObj.quit() return render_template("mail.html",toemail=toemail) @app.route('/', methods = ['POST', 'GET']) def index(): nowtime = datetime.now() nowtime = nowtime.strftime("%H:%M:%S") temperature, humidity = sensor_1() data_temp.append(temperature) data_hum.append(humidity) data_time.append(nowtime) temperature_max = max(data_temp) humidity_max = max(data_hum) temperature_min = min(data_temp) humidity_min = min(data_hum) with open('readings.csv', 'a') as file: writer = csv.writer(file) writer.writerow([nowtime,temperature, humidity]) return render_template("index.html",temperature=temperature, humidity=humidity,data_temp=data_temp,data_time=data_time,data_hum=data_hum,temperature_max=temperature_max,humidity_max=humidity_max,temperature_min=temperature_min,humidity_min=humidity_min) def sensor_1(): humidity, temperature = Adafruit_DHT.read_retry(sensor, pin) if humidity is not None and temperature is not None: return temperature, humidity if __name__ == "__main__": app.run(debug=True)
import torch.cuda as cuda import torch.optim as optim import torch.utils.trainer as trainer import torch.utils.trainer.plugins import torch.utils.data as tudata import torch.nn as nn from torch.autograd import Variable import torchvision as tv import torchvision.transforms as tforms import torchvision.datasets as dsets import torchvision.models as tmodels from workspace_utils import active_session, keep_awake from collections import OrderedDict import argparse def get_input_args(): parser = argparse.ArgumentParser() # Create command line arguments using add_argument() from ArguementParser method #default = 'data/', parser.add_argument('data_directory', type = str, help = 'path to the data folder') parser.add_argument('--arch', type = str, default = 'vgg16', help = 'Set the Model Architecture to use. Can be any of the following alexnet, vgg13, vgg16, resnet18, squeezenet1_0, densenet161') parser.add_argument('--save_dir', type = str, default = '~/opt/opmat_save_dir', help = 'Set directory to save checkpoints') parser.add_argument('--learning_rate', type = float, default = 0.001, help = 'Set the Learning Rate') parser.add_argument('--hidden_units', type = int, default = 4096, help = 'Set the hidden unit to use') parser.add_argument('--epochs', type = int, default = 3, help = 'Set the Epoch to use') parser.add_argument('--gpu', action='store_true', dest='gpu', default=False) return parser.parse_args() def get_data(data_dir): train_dir = data_dir + '/train' valid_dir = data_dir + '/valid' test_dir = data_dir + '/test' train_transforms = tforms.Compose([ tforms.RandomResizedCrop(224), tforms.RandomHorizontalFlip(), tforms.ToTensor(), tforms.Normalize(mean = [ 0.485, 0.456, 0.406 ], std = [ 0.229, 0.224, 0.225 ]) ]) odata_transforms = tforms.Compose([ tforms.Resize(256), tforms.CenterCrop(224), tforms.ToTensor(), tforms.Normalize(mean = [ 0.485, 0.456, 0.406 ], std = [ 0.229, 0.224, 0.225 ]) ]) train_datasets = dsets.ImageFolder(train_dir, train_transforms) valid_datasets = dsets.ImageFolder(valid_dir, odata_transforms) test_datasets = dsets.ImageFolder(test_dir, odata_transforms) train_loader = tudata.DataLoader(train_datasets, batch_size=64, shuffle=True) valid_loader = tudata.DataLoader(valid_datasets, batch_size=32) test_loader = tudata.DataLoader(test_datasets, batch_size=32) return train_loader, train_datasets, train_datasets.class_to_idx, valid_loader, test_loader def get_model(arch, nlabels, hidden_units, learning_rate): if arch == 'vgg16': model = tmodels.vgg16(pretrained=True) elif arch == 'vgg13': model = tmodels.vgg13(pretrained=True) elif arch == 'alexnet': model = tmodels.alexnet(pretrained=True) elif arch == 'resnet18': model = tmodels.resnet18(pretrained=True) elif arch == 'squeezenet1_0': model = tmodels.squeezenet1_0(pretrained=True) elif arch == 'densenet161': model = tmodels.densenet161(pretrained=True) else: raise ValueError('Unspected network architecture ', arch) #Freeze Parameters for param in model.parameters(): param.requires_grad = False model.classifier = nn.Sequential(nn.Linear(25088, hidden_units), nn.ReLU(), nn.Dropout(0.2), nn.Linear(hidden_units, hidden_units), nn.ReLU(), nn.Dropout(0.3), nn.Linear(hidden_units, nlabels), nn.LogSoftmax(dim=1)) criterion = nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate) return model, model.classifier, criterion, optimizer def train_model(model, criterion, optimizer, trainloader, validloader, epochs=3, print_interval=40, device='cpu'): # if args.gpu and torch.cuda.is_available(): # model.cuda() 
model.to(device=device) step = 0 with active_session(): for epoch in range(epochs): model.train() loss=0 match = 0 allitems = 0 for ct, (images, labels) in enumerate(trainloader): model.train() # if args.gpu and torch.cuda.is_available(): # images, labels = images.to("cuda"), labels.to("cuda") # else: # images, labels = images.to("cpu"), labels.to("cpu") images, labels = images.to(device=device), labels.to(device=device) step += 1 optimizer.zero_grad() outputs = model.forward(images) tloss = criterion(outputs, labels) _, predicted = torch.max(outputs.data, 1) allitems += labels.size(0) match += (predicted == labels).sum().item() tloss.backward() optimizer.step() loss += tloss.item() if step % print_interval == 0: taccuracy = (100 * match / allitems) validation = validate_model(model, criterion, validloader, device) print(f"Epoch: {epoch+1}/{epochs} ", "Training Loss: {:.3f}.. ".format(loss/print_interval), "Training Accuracy: {:.2f}%.. ".format(taccuracy), "Valid Loss: {:.3f}.. ".format(validation['loss']), "Valid Accuracy: {:.3f}".format(validation['accuracy']), "Valid Accuracy: {:.3f}%".format(validation['nacc'])) def validate_model(model, criterion, validloader, device): # if args.gpu and torch.cuda.is_available(): # model.cuda() model.to(device=device) model.eval() accuracy = 0 loss = 0 match = 0 allitems = 0 for ctt, (images, labels) in enumerate(validloader): # if args.gpu and torch.cuda.is_available(): # images, labels = images.to("cuda"), labels.to("cuda") # else: # images, labels = images.to("cpu"), labels.to("cpu") images, labels = images.to(device=device), labels.to(device=device) # forward pass with torch.no_grad(): outputs = model.forward(images) # calculate loss vloss = criterion(outputs, labels) loss += vloss.item() ps = torch.exp(outputs) equality = (labels == ps.max(dim = 1)[1]) accuracy += equality.type(torch.float64).mean().item() _, predicted = torch.max(outputs.data, 1) allitems += labels.size(0) match += (predicted == labels).sum().item() ret = {'loss': loss/ len(validloader), 'accuracy' : accuracy / len(validloader), 'nacc' : (100 * match / allitems)} return ret def save_checkpoint(model, optimizer, save_dir, class_to_idx, classifier, arch, epochs, print_interval): model.class_to_idx = class_to_idx checkpoint = { 'model_arch' : arch, 'epochs' : epochs, 'print_interval' : print_interval, 'optimizer_state' : optimizer.state_dict(), 'class_to_idx' : model.class_to_idx, 'model_state': model.state_dict(), # 'loss' : dloss, 'classifier': classifier } torch.save(checkpoint, save_dir + '/my_checkpoint.ckpt') def loadModelCheckpoint(checkpointfile='./my_checkpoint.ckpt'): # Load checkpoint from file checkpoint = torch.load(checkpointfile) vgg16model = tmodels.vgg16(pretrained=True) for param in vgg16model.parameters(): param.requires_grad = False vgg16model.classifier = checkpoint['classifier'] vgg16model.load_state_dict(checkpoint['model_state']) criterion = nn.NLLLoss() optimizer = optim.Adam(vgg16model.classifier.parameters(), lr=0.001) optimizer.load_state_dict(checkpoint['optimizer_state']) epoch = checkpoint['epochs'] loss = checkpoint['loss'] class_to_idx = checkpoint['class_to_idx'] return vgg16model, optimizer, epoch, loss, class_to_idx def main(): args = get_input_args() args.device = None if args.gpu and torch.cuda.is_available(): args.device = torch.device('cuda') else: args.device = torch.device('cpu') #Load data from provided data_directory train_loader, train_datasets, class_to_idx, valid_loader, test_loader = get_data(args.data_directory) # print(args) # 
print(train_datasets) # exit() model, classifier, criterion, optimizer = get_model(args.arch, len(train_datasets.classes), args.hidden_units, args.learning_rate) #Run Training train_model(model, criterion, optimizer, train_loader, valid_loader, epochs=args.epochs, print_interval=40, device=args.device) # Save the checkpoint to save_dir if(args.save_dir != None): save_checkpoint(model, optimizer, args.save_dir, class_to_idx, classifier, args.arch, args.epochs, print_interval=40) if __name__ == '__main__': main()
INT = 0
BYTES = 1
STRING = 2
BOOL = 3
ARRAY = 4
DICT = 5

B_INT = b'\x00'
B_BYTES = b'\x01'
B_STRING = b'\x02'
B_BOOL = b'\x03'
B_ARRAY = b'\x04'
B_DICT = b'\x05'


def encode_int(n):
    if n == 0:
        return b'\x00'
    else:
        return bytes([n % 256]) + encode_int(n // 256)


def consume_int(index, bstring):
    result = 0
    factor = 1
    while bstring[index] != 0:
        result += bstring[index] * factor
        factor *= 256
        index += 1
    return index + 1, result


def encode_bytes(string):
    return encode_int(len(string)) + string


def consume_bytes(index, bstring):
    index, length = consume_int(index, bstring)
    result = bstring[index : index + length]
    return index + length, result


def encode_string(string):
    return encode_int(len(string)) + string.encode()


def consume_string(index, bstring):
    index, bstr = consume_bytes(index, bstring)
    return index, bstr.decode()


def encode_bool(b):
    return b'\x01' if b else b'\x00'


def consume_bool(index, bstring):
    return index + 1, (bstring[index] == 1)


def encode_array(array):
    return encode_int(len(array)) + b''.join(encode_any(obj) for obj in array)


def consume_array(index, bstring):
    index, length = consume_int(index, bstring)
    arr = []
    for i in range(length):
        index, item = consume_any(index, bstring)
        arr.append(item)
    return index, arr


def encode_dict(d):
    result = b''
    result += encode_int(len(d))
    for key in d:
        result += encode_string(key)
        result += encode_any(d[key])
    return result


def consume_dict(index, bstring):
    index, length = consume_int(index, bstring)
    result = {}
    for i in range(length):
        index, key = consume_string(index, bstring)
        index, value = consume_any(index, bstring)
        result[key] = value
    return index, result


def encode_any(data):
    if type(data) == int:
        return B_INT + encode_int(data)
    elif type(data) == bytes:
        return B_BYTES + encode_bytes(data)
    elif type(data) == bool:
        return B_BOOL + encode_bool(data)
    elif type(data) == str:
        return B_STRING + encode_string(data)
    elif type(data) == list:
        return B_ARRAY + encode_array(data)
    elif type(data) == dict:
        return B_DICT + encode_dict(data)


def consume_any(index, bstring):
    if bstring[index] == INT:
        return consume_int(index + 1, bstring)
    elif bstring[index] == BYTES:
        return consume_bytes(index + 1, bstring)
    elif bstring[index] == STRING:
        return consume_string(index + 1, bstring)
    elif bstring[index] == BOOL:
        return consume_bool(index + 1, bstring)
    elif bstring[index] == ARRAY:
        return consume_array(index + 1, bstring)
    elif bstring[index] == DICT:
        return consume_dict(index + 1, bstring)


def encode(obj):
    return encode_any(obj)


def decode(bstring):
    _, result = consume_any(0, bstring)
    return result
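# A minimal round-trip sketch for the codec above: encode a nested structure
# and check that decode() recovers it. The payload is made up for illustration;
# note that encode_int() terminates on a zero byte, so values that are exact
# multiples of 256 are avoided here.
if __name__ == '__main__':
    payload = {'name': 'node-1', 'ports': [80, 443], 'secure': True, 'blob': b'\x01\x02'}
    assert decode(encode(payload)) == payload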
# -*- coding: utf-8 -*-
import random

from fixture.orm import ORMFixture
from model.group import Contact, Group

db = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")


def test_add_contact_in_group(app):
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="NewGroup"))
    groups = db.get_group_list()
    group = random.choice(groups)
    old_contacts_in_group = db.get_contacts_in_group(group)
    if len(db.get_contact_list()) == 0 or len(db.get_contact_list()) == len(old_contacts_in_group):
        app.contact.fill_new(Contact(firstname="NEWSContact"))
    contacts = db.get_contacts_not_in_group(group)
    contact = random.choice(contacts)
    app.contact.add_contact_in_group(contact, group)
    new_contact_in_group = db.get_contacts_in_group(group)
    assert len(old_contacts_in_group) + 1 == len(new_contact_in_group)
    old_contacts_in_group.append(contact)
    assert sorted(old_contacts_in_group, key=Contact.id_or_max) == sorted(new_contact_in_group, key=Contact.id_or_max)
import sys

sys.stderr = open(snakemake.log[0], "w")

import pandas as pd


def aggregate(sm_input, sm_output, samples):
    single_outputs = []
    for file_path, sample in zip(sm_input, samples):
        single_deep_arg_output = pd.read_csv(file_path, sep="\t")
        single_deep_arg_output["sample"] = sample
        single_outputs.append(single_deep_arg_output)
    deep_arg_output = pd.concat(single_outputs, ignore_index=True)
    deep_arg_output.to_csv(sm_output, sep="\t", index=False)


if __name__ == "__main__":
    samples = snakemake.params.get("samples", "")
    aggregate(snakemake.input, snakemake.output[0], samples)
""" The splash screen of the game. The first thing the user sees. """ import pygame as pg from .. import prepare, state_machine class Splash(state_machine._State): """This State is updated while our game shows the splash screen.""" def __init__(self): state_machine._State.__init__(self) self.next = "TITLE" self.timeout = 5 self.alpha = 0 self.alpha_speed = 2 #Alpha change per frame self.image = prepare.GFX["misc"]['splash1'].copy().convert() self.image.set_alpha(self.alpha) self.rect = self.image.get_rect(center=prepare.SCREEN_RECT.center) def update(self, keys, now): """Updates the splash screen.""" self.now = now self.alpha = min(self.alpha+self.alpha_speed, 255) self.image.set_alpha(self.alpha) if self.now-self.start_time > 1000.0*self.timeout: self.done = True def draw(self, surface, interpolate): surface.fill(prepare.BACKGROUND_COLOR) surface.blit(self.image, self.rect) def get_event(self, event): """ Get events from Control. Changes to next state on any key press. """ self.done = event.type == pg.KEYDOWN
from mongoengine import Document from mongoengine import ListField from mongoengine import StringField from mongoengine import IntField import numpy as np import cPickle as pickle from sklearn import svm class User(Document): email = StringField(required=True, max_length=50) first_name = StringField(max_length=50) last_name = StringField(max_length=50) def __unicode__(self): return self.email class Classifier(Document): name = StringField(primary_key=True, max_length=50) last_training_set_length = IntField(default=0) #created_by = ReferenceField(User) #TODO: create this field classes = ListField(StringField(required=True, max_length=50)) classifier = StringField() X = ListField(ListField()) Y = ListField() def __init__(self, *args, **kwargs): Document.__init__(self, *args, **kwargs) if self.classifier: self.clf = pickle.loads(str(self.classifier)) else: self.clf = None def __unicode__(self): return self.name def classify(self, data): if not self.clf: self.train() self.clf.probability = True return self.clf.predict(data) def train(self): x = self.X y = self.Y if not len(x): #TODO: raise and process an exception return x, y = map(np.array, (x, y)) clf = svm.SVC(probability=True) clf.fit(x, y) self.classifier = pickle.dumps(clf) self.last_training_set_length = len(self.X) self.save() def is_trained_recently(self): return self.additions_since_last_training() == 0 def additions_since_last_training(self): return len(self.X) - self.last_training_set_length
from django.contrib import admin

from .models import User, Income, Transaction, Budget

# Register your models here.
admin.site.register(User)
admin.site.register(Income)
admin.site.register(Transaction)
# admin.site.register(Budget)
def acceleration():
    initial_velocity = float(input("What is the initial velocity?: "))
    final_velocity = float(input("What is the final velocity?: "))
    starting_time = float(input("What is the starting time?: "))
    ending_time = float(input("What is the ending time?: "))

    delta_t = ending_time - starting_time
    delta_v = final_velocity - initial_velocity
    acceleration = delta_v / delta_t
    print(f"acceleration = {acceleration} m/s^2")


acceleration()
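# A quick non-interactive check of the same formula, a = delta_v / delta_t, with
# made-up numbers: going from 5 m/s to 20 m/s over 10 s gives 1.5 m/s^2.
# The helper name below is purely illustrative.
def acceleration_from(initial_velocity, final_velocity, starting_time, ending_time):
    return (final_velocity - initial_velocity) / (ending_time - starting_time)


assert acceleration_from(5, 20, 0, 10) == 1.5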
from unittest import TestCase

from tests.integration.it_utils import submit_transaction
from tests.integration.reusable_values import FEE, WALLET
from xrpl.models.transactions import SignerEntry, SignerListSet
from xrpl.wallet import Wallet


class TestSignerListSet(TestCase):
    def test_add_signer(self):
        # sets up another signer for this account
        other_signer = Wallet.create()

        response = submit_transaction(
            SignerListSet(
                account=WALLET.classic_address,
                sequence=WALLET.next_sequence_num,
                fee=FEE,
                signer_quorum=1,
                signer_entries=[
                    SignerEntry(
                        account=other_signer.classic_address,
                        signer_weight=1,
                    ),
                ],
            ),
            WALLET,
        )
        self.assertTrue(response.is_successful())
# Write a program that reads a username and a password and does not accept a
# password equal to the username, showing an error message and asking for the
# information again.

usuario = input("Enter the username: ")
senha = input("Enter the password: ")

while usuario == senha:
    print("Invalid value - the username and password cannot be the same")
    usuario = input("Enter the username: ")
    senha = input("Enter the password: ")

print("You entered it correctly")
import theano import theano.tensor as T import lasagne from lasagne.layers import * class NLLELayer(lasagne.layers.Layer): #Negative Log Likelihood Estimator Layer def get_output_for(self, input, **kwargs): return -T.mean(T.log(input)) def get_output_shape_for(self, shape, **kwargs): return () class PixelClassificationLayer(lasagne.layers.Layer): def get_output_for(self, input, **kwargs): (bs, cc, h, w) = input.shape tmp = input.transpose((0,2,3,1)) tmp = tmp.reshape((bs*h*w, cc)) tmp = T.nnet.nnet.softmax(tmp) output = tmp.reshape((bs, h, w, cc)) return output def get_output_shape_for(self, shape): return (shape[0], shape[2], shape[3], shape[1]) class StableSoftMaxCrossEntropyLayer(lasagne.layers.MergeLayer): def __init__(self, incomings, axis=1, **kwargs): super(StableSoftMaxCrossEntropyLayer, self).__init__(incomings, **kwargs) self.axis = axis def get_output_for(self, inputs, **kwargs): input, target = inputs in_dev = input - input.max(axis=self.axis, keepdims=True) log_softmax = in_dev - T.log(T.sum(T.exp(in_dev), axis=self.axis, keepdims=True)) cross_entropy = -T.sum(target * log_softmax, axis=self.axis) return T.mean(cross_entropy) def get_output_shape_for(self, input_shape): return () class StableBinaryCrossEntropyLayer(lasagne.layers.Layer): def __init__(self, incoming, target, **kwargs): super(StableBinaryCrossEntropyLayer, self).__init__(incoming, **kwargs) self.target = get_output(target) def get_output_for(self, input, **kwargs): small = T.log(1+T.exp(input)) - input*self.target big = (1-self.target)*input + T.log(1+T.exp(-input)) stable = T.switch(T.lt(input, 0), small, big) return stable#T.mean(stable) def get_output_shape_for(self, input_shape): return (1,) class SoftIoULayer(lasagne.layers.MergeLayer): def get_output_for(self, inputs, **kwargs): #inputs: dt.shape=(bs, sq, h, w), gt.shape=(bs, sq, h, w) #return: shape=() dt, gt = inputs tmp = (dt * gt).sum(axis=(2,3)) return (tmp/((dt + gt).sum(axis=(2,3)) - tmp)).mean() def get_output_shape_for(self, shapes, **kwargs): return () class MultipleSoftIoULayer(lasagne.layers.MergeLayer): #SoftIoU like in https://arxiv.org/pdf/1605.09410.pdf def __init__(self, incomings, **kwargs): if len(incomings) != 2: raise ValueError('We need [dt, gt] as input') super(MultipleSoftIoULayer, self).__init__(incomings, **kwargs) def get_output_for(self, inputs, **kwargs): #inputs: dt.shape=(bs, sq1, h, w), gt.shape=(bs,sq2, h, w) #return: shape=(bs, sq1, sq2) dt, gt = inputs dt = dt.dimshuffle([0,1,'x',2,3]) gt = gt.dimshuffle([0,'x',1,2,3]) tmp = (dt * gt).sum(axis=(3,4)) return tmp/((dt + gt).sum(axis=(3,4)) - tmp) def get_output_shape_for(self, shapes, **kwargs): dts, gts = shapes s0 = dts[0] s1 = dts[1] s2 = gts[1] return (s0, s1, s2)
import lzma


def compress(path: str, content):
    f = lzma.open(path, 'w')
    f.write(bytes(content, encoding='utf-8'))
    f.close()


def decompress(path):
    f = lzma.open(path, 'r')
    result = f.read()
    f.close()
    return result
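# A minimal round-trip sketch for the helpers above; 'example.xz' is just an
# illustrative file name.
if __name__ == '__main__':
    compress('example.xz', 'hello, world')
    data = decompress('example.xz')  # lzma.open in 'r' mode returns bytes
    assert data.decode('utf-8') == 'hello, world'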
import frappe from awesome_cart.compat.customer import get_current_customer from erpnext.stock.doctype.stock_entry.stock_entry_utils import make_stock_entry from frappe import _ from frappe.contacts.doctype.contact.contact import get_default_contact from frappe.core.doctype.role.role import get_emails_from_role from frappe.desk.form import assign_to from frappe.model.mapper import get_mapped_doc def set_missing_values(warranty_claim, method): if not warranty_claim.customer: customer = get_current_customer() warranty_claim.customer = customer.name else: customer = frappe.get_doc("Customer", warranty_claim.customer) warranty_claim.update({ "customer_name": customer.customer_name, "contact_person": get_default_contact("Customer", customer.name) }) if not warranty_claim.contact_email: warranty_claim.contact_email = customer.email_id if not warranty_claim.contact_mobile: warranty_claim.contact_mobile = customer.mobile_no if not warranty_claim.serial_no and frappe.db.exists("Serial No", warranty_claim.unlinked_serial_no): warranty_claim.serial_no = warranty_claim.unlinked_serial_no if warranty_claim.serial_no: serial_no = frappe.get_doc("Serial No", warranty_claim.serial_no) warranty_claim.update({ "item_code": serial_no.item_code, "item_name": serial_no.item_name, "item_group": serial_no.item_group, "description": serial_no.description, "warranty_amc_status": serial_no.maintenance_status, "warranty_expiry_date": serial_no.warranty_expiry_date, "amc_expiry_date": serial_no.amc_expiry_date, "is_under_warranty": serial_no.maintenance_status in ["Under Warranty", "Under AMC"] }) def validate_missing_serial_no(warranty_claim, method): if warranty_claim.item_group == "Custom": if not (warranty_claim.serial_no or warranty_claim.unlinked_serial_no): frappe.throw(_("Custom products must have a serial number")) def validate_serial_no_warranty(serial_no, method): # Remove warranty period for old manufactured items that are not in the system if serial_no.purchase_document_no: if frappe.db.get_value("Stock Entry", serial_no.purchase_document_no, "purpose") != "Manufacture": serial_no.warranty_period = None def set_iem_owner(warranty_claim, method): if warranty_claim.item_group and warranty_claim.item_group != "Custom": warranty_claim.iem_owner = None return serial_no = warranty_claim.serial_no or warranty_claim.unlinked_serial_no if serial_no: impression_id = frappe.db.get_value("Serial No", serial_no, "impression_id") if not impression_id: # Split the serial number to retrieve the IID (serial number format: JH{IEM model shorthand}-{IID}-{count}) impression_id = serial_no.split("-") impression_id = impression_id[1] if len(impression_id) > 1 else impression_id[0] try: impression_id = int(impression_id) except ValueError: return if impression_id: if frappe.db.exists("Serial No", serial_no): frappe.db.set_value("Serial No", serial_no, "impression_id", impression_id) iem_owner = frappe.get_all("IEM Owner", or_filters={"impression_id": impression_id, "old_impression_id": impression_id}) if iem_owner: frappe.db.set_value("Serial No", serial_no, "iem_owner", iem_owner[0].name) if impression_id: iem_owner = frappe.get_all("IEM Owner", or_filters={"impression_id": impression_id, "old_impression_id": impression_id}) if iem_owner: warranty_claim.iem_owner = iem_owner[0].name else: warranty_claim.iem_owner = None else: warranty_claim.iem_owner = None def assign_warranty_claim(warranty_claim, method): if not frappe.get_all("ToDo", filters={"reference_type": "Warranty Claim", "reference_name": warranty_claim.name}): 
repair_settings = frappe.get_doc("Repair Settings") user_emails = [] for notification in repair_settings.notification_settings: if notification.status == warranty_claim.status: if notification.user: user_emails.append(notification.user) if notification.role: user_emails.extend(get_emails_from_role(notification.role)) if notification.cc: notification.cc = notification.cc.replace(",", "\n") user_emails.extend(notification.cc.split("\n")) user_emails = list(set(user_emails)) admin_email = frappe.db.get_value("User", "Administrator", "email") if admin_email in user_emails: user_emails.remove(admin_email) for user in user_emails: assign_to.add({ 'assign_to': user, 'doctype': "Warranty Claim", 'name': warranty_claim.name, 'description': "Service Request {0} just moved to the '{1}' status".format(warranty_claim.name, warranty_claim.status), 'priority': 'Medium', 'notify': 1 }) def receive_stock_item(warranty_claim, method): if warranty_claim.item_received and warranty_claim.item_code: create_stock_entry(warranty_claim) def set_shipping_date(dti_shipment_note, method): warranty_claim = frappe.db.get_value("Delivery Note", dti_shipment_note.delivery_note, "warranty_claim") if warranty_claim: warranty_claim = frappe.get_doc("Warranty Claim", warranty_claim) if method == "on_submit": warranty_claim.shipping_date = frappe.utils.now_datetime() elif method == "on_cancel": warranty_claim.shipping_date = None warranty_claim.save() def complete_work_order(stock_entry, method): if method == "on_submit": if stock_entry.purpose == "Material Transfer for Manufacture": warranty_claim = frappe.db.get_value("Work Order", stock_entry.work_order, "warranty_claim") if warranty_claim: update_fields = { "produced_qty": 1, "status": "Completed" } frappe.db.set_value("Work Order", {"warranty_claim": warranty_claim}, update_fields, val=None) frappe.db.commit() warranty_claim = frappe.get_doc("Warranty Claim", warranty_claim) if warranty_claim.status == "Repairing": warranty_claim.status = "To Deliver" warranty_claim.resolution_date = frappe.utils.now_datetime() warranty_claim.save() def create_stock_entry(warranty_claim): to_warehouse = frappe.db.get_single_value("Repair Settings", "default_incoming_warehouse") serial_no = warranty_claim.serial_no or warranty_claim.unlinked_serial_no stock_entry = make_stock_entry(item_code=warranty_claim.item_code, qty=1, to_warehouse=to_warehouse, serial_no=serial_no, do_not_save=True) for item in stock_entry.items: item.warranty_claim = warranty_claim.name # Include the cable and case in the stock receipt, if entered if warranty_claim.cable: stock_entry.append("items", { "item_code": warranty_claim.cable, "t_warehouse": to_warehouse, "qty": 1 }) if warranty_claim.case: stock_entry.append("items", { "item_code": warranty_claim.case, "t_warehouse": to_warehouse, "qty": 1 }) for item in stock_entry.items: item.allow_zero_valuation_rate = True stock_entry.insert() stock_entry.submit() if not warranty_claim.serial_no: warranty_claim.db_set("serial_no", serial_no) if not warranty_claim.item_received: warranty_claim.db_set("item_received", True) warranty_claim.reload() return stock_entry.name def flush_raw_materials_for_repair(stock_entry, method): if method == "on_submit": new_se = frappe.new_doc("Stock Entry") consumption_warehouse = frappe.db.get_single_value("Repair Settings", "default_consumption_warehouse") new_se.update({ "purpose": "Material Issue", "work_order": stock_entry.work_order, "from_bom": 1, "fg_completed_qty": 1, "from_warehouse": frappe.db.get_single_value("Repair 
Settings", "default_consumption_warehouse"), "reference_stock_entry": stock_entry.name }) consumption_items = [item.as_dict() for item in stock_entry.items if item.t_warehouse == consumption_warehouse] if consumption_items: for c_item in consumption_items: c_item.s_warehouse = consumption_warehouse c_item.t_warehouse = None new_se.set("items", consumption_items) new_se.save() new_se.submit() elif method == "on_cancel": if stock_entry.purpose == "Material Transfer for Manufacture": existing_se = frappe.db.get_value("Stock Entry", filters={"reference_stock_entry": stock_entry.name}) if existing_se: existing_se = frappe.get_doc("Stock Entry", existing_se) existing_se.cancel() existing_se.delete() def make_mapped_doc(target_dt, source_dn, target_doc, target_cdt=None, filters=None, field_map=None, postprocess=None, child_postprocess=None, check_for_existing=True): if not field_map: field_map = {} if not filters: filters = {"warranty_claim": source_dn, "docstatus": 1} table_map = { "Warranty Claim": { "doctype": target_dt, "field_map": field_map } } if target_cdt: table_map.update({ "Warranty Claim Services": { "doctype": target_cdt, "field_map": field_map, "postprocess": child_postprocess } }) # Multiple sales orders and stock entries can be made against Warranty Claim if check_for_existing: if frappe.get_all(target_dt, filters=filters): frappe.throw(_("A {0} document already exists for this request.".format(target_dt))) return get_mapped_doc("Warranty Claim", source_dn, table_map, target_doc, postprocess=postprocess) def get_wc_dashboard_data(data): if not data: return frappe._dict({ 'fieldname': 'warranty_claim', 'non_standard_fieldnames': {}, 'internal_links': {}, 'transactions': [ { 'label': _('Reference'), 'items': ['Quotation', 'Sales Order'] }, { 'label': _('Stock'), 'items': ['Stock Entry'] }, { 'label': _('Work'), 'items': ['Work Order'] }, { 'label': _('Fulfilment'), 'items': ['Sales Invoice', 'Delivery Note'] } ] }) return data
"""Ugly numbers are numbers whose only prime factors are 2, 3 or 5. The sequence 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, … shows the first 11 ugly numbers. By convention, 1 is included. Write a program to find Nth Ugly Number. Example 1: Input: N = 10 Output: 12 Explanation: 10th ugly number is 12. Example 2: Input: N = 4 Output: 4 Explanation: 4th ugly number is 4.""" class Solution: def __init__(self): self.p2 = 0 self.p3 = 0 self.p5 = 0 self.minimum = 0 def getUglyNumber(self, n): p2 = self.p2 p3 = self.p3 p5 = self.p5 minimum = self.minimum ugly_numbers_list = [] ugly_numbers_list.append(1) for _ in range(0, n): num2 = 2 * ugly_numbers_list[p2] num3 = 3 * ugly_numbers_list[p3] num5 = 5 * ugly_numbers_list[p5] minimum = min(num2, num3, num5) ugly_numbers_list.append(minimum) if num2 == minimum: p2 += 1 if num3 == minimum: p3 += 1 if num5 == minimum: p5 += 1 return ugly_numbers_list[n-1] def main(): ugn = Solution() print(ugn.getUglyNumber(1)) if __name__ == "__main__": main()
""" API file for discussion app consists of the viewsets for the apis in the discussion app """ from django.utils import timezone from rest_framework import status from rest_framework.authentication import TokenAuthentication, BasicAuthentication from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import detail_route, list_route from rest_framework.response import Response from rest_framework import viewsets from discussion import functions from discussion.models import DiscussionThread from discussion.serializers import DiscussionThreadSerializer from login.models import UserProfile class DiscussionViewSet(viewsets.ViewSet): """ Viewset for creating and retrieving discussion threads """ authentication_classes = (TokenAuthentication, BasicAuthentication) permission_classes = (IsAuthenticated,) @list_route(methods=['post']) def add(self, request): """ Create a new discussion thread --- # YAML parameters: - name: title description: Title for the thread required: true type: string paramType: form - name: description description: description for the thread required: true type: string paramType: form """ response = functions.add_discussion_thread(request) return Response(response, status=status.HTTP_200_OK) @list_route() def get(self, request): """ Get a list of all discussion threads --- # YAML parameters: - name: page description: page no. of the results type: string paramType: query """ response = functions.get_discussion_list(request) return Response(response, status=status.HTTP_200_OK) @list_route() def tags(self, request): """ Get a list of all Tags matching the given query --- # YAML parameters: - name: query type: string paramType: query """ response = functions.get_tags(request.GET["query"]) return Response(response, status=status.HTTP_200_OK) @detail_route(methods=['post']) def reply(self, request, pk): """ Post a reply to a discussion thread --- # YAML parameters: - name: text description: Title for the thread required: true type: string paramType: form """ response = functions.add_reply(pk, request) return Response(response, status=status.HTTP_200_OK) @detail_route() def replies(self, request, pk): """ Get all replies of a discussion thread --- # YAML parameters: - name: page description: page no. of the results type: string paramType: query """ response = functions.get_replies(pk, request) return Response(response, status=status.HTTP_200_OK)
from pathlib import Path from fhir.resources.codesystem import CodeSystem from oops_fhir.utils import CodeSystemConcept __all__ = ["AdditionalMaterialCodes"] _resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json")) class AdditionalMaterialCodes: """ Additional Material Codes This value set includes sample additional material type codes. Status: draft - Version: 4.0.1 Copyright This is an example set. http://hl7.org/fhir/additionalmaterials """ xray = CodeSystemConcept({"code": "xray", "definition": "XRay", "display": "XRay"}) """ XRay XRay """ image = CodeSystemConcept( {"code": "image", "definition": "Image", "display": "Image"} ) """ Image Image """ email = CodeSystemConcept( {"code": "email", "definition": "Email", "display": "Email"} ) """ Email Email """ model = CodeSystemConcept( {"code": "model", "definition": "Model", "display": "Model"} ) """ Model Model """ document = CodeSystemConcept( {"code": "document", "definition": "Document", "display": "Document"} ) """ Document Document """ other = CodeSystemConcept( {"code": "other", "definition": "Other", "display": "Other"} ) """ Other Other """ class Meta: resource = _resource
import os
import time

import zc.lockfile

from dvc.exceptions import DvcException


class LockError(DvcException):
    pass


class Lock(object):
    LOCK_FILE = 'lock'
    TIMEOUT = 5

    def __init__(self, dvc_dir, name=LOCK_FILE):
        self.lock_file = os.path.join(dvc_dir, name)
        self._lock = None

    @staticmethod
    def init(dvc_dir):
        return Lock(dvc_dir)

    def _do_lock(self):
        try:
            self._lock = zc.lockfile.LockFile(self.lock_file)
        except zc.lockfile.LockError:
            raise LockError('Cannot perform the cmd since DVC is busy and '
                            'locked. Please retry the cmd later.')

    def lock(self):
        try:
            self._do_lock()
            return
        except LockError:
            time.sleep(self.TIMEOUT)
            self._do_lock()

    def unlock(self):
        self._lock.close()
        self._lock = None

    def __enter__(self):
        self.lock()

    def __exit__(self, type, value, tb):
        self.unlock()
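# A minimal usage sketch: Lock works as a context manager, so the repository
# stays locked for the duration of the block ('.dvc' is an illustrative
# directory name and must already exist).
def _lock_example():
    with Lock('.dvc'):
        pass  # work that must not run concurrently with other DVC commands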
##############################################################################
# Copyright (c) 2021, Oak Ridge National Laboratory                          #
# All rights reserved.                                                       #
#                                                                            #
# This file is part of HydraGNN and is distributed under a BSD 3-clause      #
# license. For the licensing terms see the LICENSE file in the top-level     #
# directory.                                                                 #
#                                                                            #
# SPDX-License-Identifier: BSD-3-Clause                                      #
##############################################################################

import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import ModuleList
from torch_geometric.nn import GINConv, BatchNorm

from .Base import Base


class GINStack(Base):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def get_conv(self, input_dim, output_dim):
        return GINConv(
            nn.Sequential(
                nn.Linear(input_dim, output_dim),
                nn.ReLU(),
                nn.Linear(output_dim, output_dim),
            ),
            eps=100.0,
            train_eps=True,
        )

    def __str__(self):
        return "GINStack"
# from bw_processing import ( # as_unique_attributes, # chunked, # COMMON_DTYPE, # create_package, # create_datapackage_metadata, # create_structured_array, # create_processed_datapackage, # format_calculation_resource, # greedy_set_cover, # NAME_RE, # ) # from copy import deepcopy # import pytest # import tempfile # def test_format_calculation_resource(): # given = { # "path": "basic_array", # "name": "test-name", # "matrix": "technosphere", # "description": "some words", # "foo": "bar", # } # expected = { # "format": "npy", # "mediatype": "application/octet-stream", # "path": "basic_array.npy", # "name": "test-name", # "profile": "data-resource", # "matrix": "technosphere", # "description": "some words", # "foo": "bar", # } # assert format_calculation_resource(given) == expected # def test_calculation_package(): # resources = [ # { # "name": "first-resource", # "path": "some-array.npy", # "matrix": "technosphere", # "data": [ # tuple(list(range(11)) + [False, False]), # tuple(list(range(12, 23)) + [True, True]), # ], # } # ] # with tempfile.TemporaryDirectory() as td: # fp = create_package( # name="test-package", resources=resources, path=td, replace=False # ) # # Test data in fp # def test_calculation_package_directory(): # resources = [ # { # "name": "first-resource", # "path": "some-array.npy", # "matrix": "technosphere", # "data": [ # tuple(list(range(11)) + [False, False]), # tuple(list(range(12, 23)) + [True, True]), # ], # } # ] # with tempfile.TemporaryDirectory() as td: # fp = create_package( # name="test-package", resources=resources, path=td, compress=False # ) # # Test data in fp # def test_calculation_package_in_memory(): # resources = [ # { # "name": "first-resource", # "path": "some-array.npy", # "matrix": "technosphere", # "data": [ # tuple(list(range(11)) + [False, False]), # tuple(list(range(12, 23)) + [True, True]), # ], # } # ] # fp = create_package(name="test-package", resources=resources) # # Test data in fp # def test_calculation_package_replace(): # resources = [ # { # "name": "first-resource", # "path": "some-array.npy", # "matrix": "technosphere", # "data": [ # tuple(list(range(11)) + [False, False]), # tuple(list(range(12, 23)) + [True, True]), # ], # } # ] # with tempfile.TemporaryDirectory() as td: # create_package( # name="test-package", resources=deepcopy(resources), path=td # ) # create_package( # name="test-package", resources=deepcopy(resources), path=td, replace=True # ) # def test_calculation_package_replace_error(): # resources = [ # { # "name": "first-resource", # "path": "some-array.npy", # "matrix": "technosphere", # "data": [ # tuple(list(range(11)) + [False, False]), # tuple(list(range(12, 23)) + [True, True]), # ], # } # ] # with tempfile.TemporaryDirectory() as td: # create_package( # name="test-package", resources=deepcopy(resources), path=td # ) # with pytest.raises(ValueError): # create_package( # name="test-package", # resources=deepcopy(resources), # path=td, # replace=False, # ) # def test_calculation_package_name_conflict(): # pass # def test_calculation_package_specify_id(): # pass # def test_calculation_package_metadata(): # pass
# -*- coding: utf-8 -*-

from .api import Api
from .exceptions import PushException, GetException
"""Add git parameters to model Revision ID: ebbfb7d55f62 Revises: 75933cf6bdb9 Create Date: 2019-04-01 14:50:29.492904 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = "ebbfb7d55f62" down_revision = "75933cf6bdb9" branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column("models", sa.Column("git_active_branch", sa.String(), nullable=True)) op.add_column("models", sa.Column("git_commit_hash", sa.String(), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column("models", "git_commit_hash") op.drop_column("models", "git_active_branch") # ### end Alembic commands ###
#------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. #-------------------------------------------------------------------------- import os from azure.mgmt.network import NetworkManagementClient from azure.mgmt.resource import ResourceManagementClient from azure.common.credentials import ServicePrincipalCredentials #-------------------------------------------------------------------------- # credentials from environment #-------------------------------------------------------------------------- SUBSCRIPTION_ID = os.environ['AZURE_SUBSCRIPTION_ID'] TENANT_ID = os.environ['AZURE_TENANT'] CLIENT_ID = os.environ['AZURE_CLIENT_ID'] CLIENT_SECRET = os.environ['AZURE_SECRET'] #-------------------------------------------------------------------------- # variables #-------------------------------------------------------------------------- AZURE_LOCATION = 'eastus' RESOURCE_GROUP = "myResourceGroup" VIRTUAL_WAN_NAME = "myVirtualWan" VIRTUAL_HUB_NAME = "myVirtualHub" #-------------------------------------------------------------------------- # management clients #-------------------------------------------------------------------------- credentials = ServicePrincipalCredentials( client_id=CLIENT_ID, secret=CLIENT_SECRET, tenant=TENANT_ID ) mgmt_client = NetworkManagementClient(credentials, SUBSCRIPTION_ID) resource_client = ResourceManagementClient(credentials, SUBSCRIPTION_ID) #-------------------------------------------------------------------------- # resource group (prerequisite) #-------------------------------------------------------------------------- print("Creating Resource Group") resource_client.resource_groups.create_or_update(resource_group_name=RESOURCE_GROUP, parameters={ 'location': AZURE_LOCATION }) #-------------------------------------------------------------------------- # /VirtualWans/put/VirtualWANCreate[put] #-------------------------------------------------------------------------- print("VirtualWANCreate") BODY = { "location": AZURE_LOCATION, "tags": { "key1": "value1" }, "disable_vpn_encryption": False, "type": "Basic" } result = mgmt_client.virtual_wans.create_or_update(resource_group_name=RESOURCE_GROUP, virtual_wan_name=VIRTUAL_WAN_NAME, wan_parameters=BODY) result = result.result() #-------------------------------------------------------------------------- # /VirtualHubs/put/VirtualHubPut[put] #-------------------------------------------------------------------------- print("VirtualHubPut") BODY = { "location": AZURE_LOCATION, "tags": { "key1": "value1" }, "virtual_wan": { "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualWans/" + VIRTUAL_WAN_NAME }, "address_prefix": "10.168.0.0/24", "sku": "Basic" } result = mgmt_client.virtual_hubs.create_or_update(resource_group_name=RESOURCE_GROUP, virtual_hub_name=VIRTUAL_HUB_NAME, virtual_hub_parameters=BODY) result = result.result() #-------------------------------------------------------------------------- # /VirtualHubs/get/VirtualHubGet[get] #-------------------------------------------------------------------------- print("VirtualHubGet") result = mgmt_client.virtual_hubs.get(resource_group_name=RESOURCE_GROUP, virtual_hub_name=VIRTUAL_HUB_NAME) #-------------------------------------------------------------------------- # /VirtualHubs/get/VirtualHubListByResourceGroup[get] 
#-------------------------------------------------------------------------- print("VirtualHubListByResourceGroup") result = mgmt_client.virtual_hubs.list_by_resource_group(resource_group_name=RESOURCE_GROUP) #-------------------------------------------------------------------------- # /VirtualHubs/get/VirtualHubList[get] #-------------------------------------------------------------------------- print("VirtualHubList") result = mgmt_client.virtual_hubs.list() #-------------------------------------------------------------------------- # /VirtualHubs/patch/VirtualHubUpdate[patch] #-------------------------------------------------------------------------- print("VirtualHubUpdate") TAGS = { "key1": "value1", "key2": "value2" } result = mgmt_client.virtual_hubs.update_tags(resource_group_name=RESOURCE_GROUP, virtual_hub_name=VIRTUAL_HUB_NAME, tags=TAGS) #-------------------------------------------------------------------------- # /VirtualHubs/delete/VirtualHubDelete[delete] #-------------------------------------------------------------------------- print("VirtualHubDelete") result = mgmt_client.virtual_hubs.delete(resource_group_name=RESOURCE_GROUP, virtual_hub_name=VIRTUAL_HUB_NAME) result = result.result()
from scipy.spatial.distance import cosine import numpy as np import cv2 import mtcnn from keras.models import load_model from utils import get_face, plt_show, get_encode, load_pickle, l2_normalizer from sklearn.svm import SVC from sklearn.metrics import accuracy_score from sklearn.preprocessing import LabelEncoder from utils import * from numpy import asarray from numpy import expand_dims from datetime import datetime encoder_model = 'data/models/facenet_keras.h5' people_dir = 'data/people' encodings_path = 'data/encodings/encodings.pkl' test_img_path = 'data/test/friends.jpg' test_res_path = 'data/results/friends.jpg' recognition_t = 0.3 required_size = (160, 160) encoding_dict = load_pickle(encodings_path) face_detector = mtcnn.MTCNN() face_encoder = load_model(encoder_model) in_encoder = Normalizer(norm='l2') model = SVC(kernel='linear' , probability=True) y = [] X = [] for key in encoding_dict.keys(): y.append(key) print(y) for value in encoding_dict.values(): value = l2_normalizer.transform(value.reshape(1, -1))[0] value = value.reshape(1, -1) value = value[0] X.append(value) # label encode targets out_encoder = LabelEncoder() out_encoder.fit(y) y = out_encoder.transform(y) print(X) model.fit(X,y) def recognize(img, detector, encoder, encoding_dict,i, recognition_t=0.5, confidence_t=0.99, required_size=(160, 160), ): img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) results = detector.detect_faces(img_rgb) for res in results: if res['confidence'] < confidence_t: continue face, pt_1, pt_2 = get_face(img_rgb, res['box']) encode = get_encode(encoder, face, required_size) encode = l2_normalizer.transform(encode.reshape(1, -1))[0] encode = encode.reshape(1, -1) name = 'unknown' yhat_class = model.predict(encode) print(yhat_class) class_index = yhat_class[0] yhat_prob = model.predict_proba(encode) print(yhat_prob) class_probability = yhat_prob[0, class_index] * 100 predict_names = out_encoder.inverse_transform(yhat_class) distance = float("inf") for db_name, db_encode in encoding_dict.items(): # r_score = accuracy_score(i, r) dist = cosine(db_encode, encode) if dist < recognition_t and dist < distance: name = db_name distance = dist if name == 'unknown': cv2.rectangle(img, pt_1, pt_2, (0, 0, 255), 2) cv2.putText(img, name, pt_1, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1) else: cv2.rectangle(img, pt_1, pt_2, (0, 255, 0), 2) cv2.putText(img, predict_names[0] + f'_probability_{class_probability:.2f}', (pt_1[0], pt_1[1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 200, 200), 2) markAttendance(predict_names[0]) return img # img = cv2.imread(test_img_path) # plt_show(img) def markAttendance(name): with open('Attendance.csv','r+') as f: myDataList = f.readlines() nameList =[] for line in myDataList: entry = line.split(',') nameList.append(entry[0]) if name not in line: now = datetime.now() dt_string = now.strftime("%H:%M:%S") f.writelines(f'\n{name},{dt_string}') vc = cv2.VideoCapture(0) i=-1 while vc.isOpened(): ret, frame = vc.read() if ret : i+=1 if not ret: print("no frame:(") break frame = recognize(frame, face_detector, face_encoder, encoding_dict,i) cv2.imshow('camera', frame) if cv2.waitKey(1) & 0xFF == ord('q'): codec = cv2.VideoWriter_fourcc() out = cv2.VideoWriter(FLAGS.output, codec, frame, (160, 160)) break # # cv2.imwrite(test_res_path, img) # plt_show(img) print('2') print('3')
from functools import partial from typing import Any, Callable, Tuple import numpy as np import jax from jax import numpy as jnp from jax import tree_map from netket import jax as nkjax from netket import config from netket.stats import Stats, statistics, mean from netket.utils import mpi from netket.utils.types import PyTree from netket.utils.dispatch import dispatch, TrueT, FalseT from netket.operator import ( DiscreteOperator, AbstractSuperOperator, local_cost_function, local_value_cost, Squared, _der_local_values_jax, ContinuousOperator, ) from .mc_state import MCState from .mc_mixed_state import MCMixedState from .mc_expect import local_value_kernel, local_value_squared_kernel def _check_hilbert(A, B): if A.hilbert != B.hilbert: raise NotImplementedError( # pragma: no cover f"Non matching hilbert spaces {A.hilbert} and {B.hilbert}" ) # pure state, squared operator @dispatch def expect_and_grad( vstate: MCState, Ô: Squared[DiscreteOperator], use_covariance: TrueT, mutable: Any, ) -> Tuple[Stats, PyTree]: _check_hilbert(vstate, Ô) Ô = Ô.parent σ = vstate.samples σp, mels = Ô.get_conn_padded(np.asarray(σ.reshape((-1, σ.shape[-1])))) Ō, Ō_grad, new_model_state = grad_expect_operator_kernel( vstate.sampler.machine_pow, vstate._apply_fun, local_value_squared_kernel, mutable, vstate.parameters, vstate.model_state, vstate.samples, σp, mels, ) if mutable is not False: vstate.model_state = new_model_state return Ō, Ō_grad # mixed state, squared super-operator @dispatch def expect_and_grad( # noqa: F811 vstate: MCMixedState, Ô: Squared[AbstractSuperOperator], use_covariance: TrueT, mutable: Any, ) -> Tuple[Stats, PyTree]: _check_hilbert(vstate, Ô) Ô = Ô.parent σ = vstate.samples σp, mels = Ô.get_conn_padded(np.asarray(σ.reshape((-1, σ.shape[-1])))) Ō, Ō_grad, new_model_state = grad_expect_operator_Lrho2( vstate._apply_fun, mutable, vstate.parameters, vstate.model_state, vstate.samples, σp, mels, ) if mutable is not False: vstate.model_state = new_model_state return Ō, Ō_grad # mixed state, hermitian operator @dispatch.multi( (MCState, DiscreteOperator, TrueT, Any), (MCMixedState, AbstractSuperOperator, TrueT, Any), ) def expect_and_grad( # noqa: F811 vstate: MCState, Ô: DiscreteOperator, use_covariance: TrueT, mutable: Any, ) -> Tuple[Stats, PyTree]: _check_hilbert(vstate, Ô) σ = vstate.samples σp, mels = Ô.get_conn_padded(np.asarray(σ.reshape((-1, σ.shape[-1])))) Ō, Ō_grad, new_model_state = grad_expect_hermitian( vstate._apply_fun, mutable, vstate.parameters, vstate.model_state, σ, σp, mels, ) if mutable is not False: vstate.model_state = new_model_state return Ō, Ō_grad # mixed state, non-hermitian operator @dispatch def expect_and_grad( # noqa: F811 vstate: MCState, Ô: DiscreteOperator, use_covariance: FalseT, mutable: Any, ) -> Tuple[Stats, PyTree]: _check_hilbert(vstate, Ô) σ = vstate.samples σp, mels = Ô.get_conn_padded(np.asarray(σ.reshape((-1, σ.shape[-1])))) Ō, Ō_grad, new_model_state = grad_expect_operator_kernel( vstate.sampler.machine_pow, vstate._apply_fun, local_value_kernel, mutable, vstate.parameters, vstate.model_state, vstate.samples, σp, mels, ) if mutable is not False: vstate.model_state = new_model_state return Ō, Ō_grad @dispatch def expect_and_grad( # noqa: F811 vstate: MCState, Ô: ContinuousOperator, use_covariance: Any, mutable: Any, ) -> Tuple[Stats, PyTree]: _check_hilbert(vstate, Ô) x = vstate.samples kernel = Ô._expect_kernel Ō, Ō_grad = _grad_expect_continuous( vstate._apply_fun, kernel, vstate.parameters, Ô._pack_arguments(), vstate.model_state, x, ) return Ō, 
Ō_grad @partial(jax.jit, static_argnums=(0, 1)) def grad_expect_hermitian( model_apply_fun: Callable, mutable: bool, parameters: PyTree, model_state: PyTree, σ: jnp.ndarray, σp: jnp.ndarray, mels: jnp.ndarray, ) -> Tuple[PyTree, PyTree]: σ_shape = σ.shape if jnp.ndim(σ) != 2: σ = σ.reshape((-1, σ_shape[-1])) n_samples = σ.shape[0] * mpi.n_nodes O_loc = local_cost_function( local_value_cost, model_apply_fun, {"params": parameters, **model_state}, σp, mels, σ, ) Ō = statistics(O_loc.reshape(σ_shape[:-1]).T) O_loc -= Ō.mean # Then compute the vjp. # Code is a bit more complex than a standard one because we support # mutable state (if it's there) is_mutable = mutable is not False _, vjp_fun, *new_model_state = nkjax.vjp( lambda w: model_apply_fun({"params": w, **model_state}, σ, mutable=mutable), parameters, conjugate=True, has_aux=is_mutable, ) Ō_grad = vjp_fun(jnp.conjugate(O_loc) / n_samples)[0] Ō_grad = jax.tree_multimap( lambda x, target: (x if jnp.iscomplexobj(target) else x.real).astype( target.dtype ), Ō_grad, parameters, ) new_model_state = new_model_state[0] if is_mutable else None return Ō, tree_map(lambda x: mpi.mpi_sum_jax(x)[0], Ō_grad), new_model_state @partial(jax.jit, static_argnums=(1, 2, 3)) def grad_expect_operator_kernel( machine_pow: int, model_apply_fun: Callable, local_kernel: Callable, mutable: bool, parameters: PyTree, model_state: PyTree, σ: jnp.ndarray, σp: jnp.ndarray, mels: jnp.ndarray, ) -> Tuple[PyTree, PyTree, Stats]: if not config.FLAGS["NETKET_EXPERIMENTAL"]: raise RuntimeError( """ Computing the gradient of a squared or non hermitian operator is an experimental feature under development and is known not to return wrong values sometimes. If you want to debug it, set the environment variable NETKET_EXPERIMENTAL=1 """ ) σ_shape = σ.shape if jnp.ndim(σ) != 2: σ = σ.reshape((-1, σ_shape[-1])) has_aux = mutable is not False # if not has_aux: # out_axes = (0, 0) # else: # out_axes = (0, 0, 0) if not has_aux: logpsi = lambda w, σ: model_apply_fun({"params": w, **model_state}, σ) else: # TODO: output the mutable state logpsi = lambda w, σ: model_apply_fun( {"params": w, **model_state}, σ, mutable=mutable )[0] log_pdf = ( lambda w, σ: machine_pow * model_apply_fun({"params": w, **model_state}, σ).real ) def expect_closure(*args): local_kernel_vmap = jax.vmap( partial(local_kernel, logpsi), in_axes=(None, 0, 0, 0), out_axes=0 ) return nkjax.expect(log_pdf, local_kernel_vmap, *args, n_chains=σ_shape[0]) def expect_closure_pars(pars): return expect_closure(pars, σ, σp, mels) Ō, Ō_pb, Ō_stats = nkjax.vjp(expect_closure_pars, parameters, has_aux=True) Ō_pars_grad = Ō_pb(jnp.ones_like(Ō)) return ( Ō_stats, tree_map(lambda x: mpi.mpi_mean_jax(x)[0], Ō_pars_grad), model_state, ) @partial(jax.jit, static_argnums=(0, 1)) def grad_expect_operator_Lrho2( model_apply_fun: Callable, mutable: bool, parameters: PyTree, model_state: PyTree, σ: jnp.ndarray, σp: jnp.ndarray, mels: jnp.ndarray, ) -> Tuple[PyTree, PyTree, Stats]: σ_shape = σ.shape if jnp.ndim(σ) != 2: σ = σ.reshape((-1, σ_shape[-1])) n_samples_node = σ.shape[0] has_aux = mutable is not False # if not has_aux: # out_axes = (0, 0) # else: # out_axes = (0, 0, 0) if not has_aux: logpsi = lambda w, σ: model_apply_fun({"params": w, **model_state}, σ) else: # TODO: output the mutable state logpsi = lambda w, σ: model_apply_fun( {"params": w, **model_state}, σ, mutable=mutable )[0] # local_kernel_vmap = jax.vmap( # partial(local_value_kernel, logpsi), in_axes=(None, 0, 0, 0), out_axes=0 # ) # _Lρ = local_kernel_vmap(parameters, 
σ, σp, mels).reshape((σ_shape[0], -1)) ( Lρ, der_loc_vals, ) = _der_local_values_jax._local_values_and_grads_notcentered_kernel( logpsi, parameters, σp, mels, σ ) # _der_local_values_jax._local_values_and_grads_notcentered_kernel returns a loc_val that is conjugated Lρ = jnp.conjugate(Lρ) LdagL_stats = statistics((jnp.abs(Lρ) ** 2).T) LdagL_mean = LdagL_stats.mean # old implementation # this is faster, even though i think the one below should be faster # (this works, but... yeah. let's keep it here and delete in a while.) grad_fun = jax.vmap(nkjax.grad(logpsi, argnums=0), in_axes=(None, 0), out_axes=0) der_logs = grad_fun(parameters, σ) der_logs_ave = tree_map(lambda x: mean(x, axis=0), der_logs) # TODO # NEW IMPLEMENTATION # This should be faster, but should benchmark as it seems slower # to compute der_logs_ave i can just do a jvp with a ones vector # _logpsi_ave, d_logpsi = nkjax.vjp(lambda w: logpsi(w, σ), parameters) # TODO: this ones_like might produce a complexXX type but we only need floatXX # and we cut in 1/2 the # of operations to do. # der_logs_ave = d_logpsi( # jnp.ones_like(_logpsi_ave).real / (n_samples_node * utils.n_nodes) # )[0] der_logs_ave = tree_map(lambda x: mpi.mpi_sum_jax(x)[0], der_logs_ave) def gradfun(der_loc_vals, der_logs_ave): par_dims = der_loc_vals.ndim - 1 _lloc_r = Lρ.reshape((n_samples_node,) + tuple(1 for i in range(par_dims))) grad = mean(der_loc_vals.conjugate() * _lloc_r, axis=0) - ( der_logs_ave.conjugate() * LdagL_mean ) return grad LdagL_grad = jax.tree_util.tree_multimap(gradfun, der_loc_vals, der_logs_ave) # ⟨L†L⟩ ∈ R, so if the parameters are real we should cast away # the imaginary part of the gradient. # we do this also for standard gradient of energy. # this avoid errors in #867, #789, #850 LdagL_grad = jax.tree_multimap( lambda x, target: (x if jnp.iscomplexobj(target) else x.real).astype( target.dtype ), LdagL_grad, parameters, ) return ( LdagL_stats, LdagL_grad, model_state, ) @partial(jax.jit, static_argnums=(0, 1)) def _grad_expect_continuous( model_apply_fun: Callable, kernel, parameters: PyTree, additional_data: PyTree, model_state: PyTree, x: jnp.ndarray, ) -> Tuple[PyTree, PyTree]: x_shape = x.shape if jnp.ndim(x) != 2: x = x.reshape((-1, x_shape[-1])) n_samples = x.shape[0] * mpi.n_nodes def logpsi(w, σ): return model_apply_fun({"params": w, **model_state}, σ) local_value_vmap = jax.vmap( partial(kernel, logpsi), in_axes=(None, 0, None), out_axes=0, ) # TODO: Once batching/chunking is implemented, should be made available here too. x = x.reshape((-1, 1, x_shape[-1])) def compute_kernel(i, x): Oloc = local_value_vmap(parameters, x, additional_data) return i, Oloc _, O_loc = jax.lax.scan(compute_kernel, 0, x) O_loc = O_loc.reshape(-1) Ō = statistics(O_loc.reshape(x_shape[:-1]).T) x = x.reshape((-1, x_shape[-1])) O_loc -= Ō.mean _, vjp_fun = nkjax.vjp( lambda w: model_apply_fun({"params": w}, x), parameters, conjugate=False, ) Ō_grad = vjp_fun(jnp.conjugate(O_loc) / n_samples)[0] Ō_grad = jax.tree_multimap( lambda x, target: (x if jnp.iscomplexobj(target) else x.real).astype( target.dtype ), Ō_grad, parameters, ) return Ō, tree_map(lambda x: mpi.mpi_sum_jax(x)[0], Ō_grad)
from .models import * from .backends import * from .reporters import * from .constants import * from .cover import * from .persister import * from .scheduler import * from .celery import *
from datetime import timedelta, datetime import pytest from crawler.api_client import RestClient, Token from crawler import api_client from crawler.exceptions import APIException class MockResponse: """Class to mock server responses""" def __init__(self, json_date, status_code): self.json_data = json_date self.status_code = status_code def json(self): return self.json_data def test_get_success(mocker): def mock_request_get(*args, **kwargs): """Mock function to be called instead of requests.get to return a Mock reponse""" return MockResponse({"key": "value"}, 200) mocker.patch("requests.get", side_effect=mock_request_get) data = RestClient.get("http://mock-url") assert data == {"key": "value"} def test_get_exception(mocker): def mock_request_get(*args, **kwargs): return MockResponse(None, 404) mocker.patch("requests.get", side_effect=mock_request_get) with pytest.raises(APIException) as e: RestClient.get("http://mock-url") assert e.value.err_code == 404 assert e.value.err_msg == "Issue with external api" def test_get_token_if_empty_cache(mocker): def mock_token(*args, **kwargs): return MockResponse({"token": "some-random-token"}, 200) mocker.patch("requests.get", side_effect=mock_token) token = Token.get_token() assert token == "some-random-token" assert Token._tte is not None @pytest.mark.parametrize( "minutes,seconds,expected", [(4, 58, False), (4, 59, True), (5, 00, True)] ) def test_token_is_expired(mocker, minutes, seconds, expected): # Clear cache FIXME : find a better way to contain test Token._token = None Token._tte = None def mock_token(*args, **kwargs): return MockResponse({"token": "some-random-token"}, 200) mocker.patch("requests.get", side_effect=mock_token) Token.get_token() mock_now = datetime.now() + timedelta(minutes=minutes, seconds=seconds) mocker.patch("crawler.api_client._now", return_value=mock_now) assert Token._is_expired() is expected def test_get_token_when_not_expired(mocker): # Clear cache FIXME : find a better way to contain test Token._token = None Token._tte = None def first_mock_token_from_server(*args, **kwargs): return MockResponse({"token": "first-token"}, 200) mocker.patch("requests.get", side_effect=first_mock_token_from_server) assert Token.get_token() == "first-token" # Let us mock a call 2 minutes before the expiration mock_current_time = Token._tte - timedelta(minutes=2) mocker.patch("crawler.api_client._now", return_value=mock_current_time) # a new token should come from the server in case the actual call goes through def second_mock_token_from_server(*args, **kwargs): return MockResponse({"token": "second-token"}, 200) mocker.patch("requests.get", side_effect=second_mock_token_from_server) assert Token.get_token() == "first-token" def test_get_token_when_expired(mocker): # Clear cache FIXME : find a better way to contain test Token._token = None Token._tte = None def first_mock_token_from_server(*args, **kwargs): return MockResponse({"token": "first-token"}, 200) mocker.patch("requests.get", side_effect=first_mock_token_from_server) assert Token.get_token() == "first-token" # Let us mock a call exactly 1 second after the expiration mock_current_time = Token._tte + timedelta(seconds=1) mocker.patch("crawler.api_client._now", return_value=mock_current_time) # a new token should come from the server in case the actual call goes through def second_mock_token_from_server(*args, **kwargs): return MockResponse({"token": "second-token"}, 200) mocker.patch("requests.get", side_effect=second_mock_token_from_server) assert Token.get_token() == "second-token" 
def test_get_details_for_category(mocker, animals_page_one, animals_page_two):
    def mock_data(*args, **kwargs):
        if args[0] == "https://public-apis-api.herokuapp.com/api/v1/apis/entry?page=1&category=Animals":
            return MockResponse(animals_page_one, 200)
        elif args[0] == "https://public-apis-api.herokuapp.com/api/v1/apis/entry?page=2&category=Animals":
            return MockResponse(animals_page_two, 200)

    mocker.patch("requests.get", side_effect=mock_data)
    mocker.patch("crawler.api_client.Token.get_token", return_value="some-random-token")

    # Test for page 1
    count, data = api_client.get_details_for_category("Animals")
    assert count == 13
    assert len(data) == 10

    # Test for page 2
    count, data = api_client.get_details_for_category("Animals", 2)
    assert count == 13
    assert len(data) == 3


def test_get_all_categories(mocker, categories_page_one, categories_page_two):
    def mock_get_categories_from_server(*args, **kwargs):
        if args[0] == "https://public-apis-api.herokuapp.com/api/v1/apis/categories?page=1":
            return MockResponse(categories_page_one, 200)
        elif args[0] == "https://public-apis-api.herokuapp.com/api/v1/apis/categories?page=2":
            return MockResponse(categories_page_two, 200)

    mocker.patch("requests.get", side_effect=mock_get_categories_from_server)
    mocker.patch("crawler.api_client.Token.get_token", return_value="some-random-token")

    mock_categories_list = categories_page_one["categories"] + categories_page_two["categories"]
    categories = api_client.get_all_categories()
    assert categories == mock_categories_list
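# The tests above rely on pytest fixtures (animals_page_one, animals_page_two,
# categories_page_one, categories_page_two) that are defined elsewhere, most
# likely in a conftest.py not shown here. The sketch below is a minimal,
# hypothetical version of the two category fixtures only: the "categories" key
# is the one the test actually reads, while the "count" field name and the
# example values are assumptions about the upstream API payload.
import pytest


@pytest.fixture
def categories_page_one():
    # First page of categories; assumed payload shape.
    return {"count": 12, "categories": [f"Category {i}" for i in range(10)]}


@pytest.fixture
def categories_page_two():
    # Second (last) page of categories; assumed payload shape.
    return {"count": 12, "categories": ["Category 10", "Category 11"]}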
# Copyright 2022 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Boltzmann DQN agent implemented in JAX. This algorithm is a variation of DQN that uses a softmax policy directly with the unregularized action-value function. See https://arxiv.org/abs/2102.01585. """ import jax import jax.numpy as jnp import numpy as np from open_spiel.python.jax import dqn class BoltzmannDQN(dqn.DQN): """Boltzmann DQN implementation in JAX.""" def __init__(self, *args, eta: float = 1.0, seed: int = 42, **kwargs): """Initializes the Boltzmann DQN agent. Args: *args: args passed to the underlying DQN agent. eta: Temperature parameter used in the softmax function. seed: Random seed used for action selection. **kwargs: kwargs passed to the underlying DQN agent. """ self._eta = eta self._rs = np.random.RandomState(seed) # Used to select actions. super().__init__(*args, seed=seed, **kwargs) def _create_networks(self, rng, state_representation_size): """Called to create the networks.""" # We use the DQN networks and an additional network for the fixed policy. super()._create_networks(rng, state_representation_size) self.params_prev_q_network = self.hk_network.init( rng, jnp.ones([1, state_representation_size])) def _softmax_action_probs(self, params, info_state, legal_actions, coeff=None): """Returns a valid soft-max action and action probabilities. Args: params: Parameters of the Q-network. info_state: Observations from the environment. legal_actions: List of legal actions. coeff: If not None, then the terms in softmax function will be element-wise multiplied with these coefficients. Returns: a valid soft-max action and action probabilities. """ info_state = np.reshape(info_state, [1, -1]) q_values = self.hk_network_apply(params, info_state)[0] legal_one_hot = self._to_one_hot(legal_actions) legal_q_values = ( q_values + (1 - legal_one_hot) * dqn.ILLEGAL_ACTION_LOGITS_PENALTY) # Apply temperature and subtract the maximum value for numerical stability. temp = legal_q_values / self._eta unnormalized = np.exp(temp - np.amax(temp)) if coeff is not None: unnormalized = np.multiply(coeff, unnormalized) probs = unnormalized / unnormalized.sum() action = self._rs.choice(legal_actions, p=probs[legal_actions]) return action, probs def _get_action_probs(self, info_state, legal_actions, is_evaluation=False): """Returns a selected action and the probabilities of legal actions.""" if is_evaluation: # Soft-max normalized by the action probabilities from the previous # Q-network. _, prev_probs = self._softmax_action_probs(self.params_prev_q_network, info_state, legal_actions) return self._softmax_action_probs(self.params_q_network, info_state, legal_actions, prev_probs) # During training, we use the DQN action selection, which will be # epsilon-greedy. return super()._get_action_probs( info_state, legal_actions, is_evaluation=False) def update_prev_q_network(self): """Updates the parameters of the previous Q-network.""" self.params_prev_q_network = jax.tree_multimap(lambda x: x.copy(), self.params_q_network)
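# Standalone numeric illustration (not part of the agent above) of the
# temperature softmax used in _softmax_action_probs: dividing by eta and
# subtracting the maximum before exponentiating leaves the probabilities
# unchanged but avoids overflow when Q-values are large or eta is small.
# The Q-values below are made up for demonstration only.
import numpy as np

q_values = np.array([1000.0, 1001.0, 999.0])
eta = 1.0
temp = q_values / eta
unnormalized = np.exp(temp - np.amax(temp))  # exponentiates non-positive numbers only
probs = unnormalized / unnormalized.sum()
print(probs)  # ~[0.245, 0.665, 0.090]; np.exp(temp) alone would overflow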
# based on https://github.com/sfotiadis/yenlp/blob/master/extract_reviews.py
import json
import os
import sys
from string import Template

from lingofunk_classify_sentiment.config import config, fetch

business_data_filename = fetch(config["datasets"]["yelp"]["ids"])
reviews_data_filename = fetch(config["datasets"]["yelp"]["reviews"])
sample_template_filename = Template(fetch(config["datasets"]["yelp"]["sample_format"]))


def get_business_ids(category):
    """Get the business ids for the given category."""
    with open(business_data_filename) as businesses:
        business_ids = set()
        for business in businesses:
            business = json.loads(business)
            if business["categories"] and category in business["categories"].split():
                business_ids.add(business["business_id"])
        return business_ids


def save_reviews(category, quantity):
    """Save the given number of reviews of a specific category to two files,
    one for each class (pos/neg)."""
    pos_reviews_filename = sample_template_filename.substitute(
        category=category.lower(), quantity=quantity, label="pos"
    )
    neg_reviews_filename = sample_template_filename.substitute(
        category=category.lower(), quantity=quantity, label="neg"
    )

    if os.path.isfile(pos_reviews_filename) and os.path.isfile(neg_reviews_filename):
        return

    business_ids = get_business_ids(category)

    cnt_pos = 0
    cnt_neg = 0
    with open(pos_reviews_filename, "w") as pos_reviews, open(
        neg_reviews_filename, "w"
    ) as neg_reviews, open(reviews_data_filename) as reviews:
        for review in reviews:
            # stop when the quantity is reached for both classes
            if cnt_pos >= quantity and cnt_neg >= quantity:
                break
            review = json.loads(review)
            if review["business_id"] in business_ids:
                # discard 3-star ratings; >3 counts as positive, <3 as negative
                if int(review["stars"]) > 3 and cnt_pos < quantity:
                    json.dump(review, pos_reviews)
                    pos_reviews.write("\n")
                    cnt_pos += 1
                elif int(review["stars"]) < 3 and cnt_neg < quantity:
                    json.dump(review, neg_reviews)
                    neg_reviews.write("\n")
                    cnt_neg += 1


def main(argv):
    if len(argv) != 2:
        print("Please list the category label and quantity of reviews required.")
        sys.exit(2)

    category = argv[0]
    quantity = int(argv[1])

    try:
        print(f"Creating files with {quantity} reviews of the '{category}' category")
        save_reviews(category, quantity)
    except Exception:
        print("Alas! Something went wrong.")
        sys.exit(2)


if __name__ == "__main__":
    main(sys.argv[1:])
import requests from zvgportal.zvgclient.formdata import FormData SEARCH_URL = "https://www.zvg-portal.de/index.php" class ZvgClient: def __init__(self, query): self.query = query def search(self): params = {"button": "Suchen", "all": 1} data = FormData( land_abk=self.query.bland, search_object=self.query.search_object, value_limit=self.query.value_limit, btermin=self.query.btermin) r = requests.post(SEARCH_URL, data=data.__dict__, params=params) return r.text
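# Hedged usage sketch for the client defined above, to be run in the same
# module/session so ZvgClient is in scope. ZvgClient only reads four
# attributes from the query object (bland, search_object, value_limit,
# btermin), so any object exposing them works; SimpleNamespace and the
# example values below are illustrative assumptions, not part of zvgportal.
from types import SimpleNamespace

query = SimpleNamespace(bland="be", search_object="", value_limit="", btermin="")
html = ZvgClient(query).search()  # returns the response body of the result page
print(html[:200])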
""" This module is used to discover the serial address of any ML600 connected to the PC. """ import asyncio import aioserial import serial.tools.list_ports from loguru import logger from flowchem.components.devices.Hamilton.ML600 import ( HamiltonPumpIO, InvalidConfiguration, ) def ml600_finder(): """Try to initialize an ML600 on every available COM port.""" port_available = [comport.device for comport in serial.tools.list_ports.comports()] # Ports connected to an ML600-looking device valid_ports = set() for serial_port in port_available: try: print(f"Looking for pump on {serial_port}...") link = HamiltonPumpIO(aioserial.AioSerial(url=serial_port, timeout=0.1)) asyncio.run(link.initialize()) logger.info(f"{link.num_pump_connected} pump(s) found on <{serial_port}>") valid_ports.add(serial_port) except InvalidConfiguration: logger.debug(f"No pump found on {serial_port}") return valid_ports if __name__ == "__main__": ml600_pumps = ml600_finder() if len(ml600_pumps) > 0: print(f"The following serial port are connected to ML600: {ml600_pumps}") else: print("No ML600 pump found")
### Main Functionality - Export CSV file names py jobs with 25 jobs from files import converter as con from search import cb_job_search as cb from util import states def main(job, location, filename): for state in states.get_us_states(): job_data = cb.start_search(job, location) for item in job_data: con.add_to_csv(item, filename) con.remove_duplicate_rows(filename) # main("data science", "test") def html_generator(filename, jobs): job_info = "" index = [""" <!doctype html> <html lang="en"> <head> <!-- Required meta tags --> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> <!-- Bootstrap CSS --> <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/css/bootstrap.min.css" integrity="sha384-JcKb8q3iqJ61gNV9KGb8thSsNjpSL0n8PARn9HuZOnIxN0hoP+VmmDGMN5t9UJ0Z" crossorigin="anonymous"> <title>Jobpy Job Board</title> </head> <body> <!-- Image and text --> <nav class="navbar navbar-light bg-light"> <a class="navbar-brand" href="#"> <img src="/docs/4.0/assets/brand/bootstrap-solid.svg" width="30" height="30" class="d-inline-block align-top" alt=""> Jopby Job Board </a> </nav> <div style="height: 50px;"></div> <div style="margin-top: 25;"> <div class="row"> <div style="height: 24px;"></div> <!-- Block to add info --> """, """ <!-- Block ends --> </div> </div> <!-- Optional JavaScript --> <!-- jQuery first, then Popper.js, then Bootstrap JS --> <script src="https://code.jquery.com/jquery-3.5.1.slim.min.js" integrity="sha384-DfXdz2htPH0lsSSs5nCTpuj/zy4C+OGpamoFVy38MVBnE+IbbVYUew+OrCXaRkfj" crossorigin="anonymous"></script> <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/umd/popper.min.js" integrity="sha384-9/reFTGAW83EW2RDu2S0VKaIzap3H66lZH81PoYlFhbGU+6BZp6G7niu735Sk7lN" crossorigin="anonymous"></script> <script src="https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/js/bootstrap.min.js" integrity="sha384-B4gt1jrGC7Jh4AgTPSdUtOBvfO8shuf57BaghqFfPlYxofvL8/KUEfYiJOMMV+rV" crossorigin="anonymous"></script> </body> </html> """] count = 0 for job in jobs: job_info = f""" <div class="col-sm-6 mt-8"> <div class="card text-white bg-secondary mb-3" style="max-width: 25rem;"> <div class="card-header">{job["Job Title"]}</div> <div class="card-body"> <h5 class="card-title">{job["Company"]}</h5> <p class="card-text">{job["Location"]} </p> <p class="card-text">{job["Description"][0][:200].strip()}.</p> <a class="btn btn-primary" href="{job["Application Url"]}" role="button">Read More</a> </div> </div> </div> """ index.insert(1, job_info) count += 1 if count == 5: break with open(f"{filename}.html", "w") as f: f.write("".join(index)) # return job_info html_generator("test")
""" Exercise django-registration's built-in form classes. """ import uuid from django.core.exceptions import ValidationError from django.test import modify_settings from django.utils.six import text_type from django_registration import forms, validators from .base import RegistrationTestCase @modify_settings(INSTALLED_APPS={'remove': 'registration'}) class RegistrationFormTests(RegistrationTestCase): """ Test the built-in form classes. """ def test_email_required(self): """ The email address field is required. """ form = forms.RegistrationForm() self.assertTrue( form.fields['email'].required ) def test_username_uniqueness(self): """ Username uniqueness is enforced. This test is necessary as of 2.1.x to ensure the base UserCreationForm clean() continues to be called from the overridden clean() in RegistrationForm. """ user_data = self.valid_data.copy() del user_data['password1'] del user_data['password2'] user_data['password'] = 'swordfish' existing_user = self.user_model(**user_data) existing_user.save() form = forms.RegistrationForm(data=self.valid_data.copy()) self.assertFalse(form.is_valid()) self.assertTrue(form.has_error(self.user_model.USERNAME_FIELD)) def test_reserved_names(self): """ Reserved names are disallowed. """ for reserved_name in validators.DEFAULT_RESERVED_NAMES: data = self.valid_data.copy() data[self.user_model.USERNAME_FIELD] = reserved_name form = forms.RegistrationForm(data=data) self.assertFalse(form.is_valid()) self.assertTrue(form.has_error(self.user_model.USERNAME_FIELD)) self.assertTrue( text_type(validators.RESERVED_NAME) in form.errors[self.user_model.USERNAME_FIELD] ) def test_confusable_usernames(self): """ Usernames containing dangerously confusable use of Unicode are disallowed. """ for dangerous_value in ( u'p\u0430yp\u0430l', u'g\u043e\u043egle', u'\u03c1ay\u03c1al', ): data = self.valid_data.copy() data[self.user_model.USERNAME_FIELD] = dangerous_value form = forms.RegistrationForm(data=data) self.assertFalse(form.is_valid()) self.assertTrue(form.has_error(self.user_model.USERNAME_FIELD)) self.assertTrue( text_type(validators.CONFUSABLE) in form.errors[self.user_model.USERNAME_FIELD] ) def test_confusable_emails(self): """ Usernames containing dangerously confusable use of Unicode are disallowed. """ for dangerous_value in ( u'p\u0430yp\[email protected]', u'g\u043e\[email protected]', u'\u03c1y\[email protected]', u'paypal@ex\u0430mple.com', u'google@exam\u03c1le.com', ): data = self.valid_data.copy() data['email'] = dangerous_value form = forms.RegistrationForm(data=data) self.assertFalse(form.is_valid()) self.assertTrue(form.has_error('email')) self.assertTrue( text_type(validators.CONFUSABLE_EMAIL) in form.errors['email'] ) def test_custom_reserved_names(self): """ Reserved names can be overridden by an attribute. """ custom_reserved_names = ['foo', 'bar', 'eggs', 'spam'] class CustomReservedNamesForm(forms.RegistrationForm): reserved_names = custom_reserved_names for reserved_name in custom_reserved_names: data = self.valid_data.copy() data[self.user_model.USERNAME_FIELD] = reserved_name form = CustomReservedNamesForm(data=data) self.assertFalse(form.is_valid()) self.assertTrue(form.has_error(self.user_model.USERNAME_FIELD)) self.assertTrue( text_type(validators.RESERVED_NAME) in form.errors[self.user_model.USERNAME_FIELD] ) def test_reserved_name_non_string(self): """ GitHub issue #82: reserved-name validator should not attempt to validate a non-string 'username'. 
""" validator = validators.ReservedNameValidator() for value in (123456, 1.7, uuid.uuid4()): self.assertTrue(validator(value) is None) def test_tos_field(self): """ The terms-of-service field on RegistrationFormTermsOfService is required. """ form = forms.RegistrationFormTermsOfService( data=self.valid_data.copy() ) self.assertFalse(form.is_valid()) self.assertTrue(form.has_error('tos')) self.assertEqual( form.errors['tos'], [text_type(validators.TOS_REQUIRED)] ) def test_email_uniqueness(self): """ Email uniqueness is enforced by RegistrationFormUniqueEmail. """ self.user_model.objects.create( username='bob', email=self.valid_data['email'], password=self.valid_data['password1'] ) form = forms.RegistrationFormUniqueEmail( data=self.valid_data.copy() ) self.assertFalse(form.is_valid()) self.assertTrue(form.has_error('email')) self.assertEqual( form.errors['email'], [text_type(validators.DUPLICATE_EMAIL)] ) data = self.valid_data.copy() data.update(email='[email protected]') form = forms.RegistrationFormUniqueEmail( data=data ) self.assertTrue(form.is_valid()) def test_confusables_validator(self): """ Test the confusable-username validator standalone. """ for dangerous_value in ( u'p\u0430yp\u0430l', u'g\u043e\u043egle', u'\u03c1ay\u03c1al', ): with self.assertRaises(ValidationError): validators.validate_confusables(dangerous_value) for safe_value in ( u'paypal', u'google', u'root', u'admin', u'\u041f\u0451\u0442\u0440', u'\u5c71\u672c', 3, ): validators.validate_confusables(safe_value) def test_confusables_email_validator(self): """ Test the confusable-email validator standalone. """ for dangerous_value in ( u'p\u0430yp\[email protected]', u'g\u043e\[email protected]', u'\u03c1ay\[email protected]', u'paypal@ex\u0430mple.com', u'google@exam\u03c1le.com' ): with self.assertRaises(ValidationError): validators.validate_confusables_email(dangerous_value) for safe_value in( u'[email protected]', u'[email protected]', u'\u041f\u0451\u0442\[email protected]', u'\u5c71\[email protected]', u'username', ): validators.validate_confusables_email(safe_value)
liste = [12, -3, 3, 1, 34, 100] liste.sort(reverse = True) print(liste)
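# Side note on the sort above: list.sort() sorts in place and returns None,
# while the built-in sorted() returns a new list and leaves the original as-is.
print(sorted(liste))                # ascending copy
print(sorted(liste, reverse=True))  # descending copy, same order as liste above
print(liste)                        # unchanged by the two calls above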
# -*- coding: utf-8 -*- # # Copyright © Spyder Project Contributors # Licensed under the terms of the MIT License # (see spyder/__init__.py for details) """ Simphony Photonic Simulator This module implements a free and open source photonic integrated circuit (PIC) simulation engine. It is speedy and easily extensible. """ import io import os import sys import setuptools from simphony import __version__, __website_url__ # analysis:ignore # ============================================================================== # Constants # ============================================================================== NAME = "simphony" LIBNAME = "simphony" # ============================================================================== # Auxiliary functions # ============================================================================== extra_files = [] data_files_ext = [ ".sparam", ".dat", ".txt", ".npy", ".npz", ] def package_data_files(directory): paths = [] for (path, directories, filenames) in os.walk(directory): for filename in filenames: name, ext = os.path.splitext(filename) if ext in data_files_ext: paths.append(os.path.join("..", path, filename)) return paths extra_files += package_data_files("simphony/library") extra_files += ["*.ini"] # ============================================================================== # Use README for long description # ============================================================================== with io.open("README.md", encoding="utf-8") as f: LONG_DESCRIPTION = f.read() # ============================================================================== # Setup arguments # ============================================================================== setup_args = dict( name=NAME, version=__version__, description="Simphony: A Simulator for Photonic circuits", long_description=LONG_DESCRIPTION, long_description_content_type="text/markdown", # download_url=__website_url__ + "", author="Sequoia Ploeg", author_email="[email protected]", url=__website_url__, license="MIT", keywords="photonics simulation circuits science", platforms=["Windows", "Linux"], # ["Mac OS-X"] support coming packages=setuptools.find_packages(), package_data={"": extra_files}, classifiers=[ "License :: OSI Approved :: MIT License", # 'Operating System :: MacOS', "Operating System :: Microsoft :: Windows", "Operating System :: POSIX :: Linux", # 'Operating System :: OS Independent', # 'Programming Language :: Python :: 3', # 'Programming Language :: Python :: 3.4', "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Development Status :: 2 - Pre-Alpha", "Intended Audience :: Education", "Intended Audience :: Science/Research", "Topic :: Scientific/Engineering", ], ) install_requires = [ "scipy>=1.2.1", "numpy", "parsimonious>=0.8.1", ] if "setuptools" in sys.modules: setup_args["install_requires"] = install_requires # setup_args['extras_require'] = extras_require # setup_args['entry_points'] = { # # 'gui_scripts': [ # # 'simphony = simphony.app.start:main' # # ], # 'console_scripts': [ # 'simphony = simphony.app.cli_start:main' # ] # } # setup_args.pop('scripts', None) # ============================================================================== # Main setup # ============================================================================== setuptools.setup(**setup_args)
import statistics # Assign problem data to variables with representative names # well height, daily advance, night retreat, accumulated distance advance_cm, well_height_cm, night_retreat_cm, accumulated_distance_cm= [30, 21, 33, 77, 44, 45, 23, 45, 12, 34, 55], 125, 20, 0 # Assign 0 to the variable that represents the solution days = 0 # Total days elapsed # Write the code that solves the problem for comp_day in advance_cm: # counts each number in array and counts it as a day #(1 full day) days += 1 # add day to counter accumulated_distance_cm += comp_day # add current dist travled in day and add to total moved distance if accumulated_distance_cm > well_height_cm: #if snail is over the well height break for loop break # exit loop via break else: # if not over well height snail = sleep so remove 20cm from total height #(1 full night) accumulated_distance_cm -= night_retreat_cm # night time draw back # Print the result with print('Days =', days) print('Days =', days) # What is its maximum displacement in a day? And its minimum? max_disp, mini_disp = max(advance_cm), min(advance_cm) #using python version of what was taught on lesson for google sheets print("Maximum Displacment = {0} \nMinimum Displacement = {1}".format(max_disp, mini_disp)) # What is its average progress? avrg_prog = sum(advance_cm)/len(advance_cm) # adding all distances travled and dividing them by the amount of numbers in array print("Average Progress = {0}".format(avrg_prog)) # What is the standard deviation of your displacement during the day? print("Standard Deviation = {0}".format(statistics.pstdev(advance_cm))) # Using imported statistics library from python to get the standard deviation. #3/22/19
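# Cross-check of the standard deviation above, computed by hand: pstdev is the
# square root of the mean squared deviation from the average (population form),
# so this should print the same value as statistics.pstdev(advance_cm).
variance = sum((x - avrg_prog) ** 2 for x in advance_cm) / len(advance_cm)
print("Standard Deviation (manual) = {0}".format(variance ** 0.5))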
from django.conf import settings from django.shortcuts import render from django.http import HttpResponse, HttpResponseRedirect, JsonResponse from django.core.cache import cache from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required, user_passes_test, permission_required from django.contrib.auth import authenticate, login from django.contrib.auth.password_validation import validate_password, ValidationError from django.urls import reverse, reverse_lazy from django.views.decorators.csrf import csrf_exempt from django.utils import timezone from django.utils.http import urlencode from functools import wraps import urllib3, base64, json, re, datetime, random, string from distutils.command.check import check from collections import OrderedDict from twilio import twiml from channels import Channel, Group from isubscribe.models import Subscribe, Contact, ScheduledEvent, EventMembers, ScheduledOccurrence, Rule from isubscribe.notify import Notify from isubscribe.tasks import sensu_event_resolve, sensu_client_delete, sensu_result_delete, y_predict, y_sum_by_time from isubscribe.forms import ScheduledEventForm, ContactForm, RuleForm import logging from re import search logger = logging.getLogger(__name__) import redis import pickle import codecs redis_pool = redis.ConnectionPool(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, max_connections=settings.REDIS_POOL_MAX, password=settings.REDIS_PASSWORD) r = redis.Redis(connection_pool=redis_pool) http = urllib3.PoolManager(maxsize=10) def ajax_login_required(view_func): @wraps(view_func) def wrapper(request, *args, **kwargs): if request.user.is_authenticated(): return view_func(request, *args, **kwargs) url = reverse('login') next={'next': request.META.get('HTTP_REFERER')} return HttpResponse(json.dumps({ 'not_authenticated': True, 'login_url': u'%s?%s' % (url, urlencode(next)) }), 'application/json') return wrapper @login_required(login_url=reverse_lazy('login')) def index(request): return HttpResponseRedirect(reverse_lazy('events')) @login_required(login_url=reverse_lazy('login')) def entities(request): ''' for Test in r.scan_iter(match=':1:entity_*'): print('************************************' + Test.decode('utf-8')) ''' logger.debug('entities view triggered by %s' % request.user.username) if request.method == 'POST' and 'search' in request.POST and request.POST['search'] != '': logger.debug('entities view search by user %s search: %s' % (request.user.username, request.POST['search'])) data = {} mimetype = 'application/json' Group("entities-private-%s" % request.user.id).send({ "text": json.dumps({'flush_signal':True}) }) match_counter = 0 for word in r.scan_iter(match=':1:entity_*'): try: entity = re.sub(r'^:1:entity_', '', word.decode('utf-8')) patterns = re.compile(r'(?:%s)' % request.POST['search'], re.IGNORECASE) if patterns.search(entity): match_counter += 1 if match_counter > settings.MAX_ENTITY_SEARCH_RESULTS: break status_1 = False status_2 = False subscribed_status = [] try: rule = cache.get('rule_' + entity) if '1' in rule and request.user.id in rule['1']: subscribed_status.append(1) if '2' in rule and request.user.id in rule['2']: subscribed_status.append(2) except: pass if 1 in subscribed_status: status_1 = True if 2 in subscribed_status: status_2 = True regex_match_1 = False regex_match_2 = False regex_match_status = [] for rule_status in [1, 2]: try: patterns = pickle.loads(r.get('regexrule_%s_%s' % (request.user.id, rule_status))) if patterns.search(entity): 
regex_match_status.append(rule_status) except: pass if 1 in regex_match_status: regex_match_1 = True if 2 in regex_match_status: regex_match_2 = True if 'silent_' + entity in cache.keys("silent_*"): silent = True else: silent = False result = { 'entity': entity, 'status_1': status_1, 'status_2': status_2, 'silent': silent, 'regex_1': regex_match_1, 'regex_2': regex_match_2 } Group("entities-private-%s" % request.user.id).send({ "text": json.dumps(result) }) #logger.debug("entities view search: %s result: %s" % (request.POST['search'], json.dumps(result))) except: raise data['search'] = request.POST['search'] data['status'] = 0 data['timestamp'] = datetime.datetime.now().timestamp() return HttpResponse(json.dumps(data), mimetype) if request.method == 'POST' and 'name' in request.POST: logger.debug('entities view new rule user %s' % (request.user.username)) mimetype = 'application/json' try: re.compile(request.POST['regex_string']) except: return HttpResponse(json.dumps(['invalid regex_string']), status=409) form = RuleForm(request.POST, user=request.user) if form.is_valid: try: new_rule = form.save(commit=True) logger.debug('entities view new rule id: %s' % new_rule.id) Channel('background-build-user-rules').send({'user_id': request.user.id}) return HttpResponse(json.dumps({'id':new_rule.id, 'name':new_rule.name, 'regex_string':new_rule.regex_string, 'status':new_rule.status}), mimetype) except: return HttpResponse(json.dumps(form.errors), status=409) else: return HttpResponse(json.dumps(form.errors), status=409) if request.method == 'POST' and 'action' in request.POST and request.POST['action'] == 'rule_delete': logger.debug('entities delete new rule id %s' % (request.POST['id'])) rule_obj = Rule.objects.get(owner=request.user.id, id=request.POST['id']) form = RuleForm(instance=rule_obj, user=request.user) if form.is_valid: try: rule_obj.delete() Channel('background-build-user-rules').send({'user_id': request.user.id}) return HttpResponse('Done', status=200) except: return HttpResponse(json.dumps(form.errors), status=409) else: return HttpResponse(json.dumps(form.errors), status=409) if request.method == 'POST' and 'action' in request.POST and request.POST['action'] == 'rule_list': logger.debug('entities list new rule for user %s' % (request.user.username)) data = {} mimetype = 'application/json' return HttpResponse(json.dumps(Rule.objects.filter(owner=request.user.id).all()), mimetype) data = {} user_rules = Rule.objects.filter(owner=request.user.id) rule_form = RuleForm(initial={'owner': request.user}, user=request.user) profile_form = ContactForm(instance=Contact.objects.get(user=request.user.id)) return render(request, 'isubscribe/entities.html', {'DATA':data, 'profile_form': profile_form, 'user_rules': user_rules, 'rule_form': rule_form}) #@login_required(login_url=reverse_lazy('login')) @ajax_login_required def subscribe_toggle(request): mimetype = 'application/json' data = {} if request.method == 'POST' and 'entity' in request.POST and request.POST['entity'] != '' and 'status' in request.POST and request.POST['status'] != '': data['entity'] = request.POST['entity'] data['status'] = request.POST['status'] if Subscribe.objects.filter(entity=request.POST['entity'], status=int(request.POST['status'])).count() > 0: # change existing object obj = Subscribe.objects.get(entity=request.POST['entity'], status=int(request.POST['status'])) if request.user.pk not in obj.friends.values_list('pk', flat=True).all(): obj.friends.add(request.user.pk) data['result'] = "subscription added" 
logger.debug('%s subscribed to %s' % (request.user.username, request.POST['entity'])) else: obj.friends.remove(request.user.pk) data['result'] = "subscription removed" logger.debug('%s unsubscribed from %s' % (request.user.username, request.POST['entity'])) else: # create new object obj = Subscribe(entity=request.POST['entity'], status=int(request.POST['status'])) obj.save() obj.friends.add(request.user.pk) data['result'] = "subscription added" logger.debug('%s subscribed to new entity %s' % (request.user.username, request.POST['entity'])) Channel('background-build-entity-rules').send({'entity': request.POST['entity']}) return HttpResponse(json.dumps(data), mimetype) #@login_required(login_url=reverse_lazy('login')) @ajax_login_required def silent_toggle(request): mimetype = 'application/json' data = {} if request.method == 'POST' and 'entity' in request.POST and request.POST['entity'] != '': data['entity'] = request.POST['entity'] data['status'] = request.POST['status'] data['result'] = 'okay' if 'silent_comment' in request.POST: data['silent_comment'] = 'silenced by ' + request.user.username + ': ' + request.POST['silent_comment'] else: data['silent_comment'] = '' if 'ack_' + data['entity'] in cache.keys("ack_*"): ack = cache.get('ack_' + data['entity']) ack_by = ack['user_name'] ack_comment = ack['ack_comment'] acked = True else: acked = False ack_by = '' ack_comment = '' if 'silent_' + data['entity'] in cache.keys("silent_*"): cache.delete('silent_' + data['entity']) silent_return = False silent_data = { 'user_id': request.user.pk, 'user_name': request.user.username, "timestamp": datetime.datetime.now().timestamp(), "entity": data['entity'], "status": data['status'], "output": data['silent_comment'], "silent": False, "silent_by": request.user.username, "silent_comment": data['silent_comment'], "ack": acked, "ack_by": ack_by, "ack_comment": ack_comment } else: silent_data = { 'user_id': request.user.pk, 'user_name': request.user.username, "timestamp": datetime.datetime.now().timestamp(), "entity": data['entity'], "status": data['status'], "output": data['silent_comment'], "silent": True, "silent_by": request.user.username, "silent_comment": data['silent_comment'], "ack": acked, "ack_by": ack_by, "ack_comment": ack_comment } cache.set('silent_' + data['entity'], silent_data, timeout=(3600 * 24 * 365)) silent_return = True data['silent_info'] = silent_data data['silent'] = silent_return Group("notifications").send({"text": json.dumps(silent_data)}) return HttpResponse(json.dumps(data), mimetype) @login_required(login_url=reverse_lazy('login')) def events(request): data = {} if 'event' in request.GET and request.GET['event'] != '': logger.debug('event details view triggered by %s for event: %s' % (request.user.username, request.GET['event'])) try: data = cache.get('event_' + request.GET['event']) except: raise return render(request, 'isubscribe/generic.html', {'DATA':data['check']}) logger.debug('events view triggered by %s' % request.user.username) for word in cache.keys("event_*"): entity = re.sub(r'^event_', '', word) try: data[entity] = {} event_data = cache.get('event_' + entity) data[entity]['entity_element_id'] = re.sub(r':|\.', '_', entity) data[entity]['entity'] = entity data[entity]['status'] = event_data['check']['status'] data[entity]['output'] = json.dumps(event_data['check']['output'], ensure_ascii=False) data[entity]['timestamp'] = event_data['timestamp'] if 'ack_' + entity in cache.keys("ack_*"): data[entity]['ack'] = True ack = cache.get('ack_' + entity) 
data[entity]['ack_by'] = ack['user_name'] data[entity]['ack_comment'] = ack['ack_comment'] else: data[entity]['ack'] = False if 'silent_' + entity in cache.keys("silent_*"): data[entity]['silent'] = True silent = cache.get('silent_' + entity) data[entity]['silent_by'] = silent['user_name'] data[entity]['silent_comment'] = silent['silent_comment'] else: data[entity]['silent'] = False except: raise profile_form = ContactForm(instance=Contact.objects.get(user=request.user.id)) return render(request, 'isubscribe/events.html', {'DATA':OrderedDict(sorted(data.items(), key=lambda x: x[1]['timestamp'], reverse=True)), 'profile_form': profile_form}) @login_required(login_url=reverse_lazy('login')) def clients(request): data = {} for word in cache.keys("client_*"): client = re.sub(r'^client_', '', word) try: client_data = cache.get(word) data[client] = client_data except: raise profile_form = ContactForm(instance=Contact.objects.get(user=request.user.id)) return render(request, 'isubscribe/clients.html', {'DATA':data, 'profile_form': profile_form}) @login_required(login_url=reverse_lazy('login')) def subscriptions(request): data = {} for word in r.keys("subscription_*"): subscription = re.sub(r'^subscription_', '', str(word.decode('utf-8'))) try: subscription_data = r.lrange(word, 0, -1) data[subscription] = subscription_data except: raise profile_form = ContactForm(instance=Contact.objects.get(user=request.user.id)) return render(request, 'isubscribe/subscriptions.html', {'DATA':data, 'profile_form': profile_form}) #@login_required(login_url=reverse_lazy('login')) @ajax_login_required def ack(request): mimetype = 'application/json' data = {} if request.method == 'POST' and 'entity' in request.POST and request.POST['entity'] != '' and 'ack_interval' in request.POST and request.POST['ack_interval'] != '': data['entity'] = request.POST['entity'] data['ack_interval'] = request.POST['ack_interval'] data['status'] = request.POST['status'] data['timestamp'] = datetime.datetime.now().timestamp() data['ack_by'] = request.user.username data['ack'] = True data['output'] = "acknowledged by %s for %s hours" % (request.user.username, request.POST['ack_interval']) if 'ack_comment' in request.POST: data['ack_comment'] = request.user.username + ': ' + request.POST['ack_comment'] ack_data = { 'user_id': request.user.pk, 'user_name': request.user.username, 'timestamp': datetime.datetime.now().timestamp(), 'ack_interval': request.POST['ack_interval'], 'ack_comment': data['ack_comment'] } logger.debug('ack %s' % json.dumps(ack_data)) cache.set("ack_" + request.POST['entity'], ack_data, timeout=(float(data['ack_interval']) * 3600)) Channel('background-ack').send(data) return HttpResponse(json.dumps(data), mimetype) #@login_required(login_url=reverse_lazy('login')) @ajax_login_required def resolve(request): mimetype = 'application/json' data = {} if request.method == 'POST' and 'entity' in request.POST and request.POST['entity'] != '': data['entity'] = request.POST['entity'] data['status'] = 0 data['timestamp'] = datetime.datetime.now().timestamp() data['output'] = "resolve request by %s" % (request.user.username) data['result'] = 'okay' sensu_event_resolve(data) Channel('background-alert').send(dict(data)) return HttpResponse(json.dumps(data), mimetype) #@login_required(login_url=reverse_lazy('login')) @ajax_login_required def rmClient(request): mimetype = 'application/json' data = {} if request.method == 'POST' and 'client' in request.POST and request.POST['client'] != '': data['client'] = request.POST['client'] 
data['status'] = 0 data['timestamp'] = datetime.datetime.now().timestamp() if sensu_client_delete(data): data['result'] = 'okay' else: data['result'] = 'failed deleting ' + data['client'] return HttpResponse(json.dumps(data), mimetype) #@login_required(login_url=reverse_lazy('login')) @ajax_login_required def rmResult(request): mimetype = 'application/json' data = {} if request.method == 'POST' and 'entity' in request.POST and request.POST['entity'] != '': data['client'], data['check'] = request.POST['entity'].split(':') data['status'] = 0 data['timestamp'] = datetime.datetime.now().timestamp() if sensu_result_delete(data): data['result'] = 'okay' else: data['result'] = 'failed deleting result using sensu api for: ' + request.POST['entity'] return HttpResponse(json.dumps(data), mimetype) #@login_required(login_url=reverse_lazy('login')) @ajax_login_required def redoCheck(request): mimetype = 'application/json' data = {'result': None} if request.method == 'POST' and 'entity' in request.POST and request.POST['entity'] != '': client_name, check_name = request.POST['entity'].split(':') API_URL = settings.SENSU_API_URL + '/request' userAndPass = base64.b64encode(str.encode("%s:%s" % (settings.SENSU_API_USER, settings.SENSU_API_PASSWORD))).decode("ascii") headers = { 'X_REQUESTED_WITH' :'XMLHttpRequest', 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Authorization' : 'Basic %s' % userAndPass } try: client_name, check_name = request.POST['entity'].split(':') post_params = {'check': check_name, 'subscribers': ['client:' + client_name]} request = http.request('POST', API_URL, body=json.dumps(post_params), headers=headers) response = request.status if response == 202: data['result'] = 'accepted' elif response == 404: data['result'] = 'check not found' else: data['result'] = 'error' request.release_conn() except: logger.error("redoCheck failed request check_name: %s" % check_name) raise return HttpResponse(json.dumps(data), mimetype) @csrf_exempt def alert(request): if 'api_token' not in request.POST or request.POST['api_token'] != settings.API_TOKEN: return HttpResponse('Unauthorized', status=401) mimetype = 'application/json' data = {} if request.method == 'POST' and 'entity' in request.POST and request.POST['entity'] != '' and 'status' in request.POST and request.POST['status'] != '': data['entity'] = request.POST['entity'] data['status'] = int(request.POST['status']) data['timestamp'] = datetime.datetime.now().timestamp() if 'output' in request.POST: data['output'] = request.POST['output'].rstrip('\n') if 'history' in request.POST: data['history'] = request.POST.getlist('history') if 'occurrences' in request.POST: data['occurrences'] = request.POST['occurrences'] data['result-text'] = "got it!" 
data['result-code'] = 0 Channel('background-alert').send(data) logger.debug('alert triggered entity: %s status: %s' % (request.POST['entity'], str(request.POST['status']))) return HttpResponse(json.dumps(data), mimetype) def register_activate(request): if 'key' not in request.GET or request.GET['key'] == '': return HttpResponse('Unauthorized', status=401) data = {} data['username'] = request.GET['username'] data['key'] = request.GET['key'] data['errors'] = '' if 'email' in request.POST and request.POST['key'] != '' and request.POST['password'] == request.POST['password_repeat']: try: validate_password(request.POST['password']) # check for key against current user password logger.debug("validating registration key for user %s" % request.POST['username']) try: u = User.objects.get(username=request.POST['username']) if u.contact.email == request.POST['email']: if u.check_password(request.POST['key']): #user activate logger.debug("activating user %s" % request.POST['username']) u.set_password(request.POST['password']) u.is_active = True u.save() login(request, u) return HttpResponseRedirect(reverse_lazy('entities')) except: data['errors'] = 'An exception flew by!' except ValidationError as err: logger.debug("validating registration new password for user %s FAILED - %s" % (request.POST['username'], err)) data['errors'] = err return render(request, 'registration/activate.html', {'DATA':data}) @csrf_exempt def twilio_say(request): if 'api_token' not in request.GET or request.GET['api_token'] != settings.TWILIO_CALLBACK_API_TOKEN: return HttpResponse('Unauthorized', status=401) try: if 'CallStatus' in request.POST: for k in request.POST: logger.debug("***twilio_say got CallStatus in request %s : %s" % (k, request.POST[k])) except: pass if 'msg' in request.GET and request.GET['msg'] != '': logger.debug("twilio_say building xml for twilio API message: [%s]" % request.GET['msg']) r = twiml.Response() r.say(request.GET['msg'], voice='alice') r.hangup() return HttpResponse(r, content_type='text/xml') return HttpResponse('Unauthorized', status=401) @csrf_exempt def twilio_status(request): logger.debug('***twilio_status triggered ') if 'api_token' not in request.GET or request.GET['api_token'] != settings.TWILIO_CALLBACK_API_TOKEN: return HttpResponse('Unauthorized', status=401) try: for k in request.POST: logger.debug("***twilio_status POST in request. %s: %s" % (k, request.POST[k])) for k in request.GET: logger.debug("***twilio_status GET in request. 
%s: %s" % (k, request.GET[k])) except: pass try: if request.POST['CallStatus'] != 'completed': notifier = Notify({ 'entity': request.GET['entity'], 'status': request.GET['status'], 'output': 'twilio on duty retry' }) if 'retry_count' in request.GET: retry_count = int(request.GET['retry_count']) else: retry_count = 0 notifier.notify_onduty(twilio_retry=True, member_id=int(request.GET['member_id']), retry_count=retry_count) logger.debug("***twilio_status sent twilio_retry after failed calling %s" % (request.GET['member_id'])) except: logger.error('***twilio_status failed handling notify_onduty twilio_retry') raise return HttpResponse('I will handle it from here', status=200) @login_required(login_url=reverse_lazy('login')) def user_settings(request): logger.debug('settings view triggered by %s' % (request.user.username)) form = ContactForm(request.POST, instance=Contact.objects.get(user=request.user.id)) if form.is_valid: try: form.save() return HttpResponse('Done', status=200) except: return HttpResponse(json.dumps(form.errors), status=409) else: return HttpResponse(json.dumps(form.errors), status=409) return render(request, 'isubscribe/user_settings.html', {'DATA':data, 'form': form}) @login_required(login_url=reverse_lazy('login')) def onduty(request): if 'action' in request.GET and request.GET['action'] == 'onduty_agenda': data = [] FROM = datetime.datetime.strptime(request.GET['start'], "%Y-%m-%d") TO = datetime.datetime.strptime(request.GET['end'], "%Y-%m-%d") for event_start, event_end, instance in ScheduledOccurrence.objects.filter(event__in=ScheduledEvent.objects.filter(event=0)).all_occurrences(from_date=FROM, to_date=TO): description = [] for member in instance.event.members_list(): if not hasattr(member, 'contact') or member.contact.phone_number in [None, '']: description.append(member.username) else: description.append(member.username + ': ' + member.contact.phone_number) if request.user in instance.event.members_list(): event_privileges = True else: event_privileges = False data.append({ 'id': instance.id, 'title': instance.event.description, 'description': description, 'start': event_start, 'end': event_end, 'repeat': instance.repeat, 'repeat_until': instance.repeat_until, 'instance_start': instance.start, 'instance_end': instance.end, 'source': reverse_lazy('onduty'), 'event_id': instance.event.id, 'privileges': event_privileges }) for event_start, event_end, instance in ScheduledOccurrence.objects.filter(event__in=ScheduledEvent.objects.filter(event=1, members__in=[request.user.id])).all_occurrences(from_date=FROM, to_date=TO): description = [] for member in instance.event.members_list(): if not hasattr(member, 'contact') or member.contact.phone_number in [None, '']: description.append(member.username) else: description.append(member.username + ': ' + member.contact.phone_number) data.append({ 'id': instance.id, 'title': instance.event.description, 'description': description, 'start': event_start, 'end': event_end, 'repeat': instance.repeat, 'repeat_until': instance.repeat_until, 'instance_start': instance.start, 'instance_end': instance.end, 'source': reverse_lazy('onduty'), 'event_id': instance.event.id, 'privileges': True }) return JsonResponse(data, safe=False) elif 'action' in request.POST and request.POST['action'] == 'onduty_toggle_alerts': if cache.get('onduty_disable_alerts'): logger.debug('onduty view disable onduty alerts triggered by %s' % (request.user.username)) cache.set('onduty_disable_alerts', False, timeout=None) action_value = False else: 
logger.debug('onduty view enable onduty alerts triggered by %s' % (request.user.username)) cache.set('onduty_disable_alerts', True, timeout=None) action_value = True Group("on-duty").send({ "text": json.dumps({ "action_type": "onduty_toggle_alerts", "action_value": action_value, "action_by": request.user.username }) }) return HttpResponse(request.POST['action']) # no good! DnD objects should be created when creating user if len(ScheduledEvent.objects.filter(event=1, members__in=[request.user.id])) < 1: logger.info('onduty view creating DnD object for user: %s' % (request.user.username)) e = ScheduledEvent(event=1, description='DnD - ' + request.user.username) e.save() m = EventMembers(order=0, event_id=e.id, member_id=request.user.id) m.save() if 'onduty_disable_alerts' in cache.keys("onduty_disable_*"): onduty_disable_alerts = cache.get('onduty_disable_alerts') else: onduty_disable_alerts = False # check form if 'id' in request.POST: if request.POST['id'] == 'new': form = ScheduledEventForm(request.POST, user=request.user, editable=True) else: event_instance = ScheduledOccurrence.objects.get(id=request.POST['id']) form = ScheduledEventForm(request.POST, user=request.user, editable=True, instance=event_instance) if form.is_valid(): logger.debug('update ScheduledOccurrence') form.save() else: logger.debug('********************* ' + json.dumps(form.errors)) profile_form = ContactForm(instance=Contact.objects.get(user=request.user.id)) return render(request, 'isubscribe/cal.html', {'onduty_disable_alerts': onduty_disable_alerts, 'form_view': ScheduledEventForm(user=request.user, editable=False), 'form_edit': ScheduledEventForm(user=request.user, editable=True), 'profile_form': profile_form}) #@login_required(login_url=reverse_lazy('login')) @ajax_login_required def entity_history(request): data = [] mimetype = 'application/json' if request.method == 'POST' and 'entity' in request.POST and request.POST['entity'] != '': entity = request.POST['entity'] logger.debug("view entity_history user: %s entity: %s" % (request.user.username, entity)) for history_data in r.lrange('history_entity_' + entity, 0, 100): data.append(pickle.loads(history_data)) return HttpResponse(json.dumps(data), mimetype) #@login_required(login_url=reverse_lazy('login')) @ajax_login_required def entity_notify_history(request): data = [] mimetype = 'application/json' if request.method == 'POST' and 'entity' in request.POST and request.POST['entity'] != '': entity = request.POST['entity'] logger.debug("view entity_notify_history user: %s entity: %s" % (request.user.username, entity)) for history_data in r.lrange('notifyhistory_entity_' + entity, 0, 100): data.append(pickle.loads(history_data)) return HttpResponse(json.dumps(data), mimetype) #@login_required(login_url=reverse_lazy('login')) @ajax_login_required def check_config(request): mimetype = 'application/json' data = {} if request.method == 'POST' and 'entity' in request.POST and request.POST['entity'] != '': client_name, check_name = request.POST['entity'].split(':') #check_name = 'check_gw_tomcat_errors_1h' #data = cache.get('check_' + check_name) data = cache.get('check_' + request.POST['entity']) return HttpResponse(json.dumps(data), mimetype) #@login_required(login_url=reverse_lazy('login')) @ajax_login_required def check_result(request): mimetype = 'application/json' data = {} if request.method == 'POST' and 'entity' in request.POST and request.POST['entity'] != '': client_name, check_name = request.POST['entity'].split(':') API_URL = settings.SENSU_API_URL + 
'/results/' + client_name + '/' + check_name userAndPass = base64.b64encode(str.encode("%s:%s" % (settings.SENSU_API_USER, settings.SENSU_API_PASSWORD))).decode("ascii") headers = { 'X_REQUESTED_WITH' :'XMLHttpRequest', 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Authorization' : 'Basic %s' % userAndPass } try: request = http.request('GET', API_URL, None, headers, preload_content=False) response = request.status if response == 200: reader = codecs.getreader('utf-8') data = json.load(reader(request)) request.release_conn() else: logger.error('check_result response: %s' % str(response)) except: logger.error("check_result failed") raise return HttpResponse(json.dumps(data), mimetype) @permission_required('is_staff', login_url=reverse_lazy('login')) @login_required(login_url=reverse_lazy('login')) def whois(request): mimetype = 'application/json' # Get channel_layer function from channels.asgi import get_channel_layer from channels.sessions import session_for_reply_channel # passing group_channel takes channel name #channel_layer = get_channel_layer() #data = channel_layer.group_channels('notifications') #data = channel_layer.global_statistics() #data = channel_layer.channel_statistics('notifications') #data = get_channel_layer().group_channels('notifications') #data = Group("notifications").extensions #data = get_channel_layer().receive_twisted() #from channels import channel_layers #layer = channel_layers["default"] #print(layer.router.channels) data = [] from django.contrib.sessions.backends import cache as engine data = get_channel_layer().group_channels('notifications') active_users = [] for C in data: #Channel(C).send({"text": json.dumps({'clean_signal':True})}) c_session = session_for_reply_channel(C) session = engine.SessionStore(c_session._session_key) #print(c_session._session['_auth_user_id']) #print(session.keys()) #print(session.get('username', None), session.get_expiry_date()) username = session.get('username', None) # this is the same # username = c_session.get('username', None) if username not in active_users and username != None: active_users.append(username) data = active_users return HttpResponse(json.dumps(data), mimetype) @permission_required('is_staff', login_url=reverse_lazy('login')) @login_required(login_url=reverse_lazy('login')) def test_1(request): mimetype = 'application/json' data = [] API_URL = settings.SENSU_API_URL + '/events' userAndPass = base64.b64encode(str.encode("%s:%s" % (settings.SENSU_API_USER, settings.SENSU_API_PASSWORD))).decode("ascii") headers = { 'X_REQUESTED_WITH' :'XMLHttpRequest', 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Authorization' : 'Basic %s' % userAndPass } try: request = http.request('GET', API_URL, None, headers, preload_content=False) response = request.status if response == 200: reader = codecs.getreader('utf-8') reponse = json.load(reader(request)) request.release_conn() else: logger.error('response: %s' % str(response)) except: logger.error("failed") raise for object in reponse: if 'action' not in object or 'handle' not in object['check']: continue client = object['client']['name'] check = object['check']['name'] status = int(object['check']['status']) output = object['check']['output'] history = object['check']['history'] occurrences = int(object['occurrences']) timestamp = object['timestamp'] last_state_change = object['last_state_change'] status_duration = timestamp - last_state_change entity = client + ':' + check ABORT = False message = {'timestamp': timestamp, 'entity': entity, 'status': 
status, 'output': output, 'history': history, 'occurrences': occurrences } try: last_known = cache.get('last_known_' + entity) except: last_known = {'timestamp': 0} pass if last_known != None and last_known['timestamp'] == object['timestamp']: continue if object['action'] == 'flapping' and object['check']['handle'] == True and object['check']['type'] == 'standard' and object['silenced'] == False: message['detect'] = 'flapping' elif object['action'] == 'create' and object['check']['handle'] == True and object['check']['type'] == 'standard' and object['silenced'] == False: if status > 0 and occurrences < object['check']['occurrences']: message['detect'] = 'low_occurrences' else: continue cache.set('last_known_' + entity, message, timeout=object['check']['interval']) if not ABORT: r.lpush('attention_' + entity, pickle.dumps(message)) data.append(message) #cache.set('flap_' + entity, object, timeout=object['check']['interval']) #message['output'] = '(flapping %s) %s' % (object['check']['total_state_change'], message['output']) #Channel('background-alert').send(message) return HttpResponse(json.dumps(data), mimetype) #@login_required(login_url=reverse_lazy('login')) @ajax_login_required def trends(request): mimetype = 'application/json' try: data = cache.get('trends_all') except: data = [] pass return HttpResponse(json.dumps(data), mimetype) @permission_required('is_staff', login_url=reverse_lazy('login')) @login_required(login_url=reverse_lazy('login')) def mySubscribe(request): def user_id_subsribtions(user_id): subscriptions = [] for word in cache.keys("rule_*"): entity = re.sub(r'^rule_', '', word) status_1 = False status_2 = False try: rule = cache.get('rule_' + entity) if '1' in rule and user_id in rule['1']: status_1 = True if '2' in rule and user_id in rule['2']: status_2 = True except: pass if 'silent_' + entity in cache.keys("silent_*"): silent = True else: silent = False if status_1 == True or status_2 == True: subscriptions.append({ 'entity': entity, 'status_1': status_1, 'status_2': status_2, 'silent': silent }) return subscriptions mimetype = 'application/json' data = user_id_subsribtions(request.user.id) return HttpResponse(json.dumps(data), mimetype) @login_required(login_url=reverse_lazy('login')) def rules(request): mimetype = 'application/json' data = {} return HttpResponse(json.dumps(data), mimetype) #@permission_required('is_staff', login_url=reverse_lazy('login')) #@login_required(login_url=reverse_lazy('login')) @csrf_exempt def test(request): for k in request.POST: logger.debug("***POSTED: %s: %s" % (k, request.POST[k])) mimetype = 'application/json' data = {'test': True} return HttpResponse(json.dumps(data), mimetype)
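# Illustrative sketch (not part of the views module above): the urllib3 + HTTP
# Basic auth pattern that check_result() and test_1() use to query the Sensu
# API, pulled out as a standalone helper. The endpoint, user and password in
# the usage comment are placeholders.
import base64
import codecs
import json
import urllib3


def fetch_sensu_json(api_url, user, password):
    """Fetch and decode a JSON document from a Sensu-style API endpoint."""
    pool = urllib3.PoolManager()
    creds = base64.b64encode(str.encode("%s:%s" % (user, password))).decode("ascii")
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Authorization': 'Basic %s' % creds,
    }
    response = pool.request('GET', api_url, headers=headers, preload_content=False)
    if response.status != 200:
        raise RuntimeError('unexpected status: %s' % response.status)
    reader = codecs.getreader('utf-8')
    data = json.load(reader(response))
    response.release_conn()
    return data

# Example (placeholder endpoint and credentials):
# events = fetch_sensu_json('http://sensu.example.com:4567/events', 'admin', 'secret')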
from app.objects.secondclass.c_parserconfig import ParserConfig
from app.utility.base_object import BaseObject


class Parser(BaseObject):

    @property
    def unique(self):
        return self.module

    @classmethod
    def from_json(cls, json):
        parserconfigs = [ParserConfig.from_json(r) for r in json['relationships']]
        return cls(module=json['module'], parserconfigs=parserconfigs)

    @property
    def display(self):
        return dict(module=self.module, relationships=[p.display for p in self.parserconfigs])

    def __init__(self, module, parserconfigs):
        super().__init__()
        self.module = module
        self.parserconfigs = parserconfigs
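# A minimal usage sketch for the Parser object above (illustrative only). The
# module path is a placeholder and no ParserConfig relationships are attached,
# so the example stays self-contained.
if __name__ == '__main__':
    parser = Parser(module='plugins.example.app.parsers.basic', parserconfigs=[])
    assert parser.unique == 'plugins.example.app.parsers.basic'
    print(parser.display)  # {'module': 'plugins.example.app.parsers.basic', 'relationships': []}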
import json from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory import cattr import pytest from pytest_mock import MockerFixture from ambramelin.util import config as util_config from ambramelin.util.config import Config, Environment, User class TestLoadConfig: def test_with_file(self, mocker: MockerFixture) -> None: config = Config( current="envname", envs={"envname": Environment(url="envurl", user="username")}, users={"username": User(credentials_manager="dummy")}, ) with NamedTemporaryFile() as conf_file: conf_file.write(json.dumps(cattr.unstructure(config), indent=2).encode()) conf_file.seek(0) mocker.patch.object( util_config, "_get_config_path", return_value=Path(conf_file.name) ) assert util_config.load_config() == config def test_with_no_file(self, mocker: MockerFixture) -> None: mocker.patch.object( util_config, "_get_config_path", return_value=Path("nonexistent") ) assert util_config.load_config() == Config() def test_save_config(mocker: MockerFixture) -> None: config = Config( current="envname", envs={"envname": Environment(url="envurl", user="username")}, users={"username": User(credentials_manager="dummy")}, ) with TemporaryDirectory() as tmp: path = Path(tmp) / "config.json" mocker.patch.object(util_config, "_get_config_path", return_value=path) util_config.save_config(config) with path.open("r") as f: assert cattr.structure(json.loads(f.read()), Config) == config def test_update_config(mocker: MockerFixture) -> None: mocker.patch.object(util_config, "load_config", return_value=Config()) mock_save_config = mocker.patch.object(util_config, "save_config") with util_config.update_config() as config: config.current = "current" mock_save_config.assert_called_once_with(config) @pytest.mark.parametrize( "config,result", ( (Config(envs={"env": Environment(url="")}), True), (Config(), False), ), ) def test_envs_added(config: Config, result: bool) -> None: assert util_config.envs_added(config) is result @pytest.mark.parametrize( "config,result", ( (Config(current="env"), True), (Config(), False), ), ) def test_env_selected(config: Config, result: bool) -> None: assert util_config.env_selected(config) is result @pytest.mark.parametrize( "env_name,result", ( ("env1", True), ("env2", False), ), ) def test_env_exists(env_name: str, result: bool) -> None: assert ( util_config.env_exists(Config(envs={"env1": Environment(url="")}), env_name) is result ) @pytest.mark.parametrize( "config,result", ( (Config(users={"user": User(credentials_manager="keychain")}), True), (Config(), False), ), ) def test_users_added(config: Config, result: bool) -> None: assert util_config.users_added(config) is result @pytest.mark.parametrize( "user_name,result", ( ("user1", True), ("user2", False), ), ) def test_user_exists(user_name: str, result: bool) -> None: assert ( util_config.user_exists( Config(users={"user1": User(credentials_manager="keychain")}), user_name ) is result )
import re
import time
import warnings

import pandas as pd
from tqdm import tqdm

from autox.autox_server.util import log

warnings.filterwarnings('ignore')


def fe_stat_for_same_prefix(G_df_dict, G_data_info, G_hist, is_train, remain_time, AMPERE):
    # Build row-wise statistic features on the G_df_dict['BIG'] table
    start = time.time()
    log('[+] feature engineer, stat_for_same_prefix')

    if is_train:
        G_hist['FE_stat_for_same_prefix'] = []
        cols_agg_list = []
        cols = G_df_dict['BIG'].columns
        # Columns ending in "_1" are treated as the first member of a same-prefix group
        c_1_list = [col for col in cols if bool(re.search(r'_1$', str(col)))]
        for c_1 in c_1_list:
            c_list = [c_1]
            for i in range(2, 20):
                c_i = c_1.replace('_1', '_{}'.format(i))
                if c_i in cols:
                    c_list.append(c_i)
            # Only keep groups whose columns are all numeric
            num_flag = True
            for item in c_list:
                if str(G_df_dict['BIG'][item].dtype) == 'object':
                    num_flag = False
            # Only groups of exactly three columns are used
            if num_flag and len(c_list) == 3:
                cols_agg_list.append(c_list)
        G_hist['FE_stat_for_same_prefix'] = cols_agg_list
        log("stat_for_same_prefix features: {}".format(G_hist['FE_stat_for_same_prefix']))

    if not AMPERE:
        G_df_dict['FE_stat_for_same_prefix'] = pd.DataFrame()
        for cols_agg in tqdm(G_hist['FE_stat_for_same_prefix']):
            G_df_dict['FE_stat_for_same_prefix']['{}__stat_for_same_prefix__mean'.format('__col__'.join(cols_agg))] = G_df_dict['BIG'][cols_agg].mean(axis=1)
            # G_df_dict['FE_stat_for_same_prefix']['{}__stat_for_same_prefix__median'.format('__col__'.join(cols_agg))] = G_df_dict['BIG'][cols_agg].median(axis=1)
            G_df_dict['FE_stat_for_same_prefix']['{}__stat_for_same_prefix__min'.format('__col__'.join(cols_agg))] = G_df_dict['BIG'][cols_agg].min(axis=1)
            G_df_dict['FE_stat_for_same_prefix']['{}__stat_for_same_prefix__max'.format('__col__'.join(cols_agg))] = G_df_dict['BIG'][cols_agg].max(axis=1)
            # G_df_dict['FE_stat_for_same_prefix']['{}__stat_for_same_prefix__std'.format('__col__'.join(cols_agg))] = G_df_dict['BIG'][cols_agg].std(axis=1)

    end = time.time()
    remain_time -= (end - start)
    log("time consumption: {}".format(str(end - start)))
    log("remain_time: {} s".format(remain_time))
    return remain_time
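# Illustrative sketch (not part of autox): the row-wise same-prefix statistics
# computed above, shown on a toy frame whose columns share the prefix "score_".
if __name__ == '__main__':
    toy_big = pd.DataFrame({
        'score_1': [1.0, 4.0],
        'score_2': [2.0, 5.0],
        'score_3': [3.0, 6.0],
    })
    cols_agg = ['score_1', 'score_2', 'score_3']
    prefix = '__col__'.join(cols_agg)
    toy_fe = pd.DataFrame()
    toy_fe[prefix + '__stat_for_same_prefix__mean'] = toy_big[cols_agg].mean(axis=1)
    toy_fe[prefix + '__stat_for_same_prefix__min'] = toy_big[cols_agg].min(axis=1)
    toy_fe[prefix + '__stat_for_same_prefix__max'] = toy_big[cols_agg].max(axis=1)
    print(toy_fe)  # mean: [2.0, 5.0], min: [1.0, 4.0], max: [3.0, 6.0]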
from flask import make_response, jsonify

from fusillade.directory.resource import ResourceType, ResourceId
from fusillade.utils.authorize import authorize


@authorize(["fus:GetResources"],
           ["arn:hca:fus:*:*:resource/{resource_type_name}/{resource_id}"],
           resource_params=['resource_type_name', 'resource_id'])
def get(token_info: dict, resource_type_name, resource_id):
    rid = ResourceId(resource_type_name, resource_id)
    info = rid.get_info()
    return make_response(info, 200)


@authorize(["fus:PostResources"],
           ["arn:hca:fus:*:*:resource/{resource_type_name}/{resource_id}"],
           resource_params=['resource_type_name', 'resource_id'])
def post(token_info: dict, resource_type_name, resource_id):
    rt = ResourceType(resource_type_name)
    rt.create_id(resource_id)
    return make_response(jsonify({
        'msg': f"Created resource/{rt.name}/id/{resource_id}.",
        'resource_type': rt.name,
        'resource_id': resource_id}), 201)


@authorize(["fus:DeleteResources"],
           ["arn:hca:fus:*:*:resource/{resource_type_name}/{resource_id}"],
           resource_params=['resource_type_name', 'resource_id'])
def delete(token_info: dict, resource_type_name, resource_id):
    rid = ResourceId(resource_type_name, name=resource_id)
    rid.delete_node()
    return make_response(jsonify({
        'msg': f"Deleted resource/{rid.resource_type}/id/{resource_id}.",
        'resource_type': rid.resource_type.name,
        'resource_id': rid.name}), 200)
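# Hypothetical illustration (not fusillade code): how the ARN templates passed
# to @authorize above can be resolved from the request's path parameters named
# in resource_params. The helper below is an assumption for explanation only.
def resolve_resource_arn(template: str, **params) -> str:
    """Fill '{placeholder}' fields in an ARN template from path parameters."""
    return template.format(**params)

# resolve_resource_arn("arn:hca:fus:*:*:resource/{resource_type_name}/{resource_id}",
#                      resource_type_name="project", resource_id="1234")
# -> 'arn:hca:fus:*:*:resource/project/1234'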
############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """Generic two-dimensional array type (in context of security) """ from persistent import Persistent from zope.annotation import IAnnotations from zope.security.management import queryInteraction class SecurityMap(object): def __init__(self): self._clear() def _clear(self): self._byrow = {} self._bycol = {} def __nonzero__(self): return bool(self._byrow) __bool__ = __nonzero__ def addCell(self, rowentry, colentry, value): # setdefault may get expensive if an empty mapping is # expensive to create, for PersistentDict for instance. row = self._byrow.get(rowentry) if row: if row.get(colentry) is value: return False else: row = self._byrow[rowentry] = {} col = self._bycol.get(colentry) if not col: col = self._bycol[colentry] = {} row[colentry] = value col[rowentry] = value self._invalidated_interaction_cache() return True def _invalidated_interaction_cache(self): # Invalidate this threads interaction cache interaction = queryInteraction() if interaction is not None: try: invalidate_cache = interaction.invalidate_cache except AttributeError: pass else: invalidate_cache() def delCell(self, rowentry, colentry): row = self._byrow.get(rowentry) if row and (colentry in row): del row[colentry] if not row: del self._byrow[rowentry] col = self._bycol[colentry] del col[rowentry] if not col: del self._bycol[colentry] self._invalidated_interaction_cache() return True return False def queryCell(self, rowentry, colentry, default=None): row = self._byrow.get(rowentry) if row: return row.get(colentry, default) else: return default def getCell(self, rowentry, colentry): marker = object() cell = self.queryCell(rowentry, colentry, marker) if cell is marker: raise KeyError('Not a valid row and column pair.') return cell def getRow(self, rowentry): row = self._byrow.get(rowentry) if row: return list(row.items()) else: return [] def getCol(self, colentry): col = self._bycol.get(colentry) if col: return list(col.items()) else: return [] def getAllCells(self): res = [] for r in self._byrow.keys(): for c in self._byrow[r].items(): res.append((r,) + c) return res class PersistentSecurityMap(SecurityMap, Persistent): def addCell(self, rowentry, colentry, value): if SecurityMap.addCell(self, rowentry, colentry, value): self._p_changed = 1 def delCell(self, rowentry, colentry): if SecurityMap.delCell(self, rowentry, colentry): self._p_changed = 1 class AnnotationSecurityMap(SecurityMap): def __init__(self, context): self.__parent__ = context self._context = context annotations = IAnnotations(self._context) map = annotations.get(self.key) if map is None: self._byrow = {} self._bycol = {} else: self._byrow = map._byrow self._bycol = map._bycol self.map = map def _changed(self): map = self.map if isinstance(map, PersistentSecurityMap): map._p_changed = 1 else: map = self.map = PersistentSecurityMap() map._byrow = self._byrow map._bycol = self._bycol annotations = IAnnotations(self._context) 
annotations[self.key] = map def addCell(self, rowentry, colentry, value): if SecurityMap.addCell(self, rowentry, colentry, value): self._changed() def delCell(self, rowentry, colentry): if SecurityMap.delCell(self, rowentry, colentry): self._changed()
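# A small usage sketch for SecurityMap (illustrative only; the row/column keys
# are placeholders). Cells are addressed by (row, column) pairs and can be
# queried by cell, row, or column.
if __name__ == '__main__':
    sm = SecurityMap()
    sm.addCell('zope.Manager', 'alice', True)
    sm.addCell('zope.Member', 'alice', True)
    sm.addCell('zope.Manager', 'bob', False)
    print(sm.queryCell('zope.Manager', 'alice'))            # True
    print(sm.getRow('zope.Manager'))                        # [('alice', True), ('bob', False)]
    print(sm.getCol('alice'))                               # [('zope.Manager', True), ('zope.Member', True)]
    sm.delCell('zope.Manager', 'bob')
    print(sm.queryCell('zope.Manager', 'bob', 'missing'))   # 'missing'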
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from headstock.lib.utils import extract_from_stanza

__all__ = ['ProxyRegistry']


class ProxyRegistry(object):
    def __init__(self, stream):
        self.stream = stream
        self._dispatchers = {}
        self.logger = None

    def set_logger(self, logger):
        self.logger = logger

    def register(self, name, proxy_dispatcher, namespace=None):
        client = self.stream.get_client()
        parser = client.get_parser()
        parser.register_on_element(name, namespace=namespace,
                                   dispatcher=proxy_dispatcher)

    def cleanup(self, name, namespace=None):
        client = self.stream.get_client()
        parser = client.get_parser()
        parser.unregister_on_element(name, namespace=namespace)

    def add_dispatcher(self, name, dispatcher):
        self._dispatchers[name] = dispatcher

    def has_dispatcher(self, name):
        return name in self._dispatchers

    def dispatch(self, name, caller, e):
        if name in self._dispatchers:
            if self.logger:
                self.logger.debug("REGISTRY: %s %r" % (name, repr(self._dispatchers[name])))
            caller.stanza = extract_from_stanza(e)
            self._dispatchers[name](caller, e)
import six import py import pytest import execnet pytest_plugins = "pytester" if six.PY2: @pytest.fixture(scope="session", autouse=True) def _ensure_imports(): # we import some modules because pytest-2.8's testdir fixture # will unload all modules after each test and this cause # (unknown) problems with execnet.Group() execnet.Group execnet.makegateway @pytest.fixture(autouse=True) def _divert_atexit(request, monkeypatch): import atexit finalizers = [] def fake_register(func, *args, **kwargs): finalizers.append((func, args, kwargs)) monkeypatch.setattr(atexit, "register", fake_register) yield while finalizers: func, args, kwargs = finalizers.pop() func(*args, **kwargs) def pytest_addoption(parser): parser.addoption( "--gx", action="append", dest="gspecs", help="add a global test environment, XSpec-syntax. ", ) @pytest.fixture def specssh(request): return getspecssh(request.config) # configuration information for tests def getgspecs(config): return [execnet.XSpec(spec) for spec in config.getvalueorskip("gspecs")] def getspecssh(config): xspecs = getgspecs(config) for spec in xspecs: if spec.ssh: if not py.path.local.sysfind("ssh"): py.test.skip("command not found: ssh") return str(spec) py.test.skip("need '--gx ssh=...'") def getsocketspec(config): xspecs = getgspecs(config) for spec in xspecs: if spec.socket: return spec py.test.skip("need '--gx socket=...'")
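# Illustrative only: how a test might consume the `specssh` fixture defined
# above to open an execnet gateway over the spec selected with `--gx ssh=...`.
def test_ssh_gateway_sketch(specssh):
    gw = execnet.makegateway(specssh)
    channel = gw.remote_exec("channel.send(6 * 7)")
    assert channel.receive() == 42
    gw.exit()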
SECOND_REMIND_TIME = 5
THIRD_REMIND_TIME = 15
LAST_REMIND_TIME = 30
EXPIRED_REMIND_TIME = LAST_REMIND_TIME + 1

DATETIME_FORMAT = '%Y-%m-%d %H:%M:00'

# Date/time format for week
START_WEEK_FORMAT = '%Y-%m-%d 00:01:00'
END_WEEK_FORMAT = '%Y-%m-%d 23:59:00'

# List flags
LIST_ALL_FLAG = 'all'
LIST_WEEK_FLAG = 'week'

HOUR = 60

### Callback data
# LIST_BUTTON
LIST_ALL_BUTTON = 'list_all'
LIST_WEEK_BUTTON = 'week'
LIST_10_BUTTON = 'list_10'
LIST_30_BUTTON = 'list_30'

# DONE_BUTTON
DONE_BUTTON = 'done'

# POSTPONE_BUTTON
POSTPONE_30M_BUTTON = 'postpone_30m'
POSTPONE_1H_BUTTON = 'postpone_1h'
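# Illustrative sketch (assumed usage, not taken from the bot's handlers): how
# the format strings and remind offsets above combine with datetime.strftime.
if __name__ == '__main__':
    from datetime import datetime, timedelta

    now = datetime(2021, 6, 1, 9, 30)
    print(now.strftime(DATETIME_FORMAT))    # '2021-06-01 09:30:00'
    print(now.strftime(START_WEEK_FORMAT))  # '2021-06-01 00:01:00'
    print((now + timedelta(minutes=SECOND_REMIND_TIME)).strftime(DATETIME_FORMAT))
    # '2021-06-01 09:35:00'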
# Generated by Django 3.2.7 on 2021-11-03 14:40 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('app', '0002_profile'), ] operations = [ migrations.CreateModel( name='Cart', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ], ), migrations.CreateModel( name='Category', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Product', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=255)), ('price', models.DecimalField(decimal_places=2, max_digits=12)), ('description', models.TextField()), ('image', models.TextField()), ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='products', to='app.category')), ('users', models.ManyToManyField(through='app.Cart', to=settings.AUTH_USER_MODEL)), ], ), migrations.RemoveField( model_name='profile', name='notes', ), migrations.DeleteModel( name='Notes', ), migrations.DeleteModel( name='Subject', ), migrations.AddField( model_name='cart', name='product', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.product'), ), migrations.AddField( model_name='cart', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), ]
# -*- coding: utf-8 -*- """ Copyright 2018 Alexey Melnikov and Katja Ried. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. Please acknowledge the authors when re-using this code and maintain this notice intact. Code written by Katja Ried, implementing ideas from 'Modelling collective motion based on the principle of agency' Katja Ried, Thomas Muller & Hans J. Briegel arXiv:1712.01334 (2017) """ import numpy as np class TaskEnvironment(object): """This is a one-dimensional, circular world in which multiple agents move around. Percepts show agents the net movement of their close neighbours relative to themselves. Actions are turning or keeping going. Agents are rewarded for aligning themselves with their neighbours. This environment is used to study the collective motion of marching locusts. Reference: 'Modelling collective motion based on the principle of agency', Katja Ried, Thomas Muller and Hans J. Briegel, arXiv:1712.01334.""" def __init__(self, num_agents, world_size, sensory_range): """Initializes a world. Arguments: num_agents (int>0) - number of agents world_size (int>0) - length of world; ends are identified (ie world is circular) sensory range (int>0) - how many steps away an agent can see others. Simple example: env = TaskEnvironment(5,40,4) (for 5 agents) max_num_trials, max_steps_per_trial = 20, 30 """ self.num_agents = num_agents; self.world_size = world_size; self.sensory_range = sensory_range; self.num_actions = 2 #turn or keep going self.num_percepts_list = [5] self.num_max_steps_per_trial = 10**9 self.positions = np.random.randint(world_size,size=num_agents) #where each agent is #Note that multiple agents can occupy the same position - they do not collide. 
self.speeds = np.ndarray.tolist(np.random.choice([-1,1],num_agents)) #which way they are going #note that positions is an array whereas speeds is a list def get_neighbours(self,agent_index): """Determine indices of all agents within visual range including self.""" focal_pos = self.positions[agent_index]; neighbours = np.ndarray.tolist(np.where(dist_mod(self.positions,focal_pos,self.world_size)<self.sensory_range+1)[0]); return(neighbours) def net_rel_mvmt(self,agent_index): """Returns the net flow of all neighbours (excluding self), with sign indicating movement relative to orientation of focal agent.""" neighbours = self.get_neighbours(agent_index) neighbours.remove(agent_index) return(self.speeds[agent_index]*sum([self.speeds[index] for index in neighbours])) def get_percept(self,agent_index): """Given an agent index, returns an integer [0,4] encoding the net flow relative to self (truncated at abs<=2).""" #compute percept net_rel_move = self.net_rel_mvmt(agent_index) #map to limited range of percepts if net_rel_move<-2: net_rel_move=-2 if net_rel_move>+2: net_rel_move=2 return(net_rel_move+2) def move(self,agent_index, action): """Given an agent_index and that agent's action (0 for turn, 1 for keep going), this function updates their speed and position and computes their reward, along with the percept for the next agent in the list.""" self.speeds[agent_index] = self.speeds[agent_index]*(action*2-1) self.positions[agent_index] = np.remainder(self.positions[agent_index]+self.speeds[agent_index],self.world_size) reward = (np.sign(self.net_rel_mvmt(agent_index))+1)/2 next_percept = self.get_percept((agent_index+1)%self.num_agents) return ([next_percept], reward, False) def reset(self): """Sets positions and speeds back to random values and returns the percept for the 0th agent.""" self.positions = np.random.randint(self.world_size,size=self.num_agents) self.speeds = np.ndarray.tolist(np.random.choice([-1,1],self.num_agents)) return([self.get_percept(0)]) def dist_mod(num1,num2,mod): """Distance between num1 and num2 (absolute value) if they are given modulo an integer mod, ie between zero and mod. Also works if num1 is an array (not a list) and num2 a number or vice versa.""" diff=np.remainder(num1-num2,mod) diff=np.minimum(diff, mod-diff) return(diff)
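# A brief usage sketch for TaskEnvironment, following the example in the
# __init__ docstring (5 agents, world of length 40, sensory range 4). Actions
# are chosen at random here purely to exercise the interface; illustrative only.
if __name__ == '__main__':
    env = TaskEnvironment(5, 40, 4)
    percept = env.reset()
    for step in range(10):
        agent_index = step % env.num_agents
        action = np.random.randint(env.num_actions)  # 0: turn, 1: keep going
        percept, reward, done = env.move(agent_index, action)
        print(step, agent_index, percept, reward)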
from flask import current_app as app from flask import Flask, request from passlib.hash import pbkdf2_sha256 from jose import jwt from ..tools import tools from ..auth import auth import json class User: def __init__(self): self.defaults = { "id": tools.randID(), "ip_addresses": [request.remote_addr], "acct_active": True, "date_created": tools.nowDatetimeUTC(), "last_login": tools.nowDatetimeUTC(), "first_name": "", "last_name": "", "email": "", "plan": "basic" } def get(self): token_data = jwt.decode(request.headers.get('AccessToken'), app.config['SECRET_KEY']) user = app.db.users.find_one({"id": token_data['user_id']}, { "_id": 0, "password": 0 }) if user: resp = tools.JsonResp(user, 200) else: resp = tools.JsonResp({"message": "User not found"}, 404) return resp def get_auth(self): access_token = request.headers.get("AccessToken") refresh_token = request.headers.get("RefreshToken") resp = tools.JsonResp({"message": "User not logged in"}, 401) if access_token: try: decoded = jwt.decode(access_token, app.config["SECRET_KEY"]) resp = tools.JsonResp(decoded, 200) except: # If the access_token has expired, get a new access_token - so long as the refresh_token hasn't # expired yet resp = auth.refreshAccessToken(refresh_token) return resp def login(self): resp = tools.JsonResp({"message": "Invalid user credentials"}, 403) try: data = json.loads(request.data) email = data["email"].lower() user = app.db.users.find_one({"email": email}, {"_id": 0}) if user and pbkdf2_sha256.verify(data["password"], user["password"]): access_token = auth.encodeAccessToken(user["id"], user["email"], user["plan"]) refresh_token = auth.encodeRefreshToken(user["id"], user["email"], user["plan"]) app.db.users.update({"id": user["id"]}, {"$set": { "refresh_token": refresh_token, "last_login": tools.nowDatetimeUTC() }}) resp = tools.JsonResp({ "id": user["id"], "email": user["email"], "first_name": user["first_name"], "last_name": user["last_name"], "plan": user["plan"], "access_token": access_token, "refresh_token": refresh_token }, 200) except Exception: pass return resp def logout(self): try: tokenData = jwt.decode(request.headers.get("AccessToken"), app.config["SECRET_KEY"]) app.db.users.update({"id": tokenData["user_id"]}, {'$unset': {"refresh_token": ""}}) # Note: At some point I need to implement Token Revoking/Blacklisting # General info here: https://flask-jwt-extended.readthedocs.io/en/latest/blacklist_and_token_revoking.html except: pass resp = tools.JsonResp({"message": "User logged out"}, 200) return resp def add(self): data = json.loads(request.data) expected_data = { "first_name": data['first_name'], "last_name": data['last_name'], "email": data['email'].lower(), "password": data['password'] } # Merge the posted data with the default user attributes self.defaults.update(expected_data) user = self.defaults # Encrypt the password user["password"] = pbkdf2_sha256.encrypt(user["password"], rounds=20000, salt_size=16) # Make sure there isn"t already a user with this email address existing_email = app.db.users.find_one({"email": user["email"]}) if existing_email: resp = tools.JsonResp({ "message": "There's already an account with this email address", "error": "email_exists" }, 400) else: if app.db.users.save(user): # Log the user in (create and return tokens) access_token = auth.encodeAccessToken(user["id"], user["email"], user["plan"]) refresh_token = auth.encodeRefreshToken(user["id"], user["email"], user["plan"]) app.db.users.update({"id": user["id"]}, { "$set": { "refresh_token": refresh_token } }) resp = 
tools.JsonResp({ "id": user["id"], "email": user["email"], "first_name": user["first_name"], "last_name": user["last_name"], "plan": user["plan"], "access_token": access_token, "refresh_token": refresh_token }, 200) else: resp = tools.JsonResp({"message": "User could not be added"}, 400) return resp
from kgx import PandasTransformer
from kgx import ObanRdfTransformer
from kgx import PrefixManager

import networkx as nx
import logging

HAS_EVIDENCE_CURIE = 'RO:0002558'
HAS_EVIDENCE_IRI = 'http://purl.obolibrary.org/obo/RO_0002558'

# TODO: sync with context when we switch to identifiers.org
PUB = 'http://www.ncbi.nlm.nih.gov/pubmed/18375391'


def test_prefixmanager():
    M = PrefixManager()
    assert M.contract(HAS_EVIDENCE_IRI) == HAS_EVIDENCE_CURIE
    assert M.expand(HAS_EVIDENCE_CURIE) == HAS_EVIDENCE_IRI
    #assert M.contract(PUB) == 'PMID:18375391'
    #assert M.expand(M.contract(PUB)) == PUB
import sympy.physics.mechanics as me import sympy as sm import math as m import numpy as np x, y = me.dynamicsymbols('x y') xd, yd = me.dynamicsymbols('x y', 1) e1 = (x+y)**2+(x-y)**3 e2 = (x-y)**2 e3 = x**2+y**2+2*x*y m1 = sm.Matrix([e1,e2]).reshape(2, 1) m2 = sm.Matrix([(x+y)**2,(x-y)**2]).reshape(1, 2) m3 = m1+sm.Matrix([x,y]).reshape(2, 1) am = sm.Matrix([i.expand() for i in m1]).reshape((m1).shape[0], (m1).shape[1]) cm = sm.Matrix([i.expand() for i in sm.Matrix([(x+y)**2,(x-y)**2]).reshape(1, 2)]).reshape((sm.Matrix([(x+y)**2,(x-y)**2]).reshape(1, 2)).shape[0], (sm.Matrix([(x+y)**2,(x-y)**2]).reshape(1, 2)).shape[1]) em = sm.Matrix([i.expand() for i in m1+sm.Matrix([x,y]).reshape(2, 1)]).reshape((m1+sm.Matrix([x,y]).reshape(2, 1)).shape[0], (m1+sm.Matrix([x,y]).reshape(2, 1)).shape[1]) f = (e1).expand() g = (e2).expand() a = sm.factor((e3), x) bm = sm.Matrix([sm.factor(i, x) for i in m1]).reshape((m1).shape[0], (m1).shape[1]) cm = sm.Matrix([sm.factor(i, x) for i in m1+sm.Matrix([x,y]).reshape(2, 1)]).reshape((m1+sm.Matrix([x,y]).reshape(2, 1)).shape[0], (m1+sm.Matrix([x,y]).reshape(2, 1)).shape[1]) a = (e3).diff(x) b = (e3).diff(y) cm = sm.Matrix([i.diff(x) for i in m2]).reshape((m2).shape[0], (m2).shape[1]) dm = sm.Matrix([i.diff(x) for i in m1+sm.Matrix([x,y]).reshape(2, 1)]).reshape((m1+sm.Matrix([x,y]).reshape(2, 1)).shape[0], (m1+sm.Matrix([x,y]).reshape(2, 1)).shape[1]) frame_a = me.ReferenceFrame('a') frame_b = me.ReferenceFrame('b') frame_b.orient(frame_a, 'DCM', sm.Matrix([1,0,0,1,0,0,1,0,0]).reshape(3, 3)) v1=x*frame_a.x+y*frame_a.y+x*y*frame_a.z e=(v1).diff(x, frame_b) fm = sm.Matrix([i.diff(sm.Symbol('t')) for i in m1]).reshape((m1).shape[0], (m1).shape[1]) gm = sm.Matrix([i.diff(sm.Symbol('t')) for i in sm.Matrix([(x+y)**2,(x-y)**2]).reshape(1, 2)]).reshape((sm.Matrix([(x+y)**2,(x-y)**2]).reshape(1, 2)).shape[0], (sm.Matrix([(x+y)**2,(x-y)**2]).reshape(1, 2)).shape[1]) h=(v1).dt(frame_b)
import os import tarfile from argparse import ArgumentParser, Namespace from dataclasses import dataclass from enum import Enum from pathlib import Path from typing import Any, Dict, List, Optional, Union import mlflow.pytorch import pandas as pd import pytorch_lightning as pl import requests import torch from pytorch_lightning.callbacks import ( EarlyStopping, LearningRateMonitor, ModelCheckpoint, ) from pytorch_lightning.utilities import rank_zero_info from sklearn.metrics import accuracy_score from sklearn.model_selection import train_test_split from torch.optim.lr_scheduler import ReduceLROnPlateau from torch.utils.data import DataLoader, Dataset from transformers import ( AdamW, BatchEncoding, BertConfig, BertForSequenceClassification, BertTokenizerFast, PretrainedConfig, PreTrainedModel, PreTrainedTokenizerFast, ) from transformers.modeling_outputs import SequenceClassifierOutput from transformers.optimization import Adafactor # huggingface/tokenizers: Disabling parallelism to avoid deadlocks. os.environ["TOKENIZERS_PARALLELISM"] = "false" IntList = List[int] IntListList = List[IntList] StrList = List[str] TEXT_COL_NAME: str = "text" LABEL_COL_NAME: str = "label" class LABELS(Enum): sports_watch = "sports-watch" topic_news = "topic-news" dokujo_tsushin = "dokujo-tsushin" peachy = "peachy" movie_enter = "movie-enter" kaden_channel = "kaden-channel" livedoor_homme = "livedoor-homme" smax = "smax" it_life_hack = "it-life-hack" class Split(Enum): train = "train" dev = "dev" test = "test" @dataclass class SequenceClassificationExample: guid: str text: str label: str @dataclass class InputFeatures: input_ids: IntList attention_mask: IntList label_ids: IntList def download_and_extract_corpus(data_dir: Path) -> Optional[Path]: """livedoorコーパスデータのダウンロード""" filepath = Path("ldcc.tar") url = "https://www.rondhuit.com/download/ldcc-20140209.tar.gz" response = requests.get(url) if response.ok: with open(filepath, "wb") as fp: fp.write(response.content) with tarfile.open(filepath, "r") as fp: fp.extractall(data_dir) filepath.unlink() return data_dir / "text" return None def make_livedoor_corpus_dataset(data_dir: str = "./data") -> pd.DataFrame: # ライブドアコーパスを[カテゴリ, 本文]形式でpd.DataFrameで読み込む pdir = Path(data_dir) if not (pdir / "text").exists(): pdir.mkdir(exist_ok=True) parent_path = download_and_extract_corpus(Path(data_dir)) else: parent_path = pdir / "text" categories = [v.value for v in LABELS] docs = [] for category in categories: for p in (parent_path / f"{category}").glob(f"{category}*.txt"): with open(p, "r") as f: next(f) # url next(f) # date next(f) # title body = "\n".join([line.strip() for line in f if line.strip()]) docs.append((category, body)) return pd.DataFrame(docs, columns=[LABEL_COL_NAME, TEXT_COL_NAME]) class SequenceClassificationDataset(Dataset): """ Build feature dataset so that the model can load """ def __init__( self, examples: List[SequenceClassificationExample], tokenizer: PreTrainedTokenizerFast, label_to_id: Dict[str, int], tokens_per_batch: int = 32, ): self.features: List[InputFeatures] = [] self.examples: List[SequenceClassificationExample] = examples texts: StrList = [ex.text for ex in self.examples] labels: StrList = [ex.label for ex in self.examples] # tokenize text into subwords with padding and truncation self.encodings: List[BatchEncoding] = [ tokenizer.encode_plus( text, add_special_tokens=True, max_length=tokens_per_batch, return_token_type_ids=False, padding="max_length", return_attention_mask=True, return_tensors="np", truncation=True, ) for text in texts ] 
# register features self.features = [ InputFeatures( input_ids=encoding.input_ids.flatten().tolist(), attention_mask=encoding.attention_mask.flatten().tolist(), label_ids=[label_to_id.get(label, 0)], ) for encoding, label in zip(self.encodings, labels) ] self._n_features = len(self.features) def __len__(self): return self._n_features def __getitem__(self, idx) -> InputFeatures: return self.features[idx] class InputFeaturesBatch: def __init__(self, features: List[InputFeatures]): self.input_ids: torch.Tensor self.attention_masks: torch.Tensor self.label_ids: Optional[torch.Tensor] self._n_features = len(features) input_ids_list: IntListList = [] masks_list: IntListList = [] label_ids_list: IntListList = [] for f in features: input_ids_list.append(f.input_ids) masks_list.append(f.attention_mask) if f.label_ids is not None: label_ids_list.append(f.label_ids) self.input_ids = torch.LongTensor(input_ids_list) self.attention_mask = torch.LongTensor(masks_list) if label_ids_list: self.label_ids = torch.LongTensor(label_ids_list) def __len__(self): return self._n_features def __getitem__(self, item): return getattr(self, item) class SequenceClassificationDataModule(pl.LightningDataModule): """ Prepare dataset and build DataLoader """ def __init__(self, hparams: Namespace): self.tokenizer: PreTrainedTokenizerFast self.train_examples: List[SequenceClassificationExample] self.val_examples: List[SequenceClassificationExample] self.test_examples: List[SequenceClassificationExample] self.train_dataset: SequenceClassificationDataset self.val_dataset: SequenceClassificationDataset self.test_dataset: SequenceClassificationDataset self.df_org: pd.DataFrame self.df_use: pd.DataFrame self.label_to_id: Dict[str, int] super().__init__() self.max_seq_length = hparams.max_seq_length self.cache_dir = hparams.cache_dir if not os.path.exists(self.cache_dir): os.mkdir(self.cache_dir) self.data_dir = hparams.data_dir if not os.path.exists(self.data_dir): os.mkdir(self.data_dir) self.tokenizer_name = hparams.model_name_or_path self.train_batch_size = hparams.train_batch_size self.eval_batch_size = hparams.eval_batch_size self.num_workers = hparams.num_workers self.num_samples = hparams.num_samples def prepare_data(self): """ Downloads the data and prepare the tokenizer """ self.tokenizer = BertTokenizerFast.from_pretrained( self.tokenizer_name, cache_dir=self.cache_dir, tokenize_chinese_chars=False, strip_accents=False, ) df = make_livedoor_corpus_dataset(self.data_dir) self.df_org = df if self.num_samples > 0: df = df.iloc[: self.num_samples] self.df_use = df def setup(self, stage=None): """ split the data into train, test, validation data :param stage: Stage - training or testing """ df = self.df_use # label_to_id = {k: v for v, k in enumerate(LABELS)} self.label_to_id = { k: v for v, k in enumerate(sorted(set(df[LABEL_COL_NAME].values.tolist()))) } df_train, df_test = train_test_split( df, test_size=0.3, stratify=df[LABEL_COL_NAME] ) df_val, df_test = train_test_split( df_test, test_size=0.5, stratify=df_test[LABEL_COL_NAME] ) self.train_examples = [ SequenceClassificationExample(guid=f"train-{i}", text=t, label=l) for i, (t, l) in df_train[[TEXT_COL_NAME, LABEL_COL_NAME]].iterrows() ] self.val_examples = [ SequenceClassificationExample(guid=f"val-{i}", text=t, label=l) for i, (t, l) in df_val[[TEXT_COL_NAME, LABEL_COL_NAME]].iterrows() ] self.test_examples = [ SequenceClassificationExample(guid=f"test-{i}", text=t, label=l) for i, (t, l) in df_test[[TEXT_COL_NAME, LABEL_COL_NAME]].iterrows() ] self.train_dataset = 
self.create_dataset(self.train_examples) self.val_dataset = self.create_dataset(self.val_examples) self.test_dataset = self.create_dataset(self.test_examples) self.dataset_size = len(self.train_dataset) def create_dataset( self, data: List[SequenceClassificationExample] ) -> SequenceClassificationDataset: return SequenceClassificationDataset( data, self.tokenizer, self.label_to_id, self.max_seq_length, ) @staticmethod def create_dataloader( ds: SequenceClassificationDataset, batch_size: int, num_workers: int = 0, shuffle: bool = False, ) -> DataLoader: return DataLoader( ds, collate_fn=InputFeaturesBatch, batch_size=batch_size, num_workers=num_workers, pin_memory=True, shuffle=shuffle, ) def train_dataloader(self): return self.create_dataloader( self.train_dataset, self.train_batch_size, self.num_workers, shuffle=True ) def val_dataloader(self): return self.create_dataloader( self.val_dataset, self.eval_batch_size, self.num_workers, shuffle=False ) def test_dataloader(self): return self.create_dataloader( self.test_dataset, self.eval_batch_size, self.num_workers, shuffle=False ) def total_steps(self) -> int: """ The number of total training steps that will be run. Used for lr scheduler purposes. """ num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores effective_batch_size = ( self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices ) return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs @staticmethod def add_model_specific_args(parent_parser): parser = ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument( "--train_batch_size", type=int, default=32, help="input batch size for training (default: 32)", ) parser.add_argument( "--eval_batch_size", type=int, default=32, help="input batch size for validation/test (default: 32)", ) parser.add_argument( "--num_workers", type=int, default=4, metavar="N", help="number of workers (default: 3)", ) parser.add_argument( "--max_seq_length", default=256, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--data_dir", default="data", type=str, required=True, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", ) parser.add_argument( "--num_samples", type=int, default=15000, metavar="N", help="Number of samples to be used for training and evaluation steps (default: 15000) Maximum:100000", ) return parser class SequenceClassificationModule(pl.LightningModule): """ Initialize a model and config for token-classification """ def __init__(self, hparams: Union[Dict, Namespace]): # NOTE: internal code may pass hparams as dict **kwargs if isinstance(hparams, Dict): hparams = Namespace(**hparams) num_labels = len(LABELS) super().__init__() # Enable to access arguments via self.hparams self.save_hyperparameters(hparams) self.step_count = 0 self.output_dir = Path(self.hparams.output_dir) self.cache_dir = None if self.hparams.cache_dir: if not os.path.exists(self.hparams.cache_dir): os.mkdir(self.hparams.cache_dir) self.cache_dir = self.hparams.cache_dir # AutoTokenizer # trf>=4.0.0: PreTrainedTokenizerFast by default # NOTE: AutoTokenizer doesn't load PreTrainedTokenizerFast... 
self.tokenizer_name = self.hparams.model_name_or_path self.tokenizer = BertTokenizerFast.from_pretrained( self.tokenizer_name, cache_dir=self.cache_dir, tokenize_chinese_chars=False, strip_accents=False, ) # AutoConfig config_name = self.hparams.model_name_or_path self.config: PretrainedConfig = BertConfig.from_pretrained( config_name, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=self.cache_dir, ) extra_model_params = ( "encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout", ) for p in extra_model_params: if getattr(self.hparams, p, None) and hasattr(self.config, p): setattr(self.config, p, getattr(self.hparams, p, None)) # AutoModelForSequenceClassification self.model: PreTrainedModel = BertForSequenceClassification.from_pretrained( self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=self.cache_dir, ) self.scheduler = None self.optimizer = None def forward(self, **inputs) -> SequenceClassifierOutput: """BertForSequenceClassification.forward""" return self.model(**inputs) def shared_step(self, batch: InputFeaturesBatch) -> SequenceClassifierOutput: # .to(self.device) is not necessary with pl.Traner ?? inputs = { "input_ids": batch.input_ids.to(self.device), "attention_mask": batch.attention_mask.to(self.device), "labels": batch.label_ids.to(self.device), } return self.model(**inputs) def training_step( self, train_batch: InputFeaturesBatch, batch_idx ) -> Dict[str, torch.Tensor]: output = self.shared_step(train_batch) loss = output.loss self.log("train_loss", loss, prog_bar=True) return {"loss": loss} def validation_step( self, val_batch: InputFeaturesBatch, batch_idx ) -> Dict[str, torch.Tensor]: output = self.shared_step(val_batch) return { "val_step_loss": output.loss, } def validation_epoch_end(self, outputs: List[Dict[str, torch.Tensor]]): avg_loss = torch.stack([x["val_step_loss"] for x in outputs]).mean() self.log("val_loss", avg_loss, sync_dist=True) def test_step( self, test_batch: InputFeaturesBatch, batch_idx ) -> Dict[str, torch.Tensor]: output = self.shared_step(test_batch) _, y_hat = torch.max(output.logits, dim=1) # values, indices test_acc = accuracy_score(y_hat.cpu(), test_batch.label_ids.detach().cpu()) return {"test_acc": torch.Tensor([test_acc])} def test_epoch_end(self, outputs: List[Dict[str, torch.Tensor]]): avg_test_acc = torch.stack([x["test_acc"] for x in outputs]).mean() self.log("avg_test_acc", avg_test_acc) def configure_optimizers(self): """Prepare optimizer and schedule (linear warmup and decay)""" model = self.model no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": self.hparams.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.0, }, ] if self.hparams.adafactor: self.optimizer = Adafactor( optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False, ) else: self.optimizer = AdamW( optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon, ) self.scheduler = { "scheduler": ReduceLROnPlateau( self.optimizer, mode="min", factor=0.2, patience=2, min_lr=1e-6, verbose=True, ), "monitor": "val_loss", } return [self.optimizer], [self.scheduler] @pl.utilities.rank_zero_only def on_save_checkpoint(self, checkpoint: Dict[str, Any]): save_path = 
self.output_dir.joinpath("best_tfmr") self.model.config.save_step = self.step_count self.model.save_pretrained(save_path) self.tokenizer.save_pretrained(save_path) @staticmethod def add_model_specific_args(parent_parser): parser = ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument( "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.", ) parser.add_argument( "--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.", ) parser.add_argument( "--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.", ) parser.add_argument("--adafactor", action="store_true") return parser class LoggingCallback(pl.Callback): # def on_batch_end(self, trainer, pl_module): # lr_scheduler = trainer.lr_schedulers[0]["scheduler"] # # lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())} # # pl_module.logger.log_metrics(lrs) # pl_module.logger.log_metrics({"last_lr": lr_scheduler._last_lr}) def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Validation results *****") metrics = trainer.callback_metrics # Log results for key in sorted(metrics): rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Test results *****") metrics = trainer.callback_metrics # Log and save results to file output_test_results_file = os.path.join( pl_module.hparams.output_dir, "test_results.txt" ) with open(output_test_results_file, "w") as writer: for key in sorted(metrics): rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) writer.write("{} = {}\n".format(key, str(metrics[key]))) def make_trainer(argparse_args: Namespace): """ Prepare pl.Trainer with callbacks and args """ early_stopping = EarlyStopping(monitor="val_loss", mode="min", verbose=True) checkpoint_callback = ModelCheckpoint( dirpath=argparse_args.output_dir, filename="checkpoint-{epoch}-{val_loss:.2f}", save_top_k=1, verbose=True, monitor="val_loss", mode="min", ) lr_logger = LearningRateMonitor() logging_callback = LoggingCallback() train_params = {"deterministic": True} if args.gpus > 1: train_params["distributed_backend"] = "ddp" train_params["accumulate_grad_batches"] = args.accumulate_grad_batches trainer = pl.Trainer.from_argparse_args( argparse_args, callbacks=[lr_logger, early_stopping, checkpoint_callback, logging_callback], **train_params, ) return trainer, checkpoint_callback if __name__ == "__main__": parser = ArgumentParser(description="Transformers Document Classifier") parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store 
the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--seed", type=int, default=42, help="random seed for initialization" ) parser.add_argument( "--do_train", action="store_true", help="Whether to run training." ) parser.add_argument( "--do_predict", action="store_true", help="Whether to run predictions on the test set.", ) parser = pl.Trainer.add_argparse_args(parent_parser=parser) parser = SequenceClassificationModule.add_model_specific_args(parent_parser=parser) parser = SequenceClassificationDataModule.add_model_specific_args( parent_parser=parser ) args = parser.parse_args() # sets seeds for numpy, torch, python.random and PYTHONHASHSEED. pl.seed_everything(args.seed) Path(args.output_dir).mkdir(exist_ok=True) # Logs loss and any other metrics specified in the fit function, # and optimizer data as parameters. Model checkpoints are logged # as artifacts and pytorch model is stored under `model` directory. mlflow.pytorch.autolog(log_every_n_epoch=1) dm = SequenceClassificationDataModule(args) dm.prepare_data() dm.setup(stage="fit") model = SequenceClassificationModule(args) trainer, checkpoint_callback = make_trainer(args) trainer.fit(model, dm) if args.do_predict: # NOTE: load the best checkpoint automatically trainer.test()
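# Illustrative only (not called by the training script): how the DataLoader's
# collate_fn, InputFeaturesBatch, stacks a list of InputFeatures into padded
# LongTensors. The token ids below are dummies.
def _demo_collate() -> None:
    batch = InputFeaturesBatch([
        InputFeatures(input_ids=[2, 100, 3, 0], attention_mask=[1, 1, 1, 0], label_ids=[0]),
        InputFeatures(input_ids=[2, 200, 201, 3], attention_mask=[1, 1, 1, 1], label_ids=[1]),
    ])
    assert batch.input_ids.shape == torch.Size([2, 4])
    assert batch.attention_mask.shape == torch.Size([2, 4])
    assert batch.label_ids.shape == torch.Size([2, 1])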
import copy import heapq import math import matplotlib.pyplot as plt import numpy as np import time import utils from occupancy_grid import OccupancyCell, OccupancyGrid from robot import Robot from shapely import affinity, geometry from typing import List, Tuple class FrontierPlanner(): def __init__(self, grid: OccupancyGrid, robot: Robot, wall_observation_angle: float, show_plot: bool): self.grid = grid self.robot = robot self.frontier = {} self.coverage_path = [] self.rotation_interpolation_step = 10 self.top_n_frontier = 3 self.wall_observation_angle = wall_observation_angle self.show_plot = show_plot def astar(self, start: OccupancyCell, goal: OccupancyCell) -> Tuple[List[OccupancyCell], float]: """Runs an astar grid search between the start and the goal cell to identify the shortest path to a cell from which the goal cell can be viewed.""" if self.is_visible(start, goal): return [start], [start.z_axis_angle(goal)], 0 distances = {} # A dictionary used to store and lateer unwind the shortest path path_history = {} path_history[start.key()] = None current_cell = None min_heap = [] heapq.heappush(min_heap, [0, 0, 0, start]) while len(min_heap) and current_cell is None: _, _, _, cell = heapq.heappop(min_heap) for n in self.grid.get_neighbors(cell): if n.key() in path_history: continue path_history[n.key()] = cell if (not self.is_collision(n)): # Use euclidean distance to prioritize entries in the priority queue. This can always be # updated later. d = cell.euclidean_distance(n) distances[n.key()] = d if d <= self.robot.sensor_range and self.is_visible(n, goal): current_cell = n break heapq.heappush(min_heap, (goal.euclidean_distance(n), utils.angular_distance(n.z_axis_angle(goal), self.robot.yaw), np.random.rand(), n)) path = [current_cell] angles = [current_cell.z_axis_angle(goal)] distance = 0 # Unwind the path to the cell that has a view of the goal. while current_cell.key() in path_history and path_history[current_cell.key()] is not None: path.insert(0, current_cell) prev_cell = path_history[current_cell.key()] heading = prev_cell.z_axis_angle(current_cell) distance += distances[current_cell.key()] current_cell = prev_cell return path, angles, distance def bfs(self, start: OccupancyCell, goal: OccupancyCell) -> Tuple[List[OccupancyCell], float]: """Runs an bfs grid search between the start and the goal cell to identify the shortest path to a cell from which the goal cell can be viewed.""" if self.is_visible(start, goal): return [start], [start.z_axis_angle(goal)], 0 distances = {} path_history = {} path_history[start.key()] = None current_cell = None queue = [start] while len(queue) and current_cell is None: cell = queue.pop(0) for n in self.grid.get_neighbors(cell): if n.key() in path_history: continue path_history[n.key()] = cell if ((n.is_visited and not n.is_occupied) or not self.is_collision(n)): d = cell.euclidean_distance(n) distances[n.key()] = d if d <= self.robot.sensor_range and self.is_visible(n, goal): current_cell = n break queue.append(n) path = [current_cell] angles = [current_cell.z_axis_angle(goal)] distance = 0 # Unwind the path to the cell that has a view of the goal. 
while current_cell.key() in path_history and path_history[current_cell.key()] is not None: path.insert(0, current_cell) prev_cell = path_history[current_cell.key()] heading = prev_cell.z_axis_angle(current_cell) distance += distances[current_cell.key()] current_cell = prev_cell return path, angles, distance def normals_in_range(self, current: OccupancyCell, goal: OccupancyCell, normal_range: float) -> float: """Determines whether the normal formed by the goal cell center and its nearest edge to the robot forms an angle with the current and goal cell centers that is within the allowable range.""" neighbors = self.grid.get_neighbors(goal) point_slopes = [] distances = [] min_angle = 360 for (x1, y1), (x2, y2), (row_offset, col_offset) in goal.get_sides(): neighbor = self.grid.get_cell(goal.row + row_offset, goal.col + col_offset) if neighbor is None: continue # Gets the centroid of the edge. x3 = (x1 + x2) / 2.0 y3 = (y1 + y2) / 2.0 # Maybe calculates the angle between the two vectors and updates the min_angle if not neighbor.is_occupied and neighbor.is_viewable: v1 = (current.x - goal.x, current.y - goal.y) v2 = (x3 - goal.x, y3 - goal.y) dot_product = np.dot(v1 / np.linalg.norm(v1), v2 / np.linalg.norm(v2)) angle = math.degrees(np.arccos(dot_product)) min_angle = min(min_angle, angle) return normal_range >= min_angle def is_visible(self, current: OccupancyCell, goal: OccupancyCell) -> bool: """Determines if a goal cell is visible from the urrent cell by checking for occlusion constraints, viewing angle constraints and whether or not the robot sensor physically overlaps the cell.""" if not goal.is_viewable: return False new_robo = copy.deepcopy(self.robot) x_offset = current.x - self.robot.x y_offset = current.y - self.robot.y new_robo.translate(current.x, current.y) new_robo.rotate(current.z_axis_angle(goal)) if ((new_robo.camera_poly.overlaps(goal.poly) or goal.poly.within(new_robo.camera_poly)) and not self.grid.is_occluded(current, goal) and new_robo.in_view(goal.x, goal.y) and self.normals_in_range(current, goal, self.wall_observation_angle)): return True return False def is_collision(self, cell: OccupancyCell) -> bool: """Identifies if moving to a given cell location would cause the robot to collide with a wall. Cells that have not yet been visited will always return False since we know nothing about them.""" if cell.is_visited and cell.is_occupied: return True new_body_poly = geometry.Point(cell.x, cell.y).buffer(self.robot.radius) cells = self.grid.get_overlap_cells(new_body_poly) for cell in cells: if (cell.is_visited and cell.is_occupied): return True return False def maybe_visit_and_update_frontier(self, cells: List[OccupancyCell]): """Visits a cell if it already has not been visited. If it has not been visited the function proceeds to remove it from the frontier and add its non-visited neighbors to the frontier including diagonal entries.""" for cell in cells: if cell.is_visited or not cell.is_viewable: continue cell.is_visited = True cell.is_frontier = False self.frontier.pop(cell.key(), None) for n in self.grid.get_neighbors(cell, include_diagonals = True): n_key = n.key() if (not n.is_visited and n.is_viewable and n_key not in self.frontier): self.frontier[n_key] = n n.is_frontier = True def observe_cells(self, view_cell, camera_poly): """Determines the list of visible cells given the viewing locaiton, view_cell and the provided sensor polygon. 
Cells that are visible are then observed and the frontier is updated.""" for cell in self.grid.get_overlap_cells(camera_poly): if (not cell.is_visited and self.is_visible(view_cell, cell)): self.maybe_visit_and_update_frontier([cell]) def get_full_coverage_path(self) -> List[Tuple[float, float, float]]: """Determines a path to fully cover the accessible map regions using the robot and associated camera sensor passed to the class. Inaccessible regions are precomputed from the robot starting location. """ self.coverage_path = [[self.robot.x, self.robot.y, self.robot.yaw]] robot_cell = self.grid.get_cell_from_coords(self.robot.x, self.robot.y) # Mark the initial robot location as visited / vieed. self.grid.mark_viewable_cells(robot_cell) start_cells = self.grid.get_overlap_cells(self.robot.body_poly) self.maybe_visit_and_update_frontier(start_cells) self.observe_cells(robot_cell, self.robot.camera_poly) self.maybe_plot() robot_cell = self.grid.get_cell_from_coords(self.robot.x, self.robot.y) # Continues to perform the search until the frontier is fully explored. while len(self.frontier): self.grid.print_coverage() goal_path = None goal_cell = None goal_distance = np.inf goal_rotation = 360 # Determines the euclidean distance to all frontier cells. This heuristic for # ranking goal candidates can be updated later. frontier_distances = [] for cell in self.frontier.values(): frontier_distances.append([cell.key(), robot_cell.euclidean_distance(cell)]) frontier_distances = sorted(frontier_distances, key = lambda x: x[1]) n_frontier = min(len(frontier_distances), self.top_n_frontier) # Each candidate frontier cell is investigated using an astar search to identify # a goal location from which to view the cell. The lowest cost goal cell is selected # where cost in this case is the cummulative translational distance to reach the cell. for i in range(0, n_frontier): cell = self.frontier[frontier_distances[i][0]] path, angles, distance = self.astar(robot_cell, cell) rotation = utils.angular_distance(angles[0], self.robot.yaw) if (goal_distance > distance) or (goal_distance >= distance and goal_rotation > rotation): goal_rotation = rotation goal_distance = distance goal_cell = cell goal_path = path next_cell = goal_path[0] current_heading = self.robot.yaw new_heading = None # Robots do not need to adjust their heading if they are heading to an explored reason since there # is no risk of them colliding. if next_cell.is_visited and next_cell != robot_cell: self.robot.translate(next_cell.x, next_cell.y) current_heading = self.robot.yaw self.observe_cells(robot_cell, self.robot.camera_poly) self.maybe_plot() # Wraps the heading angles to prevent wrap around issues with 360 degrees. new_heading = utils.wrap_heading(current_heading, self.robot.get_z_axis_angle(goal_cell.x, goal_cell.y)) # If robot cell is not equal to the next cell we update the required heading to by equally splitting the total rotational # offset over the number of cells in the goal path. if next_cell != robot_cell: new_heading = utils.wrap_heading(current_heading, current_heading + ((new_heading - current_heading) / (len(goal_path) - 1))) robot_cell = self.grid.get_cell_from_coords(self.robot.x, self.robot.y) self.coverage_path.append([self.robot.x, self.robot.y, self.robot.yaw]) # Interpolates the required heading rotation such that the robot physically observes the cells in intermediate rotation # states. 
for r in np.linspace(current_heading, new_heading, int(abs(current_heading - new_heading) / self.rotation_interpolation_step)): self.robot.rotate(r) self.observe_cells(robot_cell, self.robot.camera_poly) self.maybe_plot() self.observe_cells(robot_cell, self.robot.camera_poly) self.robot.rotate(new_heading) self.maybe_plot() return self.coverage_path def maybe_plot(self): """Maybe plots the current state of the frontier planner.""" if self.show_plot: self.plot() def plot(self): """Plots the occupancy cell grid, robot location, robot heading, camera sensor and the trajectory of the robot to date.""" f = plt.figure(figsize=(9, 9)) ax = f.add_subplot(1, 1, 1) self.grid.plot(ax) self.robot.plot(ax) if len(self.coverage_path) > 1: for i in range(1, len(self.coverage_path)): x1, y1, _ = self.coverage_path[i - 1] x2, y2, _ = self.coverage_path[i] ax.plot([x1, x2], [y1, y2], color='k') plt.show(block=False) plt.pause(0.01) plt.close()
from typing import List
from math import gcd
from functools import reduce


class Solution:
    def isGoodArray(self, nums: List[int]) -> bool:
        return reduce(gcd, nums) == 1
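# Note (not part of the original solution): the reduction works because of
# Bezout's identity -- some integer combination of the array elements equals 1
# exactly when their gcd is 1. A minimal self-contained check with made-up inputs:
if __name__ == "__main__":
    s = Solution()
    print(s.isGoodArray([12, 5, 7, 23]))  # True: gcd is 1
    print(s.isGoodArray([29, 6, 10]))     # True: gcd is 1
    print(s.isGoodArray([3, 6]))          # False: gcd is 3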
# -*- coding: utf-8 -*- """ File multi_label_loss.py @author:ZhengYuwei """ import numpy as np import tensorflow as tf from tensorflow import keras class MyLoss(object): """ 损失函数 """ def __init__(self, model, **options): self.model = model self.is_label_smoothing = options.setdefault('is_label_smoothing', False) self.is_focal_loss = options.setdefault('is_focal_loss', False) self.is_gradient_harmonizing = options.setdefault('is_gradient_harmonized', False) self.loss_func = self._normal_categorical_crossentropy() # 标签平滑 if self.is_label_smoothing: self.smoothing_epsilon = options.setdefault('smoothing_epsilon', 0.005) # focal loss损失函数 if self.is_focal_loss: gamma = options.setdefault('focal_loss_gamma', 2.0) alpha = options.setdefault('focal_loss_alpha', 1.0) self.loss_func = self._categorical_focal_loss(gamma, alpha) # gradient harmonized mechanism if self.is_gradient_harmonizing: bins = options.setdefault('ghm_loss_bins', 30) momentum = options.setdefault('ghm_loss_momentum', 0.75) self.loss_func = self._categorical_ghm_loss(bins, momentum) @staticmethod def _normal_categorical_crossentropy(): """ 自带的多标签分类损失函数 categorical_crossentropy """ def categorical_crossentropy(y_truth, y_pred, _): return keras.backend.categorical_crossentropy(y_truth, y_pred) return categorical_crossentropy @staticmethod def _categorical_focal_loss(gamma=2.0, alpha=1.0): """ 返回多分类 focal loss 函数 Formula: loss = -alpha*((1-p_t)^gamma)*log(p_t) Parameters: alpha -- the same as wighting factor in balanced cross entropy, default 2.0 gamma -- focusing parameter for modulating factor (1-p), default 0.25 """ def focal_loss(y_truth, y_pred, _): epsilon = keras.backend.epsilon() y_pred = keras.backend.clip(y_pred, epsilon, 1.0 - epsilon) cross_entropy = -y_truth * keras.backend.log(y_pred) weight = alpha * keras.backend.pow(keras.backend.abs(y_truth - y_pred), gamma) loss = weight * cross_entropy loss = keras.backend.sum(loss, axis=1) return loss return focal_loss @staticmethod def _categorical_ghm_loss(bins=30, momentum=0.75): """ 返回多分类 GHM 损失函数: 把每个区间上的梯度做平均,也就是说把梯度拉平,回推到公式上等价于把loss做平均 Formula: loss = sum(crossentropy_loss(p_i,p*_i) / GD(g_i)) GD(g) = S_ind(g) / delta = S_ind(g) * M S_ind(g) = momentum * S_ind(g) + (1 - momentum) * R_ind(g) R_ind(g)是 g=|p-p*| 所在梯度区间[(i-1)delta, i*delta]的样本数 M = 1/delta,这个是个常数,理论上去掉只有步长影响 Parameters: (论文默认) bins -- 区间个数,default 30 momentum -- 使用移动平均来求区间内样本数,动量部分系数,论文说不敏感 """ # 区间边界 edges = np.array([i/bins for i in range(bins + 1)]) edges = np.expand_dims(np.expand_dims(edges, axis=-1), axis=-1) acc_sum = 0 if momentum > 0: acc_sum = tf.zeros(shape=(bins,), dtype=tf.float32) def ghm_class_loss(y_truth, y_pred, valid_mask): epsilon = keras.backend.epsilon() y_pred = keras.backend.clip(y_pred, epsilon, 1.0 - epsilon) # 0. 
计算本次mini-batch的梯度分布:R_ind(g) gradient = keras.backend.abs(y_truth - y_pred) # 获取概率最大的类别下标,将该类别的梯度做为该标签的梯度代表 # 没有这部分就是每个类别的梯度都参与到GHM,实验表明没有这部分会更好些 # truth_indices_1 = keras.backend.expand_dims(keras.backend.argmax(y_truth, axis=1)) # truth_indices_0 = keras.backend.expand_dims(keras.backend.arange(start=0, # stop=tf.shape(y_pred)[0], # step=1, dtype='int64')) # truth_indices = keras.backend.concatenate([truth_indices_0, truth_indices_1]) # main_gradient = tf.gather_nd(gradient, truth_indices) # gradient = tf.tile(tf.expand_dims(main_gradient, axis=-1), [1, y_pred.shape[1]]) # 求解各个梯度所在的区间,并落到对应区间内进行密度计数 grads_bin = tf.logical_and(tf.greater_equal(gradient, edges[:-1, :, :]), tf.less(gradient, edges[1:, :, :])) valid_bin = tf.boolean_mask(grads_bin, valid_mask, name='valid_gradient', axis=1) valid_bin = tf.reduce_sum(tf.cast(valid_bin, dtype=tf.float32), axis=(1, 2)) # 2. 更新指数移动平均后的梯度分布:S_ind(g) nonlocal acc_sum acc_sum = tf.add(momentum * acc_sum, (1 - momentum) * valid_bin, name='update_bin_number') # sample_num = tf.reduce_sum(acc_sum) # 是否乘以总数,乘上效果反而变差了 # 3. 计算本次mini-batch不同loss对应的梯度密度:GD(g) position = tf.slice(tf.where(grads_bin), [0, 1], [-1, 2]) value = tf.gather_nd(acc_sum, tf.slice(tf.where(grads_bin), [0, 0], [-1, 1])) # * bins grad_density = tf.sparse.SparseTensor(indices=position, values=value, dense_shape=tf.shape(gradient, out_type=tf.int64)) grad_density = tf.sparse.to_dense(grad_density, validate_indices=False) grad_density = grad_density * tf.expand_dims(valid_mask, -1) + (1 - tf.expand_dims(valid_mask, -1)) # 4. 计算本次mini-batch不同样本的损失:loss cross_entropy = -y_truth * keras.backend.log(y_pred) # loss = cross_entropy / grad_density * sample_num loss = cross_entropy / grad_density loss = keras.backend.sum(loss, axis=1) """ # 调试用,打印tensor print_op = tf.print('acc_sum: ', acc_sum, '\n', 'grad_density: ', grad_density, '\n', 'cross_entropy: ', cross_entropy, '\n', 'loss:', loss, '\n', '\n', '=================================================\n', summarize=100) with tf.control_dependencies([print_op]): return tf.identity(loss) """ return loss return ghm_class_loss def categorical_crossentropy(self, y_truth, y_pred): """ 单标签多分类损失函数 :param y_truth: 真实类别值, (?, ?) :param y_pred: 预测类别值, (?, num_classes) :return: loss """ num_classes = keras.backend.cast(keras.backend.int_shape(y_pred)[-1], dtype=tf.int32) # 类别数 # 将sparse的truth输出flatten, 记录无效标签(-1)和有效标签(>=0)位置,后续用于乘以loss y_truth = keras.backend.flatten(y_truth) valid_mask = 1.0 - tf.cast(tf.less(y_truth, 0), dtype=tf.float32) # 转为one_hot y_truth = keras.backend.cast(y_truth, dtype=tf.uint8) y_truth = keras.backend.one_hot(indices=y_truth, num_classes=num_classes) # 标签平滑 if self.is_label_smoothing: num_classes = keras.backend.cast(num_classes, dtype=y_pred.dtype) y_truth = (1.0 - self.smoothing_epsilon) * y_truth + self.smoothing_epsilon / num_classes loss = self.loss_func(y_truth, y_pred, valid_mask) loss = loss * valid_mask """ # 调试用,打印tensor print_op = tf.print( # 'y_pred: ', y_pred, '\n', # 'y_truth: ', y_truth, '\n', # 'valid_mask: ', valid_mask, '\n', # 'loss:', loss, '\n', # 'normal_loss:', self._normal_categorical_crossentropy()(y_truth, y_pred, valid_mask), '\n', 'layer losses (regularization)', tf.transpose(self.model.losses), '\n', 'mean loss:', tf.reduce_mean(loss), '\t', 'sum layer losses:', tf.reduce_sum(tf.transpose(self.model.losses)), '\n', '=================================================\n', summarize=100 ) with tf.control_dependencies([print_op]): return tf.identity(loss) """ return loss
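# Hedged usage sketch (illustrative only, not from the original module): one way
# MyLoss could be wired into keras.Model.compile. The toy architecture, input
# shape and option values below are assumptions; categorical_crossentropy expects
# sparse integer labels with -1 marking invalid entries, matching the valid_mask
# logic in the class above. This only shows the wiring, not a full training run.
def _example_compile():
    model = keras.Sequential([
        keras.layers.Dense(64, activation='relu', input_shape=(32,)),
        keras.layers.Dense(10, activation='softmax'),
    ])
    my_loss = MyLoss(model, is_focal_loss=True,
                     focal_loss_gamma=2.0, focal_loss_alpha=1.0)
    model.compile(optimizer='adam', loss=my_loss.categorical_crossentropy)
    return model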
for i in range(1, 21): print(i)
from setuptools import setup, find_packages # name is the package name in pip, should be lower case and not conflict with existing packages # packages are code source setup( name="astrosimon", version="0.8.5", description="Simulation Monitor for computational astrophysics", url="https://github.com/maxwelltsai/SiMon", author="Maxwell Cai, Penny Qian", author_email="[email protected]", license="BSD 2-Clause", packages=find_packages(), zip_safe=False, install_requires=["python-daemon", "numpy"], entry_points={ "console_scripts": ["simon = SiMon.simon:main"], }, )
from .runner import Runner # noqa
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
            'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
            'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
            'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']


def caesar(direction, cypher_text, shift_amount):
    plain_text = ""
    for char in cypher_text:
        if char in alphabet:
            position = alphabet.index(char)
            if direction == "encode":
                new_position = position + shift_amount
            elif direction == "decode":
                new_position = 26 + position - shift_amount
            plain_text += alphabet[new_position]
        else:
            plain_text += char
    print(f"Here's the {direction}d result: {plain_text}")


#TODO-1: Import and print the logo from art.py when the program starts.
from art import logo
print(logo)

#TODO-4: Can you figure out a way to ask the user if they want to restart the cipher program?
#e.g. Type 'yes' if you want to go again. Otherwise type 'no'.
#If they type 'yes' then ask them for the direction/text/shift again and call the caesar() function again?
#Hint: Try creating a new function that calls itself if they type 'yes'.

#TODO-2: What if the user enters a shift that is greater than the number of letters in the alphabet?
#Try running the program and entering a shift number of 45.
#Hint: Think about how you can use the modulus (%).

x = True
while x is True:
    direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
    text = input("Type your message:\n").lower()
    shift = int(input("Type the shift number:\n"))
    shift = shift % 26
    caesar(cypher_text=text, shift_amount=shift, direction=direction)
    restart = input("Do you want to go again? yes or no ")
    if restart == "yes":
        x = True
    else:
        x = False
        print("Goodbye")
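# Hedged alternative sketch (not from the original lesson code): the same cipher
# written against a single 26-letter alphabet with pure modular arithmetic, which
# is what the TODO-2 hint about the modulus operator points at.
import string

def caesar_mod(direction, text, shift_amount):
    shift = shift_amount % 26
    if direction == "decode":
        shift = -shift
    result = ""
    for char in text:
        if char in string.ascii_lowercase:
            result += string.ascii_lowercase[(string.ascii_lowercase.index(char) + shift) % 26]
        else:
            result += char
    return result

# e.g. caesar_mod("encode", "hello", 45) == caesar_mod("encode", "hello", 19)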
import pytest from stock_indicators import indicators class TestSTC: def test_standard(self, quotes): cycle_periods = 9 fast_periods = 12 slow_periods = 26 results = indicators.get_stc(quotes, cycle_periods, fast_periods, slow_periods) r = results[34] assert r.stc is None r = results[35] assert 100 == round(float(r.stc), 4) r = results[49] assert 0.8370 == round(float(r.stc), 4) r = results[249] assert 27.7340 == round(float(r.stc), 4) r = results.pop() assert 19.2544 == round(float(r.stc), 4) def test_bad_data(self, bad_quotes): r = indicators.get_stc(bad_quotes, 10, 23, 50) assert 502 == len(r) def test_no_quotes(self, quotes): r = indicators.get_stc([]) assert 0 == len(r) r = indicators.get_stc(quotes[:1]) assert 1 == len(r) def test_removed(self, quotes): cycle_periods = 9 fast_periods = 12 slow_periods = 26 results = indicators.get_stc(quotes, cycle_periods, fast_periods, slow_periods) results = results.remove_warmup_periods() last = results.pop() assert 19.2544 == round(float(last.stc), 4) def test_exceptions(self, quotes, other_quotes, mismatch_quotes): from System import ArgumentOutOfRangeException with pytest.raises(ArgumentOutOfRangeException): indicators.get_stc(quotes, 9, 0, 26) with pytest.raises(ArgumentOutOfRangeException): indicators.get_stc(quotes, 9, 12, 12) with pytest.raises(ArgumentOutOfRangeException): indicators.get_stc(quotes, -1, 12, 26)
import io import logging import os import pathlib from datetime import datetime from typing import List from musiquepy.data.errors import MusiquepyExistingUserError from musiquepy.data.media import get_profile_pictures_dir from musiquepy.data.model import ( Album, AlbumPhoto, Artist, MusicGenre, MusicTrack, User) from sqlalchemy import select from sqlalchemy.engine import Engine, ResultProxy from sqlalchemy.orm.session import Session class MusiquepyDB: _engine: Engine _session: Session _log: logging.Logger def __init__(self, engine: Engine) -> None: self._engine = engine self._log = logging.getLogger(__name__) def connect(self): self._session = Session(self._engine) self._session.expire_on_commit = False def close(self): self._session.close() def __enter__(self): self.connect() return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() def create_user(self, name: str, email: str, password: str) -> User: usr = self.get_user_by_email(email) if usr is not None: raise MusiquepyExistingUserError( f"utilisateur existe déjà: {email}") usr = User() usr.email = email usr.name = name usr.password = password usr.accept_marketing = 0 usr.active = 1 usr.created_at = int(datetime.now().timestamp()) usr.email_confirmed_at = None self._session.add(usr) self._session.commit() return usr def get_users(self) -> List[User]: result: ResultProxy result = self._session.execute(select(User)) return [row.User for row in result.fetchall()] def get_user_by_id(self, id) -> User: stmt = select(User).where(User.id == id) return self._session.execute(stmt).scalar() def get_user_by_email(self, email) -> User: stmt = select(User).where(User.email == email) return self._session.execute(stmt).scalar() def get_user_profile_picture(self, user_id: int) -> io.IOBase: pictures_path = get_profile_pictures_dir() profile_pic_path = pathlib.Path( pictures_path, f'user_{int(user_id)}.jpg') if not profile_pic_path.exists(): profile_pic_path = pathlib.Path(pictures_path, 'default.jpg') return io.FileIO(os.path.join(pictures_path, 'default.jpg')) def get_genres(self) -> List[MusicGenre]: stmt = select(MusicGenre).order_by(MusicGenre.description) return self._session.execute(stmt).scalars().all() def get_genre_by_id(self, id: int) -> MusicGenre: stmt = select(MusicGenre).where(MusicGenre.id == id) return self._session.execute(stmt).scalar() def get_artist_by_id(self, id: int) -> Artist: stmt = select(Artist).where(Artist.id == id) return self._session.execute(stmt).scalar() def get_music_tracks_by_genre(self, id_genre: int) -> List[MusicTrack]: stmt = ( select(MusicTrack, Artist, Album) .join(MusicTrack.album) .join(Album.artist) .join(Artist.genres) .where(MusicGenre.id == id_genre) ) result = self._session.execute(stmt) return [row.MusicTrack for row in result.fetchall()] def get_album_photo(self, id: int) -> AlbumPhoto: stmt = (select(AlbumPhoto).where(AlbumPhoto.album_id == id)) return self._session.execute(stmt).scalar()
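# Hedged usage sketch (illustrative, not part of the original module): the class
# is written as a context manager around a SQLAlchemy Engine, so a call site
# could look like the following. The sqlite URL is an assumption and the query
# only works against a database that already contains the mapped tables.
if __name__ == "__main__":
    from sqlalchemy import create_engine

    engine = create_engine("sqlite:///musiquepy.db")  # hypothetical database file
    with MusiquepyDB(engine) as db:
        for genre in db.get_genres():
            print(genre.description)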
""" #def swap(x,y): T=((10,'Q1'), (20,'Q2'), (50,'Q3')) for i in T: print(i[1]) def get_data(aTuple): nums = () words = () for t in aTuple: nums = nums + (t[0],) if t[1] not in words: words = words + (t[1],) min_n = min(nums) max_n = max(nums) unique_words = len(words) return (min_n, max_n, unique_words) print(get_data(((10,'Q1'), (20,'Q2'), (50,'Q3')))) Write a Python program to find the second smallest number in a list """ #L = [6,3,2,6,3] L = list(map(int, input("Enter the list numbers separated by space ").strip().split())) Ln = [] for i in L: if (i not in Ln): Ln.append(i) Ln.sort() print(Ln[1])
from base64 import b64encode, b64decode import binascii from itertools import islice import logging LOG = logging.getLogger(__name__) gen_raw_nec_protocols_standard = ['nec1', 'nec2', 'necx1', 'necx2'] gen_raw_nec_protocols_suffixes = ['', '-y1', '-y2', '-y3', '-f16'] gen_raw_nec_protocols = list(x + y for x in gen_raw_nec_protocols_standard for y in gen_raw_nec_protocols_suffixes) gen_raw_rc5_protocols = ['rc5'] gen_raw_rc6_protocols = ['rc6'] gen_raw_rca38_protocols = ['rca38'] gen_raw_protocols = [*gen_raw_nec_protocols, *gen_raw_rc5_protocols, *gen_raw_rc6_protocols, *gen_raw_rca38_protocols] def uX_to_bin(v, x): if(v < 0): v += (1 << x) return bin(v)[2:].rjust(x, '0') def gen_raw_rc5(protocol, device, subdevice, function, toggle=0): logical_bit = 889.0 def encode_bit(s): if s == '1': yield logical_bit * -1 yield logical_bit * 1 else: yield logical_bit * 1 yield logical_bit * -1 def encode_uX(x, l): for s in uX_to_bin(x, l): yield from encode_bit(s) yield from encode_bit('1') # start if function < 64: yield from encode_bit('1') # field (function 0-63) else: yield from encode_bit('0') # field (function 64-127) yield from encode_bit(str(toggle)) # toggle # address yield from encode_uX(device, 5) # command yield from encode_uX(function % 64, 6) # trailing silence yield logical_bit * -100 def gen_raw_rc6(protocol, device, subdevice, function, toggle=0, mode=0): logical_bit = 444.0 def encode_bit(s): if s == '1': yield logical_bit * 1 yield logical_bit * -1 else: yield logical_bit * -1 yield logical_bit * 1 def encode_uX(x, l): for s in uX_to_bin(x, l): yield from encode_bit(s) #LS yield logical_bit * 6 yield logical_bit * -2 #SB yield from encode_bit('1') #Mode yield from encode_uX(mode, 3) #TB if toggle: yield logical_bit * 2 yield logical_bit * -2 else: yield logical_bit * -2 yield logical_bit * 2 #Control yield from encode_uX(device, 8) #Information yield from encode_uX(function, 8) #Signal Free yield logical_bit * -6 def gen_raw_nec(protocol, device, subdevice, function): logical_bit = 562.5 protocol_base, protocol_suffix = (protocol.split('-') + [None])[:2] def encode(value): b = uX_to_bin(value, 8) for s in reversed(b): yield logical_bit # burst if s == '1': yield logical_bit * -3 # one is encoded by 3 length else: yield logical_bit * -1 # zero is encoded by 1 lengths if protocol_base in ('nec1', 'necx1'): yield logical_bit * 16 # leading burst else: yield logical_bit * 8 # leading burst yield logical_bit * -8 # space before data yield from encode(device) if subdevice >= 0: yield from encode(subdevice) else: yield from encode(~device) yield from encode(function & 0xFF) if protocol_suffix == 'y1': # Yamaha special version 1 yield from encode(function ^ 0x7F) elif protocol_suffix == 'y2': # Yamaha special version 2 yield from encode(function ^ 0xFE) elif protocol_suffix == 'y3': # Yamaha special version 3 yield from encode(function ^ 0x7E) elif protocol_suffix == 'f16': # 16 bit function yield from encode((function >> 8) & 0xFF) else: # Standard invert yield from encode(function ^ 0xFF) yield logical_bit # Trailing burst yield logical_bit * -3 # Trailing zero to separate def gen_raw_rca38(protocol, device, subdevice, function, **kwargs): logical_bit = 460 def encode_bit(s): if s == '1': yield logical_bit * 1 yield logical_bit * -4 else: yield logical_bit * 1 yield logical_bit * -2 def rev_encode_bit(s): if s == '1': yield from encode_bit('0') else: yield from encode_bit('1') def encode_uX(x, l, f): for s in uX_to_bin(x, l): yield from f(s) #Starting burst yield logical_bit * 8 yield 
logical_bit * -8 # Device and function yield from encode_uX(device, 4, encode_bit) yield from encode_uX(function, 8, encode_bit) #Reversed device and function yield from encode_uX(device, 4, rev_encode_bit) yield from encode_uX(function, 8, rev_encode_bit) #Ending burst yield logical_bit * 1 yield logical_bit * -16 def gen_raw_general(protocol, device, subdevice, function, **kwargs): if protocol.lower() in gen_raw_nec_protocols: yield from gen_raw_nec(protocol.lower(), int(device), int(subdevice), int(function)) if protocol.lower() in gen_raw_rc5_protocols: yield from gen_raw_rc5(protocol.lower(), int(device), int(subdevice), int(function)) if protocol.lower() in gen_raw_rc6_protocols: yield from gen_raw_rc6(protocol.lower(), int(device), int(subdevice), int(function)) if protocol.lower() in gen_raw_rca38_protocols: yield from gen_raw_rca38(protocol.lower(), int(device), int(subdevice), int(function)) def gen_simplified_from_raw(x): """ Simplify raw string. Combine successive same sign value, drop zeros, drop leading negative """ value = 0 for i in x: if i == 0: continue elif value == 0: if i > 0: value = i else: pass # leading negative elif (value > 0) == (i > 0): value += i else: yield value value = i if value != 0: yield value def gen_paired_from_raw(x): """ Create pairs of on, off """ sign = 1 for i in x: if (i < 0) ^ (sign < 0): yield 0.0 yield i else: yield i sign = -sign if sign < 0: yield 0.0 def gen_raw_from_broadlink(data): v = iter(data) code = next(v) repeat = next(v) assert code == 0x26 # IR length = int.from_bytes(islice(v, 2), byteorder='little') def decode_one(x): return round(x * 8192 / 269) def decode_iter(x): sign = 1 while True: try: d = next(x) except StopIteration: return if d == 0: d = int.from_bytes(islice(x, 2), byteorder='big') yield sign * decode_one(d) sign = sign * -1 yield from decode_iter(islice(v, length)) assert next(v) == 0x0d assert next(v) == 0x05 rem = list(v) if any(rem): LOG.warning("Ignored extra data: %s", rem) def gen_raw_from_broadlink_base64(data): yield from gen_raw_from_broadlink(b64decode(data)) def gen_broadlink_from_raw(data, repeat=0): yield from b'\x26' # IR yield from repeat.to_bytes(1, byteorder='big') # Repeat def encode_one(x): # v = abs(int(i / 32.84)) v = abs(round(x * 269 / 8192)) if v > 255: yield from b'\x00' yield from v.to_bytes(2, byteorder='big') else: yield from v.to_bytes(1, byteorder='big') def encode_list(x): for i in gen_simplified_from_raw(x): yield from encode_one(i) c = bytearray(encode_list(data)) count = len(c) yield from count.to_bytes(2, byteorder='little') yield from c yield from b'\x0d' yield from b'\x05' # calculate total length for padding count += 6 # header+len+trailer count += 4 # rm.send_data() 4 byte header (not seen here) yield from bytearray(16 - (count % 16)) def gen_broadlink_base64_from_raw(data, repeat=0): return b64encode(bytes(gen_broadlink_from_raw(data, repeat))) def gen_raw_from_pronto(data): clock = 0.241246 # Pronto clock base: 1000000 / (32768 * 506 / 4) v = iter(data) zero = next(v) assert zero == 0 base = next(v) freq = 1.0 / (base * clock) seq1_len = next(v) seq2_len = next(v) for _ in range(seq1_len): yield +round(next(v) / freq, 1) yield -round(next(v) / freq, 1) for _ in range(seq2_len): yield +round(next(v) / freq, 1) yield -round(next(v) / freq, 1) def gen_pronto_from_raw_int(seq1, seq2, base=None, freq=None): clock = 0.241246 # Pronto clock base: 1000000 / (32768 * 506 / 4) if freq is None: if base is None: freq = 0.040 else: freq = 1.0 / (base * clock) if base is None: base = int(1 
/ (freq * clock)) yield 0 yield base def fixup(x): return list(gen_paired_from_raw(gen_simplified_from_raw(x))) #return list(gen_paired_from_raw((x))) simple1 = fixup(seq1) simple2 = fixup(seq2) yield int(len(simple1)/2) # sequence 1 yield int(len(simple2)/2) # sequence 2 for x in simple1: yield int(abs(x) * freq) for x in simple2: yield int(abs(x) * freq) def gen_pronto_from_raw(seq1, seq2, base=None, freq=None): data = gen_pronto_from_raw_int(seq1, seq2, base, freq) for value in data: yield "{0:0{1}x}".format(value,4)
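# Hedged round-trip sketch (not from the original module), using only functions
# defined above. The pulse sequence is made up (values in microseconds, positive
# for mark, negative for space); decoded values only match approximately because
# of the 269/8192 Broadlink tick quantisation applied during encoding.
if __name__ == "__main__":
    raw = [9000, -4500, 560, -560, 560, -1690, 560, -40000]
    packet = gen_broadlink_base64_from_raw(raw)
    decoded = list(gen_raw_from_broadlink_base64(packet))
    print(packet)
    print(decoded)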
import numpy as np from sklearn.tree import DecisionTreeRegressor from sklearn.kernel_ridge import KernelRidge from sklearn.datasets import make_regression, make_classification from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error, log_loss from sklearn.base import clone class MeanSquaredError: ''' Mean squared error loss function ''' @staticmethod def compute_derivatives(y, f): g = 2*(f - y) h = 2.0 * np.ones(y.shape[0]) return g, h class LogisticLoss: ''' Logistic loss function ''' @staticmethod def compute_derivatives(y, f): tmp = np.exp(-np.multiply(y, f)) tmp2 = np.divide(tmp, 1+tmp) g = -np.multiply(y, tmp2) h = np.multiply(tmp2, 1.0-tmp2) return g, h class HNBM: ''' A generic Heterogeneous Newton Boosting Machine Args: loss (class): loss function num_iterations (int): number of boosting iterations learning_rate (float): learning rate base_learners (list): list of base learners probabilities (list): list of sampling probabilities Attributes: ensemble_ (list): Ensemble after training ''' def __init__(self, loss, num_iterations, learning_rate, base_learners, probabilities): self.loss_ = loss self.num_iterations_ = num_iterations self.learning_rate_ = learning_rate self.base_learners_ = base_learners self.probabilities_ = probabilities self.ensemble_ = [] def fit(self, X, y): ''' Train the model Args: X (np.ndarray): Feature matrix y (np.ndarray): Labels ''' z = np.zeros(X.shape[0]) self.ensemble_ = [] for i in range(0, self.num_iterations_): g, h = self.loss_.compute_derivatives(y, z) base_learner = clone(np.random.choice(self.base_learners_, p=self.probabilities_)) base_learner.fit(X, -np.divide(g, h), sample_weight=h) z += base_learner.predict(X) * self.learning_rate_ self.ensemble_.append(base_learner) def predict(self, X): ''' Predict using the model Args: X (np.ndarray): Feature matrix ''' preds = np.zeros(X.shape[0]) for learner in self.ensemble_: preds += self.learning_rate_ * learner.predict(X) return preds class SnapBoost(HNBM): ''' A particular realization of a HNBM that uses decision trees and kernel ridge regressors Args: loss (class): loss function num_iterations (int): number of boosting iterations learning_rate (float): learning rate p_tree (float): probability of selecting a tree at each iteration min_max_depth (int): minimum maximum depth of a tree in the ensemble max_max_depth (int): maximum maximum depth of a tree in the ensemble alpha (float): L2-regularization penalty in the ridge regression gamma (float): RBF-kernel parameter ''' def __init__(self, loss=MeanSquaredError, num_iterations=100, learning_rate=0.1, p_tree=0.8, min_max_depth=4, max_max_depth=8, alpha=1.0, gamma=1.0): base_learners = [] probabilities = [] # Insert decision tree base learners depth_range = range(min_max_depth, 1+max_max_depth) for d in depth_range: base_learners.append(DecisionTreeRegressor(max_depth=d, random_state=42)) probabilities.append(p_tree/len(depth_range)) # Insert kernel ridge base learner base_learners.append(KernelRidge(alpha=alpha, kernel='rbf', gamma=gamma)) probabilities.append(1.0-p_tree) super().__init__(loss, num_iterations, learning_rate, base_learners, probabilities) def test(classification=False): ''' Test SnapBoost on a synthetic learning task: Args: classification (bool): generate a classification task (if True) or a regression task (if False) ''' # for deterministic results across runs np.random.seed(42) # construct a SnapBoost object model = SnapBoost(loss=LogisticLoss if classification else MeanSquaredError) # 
generate a dataset (regression task) if classification: X, y = make_classification(n_samples=1000, n_features=20, random_state=42) # we assume [-1,+1] labels y = 2*y-1 else: X, y = make_regression(n_samples=1000, n_features=20, random_state=42) # split into train/test datasets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # train a SnapBoost model model.fit(X_train, y_train) # predict using the SnapBoost model preds = model.predict(X_test) # evaluate the model if classification: logloss= log_loss(y_test, 1.0/(1.0+np.exp(-preds))) print("SnapBoost log_loss (test set): %.4f" % (logloss)) else: rmse = np.sqrt(mean_squared_error(y_test, preds)) print("SnapBoost RMSE (test set): %.4f" % (rmse)) if __name__ == "__main__": test(classification=False) test(classification=True)
# Python library import import asyncio, netscud async def task(): """ Async function """ my_device = { "ip": "192.168.0.16", "username": "cisco", "password": "cisco", "device_type": "cisco_ios", } # Creation of a device sw1 = netscud.ConnectDevice(**my_device) # Connection to the device await sw1.connect() # Command to send cmd = "show interfaces description" # Sending command output = await sw1.send_command(cmd) # Display message print(output) # Disconnection await sw1.disconnect() # Main function call if __name__ == "__main__": # Main async loop asyncio.run(task())
from setuptools import setup
from setuptools import find_packages

VERSION = "1.0.0"
DESCRIPTION = "DWIO NYU Helpers"

setup(
    name="dwio-nyu",
    version=VERSION,
    author="Daniel Walt",
    description=DESCRIPTION,
    packages=find_packages(),
    install_requires=[],
)
#!/usr/bin/env python3 from utils import add_to_dict_num from math import log10 from jellyfish import levenshtein_distance from operator import itemgetter class comparator: def __init__(self): None def compare(self, A, B): return A == B class probabilityTree: def __init__(self, function, probability_lookup): self.tree = probability_lookup self.function = function def get_probability(self, A, B): comparison = self.function.compare(A, B) if comparison in self.tree: value = self.tree[comparison] else: value = self.tree['*'] if isinstance(value, probabilityTree): return value.get_probability(A,B) else: return value class equalsComparator(comparator): # Kind of unnecessary but it has a Ronseal name def __init__(self): super().__init__() class lookupComparator(comparator): def __init__(self, lookup_set): self.lookup = lookup_set def compare(self, A, B): return_string = "" if A == B: return "A" if A in self.lookup: return_string += "A" if B in self.lookup: return_string += "B" return return_string class missingComparator(comparator): def __init__(self): super().__init__() def compare(self, A, B): if len(A) > 0 and len(B) == 0: return 1 elif len(B) > 0 and len(A) == 0: return -1 else: return 0 class similarityComparator(comparator): def __init__(self): super().__init__() def compare(self, A, B): return levenshtein_distance(A,B) class confidenceCalculator: def __init__(self, probability_tree): self.values = {} self.probability_tree = probability_tree self.value_count = 0 self.calculated = False def add_value(self, value): add_to_dict_num(self.values, value) self.value_count += 1 def calc(self): keys = [k for k in self.values.keys()] all_probabilities = [] probability_sum = 0 for i in range(len(keys)): total_probability = 1 for j in range(len(keys)): probability = self.probability_tree.get_probability(keys[i], keys[j]) total_probability *= probability ** self.values[keys[j]] if len(keys) == 1: probability_sum += (1-probability) ** self.values[keys[j]] all_probabilities.append([keys[i],total_probability]) probability_sum += total_probability self.calculated_probabilities = sorted([p for p in all_probabilities], key=itemgetter(1), reverse=True) for cp in self.calculated_probabilities: cp.append(int(log10(cp[1]/(probability_sum-cp[1]))*10)) self.calculated = True def conf_iter(self): if not self.calculated: self.calc() for cp in self.calculated_probabilities: yield cp if __name__ == '__main__': PT1 = probabilityTree(similarityComparator(), {1:0.6, 2:0.3, '*': 0.1}) PT2 = probabilityTree(equalsComparator(), {1:0.9, 0: PT1}) PT3 = probabilityTree(missingComparator(), {1:0.7, -1:0.1, 0: PT2}) PT4 = probabilityTree(lookupComparator(set(["Woman Clerk", "Surgeon", "Manager"])), {"AB":0.2, "A":0.9, "B":0.01, "*": PT3}) print("Ex1:", PT3.get_probability("jones","jones")) print("Ex2:", PT3.get_probability("jones","jonesy")) print("Ex3:", PT3.get_probability("jones","jonesyyy")) print("Ex4:", PT3.get_probability("","jonesyyy")) CC = confidenceCalculator(PT3) CC.add_value("jones") CC.add_value("jones") CC.add_value("jones") for c in CC.conf_iter(): print(c) CC = confidenceCalculator(PT4) CC.add_value("Woman Clerk") CC.add_value("Woman Clerk") CC.add_value("Surgeon") print("") print("*************") for c in CC.conf_iter(): print(c)
from IPython.core.display import HTML, SVG
import pandas as pd
import numpy as np
import xport
import IPython
from ipywidgets import Layout
from ipywidgets import widgets
from IPython.display import display
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib as mpl
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import seaborn as sns

input = pd.read_csv('/home/jgines/Desktop/topics_to_csv_12_02_2020_17_21_44.csv')

source = []

# dynamic range
for i in range(0, 1857):
    for l in input:
        #if l == 'time':
        #    continue
        #if l == 'distance':
        #    continue
        #if l == 'recovery_behavior_executed':
        #    continue
        #if l == 'vel_x':
        #    continue
        #if l == 'vel_theta':
        #    continue
        new_reg = {
            'time': i,
            'distance': input['distance'][i],
            'recovery_behavior_executed': input['recovery_behavior_executed'][i],
            'vel_x': input['vel_x'][i],
            'vel_theta': input['vel_theta'][i]
        }
        source.append(new_reg)

data = pd.DataFrame(source)
#print(data)

sns.relplot(x="time", y="distance", kind="line", data=data)
plt.show()
from .train_wandb_fpc import train_wandb_cae, train_wandb_aae, \ train_wandb_svdae # noqa: F401
# -*- coding: utf-8 -*- from django.contrib import admin from django.utils.translation import ugettext_lazy from .models import Client, ProxyResource, HTTPMethod, Api @admin.register(Client) class ClientAdmin(admin.ModelAdmin): list_display = ('name', 'api_key', ) readonly_fields = ('api_key', ) def save_model(self, request, obj, form, change): if not obj.api_key: obj.api_key = Client.generate_api_key() return super(ClientAdmin, self).save_model(request, obj, form, change) class ProxyHttpMethodsInline(admin.TabularInline): model = ProxyResource.methods.through @admin.register(ProxyResource) class ProxyResourceAdmin(admin.ModelAdmin): list_display = ('name', 'api', 'display_methods', 'endpoint_url') list_filter = ('api', ) # inlines = [ProxyHttpMethodsInline] @admin.register(HTTPMethod) class HTTPMethodAdmin(admin.ModelAdmin): actions = None list_display_links = None def has_delete_permission(self, request, obj=None): """Restrict to remove HTTP methods from the admin""" return False # def get_actions(self, request): # actions = super(HTTPMethodAdmin, self).get_actions(request) # # if 'delete_selected' in actions: # del actions['delete_selected'] # return actions @admin.register(Api) class ApiAdmin(admin.ModelAdmin): list_display = ('path', ) # Customize admin titles admin.site.site_header = ugettext_lazy('Pilvi management') admin.site.site_title = ugettext_lazy('Pilvi management')
# Generated with LoadAndMassFormulation # from enum import Enum from enum import auto class LoadAndMassFormulation(Enum): """""" LUMPED = auto() CONSISTENT = auto() def label(self): if self == LoadAndMassFormulation.LUMPED: return "Lumped load and mass formulation" if self == LoadAndMassFormulation.CONSISTENT: return "Consistent load and mass formulation"
#!/usr/bin/env python # -*- coding: utf-8 -*- """Trace splay tree.""" import inspect import logging import os import pygraphviz as pgv from splay_tree_orig import SplayTree logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) os.makedirs("graph", exist_ok=True) def for_all_methods(decorator): """ Add decorator to all class methods. https://stackoverflow.com/questions/6307761/how-to-decorate-all-functions-of-a-class-without-typing-it-over-and-over-for-eac/6307868#6307868 :param decorator: decorator :return: decorated class """ def decorate(cls): members = inspect.getmembers(cls, predicate=inspect.isfunction) for name, value in members: if name in ("__init__", "draw"): continue setattr(cls, name, decorator(value)) return cls return decorate def draw_decorator(func): """ Draw state of tree. :param func: called function :return: decorated function """ def wrapper(*args, **kwargs): assert len(args) > 0 tree = args[0] assert isinstance(tree, SplayTree) func_name = func.__qualname__ message = f"{func_name}{args[1:]}" draw(tree, " before " + message) res = func(*args, **kwargs) draw(tree, " after " + message) return res return wrapper def draw(tree, message): """Draw state.""" logger.debug(str(tree._step) + message) tree._step += 1 A = pgv.AGraph() A.node_attr["style"] = "filled" A.node_attr["shape"] = "record" A.node_attr["fixedsize"] = "true" A.node_attr["fontsize"] = 12 for node in tree._nodes: label = f"""<f0> {node.val}|<f1> {node.counter}""" A.add_node(id(node), label=label) n = A.get_node(id(node)) if not node.parent: n.attr["fillcolor"] = "#CFC291" for node in tree._nodes: if node.parent: # красный A.add_edge(id(node), id(node.parent), color="#F15A5A") if node.left: # зеленый A.add_edge(id(node), id(node.left), color="#4EBA6F") if node.right: # синий A.add_edge(id(node), id(node.right), color="#2D95BF") A.layout() filename = os.path.join("graph", f"{tree._step:03}-{message}.dot") A.draw(filename) class Worker: """Worker.""" def __init__(self): """Worker init.""" self.tree = for_all_methods(draw_decorator)(SplayTree)() def process(self, commands): """Process commands.""" res = [] for cmd_code, x in commands: if cmd_code == 1: pos = self.tree.insert(x) res.append(pos) elif cmd_code == 2: self.tree.remove(x) else: raise ValueError("Invalid Command") return res if __name__ == "__main__": worker = Worker() n = int(input()) commands = [] for _ in range(n): command = list(map(int, input().strip().split())) commands.append(command) res = worker.process(commands) print("\n".join(map(str, res)))
import numpy as np from rpy2.robjects import FloatVector from rpy2.robjects.packages import importr import matplotlib.pyplot as plt from itertools import combinations_with_replacement as comb_rep from src.tools import get_params def scale_omega(Omega, omega_scale, pos, C): Omega_tmp = np.copy(Omega) idxC = get_params(pos, C, 2) Omega_tmp[idxC, idxC] *= omega_scale return Omega_tmp def get_omega_blocks(df1, df2, kernel_support, granularity=1000, second_order=True): def r_bs(df, derivs, intercept=False, plot=False): """Returns differentiated design matrices (using R-call via rpy2)""" base = importr('base') splines = importr('splines') # Setup ordinary B-spline to compute knots spl = splines.bs(base.c(0, kernel_support), df = df, intercept = intercept) knots = base.attributes(spl).rx2('knots') knots = base.c(base.rep(0, base.ifelse(intercept, 4, 3)), knots, base.rep(kernel_support, 4)) # Compute design matrix grd = np.linspace(0, kernel_support, granularity + 1) x = FloatVector(grd) x = np.asarray(splines.splineDesign(x, knots = knots, outer_ok = True, derivs = derivs)) if plot: plt.plot(grd, x) plt.title(f"Derivs {derivs}, df {df}") plt.show() return x def get_partial_block(deriv1, deriv2, diag): """Get the integrated partial blocks (i.e. order 1,1 or 2, 0) Integrand is e.g. vec(b'' b^T) """ # Compute each spline matrix b1 = r_bs(df=df2, derivs=deriv1) b1 = np.einsum("ij, ik -> ijk", b1, np.ones(b1.shape)) b2 = r_bs(df=df2, derivs=deriv2) b2 = np.einsum("ij, ik -> ijk", b2, np.ones(b2.shape)) # Get the ordering of b1 and b2 right - depends on whether diag or non-diag if diag: upper_diag = np.triu_indices(b1.shape[1]) b1 = np.stack([x[upper_diag] for x in b1]) b2 = np.stack([x.T[upper_diag] for x in b2]) else: b1 = b1.reshape(b1.shape[0], -1) b2 = np.moveaxis(b2, 1, 2).reshape(b2.shape[0], -1) # Perform each integration separately outer1 = np.einsum("ij, ik -> ijk", b1, b1).sum(axis=0) * kernel_support/granularity outer2 = np.einsum("ij, ik -> ijk", b2, b2).sum(axis=0) * kernel_support/granularity return outer1 * outer2 def get_block(diag): """Wrapper for getting f''g + 2f'g' + fg'' """ out = (get_partial_block(2, 0, diag) + 2*get_partial_block(1, 1, diag) + get_partial_block(0, 2, diag)) return out block1 = r_bs(df=df1, derivs=2) block1 = np.einsum("ij, ik -> jk", block1, block1) * kernel_support/granularity # Diagonal and off-diagonal blocks block_d = get_block(True,) block_o = get_block(False) return block1, block_d, block_o def get_omega(df1, df2, kernel_support, pos, abC, blocks=None, granularity=1000, second_order=True, scale = 10): a, b, C = abC.values() param_size = int(1 + len([a] + C)*df1 + (((len(C)*df2)**2 + len(C)*df2)/2 if second_order else 0)) if blocks is None: blocks = get_omega_blocks(df1, df2, kernel_support, granularity=granularity, second_order=second_order) block1, block_d, block_o = blocks # Initialize Omega = np.zeros((param_size, param_size)) for v in [a] + C: idx = get_params(pos, v, order=1) Omega[np.ix_(idx, idx)] = block1 if second_order: for c1, c2 in list(comb_rep(C, 2)): if c1 == c2: idx = get_params(pos, c1, order=2, double=True) else: idx1, idx2 = get_params(pos, c1, order=2), get_params(pos, c2, order=2) idx = np.array(list(set(idx1) & set(idx2))) Omega[np.ix_(idx, idx)] = scale*block_d if c1 == c2 else scale*block_o # if scale != 1: # idxC = get_params(pos, C, 2) # Omega[idxC, idxC] *= scale return Omega def identity_omega(omega_scale, pos, abC, scale = 1): a, b, C = abC.values() Omega = np.identity(pos[0].size) pos_2 = get_params(pos, C, 2) 
Omega[np.ix_(pos_2, pos_2)] *= omega_scale return Omega
import unittest from think import Agent, Memory, Speech class AgentTest(unittest.TestCase): def test_agent(self, output=False): agent = Agent(output=output) agent.wait(10.0) self.assertAlmostEqual(10.0, agent.time(), 2) agent.run_thread(lambda: agent.wait(5.0)) agent.wait(2.0) self.assertAlmostEqual(12.0, agent.time(), 2) agent.wait_for_all() self.assertAlmostEqual(15.0, agent.time(), 2) def test_threads(self, output=False): agent = Agent(output=output) memory = Memory(agent) memory.add(isa='item') memory.add(isa='digit', value=1) memory.add(isa='number', value='three') speech = Speech(agent) def thread2(): for _ in range(2): number = memory.recall(isa='number') speech.say(number.value) agent.run_thread(thread2) agent.wait(.100) def thread3(): for _ in range(2): memory.recall('digit') agent.run_thread(thread3) for _ in range(2): memory.recall('item') agent.wait_for_all() self.assertAlmostEqual(1.200, agent.time(), 2)
""" Generic training script that trains a model using a given dataset. This code modifies the "TensorFlow-Slim image classification model library", Please visit https://github.com/tensorflow/models/tree/master/research/slim for more detailed usage. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import os import numpy as np from datasets import dataset_factory from deployment import model_deploy from nets import nets_factory from preprocessing import preprocessing_factory from modules import * from configuration import * slim = tf.contrib.slim FLAGS = tf.app.flags.FLAGS def _average_gradients(tower_grads): """Calculate the average gradient for each shared variable across all towers. Note that this function provides a synchronization point across all towers. Args: tower_grads: List of lists of (gradient, variable) tuples. The outer list is over individual gradients. The inner list is over the gradient calculation for each tower. Returns: List of pairs of (gradient, variable) where the gradient has been averaged across all towers. """ average_grads = [] for grad_and_vars in zip(*tower_grads): # Note that each grad_and_vars looks like the following: # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN)) grads = [] for g, _ in grad_and_vars: # Add 0 dimension to the gradients to represent the tower. # print(g) expanded_g = tf.expand_dims(g, 0) # Append on a 'tower' dimension which we will average over below. grads.append(expanded_g) # Average over the 'tower' dimension. grad = tf.concat(axis=0, values=grads) grad = tf.reduce_mean(grad, 0) # Keep in mind that the Variables are redundant because they are shared # across towers. So .. we will just return the first tower's pointer to # the Variable. 
v = grad_and_vars[0][1] grad_and_var = (grad, v) average_grads.append(grad_and_var) return average_grads def _tower_loss(network_fn, images, labels, input_seqs, input_masks): """Calculate the total loss on a single tower running the model.""" # Get image features, text features, and joint embeddings image_features, _ = build_image_features(network_fn, images) text_features, _ = build_text_features(input_seqs, input_masks) image_embeddings = build_joint_embeddings(image_features, scope='image_joint_embedding') text_embeddings = build_joint_embeddings(text_features, scope='text_joint_embedding') loss, cmpm_loss, cmpc_loss, i2t_loss, t2i_loss = 0.0, 0.0, 0.0, 0.0, 0.0 cmpm_loss = tf.cast(cmpm_loss, tf.float32) cmpc_loss = tf.cast(cmpc_loss, tf.float32) i2t_loss = tf.cast(i2t_loss, tf.float32) t2i_loss = tf.cast(t2i_loss, tf.float32) # CMPM loss if FLAGS.CMPM: i2t_loss, t2i_loss, pos_avg_dist, neg_avg_dist = \ cmpm_loss_compute(text_embeddings, image_embeddings, labels) cmpm_loss = i2t_loss + t2i_loss tf.summary.scalar('cmpm_i2t_loss', i2t_loss) tf.summary.scalar('cmpm_t2i_loss', t2i_loss) tf.summary.scalar('cmpm_loss', cmpm_loss) tf.summary.scalar('pos_avg_dist', pos_avg_dist) tf.summary.scalar('neg_avg_dist', neg_avg_dist) # CMPC loss if FLAGS.CMPC: ipt_loss, tpi_loss, image_precision, text_precision = \ cmpc_loss_compute(text_embeddings, image_embeddings, labels) cmpc_loss = ipt_loss + tpi_loss tf.summary.scalar('cmpc_ipt_loss', ipt_loss) tf.summary.scalar('cmpc_tpi_loss', tpi_loss) tf.summary.scalar('cmpc_loss', cmpc_loss) tf.summary.scalar('image_precision', image_precision) tf.summary.scalar('text_precision', text_precision) loss = cmpc_loss + cmpm_loss reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) total_loss = tf.add_n([loss] + reg_loss, name='total_loss') loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg_loss') loss_averages_op = loss_averages.apply([loss] + [total_loss]) tf.summary.scalar('loss_raw', loss) tf.summary.scalar('loss_avg', loss_averages.average(loss)) with tf.control_dependencies([loss_averages_op]): total_loss = tf.identity(total_loss) return total_loss, cmpm_loss, cmpc_loss, i2t_loss, t2i_loss def train(): if not FLAGS.dataset_dir: raise ValueError('You must supply the dataset directory with --dataset_dir') tf.logging.set_verbosity(tf.logging.INFO) with tf.Graph().as_default(): ####################### # Config model_deploy # ####################### deploy_config = model_deploy.DeploymentConfig( num_clones=FLAGS.num_clones, clone_on_cpu=FLAGS.clone_on_cpu, replica_id=FLAGS.task, num_replicas=FLAGS.worker_replicas, num_ps_tasks=FLAGS.num_ps_tasks) # Create global_step with tf.device(deploy_config.variables_device()): global_step = slim.create_global_step() ###################### # Select the dataset # ###################### dataset = dataset_factory.get_dataset( FLAGS.dataset_name, FLAGS.split_name, FLAGS.dataset_dir) ########################### # Select the CNN network # ########################### network_fn = nets_factory.get_network_fn( FLAGS.model_name, num_classes=None, weight_decay=FLAGS.weight_decay, is_training=True) ######################################### # Configure the optimization procedure. 
# ######################################### with tf.device(deploy_config.optimizer_device()): learning_rate = configure_learning_rate(dataset.num_samples, global_step) optimizer = configure_optimizer(learning_rate) ##################################### # Select the preprocessing function # ##################################### preprocessing_name = FLAGS.preprocessing_name image_preprocessing_fn = preprocessing_factory.get_preprocessing( preprocessing_name, is_training=True) ############################################################## # Create a dataset provider that loads data from the dataset # ############################################################## with tf.device(deploy_config.inputs_device()): examples_per_shard = 1024 min_queue_examples = examples_per_shard * FLAGS.input_queue_memory_factor provider = slim.dataset_data_provider.DatasetDataProvider( dataset, num_readers=FLAGS.num_readers, common_queue_capacity=min_queue_examples + 3 * FLAGS.batch_size, common_queue_min=min_queue_examples) [image, label, text_id, text] = provider.get(['image', 'label', 'caption_ids', 'caption']) train_image_size = network_fn.default_image_size image = image_preprocessing_fn(image, train_image_size, train_image_size) # This function splits the text into an input sequence and a target sequence, # where the target sequence is the input sequence right-shifted by 1. Input and # target sequences are batched and padded up to the maximum length of sequences # in the batch. A mask is created to distinguish real words from padding words. # Note that the target sequence is used if performing caption generation seq_length = tf.shape(text_id)[0] input_length = tf.expand_dims(tf.subtract(seq_length, 1), 0) input_seq = tf.slice(text_id, [0], input_length) target_seq = tf.slice(text_id, [1], input_length) input_mask = tf.ones(input_length, dtype=tf.int32) images, labels, input_seqs, target_seqs, input_masks, texts, text_ids = tf.train.batch( [image, label, input_seq, target_seq, input_mask, text, text_id], batch_size=FLAGS.batch_size, capacity=2 * FLAGS.num_preprocessing_threads * FLAGS.batch_size, dynamic_pad=True, name="batch_and_pad") batch_queue = slim.prefetch_queue.prefetch_queue( [images, labels, input_seqs, target_seqs, input_masks, texts, text_ids], capacity=16 * deploy_config.num_clones, num_threads=FLAGS.num_preprocessing_threads, dynamic_pad=True, name="perfetch_and_pad") images, labels, input_seqs, target_seqs, input_masks, texts, text_ids = batch_queue.dequeue() images_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=images) labels_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=labels) input_seqs_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=input_seqs) target_seqs_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=target_seqs) input_masks_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=input_masks) texts_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=texts) text_ids_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=text_ids) tower_grads = [] for k in xrange(FLAGS.num_gpus): with tf.device('/gpu:%d' % k): with tf.name_scope('tower_%d' % k) as scope: with tf.variable_scope(tf.get_variable_scope()): loss, cmpm_loss, cmpc_loss, i2t_loss, t2i_loss = \ _tower_loss(network_fn, images_splits[k], labels_splits[k], input_seqs_splits[k], input_masks_splits[k]) # Reuse variables for the next tower. 
tf.get_variable_scope().reuse_variables() # Retain the summaries from the final tower. summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=scope) # Variables to train. variables_to_train = get_variables_to_train() grads = optimizer.compute_gradients(loss, var_list=variables_to_train) tower_grads.append(grads) # We must calculate the mean of each gradient. Note that this is the # synchronization point across all towers. grads = _average_gradients(tower_grads) # Add a summary to track the learning rate and precision. summaries.append(tf.summary.scalar('learning_rate', learning_rate)) # Add histograms for histogram and trainable variables. for grad, var in grads: if grad is not None: summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad)) for var in tf.trainable_variables(): summaries.append(tf.summary.histogram(var.op.name, var)) ################################# # Configure the moving averages # ################################# if FLAGS.moving_average_decay: moving_average_variables = slim.get_model_variables() variable_averages = tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay, global_step) update_ops.append(variable_averages.apply(moving_average_variables)) # Apply the gradients to adjust the shared variables. grad_updates = optimizer.apply_gradients(grads, global_step=global_step) update_ops.append(grad_updates) # Group all updates to into a single train op. train_op = tf.group(*update_ops) # Create a saver. saver = tf.train.Saver(tf.global_variables()) # Build the summary operation from the last tower summaries. summary_op = tf.summary.merge(summaries) # Build an initialization operation to run below. init = tf.global_variables_initializer() # Start running operations on the Graph. allow_soft_placement must be set to # True to build towers on GPU, as some of the ops do not have GPU implementations. config = tf.ConfigProto( allow_soft_placement=True, log_device_placement=FLAGS.log_device_placement) sess = tf.Session(config=config) sess.run(init) ck_global_step = get_init_fn(sess) print_train_info() # Start the queue runners. 
tf.train.start_queue_runners(sess=sess) summary_writer = tf.summary.FileWriter( os.path.join(FLAGS.log_dir), graph=sess.graph) num_steps_per_epoch = int(dataset.num_samples / FLAGS.batch_size) max_number_of_steps = FLAGS.num_epochs * num_steps_per_epoch for step in xrange(max_number_of_steps): step += int(ck_global_step) # check the training data # simages, slabels, sinput_seqs, starget_seqs, sinput_masks, stexts, stext_ids = \ # sess.run([images_splits[0], labels_splits[0], input_seqs_splits[0], target_seqs_splits[0], # input_masks_splits[0], texts_splits[0], text_ids_splits[0]]) # save_images(simages[:8], [1, 8], './{}/{:05d}.png'.format(FLAGS.train_samples_dir, step)) # import pdb # pdb.set_trace() _, total_loss_value, cmpm_loss_value, cmpc_loss_value, i2t_loss_value, t2i_loss_value = \ sess.run([train_op, loss, cmpm_loss, cmpc_loss, i2t_loss, t2i_loss]) assert not np.isnan(cmpm_loss_value), 'Model diverged with cmpm_loss = NaN' assert not np.isnan(cmpc_loss_value), 'Model diverged with cmpc_loss = NaN' assert not np.isnan(total_loss_value), 'Model diverged with total_loss = NaN' if step % 10 == 0: format_str = ('%s: step %d, cmpm_loss = %.2f, cmpc_loss = %.2f, ' 'i2t_loss = %.2f, t2i_loss = %.2f') print(format_str % (FLAGS.dataset_name, step, cmpm_loss_value, cmpc_loss_value, i2t_loss_value, t2i_loss_value)) if step % 100 == 0: summary_str = sess.run(summary_op) summary_writer.add_summary(summary_str, step) # Save the model checkpoint periodically. if step % FLAGS.ckpt_steps == 0 or (step + 1) == max_number_of_steps: checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt') saver.save(sess, checkpoint_path, global_step=step)
import os import sys def count_tabs(text): counter = 0 for i in range(len(text)): if i+1 < len(text)-1: #print(text[i], text[i+1]) if text[i] == "\\" and text[i+1] == "t": counter += 1 return counter def parser(file='default_file.cc', substitute_lines=[""], tag="//REPLACE", output_tag=True, indent=""): """this function reads a c++ source code and update the lines in between a specified tag. Keyword Arguments: file {str} -- source code (default: {'default_file.cc'}) substitute_lines {list} -- a list of lines to replace (default: {[""]}) tag {str} -- the tag to looking for (default: {"//REPLACE"}) output_tag {bool} -- specify if the tag should also be written in the output (default: {True}) indent {str} -- apply a tab or spaces to all the input lines (default: {""}) Returns: {list} -- return the file in the shape of a list of string, where each element is a line """ # open the file open_file = open(os.path.join(sys.path[0], file), 'r') # put lines in a list lines = open_file.readlines() # specify the list to be populated and returned parsed_file_lines = [] # open and close tag condition variables tag_o, tag_c = False, False # open tag # read each line for line in lines: # if there is a tag and it's not the closing tag if line.strip() == tag and not tag_c: # count the number of tabs to replicate tabs = count_tabs(line) tabs_append = "" + indent for i in range(tabs): tabs_append += "\t" # if the tag is open if tag_o: for substitute in substitute_lines: parsed_file_lines.append("{}{}\n".format(tabs_append, substitute)) tag_c = True parsed_file_lines.append(tag + "\n") tag_o = True elif not tag_o or tag_c: # just append line as it is parsed_file_lines.append(line) return parsed_file_lines def output_parsed_file(file='prova_parsed.cc', lines=[]): with open(os.path.join(sys.path[0], file), 'w') as f: f.writelines(lines) f.close() def parser_wrapper(file='', lines=[]): out = parser(file=file, substitute_lines=lines) output_parsed_file(file=file, lines=out) # out = parser() # print(out) # output_parsed_file(lines=out)
# Generated by Django 2.1.7 on 2019-03-25 10:47

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('ingenieria', '0002_auto_20190322_0931'),
    ]

    operations = [
        migrations.AlterField(
            model_name='recurrente',
            name='nombre',
            field=models.CharField(max_length=50, unique=True),
        ),
        migrations.AlterUniqueTogether(
            name='documento',
            unique_together={('nro', 'fecha')},
        ),
    ]
import unittest from parameterized import parameterized_class from src.calculus_of_variations import HigherDerivativesSolver from src.calculus_of_variations.utils import t, var Cs = {} for i in range(1, 9): Cs[i] = var("C{}".format(i)) def make_solution( n: str, L: str, t0: str, t1: str, x0: str, x1: str, x0_array: str, x1_array: str ): solution = HigherDerivativesSolver( n=n, L=L, t0=t0, t1=t1, x0=x0, x1=x1, x0_array=x0_array, x1_array=x1_array ) solution.solve(verbose=False) return solution test_case_1 = { "solution": make_solution( n="2", L="x_diff_2 ** 2", t0="0", t1="1", x0="0", x1="0", x0_array="0", x1_array="1", ), "general_solution": Cs[1] + Cs[2] * t + Cs[3] * t ** 2 + Cs[4] * t ** 3, "coefficients": {Cs[1]: 0, Cs[2]: 0, Cs[3]: -1, Cs[4]: 1}, "particular_solution": t ** 3 - t ** 2, "extrema_value": 4, } test_case_2 = { "solution": make_solution( n="2", L="x_diff_2 ** 2 - 48 * x", t0="0", t1="1", x0="1", x1="0", x0_array="-4", x1_array="0", ), "general_solution": Cs[1] + Cs[2] * t + Cs[3] * t ** 2 + Cs[4] * t ** 3 + t ** 4, "coefficients": {Cs[1]: 1, Cs[2]: -4, Cs[3]: 6, Cs[4]: -4}, "particular_solution": t ** 4 - 4 * t ** 3 + 6 * t ** 2 - 4 * t + 1, "extrema_value": 96 / 5, } test_case_3 = { "solution": make_solution( n="3", L="x_diff_3 ** 2", t0="0", t1="1", x0="0", x1="1", x0_array="0, 0", x1_array="3, 6", ), "general_solution": Cs[1] + Cs[2] * t + Cs[3] * t ** 2 + Cs[4] * t ** 3 + Cs[5] * t ** 4 + Cs[6] * t ** 5, "coefficients": {Cs[1]: 0, Cs[2]: 0, Cs[3]: 0, Cs[4]: 1, Cs[5]: 0, Cs[6]: 0}, "particular_solution": t ** 3, "extrema_value": 36, } test_case_4 = { "solution": make_solution( n="4", L="x_diff_4 ** 2", t0="0", t1="1", x0="0", x1="1", x0_array="0, 0, 0", x1_array="3, 6, 10", ), "general_solution": Cs[1] + Cs[2] * t + Cs[3] * t ** 2 + Cs[4] * t ** 3 + Cs[5] * t ** 4 + Cs[6] * t ** 5 + Cs[7] * t ** 6 + Cs[8] * t ** 7, "coefficients": { Cs[1]: 0, Cs[2]: 0, Cs[3]: 0, Cs[4]: 0, Cs[5]: 10 / 3, Cs[6]: -4, Cs[7]: 2, Cs[8]: -1 / 3, }, "particular_solution": -(t ** 7) / 3 + 2 * t ** 6 - 4 * t ** 5 + 10 / 3 * t ** 4, "extrema_value": 640, } @parameterized_class([test_case_1, test_case_2, test_case_3, test_case_4]) class TestSolver(unittest.TestCase): def test_general_solution(self): self.assertAlmostEqual(self.solution.general_solution, self.general_solution) def test_coefficients(self): for coef in self.coefficients.keys(): self.assertAlmostEqual( self.solution.coefficients[coef], self.coefficients[coef] ) def test_particular_solution(self): self.assertAlmostEqual( self.solution.particular_solution, self.particular_solution ) def test_extrema_value(self): self.assertAlmostEqual(self.solution.extrema_value, self.extrema_value) if __name__ == "__main__": unittest.main()
""" Functions for plotting datasets nicely. """ # TODO: custom xtick labels # # TODO: annotations, arbitrary text # # TODO: docs # import functools import numpy as np from ..manage import auto_xyz_ds from .core import ( Plotter, AbstractLinePlot, AbstractScatter, AbstractHistogram, AbstractHeatMap, PLOTTER_DEFAULTS, calc_row_col_datasets, intercept_call_arg, prettify, ) from .color import xyz_colormaps # ----------------- Main lineplot interface for matplotlib ------------------ # class PlotterMatplotlib(Plotter): """ """ def __init__(self, ds, x, y, z=None, y_err=None, x_err=None, **kwargs): super().__init__(ds, x, y, z=z, y_err=y_err, x_err=x_err, **kwargs, backend='MATPLOTLIB') def prepare_axes(self): """ """ import matplotlib as mpl if self.math_serif: mpl.rcParams['mathtext.fontset'] = 'cm' mpl.rcParams['mathtext.rm'] = 'serif' mpl.rcParams['font.family'] = self.font import matplotlib.pyplot as plt if self.axes_rloc is not None: if self.axes_loc is not None: raise ValueError("Cannot specify absolute and relative " "location of axes at the same time.") if self.add_to_fig is None: raise ValueError("Can only specify relative axes position " "when adding to a figure, i.e. when " "add_to_fig != None") if self.axes_rloc is not None: self._axes_loc = self._cax_rel2abs_rect( self.axes_rloc, self.add_to_fig.get_axes()[-1]) else: self._axes_loc = self.axes_loc # Add a new set of axes to an existing plot if self.add_to_fig is not None and self.subplot is None: self._fig = self.add_to_fig self._axes = self._fig.add_axes((0.4, 0.6, 0.30, 0.25) if self._axes_loc is None else self._axes_loc) # Add lines to an existing set of axes elif self.add_to_axes is not None: self._fig = self.add_to_axes self._axes = self._fig.get_axes()[-1] # Add lines to existing axes but only sharing the x-axis elif self.add_to_xaxes is not None: self._fig = self.add_to_xaxes self._axes = self._fig.get_axes()[-1].twinx() elif self.subplot is not None: # Add new axes as subplot to existing subplot if self.add_to_fig is not None: self._fig = self.add_to_fig # New figure but add as subplot else: self._fig = plt.figure(self.fignum, figsize=self.figsize, dpi=100) self._axes = self._fig.add_subplot(self.subplot) # Make new figure and axes else: self._fig = plt.figure(self.fignum, figsize=self.figsize, dpi=100) self._axes = self._fig.add_axes((0.15, 0.15, 0.8, 0.75) if self._axes_loc is None else self._axes_loc) self._axes.set_title("" if self.title is None else self.title, fontsize=self.fontsize_title) def set_axes_labels(self): if self._xtitle: self._axes.set_xlabel(self._xtitle, fontsize=self.fontsize_xtitle) self._axes.xaxis.labelpad = self.xtitle_pad if self._ytitle: self._axes.set_ylabel(self._ytitle, fontsize=self.fontsize_ytitle) self._axes.yaxis.labelpad = self.ytitle_pad if self.ytitle_right: self._axes.yaxis.set_label_position("right") def set_axes_scale(self): """ """ self._axes.set_xscale("log" if self.xlog else "linear") self._axes.set_yscale("log" if self.ylog else "linear") def set_axes_range(self): """ """ if self._xlims: self._axes.set_xlim(self._xlims) if self._ylims: self._axes.set_ylim(self._ylims) def set_spans(self): """ """ if self.vlines is not None: for x in self.vlines: self._axes.axvline(x, lw=self.span_width, color=self.span_color, linestyle=self.span_style) if self.hlines is not None: for y in self.hlines: self._axes.axhline(y, lw=self.span_width, color=self.span_color, linestyle=self.span_style) def set_gridlines(self): """ """ for axis in ('top', 'bottom', 'left', 'right'): 
self._axes.spines[axis].set_linewidth(1.0) if self.gridlines: # matplotlib has coarser gridine style than bokeh self._gridline_style = [x / 2 for x in self.gridline_style] self._axes.set_axisbelow(True) # ensures gridlines below all self._axes.grid(True, color="0.9", which='major', linestyle=(0, self._gridline_style)) self._axes.grid(True, color="0.95", which='minor', linestyle=(0, self._gridline_style)) def set_tick_marks(self): """ """ import matplotlib as mpl if self.xticks is not None: self._axes.set_xticks(self.xticks, minor=False) self._axes.get_xaxis().set_major_formatter( mpl.ticker.ScalarFormatter()) if self.yticks is not None: self._axes.set_yticks(self.yticks, minor=False) self._axes.get_yaxis().set_major_formatter( mpl.ticker.ScalarFormatter()) if self.xtick_labels is not None: self._axes.set_xticklabels(self.xtick_labels) if self.xticklabels_hide: (self._axes.get_xaxis() .set_major_formatter(mpl.ticker.NullFormatter())) if self.yticklabels_hide: (self._axes.get_yaxis() .set_major_formatter(mpl.ticker.NullFormatter())) self._axes.tick_params(labelsize=self.fontsize_ticks, direction='out', bottom='bottom' in self.ticks_where, top='top' in self.ticks_where, left='left' in self.ticks_where, right='right' in self.ticks_where) if self.yticklabels_right or (self.yticklabels_right is None and self.ytitle_right is True): self._axes.yaxis.tick_right() def _cax_rel2abs_rect(self, rel_rect, cax=None): """Turn a relative axes specification into a absolute one. """ if cax is None: cax = self._axes bbox = cax.get_position() l, b, w, h = bbox.x0, bbox.y0, bbox.width, bbox.height cl = l + w * rel_rect[0] cb = b + h * rel_rect[1] try: cw = w * rel_rect[2] ch = h * rel_rect[3] except IndexError: return cl, cb return cl, cb, cw, ch def plot_legend(self, grid=False, labels_handles=None): """Add a legend """ if self._use_legend: if labels_handles: labels, handles = zip(*labels_handles.items()) else: handles, labels = self._legend_handles, self._legend_labels if self.legend_reverse: handles, labels = handles[::-1], labels[::-1] # Limit minimum size of markers that appear in legend should_auto_scale_legend_markers = ( (self.legend_marker_scale is None) and # not already set hasattr(self, '_marker_size') and # is a valid parameter self._marker_size < 3 # and is small ) if should_auto_scale_legend_markers: self.legend_marker_scale = 3 / self._marker_size opts = { 'title': (self.z_coo if self.ztitle is None else self.ztitle), 'loc': self.legend_loc, 'fontsize': self.fontsize_zlabels, 'frameon': self.legend_frame, 'numpoints': 1, 'scatterpoints': 1, 'handlelength': self.legend_handlelength, 'markerscale': self.legend_marker_scale, 'labelspacing': self.legend_label_spacing, 'columnspacing': self.legend_column_spacing, 'bbox_to_anchor': self.legend_bbox, 'ncol': self.legend_ncol } if grid: bb = opts['bbox_to_anchor'] if bb is None: opts['bbox_to_anchor'] = (1, 0.5, 0, 0) opts['loc'] = 'center left' else: loc = opts['loc'] # will get warning for 'best' opts['loc'] = 'center' if loc in ('best', 0) else loc lgnd = self._fig.legend(handles, labels, **opts) else: lgnd = self._axes.legend(handles, labels, **opts) lgnd.get_title().set_fontsize(self.fontsize_ztitle) if self.legend_marker_alpha is not None: for l in lgnd.legendHandles: l.set_alpha(1.0) def set_mappable(self): """Mappale object for colorbars. """ from matplotlib.cm import ScalarMappable self.mappable = ScalarMappable(cmap=self.cmap, norm=self._color_norm) self.mappable.set_array([]) def plot_colorbar(self, grid=False): """Add a colorbar to the data. 
""" if self._use_colorbar: # Whether the colorbar should clip at either end extendmin = (self.vmin is not None) and (self.vmin > self._zmin) extendmax = (self.vmax is not None) and (self.vmax < self._zmax) extend = ('both' if extendmin and extendmax else 'min' if extendmin else 'max' if extendmax else 'neither') opts = {'extend': extend, 'ticks': self.zticks} if self.colorbar_relative_position: opts['cax'] = self._fig.add_axes( self._cax_rel2abs_rect(self.colorbar_relative_position)) if grid: opts['ax'] = self._fig.axes opts['anchor'] = (0.5, 0.5) self._cbar = self._fig.colorbar( self.mappable, **opts, **self.colorbar_opts) self._cbar.ax.tick_params(labelsize=self.fontsize_zlabels) self._cbar.ax.set_title( self._ctitle, fontsize=self.fontsize_ztitle, color=self.colorbar_color if self.colorbar_color else None) if self.colorbar_color: self._cbar.ax.yaxis.set_tick_params( color=self.colorbar_color, labelcolor=self.colorbar_color) self._cbar.outline.set_edgecolor(self.colorbar_color) def set_panel_label(self): if self.panel_label is not None: self._axes.text(*self.panel_label_loc, self.panel_label, transform=self._axes.transAxes, fontsize=self.fontsize_panel_label, color=self.panel_label_color, ha='left', va='top') def show(self): import matplotlib.pyplot as plt if self.return_fig: plt.close(self._fig) return self._fig def prepare_plot(self): """Do all the things that every plot has. """ self.prepare_axes() self.set_axes_labels() self.set_axes_scale() self.set_axes_range() self.set_spans() self.set_gridlines() self.set_tick_marks() # --------------------------------------------------------------------------- # def mpl_multi_plot(fn): """Decorate a plotting function to plot a grid of values. """ @functools.wraps(fn) def multi_plotter(ds, *args, row=None, col=None, hspace=None, wspace=None, tight_layout=True, coltitle=None, rowtitle=None, **kwargs): if (row is None) and (col is None): return fn(ds, *args, **kwargs) import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec # Set some global parameters p = fn(ds, *args, **kwargs, call=False) p.prepare_data_multi_grid() kwargs['vmin'] = kwargs.pop('vmin', p.vmin) kwargs['vmax'] = kwargs.pop('vmax', p.vmax) coltitle = col if coltitle is None else coltitle rowtitle = row if rowtitle is None else rowtitle # split the dataset into its respective rows and columns ds_r_c, nrows, ncols = calc_row_col_datasets(ds, row=row, col=col) figsize = kwargs.pop('figsize', (3 * ncols, 3 * nrows)) return_fig = kwargs.pop('return_fig', PLOTTER_DEFAULTS['return_fig']) # generate a figure for all the plots to use p._fig = plt.figure(figsize=figsize, dpi=100, constrained_layout=tight_layout) p._fig.set_constrained_layout_pads(hspace=hspace, wspace=wspace) # and a gridspec to position them gs = GridSpec(nrows=nrows, ncols=ncols, figure=p._fig, hspace=hspace, wspace=wspace) # want to collect all entries for legend labels_handles = {} # range through rows and do subplots for i, ds_r in enumerate(ds_r_c): skws = {'legend': False, 'colorbar': False} # if not last row if i != nrows - 1: skws['xticklabels_hide'] = True skws['xtitle'] = '' # range through columns for j, sub_ds in enumerate(ds_r): if hspace == 0 and wspace == 0: ticks_where = [] if j == 0: ticks_where.append('left') if i == 0: ticks_where.append('top') if j == ncols - 1: ticks_where.append('right') if i == nrows - 1: ticks_where.append('bottom') skws['ticks_where'] = ticks_where # if not first column if j != 0: skws['yticklabels_hide'] = True skws['ytitle'] = '' # label each column if (i == 0) and 
(col is not None): col_val = prettify(ds[col].values[j]) skws['title'] = "{} = {}".format(coltitle, col_val) fx = 'fontsize_xtitle' skws['fontsize_title'] = kwargs.get( fx, PLOTTER_DEFAULTS[fx]) # label each row if (j == ncols - 1) and (row is not None): # XXX: if number of cols==1 this hide yaxis - want both row_val = prettify(ds[row].values[i]) skws['ytitle_right'] = True skws['ytitle'] = "{} = {}".format(rowtitle, row_val) sP = fn(sub_ds, *args, add_to_fig=p._fig, call='both', subplot=gs[i, j], **{**kwargs, **skws}) try: labels_handles.update(dict(zip(sP._legend_labels, sP._legend_handles))) except AttributeError: pass # make sure all have the same plot ranges xmins, xmaxs = zip(*(gax.get_xlim() for gax in p._fig.axes)) ymins, ymaxs = zip(*(gax.get_ylim() for gax in p._fig.axes)) xmin, xmax = min(xmins), max(xmaxs) ymin, ymax = min(ymins), max(ymaxs) for gax in p._fig.axes: gax.set_xlim(xmin, xmax) gax.set_ylim(ymin, ymax) # add global legend or colorbar p.plot_legend(grid=True, labels_handles=labels_handles) p.plot_colorbar(grid=True) if return_fig: plt.close(p._fig) return p._fig return multi_plotter # --------------------------------------------------------------------------- # class LinePlot(PlotterMatplotlib, AbstractLinePlot): """ """ def __init__(self, ds, x, y, z=None, *, y_err=None, x_err=None, **kwargs): super().__init__(ds, x, y, z=z, y_err=y_err, x_err=x_err, **kwargs) def plot_lines(self): """ """ for data in self._gen_xy(): col = next(self._cols) line_opts = { 'c': col, 'lw': next(self._lws), 'marker': next(self._mrkrs), 'markersize': self._marker_size, 'markeredgecolor': col[:3] + (self.marker_alpha * col[3],), 'markerfacecolor': col[:3] + (self.marker_alpha * col[3] / 2,), 'label': next(self._zlbls), 'zorder': next(self._zordrs), 'linestyle': next(self._lines), 'rasterized': self.rasterize, } if ('ye' in data) or ('xe' in data): self._axes.errorbar(data['x'], data['y'], yerr=data.get('ye', None), xerr=data.get('xe', None), ecolor=col, capsize=self.errorbar_capsize, capthick=self.errorbar_capthick, elinewidth=self.errorbar_linewidth, **line_opts) else: # add line to axes, with options cycled through self._axes.plot(data['x'], data['y'], **line_opts) self._legend_handles, self._legend_labels = \ self._axes.get_legend_handles_labels() def __call__(self): self.prepare_data_single() # matplotlib preparation self.prepare_plot() self.plot_lines() self.plot_legend() self.plot_colorbar() self.set_panel_label() return self.show() @mpl_multi_plot @intercept_call_arg def lineplot(ds, x, y, z=None, y_err=None, x_err=None, **plot_opts): """From ``ds`` plot lines of ``y`` as a function of ``x``, optionally for varying ``z``. Parameters ---------- ds : xarray.Dataset Dataset to plot from. x : str Dimension to plot along the x-axis. y : str or tuple[str] Variable(s) to plot along the y-axis. If tuple, plot each of the variables - instead of ``z``. z : str, optional Dimension to plot into the page. y_err : str, optional Variable to plot as y-error. x_err : str, optional Variable to plot as x-error. row : str, optional Dimension to vary over as a function of rows. col : str, optional Dimension to vary over as a function of columns. plot_opts See ``xyzpy.plot.core.PLOTTER_DEFAULTS``. 
""" return LinePlot(ds, x, y, z, y_err=y_err, x_err=x_err, **plot_opts) class AutoLinePlot(LinePlot): def __init__(self, x, y_z, **lineplot_opts): ds = auto_xyz_ds(x, y_z) super().__init__(ds, 'x', 'y', z='z', **lineplot_opts) def auto_lineplot(x, y_z, **lineplot_opts): """Auto version of :func:`~xyzpy.lineplot` that accepts array arguments by converting them to a ``Dataset`` first. """ return AutoLinePlot(x, y_z, **lineplot_opts)() # --------------------------------------------------------------------------- # _SCATTER_ALT_DEFAULTS = ( ('legend_handlelength', 0), ) class Scatter(PlotterMatplotlib, AbstractScatter): def __init__(self, ds, x, y, z=None, **kwargs): # set some scatter specific options for k, default in _SCATTER_ALT_DEFAULTS: if k not in kwargs: kwargs[k] = default super().__init__(ds, x, y, z, **kwargs) def plot_scatter(self): """ """ self._legend_handles = [] self._legend_labels = [] for data in self._gen_xy(): if 'c' in data: col = data['c'] else: col = [next(self._cols)] scatter_opts = { 'c': col, 'marker': next(self._mrkrs), 's': self._marker_size, 'alpha': self.marker_alpha, 'label': next(self._zlbls), 'zorder': next(self._zordrs), 'rasterized': self.rasterize, } if 'c' in data: scatter_opts['cmap'] = self.cmap self._legend_handles.append( self._axes.scatter(data['x'], data['y'], **scatter_opts)) self._legend_labels.append( scatter_opts['label']) def __call__(self): self.prepare_data_single() # matplotlib preparation self.prepare_plot() self.plot_scatter() self.plot_legend() self.plot_colorbar() self.set_panel_label() return self.show() @mpl_multi_plot @intercept_call_arg def scatter(ds, x, y, z=None, y_err=None, x_err=None, **plot_opts): """From ``ds`` plot a scatter of ``y`` against ``x``, optionally for varying ``z``. Parameters ---------- ds : xarray.Dataset Dataset to plot from. x : str Quantity to plot along the x-axis. y : str or tuple[str] Quantity(s) to plot along the y-axis. If tuple, plot each of the variables - instead of ``z``. z : str, optional Dimension to plot into the page. y_err : str, optional Variable to plot as y-error. x_err : str, optional Variable to plot as x-error. row : str, optional Dimension to vary over as a function of rows. col : str, optional Dimension to vary over as a function of columns. plot_opts See ``xyzpy.plot.core.PLOTTER_DEFAULTS``. """ return Scatter(ds, x, y, z, y_err=y_err, x_err=x_err, **plot_opts) class AutoScatter(Scatter): def __init__(self, x, y_z, **scatter_opts): ds = auto_xyz_ds(x, y_z) super().__init__(ds, 'x', 'y', z='z', **scatter_opts) def auto_scatter(x, y_z, **scatter_opts): """Auto version of :func:`~xyzpy.scatter` that accepts array arguments by converting them to a ``Dataset`` first. 
""" return AutoScatter(x, y_z, **scatter_opts)() # --------------------------------------------------------------------------- # _HISTOGRAM_SPECIFIC_OPTIONS = { 'stacked': False, } _HISTOGRAM_ALT_DEFAULTS = { 'xtitle': 'x', 'ytitle': 'f(x)', } class Histogram(PlotterMatplotlib, AbstractHistogram): def __init__(self, ds, x, z=None, **kwargs): # Set the alternative defaults for opt, default in _HISTOGRAM_ALT_DEFAULTS.items(): if opt not in kwargs: kwargs[opt] = default # Set histogram specfic options for opt, default in _HISTOGRAM_SPECIFIC_OPTIONS.items(): setattr(self, opt, kwargs.pop(opt, default)) super().__init__(ds, x, None, z=z, **kwargs) def plot_histogram(self): from matplotlib.patches import Rectangle, Polygon def gen_ind_plots(): for data in self._gen_xy(): col = next(self._cols) edgecolor = col[:3] + (self.marker_alpha * col[3],) facecolor = col[:3] + (self.marker_alpha * col[3] / 4,) linewidth = next(self._lws) zorder = next(self._zordrs) label = next(self._zlbls) handle = Rectangle((0, 0), 1, 1, color=facecolor, ec=edgecolor) yield (data['x'], edgecolor, facecolor, linewidth, zorder, label, handle) xs, ecs, fcs, lws, zds, lbs, hnds = zip(*gen_ind_plots()) histogram_opts = { 'label': lbs, 'bins': self.bins, 'density': True, 'histtype': 'stepfilled', 'fill': True, 'stacked': self.stacked, 'rasterized': self.rasterize, } _, _, patches = self._axes.hist(xs, **histogram_opts) # Need to set varying colors, linewidths etc seperately for patch, ec, fc, lw, zd in zip(patches, ecs, fcs, lws, zds): # patch is not iterable if only one set of data created if isinstance(patch, Polygon): patch = (patch,) for sub_patch in patch: sub_patch.set_edgecolor(ec) sub_patch.set_facecolor(fc) sub_patch.set_linewidth(lw) sub_patch.set_zorder(zd) # store handles for legend self._legend_handles, self._legend_labels = hnds, lbs def __call__(self): # Core preparation self.prepare_data_single() # matplotlib preparation self.prepare_plot() self.plot_histogram() self.plot_legend() self.plot_colorbar() self.set_panel_label() return self.show() @mpl_multi_plot @intercept_call_arg def histogram(ds, x, z=None, **plot_opts): """Dataset histogram. Parameters ---------- ds : xarray.Dataset The dataset to plot. x : str, sequence of str The variable(s) to plot the probability density of. If sequence, plot a histogram of each instead of using a ``z`` coordinate. z : str, optional If given, range over this coordinate a plot a histogram for each. row : str, optional Dimension to vary over as a function of rows. col : str, optional Dimension to vary over as a function of columns. plot_opts See ``xyzpy.plot.core.PLOTTER_DEFAULTS``. """ return Histogram(ds, x, z=z, **plot_opts) class AutoHistogram(Histogram): def __init__(self, x, **histogram_opts): ds = auto_xyz_ds(x) super().__init__(ds, 'x', **histogram_opts) def auto_histogram(x, **histogram_opts): """Auto version of :func:`~xyzpy.histogram` that accepts array arguments by converting them to a ``Dataset`` first. 
""" return AutoHistogram(x, **histogram_opts)() # --------------------------------------------------------------------------- # _HEATMAP_ALT_DEFAULTS = ( ('legend', False), ('colorbar', True), ('colormap', 'inferno'), ('method', 'pcolormesh'), ('gridlines', False), ('rasterize', True), ) class HeatMap(PlotterMatplotlib, AbstractHeatMap): def __init__(self, ds, x, y, z, **kwargs): # set some heatmap specific options for k, default in _HEATMAP_ALT_DEFAULTS: if k not in kwargs: kwargs[k] = default super().__init__(ds, x, y, z, **kwargs) def plot_heatmap(self): """Plot the data as a heatmap. """ self.calc_color_norm() # add extra coords since they *bound* the quads placed -> want ticks # at center of quads X = self._heatmap_x av_x_bin = np.mean(np.abs(X[:-1] - X[1:])) X = np.append(X - av_x_bin / 2, X[-1] + av_x_bin / 2) Y = self._heatmap_y av_Y_bin = np.mean(np.abs(Y[:-1] - Y[1:])) Y = np.append(Y - av_Y_bin / 2, Y[-1] + av_Y_bin / 2) self._heatmap = getattr(self._axes, self.method)( X, Y, self._heatmap_var, norm=self._color_norm, cmap=xyz_colormaps(self.colormap), rasterized=self.rasterize) def __call__(self): # Core preparation self.prepare_data_single() # matplotlib preparation self.prepare_plot() self.plot_heatmap() self.plot_colorbar() self.set_panel_label() return self.show() @mpl_multi_plot @intercept_call_arg def heatmap(ds, x, y, z, **kwargs): """From ``ds`` plot variable ``z`` as a function of ``x`` and ``y`` using a 2D heatmap. Parameters ---------- ds : xarray.Dataset Dataset to plot from. x : str Dimension to plot along the x-axis. y : str Dimension to plot along the y-axis. z : str, optional Variable to plot as colormap. row : str, optional Dimension to vary over as a function of rows. col : str, optional Dimension to vary over as a function of columns. plot_opts See ``xyzpy.plot.core.PLOTTER_DEFAULTS``. """ return HeatMap(ds, x, y, z, **kwargs) class AutoHeatMap(HeatMap): def __init__(self, x, **heatmap_opts): ds = auto_xyz_ds(x) super().__init__(ds, 'y', 'z', 'x', **heatmap_opts) def auto_heatmap(x, **heatmap_opts): """Auto version of :func:`~xyzpy.heatmap` that accepts array arguments by converting them to a ``Dataset`` first. """ return AutoHeatMap(x, **heatmap_opts)() # --------------- Miscellenous matplotlib plotting functions ---------------- # def choose_squarest_grid(x): p = x ** 0.5 if p.is_integer(): m = n = int(p) else: m = int(round(p)) p = int(p) n = p if m * p >= x else p + 1 return m, n def visualize_matrix(x, figsize=(4, 4), colormap='Greys', touching=False, zlims=(None, None), gridsize=None, tri=None, return_fig=True): """Plot the elements of one or more matrices. Parameters ---------- x : array or iterable of arrays 2d-matrix or matrices to plot. figsize : tuple Total size of plot. colormap : str Colormap to use to weight elements. touching : bool If plotting more than one matrix, whether the edges should touch. zlims: Scaling parameters for the element colorings, (i.e. if these are set then the weightings are not normalized). return_fig : bool Whether to return the figure created or just show it. 
""" import matplotlib.pyplot as plt fig = plt.figure(figsize=figsize, dpi=100) if isinstance(x, np.ndarray): x = (x,) nx = len(x) if gridsize: m, n = gridsize else: m, n = choose_squarest_grid(nx) subplots = tuple((m, n, i) for i in range(1, nx + 1)) for img, subplot in zip(x, subplots): if tri is not None: if tri not in {'upper', 'lower'}: raise ValueError("'tri' should be one of {'upper', 'lower}.") ma_fn = np.tril if tri == 'upper' else np.triu img = np.ma.array(img, mask=ma_fn(np.ones_like(img), k=-1)) ax = fig.add_subplot(*subplot) ax.imshow(img, cmap=xyz_colormaps(colormap), interpolation='nearest', vmin=zlims[0], vmax=zlims[1]) # Hide the right and top spines ax.spines['right'].set_visible(False) ax.spines['left'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.spines['top'].set_visible(False) # Only show ticks on the left and bottom spines ax.yaxis.set_visible(False) ax.xaxis.set_visible(False) plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=-0.001 if touching else 0.05, hspace=-0.001 if touching else 0.05) if return_fig: plt.close(fig) return fig else: plt.show()
from telegram.ext.updater import Updater from telegram.update import Update from telegram.ext.callbackcontext import CallbackContext from telegram.ext.commandhandler import CommandHandler from telegram.ext.messagehandler import MessageHandler from telegram.ext.filters import Filters from telegram import KeyboardButton, ReplyKeyboardMarkup from collections import OrderedDict import requests import json import f # Content of f.py # # Module: f.py # from telegram.ext.updater import Updater # updater = Updater("Token", use_context=True) ONESTO = 'onesto.txt' PAP = 'pap.txt' PPB = 'ppb.txt' PUNTI = 'punteggio.txt' CLASSIFICA = 'classifica.txt' t1 = "/onesto" t2 = "/pap" t3 = "/ppb" t5 = "/punti" t4 = "/classifica" PROVA = 21 punti_onesto = 0 punti_pap = 0 punti_ppb = 0 def dict_or_OrdDict_to_formatted_str(OD, mode='dict', s="", indent=' '*4, level=0): def is_number(s): try: float(s) return True except ValueError: return False def fstr(s): return s if is_number(s) else '"%s"'%s if mode != 'dict': kv_tpl = '("%s", %s)' ST = 'OrderedDict([\n'; END = '])' else: kv_tpl = '"%s": %s' ST = '{\n'; END = '}' for i,k in enumerate(OD.keys()): if type(OD[k]) in [dict, OrderedDict]: level += 1 s += (level-1)*indent+kv_tpl%(k,ST+dict_or_OrdDict_to_formatted_str(OD[k], mode=mode, indent=indent, level=level)+(level-1)*indent+END) level -= 1 else: s += level*indent+kv_tpl%(k,fstr(OD[k])) if i!=len(OD)-1: s += "," s += "\n" return s def start(update: Update, context: CallbackContext): buttons = [[KeyboardButton(t1)], [KeyboardButton(t2)], [KeyboardButton(t3)], [KeyboardButton(t4)], [KeyboardButton(t5)]] context.bot.send_message(chat_id=update.effective_chat.id, text="Ciao, Benvenuto nel bot non ufficiale del fantacitorio. Scrivi /help per vedere tutti i comandi.", reply_markup=ReplyKeyboardMarkup(buttons)) def help(update: Update, context: CallbackContext): update.message.reply_text("""Available Commands : /onesto - squadra /pap - squadra /ppb - squadra /classifica - giocatori /punti - tutti i politici""") def onestoo(update: Update, context: CallbackContext): onesto_1 = " "+(str(onesto).replace("{","").replace("}", "").replace("'", "").replace(",", "\n").replace("prezzo", "").replace(":", " ")) update.message.reply_text(onesto_1) def papp(update: Update, context: CallbackContext): pap_1 = " "+(str(pap).replace("{","").replace("}", "").replace("'", "").replace(",", "\n").replace("prezzo", "").replace(":", " ")) update.message.reply_text(pap_1) def ppbb(update: Update, context: CallbackContext): ppb_1 = " "+(str(ppb).replace("{","").replace("}", "").replace("'", "").replace(",", "\n").replace("prezzo", "").replace(":", " ")) update.message.reply_text(ppb_1) def puntii(update: Update, context: CallbackContext): # o_pol_1 = dict_or_OrdDict_to_formatted_str(o_pol) # o_pol_2 = (str(o_pol_1).replace("\"", "").replace(",", "").replace("{","").replace("}", "").replace("'", "").replace(",", "\n").replace("punti", "").replace(":", " ")) # update.message.reply_text(o_pol_2) punti_1 = " "+(str(punti).replace("{","").replace("}", "").replace("'", "").replace(",", "\n").replace("punti", "").replace(":", " ")) update.message.reply_text(punti_1) def classificaa(update: Update, context: CallbackContext): res_1 = dict_or_OrdDict_to_formatted_str(res) res_2 = (str(res_1).replace("\"", "").replace(",", "")) update.message.reply_text(res_2) def unknown(update: Update, context: CallbackContext): update.message.reply_text( "Sorry '%s' is not a valid command" % update.message.text) def unknown_text(update: Update, context: 
CallbackContext): update.message.reply_text( "Sorry I can't recognize you , you said '%s'" % update.message.text) onesto = {} file = open(ONESTO, 'r') for line in file: campi = line.rstrip().split(',') nome = campi[0] fanfani = { 'prezzo': int(campi[1]) } onesto[nome] = fanfani file.close() pap = {} file = open(PAP, 'r') for line in file: campi = line.rstrip().split(',') nome = campi[0] fanfani = { 'prezzo': int(campi[1]) } pap[nome] = fanfani file.close() ppb = {} file = open(PPB, 'r') for line in file: campi = line.rstrip().split(',') nome = campi[0] fanfani = { 'prezzo': int(campi[1]) } ppb[nome] = fanfani file.close() punti = {} file = open(PUNTI, 'r') for line in file: campi = line.rstrip().split(',') nome = campi[0] if nome in punti: punti[nome]['punti'] = int(punti[nome]['punti']) + int(campi[1]) else: p = { 'punti': int(campi[1]) } punti[nome] = p file.close() classifica = {} for politico in punti: for membro in onesto: if membro == politico: punti_p = punti[politico]['punti'] punti_onesto = punti_onesto + punti_p classifica.update({"ONESTO": punti_onesto}) for politico in punti: for membro in pap: if membro == politico: punti_r = punti[politico]['punti'] punti_pap = punti_pap + punti_r classifica.update({"PAP": punti_pap}) for politico in punti: for membro in ppb: if membro == politico: punti_q = punti[politico]['punti'] punti_ppb = punti_ppb + punti_q classifica.update({"PPB": punti_ppb}) res = OrderedDict(reversed(list(classifica.items()))) # o_pol = OrderedDict(reversed(list(punti.items()))) f.updater.dispatcher.add_handler(CommandHandler('start', start)) f.updater.dispatcher.add_handler(CommandHandler('onesto', onestoo)) f.updater.dispatcher.add_handler(CommandHandler('pap', papp)) f.updater.dispatcher.add_handler(CommandHandler('ppb', ppbb)) f.updater.dispatcher.add_handler(CommandHandler('punti', puntii)) f.updater.dispatcher.add_handler(CommandHandler('classifica', classificaa)) f.updater.dispatcher.add_handler(CommandHandler('help', help)) f.updater.dispatcher.add_handler(MessageHandler(Filters.text, unknown)) f.updater.dispatcher.add_handler(MessageHandler(Filters.command, unknown)) f.updater.dispatcher.add_handler(MessageHandler(Filters.text, unknown_text)) f.updater.start_polling()
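# Expected layout of the text files loaded above (the rows are illustrative;
# one comma-separated "name,value" pair per line). The ONESTO/PAP/PPB files map
# each politician to a purchase price, PUNTI maps a politician to points and
# repeated names are summed. CLASSIFICA is defined but never read here.
#
#   onesto.txt:      Mario Rossi,25
#   punteggio.txt:   Mario Rossi,10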
# The text gets rearranged like this; I don't know the proper term for it, just improvising
def rev(txt):
    g = int(len(txt) / 2)
    print(txt[g:] + txt[:g])
    # if len(txt) % 2 == 0:
    #     print(txt[g:] + txt[:g])
    # else:
    #     print('error, what is this')

tulisan = 'sabi lika taki jarbela honpyt engbar'
les = tulisan.split()
for i in range(len(les)):
    rev(les[i])

'''
NO IDEA WHAT THE ABOVE IS CALLED
ACTUALLY, IF YOU WANT TO FLIP TEXT, I.E. REVERSE A STRING, JUST DO THIS
'''
contohtext = 'tamvan'
print(contohtext[::-1])
#!/usr/bin/python

import ConfigParser


class Configuration:
    def __init__(self, size, count, page):
        self.config = ConfigParser.ConfigParser()
        self.config.read('/opt/esm/etc/http.ini')

    def user(self):
        return self.config.get('access', 'admin')

    def passwd(self):
        return self.config.get('access', 'password')
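# Usage sketch for the Configuration wrapper above (Python 2, like the module
# itself). The ini path is hard-coded and the size/count/page constructor
# arguments are accepted but unused. Example /opt/esm/etc/http.ini contents:
#
#   [access]
#   admin = some_user
#   password = some_password
#
# cfg = Configuration(None, None, None)
# print cfg.user(), cfg.passwd()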
import numpy as np import pandas as pd import sys import json import os import igraph as ig import pprint import copy def getopts(argv): opts = {} while argv: if argv[0][0] == '-': opts[argv[0]] = argv[1] argv = argv[1:] return opts if __name__ == '__main__': args = getopts(sys.argv) print(args) # open graph decomposition info json graph_decomposition_info_path = '../data/' + args['-data'] + '/' + args['-data'] + '-decomposition-info.json' graph_decomposition_info = json.load(open(graph_decomposition_info_path)) print('graph-decomposition-info:', graph_decomposition_info) # define our data data = {} data['name'] = args['-data'] data['description'] = '' data['vertices'] = graph_decomposition_info['vertices'] data['edges'] = graph_decomposition_info['edges'] data['peels'] = [] data['layers'] = [] # get peels form graph layer json files peels = [] for file in os.listdir('../data/' + args['-data']): if file.startswith(args['-data'] + '-layer-'): peel = int(file.split('-')[-1].split('.')[0]) peels.append(peel) peels = sorted(peels) print('peels:', peels) # iterate through graph layer json files, create igraph, computer stats for each layer for peel in peels: print('-----') print('peel ', peel) graph_layer_data_path = '../data/' + args['-data'] + '/' + args['-data'] + '-layer-' + str(peel) + '.json' graph_layer = json.load(open(graph_layer_data_path)) g_layer = ig.Graph.DictList(directed=False, vertices=graph_layer['nodes'], vertex_name_attr='id', edges=graph_layer['links'], edge_foreign_keys=('source', 'target')) print(g_layer.summary()) # compute stats clone_count = 0 for v in g_layer.vs: if len(v['peels']) > 1: clone_count += 1 clustering = g_layer.transitivity_undirected() print('computing graph layer layout') if g_layer.vcount() < 5000: layout = g_layer.layout("fr", maxiter=100) for i, coords in enumerate(layout): graph_layer['nodes'][i]['fdx'] = coords[0] graph_layer['nodes'][i]['fdy'] = coords[1] if (g_layer.vcount() > 5000) and (g_layer.vcount() < 25000): # layout = g_layer.layout("drl") layout = g_layer.layout("fr", maxiter=50) for i, coords in enumerate(layout): graph_layer['nodes'][i]['fdx'] = coords[0] graph_layer['nodes'][i]['fdy'] = coords[1] if (g_layer.vcount() > 25000) and (g_layer.vcount() < 52000): # layout = g_layer.layout("drl") layout = g_layer.layout("fr", maxiter=10) for i, coords in enumerate(layout): graph_layer['nodes'][i]['fdx'] = coords[0] graph_layer['nodes'][i]['fdy'] = coords[1] if g_layer.vcount() > 52000: # layout = g_layer.layout("drl") pass largest_component = g_layer.subgraph(max(g_layer.components(),key=len)) # add connected component ids to each node print('adding connected component ids') comp_list = list(g_layer.components()) comp_list.sort(key=len) for i, cl in enumerate(comp_list): for j, n in enumerate(cl): cl[j] = g_layer.vs[n]['id'] for node in graph_layer['nodes']: for i, cmpt in enumerate(comp_list): if int(node['id']) in cmpt: node['cmpt'] = i break # define layer data print('computing layer data') layer = {} layer['peel'] = peel layer['edges'] = g_layer.ecount() layer['vertices'] = g_layer.vcount() layer['clones'] = clone_count layer['components'] = len(comp_list) layer['clustering'] = clustering layer['largest-component-edges'] = largest_component.ecount() layer['largest-component-vertices'] = largest_component.vcount() # add layer data to data data['layers'].append(layer) data['peels'].append(peel) # save data as json print('saving layer data with component ids') with open(graph_layer_data_path, 'w') as outfile: json.dump(graph_layer, outfile) 
# pprint.pprint(data) with open('../data/' + args['-data'] + '/' + args['-data'] + '.json' , 'w') as outfile: json.dump(data, outfile)
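# Illustrative invocation of the script above (the dataset name "mygraph" and
# the script file name are placeholders). It expects the decomposition-info
# file plus one layer file per peel under ../data/<name>/:
#
#   ../data/mygraph/mygraph-decomposition-info.json
#   ../data/mygraph/mygraph-layer-1.json, mygraph-layer-2.json, ...
#
#   python compute_layer_stats.py -data mygraph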
import os from pathlib import Path from urllib.parse import quote from django.http import JsonResponse from django.shortcuts import redirect, render from Tagger import params, settings from . import db_operations as db def _is_allowed_path(path: Path) -> bool: return ( not params.BASE_PATH or path == params.BASE_PATH or params.BASE_PATH in path.parents ) def _get_db_path_str(path: Path) -> str: if not params.BASE_PATH: return path.as_posix() if path == params.BASE_PATH: return "/" if params.BASE_PATH in path.parents: return "/".join(path.parts[len(params.BASE_PATH.parts) :]) return "" def _get_extended_dataset(dataset): for element in dataset: db_path_str = element["path"] db_path = Path(db_path_str) if params.BASE_PATH: if db_path_str == "/": path = params.BASE_PATH else: path = params.BASE_PATH.joinpath(db_path) else: path = db_path element["path_str"] = path.as_posix() element["system_path_str"] = str(path) element["db_path_str"] = db_path.as_posix() element["path_exists"] = path.exists() element["path_is_dir"] = path.is_dir() if element.get("tag_ids", []): element["tags"] = [ db.get_tag_by_id(int(mapping_tag_id)) for mapping_tag_id in element["tag_ids"] ] return dataset def _get_drive_root_dirs(): if os.name == "nt": from ctypes import windll # pylint: disable=C0415 bitfield = windll.kernel32.GetLogicalDrives() masks = [(1 << n, chr(ord("A") + n)) for n in range(ord("Z") - ord("A") + 1)] return [ {"path_str": drive + ":/", "system_path_str": drive + ":\\"} for mask, drive in masks if bitfield & mask ] return [] def mapping_details(request, mapping_id): if request.method == "GET": return render( request, "pathtagger/mapping_details.html", { "mapping": _get_extended_dataset([db.get_mapping(mapping_id)]).pop(), "tags": db.get_all_tags(), }, ) if request.method == "POST": if path_str := request.POST.get("path", ""): if _is_allowed_path(path := Path(path_str)): db.update_mapping(mapping_id, _get_db_path_str(path)) return redirect("pathtagger:mappings_list") def add_mapping(request): if path_str := request.POST.get("path", ""): path = Path(path_str) if _is_allowed_path(path) and not db.get_mapping_by_path( _get_db_path_str(path) ): db.insert_mapping(_get_db_path_str(path), []) return redirect("pathtagger:mappings_list") def edit_mappings(request): if request.POST.get("action_delete"): return delete_mappings(request) if request.POST.get("action_edit_tags"): if mapping_ids := [ int(mapping_id) for mapping_id in request.POST.getlist("mapping_id", []) ]: tag_ids_to_append = [] tag_ids_to_remove = [] for key in request.POST: if key.startswith("tag_"): value = request.POST.get(key, "") if value == "append": tag_ids_to_append.append(int(key.strip("tag_"))) elif value == "remove": tag_ids_to_remove.append(int(key.strip("tag_"))) if new_tag_names_str := request.POST.get("new_tag_names", ""): for name in new_tag_names_str.strip(",").split(","): name = name.strip() if tag := db.get_tag_by_name(name): tag_ids_to_append.append(tag.doc_id) else: tag_ids_to_append.append( db.insert_tag(name, params.DEFAULT_TAG_COLOR) ) db.append_tags_to_mappings(tag_ids_to_append, mapping_ids) db.remove_tags_from_mappings(tag_ids_to_remove, mapping_ids) return redirect("pathtagger:mappings_list") def delete_mappings(request): db.delete_mappings( [int(mapping_id) for mapping_id in request.POST.getlist("mapping_id", [])] ) return redirect("pathtagger:mappings_list") def mappings_list(request): tag_ids_to_include = [ int(tag_id) for tag_id in request.GET.getlist("tag_id_include", []) ] tag_ids_to_exclude = [ int(tag_id) for 
tag_id in request.GET.getlist("tag_id_exclude", []) ] path_name_like = request.GET.get("path_name_like", "") path_type = request.GET.get("path_type", "all") mappings = _get_extended_dataset( db.get_filtered_mappings(tag_ids_to_include, tag_ids_to_exclude, path_name_like) ) if path_type == "existent": mappings = [mapping for mapping in mappings if mapping["path_exists"]] elif path_type == "nonexistent": mappings = [mapping for mapping in mappings if not mapping["path_exists"]] filters = {} filters["tag_ids_to_include"] = tag_ids_to_include filters["tag_ids_to_exclude"] = tag_ids_to_exclude filters["path_name_like"] = path_name_like filters["path_type"] = path_type return render( request, "pathtagger/mappings_list.html", { "mappings": mappings, "no_mappings_at_all": not db.get_all_mappings(), "filters": filters, "tags": db.get_all_tags(), }, ) def tag_details(request, tag_id): if request.method == "GET": return render( request, "pathtagger/tag_details.html", { "tag": db.get_tag_by_id(tag_id), "mappings": _get_extended_dataset(db.get_tag_mappings(tag_id)), }, ) if request.method == "POST": tag_id = int(request.POST.get("tag_id", "0")) db.update_tag( tag_id, request.POST.get("name", ""), request.POST.get("color", params.DEFAULT_TAG_COLOR), ) return redirect("pathtagger:tag_details", tag_id=tag_id) return redirect("pathtagger:tags_list") def add_tag(request): name = request.POST.get("name", "") if name and not db.get_tag_by_name(name): db.insert_tag(name, request.POST.get("color", params.DEFAULT_TAG_COLOR)) return redirect("pathtagger:tags_list") def delete_tags(request): db.delete_tags([int(tag_id) for tag_id in request.POST.getlist("tag_id", [])]) return redirect("pathtagger:tags_list") def tags_list(request): tags = db.get_all_tags() for tag in tags: tag["occurrences"] = len(db.get_tag_mappings(tag.doc_id)) return render(request, "pathtagger/tags_list.html", {"tags": tags}) def remove_tag_from_mappings(request): tag_id = int(request.POST.get("tag_id", 0)) db.remove_tags_from_mappings( [tag_id], [int(mapping_id) for mapping_id in request.POST.getlist("mapping_id", [])], ) return redirect("pathtagger:tag_details", tag_id=tag_id) def path_details(request, path_str): path = Path(path_str) path_tokens = [] path_children = [] if path.exists(): path_parents = list(reversed(path.parents)) if path.is_dir(): path_parents.append(path) path_tokens = [ {"name": part, "path_str": parent.as_posix()} for part, parent in zip(path.parts, path_parents) ] if path.is_dir(): path_children = [ { "path_str": path_child.as_posix(), "db_path_str": _get_db_path_str(path_child), "name": path_child.name, "is_dir": path_child.is_dir(), } for path_child in sorted( list(path.glob("*")), key=lambda x: (1 - x.is_dir(), str(x).upper()) ) ] for path_child in path_children: mapping = db.get_mapping_by_path(path_child["db_path_str"]) if mapping and mapping.get("tag_ids", []): path_child["tags"] = [ db.get_tag_by_id(int(mapping_tag_id)) for mapping_tag_id in mapping["tag_ids"] ] return render( request, "pathtagger/path_details.html", { "path_str": path_str, "system_path_str": str(path), "ajax_path_str": quote(path_str), "is_root_path": path.anchor == str(path), "path_exists": path.exists(), "path_is_favorite": bool(db.get_favorite_path(_get_db_path_str(path))), "path_parent": path.parent.as_posix(), "path_tokens": path_tokens, "path_children": path_children, "tags": db.get_all_tags(), "drive_root_dirs": _get_drive_root_dirs(), "is_tagging_allowed": _is_allowed_path(path), }, ) def edit_path_tags(request): if paths := 
request.POST.getlist("path", []): mapping_ids = [] for path in paths: mapping = db.get_mapping_by_path(path) if mapping: mapping_ids.append(mapping.doc_id) else: mapping_ids.append(db.insert_mapping(path, [])) tag_ids_to_append = [] tag_ids_to_remove = [] for key in request.POST: if key.startswith("tag_"): value = request.POST.get(key, "") if value == "append": tag_ids_to_append.append(int(key.strip("tag_"))) elif value == "remove": tag_ids_to_remove.append(int(key.strip("tag_"))) if new_tag_names_str := request.POST.get("new_tag_names", ""): for name in new_tag_names_str.strip(",").split(","): name = name.strip() if tag := db.get_tag_by_name(name): tag_ids_to_append.append(tag.doc_id) else: tag_ids_to_append.append( db.insert_tag(name, params.DEFAULT_TAG_COLOR) ) db.append_tags_to_mappings(tag_ids_to_append, mapping_ids) db.remove_tags_from_mappings(tag_ids_to_remove, mapping_ids) return redirect( "pathtagger:path_details", path_str=request.POST.get("current_path", "") ) def toggle_favorite_path(request): if path_str := request.POST.get("path", ""): path = Path(path_str) db_path_str = _get_db_path_str(path) if not db_path_str: db_path_str = "/" if db.get_favorite_path(db_path_str): db.delete_favorite_path(db_path_str) is_favorite = False elif _is_allowed_path(path): db.insert_favorite_path(db_path_str) is_favorite = True else: is_favorite = False if request.is_ajax(): return JsonResponse({"status": "ok", "is_favorite": str(bool(is_favorite))}) return redirect("pathtagger:homepage") def root_path_redirect(request): # pylint: disable=W0613 return redirect( "pathtagger:path_details", path_str=Path(Path(settings.BASE_DIR).anchor).as_posix(), ) def homepage(request): return render( request, "pathtagger/homepage.html", {"favorite_paths": _get_extended_dataset(db.get_all_favorite_paths())}, )
from datetime import date, timedelta from django.conf import settings from django.core.management.base import BaseCommand from kitsune.kpi.management import utils from kitsune.kpi.models import L10N_METRIC_CODE, Metric, MetricKind from kitsune.sumo import googleanalytics class Command(BaseCommand): help = "Calculate new l10n coverage numbers and save." def handle(self, **options): """ L10n coverage is a measure of the amount of translations that are up to date, weighted by the number of visits for each locale. The "algorithm" (see Bug 727084): SUMO visits = Total SUMO visits for the last 30 days; Total translated = 0; For each locale { Total up to date = Total up to date + ((Number of up to date articles in the en-US top 50 visited)/50 ) * (Visitors for that locale / SUMO visits)); } An up to date article is any of the following: * An en-US article (by definition it is always up to date) * The latest en-US revision has been translated * There are only new revisions with TYPO_SIGNIFICANCE not translated * There is only one revision of MEDIUM_SIGNIFICANCE not translated """ # Get the top 60 visited articles. We will only use the top 50 # but a handful aren't localizable so we get some extras. top_60_docs = utils._get_top_docs(60) # Get the visits to each locale in the last 30 days. end = date.today() - timedelta(days=1) # yesterday start = end - timedelta(days=30) locale_visits = googleanalytics.visitors_by_locale(start, end) # Total visits. total_visits = sum(locale_visits.itervalues()) # Calculate the coverage. coverage = 0 for locale, visits in locale_visits.iteritems(): if locale == settings.WIKI_DEFAULT_LANGUAGE: num_docs = utils.MAX_DOCS_UP_TO_DATE up_to_date_docs = utils.MAX_DOCS_UP_TO_DATE else: up_to_date_docs, num_docs = utils._get_up_to_date_count(top_60_docs, locale) if num_docs and total_visits: coverage += (float(up_to_date_docs) / num_docs) * ( float(visits) / total_visits ) # Save the value to Metric table. metric_kind = MetricKind.objects.get_or_create(code=L10N_METRIC_CODE)[0] day = date.today() Metric.objects.create( kind=metric_kind, start=day, end=day + timedelta(days=1), value=int(coverage * 100), ) # Store as a % int.
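# A self-contained sketch of the weighted-coverage formula described in the
# docstring above; the helper and the numbers in the example are illustrative.
# The real command pulls visit counts from Google Analytics and up-to-date
# article counts from the wiki.
def _coverage_sketch(locale_visits, up_to_date, totals):
    """All three arguments are dicts keyed by locale code."""
    all_visits = sum(locale_visits.values())
    if not all_visits:
        return 0
    coverage = 0.0
    for locale, visits in locale_visits.items():
        num_docs = totals.get(locale, 0)
        if num_docs:
            coverage += (float(up_to_date[locale]) / num_docs) * (float(visits) / all_visits)
    return int(coverage * 100)  # stored as a percentage integer, as above

# _coverage_sketch({"en-US": 700, "de": 300}, {"en-US": 50, "de": 40}, {"en-US": 50, "de": 50})
# -> int((1.0 * 0.7 + 0.8 * 0.3) * 100) == 94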
number_list = ['1', '2', '3', '4', '5', '6', '7', '8', '9']

for number in number_list:
    # 1, 2 and 3 take special suffixes; everything else here takes "th".
    if number == '1':
        print(number + "st")
    elif number == '2':
        print(number + "nd")
    elif number == '3':
        print(number + "rd")
    else:
        print(number + "th")
from music21 import note, instrument, stream import numpy as np import matplotlib.pyplot as plt import os import os.path from helper import createPitchVocabularies, loadChorales from config import note_embedding_dim, note_embedding_dir from keras.models import Model from keras.layers import Input, Dense # create the vocabulary note_vocab, note_names_vocab, note_vocab_categorical = createPitchVocabularies() # note to integer and reversal dictinaries used to make categorical data note_to_int = dict((note, number) for number, note in enumerate(note_vocab)) int_to_note = dict((number, note) for number, note in enumerate(note_vocab)) # define the autoencoder model parts input_shape = note_vocab_categorical.shape[0] note_input = Input(shape=(input_shape,)) note_encoded = Dense(note_embedding_dim, kernel_initializer='uniform', activation='relu')(note_input) note_decoded = Dense(input_shape, kernel_initializer='uniform', activation='sigmoid')(note_encoded) # define the full autoencoder note_autoencoder = Model(note_input, note_decoded) # define the encoder part of the network note_encoder = Model(note_input, note_encoded) #define the decoder part of the network note_encoded_input = Input(shape=(note_embedding_dim,)) note_decoder_layer = note_autoencoder.layers[-1] note_decoder = Model(note_encoded_input, note_decoder_layer(note_encoded_input)) # compile and print autoencoder summary note_autoencoder.compile(optimizer='adam', loss='categorical_crossentropy') note_autoencoder.summary() # prepare data for the network # load Bach chorales print('loading chorales...') notes = loadChorales() # map notes to categorical ones notes_categorical = [] for (chord, dur) in notes: for _note in chord: notes_categorical.append(note_vocab_categorical[note_to_int[_note]]) notes_categorical = np.reshape(notes_categorical, (len(notes_categorical), -1)) # prepare train and test samples shuffled_notes = notes_categorical np.random.shuffle(shuffled_notes) train_index = int(0.8 * len(shuffled_notes)) test_index = int(0.2 * len(shuffled_notes)) notes_train = shuffled_notes[0:train_index] notes_test = shuffled_notes[train_index:] # train the autoencoder network note_autoencoder.fit(x=notes_train, y=notes_train, epochs=50, batch_size=128, shuffle=True, validation_data=(notes_test, notes_test)) # save the model for future use os.makedirs(note_embedding_dir, exist_ok=True) with open(os.path.join(note_embedding_dir, "full-model.json"), "w") as json_file: json_file.write(note_autoencoder.to_json()) with open(os.path.join(note_embedding_dir, "encoder-model.json"), "w") as json_file: json_file.write(note_encoder.to_json()) with open(os.path.join(note_embedding_dir, "decoder-model.json"), "w") as json_file: json_file.write(note_decoder.to_json()) note_autoencoder.save_weights(os.path.join(note_embedding_dir, 'full-weights.h5')) note_encoder.save_weights(os.path.join(note_embedding_dir, 'encoder-weights.h5')) note_decoder.save_weights(os.path.join(note_embedding_dir, 'decoder-weights.h5')) # fun visualization of generated 2D-embedding if note_embedding_dim == 2: import matplotlib.pyplot as plt space = note_encoder.predict(note_vocab_categorical) x_axis = [x[0] for x in space] y_axis = [x[1] for x in space] text = note_names_vocab fig, ax = plt.subplots() ax.scatter(x_axis, y_axis) for i, txt in enumerate(text): ax.annotate(txt, (x_axis[i], y_axis[i])) print('SUCCESS')
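# Hypothetical follow-up showing how the trained pieces fit together; the pitch
# picked for the lookup is just the first vocabulary entry, since the actual
# keys come from createPitchVocabularies().
#
# some_note = note_vocab[0]
# one_hot = note_vocab_categorical[note_to_int[some_note]].reshape(1, -1)
# embedding = note_encoder.predict(one_hot)         # shape: (1, note_embedding_dim)
# reconstruction = note_decoder.predict(embedding)  # back to a vocabulary-sized vector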
from typing import Dict from .abstract_template import AbstractXDLElementTemplate from ..base_steps import AbstractStep from ...constants import VESSEL_PROP_TYPE from ...utils.prop_limits import TIME_PROP_LIMIT, PRESSURE_PROP_LIMIT from ...utils.vessels import VesselSpec class AbstractEvacuateAndRefillStep(AbstractXDLElementTemplate, AbstractStep): """Evacuate vessel and refill with inert gas. Name: Evacuate Mandatory props: vessel (vessel): Vessel to evacuate and refill. gas (str): Gas to refill vessel with. If not given use any available inert gas. repeats (int): Number of evacuation/refill cycles to perform. """ MANDATORY_NAME = 'EvacuateAndRefill' MANDATORY_PROP_TYPES = { 'vessel': VESSEL_PROP_TYPE, 'gas': str, 'repeats': int, } MANDATORY_DEFAULT_PROPS = { 'gas': None, 'repeats': None, } @property def vessel_specs(self) -> Dict[str, VesselSpec]: return { 'vessel': VesselSpec(inert_gas=self.gas is None, vacuum=True), } class AbstractPurgeStep(AbstractXDLElementTemplate, AbstractStep): """Purge liquid by bubbling gas through it. Name: Purge Mandatory props: vessel (vessel): Vessel containing liquid to purge with gas. gas (str): Gas to purge vessel with. If not given use any available inert gas. time (float): Optional. Time to bubble gas through vessel. pressure (float): Optional. Pressure of gas. flow_rate (float): Optional. Flow rate of gas in mL / min. """ MANDATORY_NAME = 'Purge' MANDATORY_PROP_TYPES = { 'vessel': VESSEL_PROP_TYPE, 'time': float, 'gas': str, 'pressure': float, 'flow_rate': float, } MANDATORY_DEFAULT_PROPS = { 'time': None, 'gas': None, 'pressure': None, 'flow_rate': None, } MANDATORY_PROP_LIMITS = { 'time': TIME_PROP_LIMIT, 'pressure': PRESSURE_PROP_LIMIT, } @property def vessel_specs(self) -> Dict[str, VesselSpec]: return { 'vessel': VesselSpec(inert_gas=self.gas is None), } class AbstractStartPurgeStep(AbstractXDLElementTemplate, AbstractStep): """Start purging liquid by bubbling gas through it. Name: Start Purge Mandatory props: vessel (vessel): Vessel containing liquid to purge with gas. gas (str): Gas to purge vessel with. If not given use any available inert gas. pressure (float): Optional. Pressure of gas. flow_rate (float): Optional. Flow rate of gas in mL / min. """ MANDATORY_NAME = 'StartPurge' MANDATORY_PROP_TYPES = { 'vessel': VESSEL_PROP_TYPE, 'pressure': float, 'flow_rate': float, 'gas': str, } MANDATORY_DEFAULT_PROPS = { 'gas': None, 'pressure': None, 'flow_rate': None, } MANDATORY_PROP_LIMITS = { 'pressure': PRESSURE_PROP_LIMIT, } @property def vessel_specs(self) -> Dict[str, VesselSpec]: return { 'vessel': VesselSpec(inert_gas=self.gas is None), } class AbstractStopPurgeStep(AbstractXDLElementTemplate, AbstractStep): """Stop bubbling gas through vessel. Name: Stop Purge Mandatory props: vessel (vessel): Vessel to stop bubbling gas through. """ MANDATORY_NAME = 'StopPurge' MANDATORY_PROP_TYPES = { 'vessel': VESSEL_PROP_TYPE, } @property def vessel_specs(self) -> Dict[str, VesselSpec]: return { 'vessel': VesselSpec(), }
def main():
    # get
    sales = get_sales()
    advanced_pay = get_advanced_pay()
    rate = determined_comm_rate(sales)
    # calc
    pay = sales * rate - advanced_pay
    # print
    print("The pay is $", format(pay, ",.2f"), sep='')
    return

def get_sales():
    return float(input("Sales: $"))

def determined_comm_rate(sales):
    # Brackets are contiguous so fractional sales amounts (e.g. 14,999.50)
    # cannot fall through the integer gaps into the wrong rate.
    if sales < 10_000:
        return 0.10
    elif sales < 15_000:
        return 0.12
    elif sales < 18_000:
        return 0.14
    elif sales < 22_000:
        return 0.16
    else:  # 22k+
        return 0.18

def get_advanced_pay():
    print("Input $0 if you do not have advanced pay")
    return float(input("Advanced Pay: $"))

main()
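# Worked example for the tiers above: sales of $16,000 fall in the 14% bracket,
# so with a $1,000 advance the pay is 16_000 * 0.14 - 1_000 = 1_240.00.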
class Angel:
    color = "white"
    feature = "wings"
    home = "Heaven"


class Demon:
    color = "red"
    feature = "horns"
    home = "Hell"


goodie = Angel()
baddie = Demon()

print(f'{goodie.color}\n{goodie.feature}\n{goodie.home}')
print(f'{baddie.color}\n{baddie.feature}\n{baddie.home}')
import os
from shutil import copyfile


class FileCheck:
    @staticmethod
    def check_folder(path):
        if os.path.exists(path):
            return True
        return False

    def check_create_folder(self, path):
        if not self.check_folder(path):
            print(f"[Action] Creating a folder for you... ({path})")
            os.makedirs(path)

    @staticmethod
    def check_file(path):
        if os.path.isfile(path):
            return True
        return False

    def check_copy_file(self, path):
        if not self.check_file(path):
            print(f"[Action] Copying a base file for you... ({path}). \nYou have to manually edit something in it!")
            source_path = path.split("/")[-1]
            copyfile(f"utils/base/{source_path}", path)
            exit(1)

    def check_create_file(self, path, content):
        if not self.check_file(path):
            print(f"[Action] Creating a file for you... ({path})")
            with open(path, "w") as f:
                f.write(content)
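# Brief usage sketch of FileCheck above (the paths are hypothetical). Note that
# check_copy_file() copies utils/base/<filename> to the requested path and then
# exits so the copied file can be edited by hand before re-running.
#
# fc = FileCheck()
# fc.check_create_folder("output")
# fc.check_create_file("output/notes.txt", "hello\n")
# fc.check_copy_file("config/settings.json")  # copies utils/base/settings.json, then exit(1)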
__all__ = [ "Extensible" ] from .pygen import ( pythonizable ) class Extensible(object): "Remembers attributes added to it and support serialization to Python." def __init__(self, **kw): self.__added = set(kw) self.__dict__.update(kw) def __setattr__(self, n, v): if n[0] != '_': self.__added.add(n) object.__setattr__(self, n, v) def __delattr__(self, n): if n[0] != '_': self.__added.remove(n) return object.__delattr__(self, n) @property def _py_serializable(self): for a in self.__added: o = getattr(self, a) if hasattr(o, "__gen_code__") or isinstance(o, pythonizable): yield a @property def __pygen_deps__(self): return tuple(self._py_serializable) def __gen_code__(self, g): g.reset_gen(self) for a in self.__added: g.gen_field(a) g.write(" = ") g.pprint(getattr(self, a)) g.gen_end() def __repr__(self): res = type(self).__name__ + "(\n" kw = [] for a in self.__added: kw.append(" " + a + " = " + repr(getattr(self, a))) res = res + ",\n".join(kw) + "\n)" return res def __iter__(self): for k in self.__added: yield k @property def _dict(self): return dict((k, getattr(self, k)) for k in iter(self))
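# Small usage sketch of Extensible (the attribute names and values are
# arbitrary). Only attributes whose names do not start with "_" are remembered,
# so they show up in iteration, repr() and _dict; underscore-prefixed ones do not.
#
# cfg = Extensible(name="q35", cpus=2)
# cfg.memory = "2G"
# cfg._scratch = object()  # not tracked
# sorted(cfg)              # -> ['cpus', 'memory', 'name']
# del cfg.cpus
# cfg._dict                # -> {'name': 'q35', 'memory': '2G'} (key order may vary)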
from optparse import OptionParser
from sys import maxint
import string
import time
import random
import socket
import logging
import logging.handlers

fixed_line = ""
hostname = socket.gethostname()

# Possible run times are:
# - fixed number of seconds
# - set number of messages
# - run until stopped
def determine_run_time() :
    if int(options.time) > 0 :
        fixed_count = False
        infinite = False
        fixed_time = True
    elif int(options.num_lines) > 0 :
        fixed_count = True
        infinite = False
        fixed_time = False
    else:
        fixed_count = False
        infinite = True
        fixed_time = False
    return fixed_count, fixed_time, infinite

def delay(sleep_time,num_messages) :
    # Avoid unnecessary calls to time.sleep()
    if sleep_time > 0.0 :
        sleep_this_time = True
        # Calling time.sleep() more than 10 times per second is pointless and adds too much overhead
        # Back off to batches of messages big enough so that sleep is called 10 times per second max
        if sleep_time < 0.05 :
            sleep_this_time = False
            batch_size = (0.05 / sleep_time) * 10.0
            sleep_time = 0.5
            if num_messages % int(round(batch_size)) == 0 :
                sleep_this_time = True
        if sleep_this_time :
            time.sleep(sleep_time)
    return

# When file input used, pull a line from the file or re-open file if wrapped/eof
def next_line_from_file() :
    global infile
    if infile :
        in_line = infile.readline()
        if in_line == "" :
            infile.close()
            infile = open(options.file,'r')
            in_line = infile.readline()
    return in_line.rstrip()

def next_line_from_file_by_length(line_length):
    current_line = next_line_from_file()
    while (len(current_line) < line_length) :
        current_line = current_line + " " + next_line_from_file()
    return current_line[:line_length]

def get_word() :
    return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(options.word_length))

def get_new_line():
    current_line = ""
    if options.text_type == "random" :
        while len(current_line) < options.line_length :
            current_line = current_line + get_word() + " "
    else:
        current_line = next_line_from_file_by_length(options.line_length)
    return current_line[:options.line_length]

def get_raw_line():
    return next_line_from_file()

def single_line():
    if options.fixed_line and (not fixed_line == ""):
        single_line = fixed_line
    elif options.raw:
        single_line = get_raw_line()
    else:
        single_line = get_new_line()
    return single_line

def create_message(seq_number, msg) :
    global hostname
    if not options.raw :
        msg = hostname + " : " + str(seq_number) + " : " + msg
    return msg

# Fixed time period, in seconds
def generate_for_time():
    now = time.time()
    then = now + options.time
    number_generated = 0
    while now <= then :
        number_generated += 1
        logger.info( create_message(number_generated, single_line()) )
        delay(options.sleep_time, number_generated)
        now = time.time()
    return

# Set number of lines, or infinite if time is 0
def generate_num_lines() :
    global hostname
    if options.num_lines == 0 :
        number_to_generate = maxint
    else :
        number_to_generate = options.num_lines
    number_generated = 0
    while (number_generated < number_to_generate) :
        number_generated += 1
        logger.info( create_message(number_generated, single_line()) )
        delay(options.sleep_time, number_generated)

def generate_messages() :
    global fixed_line
    if options.fixed_line :
        fixed_line = single_line()
    (fixed_count, fixed_time, infinite) = determine_run_time()
    if fixed_time :
        generate_for_time()
    else:
        generate_num_lines()

def init_logger(my_logger):
    my_logger.setLevel(logging.INFO)
    if options.journal :
        jh = logging.handlers.SysLogHandler(address = '/dev/log')
        my_logger.addHandler(jh)
    elif options.log_on_file:
        print 'log_on_file: {}'.format(options.log_on_file)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        fh = logging.FileHandler(options.log_on_file)
        if not options.raw :
            fh.setFormatter(formatter)
        my_logger.addHandler(fh)
    else :
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        sh = logging.StreamHandler()
        if not options.raw :
            sh.setFormatter(formatter)
        my_logger.addHandler(sh)

if __name__ == "__main__":
    parser = OptionParser()
    parser.add_option("-l", "--line-length", dest="line_length", type="int", default=100, help="length of each output line")
    parser.add_option("--text-type", dest="text_type", help="random or input", default="random")
    parser.add_option("--word-length", dest="word_length", type="int", default=9, help="word length for random text")
    parser.add_option("--fixed-line", dest="fixed_line", action="store_true", default=False, help="the same line is repeated if true, variable line content if false")
    parser.add_option("-f","--file", dest="file", default="", help="file for input text")
    parser.add_option("-j","--journal", dest="journal", action="store_true", default=False, help="use logger to log messages to journald instead of stdout")
    parser.add_option("--raw", dest="raw", action="store_true", default=False, help="log raw lines from a file with no timestamp or counters")
    parser.add_option("-o", "--log-on-file", dest="log_on_file", help="the file path to which the log outputs")
    parser.add_option("-r", "--rate", dest="rate", type="float", default=10.0, help="rate in lines per minute")
    parser.add_option("-n", "--num-lines", dest="num_lines", type="int", default=0, help="number of lines to generate, 0 is infinite - cannot be used with -t")
    parser.add_option("-t", "--time", dest="time", type="int", default=0, help="time to run in seconds, cannot be used with -n")

    (options, args) = parser.parse_args()

    if not options.file == "" :
        infile = open(options.file,'r')

    if options.raw and options.file == "":
        print "ERROR: --raw mode can only be used if --file is specified"
        exit(-1)

    options.sleep_time = 0.0
    if options.rate > 0.0 :
        options.sleep_time = 60.0/options.rate

    logger = logging.getLogger('SVTLogger')
    init_logger(logger)

    generate_messages()
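# Hedged usage examples for the generator above. The script name
# line_generator.py and the paths are illustrative only:
#
#   python line_generator.py -r 600 -t 60            # ~600 lines/minute for 60 seconds, to stdout
#   python line_generator.py -r 0 -n 1000 -o /tmp/svt.log   # 1000 lines with no throttling, to a file
#   python line_generator.py -f input.txt --raw -j   # replay raw file lines to journald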
#IMPORTS import praw #Reddit API Package (https://praw.readthedocs.io/en/latest/index.html) import datetime as dt from datetime import date import spotipy from spotipy.oauth2 import SpotifyClientCredentials import spotipy.util as util from spotipy.oauth2 import SpotifyOAuth import random import json import requests import re SPOTIPY_CLIENT_ID = '' secret = '' redirect_uri = '' username = '' API_KEY = '-g' scope = 'playlist-modify-public' token = SpotifyOAuth(scope=scope,username=username, client_id=SPOTIPY_CLIENT_ID,client_secret=secret, redirect_uri=redirect_uri) sp = spotipy.Spotify(auth_manager=token) tracks = [] #PRAW Read Only Instance (https://praw.readthedocs.io/en/latest/getting_started/quick_start.html) reddit = praw.Reddit(client_id = '', client_secret = '', user_agent = '') #Dictionary to store all of the data retrieved master_dict = {"url": []} song_titles = [] #Function to remove URL Hostile string components def urlify(s): # Remove all non-word characters (everything except numbers and letters) s = re.sub(r"[^\w\s]", '', s) # Replace all runs of whitespace with a single dash s = re.sub(r"\s+", '+', s) return s #Defining functions to build the master dictionary using data from each subreddit def Pop(): popreddit = reddit.subreddit('Popheads') #Defining subreddit of choice (first, popheads) popemergencies = popreddit.top(limit =35, time_filter = 'day') #Retrieving the top 5 posts within the last 24hours for post in popemergencies: master_dict['url'].append(post.url) print(master_dict) #Repeat for Subs of Choice! def HipHop(): hiphopheads = reddit.subreddit('hiphopheads') HHH = hiphopheads.top(limit=30, time_filter = 'day') for post in HHH: master_dict['url'].append(post.url) print(master_dict) def PsychRock(): psychrock = reddit.subreddit('psychedelicrock') psychposts = psychrock.top(limit=10, time_filter = 'day') for post in psychposts: master_dict['url'].append(post.url) print(master_dict) def PCmusic(): pcmusic = reddit.subreddit('pcmusic') pcposts = pcmusic.top(limit=15, time_filter = 'day') for post in pcposts: master_dict['url'].append(post.url) print(master_dict) def indieheads(): indieheads = reddit.subreddit('indieheads') indietunes = indieheads.top(limit=15, time_filter = 'day') for post in indietunes: master_dict['url'].append(post.url) print(master_dict) def convert(tracks): return tuple(tracks) #Calling each function to perform their duties before creating the email Pop() HipHop() PsychRock() PCmusic() indieheads() #Looping through either a playlist, track, or album for url in master_dict['url']: if url.startswith('https://open.spotify.com/playlist/'): playlist_id = url[34:] print(playlist_id) for item in sp.playlist(playlist_id)['tracks']['items']: tracks.append(item['track']['id']) elif url.startswith('https://open.spotify.com/track/'): track_id = url[31:] print(track_id) tracks.append(track_id) elif url.startswith('https://open.spotify.com/album/'): album_id = url[31:] print(album_id) try: for item in sp.album(album_id)['tracks']['items']: tracks.append(item['id']) except: pass elif url.startswith('https://www.youtube.com/watch?v='): video_id = url[32:43] response = requests.get('https://youtube.googleapis.com/youtube/v3/videos?part=snippet&id=' + video_id + '&key=' + API_KEY) response = json.loads(response.content) song_titles.append(response['items'][0]['snippet']['title']) elif url.startswith('https://youtu.be/'): video_id = url[17:28] response = requests.get('https://youtube.googleapis.com/youtube/v3/videos?part=snippet&id=' + video_id + '&key=' + API_KEY) 
        response = json.loads(response.content)
        try:
            song_titles.append(response['items'][0]['snippet']['title'])
        except:
            pass

#Need the first 22 characters (the length of a track id) to control for a quirk with playlist tracks
# tracks = [[track[0:22]] for track in tracks]
tracks = [track[0:22] for track in tracks]

alltracks = tracks

#50 Random Spotify songs to not have long albums take up space (can't add more than 50 at once)
if len(tracks) > 50:
    tracks = random.sample(tracks, 50)

tracks = convert(tracks)
print(tracks)

#Replace the playlist contents with this run's tracks (swap in playlist_add_items here to append instead)
sp.playlist_replace_items('2X7dRDEE3rFzSS7Opipohb', tracks)

#The Spotify API limits you to 50 songs at a time. The code below is to include an additional 50 songs that weren't
#added above.
try:
    tracks = list(set(alltracks).difference(tracks))
    if len(tracks) > 50:
        tracks = random.sample(tracks, 50)
    sp.playlist_add_items('2X7dRDEE3rFzSS7Opipohb', tracks)
except Exception as e:
    print(e)

#TIME TO GRAB THE YOUTUBE SONGS ON SPOTIFY
tracks = []

for song in song_titles:
    song = re.sub(r'\(.*\)', "", song)
    song = re.sub(r'\[.*\]', "", song)
    results = sp.search(q=song, type='track')
    try:
        tracks.append(results['tracks']['items'][0]['id'])
    except:
        pass

if len(tracks) > 50:
    tracks = random.sample(tracks, 50)

print(tracks)
tracks = convert(tracks)

sp.playlist_add_items('2X7dRDEE3rFzSS7Opipohb', tracks)
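# Optional refactoring sketch: instead of capping at 50 and computing a
# "difference" batch, the additions could be sent in fixed-size chunks.
# add_in_chunks is a hypothetical helper, and the 50-track chunk size simply
# mirrors the per-request limit assumed above.

def add_in_chunks(spotify_client, playlist_id, track_ids, chunk_size=50):
    for start in range(0, len(track_ids), chunk_size):
        spotify_client.playlist_add_items(playlist_id, track_ids[start:start + chunk_size])

# e.g. add_in_chunks(sp, '2X7dRDEE3rFzSS7Opipohb', list(alltracks))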
import math

import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler, SequentialSampler
from sklearn.model_selection import StratifiedKFold

import dgl


def collate(samples):
    # samples is a list of (graph, label) pairs
    graphs, labels = map(list, zip(*samples))
    for g in graphs:
        for key in g.node_attr_schemes().keys():
            g.ndata[key] = g.ndata[key].float()
    batched_graph = dgl.batch(graphs)
    labels = torch.tensor(labels)
    return batched_graph, labels


class GraphDataLoader():
    def __init__(self, dataset, batch_size, device, collate_fn=collate,
                 seed=0, shuffle=True, split_name='fold10', fold_idx=0,
                 split_ratio=0.7):
        self.shuffle = shuffle
        self.seed = seed
        self.kwargs = {'pin_memory': True} if device >= 0 else {}

        labels = [l for _, l in dataset]

        if split_name == 'fold10':
            train_idx, valid_idx = self._split_fold10(
                labels, fold_idx, seed, shuffle
            )
        elif split_name == 'rand':
            train_idx, valid_idx = self._split_rand(
                labels, split_ratio, seed, shuffle
            )
        else:
            raise NotImplementedError()

        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetRandomSampler(valid_idx)

        self.train_loader = DataLoader(
            dataset, sampler=train_sampler, batch_size=batch_size,
            collate_fn=collate_fn, **self.kwargs
        )
        self.valid_loader = DataLoader(
            dataset, sampler=valid_sampler, batch_size=batch_size,
            collate_fn=collate_fn, **self.kwargs
        )

    def train_valid_loader(self):
        return self.train_loader, self.valid_loader

    def _split_fold10(self, labels, fold_idx=0, seed=0, shuffle=True):
        assert 0 <= fold_idx < 10, 'fold_idx must be from 0 to 9.'

        skf = StratifiedKFold(n_splits=10, shuffle=shuffle, random_state=seed)
        idx_list = []
        for idx in skf.split(np.zeros(len(labels)), labels):
            idx_list.append(idx)
        train_idx, valid_idx = idx_list[fold_idx]

        print(
            'train_set: test_set = %d : %d' % (len(train_idx), len(valid_idx))
        )
        return train_idx, valid_idx

    def _split_rand(self, labels, split_ratio=0.7, seed=0, shuffle=True):
        num_entries = len(labels)
        indices = list(range(num_entries))
        np.random.seed(seed)
        np.random.shuffle(indices)
        split = int(math.floor(split_ratio * num_entries))
        train_idx, valid_idx = indices[:split], indices[split:]

        print(
            'train_set: test_set = %d : %d' % (len(train_idx), len(valid_idx))
        )
        return train_idx, valid_idx


if __name__ == '__main__':
    from Temp.dataset import GINDataset

    dataset = GINDataset(name='PROTEINS', self_loop=True, degree_as_nlabel=False)

    Loader_list = []
    for idx in range(10):
        train_loader, valid_loader = GraphDataLoader(
            dataset, batch_size=128, device=0, collate_fn=collate, seed=9,
            shuffle=True, split_name='fold10', fold_idx=idx
        ).train_valid_loader()
        Loader_list.append((train_loader, valid_loader))
    print(Loader_list)
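# Hedged sketch of consuming a loader returned by GraphDataLoader: each batch is
# the (batched_graph, labels) pair produced by collate() above. The function
# name iterate_one_epoch is illustrative and not part of this module.

def iterate_one_epoch(train_loader):
    for batched_graph, labels in train_loader:
        # batched_graph is a single dgl graph containing every graph in the batch;
        # labels is a 1-D tensor aligned with those graphs.
        print(batched_graph.batch_size, labels.shape)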
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import unicode_literals from enum import IntEnum from typing import Tuple, Optional, List, Union from typing import Any from nassl import _nassl from typing import Dict from typing import Text class OcspResponseNotTrustedError(IOError): def __init__(self, trust_store_path): # type: (Text) -> None self.trust_store_path = trust_store_path class OcspResponseStatusEnum(IntEnum): SUCCESSFUL = 0 MALFORMED_REQUEST = 1 INTERNAL_ERROR = 2 TRY_LATER = 3 SIG_REQUIRED = 5 UNAUTHORIZED = 6 class OcspResponse(object): """High level API for parsing an OCSP response. """ def __init__(self, ocsp_response): # type: (_nassl.OCSP_RESPONSE) -> None self._ocsp_response = ocsp_response self._ocsp_response_dict = self._parse_ocsp_response_from_openssl_text(self.as_text(), self.status) @property def status(self): return OcspResponseStatusEnum(self._ocsp_response.get_status()) def as_text(self): # type: () -> Text ocsp_resp_bytes = self._ocsp_response.as_text() # The response may contain certificates, which then may contain non-utf8 characters - get rid of them ocsp_first_resp = ocsp_resp_bytes.split(b'Certificate:')[0] return ocsp_first_resp.decode('utf-8') def verify(self, verify_locations): # type: (Text) -> None """Verify that the OCSP response is trusted. Args: verify_locations: The file path to a trust store containing pem-formatted certificates, to be used for validating the OCSP response. Raises OcspResponseNotTrustedError if the validation failed ie. the OCSP response is not trusted. """ # Ensure the file exists with open(verify_locations): pass try: self._ocsp_response.basic_verify(verify_locations) except _nassl.OpenSSLError as e: if 'certificate verify error' in str(e): raise OcspResponseNotTrustedError(verify_locations) raise def as_dict(self): # type: () -> Dict[Text, Any] return self._ocsp_response_dict @classmethod def _parse_ocsp_response_from_openssl_text(cls, response_text, response_status): # type: (Text, OcspResponseStatusEnum) -> Dict[Text, Any] """Parse OpenSSL's text output and make a lot of assumptions. 
""" response_dict = { 'responseStatus': cls._get_value_from_text_output_no_p('OCSP Response Status:', response_text), 'version' : cls._get_value_from_text_output_no_p('Version:', response_text), 'responseType': cls._get_value_from_text_output('Response Type:', response_text), 'responderID': cls._get_value_from_text_output('Responder Id:', response_text), 'producedAt': cls._get_value_from_text_output('Produced At:', response_text), } # type: Dict[Text, Any] if response_status != OcspResponseStatusEnum.SUCCESSFUL: return response_dict # A successful OCSP response will contain more data - let's parse it # TODO(ad): This will not work correctly if there are multiple responses as it assumes just one response_dict['responses'] = [ { 'certID': { 'hashAlgorithm': cls._get_value_from_text_output('Hash Algorithm:', response_text), 'issuerNameHash': cls._get_value_from_text_output('Issuer Name Hash:', response_text), 'issuerKeyHash': cls._get_value_from_text_output('Issuer Key Hash:', response_text), 'serialNumber': cls._get_value_from_text_output('Serial Number:', response_text) }, 'certStatus': cls._get_value_from_text_output('Cert Status:', response_text), 'thisUpdate': cls._get_value_from_text_output('This Update:', response_text), 'nextUpdate': cls._get_value_from_text_output('Next Update:', response_text), } ] if cls._get_scts_from_text_output(response_text): # SCT extension present response_dict['responses'][0]['singleExtensions'] = { 'ctCertificateScts': cls._get_scts_from_text_output(response_text) } return response_dict # Text parsing @staticmethod def _get_value_from_text_output(key, text_output): # type: (Text, Text) -> Optional[Text] value = text_output.split(key) return None if len(value) < 2 else value[1].split('\n')[0].strip() @classmethod def _get_value_from_text_output_no_p(cls, key, text_output): # type: (Text, Text) -> Optional[Text] value = cls._get_value_from_text_output(key, text_output) return None if value is None else value.split('(')[0].strip() @staticmethod def _parse_sct_text_line(text_output): # type: (Text) -> Tuple[Text, Optional[Text]] text_output_split = text_output.split(':', 1) key = text_output_split[0].strip() value = text_output_split[1].strip() if value == 'none': final_value = None else: final_value = value return key, final_value @classmethod def _parse_single_sct(cls, sct_text_output): parsed_sct = {} for line in sct_text_output.split('\n'): # One-line fields if any(key in line for key in ['Version', 'Extensions', 'Timestamp']): key, value = cls._parse_sct_text_line(line) parsed_sct[key] = value elif 'Log ID' in line: log_id_text = sct_text_output.split('Log ID :')[1].split('Timestamp')[0] final_log_id = '' for line in log_id_text: final_log_id += line.strip(' ').replace('\n', '') parsed_sct['logId'] = final_log_id return parsed_sct @classmethod def _get_scts_from_text_output(cls, response_text): scts_text_list = response_text.split('Signed Certificate Timestamp') if len(scts_text_list) < 1: return None scts_text_list = scts_text_list[1::] parsed_scts = [] for sct_text in scts_text_list: parsed_scts.append(cls._parse_single_sct(sct_text)) return parsed_scts
import cocos from cocos.text import Label from cocos import scene from cocos.layer import Layer from cocos.director import director from cocos.sprite import Sprite # Except for this import. This import, from Pyglet (one of the dependencies of cocos), is to recognize keyboard codes from pyglet.window.key import symbol_string # In this program, we'll be making a Layer with a bit of text that shows what keys you're holding down # It's important to note that there are multiple ways of handling keyboard input, and this is just one of them (I show another in the intermediate course) # Similar starting code to before with one exception class KeyboardInput(Layer): # You need to tell cocos that your layer is for handling input! # This is key (no pun intended)! # If you don't include this you'll be scratching your head wondering why your game isn't accepting input is_event_handler = True def __init__(self): super(KeyboardInput, self).__init__() # Let's make a Label like we did in the HelloWorld sample to show the keys being pressed # We will write code to append the key being pressed further down in the file self.label = Label("Keys: ", font_name = "Helvetica", font_size = 32, anchor_x = "center", anchor_y = "center") self.label.position = 320, 240 # Here I make a variable to store the keys being pressed # set() creates a new set object. # A set object is an object built into Python that stores things in, you guessed it, sets # You can call the add() and remove() methods to modify the contents of a set self.keys_being_pressed = set() # Next I run my update_text function that I write further down in the file self.update_text() # And lastly I add my label to the layer! self.add(self.label) # This is the function that updates the label def update_text(self): # Here I get the name of the key # This line requires a bit of explanation # Essentially, I look at the key being pressed (which I set in a function further down) # And I match it up to the appropriate symbol for that string # So if the "T" key and the left "Shift" key were being pressed, this line would recognize their respective code, # And it would set the variable to "T", "LSHIFT" key_names = [symbol_string(k) for k in self.keys_being_pressed] # This line sets the text to the indicated string, joining in the key names until there are no more keys indicated text_for_label = "Keys: " + ", ".join(key_names) # This code is a bit lengthy, but essentially I'm just accessing the text element from the Label object self.label.element.text = text_for_label # This function is once of the default Cocos functions. I overload it and add my own code # By default this function just passes, but I make it actually do stuff! def on_key_press(self, key, modifiers): # By stuff I mean updating the keys_being_pressed variable with the key being passed in through Cocos self.keys_being_pressed.add(key) # Then I simply run my update text method to make sure the string gets updated self.update_text() # This function is also a default Cocos function! # I have to remember to update both the keys currently pressed and the string when they release a key # You can try commenting this out to see what happens when you hit a key and let go without this piece of code def on_key_release(self, key, modifiers): # Same code as before, except I remove instead of add the key self.keys_being_pressed.remove(key) self.update_text() # And then I use the same code you're used to seeing to run the layer director.init() director.run(scene.Scene(KeyboardInput()))
# -*- coding: utf-8 -*- # Generated by Django 1.9 on 2016-02-07 00:41 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='FwdForm', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')), ('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')), ('name', models.CharField(max_length=100, verbose_name='Name')), ('recipients', models.CharField(help_text='Separate several addresses with a comma.', max_length=255, verbose_name='Recipients')), ('thankyou_url', models.URLField(blank=True, verbose_name='Thank You URL')), ('sent_count', models.PositiveIntegerField(default=0, verbose_name='Total Submissions')), ('spam_count', models.PositiveIntegerField(default=0, verbose_name='Spam Count')), ], options={ 'verbose_name': 'Form', 'verbose_name_plural': 'Forms', }, ), migrations.CreateModel( name='Site', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')), ('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')), ('domain', models.URLField(max_length=100, unique=True, verbose_name='Site URL')), ('is_active', models.BooleanField(default=True, verbose_name='Is Active?')), ('akismet_key', models.CharField(blank=True, max_length=40, verbose_name='Akismet API key')), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='fwdform', name='site', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contact_forms', to='fwdform.Site'), ), ]