Dataset schema (one value per field, per record, in the order shown below):

    seq_id             string  (length 7-11)
    text               string  (length 156-1.7M)
    repo_name          string  (length 7-125)
    sub_path           string  (length 4-132)
    file_name          string  (length 4-77)
    file_ext           string  (6 distinct values)
    file_size_in_byte  int64   (156-1.7M)
    program_lang       string  (1 distinct value)
    lang               string  (38 distinct values)
    doc_type           string  (1 distinct value)
    stars              int64   (0-24.2k)
    dataset            string  (1 distinct value)
    pt                 string  (1 distinct value)
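The records below follow this schema. As a minimal sketch, a dump like this could be loaded with the Hugging Face datasets library; the hub path used here is a hypothetical placeholder, not this dump's real id:

from datasets import load_dataset

# Hypothetical hub path -- substitute the dump's actual id or local data files.
ds = load_dataset("some-org/github-code-dump", split="train")

record = ds[0]
print(record["repo_name"], record["sub_path"], record["stars"])
# The flattened source code lives in the "text" field.
print(record["text"][:200])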
26693866815
import torch
import logging
from tqdm import tqdm

from schnetpack.src.schnetpack import properties

__all__ = ["TorchStructureLBFGS"]


class TorchStructureLBFGS(torch.optim.LBFGS):
    """
    LBFGS optimizer that allows for relaxation of multiple structures in parallel. The
    approximation of the inverse Hessian is shared across the entire batch (all structures).
    Hence, it is recommended to use this optimizer preferably for batches of similar
    structures/compositions. In other cases, please utilize the ASELBFGS optimizer, which is
    particularly constructed for batches of different structures/compositions.

    This optimizer is an extension/adaptation of the torch.optim.LBFGS optimizer particularly
    designed for relaxation of atomic structures. In addition to the inherited features, this
    optimizer allows for fixing the positions of a set of atoms during the relaxation and
    provides a method to run the optimizer. The latter allows for setting a convergence
    criterion. Furthermore, we implemented a logging method that prints out the largest force
    in the system after each optimization iteration.
    """

    def __init__(
        self,
        model,
        model_inputs,
        fixed_atoms_mask,
        maxstep=None,
        logging_function=None,
        lr: float = 1.0,
        energy_key: str = "energy",
        position_key: str = properties.R,
    ):
        """
        Args:
            model (schnetpack.model.AtomisticModel): ml force field model
            model_inputs: input batch containing all structures
            fixed_atoms_mask (list(bool)): list of booleans indicating which atoms have their
                positions fixed in space.
            maxstep (float): how far a single atom is allowed to move. (default: None)
            logging_function: function that logs the structure of the systems during the relaxation
            lr (float): learning rate (default: 1)
            energy_key (str): name of energies in model (default="energy")
            position_key (str): name of atomic positions in model (default="_positions")
        """
        self.model = model
        self.energy_key = energy_key
        self.position_key = position_key
        self.fixed_atoms_mask = fixed_atoms_mask
        self.model_inputs = model_inputs
        self.logging_function = logging_function
        self.fmax = None
        self.maxstep = maxstep

        R = self.model_inputs[self.position_key]
        R.requires_grad = True
        super().__init__(params=[R], lr=lr)

    def _gather_flat_grad(self):
        """override this function to allow for keeping atoms fixed during the relaxation"""
        views = []
        for p in self._params:
            if p.grad is None:
                view = p.new(p.numel()).zero_()
            elif p.grad.is_sparse:
                view = p.grad.to_dense().view(-1)
            else:
                view = p.grad.view(-1)
            views.append(view)
        flat_grad = torch.cat(views, 0)
        if self.fixed_atoms_mask is not None:
            flat_grad[self.fixed_atoms_mask] = 0.0
        self.flat_grad = flat_grad
        return flat_grad

    def _add_grad(self, step_size, update):
        offset = 0
        if self.maxstep is not None:
            step_size = self.determine_step_size(step_size, update)
        for p in self._params:
            numel = p.numel()
            # view_as to avoid deprecated pointwise semantics
            p.add_(update[offset : offset + numel].view_as(p), alpha=step_size)
            offset += numel
        assert offset == self._numel()

    def determine_step_size(self, step_size, update):
        """Determine step to take according to maxstep

        Normalize all steps as the largest step. This way we still move along the
        eigendirection.
        """
        reshaped_update = update.view(-1, 3)
        steplengths = ((step_size * reshaped_update) ** 2).sum(1) ** 0.5
        longest_step = torch.max(steplengths)
        # check if any step in entire batch is greater than maxstep
        if longest_step >= self.maxstep:
            # rescale all steps
            logging.info("normalized integration step")
            step_size *= self.maxstep / longest_step
        return step_size

    def closure(self):
        results = self.model(self.model_inputs)
        self.zero_grad()
        loss = results[self.energy_key].sum()
        loss.backward()
        return loss

    def log(self, forces=None):
        """log relaxation results such as max force in the system"""
        if forces is None:
            forces = self.flat_grad.view(-1, 3)
        if not self.converged():
            logging.info("NOT CONVERGED")
        logging.info(
            "max. atomic force: {}".format(torch.sqrt((forces**2).sum(axis=1).max()))
        )

    def converged(self, forces=None):
        """Did the optimization converge?"""
        if forces is None:
            forces = self.flat_grad.view(-1, 3)
        return (forces**2).sum(axis=1).max() < self.fmax**2

    def run(self, fmax, max_opt_steps):
        """run relaxation"""
        self.fmax = fmax
        # optimization
        for opt_step in tqdm(range(max_opt_steps)):
            self.step(self.closure)
            # log structure
            if self.logging_function is not None:
                self.logging_function(opt_step)
            # stop optimization if max force is smaller than threshold
            if self.converged():
                break
        self.log()

    def get_relaxed_structure(self):
        return self.model_inputs[self.position_key]
maltefranke/solubility_prediction
schnetpack/src/schnetpack/interfaces/batchwise_optimizer.py
batchwise_optimizer.py
py
5,582
python
en
code
1
github-code
6
40411416951
#!/usr/bin/env python3
"""
Name: vpc_consistency.py
Description: NXAPI: display inconsistent vpc parameters

Example output when vpc is consistent:

% ./vpc_consistency.py --vault hashicorp --devices cvd_leaf_2 --interface Po11,Po12
192.168.11.103  cvd-1312-leaf        all 22 global vpc params are consistent
192.168.11.103  cvd-1312-leaf        all 7 vni vpc params are consistent
192.168.11.103  cvd-1312-leaf        all 12 vlans vpc params are consistent
192.168.11.103  cvd-1312-leaf        Po11 all 23 interface vpc port-channel params are consistent
192.168.11.103  cvd-1312-leaf        Po12 all 23 interface vpc port-channel params are consistent
%

Example output when vpc po allowed-vlans are mismatched:

% ./vpc_consistency.py --vault hashicorp --devices cvd_leaf_2 --interface Po11,Po12
192.168.11.103  cvd-1312-leaf        all 22 global vpc params are consistent
192.168.11.103  cvd-1312-leaf        all 7 vni vpc params are consistent
192.168.11.103  cvd-1312-leaf        all 12 vlans vpc params are consistent
192.168.11.103  cvd-1312-leaf        Po11 Allowed VLANs
   vpc-param-type: -
   vpc-param-local-val: 1111-1112
   vpc-param-peer-val: 1111
192.168.11.103  cvd-1312-leaf        Po11 Local suspended VLANs
   vpc-param-type: -
   vpc-param-local-val: 1112
   vpc-param-peer-val: -
192.168.11.103  cvd-1312-leaf        Po12 all 23 interface vpc port-channel params are consistent
%
"""
our_version = 109
script_name = "vpc_consistency"

# standard libraries
import argparse
from concurrent.futures import ThreadPoolExecutor

# local libraries
from nxapi_netbox.args.args_cookie import ArgsCookie
from nxapi_netbox.args.args_nxapi_tools import ArgsNxapiTools
from nxapi_netbox.general.log import get_logger
from nxapi_netbox.netbox.netbox_session import netbox, get_device_mgmt_ip
from nxapi_netbox.vault.vault import get_vault
from nxapi_netbox.nxapi.nxapi_vpc_consistency import (
    NxapiVpcConsistencyGlobal,
    NxapiVpcConsistencyVni,
    NxapiVpcConsistencyVlans,
    NxapiVpcConsistencyInterface,
)


def get_parser():
    help_interfaces = "a comma-separated list (no spaces) of port-channel interfaces to test for vpc consistency"
    help_mismatched_labels = "display labels whose number of comma-separated entries differ from the number of values they refer to"
    ex_interfaces = "Example: --interfaces Po1,Po10"
    ex_mismatched_labels = "Example: --mismatched_labels"

    parser = argparse.ArgumentParser(
        description="DESCRIPTION: NXAPI: display inconsistent vpc parameters",
        parents=[ArgsCookie, ArgsNxapiTools],
    )
    default = parser.add_argument_group(title="DEFAULT SCRIPT ARGS")
    mandatory = parser.add_argument_group(title="MANDATORY SCRIPT ARGS")

    parser.add_argument(
        "--version", action="version", version="{} v{}".format("%(prog)s", our_version)
    )
    default.add_argument(
        "--mismatched_labels",
        dest="mismatched_labels",
        required=False,
        action="store_true",
        default=False,
        help="{} {}".format(help_mismatched_labels, ex_mismatched_labels),
    )
    default.add_argument(
        "--interfaces",
        dest="interfaces",
        required=False,
        default=None,
        help="{} {}".format(help_interfaces, ex_interfaces),
    )
    return parser.parse_args()


def get_device_list():
    try:
        return cfg.devices.split(",")
    except AttributeError:
        log.error(
            "exiting. Cannot parse --devices {}. Example usage: --devices leaf_1,spine_2,leaf_2".format(
                cfg.devices
            )
        )
        exit(1)


def print_output(futures):
    for future in futures:
        output = future.result()
        if output == None:
            continue
        for line in output:
            print(line)


def show_inconsistent_params(ip, nx, interface=None):
    lines = list()
    if nx.error_reason != None:
        log.error("{} {} error: {}".format(ip, nx.hostname, nx.error_reason))
        return lines
    inconsistent_items = nx.inconsistent_params
    if len(inconsistent_items) == 0:
        if interface == None:
            lines.append(
                "{:<15} {:<20} all {} {} vpc params are consistent".format(
                    ip, nx.hostname, len(nx.info), nx.param_type
                )
            )
        else:
            lines.append(
                "{:<15} {:<20} {} all {} {} vpc port-channel params are consistent".format(
                    ip, nx.hostname, interface, len(nx.info), nx.param_type
                )
            )
    else:
        for item in nx.inconsistent_params:
            if interface == None:
                lines.append(
                    "{:<15} {:<20} {}".format(ip, nx.hostname, item["vpc-param-name"])
                )
            else:
                lines.append(
                    "{:<15} {:<20} {} {}".format(
                        ip, nx.hostname, interface, item["vpc-param-name"]
                    )
                )
            for key in item:
                if key == "vpc-param-name":
                    continue
                lines.append("   {}: {}".format(key, item[key]))
    return lines


def show_mismatched_labels(ip, nx):
    lines = list()
    if cfg.mismatched_labels == False:
        return lines
    if len(nx.mismatched_info) > 0:
        for label in nx.mismatched_info:
            lines.append(
                "{:<15} {:<20} vpc-param-name {}".format(ip, nx.hostname, label)
            )
            lines.append("   labels {}".format(nx.mismatched_info[label]["names"]))
            lines.append("   values {}".format(nx.mismatched_info[label]["params"]))
    return lines


def worker(device, vault):
    ip = get_device_mgmt_ip(nb, device)
    lines = list()
    for class_name in [
        NxapiVpcConsistencyGlobal,
        NxapiVpcConsistencyVni,
        NxapiVpcConsistencyVlans,
    ]:
        nx = class_name(vault.nxos_username, vault.nxos_password, ip, log)
        nx.nxapi_init(cfg)
        nx.refresh()
        if nx.error_reason != None:
            lines.append("{} {} error: {}".format(ip, nx.hostname, nx.error_reason))
            return lines
        lines += show_inconsistent_params(ip, nx)
        lines += show_mismatched_labels(ip, nx)
    if cfg.interfaces == None:
        return lines
    for interface in cfg.interfaces.split(","):
        nx = NxapiVpcConsistencyInterface(
            vault.nxos_username, vault.nxos_password, ip, log
        )
        nx.nxapi_init(cfg)
        nx.interface = interface
        nx.refresh()
        lines += show_inconsistent_params(ip, nx, interface)
    return lines


cfg = get_parser()
log = get_logger(script_name, cfg.loglevel, "DEBUG")
vault = get_vault(cfg.vault)
vault.fetch_data()
nb = netbox(vault)
devices = get_device_list()

executor = ThreadPoolExecutor(max_workers=len(devices))
futures = list()
for device in devices:
    args = [device, vault]
    futures.append(executor.submit(worker, *args))
print_output(futures)
allenrobel/nxapi-netbox
scripts/vpc_consistency.py
vpc_consistency.py
py
6,995
python
en
code
0
github-code
6
18699671015
from django.urls import path

from posting.views import (
    PostingView,
    CategoryView,
    PostingLikeView,
    PostingScrapView
)

urlpatterns = [
    path('', PostingView.as_view()),
    path('/category', CategoryView.as_view()),
    path('/like', PostingLikeView.as_view()),
    path('/scrap', PostingScrapView.as_view())
]
wecode-bootcamp-korea/17-1st-SweetHome-backend
posting/urls.py
urls.py
py
336
python
en
code
3
github-code
6
33222746920
from tqdm import tqdm

# Define constants
SHORT = 15
MIDDLE = 30
LARGE = 50


def __clear__(times: int = 11, length: int = 123):
    """ Clear the previous table from the terminal """
    print()
    for i in range(times + 1):
        if i != times:
            print("\033[F" + ' ' * length, end='')
        else:
            print("\033[F", end='')
    print("\r")


def __getCutLine__(length: int = 50, width_cell: int = 10, equal_symbol: bool = True) -> str:
    """
    Obtain the cut line

    Arg:    length       - The maximum length of the terminal
            width_cell   - The width of the table cell
            equal_symbol - If use '=' as the symbol or not
    Ret:
        The custom cut line string
    """
    cut_string = "+"
    acc_length = 0
    while True:
        if acc_length == 0:
            if equal_symbol:
                cut_string = cut_string + '=' * (width_cell) + '+'
            else:
                cut_string = cut_string + '-' * (width_cell) + '+'
            acc_length += width_cell
        else:
            if equal_symbol:
                cut_string = cut_string + '=' * (width_cell + 2) + '+'
            else:
                cut_string = cut_string + '-' * (width_cell + 2) + '+'
            acc_length += (width_cell + 2)
        if acc_length >= length - width_cell:
            break
    return cut_string


class tqdm_table(tqdm):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.max_length = 100
        self.prev_lines = -1

    def set_table_setting(self, max_length: int = 100):
        """
        Assign the setting of this object

        Arg:    max_length - The maximum length of the terminal
        """
        self.max_length = max_length

    def set_table_info(self, mapping: dict):
        """
        Set the table information

        Arg:    mapping - The key-value pairs you want to form into the table
        """
        key_string = ""
        val_string = ""
        table_string = []
        key_list = sorted(mapping.keys())

        # Clear the previous table information from stdout
        if self.prev_lines > 0:
            __clear__(self.prev_lines, self.max_length)

        # Determine the width of each cell
        if max([max(len(str(val)), len(key)) for key, val in mapping.items()]) <= 15:
            width_length = SHORT
        elif max([max(len(str(val)), len(key)) for key, val in mapping.items()]) <= 30:
            width_length = MIDDLE
        else:
            width_length = LARGE

        # Collect the lines of keys and values
        for key in key_list:
            val = mapping[key]
            single_max_length = max(len(key), len(str(val)))
            if len(key_string) + single_max_length + 2 < self.max_length:
                if width_length == SHORT:
                    key_string += '{:>15} | '.format(key)
                    val_string += '{:>15} | '.format(val)
                elif width_length == MIDDLE:
                    key_string += '{:>30} | '.format(key)
                    val_string += '{:>30} | '.format(val)
                else:
                    key_string += '{:>50} | '.format(key)
                    val_string += '{:>50} | '.format(val)
            else:
                table_string.append(key_string)
                table_string.append(val_string)
                if width_length == SHORT:
                    key_string = '{:>15} | '.format(key)
                    val_string = '{:>15} | '.format(val)
                elif width_length == MIDDLE:
                    key_string = '{:>30} | '.format(key)
                    val_string = '{:>30} | '.format(val)
                else:
                    key_string = '{:>50} | '.format(key)
                    val_string = '{:>50} | '.format(val)

        # Accumulate the remaining information if any is left over
        if len(key_string) > 0 or len(val_string) > 0:
            table_string.append(key_string)
            table_string.append(val_string)

        # Transfer the collected rows into one string
        cut_string_small = __getCutLine__(length=max([len(_) for _ in table_string]), width_cell=width_length, equal_symbol=False)
        cut_string_large = __getCutLine__(length=max([len(_) for _ in table_string]), width_cell=width_length, equal_symbol=True)
        print_string = cut_string_large
        for i in range(len(table_string) // 2):
            print_string = print_string + '\n' + table_string[2*i] + '\n' + cut_string_small + '\n' + table_string[2*i+1] + '\n' + cut_string_large
        self.prev_lines = 2 * (len(table_string) + 1)

        # Write into tqdm
        self.write(print_string)
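# A minimal usage sketch (not part of the original module): the loss/accuracy
# numbers are made up, and an ANSI-capable terminal is assumed for the
# clear-and-redraw behavior.
if __name__ == "__main__":
    import time

    bar = tqdm_table(range(5))
    for step in bar:
        time.sleep(0.1)
        # Refresh the table under the progress bar on each iteration.
        bar.set_table_info({"loss": round(1.0 / (step + 1), 3), "acc": 0.5 + 0.1 * step})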
SunnerLi/tqdm_table
tqdm_table/__init__.py
__init__.py
py
4,733
python
en
code
0
github-code
6
41045436456
'''
Exercise 3: Write a program to prompt for a score between 0.0 and 1.0.
If the score is out of range, print an error message. If the score is
between 0.0 and 1.0, print a grade using the following table:
>= 0.9 A
>= 0.8 B
>= 0.7 C
>= 0.6 D
<  0.6 F
'''

# Function to print the grade for a score.
def cal_grade(score):
    if score < 0.0 or score > 1.0:
        print("Error: score must be between 0.0 and 1.0")
    elif score >= 0.9:
        print("Grade: A")
    elif score >= 0.8:
        print("Grade: B")
    elif score >= 0.7:
        print("Grade: C")
    elif score >= 0.6:
        print("Grade: D")
    else:
        print("Grade: F")


try:
    score = float(input("Enter your score: "))
    cal_grade(score)
except ValueError:
    print("Please input a number between 0.0 and 1.0")
simonSlamka/UCL-ITtech
programming/SimonStorm/ch3_ex3.py
ch3_ex3.py
py
682
python
en
code
2
github-code
6
41058141376
from copy import copy


class Poly:
    def __init__(self, *terms):
        # __str__ uses the name self.terms for the dictionary of terms
        # So __init__ should build this dictionary from terms
        self.terms = {}
        if terms == None:
            self.terms = {0: 0}
        for term in terms:
            assert type(term[0]) in (int, float)
            assert type(term[1]) == int and term[1] >= 0
            if term[1] in self.terms:
                if term[0] != 0:
                    raise AssertionError
            if term[0] != 0:
                self.terms[term[1]] = term[0]

    # Fill in the rest of this method, using *terms to initialize self.terms

    # I have written str(...) because it is used in the bsc.txt file and
    # it is a bit subtle to get correct. Notice that it assumes that
    # every Poly object stores a dict whose keys are powers and whose
    # associated values are coefficients. This function does not depend
    # on any other method in this class being written correctly.
    def __str__(self):
        def term(c, p, var):
            return (str(c) if p == 0 or c != 1 else '') +\
                   ('' if p == 0 else var + ('^' + str(p) if p != 1 else ''))
        if len(self.terms) == 0:
            return '0'
        else:
            return ' + '.join([term(c, p, 'x') for p, c in sorted(self.terms.items(), reverse=True)]).replace('+ -', '- ')

    def __repr__(self):
        results = []
        for k, v in self.terms.items():
            results.append(str((v, k)))
        return 'Poly({})'.format(','.join(results))

    def __len__(self):
        return max(self.terms) if len(self.terms) > 0 else 0

    def __call__(self, arg):
        results = 0
        for k, v in self.terms.items():
            results += v * (arg) ** k
        return results

    def __iter__(self):
        for k in sorted(self.terms, reverse=True):
            yield (self.terms[k], k)

    def __getitem__(self, index):
        if type(index) == int and index >= 0:
            return self.terms[index] if index in self.terms else 0
        raise TypeError

    def __setitem__(self, index, value):
        if type(index) == int and index >= 0:
            if value == 0:
                if index in self.terms:
                    del self.terms[index]
                else:
                    pass
            else:
                self.terms[index] = value
        else:
            raise TypeError

    def __delitem__(self, index):
        if type(index) == int and index >= 0:
            if index in self.terms:
                del self.terms[index]
        else:
            raise TypeError

    def _add_term(self, c, p):
        if type(p) == int and p >= 0 and type(c) in (int, float):
            if p not in self.terms and c != 0:
                self.terms[p] = c
            else:
                if p in self.terms:
                    if self.terms[p] + c != 0:
                        self.terms[p] = self.terms[p] + c
                    else:
                        del self.terms[p]
        else:
            raise TypeError

    def __add__(self, right):
        values = [[v, k] for k, v in self.terms.items()]
        if type(right) in (int, float):
            for value in values:
                if value[1] == 0:
                    if value[0] + right != 0:
                        value[0] = value[0] + right
                    else:
                        values.remove(value)
        elif type(right) == Poly:
            for k, v in right.terms.items():
                if k in [value[1] for value in values]:
                    for value in values:
                        if value[1] == k:
                            if value[0] + right.terms[k] != 0:
                                value[0] = value[0] + right.terms[k]
                            else:
                                values.remove(value)
                else:
                    values.append((v, k))
        else:
            raise TypeError
        results = [str(value) for value in values]
        return eval('Poly({})'.format(','.join(results)))

    def __radd__(self, left):
        values = [[v, k] for k, v in self.terms.items()]
        if type(left) in (int, float):
            for value in values:
                if value[1] == 0:
                    if value[0] + left != 0:
                        value[0] = value[0] + left
                    else:
                        values.remove(value)
        elif type(left) == Poly:
            for k, v in left.terms.items():
                if k in [value[1] for value in values]:
                    for value in values:
                        if value[1] == k:
                            if value[0] + left.terms[k] != 0:
                                value[0] = value[0] + left.terms[k]
                            else:
                                values.remove(value)
                else:
                    values.append((v, k))
        else:
            raise TypeError
        results = [str(value) for value in values]
        return eval('Poly({})'.format(','.join(results)))

    def __mul__(self, right):
        values = [[v, k] for k, v in self.terms.items()]
        if type(right) in (int, float):
            if right == 0:
                return 0
            else:
                for value in values:
                    value[0] = right * value[0]
                results = [str(value) for value in values]
        if type(right) == Poly:
            temp = []
            for k, v in right.terms.items():
                for value in values:
                    if k + value[1] in [t[1] for t in temp]:
                        for t in temp:
                            if t[1] == k + value[1]:
                                temp.append((v * value[0] + t[0], t[1]))
                                temp.remove(t)
                    else:
                        temp.append((v * value[0], k + value[1]))
            results = [str(i) for i in temp]
        return eval('Poly({})'.format(','.join(results)))

    def __rmul__(self, left):
        values = [[v, k] for k, v in self.terms.items()]
        if type(left) in (int, float):
            if left == 0:
                return 0
            for value in values:
                value[0] = left * value[0]
        results = [str(value) for value in values]
        return eval('Poly({})'.format(','.join(results)))

    def __eq__(self, right):
        if type(right) in (int, float):
            if len(self.terms) == 1 and 0 in self.terms:
                return self.terms[0] == right
            else:
                return False
        elif type(right) == Poly:
            results = True
            for k, v in right.terms.items():
                if k not in self.terms and v != 0:
                    return False
                results = results * (self.terms[k] == right.terms[k])
            return results
        raise TypeError


if __name__ == '__main__':
    # Some simple tests; you can comment them out and/or add your own before
    # the driver is called.
    # print('Start simple tests')
    # p = Poly((3,2),(-2,1), (4,0))
    # print('  For Polynomial: 3x^2 - 2x + 4')
    # print('  str(p):',p)
    # print('  repr(p):',repr(p))
    # print('  len(p):',len(p))
    # print('  p(2):',p(2))
    # print('  list collecting iterator results:',[t for t in p])
    # print('  p+p:',p+p)
    # print('  p+2:',p+2)
    # print('  p*p:',p*p)
    # print('  p*2:',p*2)
    # print('End simple tests\n')
    # p[2] = 0
    # print(str(p))
    # p[10] = 0
    # p1 = Poly((1,1),(2,0))
    # p2 = Poly((3,2),(2,1),(1,0))
    # p3 = Poly((3,5),(-2,2),(-4,0))
    # print(p1+p2)
    # print(p2+p3)
    # print(p1+p3)
    # p3*2

    import driver
    #driver.default_show_exception=True
    #driver.default_show_exception_message=True
    #driver.default_show_traceback=True
    driver.driver()
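# A standalone usage sketch, separate from the module above (kept as comments
# so it does not run alongside the driver); the expected outputs follow from
# __str__/__call__ for these terms:
#
#   p = Poly((3, 2), (-2, 1), (4, 0))   # represents 3x^2 - 2x + 4
#   print(p)       # 3x^2 - 2x + 4
#   print(p(2))    # 12  (3*4 - 2*2 + 4)
#   print(p + 2)   # 3x^2 - 2x + 6
#   print(2 * p)   # 6x^2 - 4x + 8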
solomc1/python
ics 33/solutions/ile2 solutions/Lab 1/JiangYu/poly.py
poly.py
py
8,201
python
en
code
0
github-code
6
70285709629
from __future__ import absolute_import
import os
from setuptools import setup, find_packages

# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

setup(
    name='pyswf',
    version='1.5.4',
    description='SWF Parsing Utilities',
    long_description=read('README.md'),
    keywords="swf parser parsing decompile utilities",
    author='Tim Knip',
    author_email='[email protected]',
    url='https://github.com/timknip/pyswf',
    install_requires=["lxml>=3.3.0", "Pillow>=2.3.0", "pylzma>=0.4.6", "six"],
    packages=find_packages(),
    license="MIT",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Topic :: Utilities",
        "License :: OSI Approved :: MIT License",
    ],
)
timknip/pyswf
setup.py
setup.py
py
982
python
en
code
154
github-code
6
32100438574
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_wine
from E2 import sammon
from E1 import bkmeans
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import linkage
from scipy.cluster.hierarchy import dendrogram

sammons_maxiter = 100
e = 0.001
A = 0.3

# Figure 1
fig1, axs1 = plt.subplots(3, 3, figsize=(10, 10))

print(f"Max iter set to {sammons_maxiter}. It might take a total of {sammons_maxiter*3} iterations to run the code. Each iteration takes approx 0.5-2 seconds.")

for i in range(3):
    if i == 0:
        datasetX, datasetLabels = load_breast_cancer().data, load_breast_cancer().target
        dataset_name = 'Breast Cancer Dataset'
    elif i == 1:
        datasetX, datasetLabels = load_wine().data, load_wine().target
        dataset_name = 'Wine Dataset'
    elif i == 2:
        diabetesData = np.genfromtxt('csv_result-diabetes.csv', delimiter=',', skip_header=1)
        datasetX, datasetLabels = diabetesData[:, :-1], diabetesData[:, -1]
        dataset_name = 'Diabetes Dataset'

    for j in range(3):
        if j == 0:
            print(f"Computing sammon mapping for {dataset_name} ({i+1}/3)")
            result = sammon(datasetX, sammons_maxiter, e, A)
            plot_title = 'Sammon Mapping'
        elif j == 1:
            pca = PCA(n_components=2)
            result = pca.fit_transform(datasetX)
            plot_title = 'PCA'
        elif j == 2:
            tsne = TSNE(n_components=2)
            result = tsne.fit_transform(datasetX)
            plot_title = 't-SNE'

        axs1[i, j].scatter(result[:, 0], result[:, 1], c=datasetLabels, s=5)
        axs1[i, j].set_title(f'{dataset_name} - {plot_title}')

plt.tight_layout()

fig, axes = plt.subplots(3, 3, figsize=(10, 10))

for i in range(3):
    if i == 0:
        datasetX, datasetLabels = load_breast_cancer().data, load_breast_cancer().target
        dataset_name = 'Breast Cancer Dataset'
    elif i == 1:
        datasetX, datasetLabels = load_wine().data, load_wine().target
        dataset_name = 'Wine Dataset'
    elif i == 2:
        diabetesData = np.genfromtxt('csv_result-diabetes.csv', delimiter=',', skip_header=1)
        datasetX, datasetLabels = diabetesData[:, :-1], diabetesData[:, -1]
        dataset_name = 'Diabetes Dataset'

    pca = PCA(n_components=2)
    X_pca = pca.fit_transform(datasetX)

    for j in range(3):
        ax = axes[i, j]
        if j == 0:
            bkmeans_result = bkmeans(X_pca, 2, 30)
            c = bkmeans_result
            algorithm_name = 'bk means'
        elif j == 1:
            kmeans_result = KMeans(n_init=10, n_clusters=2, random_state=0).fit_predict(X_pca)
            c = kmeans_result
            algorithm_name = 'classic k means'
        elif j == 2:
            hierarchical_result = linkage(X_pca, method='ward')
            dendro = dendrogram(hierarchical_result, ax=ax)
            c = datasetLabels
            algorithm_name = 'hierarchical'
        ax.scatter(X_pca[:, 0], X_pca[:, 1], c=c)
        ax.set_title(f'{dataset_name} - {algorithm_name}')

plt.tight_layout()
plt.show()
Knoz9/ML-A3-A4
km222ug_A4/E3.py
E3.py
py
3,231
python
en
code
0
github-code
6
26831922928
""" This class is used to cause object-like behavior in non-OOP libraries or enhance polymorphism in objects. It does so by saving any repetitive parameter in its attributes and passing it into any function that can accept them. For a demo, see example.py @author Ben Hsieh """ import inspect class ClassEmulator: def __init__(self, target_class, **kwargs): """ Creates a target_class emulator and stores any function parameter as self attributes. Parameters ---------- target_class: class or object The target to emulate. Can be anything with its own library of functions. **kwargs: any key=value Any additional parameters to store and use in target_class's functions. """ self.__target_class = target_class self.__reference_objects = [] self.save_params(**kwargs) def save_params(self, **kwargs): """ Stores any parameter you may want to pass into functions as class attributes. Parameters ---------- **kwargs: any key=value Any parameter to store and use in target_class's functions. """ for key, value in kwargs.items(): self.__setattr__(key, value) def save_reference_object( self, reference_object, copy_attributes: bool = False ) -> None: """ Saves an object from which we can look up attributes. Newer references' attributes will overwrite older references' attributes. Self attributes will overwrite any reference object attributes. If copy_attributes, the reference object's attributes are copied to self attributes, and the reference object is then discarded. """ if copy_attributes: reference_object_attributes = reference_object.__dict__ self.save_params(**reference_object_attributes) else: self.__reference_objects.append(reference_object) def __getattr__(self, function_name): """ Runs the named function with any stored applicable parameter and any parameter the user passes in. """ def run_named_function(*args, **kwargs): # identifies the target function target_function = getattr(self.__target_class, function_name) # identifies all parameters the target function uses target_function_parameters = [ parameter for i, parameter in enumerate(target_function.__code__.co_varnames) if i > 0 or parameter != "obj" # TODO check if other non-parameters could be referenced ] # acquires all stored parameters stored_parameters = {} for reference_object in self.__reference_objects: for key, value in reference_object.__dict__.items(): stored_parameters[key] = value for key, value in self.__dict__.items(): if not any( key == reserved_name for reserved_name in [ "_ClassEmulator__target_class", "_ClassEmulator__reference_objects", ] ): stored_parameters[key] = value # finds all stored parameters parameters that are acceptable to the target function named_parameters = { key: value for key, value in stored_parameters.items() if "kwargs" in target_function_parameters or key in target_function_parameters } # removes all stored parameters that overlap with kwargs number_of_positional_parameters = len(args) try: is_class_method = self.__target_class.__name__ == target_function.__self__.__name__ if is_class_method: number_of_positional_parameters += 1 except AttributeError: pass positional_parameters_used = target_function_parameters[:number_of_positional_parameters] named_parameters = { key: value for key, value in named_parameters.items() if key not in positional_parameters_used } # makes user input kwargs count named_parameters.update(kwargs) # run the target function and return the output return target_function(*args, **named_parameters) return run_named_function
hyv3m1nd/classemulator
emulator.py
emulator.py
py
4,564
python
en
code
0
github-code
6
72530976189
def main():
    # List comprehensions
    # [ <expression> for item in list if <conditional> ] syntax
    # creating a list based on an existing list
    # the if part acts as a filter (if True it includes, if False it omits)
    my_comp_list = [x for x in range(1, 10) if x % 2 == 0]
    print(my_comp_list)

    # calling a function in the expression slot.
    # x is defined to be [0, 1, 2, 3, 4, 5, 6, 7]
    # where some_function() is being called for every item in that list
    my_comp_list_2 = [some_function(x) for x in range(8)]
    print(my_comp_list_2)

    # creating a matrix using nested list comprehension
    # [[j for j in num_of_column] for i in [num_of_row]]
    # better coding style than making a matrix with a nested for loop
    my_comp_list_3 = [[j for j in range(3)] for i in range(10)]
    print(my_comp_list_3)

    # flatten the matrix into a one-dimensional list
    print([value for sublist in my_comp_list_3 for value in sublist])

    # Set comprehensions are just like lists but with {}
    # "s % 2 == 1" is the same as "s % 2" where it only keeps the odds
    my_comp_set = {s for s in range(1, 5) if s % 2}
    print(my_comp_set)

    # Dictionary comprehension
    # it takes a key and a value
    my_comp_dict = {x: x**2 for x in (2, 4, 6)}
    print(my_comp_dict)


def some_function(a):
    return (a + 5) / 2


if __name__ == '__main__':
    main()
itiro-y/python-repo
LearningPython/data_types/comprehensions.py
comprehensions.py
py
1,394
python
en
code
0
github-code
6
1369749447
""" Import 'OktaPasswordHealth.csv' and sort users into internal and external groups. """ import csv print("\nWhat is your fully qualified domain name?") DOMAIN = input('> ') INTERNAL_USERS = [] EXTERNAL_USERS = [] def write_csv(csv_file, lst): """Write results to a csv.""" with open(csv_file, 'w') as out_csvfile: writer = csv.writer(out_csvfile) writer.writerow(['email']) for email in lst: writer.writerow([email]) with open('OktaPasswordHealth.csv', 'r') as in_csvfile: READER = csv.DictReader(in_csvfile) for row in READER: user_email = row['Login'] if user_email.endswith(DOMAIN): INTERNAL_USERS.append(user_email) else: EXTERNAL_USERS.append(user_email) write_csv('internal_users.csv', INTERNAL_USERS) write_csv('external_users.csv', EXTERNAL_USERS) print("'internal_users.csv' and 'external_users.csv' exported successfully\n")
craighillelson/sort_okta_users_internal_and_external
sort_okta_users_internal_and_external.py
sort_okta_users_internal_and_external.py
py
945
python
en
code
1
github-code
6
26932741505
from lightcurvetools import *

# should do a smooth-and-subtract operation on the test lightcurve, show a plot,
# and save the smoothed lc

testfile = 'testdata/lc_xrt_alldata.dat'

testlc = readlightcurve(testfile)
testlc.label = 'original'
testlc.stats()

sigma_HW = 2.355/2
wid = round(20/sigma_HW)
smoothlc = smoothlightcurve(testlc, wid)
smoothlc.label = 'smoothed'

sublc = subtractlightcurve(testlc, smoothlc)
sublc.label = 'subtracted'

plotlightcurve([testlc, smoothlc, sublc])

sublc.write('testwrite.dat')
unclellama/lightcurvetools
lightcurvetools/tests.py
tests.py
py
511
python
en
code
0
github-code
6
8804328521
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from datetime import datetime
from BaseTest import BaseTest
from Helper import Helper
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from Utilities.config import Utilities
from selenium.webdriver.support.color import Color
import time


@pytest.mark.usefixtures("user")
@pytest.mark.usefixtures("setup")
@pytest.mark.usefixtures("env")
@pytest.mark.migration
@pytest.mark.singapore
@pytest.mark.uk
@pytest.mark.usa
@pytest.mark.sandoz
@pytest.mark.eg
@pytest.mark.global_site
@pytest.mark.de
@pytest.mark.at
@pytest.mark.ph
@pytest.mark.ch
@pytest.mark.ch_fr
@pytest.mark.fr
@pytest.mark.za
@pytest.mark.malaysia
@pytest.mark.es
@pytest.mark.pt
@pytest.mark.tw
@pytest.mark.jp
@pytest.mark.lv
@pytest.mark.it
@pytest.mark.ar
@pytest.mark.fi
@pytest.mark.kr
@pytest.mark.br
@pytest.mark.cn
@pytest.mark.scn
@pytest.mark.hu
@pytest.mark.biome
@pytest.mark.foundation
@pytest.mark.ie
@pytest.mark.gr
@pytest.mark.dk
# @pytest.mark.no
@pytest.mark.ca
# @pytest.mark.se
@pytest.mark.tr
# @pytest.mark.cz
@pytest.mark.ru
# @pytest.mark.rs
@pytest.mark.ro
@pytest.mark.co
@pytest.mark.sk
@pytest.mark.ve
# @pytest.mark.id
@pytest.mark.bd
@pytest.mark.be
@pytest.mark.au
@pytest.mark.pl
class Test_NewsArchive(BaseTest):

    # @pytest.mark.malaysia
    # def test_ARC_2586_NewsArchive_get_newsArchive(self):
    #     self.driver.get(self.env)
    #     self.newsPage.launch_newsArchive(self.env_name)

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_bannerImage(self):
        """
        Checks banner image in news archive page.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        if len(self.driver.find_elements(*self.newsPage.banner_img_css)) > 0:
            assert len(self.basePage.get_elemet_attribute(self.newsPage.banner_img_css, "src")) > 0

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_pattern(self):
        """
        Checks pattern in news archive page.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        assert "patterns" in self.basePage.get_css_property(self.newsPage.pattern_css, "background-image")

    # Already covered in test_breadcrumb
    # def test_ARC_2586_NewsArchive_breadcrumbs(self):
    #     self.driver.get(self.env)
    #     if self.env_name == "france":
    #         self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
    #         self.basePage.press_button(self.newsRoomPage.link_btn_css)
    #     else:
    #         self.newsPage.launch_newsArchive(self.env_name)
    #     breadcrumb_items, breadcrumb_first_arrow_element, breadcrumb_second_arrow_element = self.newsPage.breadcrumb_ele()
    #     assert len(breadcrumb_items) == 3
    #     assert ">" in breadcrumb_first_arrow_element
    #     assert ">" in breadcrumb_second_arrow_element
    #     assert "#656565" == self.basePage.get_css_color(self.newsPage.news_archive_last_child_breadcrumb_color_css, "color")

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_search_bar_button(self):
        """
        Checks search bar and button in news archive page.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        search_bar, search_btn = self.newsPage.search_bar_and_btn()
        assert search_bar == True
        assert search_btn == True

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_default_view(self):
        """
        Checks default view - grid view and list.svg icon in news archive page.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        assert "arctic_grid_view" in self.basePage.get_elemet_attribute(self.newsPage.news_page_view_css, "class")
        assert "list.svg" in self.basePage.get_css_property(self.newsPage.view_toggle_icon, "background")

    # This is disabled in all sites now
    # def test_ARC_2586_NewsArchive_calendar_from_to(self):
    #     if self.env_name == "france":
    #         self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
    #         self.basePage.press_button(self.newsRoomPage.link_btn_css)
    #     else:
    #         self.newsPage.launch_newsArchive(self.env_name)
    #
    #     assert len(self.basePage.get_element_text(self.newsPage.from_date_xpath)) > 0
    #     assert len(self.basePage.get_element_text(self.newsPage.to_date_xpath)) > 0

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_all_topics_grey_color(self):
        """
        Checks All topics is greyed out by default in news archive page.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        all_topics_text_status, hex_code = self.newsPage.all_topics_grey_color()
        assert all_topics_text_status == True
        assert "#f1f1f1" in hex_code

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_pagination_num(self):
        """
        Checks pagination numbers are correctly displayed.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        current_page_num, page_num = self.newsPage.pagination_number()
        assert current_page_num == page_num

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_right_hand_rail(self):
        """
        Checks if there is no right hand rail.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        assert len(self.driver.find_elements(*self.newsPage.right_hand_rail_xpath)) == 0

    # Already covered in test_breadcrumb
    # def test_ARC_2586_NewsArchive_first_second_level_breadcrumbs(self):
    #     self.driver.get(self.env)
    #     if self.env_name == "france":
    #         self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
    #         self.basePage.press_button(self.newsRoomPage.link_btn_css)
    #     else:
    #         self.newsPage.launch_newsArchive(self.env_name)
    #     breadcumb_anchor_url_list, breadcumb_anchor_current_url_list = self.newsPage.check_all_breadcrumb_url()
    #     for breadcumb_anchor_url, breadcumb_anchor_current_url in zip(breadcumb_anchor_url_list, breadcumb_anchor_current_url_list):
    #         assert breadcumb_anchor_url in breadcumb_anchor_current_url

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_pagination(self):
        """
        Checks pagination is working as expected or not.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        if len(self.driver.find_elements(self.newsPage.pagination_heading_xpath[0], self.newsPage.pagination_heading_xpath[1])) > 0:
            num_content_page, pagination_heading, page_content_list = self.newsPage.pagination_validation()
            if num_content_page == 12 and pagination_heading > 0:
                page_count_len = len(page_content_list)
                while page_count_len > 0:
                    assert len(page_content_list) > 0
                    page_count_len = page_count_len - 1

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_menuTabs_viewEmpty(self):
        """
        Checks menu tabs are getting greyed out or not, and if no content is
        there under any menu tab, it should display No Result Found.
        """
        empty_list = []
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        hex_color_list, empty_list = self.newsPage.grey_menu_verify()
        hex_color_list_len = len(hex_color_list)
        while hex_color_list_len > 0:
            assert "#f1f1f1" in hex_color_list[hex_color_list_len-1]
            hex_color_list_len = hex_color_list_len - 1

        # def test_ARC_2586_NewsArchive_viewEmpty(self):
        #     self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        empty_list_len = len(empty_list)
        if empty_list_len > 0:
            if "view-empty" in empty_list[empty_list_len-1]:
                assert "view-empty" in empty_list[empty_list_len-1]
                empty_list_len = empty_list_len - 1

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_listview(self):
        """
        Checks the following:
        1. list.svg icon.
        2. list view button.
        3. Default view.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        list_icon, list_view_btn, grid_view = self.newsPage.list_view()
        assert "list.svg" in list_icon
        assert list_view_btn == True
        assert "arctic_grid_view" in grid_view

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_gridView(self):
        """
        Checks the following:
        1. grid.svg icon.
        2. grid view button.
        3. view is in list view.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        grid_icon, grid_view_btn, list_view = self.newsPage.grid_view()
        assert "grid.svg" in grid_icon
        assert grid_view_btn == True
        assert "arctic_list_view" in list_view

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_randomText_search_url(self):
        """
        Checks the searched text is coming in the url.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        searched_keyword = self.newsPage.verify_search_result()
        current_url = self.driver.current_url
        assert searched_keyword in current_url

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_randomText_search_keyword(self):
        """
        Checks searched keyword is coming in the text box while changing the tabs.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        searched_keyword_list = self.newsPage.verify_searchText_with_Tabs()
        for search_keyword in searched_keyword_list:
            assert search_keyword == self.newsPage.test_data

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_randomText_greyMenu_verify(self):
        """
        After searching some keyword, it checks whether tabs are getting greyed out.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        hex_color_list = self.newsPage.grey_menu_verify_with_Tabs()
        for hex_color in hex_color_list:
            assert "#f1f1f1" == hex_color

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_randomText_menuTab_url(self):
        """
        Checks menu tab name is coming in the url.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        tab_href_list, selected_tab_url_list = self.newsPage.verify_menuTab_url()
        for tab_href, selected_tab_url in zip(tab_href_list, selected_tab_url_list):
            assert tab_href in selected_tab_url

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_randomText_searchKeyword_url(self):
        """
        Checks searched keyword is coming in the url while changing tabs.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        searched_keyword, selected_tab_url_list = self.newsPage.verify_searchText_with_url()
        for url in selected_tab_url_list:
            assert searched_keyword in url

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_pagination_front_arrow_back_arrow(self):
        """
        Checks front and back pagination arrows are working.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        if len(self.driver.find_elements(self.newsPage.pagination_heading_xpath[0], self.newsPage.pagination_heading_xpath[1])) > 0:
            num_content_page, pagination_heading, page_one_contents, page_zero_contents = self.newsPage.pagination_front_arrow_back_arrow_validation()
            if num_content_page == 12 and pagination_heading > 0:
                assert len(page_one_contents) > 0
                assert len(page_zero_contents) > 0

    # @pytest.mark.malaysia
    # def test_ARC_2586_NewsArchive_key_release_language_tab(self):
    #     """
    #     Checks language dropdown is coming under key release dropdown.
    #     """
    #     self.driver.get(self.env)
    #     if self.env_name == "france":
    #         self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
    #         self.basePage.press_button(self.newsRoomPage.link_btn_css)
    #     else:
    #         self.newsPage.launch_newsArchive(self.env_name)
    #     if self.env_name == "global":
    #         language_tab_txt = self.newsPage.key_releases(self.env_name)
    #         assert "Language" in language_tab_txt

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_media_release_language_tab(self):
        """
        Checks language dropdown is coming under media release dropdown and
        dates are coming in descending order.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        desc_sort = self.newsPage.media_release(self.env_name)
        assert desc_sort == True
        # if self.env_name == "global":
        #     assert "Language" in language_tab_txt

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_randomText_verification(self):
        """
        Checks that after searching a keyword, the keyword is coming in the
        search box and All topics is getting greyed out.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        searched_keyword = self.newsPage.verify_search_result()
        assert searched_keyword == self.newsPage.test_data
        all_topics_text_status, hex_code = self.newsPage.all_topics_grey_color()
        assert all_topics_text_status == True
        assert "#f1f1f1" in hex_code

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_content_validation(self):
        """
        Checks if content is there in news archive page.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        contents = self.basePage.get_elements(self.newsPage.content_pages_css)
        assert len(contents) > 0

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_search_results(self):
        """
        Checks searched input is coming in the searched result or not.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        content_title = self.newsPage.search_results()
        for title in content_title:
            if self.newsPage.test_data_novartis in title:
                assert self.newsPage.test_data_novartis in title
                break

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_verify_filters(self):
        """
        Checks Filters are working or not.
        """
        media_release_list = ['key release', 'media release']
        featured_news_list = ['pulse update', 'statement', 'featured news']
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        menu_tab_list, tab_label_list = self.newsPage.verify_filters()
        for filter_name, tab_label in zip(menu_tab_list, tab_label_list):
            if filter_name == "media releases" and self.env_name == 'global':
                for tab in tab_label:
                    assert tab in media_release_list
            elif filter_name == "key releases" and self.env_name == 'global':
                for tab in tab_label:
                    assert tab in media_release_list
            elif filter_name == "featured news" and self.env_name == 'global':
                for tab in tab_label:
                    assert tab in featured_news_list
            elif filter_name == "statements" and self.env_name == 'global':
                for tab in tab_label:
                    assert tab in featured_news_list
            elif filter_name == "pulse updates" and self.env_name == 'global':
                for tab in tab_label:
                    assert tab in featured_news_list
            elif filter_name == "statement" and self.env_name == 'biome':
                for tab in tab_label:
                    assert tab in featured_news_list
            elif filter_name == "statements" and self.env_name == 'usa':
                for tab in tab_label:
                    assert tab in featured_news_list
            elif filter_name == "statement" and self.env_name == 'foundation':
                for tab in tab_label:
                    assert tab in featured_news_list
            else:
                for label in tab_label:
                    assert label in filter_name

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_banner_title(self):
        """
        Checks if banner title is displaying.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        if self.env_name == "global":
            assert self.basePage.is_displayed(self.newsPage.banner_text_css) == True

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_key_release_text(self):
        """
        Checks key release text is there above search bar.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        if self.env_name == "global":
            assert self.basePage.is_displayed(self.newsPage.key_releases_text_xpath) == True
            assert "Key Releases" in self.basePage.get_element_text(self.newsPage.key_releases_text_xpath)

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_menu_ellipse(self):
        """
        Checks ellipse is displayed.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        if self.env_name == "global":
            visible_element = self.driver.find_elements(*self.newsPage.visible_element_xpath)
            if len(visible_element) == 7:
                assert self.basePage.is_displayed(self.newsPage.ellipses_xpath) == True

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_ellipses_validation(self):
        """
        Checks any element is there inside ellipse.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        if self.env_name == "global":
            if self.basePage.is_displayed(self.newsPage.ellipses_xpath):
                self.basePage.press_button(self.newsPage.ellipses_xpath)
                elements = self.driver.find_elements(*self.newsPage.non_visible_ellipses_xpath)
                assert len(elements) > 0

    @pytest.mark.malaysia
    def test_ARC_2586_NewsArchive_ellipses_element_greyed_out(self):
        """
        Checks that while clicking any element in ellipse, that element gets greyed out.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        if self.env_name == "global":
            if self.basePage.is_displayed(self.newsPage.ellipses_xpath):
                self.basePage.press_button(self.newsPage.ellipses_xpath)
                non_visible_element_text = self.basePage.get_element_text(self.newsPage.non_visible_ellipses_xpath)
                self.basePage.press_button(self.newsPage.non_visible_ellipses_xpath)
                length = len(self.driver.find_elements(*self.newsPage.visible_element_xpath))
                element = self.driver.find_element(By.CSS_SELECTOR, f'ul#block-newsarchivenavigation>li:nth-child({length}) > a')
                value_of_css = element.value_of_css_property('background-color')
                hex = Color.from_string(value_of_css).hex
                assert '#f1f1f1' == hex
                assert non_visible_element_text == element.text

    def test_ARC_2586_NewsArchive_grey_bg_only_one_tab(self):
        """
        Checks only one tab is getting greyed out at a time.
        """
        self.driver.get(self.env)
        if self.env_name == "france":
            self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
            self.basePage.press_button(self.newsRoomPage.link_btn_css)
        else:
            self.newsPage.launch_newsArchive(self.env_name)
        first_tab_color, second_tab_color = self.newsPage.menu_tab_one_grey_bg()
        assert "#000000" == first_tab_color
        assert "#f1f1f1" == second_tab_color
Shreyasi2205/MyPOMProject
tests/test_NewsArchive.py
test_NewsArchive.py
py
27,544
python
en
code
0
github-code
6
75174435386
from tkinter import *
import time
import threading
from tkinter.ttk import *


def start_download():
    GB = 100
    download = 0
    speed = 1
    while download < GB:
        time.sleep(0.05)
        download += speed
        percent.set(str(int(download/GB*100)) + "%")
        text.set(str(download) + "/" + str(GB) + " GB completed")
        bar['value'] = (download/GB)*100
        window.update_idletasks()


def download():
    t = threading.Thread(target=start_download)
    t.start()


window = Tk()

percent = StringVar()
text = StringVar()

bar = Progressbar(window, orient=HORIZONTAL, length=300)
bar.pack(pady=10)

percentLabel = Label(window, textvariable=percent).pack()
taskLabel = Label(window, textvariable=text).pack()

button = Button(window, text="Download", command=download).pack()

window.mainloop()
cakel/python_tutorial_tkinter_brocode
19_tk_progressbar5.py
19_tk_progressbar5.py
py
823
python
en
code
0
github-code
6
17463247487
#! /usr/bin/python

# import sys
import os
import time

if __name__ == '__main__':
    numsOfProducers = [5]
    numsOfConsumers = [5]
    Ns = [5, 50, 100]

    for prNum, conNum in zip(numsOfProducers, numsOfConsumers):
        for N in Ns:
            print("producers: ", prNum, " consumers: ", conNum, " N: ", N)
            if os.fork() == 0:
                os.execl("./wrapper", "wrapper", str(prNum), str(conNum), str(N))
            else:
                os.wait()
                # verifySolution(prNum)
                with open("consumerFile.txt", "r") as conF:
                    consumerLines = conF.readlines()
                for conLine in consumerLines:
                    print(conLine, end="")
            time.sleep(0.5)
youngdashu/systemy-operacyjne-2020-2021
lab5/NowakAdam/cw05/zad3/runTests.py
runTests.py
py
770
python
en
code
0
github-code
6
40851164655
from django.db import models
from django.contrib.auth.models import AbstractUser

# Create your models here.
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from django.db.models.signals import post_save
from django.conf import settings


class User(AbstractUser):
    is_teacher = models.BooleanField(default=False)
    is_student = models.BooleanField(default=False)

    def __str__(self):
        return self.username


#class Teacher(models.Model):
#    user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
#
#class Student(models.Model):
#    user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)


class Doubt(models.Model):
    questions = models.TextField(max_length=200)
    picture = models.ImageField(blank=True, null=True, upload_to='images')
    student = models.ForeignKey(User, related_name='student', on_delete=models.CASCADE)
    teacher = models.ForeignKey(User, related_name='teacher', on_delete=models.CASCADE)


@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    if created:
        Token.objects.create(user=instance)
ali7070droid/doubts-app
myapp/models.py
models.py
py
1,197
python
en
code
0
github-code
6
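Because of the post_save receiver above, a DRF token is minted automatically for every newly created user. A hedged sketch of that behavior, e.g. inside a Django shell for this project (username and password are placeholders):

# Sketch: the create_auth_token receiver issues a token on user creation.
from django.contrib.auth import get_user_model
from rest_framework.authtoken.models import Token

user = get_user_model().objects.create_user(username="alice", password="s3cret")
token = Token.objects.get(user=user)  # created by the post_save signal
print(token.key)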
74280991547
hx711_reference_unit = 226 load_threshold = 1000 # g loop_interval = 0.1 camera_device = "/dev/video0" photo_format = "width=1920,height=1080" photo_count = 4 reset_timeout = 300 # sec tmpdir = "/tmp" debug = True # remote bot_token="BOT_TOKEN" channel_id=CHANNEL_ID
mojyack/rpi-cat-monitor
config-example.py
config-example.py
py
268
python
en
code
0
github-code
6
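A config module like this is plain Python, so the monitor presumably imports it and reads attributes directly (after the BOT_TOKEN/CHANNEL_ID placeholders are filled in). A minimal consumption sketch, assuming the file is saved as config.py:

# Sketch: reading the plain-Python config module above.
import config

if config.debug:
    print("threshold:", config.load_threshold, "g; poll every", config.loop_interval, "s")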
8916098020
# Write a program that reads an integer n and checks whether n is even;
# if it is not, keep asking for another number until an even one is entered. Use while.

# Using an infinite loop so the program keeps running.
while True:
    n = int(input("Enter a number: "))  # with while True, prompt inside the loop
    if n % 2 != 0:  # if the number entered is odd, the program keeps running (True)
        print("Enter an even number")
    else:  # if the number is even, break stops the execution
        break
lucasnasc46/curso-python22
Lista 4 de py/questao5.py
questao5.py
py
608
python
pt
code
0
github-code
6
31355521081
import numpy as np

# some useful constants:
edges = (N,S,E,W) = list(range(4))

def lonlat2cart( lons, lats, rad=1.):
    """Convert longitude/latitude to cartesian coordinates.

    Args:
        lons (numpy 2-d array): longitude ("x") values (decimal degrees).
        lats (numpy 2-d array): latitude ("y") values (decimal degrees).
        rad (float): nominal sphere radius (default=1. returns cartesian
            coordinates on the unit sphere).

    Returns:
        cart (numpy 3-d array): cartesian coordinates on the sphere. Rows and
            columns are arranged according to the input lons/lats arrays,
            while along the third axis, the 0, 1, and 2 indices correspond to
            the x, y and z components, respectively.

    Raises:
        ValueError: If input lons and lats matrix dimensions are not equal.

    """
    if lons.ndim!=2 or lats.ndim!=2 or lons.shape!=lats.shape:
        raise ValueError('lons and lats must be two-dimensional matrices of equal size.')

    dims = list(lons.shape)
    dims.append(3)
    cart = np.zeros(dims)

    cart[:,:,0] = np.cos(np.radians(lons))*np.cos(np.radians(lats))    # x
    cart[:,:,1] = np.sin(np.radians(lons))*np.cos(np.radians(lats))    # y
    cart[:,:,2] = np.sin(np.radians(lats))                             # z

    # scale to the nominal sphere radius:
    return rad*cart


def nearest(lon,lat,lons,lats,geod):
    """Determine indices of, and distance to, nearest lon/lat point.

    Args:
        lon (float): Longitude of search origin
        lat (float): Latitude of search origin
        lons (numpy 2-d array): Matrix of longitude values used in the
            nearest neighbour search.
        lats (numpy 2-d array): Matrix of latitude values used in the
            nearest neighbour search.
        geod (pyproj.Geod object): Geod to be used as basis for distance
            calculations.

    Returns:
        i (int): Zero-based row index of nearest neighbour.
        j (int): Zero-based column index of nearest neighbour.
        dist (float): Great circle distance from (lon,lat) to
            (lons(i,j), lats(i,j))

    Raises:
        ValueError: If input lons and lats matrix dimensions are not equal.

    """
    if lons.ndim!=2 or lats.ndim!=2 or lons.shape!=lats.shape:
        raise ValueError('lons and lats must be two-dimensional matrices of equal size.')

    i,j,dist = -1,-1,1.e10
    it = np.nditer(lons,flags=['multi_index'])
    while not it.finished:
        (fwd_az,back_az,distance) = geod.inv(
            lon,lat,
            lons[it.multi_index[0],it.multi_index[1]],
            lats[it.multi_index[0],it.multi_index[1]])
        if distance<dist:
            i,j,dist = it.multi_index[0],it.multi_index[1],distance
        it.iternext()
    return i,j,dist


def squad_uarea( cart):
    """Compute quadrilateral surface areas for cartesian array of corner
    points on the unit sphere.

    Args:
        cart (numpy array): 3-d array (m x n x 3) of cartesian x,y,z corner
            points on the unit sphere (for every (i,j), (x,y,z) = (i,j,0),
            (i,j,1), (i,j,2)).

    Returns:
        areas (numpy array): 2-d array (m-1 x n-1) of cell areas.

    Note:
        One of many possible approaches, the algorithm implemented here is
        based on Girard's spherical excess formula, with direct calculation
        of angles using the spherical law of cosines. Note that, due to
        numerical round-off, its results can be inaccurate for small
        angles/areas (edge lengths less than roughly 5km on the scaled
        sphere). In such cases, pquad_uarea is recommended.
""" # only because some geometries are poorly conditioned: import warnings warnings.filterwarnings("ignore") area = np.zeros((np.size(cart,0)-1,np.size(cart,1)-1)) for x_idx in np.arange(np.size(cart,0)-1): for y_idx in np.arange(np.size(cart,1)-1): # quadrilateral corners in counterclockwise direction: ptA = cart[x_idx ,y_idx ,:] ptB = cart[x_idx+1,y_idx ,:] ptC = cart[x_idx+1,y_idx+1,:] ptD = cart[x_idx ,y_idx+1,:] # interior angles of first subtriangle: at,bt,ct = ptA, ptB, ptC ca,cb,cc = np.dot(bt,ct), np.dot(at,ct), np.dot(at,bt) sa,sb,sc = np.sin(np.arccos(ca)), np.sin(np.arccos(cb)), np.sin(np.arccos(cc)) A1 = np.arccos((ca-cb*cc)/(sb*sc)) B1 = np.arccos((cb-ca*cc)/(sa*sc)) C1 = np.arccos((cc-ca*cb)/(sa*sb)) # interior angles of second subtriangle: at,bt,ct = ptA, ptC, ptD ca,cb,cc = np.dot(bt,ct), np.dot(at,ct), np.dot(at,bt) sa,sb,sc = np.sin(np.arccos(ca)), np.sin(np.arccos(cb)), np.sin(np.arccos(cc)) A2 = np.arccos((ca-cb*cc)/(sb*sc)) B2 = np.arccos((cb-ca*cc)/(sa*sc)) C2 = np.arccos((cc-ca*cb)/(sa*sb)) # area: area[x_idx,y_idx] = A1+B1+C1 + A2+B2+C2 - 2*np.pi return area def pquad_uarea( cart): """ Compute planar quadrilateral (faceted) areas for cartesian array of corner points on the unit sphere. Args: cart (numpy array): 3-d array (m x n x 3) of cartesian x,y,z corner points on the unit sphere (for every (i,j), (x,y,z) = (i,j,0), (i,j,1), (i,j,2)). Returns: areas (numpy array): 2-d array (n-1 x m-1) of cell areas. Note: The algorithm implemented here computes areas using edge vector cross products and is recommented for small angles/areas (edge lengths less than roughly 5km on the scaled sphere). If areas are larger, squad_uarea is recommended. """ area = np.zeros((np.size(cart,0)-1,np.size(cart,1)-1)) for x_idx in np.arange(np.size(cart,0)-1): for y_idx in np.arange(np.size(cart,1)-1): # quadrilateral corners in counterclockwise direction: ptA = cart[x_idx ,y_idx ,:] ptB = cart[x_idx+1,y_idx ,:] ptC = cart[x_idx+1,y_idx+1,:] ptD = cart[x_idx ,y_idx+1,:] # edge vectors: ab = ptB-ptA ac = ptC-ptA ad = ptD-ptA area[x_idx,y_idx] = 0.5 * np.linalg.norm(np.cross(ab,ac)+np.cross(ac,ad)) return area
nasa/simplegrid
simplegrid/util.py
util.py
py
6,298
python
en
code
5
github-code
6
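A quick sanity check of the helpers above: a 1-degree by 1-degree cell at the equator covers roughly (pi/180)**2 steradians on the unit sphere. A sketch assuming lonlat2cart and pquad_uarea are in scope:

# Sketch: pquad_uarea on a single 1-degree cell at the equator.
import numpy as np

lons = np.array([[0., 1.], [0., 1.]])
lats = np.array([[0., 0.], [1., 1.]])
cart = lonlat2cart(lons, lats)
print(pquad_uarea(cart))  # ~ (np.pi/180)**2 ~= 3.046e-4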
73652316669
# You are given n non-negative integers a1, a2, ..., an, where each number
# represents a point (i, ai) in the coordinate plane.
# Draw n vertical lines so that line i has endpoints (i, ai) and (i, 0).
# Find the two lines that, together with the x-axis, form a container
# holding the most water.

class Solution(object):
    def maxArea(self, height):
        """
        :type height: List[int]
        :rtype: int
        """
        l = 0
        r = len(height) - 1
        maxarea = 0
        while (l < r):
            maxarea = max(maxarea, (r - l) * min(height[l], height[r]))
            if height[r] < height[l]:
                r = r - 1
            else:
                l = l + 1
        return maxarea


height = [1,8,6,2,5,4,8,3,7]
a = Solution()
print(a.maxArea(height))
xxxxlc/leetcode
Dynamicprogramming/maxArea.py
maxArea.py
py
815
python
zh
code
0
github-code
6
37568035732
# import statements import nltk import sys import pandas as pd import re from nltk import pos_tag from nltk.corpus import stopwords from nltk.stem.porter import PorterStemmer from nltk.stem.wordnet import WordNetLemmatizer from nltk.tokenize import word_tokenize, sent_tokenize from sqlalchemy import create_engine # download necessary NLTK data nltk.download(['punkt', 'wordnet']) nltk.download('stopwords') def load_data(messages_filepath, categories_filepath): """ Load the data containing the messages and the categories, and merge on the common column 'id' Returns a single pandas Dataframe. Keyword arguments: messages_filepath -- filepath (including file name) of the file containing the messages categories_filepath -- filepath (including file name) of the file containing the message categories """ messages = pd.read_csv(messages_filepath, encoding='UTF-8') categories = pd.read_csv(categories_filepath, encoding='UTF-8') return messages.merge(categories, on=['id']) def clean_data(df): """ Parse the single 'categories' column into the 36 distinct message category columns, name the resulting columns, and clean the values, removing the category name from the cells and leaving only the numeric categorical value. Remove true duplicate rows. Returns a cleaned Dataframe. Keyword argument: df -- Dataframe requiring cleaning. """ categories_new = df['categories'].str.split(pat=';', expand=True) row = categories_new.iloc[0,:] category_colnames = list(row.apply(lambda x: x[:-2])) categories_new.columns = category_colnames for column in categories_new: # set each value to be the last character of the string categories_new[column] = categories_new[column].str.slice(-1) # convert column from string to numeric categories_new[column] = categories_new[column].astype(int) df.drop(columns=['categories'], inplace=True) df = pd.concat([df, categories_new], axis=1) # drop duplicates df.drop_duplicates(inplace=True) return df def save_data(df, database_filename): """ Save cleaned Dataframe to a SQL Database table. Keyword arguments: df -- Cleaned Dataframe for export database_filename -- name of the database in which table will be saved """ engine = create_engine('sqlite:///' + database_filename) df.to_sql('messages_and_categories', engine, index=False) def main(): """ Executes following functions: 1) load_data(messages_filepath, categories_filepath) 2) clean_data(df) 3) save_data(df, database_filename) """ if len(sys.argv) == 4: messages_filepath, categories_filepath, database_filepath = sys.argv[1:] print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}' .format(messages_filepath, categories_filepath)) df = load_data(messages_filepath, categories_filepath) print('Cleaning data...') df = clean_data(df) print('Saving data...\n DATABASE: {}'.format(database_filepath)) save_data(df, database_filepath) print('Cleaned data saved to database!') else: print('Please provide the filepaths of the messages and categories '\ 'datasets as the first and second argument respectively, as '\ 'well as the filepath of the database to save the cleaned data '\ 'to as the third argument. \n\nExample: python process_data.py '\ 'disaster_messages.csv disaster_categories.csv '\ 'DisasterResponse.db') if __name__ == '__main__': main()
goitom/project_2_disaster_response
data/process_data.py
process_data.py
py
3,643
python
en
code
0
github-code
6
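The clean_data step above relies on category strings shaped like "related-1;request-0;...". A tiny hedged demo of just that parsing logic on a toy frame:

# Sketch: the categories-splitting logic from clean_data in isolation.
import pandas as pd

df = pd.DataFrame({"categories": ["related-1;request-0", "related-0;request-1"]})
cats = df["categories"].str.split(pat=";", expand=True)
cats.columns = cats.iloc[0].apply(lambda x: x[:-2])
cats = cats.apply(lambda col: col.str.slice(-1).astype(int))
print(cats)  # columns 'related' and 'request' holding 0/1 values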
16179638713
import os
import torch
import wandb
import argparse
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.experimental import enable_iterative_imputer  # noqa: F401, enables IterativeImputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix, roc_auc_score, average_precision_score, balanced_accuracy_score

os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = '0'


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--layer_1', help='layer 1 size', type=int, default=50, required=True)
    parser.add_argument('--layer_2', help='layer 2 size', type=int, default=50, required=True)
    parser.add_argument('--layer_3', help='layer 3 size', type=int, default=50, required=True)
    parser.add_argument('--lr', help='Learning Rate', type=float, default=0.01, required=True)
    parser.add_argument('--weight_decay', help='Weight Decay', type=float, default=0.9, required=True)
    parser.add_argument('--momentum', help='Momentum', type=float, default=0.9, required=True)
    parser.add_argument('--no_epoch', help='Number of Epochs', type=int, default=1000, required=True)
    args = parser.parse_args()
    return args


class Model(torch.nn.Module):
    def __init__(self, layer_1, layer_2, layer_3):
        super(Model, self).__init__()
        self.lin1 = torch.nn.Linear(12, layer_1)
        self.lin2 = torch.nn.Linear(layer_1, layer_2)
        self.lin3 = torch.nn.Linear(layer_2, layer_3)
        self.lin4 = torch.nn.Linear(layer_3, 1)
        self.selu = torch.nn.SELU()

    def forward(self, x):
        x = self.selu(self.lin1(x))
        x = self.selu(self.lin2(x))
        x = self.selu(self.lin3(x))
        x = self.lin4(x)
        return x


def load_data():
    cd = os.getcwd()
    # resolve the data directory relative to the working directory
    x_eicu = pd.read_csv(os.path.join(cd, '..', 'data', 'x_eicu.csv'))
    y_eicu = pd.read_csv(os.path.join(cd, '..', 'data', 'y_eicu.csv'))
    mimic = pd.read_csv(os.path.join(cd, '..', 'data', 'mimic.csv'))
    assert np.all(x_eicu['patientunitstayid'].to_numpy() == y_eicu['patientunitstayid'].to_numpy())
    feature_list = ['lactate', 'oobventday1', 'eyes', 'motor', 'verbal', 'albumin_x',
                    'age', 'creatinine_x', 'BUN', 'PT - INR', 'WBC x 1000', 'meanbp']
    feature_list_mimic = ['Lactate', 'firstdayvent', 'gcseyes', 'gcsmotor', 'gcsverbal', 'Albumin',
                          'Age', 'Creatinine', 'BUN', 'INR', 'WBC', 'MAP']
    x_eicu = x_eicu[feature_list].to_numpy()
    y_eicu = y_eicu['actualicumortality'].to_numpy()
    x_mimic = mimic[feature_list_mimic].to_numpy()
    y_mimic = mimic['Mortality'].to_numpy()
    x = np.vstack((x_eicu, x_mimic))
    y = np.hstack((y_eicu, y_mimic))
    return x, y


def main():
    wandb.init(project='mortality-tool-newfeats')
    args = parse_args()
    x, y = load_data()
    kfold = StratifiedKFold(n_splits=10)
    logits_all = []
    labels_all = []
    counter = 1
    for train_index, test_index in kfold.split(x, y):
        x_train, y_train = x[train_index], y[train_index]
        x_test, y_test = x[test_index], y[test_index]
        imputer = IterativeImputer()
        scaler = StandardScaler()
        x_train = scaler.fit_transform(imputer.fit_transform(x_train))
        x_test = scaler.transform(imputer.transform(x_test))
        x_train, y_train = torch.from_numpy(x_train).float().to('cuda:0'), torch.from_numpy(y_train).float().to('cuda:0')
        model = Model(args.layer_1, args.layer_2, args.layer_3)
        criterion = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor([14.80], device='cuda:0'))
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay,
                                    momentum=args.momentum, nesterov=True)
        model.train()
        model.to('cuda:0')
        for epoch in range(args.no_epoch):
            optimizer.zero_grad()
            outputs = model.forward(x_train)
            loss = criterion(outputs, y_train.view(-1,
1)) loss.backward() optimizer.step() model.eval() outputs = model.forward(torch.from_numpy(x_test).float().to('cuda:0')) logits = torch.sigmoid(outputs).detach().cpu().numpy() logits_all.append(logits.reshape(-1)) labels_all.append(y_test) print('Iter {}/10 done'.format(counter)) counter += 1 logits_all = np.hstack(logits_all) labels_all = np.hstack(labels_all) tn, fp, fn, tp = confusion_matrix(labels_all, np.round(logits_all)).ravel() accuracy = (tp + tn) / (tp + tn + fp + fn) precision = tp / (tp + fp) sensitivity = tp / (tp + fn) specificity = tn / (tn + fp) roc_auc = roc_auc_score(labels_all, logits_all) prc_auc = average_precision_score(labels_all, logits_all) balanced_acc = balanced_accuracy_score(labels_all, np.round(logits_all)) pos_likelihood_ratio = sensitivity / (1 - specificity) neg_likelihood_ratio = (1 - sensitivity) / specificity class_names = ['ALIVE', 'EXPIRED'] wandb.log({'accuracy': accuracy, 'precision': precision, 'sensitivity': sensitivity, 'specificitiy': specificity, 'roc_auc': roc_auc, 'prc_auc': prc_auc, 'balanced_accuracy': balanced_acc, 'neg_likelihood_ratio': neg_likelihood_ratio, 'pos_likelihood_ratio': pos_likelihood_ratio}) if __name__ == '__main__': main()
jrepifano/mortality-tool
wandb_training/d_train_wandb.py
d_train_wandb.py
py
5,410
python
en
code
0
github-code
6
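The hard-coded pos_weight=14.80 above encodes the class imbalance; such a value is normally derived as the negative/positive label ratio. A sketch of that derivation (toy labels, not the study data):

# Sketch: deriving BCEWithLogitsLoss pos_weight from label counts.
import numpy as np
import torch

y = np.array([0, 0, 0, 1])  # toy labels
pos_weight = (y == 0).sum() / (y == 1).sum()
criterion = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor([float(pos_weight)]))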
33670502801
import sqlite3 import sys from PyQt6.QtWidgets import QApplication, QLabel, QWidget, QGridLayout, \ QLineEdit, QPushButton, QMainWindow, QTableWidget, QTableWidgetItem, QDialog, \ QVBoxLayout, QComboBox, QToolBar, QStatusBar, QMessageBox from PyQt6.QtGui import QAction, QIcon from PyQt6.QtCore import Qt # Database connection class class DatabaseConnection: def __init__(self, database_file="database.db"): self.database_file = database_file def connect(self): # Establish connection to database and create cursor connection = sqlite3.connect(self.database_file) cursor = connection.cursor() # return connection and cursor, destructure variables in creation of instances return connection, cursor def close_connection(self, connection, cursor): # Commit changes to db and close connections, refresh app table return connection.commit(), cursor.close(), connection.close(), student_management_sys.load_data() # App Main Window class class MainWindow(QMainWindow): def __init__(self): super().__init__() self.setWindowTitle("Student Management System") self.setMinimumSize(600, 400) # Menu items file_menu_item = self.menuBar().addMenu("&File") utility_menu_item = self.menuBar().addMenu("&Utility") help_menu_item = self.menuBar().addMenu("&Help") # Add student menu item and action with toolbar icon binding to action add_student_action = QAction(QIcon("icons/add.png"), "Add Student", self) add_student_action.triggered.connect(self.insert) file_menu_item.addAction(add_student_action) # About menu item and action about_action = QAction("About", self) help_menu_item.addAction(about_action) about_action.triggered.connect(self.about) # SEARCH item and action with toolbar icon binding to action search_action = QAction(QIcon("icons/search.png"), "Search", self) search_action.triggered.connect(self.search) utility_menu_item.addAction(search_action) # Toolbar widget and elements, toolbar is also movable toolbar = QToolBar() toolbar.setMovable(True) self.addToolBar(toolbar) toolbar.addAction(add_student_action) toolbar.addAction(search_action) # Statusbar widget and elements self.statusbar = QStatusBar() self.setStatusBar(self.statusbar) # QTableWidget attributes self.table = QTableWidget() self.table.setColumnCount(4) self.table.setHorizontalHeaderLabels(("Id", "Name", "Course", "Mobile")) # To hide default vertical numbers not associated with SQL database self.table.verticalHeader().setVisible(False) # Detect if cell is clicked self.table.cellClicked.connect(self.cell_clicked) # Set a center layout widget to QTableWidget instance self.setCentralWidget(self.table) # Cell clicked method def cell_clicked(self): # Edit button edit_button = QPushButton("Edit Record") edit_button.clicked.connect(self.edit) # Delete button delete_button = QPushButton("Delete Record") delete_button.clicked.connect(self.delete) # Find children of statusbar widgets and remove appending children # Prevent duplications of widgets for every cell click children = self.findChildren(QPushButton) if children: for child in children: self.statusbar.removeWidget(child) # Add widgets after cell is clicked self.statusbar.addWidget(edit_button) self.statusbar.addWidget(delete_button) # Load SQL Database data in PyQt def load_data(self): # Connect SQL database connection, cursor = DatabaseConnection().connect() results = connection.execute("SELECT * FROM students") # Initialize table number to 0 self.table.setRowCount(0) # Iterate through row numbers for row_number, row_data in enumerate(results): # Every index insert a row cell with a row number 
self.table.insertRow(row_number) # Iterate through column numbers for column_number, column_data in enumerate(row_data): # Every index of a row number and column number add column data self.table.setItem(row_number, column_number, QTableWidgetItem(str(column_data))) # Close the database connection connection.close() # Insert new data method call def insert(self): dialog = InsertDialog() dialog.exec() def search(self): search_dialog = SearchDialog() search_dialog.exec() def edit(self): edit_dialog = EditDialog() edit_dialog.exec() def delete(self): delete_dialog = DeleteDialog() delete_dialog.exec() def about(self): about_dialog = AboutDialog() about_dialog.exec() # Dialog Attributes for Insert class InsertDialog(QDialog): def __init__(self): super().__init__() # Set Window Attributes self.setWindowTitle("Insert Student Data") self.setFixedWidth(300) self.setFixedHeight(300) layout = QVBoxLayout() # Add Student Name widget self.student_name = QLineEdit() self.student_name.setPlaceholderText("Name") layout.addWidget(self.student_name) # Add Course ComboBox widget self.course_name = QComboBox() courses = ["Biology", "Math", "Astronomy", "Physics"] self.course_name.addItems(courses) layout.addWidget(self.course_name) # Add Mobile Number widget self.mobile_number = QLineEdit() self.mobile_number.setPlaceholderText("Mobile Number") layout.addWidget(self.mobile_number) # Submit button submit_btn = QPushButton("Register") submit_btn.clicked.connect(self.add_student) layout.addWidget(submit_btn) self.setLayout(layout) # Add Student method def add_student(self): # Reference to field values stored in variables name = self.student_name.text() course = self.course_name.itemText(self.course_name.currentIndex()) mobile = self.mobile_number.text() # Connect to database and create cursor connection, cursor = DatabaseConnection().connect() # Use the cursor to destructure and INSERT reference variables into related db columns cursor.execute("INSERT INTO students (name, course, mobile) VALUES (?, ?, ?)", (name, course, mobile)) # Commit changes, Close connection to database and cursor DatabaseConnection().close_connection(connection, cursor) # Close window after entry self.close() # Dialog Attributes for Search class SearchDialog(QDialog): def __init__(self): super().__init__() # Set Window Attributes self.setWindowTitle("Search Student") self.setFixedWidth(300) self.setFixedHeight(300) search_layout = QVBoxLayout() # Search Student Name widget self.search_student_name = QLineEdit() self.search_student_name.setPlaceholderText("Name") search_layout.addWidget(self.search_student_name) # Search button search_btn = QPushButton("Search") search_btn.clicked.connect(self.search_student) search_layout.addWidget(search_btn) self.setLayout(search_layout) # Search Student method def search_student(self): # Reference to field values stored in variables name = self.search_student_name.text() # Connect to database and create cursor connection, cursor = DatabaseConnection().connect() # Select all fields that contained query of student name in database result = cursor.execute("SELECT * FROM students WHERE name = ?", (name, )) rows = list(result) print(rows) # Select all fields in Main window table and find match of student name items = student_management_sys.table.findItems(name, Qt.MatchFlag.MatchFixedString) # Highlight all names that match query and print item row to console for item in items: print(item) student_management_sys.table.item(item.row(), 1).setSelected(True) # Close cursor and connection to db cursor.close() 
connection.close() # Close dialog after search self.close() # Dialog Attributes for Edit class EditDialog(QDialog): def __init__(self): super().__init__() # Set Window Attributes self.setWindowTitle("Update Student Data") self.setFixedWidth(300) self.setFixedHeight(300) layout = QVBoxLayout() # Get table row and column of student to edit index = student_management_sys.table.currentRow() # Get ID from selected Row self.student_id = student_management_sys.table.item(index, 0).text() # Get student name student_name = student_management_sys.table.item(index, 1).text() # Get Course name course_name = student_management_sys.table.item(index, 2).text() # Get Mobile number mobile_number = student_management_sys.table.item(index, 3).text() # Add Student Name widget self.student_name = QLineEdit(student_name) self.student_name.setPlaceholderText("Name") layout.addWidget(self.student_name) # Add Course ComboBox widget self.course_name = QComboBox() courses = ["Biology", "Math", "Astronomy", "Physics"] self.course_name.addItems(courses) self.course_name.setCurrentText(course_name) layout.addWidget(self.course_name) # Add Mobile Number widget self.mobile_number = QLineEdit(mobile_number) self.mobile_number.setPlaceholderText("Mobile Number") layout.addWidget(self.mobile_number) # Submit button submit_btn = QPushButton("Update") submit_btn.clicked.connect(self.update_student) layout.addWidget(submit_btn) self.setLayout(layout) # Update method def update_student(self): connection, cursor = DatabaseConnection().connect() # Destructure table rows and UPDATE with new values from references in edit fields cursor.execute("UPDATE students SET name = ?, course = ?, mobile = ? WHERE id = ?", (self.student_name.text(), self.course_name.itemText(self.course_name.currentIndex()), self.mobile_number.text(), self.student_id)) # Commit changes, Close connection to database and cursor DatabaseConnection().close_connection(connection, cursor) # Close dialog after update self.close() # Dialog Attributes for Delete class DeleteDialog(QDialog): def __init__(self): super().__init__() # Set Window Attributes self.setWindowTitle("Delete Student Data") layout = QGridLayout() confirmation = QLabel("Are you sure you want to delete?") yes = QPushButton("Yes") no = QPushButton("No") layout.addWidget(confirmation, 0, 0, 1, 2) layout.addWidget(yes, 1, 0) layout.addWidget(no, 1, 1) self.setLayout(layout) yes.clicked.connect(self.delete_student) no.clicked.connect(self.close) # Delete Method def delete_student(self): # Connect to database connection, cursor = DatabaseConnection().connect() # Get table row and column of student to edit index = student_management_sys.table.currentRow() # Get ID from selected Row student_id = student_management_sys.table.item(index, 0).text() # Execute SQL DELETE query using student ID cursor.execute("DELETE FROM students WHERE id = ?", (student_id, )) # Commit changes, Close connection to database and cursor DatabaseConnection().close_connection(connection, cursor) # Create a message box to relay deletion was successful confirmation_widget = QMessageBox() confirmation_widget.setWindowTitle("Success") confirmation_widget.setText("The Record Deleted Successfully!") confirmation_widget.exec() # Close delete dialog window self.close() # About Inheriting from 'QMessageBox' simple child version of a QDialog class AboutDialog(QMessageBox): def __init__(self): super().__init__() self.setWindowTitle("About") # Content for about section content = "I built this academic management app as I learned PyQt6 and it's 
component libraries. " \ "I used object oriented architecture to keep my code organized and scalable." \ " A SQL database was used store records and 'CRUD' methods were used to managed it's contents." # Use set text to content self.setText(content) if __name__ == "__main__": app = QApplication(sys.argv) student_management_sys = MainWindow() student_management_sys.show() student_management_sys.load_data() sys.exit(app.exec())
KelvinBrannonJr/Student_Mangement_System
main.py
main.py
py
13,372
python
en
code
0
github-code
6
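The app above queries a students table but never creates it; a plausible bootstrap script follows, with the schema inferred from those queries rather than taken from the source:

# Sketch: creating the database.db schema the app's queries imply.
import sqlite3

conn = sqlite3.connect("database.db")
conn.execute("""CREATE TABLE IF NOT EXISTS students (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT,
    course TEXT,
    mobile TEXT)""")
conn.commit()
conn.close()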
35484437119
def address_book():
    a = {}
    while True:
        print("Name - ")
        name = input()
        if name != "q":
            print("Number - ")
            phone = input()
            # expects the format +X-XXX-XXX-XX-XX (16 characters);
            # the length check guards against an IndexError on short input
            if (len(phone) == 16 and phone[0] == "+" and phone[2] == "-" and phone[6] == "-"
                    and phone[10] == "-" and phone[13] == "-"):
                if phone[1:].replace("-", "").isdigit():
                    a[name] = phone
        else:
            break
    return a


print(address_book())
TaffetaEarth/homework_python
0610/hw.py
hw.py
py
480
python
en
code
0
github-code
6
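The fixed-index format check above can be expressed more robustly as a regular expression; a sketch matching the same +X-XXX-XXX-XX-XX pattern:

# Sketch: regex equivalent of the positional phone-format checks.
import re

PHONE_RE = re.compile(r"^\+\d-\d{3}-\d{3}-\d{2}-\d{2}$")
assert PHONE_RE.match("+7-999-123-45-67")
assert not PHONE_RE.match("79991234567")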
10648619314
# -*- coding: utf-8 -*-
"""
Created on Wed May  6 15:04:40 2020

@author: Rijk

Scipy signal sos filter toolbox test
"""

import numpy as np
from scipy import signal
import matplotlib.pyplot as plt

plt.close('all')

b, a = signal.butter(4, 100, 'low', analog=True)
w, h = signal.freqs(b, a)

plt.figure()
plt.semilogx(w, 20 * np.log10(abs(h)))
plt.title('Butterworth filter frequency response')
plt.xlabel('Frequency [radians / second]')
plt.ylabel('Amplitude [dB]')
plt.margins(0, 0.1)
plt.grid(which='both', axis='both')
plt.axvline(100, color='green')  # cutoff frequency
#plt.show()

t = np.linspace(0, 1, 1000, False)  # 1 second
sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.plot(t, sig)
ax1.set_title('10 Hz and 20 Hz sinusoids')
ax1.axis([0, 1, -2, 2])

sos = signal.butter(10, 15, 'hp', fs=1000, output='sos')
filtered = signal.sosfilt(sos, sig)
ax2.plot(t, filtered)
ax2.set_title('After 15 Hz high-pass filter')
ax2.axis([0, 1, -2, 2])
ax2.set_xlabel('Time [seconds]')
plt.tight_layout()
plt.show()

# Compare FFT before and after
fft_before = np.fft.fft(sig)
fft_after = np.fft.fft(filtered)

sample_time = np.mean(np.diff(t))
f = np.fft.fftfreq(len(t), sample_time)
half = int(len(t)/2)

plt.figure()
# the FFT output is complex, so plot its magnitude
plt.plot(f[1:half], np.abs(fft_before[1:half]), label='Original')
#plt.plot(f[1:half], np.abs(fft_after[1:half]), label='Filtered')
plt.legend()

## Measurement data filter
#f_axis = 1.12
#nyquist_f = f_axis
#
## Define filter
#lower_f = 0.025
#upper_f = 0.065
#bandpass_f = 2*np.pi * np.array([lower_f, upper_f]) / nyquist_f
#butter_low = signal.butter(2, lower_f, btype='lowpass', output='sos')
#butter_high = signal.butter(2, upper_f, btype='lowpass', output='sos')
#
#b, a = signal.butter(2, bandpass_f, btype='bandstop', output='ba')
#w, h = signal.freqz(b, a)
#
#plt.figure()
#plt.plot(w, 20 * np.log10(abs(h)))
rehogenbirk/MEP_control_software
Measurements/20200324 WO3196dev9/Hydrogen R_T correction/0324_1904_WO3196dev9_H2ToAir/signal_toolbox_test.py
signal_toolbox_test.py
py
1,897
python
en
code
0
github-code
6
21689221352
import json import os import subprocess from collections import OrderedDict from copy import deepcopy from sys import platform from tabulate import tabulate from openwpm.config import ConfigEncoder def parse_http_stack_trace_str(trace_str): """Parse a stacktrace string and return an array of dict.""" stack_trace = [] frames = trace_str.split("\n") for frame in frames: try: func_name, rest = frame.split("@", 1) rest, async_cause = rest.rsplit(";", 1) filename, line_no, col_no = rest.rsplit(":", 2) stack_trace.append( { "func_name": func_name, "filename": filename, "line_no": line_no, "col_no": col_no, "async_cause": async_cause, } ) except Exception as exc: print("Exception parsing the stack frame %s %s" % (frame, exc)) return stack_trace def get_firefox_binary_path(): """ If ../../firefox-bin/firefox-bin or os.environ["FIREFOX_BINARY"] exists, return it. Else, throw a RuntimeError. """ if "FIREFOX_BINARY" in os.environ: firefox_binary_path = os.environ["FIREFOX_BINARY"] if not os.path.isfile(firefox_binary_path): raise RuntimeError( "No file found at the path specified in " "environment variable `FIREFOX_BINARY`." "Current `FIREFOX_BINARY`: %s" % firefox_binary_path ) return firefox_binary_path root_dir = os.path.dirname(__file__) + "/../.." if platform == "darwin": firefox_binary_path = os.path.abspath( root_dir + "/Nightly.app/Contents/MacOS/firefox-bin" ) else: firefox_binary_path = os.path.abspath(root_dir + "/firefox-bin/firefox-bin") if not os.path.isfile(firefox_binary_path): raise RuntimeError( "The `firefox-bin/firefox-bin` binary is not found in the root " "of the OpenWPM directory (did you run the install script " "(`install.sh`)?). Alternatively, you can specify a binary " "location using the OS environment variable FIREFOX_BINARY." ) return firefox_binary_path def get_version(): """Return OpenWPM version tag/current commit and Firefox version""" try: openwpm = subprocess.check_output( ["git", "describe", "--tags", "--always"] ).strip() except subprocess.CalledProcessError: ver = os.path.join(os.path.dirname(__file__), "../../VERSION") with open(ver, "r") as f: openwpm = f.readline().strip() firefox_binary_path = get_firefox_binary_path() try: firefox = subprocess.check_output([firefox_binary_path, "--version"]) except subprocess.CalledProcessError as e: raise RuntimeError("Firefox not found. " " Did you run `./install.sh`?") from e ff = firefox.split()[-1] return openwpm, ff def get_configuration_string(manager_params, browser_params, versions): """Construct a well-formatted string for {manager,browser}params Constructs a pretty printed string of all parameters. The config dictionaries are split to try to avoid line wrapping for reasonably size terminal windows. 
""" config_str = "\n\nOpenWPM Version: %s\nFirefox Version: %s\n" % versions config_str += "\n========== Manager Configuration ==========\n" config_str += json.dumps( manager_params.to_dict(), sort_keys=True, indent=2, separators=(",", ": "), cls=ConfigEncoder, ) config_str += "\n\n========== Browser Configuration ==========\n" print_params = [deepcopy(x.to_dict()) for x in browser_params] table_input = list() profile_dirs = OrderedDict() archive_dirs = OrderedDict() js_config = OrderedDict() profile_all_none = archive_all_none = True for item in print_params: browser_id = item["browser_id"] # Update print flags if item["seed_tar"] is not None: profile_all_none = False if item["profile_archive_dir"] is not None: archive_all_none = False # Separate out long profile directory strings profile_dirs[browser_id] = str(item.pop("seed_tar")) archive_dirs[browser_id] = str(item.pop("profile_archive_dir")) js_config[browser_id] = item.pop("cleaned_js_instrument_settings") # Copy items in sorted order dct = OrderedDict() dct["browser_id"] = browser_id for key in sorted(item.keys()): dct[key] = item[key] table_input.append(dct) key_dict = OrderedDict() counter = 0 for key in table_input[0].keys(): key_dict[key] = counter counter += 1 config_str += "Keys:\n" config_str += json.dumps(key_dict, indent=2, separators=(",", ": ")) config_str += "\n\n" config_str += tabulate(table_input, headers=key_dict) config_str += "\n\n========== JS Instrument Settings ==========\n" config_str += json.dumps(js_config, indent=None, separators=(",", ":")) config_str += "\n\n========== Input profile tar files ==========\n" if profile_all_none: config_str += " No profile tar files specified" else: config_str += json.dumps(profile_dirs, indent=2, separators=(",", ": ")) config_str += "\n\n========== Output (archive) profile dirs ==========\n" if archive_all_none: config_str += " No profile archive directories specified" else: config_str += json.dumps(archive_dirs, indent=2, separators=(",", ": ")) config_str += "\n\n" return config_str
openwpm/OpenWPM
openwpm/utilities/platform_utils.py
platform_utils.py
py
5,733
python
en
code
1,286
github-code
6
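parse_http_stack_trace_str above expects frames of the form func@file:line:col;asyncCause; the rsplit calls keep colons inside URLs intact. A usage sketch with a synthetic frame (not a real capture):

# Sketch: feeding parse_http_stack_trace_str a synthetic frame string.
trace = "onLoad@https://example.com/app.js:10:5;null"
print(parse_http_stack_trace_str(trace))
# [{'func_name': 'onLoad', 'filename': 'https://example.com/app.js',
#   'line_no': '10', 'col_no': '5', 'async_cause': 'null'}]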
3681349954
import json from dataclasses import dataclass, field from typing import Union import requests @dataclass class ManyChatAPI: api_base_url = 'https://api.manychat.com/fb/' api_key: str psid: str headers: dict = field(init=False) def __post_init__(self): self.headers = { 'Accept': 'application/json', 'Content-Type': 'application/json', 'Authorization': f'Bearer {self.api_key}', } def get_user_info(self) -> dict: params = { 'subscriber_id': self.psid, } try: response = requests.get( url=f'{self.api_base_url}subscriber/getInfo', headers=self.headers, params=params, timeout=5, ) except Exception as e: results = { 'status': 'error', 'message': e, } else: results = json.loads(response.text) return results def send_content(self, messages: list) -> dict: params = { 'subscriber_id': self.psid, 'data': { 'version': 'v2', 'content': { 'messages': [ { 'type': 'text', 'text': message, } for message in messages ] } }, } try: response = requests.post( url=f'{self.api_base_url}sending/sendContent', headers=self.headers, data=json.dumps(params), timeout=5, ) except Exception as e: results = { 'status': 'error', 'message': e, } else: results = json.loads(response.text) return results def send_flow(self, flow_ns: str) -> dict: params = { 'subscriber_id': self.psid, 'flow_ns': flow_ns, } try: response = requests.post( url=f'{self.api_base_url}sending/sendFlow', headers=self.headers, data=json.dumps(params), timeout=5, ) except Exception as e: results = { 'status': 'error', 'message': e, } else: results = json.loads(response.text) return results def set_custom_field_by_name(self, field_name: str, field_value: Union[str, int, bool]) -> dict: params = { 'subscriber_id': self.psid, 'field_name': field_name, 'field_value': field_value, } try: response = requests.post( url=f'{self.api_base_url}subscriber/setCustomFieldByName', headers=self.headers, data=json.dumps(params), timeout=5, ) except Exception as e: results = { 'status': 'error', 'message': e, } else: results = json.loads(response.text) return results
daiangan/manychat-dialogflow-connector
utils/manychat_helpers.py
manychat_helpers.py
py
3,304
python
en
code
5
github-code
6
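A hedged usage sketch of the wrapper above; the API key and subscriber id are placeholders, and each method returns a parsed-JSON dict (or a {'status': 'error', ...} dict on request failure):

# Sketch: driving the ManyChatAPI dataclass above.
api = ManyChatAPI(api_key="YOUR_API_KEY", psid="1234567890")
info = api.get_user_info()
if info.get("status") != "error":
    api.send_content(messages=["Hello!", "Thanks for subscribing."])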
11079705264
#------------------------------------------------
#other < supper_code
#Dylan Friesen
#October 30th, 2019
#------------------------------------------------

#--------------Dictionaries--------------------
supper = {}

#------------------Definitions-----------------
def add_stuff_s():
    #used to add stuff to the supper list
    stop = False
    while stop == False:
        stuff = input("What do you want to add to your shopping list?").lower()
        if stuff == "stop":
            stop = True
        elif stuff != "stop":
            value = input("How many or description?")
            print("adding", value, stuff)
            supper[stuff] = value

def remove_stuff_s():
    #used to remove stuff in the supper list
    stop = False
    while stop == False:
        stuff = input("What do you want to take off of your shopping list?").lower()
        if stuff == "stop":
            stop = True
        elif stuff != "stop":
            if stuff in supper:
                print("removing", stuff)
                del supper[stuff]
            else:
                print("You don't have that on your list")

def view_stuff_s():
    for (stuff, value) in supper.items():
        print("you have", value, stuff, "on your shopping list")

#Main
def main_s():
    #main code specified for just deciding what it wanted on the supper list
    play = 1
    print("Supper part of list")
    while play == 1:
        print("")
        print("")
        choice = input("Would you like to Add, Remove, View, or Stop?").lower()
        if choice == "add":
            add_stuff_s()
        elif choice == "remove":
            remove_stuff_s()
        elif choice == "view":
            view_stuff_s()
        elif choice == "stop":
            play = 0
            print("Onto Desserts")
        else:
            print("Sorry, I didn't understand that.")
Gobbo306/libraries-summative
other/supper_code.py
supper_code.py
py
1,955
python
en
code
0
github-code
6
22626759020
import pandas as pd from math import sqrt import random import os import sys LIB_PATH = os.path.join(os.path.dirname(__file__), '../lib') sys.path.append(LIB_PATH) from RandomForest import * def main(): if (len(sys.argv) < 5): print("Usage: python3 %s <dataset-csv> <separator> <target-attr> <ntree>" % sys.argv[0]) exit(-1) datasetFile = sys.argv[1] separator = sys.argv[2] targetAttr = sys.argv[3] ntree = int(sys.argv[4]) random.seed(1) # Read dataset D = pd.read_csv(datasetFile, sep=separator) # Get the number of possible values for each attribute in dataset attrsNVals = D.nunique() # Build Random Forest forest = RandomForest(D, targetAttr, attrsNVals, ntree, attrsSampleFn=sqrt, graph=True) if ntree <= 6: forest.render() # Test classification instance = D.iloc[200] print("> Test instance:") print(instance) print("> Prediction: %s" % forest.classify(instance)) if __name__ == "__main__": main()
rubensrech/ml-random-forest
test/randomForest.py
randomForest.py
py
1,019
python
en
code
0
github-code
6
20194791845
# -*- coding: utf-8 -*- import datetime import json import sys from threading import Thread from resources.lib.common import tools from resources.lib.indexers.trakt import TraktAPI from resources.lib.modules import database from resources.lib.modules.trakt_sync.shows import TraktSyncDatabase from resources.lib.modules.trakt_sync.hidden import TraktSyncDatabase as HiddenDatabase try: from Queue import Queue except: from queue import Queue sysaddon = sys.argv[0] try: syshandle = int(sys.argv[1]) except: syshandle = '' trakt = TraktAPI() language_code = tools.get_language_code() trakt_database = TraktSyncDatabase() hidden_database = HiddenDatabase() class Menus: def __init__(self): self.itemList = [] self.threadList = [] self.direct_episode_threads = [] self.title_appends = tools.getSetting('general.appendtitles') self.task_queue = Queue(40) ###################################################### # MENUS ###################################################### def onDeckShows(self): hidden_shows = hidden_database.get_hidden_items('progress_watched', 'shows') trakt_list = trakt.json_response('sync/playback/episodes', limit=True) if trakt_list is None: return trakt_list = [i for i in trakt_list if i['show']['ids']['trakt'] not in hidden_shows] trakt_list = sorted(trakt_list, key=lambda i: tools.datetime_workaround(i['paused_at'][:19], format="%Y-%m-%dT%H:%M:%S", date_only=False), reverse=True) filter_list = [] showList = [] sort_list = [] for i in trakt_list: if i['show']['ids']['trakt'] not in filter_list: if int(i['progress']) != 0: showList.append(i) filter_list.append(i['show']['ids']['trakt']) sort_list.append(i['show']['ids']['trakt']) sort = {'type': 'showInfo', 'id_list': sort_list} self.mixedEpisodeBuilder(showList, sort=sort) tools.closeDirectory('tvshows') def discoverShows(self): tools.addDirectoryItem(tools.lang(32007), 'showsPopular&page=1', '', '') if tools.getSetting('trakt.auth') is not '': tools.addDirectoryItem(tools.lang(32008), 'showsRecommended', '', '') # tools.addDirectoryItem('This Years Most Popular', '', '', '') tools.addDirectoryItem(tools.lang(32009), 'showsTrending&page=1', '', '') tools.addDirectoryItem(tools.lang(32067), 'showsNew', '', '') tools.addDirectoryItem(tools.lang(32010), 'showsPlayed&page=1', '', '') tools.addDirectoryItem(tools.lang(32011), 'showsWatched&page=1', '', '') tools.addDirectoryItem(tools.lang(32012), 'showsCollected&page=1', '', '') tools.addDirectoryItem(tools.lang(32013), 'showsAnticipated&page=1', '', '') tools.addDirectoryItem(tools.lang(32014), 'showsUpdated&page=1', '', '') tools.addDirectoryItem(tools.lang(40121), 'showsNetworks', '', '') tools.addDirectoryItem(tools.lang(40123), 'showYears', '', '') tools.addDirectoryItem(tools.lang(32062), 'tvGenres', '', '') tools.addDirectoryItem(tools.lang(40151), 'showsByActor', '', '') # show genres is now labeled as tvGenres to support genre icons in skins if tools.getSetting('searchHistory') == 'false': tools.addDirectoryItem(tools.lang(32016), 'showsSearch', '', '') else: tools.addDirectoryItem(tools.lang(32016), 'showsSearchHistory', '', '') tools.closeDirectory('addons') def myShows(self): tools.addDirectoryItem(tools.lang(32063), 'onDeckShows', None, None) tools.addDirectoryItem(tools.lang(32017), 'showsMyCollection', '', '') tools.addDirectoryItem(tools.lang(32018), 'showsMyWatchlist', '', '') tools.addDirectoryItem('Next Up', 'showsNextUp', '', '') tools.addDirectoryItem('Upcoming Episodes', 'myUpcomingEpisodes', '', '') tools.addDirectoryItem('Unfinished Shows in Collection', 
'showsMyProgress', '', '') tools.addDirectoryItem('Recent Episodes', 'showsMyRecentEpisodes', '', '') tools.addDirectoryItem('My Show Lists', 'myTraktLists&actionArgs=shows', '', '') tools.closeDirectory('addons') def myShowCollection(self): trakt_list = trakt_database.get_collected_episodes() trakt_list = [i for i in trakt_list if i is not None] trakt_list = list(set([i['show_id'] for i in trakt_list])) trakt_list = [{'ids': {'trakt': i}} for i in trakt_list] trakt_list = [i for i in trakt_list if i is not None] if trakt_list is None: return self.showListBuilder(trakt_list) tools.closeDirectory('tvshows', sort='title') def myShowWatchlist(self): trakt_list = trakt.json_response('users/me/watchlist/shows', limit=False) if trakt_list is None: return try: sort_by = trakt.response_headers['X-Sort-By'] sort_how = trakt.response_headers['X-Sort-How'] trakt_list = trakt.sort_list(sort_by, sort_how, trakt_list, 'show') except: tools.log('Failed to sort trakt list by response headers', 'error') pass self.showListBuilder(trakt_list) tools.closeDirectory('tvshows') def myProgress(self): collected_episodes = trakt_database.get_collected_episodes() collection = list(set([i['show_id'] for i in collected_episodes])) if len(collection) == 0: return show_dicts = [] for i in collection: show_dicts.append({'show': {'ids': {'trakt': i}}}) show_meta_list = trakt_database.get_show_list(show_dicts) unfinished = [] for show in show_meta_list: if show['info']['playcount'] == 0: unfinished.append(show) self.showListBuilder(unfinished) tools.closeDirectory('tvshows', sort='title') def newShows(self): hidden = hidden_database.get_hidden_items('recommendations', 'shows') datestring = datetime.datetime.today() - datetime.timedelta(days=29) trakt_list = database.get(trakt.json_response, 12, 'calendars/all/shows/new/%s/30?languages=%s' % (datestring.strftime('%d-%m-%Y'), language_code)) if trakt_list is None: return # For some reason trakt messes up their list and spits out tons of duplicates so we filter it duplicate_filter = [] temp_list = [] for i in trakt_list: if not i['show']['ids']['tvdb'] in duplicate_filter: duplicate_filter.append(i['show']['ids']['tvdb']) temp_list.append(i) trakt_list = temp_list trakt_list = [i for i in trakt_list if i['show']['ids']['trakt'] not in hidden] if len(trakt_list) > 40: trakt_list = trakt_list[:40] self.showListBuilder(trakt_list) tools.closeDirectory('tvshows') def myNextUp(self, ): watched_shows = trakt_database.get_watched_shows() hidden_shows = hidden_database.get_hidden_items('progress_watched', 'shows') watched_shows = [i for i in watched_shows if i['trakt_id'] not in hidden_shows] watched_episodes = trakt_database.get_watched_episodes() self._start_queue_workers() for show in watched_shows: self.task_queue.put((self._get_next_episode_to_watch, (show, watched_episodes)), block=True) self._finish_queue_workers() if tools.getSetting('nextup.sort') == '1': watched_list = trakt.json_response('users/me/watched/shows') watched_list = sorted(watched_list, key=lambda i: i['last_watched_at'], reverse=True) watched_list = [i['show']['ids']['trakt'] for i in watched_list] sort = {'type': 'showInfo', 'id_list': watched_list} else: sort = None episodes = self.itemList self.itemList = [] self.mixedEpisodeBuilder(episodes, sort=sort, hide_watched=True) tools.closeDirectory('tvshows') def _get_next_episode_to_watch(self, show_db_dict, watched_episodes): try: show_id = show_db_dict['trakt_id'] if show_db_dict['kodi_meta'] == {}: show_db_dict['kodi_meta'] = 
trakt_database.get_single_show(show_id) watched_episodes = [i for i in watched_episodes if i['show_id'] == show_id] watched_episodes = sorted(watched_episodes, key=lambda episode: episode['season'], reverse=True) season = watched_episodes[0]['season'] season_meta = trakt_database.get_single_season(show_id, season) watched_episodes = [i for i in watched_episodes if i['season'] == season] watched_episodes = sorted(watched_episodes, key=lambda episode: episode['number'], reverse=True) last_watched_episode = watched_episodes[0]['number'] next_episode = int(watched_episodes[0]['number']) + 1 if season_meta is None: tools.log('Could not acquire season meta information for %s Season %s' % (show_id, season), 'error') return if season_meta['info']['episode_count'] == len(watched_episodes) \ or season_meta['info']['episode_count'] == last_watched_episode: if int(show_db_dict['kodi_meta']['info']['season_count']) > season: season += 1 next_episode = 1 episode_dict = {'show': {'ids': {'trakt': show_id}}, 'episode': {'season': season, 'number': next_episode}} self.itemList.append(episode_dict) except KeyError: import traceback traceback.print_exc() pass except: import traceback traceback.print_exc() def myRecentEpisodes(self): hidden_shows = hidden_database.get_hidden_items('calendar', 'shows') datestring = datetime.datetime.today() - datetime.timedelta(days=13) trakt_list = database.get(trakt.json_response, 12, 'calendars/my/shows/%s/14' % datestring.strftime('%d-%m-%Y')) if trakt_list is None: return trakt_list = [i for i in trakt_list if i['show']['ids']['trakt'] not in hidden_shows] self.mixedEpisodeBuilder(trakt_list) tools.closeDirectory('episodes') def myUpcomingEpisodes(self): tomorrow = (datetime.date.today() + datetime.timedelta(days=1)).strftime('%Y-%m-%d') upcoming_episodes = database.get(trakt.json_response, 24, 'calendars/my/shows/%s/30' % tomorrow) sort = sorted(upcoming_episodes, key=lambda i: i['first_aired']) sort = [i['episode']['ids']['trakt'] for i in sort] sort = {'type': None, 'id_list': sort} self.mixedEpisodeBuilder(upcoming_episodes, sort=sort, hide_watched=False, hide_unaired=False, prepend_date=True) tools.closeDirectory('episodes') def showsNetworks(self): trakt_list = database.get(trakt.json_response, 24, 'networks') if trakt_list is None: return list_items = [] for i in trakt_list: list_items.append(tools.addDirectoryItem(i['name'], 'showsNetworkShows&actionArgs=%s&page=1' % i['name'], '', '', bulk_add=True)) tools.addMenuItems(syshandle, list_items, len(list_items)) tools.closeDirectory('addons') def showsNetworkShows(self, network, page): trakt_list = database.get(trakt.json_response, 24, 'shows/popular?networks=%s&page=%s' % (network, page)) if trakt_list is None: return self.showListBuilder(trakt_list) if len(trakt_list) == int(tools.getSetting('item.limit')): tools.addDirectoryItem(tools.lang(32019), 'showsNetworkShows&actionArgs=%s&page=%s' % (network, int(page) + 1), '', '') tools.closeDirectory('tvshows') def showsPopular(self, page): trakt_list = database.get(trakt.json_response, 12, 'shows/popular?page=%s' % page) if trakt_list is None: return self.showListBuilder(trakt_list) tools.addDirectoryItem(tools.lang(32019), 'showsPopular&page=%s' % (int(page) + 1), '', '') tools.closeDirectory('tvshows') def showsRecommended(self): trakt_list = database.get(trakt.json_response, 12, 'recommendations/shows?ignore_collected=true', limit=True, limitOverride=100) if trakt_list is None: return self.showListBuilder(trakt_list) tools.closeDirectory('tvshows') def 
showsTrending(self, page): trakt_list = database.get(trakt.json_response, 12, 'shows/trending?page=%s' % page) if trakt_list is None: return self.showListBuilder(trakt_list) tools.addDirectoryItem(tools.lang(32019), 'showsTrending&page=%s' % (int(page) + 1), '', '') tools.closeDirectory('tvshows') def showsPlayed(self, page): trakt_list = database.get(trakt.json_response, 12, 'shows/played?page=%s' % page) if trakt_list is None: return self.showListBuilder(trakt_list) tools.addDirectoryItem(tools.lang(32019), 'showsPlayed&page=%s' % (int(page) + 1), '', '') tools.closeDirectory('tvshows') def showsWatched(self, page): trakt_list = database.get(trakt.json_response, 12, 'shows/watched?page=%s' % page) if trakt_list is None: return self.showListBuilder(trakt_list) tools.addDirectoryItem(tools.lang(32019), 'showsWatched&page=%s' % (int(page) + 1), '', '') tools.closeDirectory('tvshows') def showsCollected(self, page): trakt_list = database.get(trakt.json_response, 12, 'shows/collected?page=%s' % page) if trakt_list is None: return self.showListBuilder(trakt_list) tools.addDirectoryItem(tools.lang(32019), 'showsCollected&page=%s' % (int(page) + 1), '', '') tools.closeDirectory('tvshows') def showsAnticipated(self, page): trakt_list = database.get(trakt.json_response, 12, 'shows/anticipated?page=%s&language=%s' % (page, language_code)) if trakt_list is None: return self.showListBuilder(trakt_list) tools.addDirectoryItem(tools.lang(32019), 'showsAnticipated&page=%s' % (int(page) + 1), '', '') tools.closeDirectory('tvshows') def showsUpdated(self, page): import datetime date = datetime.date.today() - datetime.timedelta(days=31) date = date.strftime('%Y-%m-%d') trakt_list = database.get(trakt.json_response, 12, 'shows/updates/%s?page=%s' % (date, page)) if trakt_list is None: return self.showListBuilder(trakt_list) tools.addDirectoryItem(tools.lang(32019), 'showsUpdated&page=%s' % (int(page) + 1), '', '') tools.closeDirectory('tvshows') def showSearchHistory(self): history = database.getSearchHistory('show') tools.addDirectoryItem(tools.lang(40142), 'showsSearch', '', '') tools.addDirectoryItem(tools.lang(40140), 'clearSearchHistory', '', '', isFolder=False) for i in history: tools.addDirectoryItem(i, 'showsSearch&actionArgs=%s' % tools.quote(i), '', '') tools.closeDirectory('addon') def showsSearch(self, actionArgs=None): if actionArgs == None: k = tools.showKeyboard('', tools.lang(32016)) k.doModal() query = (k.getText() if k.isConfirmed() else None) if query == None or query == '': return else: query = actionArgs database.addSearchHistory(query, 'show') query = tools.deaccentString(tools.display_string(query)) tools.quote(query) tools.closeAllDialogs() tools.closeDirectory('tvshows') tools.execute("Container.Update(%s?action=showsSearchResults&actionArgs=%s, replace)'" % (sysaddon, query)) def showsSearchResults(self, query): query = tools.quote_plus(tools.unquote(query)) trakt_list = trakt.json_response('search/show?query=%s&extended=full&type=show&field=title' % query) if trakt_list is None: return self.showListBuilder(trakt_list) tools.closeDirectory('tvshows') def showsByActor(self, actionArgs): if actionArgs == None: k = tools.showKeyboard('', tools.lang(32016)) k.doModal() query = (k.getText() if k.isConfirmed() else None) if query == None or query == '': return else: query = tools.unquote(actionArgs) database.addSearchHistory(query, 'showActor') query = tools.deaccentString(query) query = query.replace(' ', '-') query = tools.quote_plus(query) trakt_list = 
trakt.json_response('people/%s/shows' % query, limit=True) try: trakt_list = trakt_list['cast'] except: import traceback traceback.print_exc() trakt_list = [] trakt_list = [i['show'] for i in trakt_list] self.showListBuilder(trakt_list) tools.closeDirectory('tvshows') def showSeasons(self, args): args = tools.get_item_information(args) self.seasonListBuilder(args['ids']['trakt']) tools.closeDirectory('seasons') def seasonEpisodes(self, args): args = tools.get_item_information(args) show_id = args['showInfo']['ids']['trakt'] if 'seasonInfo' in args: season_number = args['seasonInfo']['info']['season'] else: season_number = args['info']['season'] self.episodeListBuilder(show_id, season_number) tools.closeDirectory('episodes', sort='episode') def showGenres(self): tools.addDirectoryItem(tools.lang(32065), 'showGenresGet', '', '', isFolder=True) genres = database.get(trakt.json_response, 24, 'genres/shows') if genres is None: return for i in genres: tools.addDirectoryItem(i['name'], 'showGenresGet&actionArgs=%s' % i['slug'], '', '', isFolder=True) tools.closeDirectory('addons') def showGenreList(self, args, page): if page is None: page = 1 if args is None: genre_display_list = [] genre_string = '' genres = database.get(trakt.json_response, 24, 'genres/shows') for genre in genres: genre_display_list.append(genre['name']) genre_multiselect = tools.showDialog.multiselect(tools.addonName + ": Genre Selection", genre_display_list) if genre_multiselect is None: return for selection in genre_multiselect: genre_string += ', %s' % genres[selection]['slug'] genre_string = genre_string[2:] else: genre_string = args page = int(page) trakt_list = database.get(trakt.json_response, 12, 'shows/popular?genres=%s&page=%s' % (genre_string, page)) if trakt_list is None: return self.showListBuilder(trakt_list) tools.addDirectoryItem(tools.lang(32019), 'showGenresGet&actionArgs=%s&page=%s' % (genre_string, page + 1), None, None) tools.closeDirectory('tvshows') def showsRelated(self, args): trakt_list = database.get(trakt.json_response, 12, 'shows/%s/related' % args) if trakt_list is None: return self.showListBuilder(trakt_list) tools.closeDirectory('tvshows') def showYears(self, year=None, page=None): if year is None: current_year = int(tools.datetime_workaround(datetime.datetime.today().strftime('%Y-%m-%d')).year) all_years = reversed([year for year in range(1900, current_year+1)]) menu_items = [] for year in all_years: menu_items.append(tools.addDirectoryItem(str(year), 'showYears&actionArgs=%s' % year, '', '', bulk_add=True)) tools.addMenuItems(syshandle, menu_items, len(menu_items)) tools.closeDirectory('tvshows') else: if page is None: page = 1 trakt_list = trakt.json_response('shows/popular?years=%s&page=%s' % (year, page)) self.showListBuilder(trakt_list) tools.addDirectoryItem(tools.lang(32019), 'showYears&actionArgs=%s&page=%s' % (year, int(page) + 1), None, None) tools.closeDirectory('tvshows') ###################################################### # MENU TOOLS ###################################################### def seasonListBuilder(self, show_id, smartPlay=False): self.itemList = trakt_database.get_season_list(show_id) self.itemList = [x for x in self.itemList if x is not None and 'info' in x] self.itemList = sorted(self.itemList, key=lambda k: k['info']['season']) if len(self.itemList) == 0: tools.log('We received no titles to build a list', 'error') return hide_specials = False if tools.getSetting('general.hideSpecials') == 'true': hide_specials = True item_list = [] for item in self.itemList: try: 
if hide_specials and int(item['info']['season']) == 0: continue action = 'seasonEpisodes' args = {'trakt_id': item['showInfo']['ids']['trakt'], 'season': item['info']['season'], 'item_type': 'season'} args = tools.quote(json.dumps(args, sort_keys=True)) item['trakt_object']['show_id'] = item['showInfo']['ids']['trakt'] name = item['info']['season_title'] if not self.is_aired(item['info']) or 'aired' not in item['info']: if tools.getSetting('general.hideUnAired') == 'true': continue name = tools.colorString(name, 'red') name = tools.italic_string(name) item['info']['title'] = name item['info'] = tools.clean_air_dates(item['info']) except: import traceback traceback.print_exc() continue if smartPlay is True: return args cm = [] if tools.getSetting('trakt.auth') != '': cm.append(('Trakt Manager', 'RunPlugin(%s?action=traktManager&actionArgs=%s)' % (sysaddon, args))) if tools.context_addon(): cm = [] item_list.append(tools.addDirectoryItem(name, action, item['info'], item['art'], cm=cm, isFolder=True, isPlayable=False, actionArgs=args, set_ids=item['ids'], bulk_add=True)) tools.addMenuItems(syshandle, item_list, len(item_list)) def episodeListBuilder(self, show_id, season_number, smartPlay=False, hide_unaired=False): try: item_list = [] self.itemList = trakt_database.get_season_episodes(show_id, season_number) self.itemList = [x for x in self.itemList if x is not None and 'info' in x] if len(self.itemList) == 0: tools.log('We received no titles to build a list', 'error') return try: self.itemList = sorted(self.itemList, key=lambda k: k['info']['episode']) except: pass for item in self.itemList: cm = [] try: if tools.getSetting('smartplay.playlistcreate') == 'true' and smartPlay is False: action = 'smartPlay' playable = False else: playable = True action = 'getSources' args = {'trakt_id': item['showInfo']['ids']['trakt'], 'season': item['info']['season'], 'episode': item['info']['episode'], 'item_type': 'episode'} args = tools.quote(json.dumps(args, sort_keys=True)) name = item['info']['title'] if not self.is_aired(item['info']): if tools.getSetting('general.hideUnAired') == 'true' or hide_unaired: continue else: name = tools.colorString(name, 'red') name = tools.italic_string(name) item['info']['title'] = name item['info'] = tools.clean_air_dates(item['info']) except: import traceback traceback.print_exc() continue cm.append((tools.lang(32070), 'XBMC.PlayMedia(%s?action=shufflePlay&actionArgs=%s)' % (sysaddon, args))) cm.append(('Browse Season', 'XBMC.Container.Update(%s?action=seasonEpisodes&actionArgs=%s)' % (sysaddon, tools.quote(json.dumps({'trakt_id': item['showInfo']['ids']['trakt'], 'season': item['info']['season'], 'item_type': 'season'}))))) cm.append((tools.lang(33022), 'PlayMedia(%s?action=getSources&seren_reload=true&actionArgs=%s)' % (sysaddon, args))) cm.append((tools.lang(32066), 'PlayMedia(%s?action=getSources&source_select=true&actionArgs=%s)' % (sysaddon, args))) if tools.getSetting('trakt.auth') != '': cm.append(('Trakt Manager', 'RunPlugin(%s?action=traktManager&actionArgs=%s)' % (sysaddon, args))) if tools.context_addon(): cm = [] if tools.getSetting('premiumize.enabled') == 'true' and tools.getSetting('premiumize.pin') != '': cm.append((tools.lang(32068), 'XBMC.RunPlugin(%s?action=filePicker&actionArgs=%s)' % (sysaddon, args))) item_list.append(tools.addDirectoryItem(name, action, item['info'], item['art'], isFolder=False, isPlayable=playable, actionArgs=args, bulk_add=True, set_ids=item['ids'], cm=cm)) if smartPlay is True: return item_list else: 
tools.addMenuItems(syshandle, item_list, len(item_list)) except: import traceback traceback.print_exc() def mixedEpisodeBuilder(self, trakt_list, sort=None, hide_watched=False, smartPlay=False, hide_unaired=True, prepend_date=False): self.threadList = [] try: if len(trakt_list) == 0: tools.log('We received no titles to build a list', 'error') return self.itemList = trakt_database.get_episode_list(trakt_list) self.itemList = [x for x in self.itemList if x is not None and 'info' in x] self.itemList = [i for i in self.itemList if 'info' in i and i['info'].get('premiered', None) is not None] if sort is None: self.itemList = sorted(self.itemList, key=lambda i: tools.datetime_workaround(i['info']['premiered'], tools.trakt_gmt_format, False), reverse=True) elif sort is not False: sort_list = [] for trakt_id in sort['id_list']: try: if not sort['type']: item = [i for i in self.itemList if i['ids']['trakt'] == trakt_id][0] else: item = [i for i in self.itemList if i[sort['type']]['ids']['trakt'] == trakt_id][0] sort_list.append(item) except IndexError: continue except: import traceback traceback.print_exc() self.itemList = sort_list item_list = [] for item in self.itemList: if item is None: continue if item['info'].get('title', '') == '': continue if hide_watched and item['info']['playcount'] != 0: continue cm = [] try: name = tools.display_string(item['info']['title']) if not self.is_aired(item['info']) and hide_unaired is True: continue elif not self.is_aired(item['info']): name = tools.colorString(name, 'red') name = tools.italic_string(name) item['info']['title'] = name item['info'] = tools.clean_air_dates(item['info']) args = {'trakt_id': item['showInfo']['ids']['trakt'], 'season': item['info']['season'], 'episode': item['info']['episode'], 'item_type': 'episode'} args = tools.quote(json.dumps(args, sort_keys=True)) if tools.getSetting('smartplay.playlistcreate') == 'true' and smartPlay is False: action = 'smartPlay' playable = False else: playable = True action = 'getSources' if self.title_appends == 'true': name = "%s: %sx%s %s" % (tools.colorString(item['showInfo']['info']['tvshowtitle']), tools.display_string(item['info']['season']).zfill(2), tools.display_string(item['info']['episode']).zfill(2), tools.display_string(item['info']['title'])) if prepend_date: release_day = tools.datetime_workaround(item['info']['aired']) release_day = release_day.strftime('%d %b') name = '[%s] %s' % (release_day, name) cm.append((tools.lang(32069), 'XBMC.Container.Update(%s?action=showSeasons&actionArgs=%s)' % (sysaddon, tools.quote(json.dumps({'trakt_id': item['showInfo']['ids']['trakt'], 'item_type': 'show'}))))) cm.append(('Browse Season', 'XBMC.Container.Update(%s?action=seasonEpisodes&actionArgs=%s)' % (sysaddon, tools.quote(json.dumps({'trakt_id': item['showInfo']['ids']['trakt'], 'season': item['info']['season'], 'item_type': 'season'}))))) cm.append((tools.lang(32070), 'XBMC.PlayMedia(%s?action=shufflePlay&actionArgs=%s)' % (sysaddon, args))) cm.append((tools.lang(32066), 'PlayMedia(%s?action=getSources&source_select=true&actionArgs=%s)' % (sysaddon, args))) cm.append((tools.lang(33022), 'PlayMedia(%s?action=getSources&seren_reload=true&actionArgs=%s)' % (sysaddon, args))) if tools.getSetting('trakt.auth') != '': cm.append(('Trakt Manager', 'RunPlugin(%s?action=traktManager&actionArgs=%s)' % (sysaddon, tools.quote(json.dumps(item['trakt_object']))))) if tools.context_addon(): cm = [] if tools.getSetting('premiumize.enabled') == 'true' and tools.getSetting('premiumize.pin') != '': 
cm.append((tools.lang(32068), 'XBMC.RunPlugin(%s?action=filePicker&actionArgs=%s)' % (sysaddon, args))) item['info']['title'] = item['info']['originaltitle'] = name item_list.append(tools.addDirectoryItem(name, action, item['info'], item['art'], isFolder=False, isPlayable=playable, actionArgs=args, bulk_add=True, set_ids=item['ids'], cm=cm)) except: import traceback traceback.print_exc() continue if smartPlay is True: return item_list else: tools.addMenuItems(syshandle, item_list, len(item_list)) except: import traceback traceback.print_exc() def showListBuilder(self, trakt_list, forceResume=False, info_only=False): try: if len(trakt_list) == 0: tools.log('We received no titles to build a list', 'error') return except: import traceback traceback.print_exc() return if 'show' in trakt_list[0]: trakt_list = [i['show'] for i in trakt_list] show_ids = [i['ids']['trakt'] for i in trakt_list] self.itemList = trakt_database.get_show_list(show_ids) self.itemList = [x for x in self.itemList if x is not None and 'info' in x] self.itemList = tools.sort_list_items(self.itemList, trakt_list) item_list = [] for item in self.itemList: try: # Add Arguments to pass with items args = {'trakt_id': item['ids']['trakt'], 'item_type': 'show'} args = tools.quote(json.dumps(args, sort_keys=True)) cm = [] name = tools.display_string(item['info']['tvshowtitle']) if info_only == True: return args if not self.is_aired(item['info']): if tools.getSetting('general.hideUnAired') == 'true': continue name = tools.colorString(name, 'red') name = tools.italic_string(name) item['info'] = tools.clean_air_dates(item['info']) if 'setCast' in item: set_cast = item['setCast'] else: set_cast = False if tools.getSetting('smartplay.clickresume') == 'true' or forceResume is True: action = 'playbackResume' else: action = 'showSeasons' # Context Menu Items cm.append((tools.lang(32070), 'XBMC.PlayMedia(%s?action=shufflePlay&actionArgs=%s)' % (sysaddon, args))) cm.append((tools.lang(32020), 'Container.Update(%s?action=showsRelated&actionArgs=%s)' % (sysaddon, item['ids']['trakt']))) cm.append((tools.lang(32069), 'XBMC.Container.Update(%s?action=showSeasons&actionArgs=%s)' % (sysaddon, args))) if tools.getSetting('trakt.auth') != '': cm.append(('Trakt Manager', 'RunPlugin(%s?action=traktManager&actionArgs=%s)' % (sysaddon, args))) cm.append((tools.lang(40153), 'XBMC.PlayMedia(%s?action=playFromRandomPoint&actionArgs=%s' % (sysaddon, args))) if tools.context_addon(): cm = [] except: import traceback traceback.print_exc() continue item_list.append(tools.addDirectoryItem(name, action, item['info'], item['art'], cm=cm, isFolder=True, isPlayable=False, actionArgs=args, bulk_add=True, set_cast=set_cast, set_ids=item['ids'])) tools.addMenuItems(syshandle, item_list, len(item_list)) def runThreads(self, join=True): for thread in self.threadList: thread.start() if join == True: for thread in self.threadList: thread.join() def _start_queue_workers(self): self.queue_finished = False for i in range(40): self.threadList.append(Thread(target=self._queue_worker)) for i in self.threadList: i.start() def _finish_queue_workers(self): self.queue_finished = True for i in self.threadList: i.join() self.threadList = [] def _queue_worker(self): while not self.task_queue.empty() or not self.queue_finished: try: target = self.task_queue.get(timeout=3) except: continue try: target[0](*target[1]) except: import traceback traceback.print_exc() pass def is_aired(self, info): try: try:air_date = info['aired'] except: air_date = info.get('premiered') if air_date == '' or 
air_date is None: return False if int(air_date[:4]) < 1970: return True time_format = tools.trakt_gmt_format if len(air_date) == 10: time_format = '%Y-%m-%d' air_date = tools.gmt_to_local(air_date, format=time_format) if tools.getSetting('general.datedelay') == 'true': air_date = tools.datetime_workaround(air_date, time_format, False) air_date += datetime.timedelta(days=1) else: air_date = tools.datetime_workaround(air_date, time_format, False) if air_date > datetime.datetime.now(): return False else: return True except: import traceback traceback.print_exc() # Assume an item is not aired if we do not have any information on it or fail to identify return False
Ed57/plugin.video.seren
resources/lib/gui/tvshowMenus.py
tvshowMenus.py
py
39,641
python
en
code
null
github-code
6
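The Seren menus above repeatedly serialize action arguments with tools.quote(json.dumps(args, sort_keys=True)) before embedding them in plugin:// URLs. Assuming tools.quote wraps the standard urllib.parse.quote (an assumption; the addon's tools module is not shown here), the round trip looks like this minimal sketch:

import json
from urllib.parse import quote, unquote

# Hypothetical args dict mirroring the addon's 'seasonEpisodes' payload
args = {"trakt_id": 1390, "season": 1, "item_type": "season"}

encoded = quote(json.dumps(args, sort_keys=True))   # URL-safe string for plugin://...?actionArgs=
decoded = json.loads(unquote(encoded))              # what the receiving route recovers

assert decoded == args

sort_keys=True also makes the encoding deterministic, so the same dict always yields the same URL, which keeps Kodi's directory caching consistent.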
32466001503
from tech_news.database import find_news


# Requirement 10
def top_5_news():
    """Return the five most-commented news items as (title, url) pairs."""
    news_list = find_news()
    classified_news = sorted(
        news_list, key=lambda news: news["comments_count"], reverse=True
    )
    # Slicing the pre-sorted list is clearer and cheaper than per-element .index() calls
    return [(news["title"], news["url"]) for news in classified_news[:5]]


# Requirement 11
def top_5_categories():
    """Return the five most frequent categories; ties keep alphabetical order."""
    news_list = find_news()
    categories = {}
    # Insert keys in alphabetical order so the stable sort below breaks ties alphabetically
    for news in sorted(news_list, key=lambda news: news["category"]):
        categories[news["category"]] = categories.get(news["category"], 0) + 1
    ordered_categories = sorted(
        categories.items(), key=lambda item: item[1], reverse=True
    )
    return [category for category, _ in ordered_categories[:5]]
janaolive/phyton_raspagem_de_dados
tech_news/analyzer/ratings.py
ratings.py
py
1,013
python
en
code
1
github-code
6
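The slicing approach above still sorts every record; when only the top five are needed, heapq.nlargest gets the same answer without a full sort. A minimal alternative sketch, assuming the same comments_count field:

import heapq

def top_5_news_nlargest(news_list):
    # Selects the 5 largest by key in roughly O(n log 5) instead of O(n log n)
    top = heapq.nlargest(5, news_list, key=lambda news: news["comments_count"])
    return [(news["title"], news["url"]) for news in top]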
37950984200
import pykka import re import json from gpt_connection import GPT_Connection from tools import ToolRunner from frontend_utils import initial_request from bots.Dispatcher import Dispatcher from bots.Workflow import Workflow from prompts import DISPATCHER_PROMPT_TEMPLATE, INITIAL_PROMPT_TEMPLATE, ITERATING_ACTION_PROMPT_TEMPLATE, FORCE_END_ITERATION_PROMPT, \ PLANT_HEALTH_BOT_DESCRIPTION, PRODUCTION_OUTPUT_BOT_DESCRIPTION, DISTRIBUTION_BOT_DESCRIPTION, \ ONT_PRODUCTION_PLANT, ONT_MACHINES, ONT_WORK_ORDERS, ONT_PRODUCTION_ALLOCATION_PLAN, ONT_DISTRIBUTION_WAREHOUSE, ONT_TRANSIT_ORDER, \ GET_OBJECTS, MODIFY_OBJECT, CREATE_OBJECT from string import Template """ Main Execution Code """ gpt_connection = GPT_Connection() tool_runner = ToolRunner() plant_health_ref = Workflow.start( name="Plant Health Bot", id=2, dispatcher_id=1, bot_description=PLANT_HEALTH_BOT_DESCRIPTION, initial_prompt_template=INITIAL_PROMPT_TEMPLATE, iteration_prompt_template=ITERATING_ACTION_PROMPT_TEMPLATE, force_end_prompt_template=FORCE_END_ITERATION_PROMPT, information=[ONT_PRODUCTION_PLANT, ONT_MACHINES, ONT_WORK_ORDERS], readtools=[GET_OBJECTS], writetools=[MODIFY_OBJECT, CREATE_OBJECT], gpt_connection=gpt_connection, tool_runner=tool_runner ) production_output_ref = Workflow.start( name="Production Output Bot", id=3, dispatcher_id=1, bot_description=PRODUCTION_OUTPUT_BOT_DESCRIPTION, initial_prompt_template=INITIAL_PROMPT_TEMPLATE, iteration_prompt_template=ITERATING_ACTION_PROMPT_TEMPLATE, force_end_prompt_template=FORCE_END_ITERATION_PROMPT, information=[ONT_PRODUCTION_PLANT, ONT_MACHINES, ONT_PRODUCTION_ALLOCATION_PLAN], readtools=[GET_OBJECTS], writetools=[MODIFY_OBJECT, CREATE_OBJECT], gpt_connection=gpt_connection, tool_runner=tool_runner ) distribution_ref = Workflow.start( name="Distribution Bot", id=4, dispatcher_id=1, bot_description=DISTRIBUTION_BOT_DESCRIPTION, initial_prompt_template=INITIAL_PROMPT_TEMPLATE, iteration_prompt_template=ITERATING_ACTION_PROMPT_TEMPLATE, force_end_prompt_template=FORCE_END_ITERATION_PROMPT, information=[ONT_PRODUCTION_PLANT, ONT_DISTRIBUTION_WAREHOUSE, ONT_PRODUCTION_ALLOCATION_PLAN, ONT_TRANSIT_ORDER], readtools=[GET_OBJECTS], writetools=[MODIFY_OBJECT, CREATE_OBJECT], gpt_connection=gpt_connection, tool_runner=tool_runner ) workflows = { "Plant Health Bot": plant_health_ref, "Production Output Bot": production_output_ref, "Distribution Bot": distribution_ref } workflow_ids = { "Plant Health Bot": 2, "Production Output Bot": 3, "Distribution Bot": 4 } dispatcher_ref = Dispatcher.start( id=1, prompt_template=DISPATCHER_PROMPT_TEMPLATE, workflows=workflows, workflow_ids=workflow_ids, gpt_connection=gpt_connection) initial_ask = "We just got a message that the ocelot making machine has broken at Stuffed Animal Plant 8" # initial_ask = "Our trucks carrying transit order T0030 got into an accident." # initial_ask = "Alice has left the company" initial_request(initial_ask) dispatcher_ref.ask(initial_ask) dispatcher_ref.stop()
nhuang25/llm-composition
stuffed_animal_llc/stuffed_animal_llc.py
stuffed_animal_llc.py
py
3,203
python
en
code
0
github-code
6
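The script above wires Dispatcher and Workflow pykka actors together, but their internals live in other modules. For readers new to pykka, here is a self-contained sketch of the actor lifecycle the script relies on (start/ask/stop); EchoBot is a made-up stand-in, not part of the project:

import pykka

class EchoBot(pykka.ThreadingActor):
    def on_receive(self, message):
        # ask() routes the message here; the return value travels back to the caller
        return f"echo: {message}"

ref = EchoBot.start()       # spawns the actor on its own thread, returns an ActorRef
print(ref.ask("hello"))     # blocks for the reply -> "echo: hello"
ref.stop()                  # shuts the actor down, like dispatcher_ref.stop() above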
75125706747
# # 8/19/2020 # You will work with a binary classification problem on a subsample from Kaggle playground competition. The objective of this competition is to predict whether a famous basketball player Kobe Bryant scored a basket or missed a particular shot. # Train data is available in your workspace as bryant_shots DataFrame. It contains data on 10,000 shots with its properties and a target variable "shot\_made\_flag" -- whether shot was scored or not. # One of the features in the data is "game_id" -- a particular game where the shot was made. There are 541 distinct games. So, you deal with a high-cardinality categorical feature. Let's encode it using a target mean! # Suppose you're using 5-fold cross-validation and want to evaluate a mean target encoded feature on the local validation. # Create 5-fold cross-validation kf = KFold(n_splits=5, random_state=123, shuffle=True) # For each folds split for train_index, test_index in kf.split(bryant_shots): cv_train, cv_test = bryant_shots.iloc[train_index], bryant_shots.iloc[test_index] # Create mean target encoded feature cv_train['game_id_enc'], cv_test['game_id_enc'] = mean_target_encoding(train=cv_train, test=cv_test, target='shot_made_flag', categorical='game_id', alpha=5) # Look at the encoding print(cv_train[['game_id', 'shot_made_flag', 'game_id_enc']].sample(n=1)) # <script.py> output: # game_id shot_made_flag game_id_enc # 7106 20500532 0.0 0.361914 # game_id shot_made_flag game_id_enc # 5084 20301100 0.0 0.568395 # game_id shot_made_flag game_id_enc # 6687 20500228 0.0 0.48131 # game_id shot_made_flag game_id_enc # 5046 20301075 0.0 0.252103 # game_id shot_made_flag game_id_enc # 4662 20300515 1.0 0.452637
AndrewAct/DataCamp_Python
Winning a Kaggle Competition in Python/3 Feature Engineering/06_KFold_CrossValidation.py
06_KFold_CrossValidation.py
py
2,188
python
en
code
0
github-code
6
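The exercise calls a mean_target_encoding helper that is defined elsewhere in the course. A simplified sketch of what such a helper typically does, using the smoothing parameter alpha from the call above (the course version additionally encodes the train fold out-of-fold to limit target leakage):

import pandas as pd

def mean_target_encoding(train, test, target, categorical, alpha=5):
    # Smoothed category mean: rare categories shrink toward the global mean
    global_mean = train[target].mean()
    stats = train.groupby(categorical)[target].agg(["sum", "count"])
    smoothed = (stats["sum"] + alpha * global_mean) / (stats["count"] + alpha)
    # Categories unseen at fit time fall back to the global mean
    train_feature = train[categorical].map(smoothed).fillna(global_mean)
    test_feature = test[categorical].map(smoothed).fillna(global_mean)
    return train_feature, test_feature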
2896601636
from dotenv import load_dotenv
import discord
from discord.ext import commands
import os
import re

load_dotenv()
token = os.getenv('DISCORD_TOKEN')
commMark = os.getenv('COMMAND_MARKER')
description = 'Discord administration bot'

intents = discord.Intents.default()
intents.message_content = True
intents.members = True

bot = commands.Bot(command_prefix=commMark, description=description, intents=intents)


#posts log in message on log in
@bot.event
async def on_ready():
    # the f-prefix was missing, so the placeholders were printed literally
    print(f'Logged on as {bot.user} (ID {bot.user.id})')
    print('-----------')

#------------------------------------------------Functions---------------------------------------------------#
def get_rid(ctx, role_input):
    print("get_rid starting")  #debugging - prints once function is called
    #cleans input and assigns to role_name
    role_name = role_input.strip()
    print(role_name)
    #first trying regex to get the id from the message itself
    #(Discord snowflake IDs are 17-19 digits, not always exactly 18)
    role_id = re.search(r'\d{17,19}', role_name)
    roles_list = []  #initializing return list
    if role_id is not None:  #checking if re found something
        role_id = role_id.group(0)  #getting readable id (was `role.id.group(0)`, a NameError)
        roles_list.append(int(role_id))  #getting and appending role-id to list
    else:
        #iterating through roles, searching for name match
        for g_role in ctx.guild.roles:
            if role_name in str(g_role.name):
                roles_list.append(int(g_role.id))  #appending to list
    print(roles_list)  #debugging - prints roles_list
    roleLen = len(roles_list)
    print('length: ' + str(roleLen))  #debugging - prints length of roles_list
    print('get_rid finishing')
    return roles_list, len(roles_list)

#similar function to get_rid, but for retrieving user ID
def getuid(ctx, user_input):
    print("get_uid starting")  #debugging - prints once function is called
    #cleans input and assigns to user_name
    users_list = []
    user_name = user_input.strip()
    print("uid start " + user_name)
    for g_user in ctx.guild.members:
        print("uid for " + str(g_user))
        if user_name in str(g_user):
            users_list.append(int(g_user.id))  #appending to list
            print("username match")
            print("appended " + str(g_user.id))
        else:
            print("Not a match")
    print("get_uid list" + str(users_list))  #debugging - prints users_list
    userLen = len(users_list)
    print(userLen)
    print('get_uid finishing')
    return users_list, len(users_list)

#----------------------------------------- Commands below, functions above-----------------------------------------------------------#
#------------------------------------------testing/troubleshooting commands----------------------------------------------------------#
#@bot.command()
#async def hello(ctx):
#    await ctx.send(f'Hello {ctx.author.display_name}.')

#test command, just echoes the argument
#@bot.command()
#async def test(ctx, content):
#    await ctx.send(content)

#-----------------------------------------------administrative commands---------------------------------------------------------------#
#command to get role ID
@bot.command()
async def roleid(ctx, role_name: str):
    try:
        role, le = get_rid(ctx, role_name)
        print(role)
        print(le)
        if le == 1:
            roleAdd = role[0]
            await ctx.send(roleAdd)
    except:
        emby = discord.Embed(title="", color=discord.Color.red())
        emby.add_field(name="Something went wrong", value="Please check your given argument")
        await ctx.send(embed=emby)

#command to add role to user
@bot.command()
async def addrole(ctx, role_name: str, user):
    try:
        role, le = get_rid(ctx, role_name)
        print(role)
        print(le)
        if le == 1:
            roleAdd = role[0]
            print(roleAdd)
            getuid(ctx, user)  #was called without ctx; the role assignment itself is still unimplemented
            await ctx.send('Adding role %s to user %s' % (role_name, user))
    except:
        print("except")

#command to fetch user id's
@bot.command()
async def userid(ctx, user):
    print('User ID command called by %s Requesting UserID for %s' % (ctx.author, user))
    try:
        #calls function to get user ID by username, then prints variables for debugging/logging
        userN, leU = getuid(ctx, user)
        #outputs all user IDs to chat
        if leU == 0:
            await ctx.send("No user found with that name")
        else:
            for i in userN:
                await ctx.send(i)
    except:
        emby = discord.Embed(title="", color=discord.Color.red())
        emby.add_field(name="Something went wrong", value="Please check your given argument")
        await ctx.send(embed=emby)

#starts the bot
bot.run(token)
Srs2311/gene.py
gene.py
gene.py
py
4,778
python
en
code
0
github-code
6
37520613786
import pandas as pd import redis from redisgraph import Graph from config import redisgraph_config from pathlib import Path def redisgraph_import_csv(output: Path) -> None: r = redis.Redis(host=redisgraph_config['host'], port=redisgraph_config['port']) graph_name = 'movie_graph' redis_graph = Graph(graph_name, r) import_csv = pd.read_csv(output / 'neo4j_export.csv') break_column_name = '_start' nodes = pd.DataFrame() relations = pd.DataFrame() active_df = nodes for idx, col in enumerate(import_csv.columns): if col == break_column_name: active_df = relations active_df[col] = import_csv[col] nodes.dropna(how='all', subset=None, inplace=True) relations.dropna(how='all', subset=None, inplace=True) # relations.to_csv('../out/Relations.csv') node_values = nodes['_labels'].unique() rows_to_drop = [node_value for node_value in node_values if 'UNIQUE IMPORT LABEL' in node_value] for row_to_drop in rows_to_drop: nodes = nodes[nodes["_labels"].str.contains(row_to_drop) == False] node_values = nodes['_labels'].unique() for node_type in node_values: node_data = nodes[nodes['_labels'] == node_type] filename = f'../out/{node_type.replace(":", "")}.csv' node_data.dropna(how='all', axis=1, inplace=True) # node_data.to_csv(filename) for node in node_data.iloc: params = '' for param in node_data.columns: if not param.startswith('_'): val = node[param] if not pd.isna(val): try: val = float(val) except: val = f'"{val}"' params = params + f', {param}: {val}' query = f'MERGE ({node["_labels"]} {{id: {node["_id"]} {params} }})' redis_graph.query(query) for node in relations.iloc: query = f""" MATCH (a), (b) WHERE a.id = {node["_start"]} AND b.id = {node["_end"]} CREATE (a)-[:{node['_type']}]->(b) """ redis_graph.query(query)
Wojaqqq/graph_data_exchange_tool
imports/redisgraph_import_csv.py
redisgraph_import_csv.py
py
2,156
python
en
code
0
github-code
6
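The import script above interpolates node properties straight into the Cypher string, which forces the manual float/quote handling in the inner loop. RedisGraph also supports query parameters, which sidestep escaping; a sketch of the same MERGE in parameterized form (assuming the redisgraph-py client's params argument; node labels cannot be parameterized, so the label stays literal):

# Same idea as the MERGE above, but values travel as parameters
query = "MERGE (n:Movie {id: $id, title: $title})"
redis_graph.query(query, {"id": 42, "title": "Alien"})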
30353175971
import sys import os from os.path import splitext import glob from common import TestCase def get_tests(): """Get all the tests to run. """ files = glob.glob('test_*.py') return files def run_all(tests): """Run the given tests. """ args = ' '.join(sys.argv[1:]) success = [] fail = [] for test in tests: cmd = 'python %s %s'%(test, args) print(cmd) status = os.system(cmd) if status == 0: print("OK") success.append(test) else: print("FAIL: %s"%test) fail.append(test) print('-'*70) print("%d successful tests, %d failures"%(len(success), len(fail))) for test in fail: print(test) print('-'*70) return len(fail) != 0 class RunAllTests(TestCase): """Runs all the tests in one go, instead of running each test separately. This speeds up the testing. """ def get_tests(self): tests = get_tests() tests = [splitext(t)[0] for t in tests] klasses = [] for test in tests: # Find test. m = __import__(test) m.mayavi = self.script m.application = self.application for name in dir(m): klass = getattr(m, name) try: if issubclass(klass, TestCase) and klass is not TestCase: mod_name = '%s.%s'%(test, name) klasses.append((mod_name, klass)) break except TypeError: continue return klasses def do(self): klasses = self.get_tests() for name, klass in klasses: # Close existing scenes. e = self.script.engine for scene in e.scenes: e.close_scene(scene) print('*'*80) print(name) obj = klass() obj.trait_set(script=self.script) obj.test() def main(): argv = ' '.join(sys.argv) if '--one-shot' in argv: argv = argv.replace('--one-shot', '') sys.argv = argv.split() t = RunAllTests() t.main() else: tests = get_tests() status = run_all(tests) sys.exit(status) if __name__ == "__main__": main()
enthought/mayavi
integrationtests/mayavi/run.py
run.py
py
2,323
python
en
code
1,177
github-code
6
39227047564
import os import numpy as np import scipy.io.wavfile as wavfile from keras.models import Sequential from keras.layers import Dense from keras.utils import normalize from utils import read_textgrid from numpy_operation import get_martix from python_speech_features import mfcc from python_speech_features import delta #from python_speech_features import fbank def to_one_hot(labels, dimension=5): results = np.zeros((len(labels),dimension)) for i, label in enumerate(labels): results[i, label] = 1. return results def read_wav(filename): rate, data = wavfile.read(filename) #only use the 1st channel if stereo if len(data.shape) > 1: data = data[:,0] data = data.astype(np.float32) data = data / 32768 #convert PCM int16 to float return data, rate def feature_extract(filename, wavpath, tgpath): wav_filename = os.path.join(wavpath,filename+'.wav') print(wav_filename) tg_filename = os.path.join(tgpath,filename+'.textgrid') y,sr = read_wav(wav_filename) mfccs = mfcc(signal=y,samplerate=sr,winlen=0.02,winfunc=np.hamming) delta1 = delta(mfccs,1) delta2 = delta(mfccs,2) _mfccs = np.concatenate((mfccs,delta1,delta2),1) _mfccs = normalize(_mfccs) _mfccs = get_martix(_mfccs,30,10) _labels = None if(os.path.exists(tg_filename)): _labels = read_textgrid(tg_filename,len(_mfccs)) _labels = to_one_hot(_labels) return _mfccs,_labels
MakerFace/voice-activation-system
feature_extractor.py
feature_extractor.py
py
1,510
python
en
code
0
github-code
6
17007537325
T = int(input())
for _ in range(T):
    N, K = [int(x) for x in input().split()]
    time = [int(x) for x in input().split()]
    levels = [[0]]
    for __ in range(K):
        f, t = [int(x) - 1 for x in input().split()]
        level = 0
        for i in range(len(levels)):
            if f in levels[i]:
                level = i
                break
        # grow the level list explicitly instead of relying on an IndexError
        if level + 1 >= len(levels):
            levels.append([])
        if t not in levels[level + 1]:
            levels[level + 1].append(t)
    W = int(input()) - 1
    level = 0
    for i in range(len(levels)):
        if W in levels[i]:
            level = i
            break
    cost = 0
    for l in levels[:level + 1]:
        cost += max([time[i] for i in l])
    print(cost)
masonHong/INU-Study
Backjoon/HTJ/동적 계획법 기초 단계/1005 ACM Craft.py
1005 ACM Craft.py
py
835
python
en
code
0
github-code
6
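The level-bucket solution above assigns each building to the first level where it appears, which works for layered inputs but can undercount when a building is reachable along paths of different depths. The standard formulation of this problem is a longest-path DP over the dependency DAG; a sketch with hypothetical names (min_build_time is mine, not from the submission), using the same 0-indexed buildings:

from collections import deque

def min_build_time(n, time, edges, target):
    # Kahn's algorithm: earliest finish of v = time[v] + max finish over its prerequisites
    adj = [[] for _ in range(n)]
    indeg = [0] * n
    for f, t in edges:              # edge f -> t: f must finish before t starts
        adj[f].append(t)
        indeg[t] += 1
    finish = time[:]                # roots finish after just their own build time
    queue = deque(i for i in range(n) if indeg[i] == 0)
    while queue:
        u = queue.popleft()
        for v in adj[u]:
            finish[v] = max(finish[v], finish[u] + time[v])
            indeg[v] -= 1
            if indeg[v] == 0:
                queue.append(v)
    return finish[target]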
21766876312
#! /usr/bin/python3
# id-DNA.py
# http://rosalind.info/problems/dna/

# read() + strip() yields the raw sequence; str(readlines()) would include
# list brackets and quote characters and crash the counter with a KeyError
with open('/home/steve/Dropbox/Rosalind/rosalind_dna.txt', 'r') as dna_file:
    dna_string = dna_file.read().strip()

base_count = {"A": 0, "G": 0, "C": 0, "T": 0}

for base in dna_string:
    base_count[base] += 1

print(base_count)
shuysman/Rosalind
id-DNA.py
id-DNA.py
py
295
python
en
code
0
github-code
6
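Since the loop above is a plain frequency count, collections.Counter does the same in one call; Rosalind's DNA problem expects the four counts space-separated, which print with multiple arguments produces:

from collections import Counter

with open('/home/steve/Dropbox/Rosalind/rosalind_dna.txt') as fh:
    counts = Counter(fh.read().strip())

print(counts["A"], counts["C"], counts["G"], counts["T"])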
13255530705
import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from math import sqrt from scipy import stats import warnings warnings.filterwarnings("ignore") from statsmodels.formula.api import ols from sklearn.metrics import mean_squared_error, r2_score, explained_variance_score, mean_absolute_error from sklearn.feature_selection import f_regression, SelectKBest, RFE from sklearn.linear_model import LinearRegression, LassoLars, TweedieRegressor from sklearn.preprocessing import PolynomialFeatures from numpy import mean from numpy import std, absolute from sklearn.datasets import make_blobs from sklearn.model_selection import LeaveOneOut from sklearn.model_selection import cross_val_score from sklearn.svm import SVR from sklearn.ensemble import RandomForestRegressor def feature_ranking(X_train_scaled, y_train): lm = LinearRegression() rfe = RFE(lm, 1) rfe.fit(X_train_scaled, y_train) ranks = rfe.ranking_ names = X_train_scaled.columns.tolist() rankdf = pd.DataFrame({'features': names, 'rank': ranks}).set_index('rank').sort_values('rank') return rankdf def cvLinearReg(X_train, y_train): # create loocv procedure cvLR = LeaveOneOut() # create model modelLR = LinearRegression() # evaluate model scoresLR = cross_val_score(modelLR, X_train, y_train, scoring='neg_mean_absolute_error', cv=cvLR, n_jobs=-1) # force positive scoresLR = absolute(scoresLR) # report performance print('MAE: %.3f (%.3f)' % (mean(scoresLR), std(scoresLR))) meanMAE = mean(scoresLR) stddevMAE = std(scoresLR) return meanMAE def cvLassoLars(X_train, y_train, x): # LassoLars # create loocv procedure cvLL = LeaveOneOut() # create model modelLL = LassoLars(alpha=x) # evaluate model scoresLL = cross_val_score(modelLL, X_train, y_train, scoring='neg_mean_absolute_error', cv=cvLL, n_jobs=-1) # force positive scoresLL = absolute(scoresLL) # report performance print('MAE: %.3f (%.3f)' % (mean(scoresLL), std(scoresLL))) meanMAE = mean(scoresLL) stddevMAE = std(scoresLL) return meanMAE def cvTweedie(X_train, y_train, pwr, alf): # Tweedie Regressor # create loocv procedure cvTW = LeaveOneOut() # create model modelTW = TweedieRegressor(power=pwr, alpha=alf) # 0 = normal distribution # evaluate model scoresTW = cross_val_score(modelTW, X_train, y_train, scoring='neg_mean_absolute_error', cv=cvTW, n_jobs=-1) # force positive scoresTW = absolute(scoresTW) # report performance print('MAE: %.3f (%.3f)' % (mean(scoresTW), std(scoresTW))) meanMAE = mean(scoresTW) stddevMAE = std(scoresTW) return meanMAE def cvRandomForest(X_train, y_train, x): # Random Forest Regressor # create loocv procedure cvRF = LeaveOneOut() # create model modelRF = RandomForestRegressor(n_estimators=x, random_state = 123) # evaluate model scoresRF = cross_val_score(modelRF, X_train, y_train, scoring='neg_mean_absolute_error', cv=cvRF, n_jobs=-1) # force positive scoresRF = absolute(scoresRF) # report performance print('MAE: %.3f (%.3f)' % (mean(scoresRF), std(scoresRF))) meanMAE = mean(scoresRF) stddevMAE = std(scoresRF) return meanMAE def cvSVR(X_train, y_train, x): # Support Vector Regressor # create loocv procedure cvSVR = LeaveOneOut() # create model modelSVR = SVR(kernel = x) # evaluate model scoresSVR = cross_val_score(modelSVR, X_train, y_train, scoring='neg_mean_absolute_error', cv=cvSVR, n_jobs=-1) # force positive scoresSVR = absolute(scoresSVR) # report performance print('MAE: %.3f (%.3f)' % (mean(scoresSVR), std(scoresSVR))) meanMAE = mean(scoresSVR) stddevMAE = std(scoresSVR) return meanMAE def get_baseline_mean(y_train): ''' 
Using mean gets baseline for y dataframe ''' # determine Baseline to beat rows_needed = y_train.shape[0] # create array of predictions of same size as y_train.logerror based on the mean y_hat = np.full(rows_needed, np.mean(y_train)) # calculate the MSE for these predictions, this is our baseline to beat baseline = mean_absolute_error(y_train, y_hat) print("Baseline MAE:", baseline) return baseline, y_hat def get_baseline_median(y_train): ''' Using median gets baseline for y dataframe ''' # determine Baseline to beat rows_needed = y_train.shape[0] # create array of predictions of same size as y_train.logerror based on the median y_hat = np.full(rows_needed, np.median(y_train)) # calculate the MSE for these predictions, this is our baseline to beat baseline = mean_absolute_error(y_train, y_hat) print("Baseline MAE:", baseline) return baseline, y_hat def linear_reg_train(x_scaleddf, target): ''' runs linear regression algorithm ''' lm = LinearRegression() lm.fit(x_scaleddf, target) y_hat = lm.predict(x_scaleddf) LM_MAE = mean_absolute_error(target, y_hat) return LM_MAE def lasso_lars(x_scaleddf, target): ''' runs Lasso Lars algorithm ''' # Make a model lars = LassoLars(alpha=1) # Fit a model lars.fit(x_scaleddf, target) # Make Predictions lars_pred = lars.predict(x_scaleddf) # Computer root mean squared error lars_MAE = mean_absolute_error(target, lars_pred) return lars_MAE def polynomial2(X_trainsdf, target): ''' runs polynomial algorithm ''' # Make a model pf = PolynomialFeatures(degree=2) # Fit and Transform model to get a new set of features...which are the original features squared X_train_squared = pf.fit_transform(X_trainsdf) # Feed new features in to linear model. lm_squared = LinearRegression(normalize=True) lm_squared.fit(X_train_squared, target) # Make predictions lm_squared_pred = lm_squared.predict(X_train_squared) # Compute root mean squared error pf2_MAE = mean_absolute_error(target, lm_squared_pred) return pf2_MAE def tweedie05(X_train_scaled, y_train): ''' runs tweedie algorithm ''' # Make Model tw = TweedieRegressor(power=0, alpha=.5) # 0 = normal distribution # Fit Model tw.fit(X_train_scaled, y_train) # Make Predictions tw_pred = tw.predict(X_train_scaled) # Compute root mean squared error tw_MAE = mean_absolute_error(y_train, tw_pred) return tw_MAE def randomforest_test(x_scaleddf, target, X_test, y_test, est): ''' runs random forest regressor ''' # make model regressor = RandomForestRegressor(n_estimators = est, random_state = 123) # fit the model regressor.fit(x_scaleddf, target) # make predictions y_pred = regressor.predict(X_test) # calculate MAE randMAE = mean_absolute_error(y_test, y_pred) return randMAE, regressor def lasso_lars_test(x_scaleddf, target, X_test, y_test): ''' runs Lasso Lars algorithm ''' # Make a model lars = LassoLars(alpha=1) # Fit a model lars.fit(x_scaleddf, target) # Make Predictions lars_pred = lars.predict(X_test) # calculate MAE lars_MAE = mean_absolute_error(y_test, lars_pred) return lars_MAE, lars, lars_pred def linear_test(x_scaleddf, target, X_test, y_test): ''' runs Lasso Lars algorithm ''' # Make a model lm = LinearRegression() # Fit model on train dataset lm.fit(x_scaleddf, target) # Make Predictions on test dataset y_hat = lm.predict(X_test) # calculate MAE LM_MAE = mean_absolute_error(y_test, y_hat) return LM_MAE, lm, y_hat def SVR_test(x_scaleddf, target, X_test, y_test, kern): ''' runs Support Vector Regressor algorithm ''' # Make a model regressor = SVR(kernel = kern) # Fit model on train dataset regressor.fit(x_scaleddf, target) 
# Make Predictions on test dataset y_hat = sc_y.inverse_transform(regressor.predict(sc_X.transform(X_test))) # calculate MAE svr_MAE = mean_absolute_error(y_test, y_hat) return svr_MAE, regressor def tweedie_test(X_train, y_train, X_test, y_test, pwr, alf): ''' runs tweedie algorithm ''' # Make Model tw = TweedieRegressor(power=pwr, alpha=alf) # 0 = normal distribution # Fit Model tw.fit(X_train, y_train) # Make Predictions tw_pred = tw.predict(X_test) # Compute root mean squared error tw_MAE = mean_absolute_error(y_test, tw_pred) return tw_MAE, tw, tw_pred def create_visualdf(y_test, y_train, y_test_predLL, y_test_predLR, y_test_predTW, y_test_predrTW): ''' creates dataframe for making visualizations ''' visualdf = pd.DataFrame() visualdf['actual'] = y_test.tract_cases_per_100k visualdf['baseline'] = y_train.tract_cases_per_100k.mean() visualdf['TWpred'] = y_test_predTW visualdf['LRpred'] = y_test_predLR visualdf['LLpred'] = y_test_predLL visualdf['SVI_only'] = y_test_predrTW return visualdf def plot_actual_vs_predicted(visualdf): ''' Produces subplots of actual VS predicted for the top models ''' plt.figure(figsize=(16,8)) #plt.suptitle('Plotting Actual Cases per 100K vs Predicted Cases per 100K') plt.plot(visualdf.actual, visualdf.baseline, alpha=.5, color="gray", label='_nolegend_') #plt.annotate("Baseline: Predict Using Mean", (15, 8)) plt.plot(visualdf.actual, visualdf.actual, alpha=.5, color="blue", label='_nolegend_') #plt.annotate("The Ideal Line: Predicted = Actual", (.5, 1), rotation=15.5) #plt.subplot(1,3,1,) plt.scatter(visualdf.actual, visualdf.SVI_only, alpha=.5, color="blue", s=50, label="Model: TW SVI only") #plt.subplot(1,3,2) plt.scatter(visualdf.actual, visualdf.TWpred, alpha=.5, color="green", s=100, label="Model: TW Top 4 Features") #plt.subplot(1,3,3) # plt.scatter(visualdf.actual, visualdf.LLpred, # alpha=.5, color="orange", s=75, label="Model: LassoLars") plt.legend() plt.xlabel("Actual Cases per 100K") plt.ylabel("Predicted Cases per 100K") # plt.annotate("The polynomial model appears to overreact to noise", (2.0, -10)) # plt.annotate("The OLS model (LinearRegression)\n appears to be most consistent", (15.5, 3)) plt.show() def plotting_errors(visualdf): ''' Plots the errors of the top models with zero error line ''' # plotting Errors in Predictions plt.figure(figsize=(16,8)) plt.axhline(label="No Error") plt.scatter(visualdf.actual, visualdf.SVI_only-visualdf.actual, alpha=.5, color="blue", s=50, label="Model: TW SVI only") plt.scatter(visualdf.actual, visualdf.TWpred-visualdf.actual, alpha=.5, color="green", s=100, label="Model: TW Top 4 Features") # plt.scatter(visualdf.actual, visualdf.LLpred-visualdf.actual, # alpha=.5, color="orange", s=75, label="Model: LassoLars") plt.legend() plt.xlabel("Actual Cases per 100K") plt.ylabel("Residual/Error: Predicted Cases per 100K - Actual Cases per 100K") plt.title("Do the size of errors change as the actual value changes?") # plt.annotate("The polynomial model appears to overreact to noise", (2.0, -10)) # plt.annotate("The OLS model (LinearRegression)\n appears to be most consistent", (15.5, 3)) plt.show() def plotting_histograms(visualdf): ''' Plots Histograms of top models, currently only shows actual vs best performing ''' plt.figure(figsize=(16,8)) plt.hist(visualdf.actual, color='blue', alpha=.5, label="Actual Cases per 100K", histtype=u'step', linewidth=5) plt.hist(visualdf.LRpred, color='orange', alpha=.5, label="Model: TW SVI only") plt.hist(visualdf.TWpred, color='green', alpha=.5, label="Model: TW Top 4 
Features") #plt.hist(visualdf.LLpred, color='red', alpha=.5, label="Model: LassoLars") plt.xlabel("Actual Cases per 100K") plt.ylabel("predictions") plt.title("SVI alone is not enough") plt.legend() plt.show()
RyvynYoung/COVID
svi_capstone/scripts_python/model_MAE.py
model_MAE.py
py
12,076
python
en
code
0
github-code
6
13919172422
from odoo import api, fields, models class ProjectTask(models.Model): _inherit = "project.task" @api.depends( "stage_id", "timesheet_ids.unit_amount", "estimate_adjustment", "planned_hours", "child_ids.timesheet_ids.unit_amount", "child_ids.planned_hours", "child_ids.effective_hours", "child_ids.subtask_effective_hours", "child_ids.stage_id", "product_backlog_id.task_hours", ) def _compute_hours_get(self): """ This method is used to calculate weightage based on task stage, timesheet amount, estimate adjustment, planned hours, child task planned hours, backlog task hours, child task effective hours etc """ for task in self: weightage = children_hours = 0 for child_task in task.child_ids: if child_task.stage_id and child_task.stage_id.fold: children_hours += ( child_task.effective_hours + child_task.subtask_effective_hours ) else: children_hours += max( child_task.planned_hours, child_task.effective_hours + child_task.subtask_effective_hours, ) task.subtask_effective_hours = children_hours task.effective_hours = sum(task.sudo().timesheet_ids.mapped("unit_amount")) task.remaining_hours = ( task.planned_hours - task.effective_hours - task.subtask_effective_hours ) # Commented this line as total hours replaced as total hours spent in v15 # here both line added total hours and total hours spent # task.total_hours = max(task.planned_hours, task.effective_hours) task.total_hours_spent = task.effective_hours + task.subtask_effective_hours # task.delay_hours = max(-task.remaining_hours, 0.0) story_estimated_hours = task.product_backlog_id.expected_hours planned_hours = task.planned_hours estimate_adjustment = task.estimate_adjustment if story_estimated_hours > 0.0: weightage = planned_hours / story_estimated_hours hours = planned_hours + estimate_adjustment # hours = hours if hours > 0 else 1 task.weightage = weightage if task.product_backlog_id.task_hours > 0: # New weightage calculation in Version 2 task.weightage = hours / task.product_backlog_id.task_hours # New progress calculation in Version 2 if task.effective_hours and hours > 0: task.progress = (task.effective_hours / hours) * 100 name = fields.Char("Homework", size=256, translate=True) email = fields.Char( "Send mail", size=256, help="An email will be sent upon completion and upon validation of the" "Task to the following recipients. Separate with comma (,)" "each recipient ex: [email protected], [email protected]", ) task_number = fields.Char( "Task Number", readonly=True, copy=False, size=64, help="Sequence of the task number", ) estimate_adjustment = fields.Float() weightage = fields.Float(compute="_compute_hours_get") schedule_date = fields.Datetime(help="Date scheduled for task") def _valid_field_parameter(self, field, name): return name == 'size' or super()._valid_field_parameter(field, name) @api.model def create(self, vals): result = super(ProjectTask, self).create(vals) if result.manager_id: result.message_unsubscribe(partner_ids=[result.manager_id.id]) return result
onesteinbv/ProjectManagement
project_scrum_agile_extended/models/project_task.py
project_task.py
py
3,845
python
en
code
1
github-code
6
38611022
import math


def list_mean(L):
    '''Compute the mean of an array.

    Expects a non-empty array.

    Parameters
    ----------
    L : list of int
        array containing numbers whose mean is desired.

    Returns
    -------
    m
        Arithmetic mean of the values in L
    '''
    if L is None:
        return None
    if len(L) == 0:
        return None
    # the original also accumulated the values in a dead loop; sum() suffices
    m = sum(L) / len(L)
    return m


def list_stdev(L):
    '''Compute the standard deviation of an array.

    Expects a non-empty array.

    Parameters
    ----------
    L : list of int
        Non-empty array containing numbers whose standard deviation is desired.

    Returns
    -------
    sd
        Standard deviation of the values in L
    '''
    if L is None:
        return None
    if len(L) == 0:
        return None
    mean = list_mean(L)
    sd = math.sqrt(sum([(mean - x) ** 2 for x in L]) / len(L))
    return sd
cu-swe4s-fall-2019/test-driven-development-adziulko
math_lib.py
math_lib.py
py
969
python
en
code
0
github-code
6
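Note that list_stdev divides by len(L), i.e. it computes the population standard deviation. The standard library makes that distinction explicit, which gives a quick cross-check (assuming the two functions above are importable from math_lib):

import statistics
from math_lib import list_mean, list_stdev

data = [2, 4, 4, 4, 5, 5, 7, 9]
assert list_mean(data) == statistics.mean(data)                   # both 5.0
# pstdev is the population form (divide by n); stdev would divide by n - 1
assert abs(list_stdev(data) - statistics.pstdev(data)) < 1e-12    # both 2.0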
36956862348
from angle_controller import * from x_controller import * from z_controller import * from multirotore import * class Autopilot: def __init__(self, x_target, z_target): self.drone = Multirotore(1.0, 0.25, 7.0*(10.0**(-5))) self.angle_controller = Angle_Controller(4.0, 1.57, 2.0, 0.2, 15.0) self.x_controller = X_Controller(0.4, 2.0, 0.5, 0.2, 0.52) self.z_controller = Z_Controller(4.0, 2.0, 20.0, 40.0, 15.0) self.x_target = x_target self.z_target = z_target def evaluate(self, delta_t): theta_target = (-1)*self.x_controller.evaluate(delta_t, self.x_target, self.drone.x, self.drone.vx) out_1 = self.angle_controller.evaluate(delta_t, theta_target, self.drone.theta, self.drone.omega) out_2 = self.z_controller.evaluate(delta_t, self.z_target, self.drone.z, self.drone.vz) f1 = out_2 - out_1 f2 = out_2 + out_1 self.drone.evaluate(delta_t, f1, f2)
SalvoScan/Progetto-Sistemi-Robotici
Sorgenti Progetto/autopilot.py
autopilot.py
py
881
python
en
code
0
github-code
6
88489070
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ CS224N 2018-19: Homework 5 model_embeddings.py: Embeddings for the NMT model Pencheng Yin <[email protected]> Sahil Chopra <[email protected]> Anand Dhoot <[email protected]> Michael Hahn <[email protected]> """ import torch.nn as nn # Do not change these imports; your module names should be # `CNN` in the file `cnn.py` # `Highway` in the file `highway.py` # Uncomment the following two imports once you're ready to run part 1(j) from cnn import CNN from highway import Highway # End "do not change" class ModelEmbeddings(nn.Module): """ Class that converts input words to their CNN-based embeddings. """ def __init__(self, embed_size, vocab): """ Init the Embedding layer for one language @param embed_size (int): Embedding size (dimensionality) for the output @param vocab (VocabEntry): VocabEntry object. See vocab.py for documentation. """ super(ModelEmbeddings, self).__init__() ## A4 code # pad_token_idx = vocab.src['<pad>'] # self.embeddings = nn.Embedding(len(vocab.src), embed_size, padding_idx=pad_token_idx) ## End A4 code ### YOUR CODE HERE for part 1j self.e_char = 50 self.w_word = 21 # same as max_word_length. same value (21) used in function pad_sents_char in utils.py self.embed_size = embed_size # same as e_word self.char_embedding = nn.Embedding(len(vocab.char2id), self.e_char, vocab.char2id['<pad>']) self.cnn = CNN(self.e_char, self.embed_size, self.w_word) self.highway = Highway(self.embed_size) ### END YOUR CODE def forward(self, input): """ Looks up character-based CNN embeddings for the words in a batch of sentences. @param input: Tensor of integers of shape (sentence_length, batch_size, max_word_length) where each integer is an index into the character vocabulary @param output: Tensor of shape (sentence_length, batch_size, embed_size), containing the CNN-based embeddings for each word of the sentences in the batch """ ## A4 code # output = self.embeddings(input) # return output ## End A4 code ### YOUR CODE HERE for part 1j # x_padded has shape : (sentence_length, batch_size, max_word_length) x_padded = input # x_emb has shape : (sentence_length, batch_size, max_word_length, e_char) x_emb = self.char_embedding(x_padded) # x_reshape_4D has shape : (sentence_length, batch_size, e_char, max_word_length) x_reshape_4D = x_emb.permute(0, 1, 3, 2) sentence_length, batch_size, e_char, max_word_length = x_reshape_4D.shape # x_reshape has shape : (-1, e_char, max_word_length) x_reshape = x_reshape_4D.view(-1, e_char, max_word_length) # x_conv_out has shape : (-1, e_word) x_conv_out = self.cnn(x_reshape) # x_word_embed has shape : (-1, e_word) x_word_embed = self.highway(x_conv_out) output = x_word_embed.view(sentence_length, batch_size, self.embed_size) return output ### END YOUR CODE
abgoswam/CS224N-Natural-Language-Processing-with-Deep-Learning
a5/model_embeddings.py
model_embeddings.py
py
3,223
python
en
code
18
github-code
6
14254090936
from __future__ import division, print_function from __future__ import absolute_import, unicode_literals from _GTW import GTW from _TFL import TFL from _TFL.defaultdict import defaultdict import _GTW._OMP._PAP._E164.Country class Country_33 (GTW.OMP.PAP.E164.Country_M) : """Provide phone number mapping for France.""" generated_from = \ "https://en.wikipedia.org/wiki/Telephone_numbers_in_France" generation_date = "2015-07-27 10:05" formatted_sn = GTW.OMP.PAP.E164.Country_M.formatted_sn_4x2 ndc_info_map = \ { "1" : "Île-de-France" , "2" : "Northwest France" , "3" : "Northeast France" , "4" : "Southeast France" , "5" : "Southwest France" , "6" : "Mobile phone services" , "7" : "Mobile phone services" , "8" : "Freephone (numéro vert) and shared-cost services." , "9" : "Non-geographic number (used by VoIP services)" } ndc_max_length = 1 ndc_types_normal = {"geographic", "mobile", "voip"} ndc_usage_map = \ { "1" : "geographic" , "2" : "geographic" , "3" : "geographic" , "4" : "geographic" , "5" : "geographic" , "6" : "mobile" , "7" : "mobile" , "8" : "service" , "9" : "voip" } sn_max_length_map = defaultdict (lambda : 8) sn_mix_length_map = defaultdict (lambda : 8) Country = Country_33 # end class ### __END__ GTW.OMP.PAP.E164._Country__33
xiaochang91/tapyr
_GTW/_OMP/_PAP/_E164/_Country__33.py
_Country__33.py
py
1,578
python
en
code
0
github-code
6
5381080252
from socket import *
import os  # needed for the os._exit() calls below

stop = False  # shared flag used by const_send to stop its loop


class sending():
    def __init__(self, ip):
        # Opens the socket (connection)
        self.host = ip
        self.port = 13000
        self.addr = (self.host, self.port)
        self.UDPSock = socket(AF_INET, SOCK_DGRAM)

    def send(self, data):
        # Sends parameter data to other user
        self.UDPSock.sendto(data.encode(), self.addr)

    def const_send(self, data):
        # Repeatedly sends parameter data to other user until `stop` is set
        global stop
        while stop == False:
            self.UDPSock.sendto(data.encode(), self.addr)

    def close(self):  # was missing `self`
        # Closes the socket
        self.UDPSock.close()
        os._exit(0)  # os._exit requires an exit status


class recieving():
    def __init__(self):
        # Opens your socket for recieving data
        self.host = ""
        self.port = 13000
        self.buf = 320
        self.addr = (self.host, self.port)
        self.UDPSock = socket(AF_INET, SOCK_DGRAM)
        self.UDPSock.bind(self.addr)

    def recieve(self, select, attack, player, other, active):
        # Blocks until a two-token message arrives, then returns it split
        recieved = "None"
        while len(recieved) != 2:
            (data, addr) = self.UDPSock.recvfrom(self.buf)
            recieved = data.decode()
            recieved = recieved.split()
        return recieved

    def connect(self):
        # Blocks until any datagram arrives and returns its decoded payload
        recieved = None
        while recieved == None:
            (data, addr) = self.UDPSock.recvfrom(self.buf)
            recieved = data.decode()
        return recieved

    def exit(self):
        # Closes Socket
        self.UDPSock.close()
        os._exit(0)


other_ip = " "  # Put ip address of other person here
Sender = sending(other_ip)
Recieve = recieving()
NightHydra/ColorCardBasic
Color CardPVP/Networking.py
Networking.py
py
1,901
python
en
code
0
github-code
6
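A minimal loopback smoke test for the two classes above, run in isolation (the module-level Sender/Recieve instances would already hold port 13000). Binding the receiver before sending matters because a datagram sent to an unbound port is silently dropped:

receiver = recieving()        # binds 0.0.0.0:13000 first, so the datagram gets queued
sender = sending("127.0.0.1")
sender.send("hello")          # fire-and-forget datagram
print(receiver.connect())     # blocks on recvfrom -> "hello"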
36321645754
import pygame

from dino_runner.components.dinosaur import Dinosaur
from dino_runner.components.obstacles.obstacle_manager import ObstacleManager
from dino_runner.components.player_hearts.player_heart_manager import PlayerHeartManager
from dino_runner.components.power_ups.power_up_manager import PowerUpManager
from dino_runner.utils.constants import BG, DEFAULT_TYPE, DIE_IMG, HAMMER_TYPE, ICON, RUNNING, SCREEN_HEIGHT, SCREEN_WIDTH, SHIELD_TYPE, TITLE, FPS
from .score import Score
from dino_runner.utils.constants import FONT_STYLE


class Game:
    def __init__(self):
        pygame.init()
        pygame.display.set_caption(TITLE)
        pygame.display.set_icon(ICON)
        self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
        self.clock = pygame.time.Clock()
        self.playing = False
        self.executing = False
        self.game_speed = 15
        self.x_pos_bg = 0
        self.y_pos_bg = 380
        self.player = Dinosaur()
        self.obstacle_manager = ObstacleManager()
        self.power_up_manager = PowerUpManager()
        self.heart_manager = PlayerHeartManager()
        self.death_count = 0
        self.score = Score()

    def execute(self):
        self.executing = True
        while self.executing:
            if not self.playing:
                self.show_menu()
        pygame.quit()

    def run(self):
        self.game_speed = 15
        self.playing = True
        self.obstacle_manager.reset_obstacles()
        self.score.reset_score()
        self.power_up_manager.reset_power_ups()
        self.heart_manager.reset_hearts()
        while self.playing:
            self.events()
            self.update()
            self.draw()

    def events(self):
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.playing = False

    def update(self):
        user_input = pygame.key.get_pressed()
        self.player.update(user_input)
        self.obstacle_manager.update(self.game_speed, self.player, self.on_death)
        self.score.update(self)
        self.power_up_manager.update(self.game_speed, self.player, self.score.score)

    def draw(self):
        self.clock.tick(FPS)
        self.screen.fill((255, 255, 255))
        self.draw_background()
        self.player.draw(self.screen)
        self.obstacle_manager.draw(self.screen)
        self.score.draw(self.screen)
        self.power_up_manager.draw(self.screen)
        self.draw_power_up_active(self.screen)
        self.heart_manager.draw(self.screen)
        pygame.display.update()
        pygame.display.flip()

    def draw_background(self):
        image_width = BG.get_width()
        self.screen.blit(BG, (self.x_pos_bg, self.y_pos_bg))
        self.screen.blit(BG, (image_width + self.x_pos_bg, self.y_pos_bg))
        if self.x_pos_bg <= -image_width:
            self.screen.blit(BG, (image_width + self.x_pos_bg, self.y_pos_bg))
            self.x_pos_bg = 0
        self.x_pos_bg -= self.game_speed

    def show_menu(self):
        self.screen.fill((127, 255, 212))  # paint the window
        half_screen_height = SCREEN_HEIGHT // 2
        half_screen_width = SCREEN_WIDTH // 2
        font = pygame.font.SysFont(FONT_STYLE, 30)
        if self.death_count == 0:  # show the welcome message
            self.screen.blit(RUNNING[0], (half_screen_width - 35, half_screen_height - 140))  # show the icon
            text_component = font.render("Press any key to start", True, (20, 51, 51))
        else:
            self.screen.blit(DIE_IMG, (half_screen_width - 35, half_screen_height - 140))
            text_component = font.render(f"Number of deaths : {self.death_count}", True, (20, 51, 51))  # show the current death count
            self.screen.blit(text_component, (half_screen_width - 300, half_screen_height + 30))
            text_component = font.render("You die, press any key to restart", True, (20, 51, 51))  # show the restart message
            self.score.show_score(self.screen)  # show the score
        text_rect = text_component.get_rect()
        text_rect.center = (half_screen_width, half_screen_height)
        self.screen.blit(text_component, text_rect)
        pygame.display.update()  # refresh the window
        self.handle_key_events_on_menu()

    # listen for menu key events
    def handle_key_events_on_menu(self):
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.executing = False
            elif event.type == pygame.KEYDOWN:
                self.run()

    def on_death(self):
        has_shield = self.player.type == SHIELD_TYPE
        is_invencible = has_shield or self.heart_manager.heart_count > 0
        has_hammer = self.player.type == HAMMER_TYPE
        is_revitalizing = has_hammer or self.heart_manager.heart_count > 0
        if has_hammer:
            if self.heart_manager.heart_count < 6:
                self.heart_manager.increase_heart()
        if not has_shield and not has_hammer:
            self.heart_manager.reduce_heart()
        if not is_invencible and not is_revitalizing:
            pygame.time.delay(500)
            self.playing = False
            self.death_count += 1
        return is_invencible and is_revitalizing

    def draw_power_up_active(self, screen):
        if self.player.has_power_up:
            time_to_show = round((self.player.power_up_time_up - pygame.time.get_ticks()) / 1000, 2)
            if time_to_show >= 0:
                font = pygame.font.SysFont(FONT_STYLE, 18)
                text_component = font.render(f"{self.player.type.capitalize()} enabled for {time_to_show} seconds", True, (0, 0, 0))
                text_rect = text_component.get_rect()
                text_rect.center = (500, 40)
                screen.blit(text_component, text_rect)
            else:
                self.player.has_power_up = False
                self.player.type = DEFAULT_TYPE
Shrinmi/JS-Dino-Runner-Grupo-1
dino_runner/components/game.py
game.py
py
5,940
python
en
code
null
github-code
6
26040479196
from __future__ import annotations from dataclasses import dataclass from typing import Iterable from pants.backend.javascript import install_node_package from pants.backend.javascript.install_node_package import ( InstalledNodePackageRequest, InstalledNodePackageWithSource, ) from pants.backend.javascript.nodejs_project_environment import NodeJsProjectEnvironmentProcess from pants.backend.javascript.package_json import ( NodeBuildScriptEntryPointField, NodeBuildScriptExtraEnvVarsField, NodePackageDependenciesField, ) from pants.core.goals.run import RunFieldSet, RunInSandboxBehavior, RunRequest from pants.core.util_rules.environments import EnvironmentField from pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest from pants.engine.internals.selectors import Get from pants.engine.process import Process from pants.engine.rules import Rule, collect_rules, rule from pants.engine.unions import UnionRule @dataclass(frozen=True) class RunNodeBuildScriptFieldSet(RunFieldSet): required_fields = (NodeBuildScriptEntryPointField, NodePackageDependenciesField) run_in_sandbox_behavior = RunInSandboxBehavior.RUN_REQUEST_HERMETIC entry_point: NodeBuildScriptEntryPointField extra_env_vars: NodeBuildScriptExtraEnvVarsField environment: EnvironmentField @rule async def run_node_build_script( field_set: RunNodeBuildScriptFieldSet, ) -> RunRequest: installation = await Get( InstalledNodePackageWithSource, InstalledNodePackageRequest(field_set.address) ) target_env_vars = await Get( EnvironmentVars, EnvironmentVarsRequest(field_set.extra_env_vars.value or ()) ) process = await Get( Process, NodeJsProjectEnvironmentProcess( installation.project_env, args=("--prefix", "{chroot}", "run", str(field_set.entry_point.value)), description=f"Running {str(field_set.entry_point.value)}.", input_digest=installation.digest, extra_env=target_env_vars, ), ) return RunRequest( digest=process.input_digest, args=process.argv, extra_env=process.env, immutable_input_digests=process.immutable_input_digests, ) def rules() -> Iterable[Rule | UnionRule]: return [*collect_rules(), *install_node_package.rules(), *RunNodeBuildScriptFieldSet.rules()]
pantsbuild/pants
src/python/pants/backend/javascript/run/rules.py
rules.py
py
2,379
python
en
code
2,896
github-code
6
41464745589
# Works with Microsoft Windows dos box # Shows some use of WConio written by Chris Gonnerman # Written by Priyend Somaroo # Copyright (c) 2008 Vardaan Enterprises, www.vardaan.com # Use and distribute freely. # No liability for any use of this code will be accepted. Use is # without any warranty whatsoever # Requires the package WConio by Chris Gonnerman # E-Mail : [email protected] # Web : http://newcenturycomputers.net/projects/wconio.html import WConio #Store current attribute settings old_setting = WConio.gettextinfo()[4] & 0x00FF #Clear the screen WConio.clrscr() #Display something in low video WConio.lowvideo() WConio.cputs("Low video\r\n") #Display something in high video WConio.highvideo() WConio.cputs("High video\r\n") #Display something in normal video WConio.normvideo() WConio.cputs("Normal video\r\n") #Display some text in color WConio.textattr(WConio.LIGHTRED) WConio.cputs("Light Red text\r\n") #Display some more text in color WConio.textattr(WConio.LIGHTBLUE) WConio.cputs("Light BLUE text\r\n") #leave a blank line - this shows you that print still works print #Set heading colour but using print WConio.textattr(WConio.LIGHTGREEN) print("Times table\r\n") #Back to normal intensity for white WConio.normvideo() for i in range(12) : WConio.textattr(WConio.WHITE) a = "%2d * 2 = " % (i) WConio.cputs(a) WConio.textattr(WConio.YELLOW) a = "%2d\r\n" % (i*2) WConio.cputs(a) WConio.textattr(WConio.CYAN) WConio.cputs("\n\nPress any key to end\r\n") #Wait for a key to be pressed WConio.getch() #Retore old attribute settings WConio.textattr(old_setting)
egigoka/test
just_learning_some_new_stuff/WConioExample.py
WConioExample.py
py
1,700
python
en
code
2
github-code
6
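WConio only works in a Windows console. The same colored-text effect is portable with colorama; a sketch of the equivalent calls:

from colorama import Fore, Style, init

init()  # wraps stdout so ANSI color codes also work in the classic Windows console
print(Fore.LIGHTRED_EX + "Light Red text" + Style.RESET_ALL)
print(Fore.YELLOW + "12 * 2 = 24" + Style.RESET_ALL)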
27009635088
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, GradientBoostingRegressor


def run(x_train, y_train, x_test, y_test, base_estimator, estimator_params, n_estimators, learning_rate, loss, random_state):
    base_estimator = getEstimator(base_estimator, estimator_params)
    reg = AdaBoostRegressor(base_estimator=base_estimator,
                            n_estimators=n_estimators,
                            learning_rate=learning_rate,
                            loss=loss,
                            random_state=random_state).fit(x_train, y_train)
    return {'train_predict': reg.predict(x_train).tolist(),
            'test_predict': reg.predict(x_test).tolist(),
            'train_score': reg.score(x_train, y_train),
            'test_score': reg.score(x_test, y_test),
            'estimator_weights_': reg.estimator_weights_.tolist(),
            'estimator_errors_': reg.estimator_errors_.tolist(),
            'feature_importances_': reg.feature_importances_.tolist()
            }


def getEstimator(base_estimator, estimator_params):
    if base_estimator is None:
        return base_estimator
    # str.replace returns a new string; the result must be assigned back
    base_estimator = base_estimator.replace("(", "").replace(")", "")
    if estimator_params is None:
        estimator_params = {}
    # keyword arguments need ** (dict unpacking), not * (sequence unpacking)
    return {
        'GradientBoostingRegressor': GradientBoostingRegressor(**estimator_params),
        'ExtraTreesRegressor': ExtraTreesRegressor(**estimator_params),
        'RandomForestRegressor': RandomForestRegressor(**estimator_params)
    }.get(base_estimator, RandomForestRegressor(max_depth=3))
lisunshine1234/mlp-algorithm-python
machine_learning/regression/Ensemble methods/AdaBoostRegressor/run.py
run.py
py
1,615
python
en
code
0
github-code
6
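A quick usage sketch of the run wrapper above on synthetic data. Note that scikit-learn 1.2 renamed AdaBoost's base_estimator argument to estimator, so this assumes an older release matching the module's code:

from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split

X, y = make_regression(n_samples=200, n_features=5, noise=0.2, random_state=0)
x_train, x_test, y_train, y_test = train_test_split(X, y, random_state=0)

result = run(x_train, y_train, x_test, y_test,
             base_estimator="RandomForestRegressor",
             estimator_params={"max_depth": 3},
             n_estimators=50, learning_rate=1.0,
             loss="linear", random_state=0)
print(result["test_score"])   # R^2 on the held-out split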
6513615937
from unittest import TestCase from app import app, games # Make Flask errors be real errors, not HTML pages with error info app.config["TESTING"] = True # This is a bit of hack, but don't use Flask DebugToolbar app.config["DEBUG_TB_HOSTS"] = ["dont-show-debug-toolbar"] class BoggleAppTestCase(TestCase): """Test flask app of Boggle.""" def setUp(self): """Stuff to do before every test.""" self.client = app.test_client() app.config["TESTING"] = True def test_homepage(self): """Make sure information is in the session and HTML is displayed""" with self.client as client: response = client.get("/") ... # test that you're getting a template html = response.get_data(as_text=True) # top view verification that route works self.assertEqual(response.status_code, 200) # verify unique html is loading self.assertIn('<table class="board">', html) def test_api_new_game(self): """Test starting a new game.""" with self.client as client: # write a test for this route # save response from get reqeust to route resp = client.get("/api/new-game") data = resp.json # top view cert self.assertEqual(resp.status_code, 200) # is game id a string in json respo self.assertIsInstance(data["gameId"], str) # is board a list on json respo self.assertIsInstance(data["board"], list) # is games in json respon self.assertIn(data["gameId"], games) def test_score_word(self): """ the way a word is validated with defined class methods """ with self.client as client: resp = client.get("api/new-game") # get the json from resp data = resp.json # generate mock board (5x5) gameId = data["gameId"] game = games[gameId] # mock board hard code game.board[0] = ["W", "O", "R", "D", "S"] game.board[1] = ["S", "H", "O", "E", "S"] game.board[2] = ["L", "I", "G", "H", "T"] game.board[3] = ["K", "I", "D", "S", "A"] game.board[4] = ["I", "R", "A", "W", "R"] # high level cert self.assertEqual(resp.status_code, 200) # verify post request from /score-word resp = client.post( "/api/score-word", json={"gameId": gameId, "word": "AXYSB"} ) self.assertEqual(resp.json, {"result": "not_a_word"}) # not on board resp = client.post( "/api/score-word", json={"gameId": gameId, "word": "ROCKS"} ) self.assertEqual(resp.json, {"result": "not_on_board"}) # valid resp = client.post( "/api/score-word", json={"gameId": gameId, "word": "WORDS"} ) self.assertEqual(resp.json, {"result": "word_OK"})
gary-rivera/flask-boggle
test_app.py
test_app.py
py
3,070
python
en
code
0
github-code
6
44191894536
from django.conf.urls import url  # include can be added here when needed
from django.contrib import admin

from .views import (
    post_home,
    post_delete,
    post_update,
    post_edit,
    post_create,
    post_save,
)

urlpatterns = [
    url(r'^$', post_home),
    url(r'^delete/$', post_delete),
    url(r'^update/$', post_update),
    url(r'^edit/$', post_edit),
    url(r'^create/$', post_create),
    url(r'^save/$', post_save),
]
hmoshabbar/DjangoProject
posts/urls.py
urls.py
py
442
python
en
code
0
github-code
6
70510668989
# -------------------------------
# Scope: the range in which a variable can exist
# -------------------------------
# Kinds of variables
# - Global variable: used throughout the file; shared by the functions, classes, etc. in the same file
# - Local variable: usable only inside a specific block
#
# When a global variable and a local variable have the same name,
# the variable in the same (inner) block takes priority.
# -------------------------------
year = 2022
month = 12


def showToday(day):
    # declare that the global variable is used
    global year
    year += 1
    print(f'Today is {year}-{month}-{day}.')


print(f'[Before] year=>{year}')
showToday(26)
print(f'[After] year=>{year}')
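To make the contrast concrete, a small sketch (the function name is hypothetical) of what happens without the global declaration: the assignment marks `year` as local, so reading it first raises UnboundLocalError.

def showTodayLocal(day):
    try:
        year += 1  # no `global year` declaration here
    except UnboundLocalError as e:
        print('UnboundLocalError:', e)


showTodayLocal(26)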
CieData/Study
Python,Pandas/ex_scope.py
ex_scope.py
py
740
python
ko
code
1
github-code
6
7961667082
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import sys

from . import api
from . import gui

try:
    import urllib.parse as urllib
except ImportError:
    import urllib


def play(**kwargs):
    import xbmcaddon
    addon = xbmcaddon.Addon(id='plugin.video.ran_de')
    height = (234, 270, 396, 480, 540, 720)[int(addon.getSetting('video.quality'))]
    resource = urllib.unquote_plus(kwargs['resource'])
    video = api.get_video_url(resource, height)
    if video:
        gui.play(video)


def videos(**kwargs):
    resource = urllib.unquote_plus(kwargs['resource'])
    reliveOnly = kwargs['reliveOnly']
    api.list_videos(resource, reliveOnly)


def index():
    from . import thumbnails
    live_caption = api.get_number_livestreams()
    if live_caption:
        live_caption = '[B]Live (%s)[/B]' % live_caption
    else:
        live_caption = 'Live (%s)' % live_caption
    gui.add_folder(live_caption, thumbnails.THUMB_MAIN,
                   {'f': 'videos', 'resource': '/ran-mega/mobile/v1/livestreams.json', 'reliveOnly': False},
                   'aktuelle Live Streams')
    gui.add_folder('Neueste Videos', thumbnails.THUMB_MAIN,
                   {'f': 'videos', 'resource': '/ran-mega/mobile/v1/videos.json', 'reliveOnly': False},
                   'Liste der neuesten Videos - über alle Kategorien')
    gui.add_folder('Neueste Videos - [COLOR blue] Re-Live only [/COLOR]', thumbnails.THUMB_MAIN,
                   {'f': 'videos', 'resource': '/ran-mega/mobile/v1/videos.json', 'reliveOnly': True},
                   'Liste der neuesten Re-Lives - über alle Kategorien')
    gui.add_folder('Fussball', thumbnails.THUMB_MAIN,
                   {'f': 'videos', 'resource': '/ran-mega/mobile/v1/videos/fussball.json', 'reliveOnly': False},
                   'Liste der neuesten Fussball-Videos')
    gui.add_folder('US-Sports', thumbnails.THUMB_MAIN,
                   {'f': 'videos', 'resource': '/ran-mega/mobile/v1/videos/us-sport.json', 'reliveOnly': False},
                   'Liste der neuesten US-Sport-Videos (NBA, NFL, NHL)')
    gui.add_folder('US-Sports: [COLOR blue] Re-Live only [/COLOR]', thumbnails.THUMB_MAIN,
                   {'f': 'videos', 'resource': '/ran-mega/mobile/v1/videos/us-sport.json', 'reliveOnly': True},
                   'Liste der neuesten Re-Live-Videos des US-Sports auf ran.de (NBA, NFL, NHL)')
    gui.add_folder('Tennis', thumbnails.THUMB_MAIN,
                   {'f': 'videos', 'resource': '/ran-mega/mobile/v1/videos/tennis.json', 'reliveOnly': False},
                   'Liste der neuesten Tennis-Videos')
    gui.add_folder('Handball', thumbnails.THUMB_MAIN,
                   {'f': 'videos', 'resource': '/ran-mega/mobile/v1/videos/handball.json', 'reliveOnly': False},
                   'Liste der neuesten Handball-Videos')
    gui.add_folder('Boxen', thumbnails.THUMB_MAIN,
                   {'f': 'videos', 'resource': '/ran-mega/mobile/v1/videos/boxen.json', 'reliveOnly': False},
                   'Liste der neuesten Box-Videos')
    gui.add_folder('Darts', thumbnails.THUMB_MAIN,
                   {'f': 'videos', 'resource': '/ran-mega/mobile/v1/videos/darts.json', 'reliveOnly': False},
                   'Liste der neuesten Darts-Videos')
    gui.add_folder('eSports', thumbnails.THUMB_MAIN,
                   {'f': 'videos', 'resource': '/ran-mega/mobile/v1/videos/esport.json', 'reliveOnly': False},
                   'Liste der neuesten eSports-Videos')
    gui.add_folder('DTM', thumbnails.THUMB_MAIN,
                   {'f': 'videos', 'resource': '/ran-mega/mobile/v1/videos/dtm.json', 'reliveOnly': False},
                   'Liste der neuesten Videos der Deutschen Tourenwagen Meisterschaft (DTM)')
    gui.end_listing()


d = dict(p.split('=') for p in sys.argv[2][1:].split('&') if len(p.split('=')) == 2)
f = d.pop('f', 'index')
exec('{0}(**d)'.format(f))
Maven85/plugin.video.ran_de
resources/lib/index.py
index.py
py
3,418
python
en
code
0
github-code
6
8665336110
from selenium import webdriver
import time
from selenium.webdriver.common.action_chains import ActionChains

browser = webdriver.Chrome()
browser.get("http://www.baidu.com/")
browser.maximize_window()
time.sleep(3)
browser.find_element_by_id("kw").send_keys("哈哈")
# locate the "Baidu Search" button
name = browser.find_element_by_id("su")
# right click
# ActionChains(browser).context_click(name).perform()
# double click
ActionChains(browser).double_click(name).perform()
time.sleep(3)
Zshuangshuang/Reload
2021_03_12自动化学习/src2021_03_12/testDemo12.py
testDemo12.py
py
472
python
en
code
0
github-code
6
36040788296
from ParadoxTrading.Chart import Wizard
from ParadoxTrading.Fetch.ChineseFutures import FetchDominantIndex
from ParadoxTrading.Indicator import ZigZag

fetcher = FetchDominantIndex()

market = fetcher.fetchDayData('20100701', '20170101', 'rb')
zigzag = ZigZag(0.1).addMany(market).getAllData()

wizard = Wizard()

price_view = wizard.addView('price', _adaptive=True)
price_view.addLine('market', market.index(), market['closeprice'])
price_view.addLine('zigzag', zigzag.index(), zigzag['zigzag'])

wizard.show()
ppaanngggg/ParadoxTrading
samples/indicator/zigzag_test.py
zigzag_test.py
py
510
python
en
code
51
github-code
6
30650525881
# Matrix Game experiment
# Author: Lucas Cassano
# Paper: "Logical Team Q-learning"
# ===================================

# Import necessary packages
from absl import app
from absl import flags
import numpy as np
import matplotlib.pyplot as plt

import q_mix

flags.DEFINE_string('save_path', '/tmp/', 'directory to save results.')


def main(argv):
    """Run simple 2 agent matrix game."""

    nmbr_games = 500
    seed = 1
    mu = 1e-1
    nmbr_agents = 2
    qmix_extra_iters = 100

    np.random.seed(seed)
    payoff = np.array([[0, 2, 0], [0, 1, 2]])  # np.array([[8, -12, -12], [-12, 0, 0], [-12, 0, 0]])
    q_joint = np.zeros_like(payoff)
    nmbr_actions_1, nmbr_actions_2 = payoff.shape
    q_logic_b = {0: np.zeros([nmbr_games, nmbr_actions_1]), 1: np.zeros([nmbr_games, nmbr_actions_2])}
    q_logic_u = {0: np.zeros([nmbr_games, nmbr_actions_1]), 1: np.zeros([nmbr_games, nmbr_actions_2])}
    q_dist = {0: np.zeros([nmbr_games, nmbr_actions_1]), 1: np.zeros([nmbr_games, nmbr_actions_2])}
    q_ind = {0: np.zeros([nmbr_games, nmbr_actions_1]), 1: np.zeros([nmbr_games, nmbr_actions_2])}
    q_tran = {0: np.zeros([nmbr_games, nmbr_actions_1]), 1: np.zeros([nmbr_games, nmbr_actions_2])}
    q_mix_class = q_mix.Qmix(payoff.shape, mu / 2)
    q_mix_out = {0: np.zeros([nmbr_games, nmbr_actions_1]), 1: np.zeros([nmbr_games, nmbr_actions_2])}

    for n in range(nmbr_games - 1):
        # pick actions uniformly
        actions = np.array([np.random.randint(nmbr_actions_1), np.random.randint(nmbr_actions_2)])
        r = payoff[actions[0]][actions[1]]

        # Logical Team Q-learning
        for agent in range(nmbr_agents):
            q_logic_b[agent][n + 1] = q_logic_b[agent][n]
            q_logic_u[agent][n + 1] = q_logic_u[agent][n]
            chosen_action = actions[agent]
            if actions[nmbr_agents - 1 - agent] == np.argmax(q_logic_b[nmbr_agents - 1 - agent][n]):
                q_logic_b[agent][n + 1][chosen_action] += mu * (r - q_logic_b[agent][n][chosen_action])
                q_logic_u[agent][n + 1][chosen_action] += mu * (r - q_logic_u[agent][n][chosen_action])
            elif r > q_logic_b[agent][n][chosen_action]:
                q_logic_b[agent][n + 1][chosen_action] += mu * (r - q_logic_b[agent][n][chosen_action])

        # Independent Q-learning (updates its own table q_ind)
        for agent in range(nmbr_agents):
            q_ind[agent][n + 1] = q_ind[agent][n]
            chosen_action = actions[agent]
            q_ind[agent][n + 1][chosen_action] += mu * (r - q_ind[agent][n][chosen_action])

        # Distributed Q-learning
        for agent in range(nmbr_agents):
            q_dist[agent][n + 1] = q_dist[agent][n]
            chosen_action = actions[agent]
            if r > q_dist[agent][n][chosen_action]:
                q_dist[agent][n + 1][chosen_action] += mu * (r - q_dist[agent][n][chosen_action])

        # QTRAN-base
        q_joint[actions[0], actions[1]] -= (q_joint[actions[0], actions[1]] - r)
        q_j = q_joint[actions[0], actions[1]]
        q_tilde = q_tran[0][n][actions[0]] + q_tran[1][n][actions[1]]
        for agent in range(nmbr_agents):
            q_tran[agent][n + 1] = q_tran[agent][n]
            chosen_action = actions[agent]
            if q_tran[0][n][actions[0]] == np.max(q_tran[0][n]) and q_tran[1][n][actions[1]] == np.max(q_tran[1][n]):
                q_tran[agent][n + 1][chosen_action] -= mu * (q_tilde - q_j)
            else:
                q_tran[agent][n + 1][chosen_action] -= mu * np.minimum(q_tilde - q_j, 0)

        # QMIX
        q_mix_out[0][n + 1] = q_mix_out[0][n]
        q_mix_out[1][n + 1] = q_mix_out[1][n]
        for _ in range(qmix_extra_iters):  # needs far more iterations to converge
            actions = np.array([np.random.randint(nmbr_actions_1), np.random.randint(nmbr_actions_2)])
            r = payoff[actions[0]][actions[1]]
            q1, q2, qmix = q_mix_class.learn(actions, r)
            q_mix_out[0][n + 1][actions[0]] = q1
            q_mix_out[1][n + 1][actions[1]] = q2

    # Print final QMIX matrices
    qmix1 = np.zeros([nmbr_actions_1])
    qmix2 = np.zeros([nmbr_actions_2])
    qmix_total = np.zeros([nmbr_actions_1, nmbr_actions_2])
    for a1 in range(nmbr_actions_1):
        for a2 in range(nmbr_actions_2):
            qmix1[a1], qmix2[a2], qmix_total[a1, a2] = q_mix_class.obtain_q([a1, a2])
    print(qmix1)
    print(qmix2)
    print(qmix_total)

    # Plot results
    fig1, ax1 = plt.subplots()
    plt.xlabel('Games', fontsize=25)
    plt.ylabel('Q-values', fontsize=25)
    ax1.plot(np.arange(start=0, stop=nmbr_games), q_logic_b[0], 'b')
    ax1.plot(np.arange(start=0, stop=nmbr_games), q_logic_b[1], 'r')
    ax1.set_yticks(np.arange(0, 2.01, step=0.5))
    ax1.tick_params(axis='both', which='major', labelsize=15)
    plt.grid()
    fig1.savefig(fname='biased_logic_matrix_game_1', bbox_inches='tight')

    fig2, ax2 = plt.subplots()
    plt.xlabel('Games', fontsize=25)
    plt.ylabel('Q-values', fontsize=25)
    ax2.plot(np.arange(start=0, stop=nmbr_games), q_logic_u[0], 'b')
    ax2.plot(np.arange(start=0, stop=nmbr_games), q_logic_u[1], 'r')
    ax2.set_yticks(np.arange(0, 2.01, step=0.5))
    ax2.tick_params(axis='both', which='major', labelsize=15)
    plt.grid()
    fig2.savefig(fname='unbiased_logic_matrix_game_1', bbox_inches='tight')

    fig3, ax3 = plt.subplots()
    plt.xlabel('Games', fontsize=25)
    plt.ylabel('Q-values', fontsize=25)
    ax3.plot(np.arange(start=0, stop=nmbr_games), q_dist[0], 'b')
    ax3.plot(np.arange(start=0, stop=nmbr_games), q_dist[1], 'r')
    ax3.set_yticks(np.arange(0, 2.01, step=0.5))
    ax3.tick_params(axis='both', which='major', labelsize=15)
    plt.grid()
    fig3.savefig(fname='q_dist_matrix_game_1', bbox_inches='tight')

    fig4, ax4 = plt.subplots()
    plt.xlabel('Games', fontsize=25)
    plt.ylabel('Q-values', fontsize=25)
    ax4.plot(np.arange(start=0, stop=nmbr_games * qmix_extra_iters, step=qmix_extra_iters), q_mix_out[0], 'b')
    ax4.plot(np.arange(start=0, stop=nmbr_games * qmix_extra_iters, step=qmix_extra_iters), q_mix_out[1], 'r')
    ax4.tick_params(axis='both', which='major', labelsize=15)
    plt.grid()
    fig4.savefig(fname='q_mix_matrix_game_1', bbox_inches='tight')

    fig5, ax5 = plt.subplots()
    plt.xlabel('Games', fontsize=25)
    plt.ylabel('Q-values', fontsize=25)
    ax5.plot(np.arange(start=0, stop=nmbr_games), q_ind[0], 'b')
    ax5.plot(np.arange(start=0, stop=nmbr_games), q_ind[1], 'r')
    ax5.set_yticks(np.arange(0, 2.01, step=0.5))
    ax5.tick_params(axis='both', which='major', labelsize=15)
    plt.grid()
    fig5.savefig(fname='ind_q_matrix_game_1', bbox_inches='tight')

    fig6, ax6 = plt.subplots()
    plt.xlabel('Games', fontsize=25)
    plt.ylabel('Q-values', fontsize=25)
    ax6.plot(np.arange(start=0, stop=nmbr_games), q_tran[0], 'b')
    ax6.plot(np.arange(start=0, stop=nmbr_games), q_tran[1], 'r')
    ax6.set_yticks(np.arange(0, 2.01, step=0.5))
    ax6.tick_params(axis='both', which='major', labelsize=15)
    plt.grid()
    fig6.savefig(fname='q_tran_matrix_game_1', bbox_inches='tight')

    print(np.expand_dims(q_tran[0][-1], axis=1))
    print(np.expand_dims(q_tran[1][-1], axis=0))
    print(np.expand_dims(q_tran[0][-1], axis=1) + np.expand_dims(q_tran[1][-1], axis=0))

    return 1


if __name__ == '__main__':
    app.run(main)
lcassano/Logical_Team_Q_Learning_paper
matrix_game/run_matrix_exp.py
run_matrix_exp.py
py
6,910
python
en
code
0
github-code
6
15720162505
from collections import deque


def solution(msg):
    answer = []
    ord_index = 64
    cach = dict()
    queue = deque()
    cach_index = 27

    for string in msg:
        if len(queue) == 0:
            queue.append(string)
        elif len(queue) != 0:
            queue.append(string)
            queue_string = ''.join(queue)
            if queue_string not in cach.keys():
                cach[queue_string] = cach_index
                cach_index += 1
                tmp = queue.popleft()
                if len(tmp) == 1:
                    answer.append(ord(tmp) - ord_index)
                else:
                    answer.append(cach[tmp])
            else:
                queue.clear()
                queue.append(queue_string)

    # flush whatever remains in the queue after the last character
    tmp = queue.popleft()
    if len(tmp) == 1:
        answer.append(ord(tmp) - ord_index)
    else:
        answer.append(cach[tmp])

    return answer


print(solution("KAKAO"))
print(solution("TOBEORNOTTOBEORTOBEORNOT"))
print(solution("ABABABABABABABAB"))
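A worked trace may help here; the following steps the LZW-style scheme above through solution("KAKAO") (the dictionary is implicitly pre-seeded with A=1 ... Z=26 via ord(); new entries are numbered from 27):

# w="K",  next "A": "KA"  unseen -> emit K=11,  add KA=27
# w="A",  next "K": "AK"  unseen -> emit A=1,   add AK=28
# w="K",  next "A": "KA"  known  -> w="KA"
# w="KA", next "O": "KAO" unseen -> emit KA=27, add KAO=29
# end of input               -> emit O=15
# Output: [11, 1, 27, 15], matching print(solution("KAKAO")).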
grohong/Beajoon_Algorism
프로그래머즈/[3차]압축/main.py
main.py
py
1,011
python
en
code
1
github-code
6
14037338740
import numpy as np
import matplotlib.pyplot as plt

plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']
plt.rcParams['mathtext.fontset'] = 'stix'
plt.rcParams['font.size'] = 10

cm = 1 / 2.54

Bubble0 = np.genfromtxt("Bubble_0/KellerMiksis_R5.000e-06_fa1.570e+04_pa-1.200e+05.txt", delimiter=" ")
Bubble1 = np.genfromtxt("Bubble_1/KellerMiksis_R1.000e-05_fa1.570e+04_pa-1.200e+05.txt", delimiter=" ")

fig1 = plt.figure(figsize=(17 * cm, 5 * cm))
ax1 = plt.subplot2grid((1, 3), (0, 0), colspan=1)
ax2 = plt.subplot2grid((1, 3), (0, 1), colspan=1)
ax3 = plt.subplot2grid((1, 3), (0, 2), colspan=1)
plt.subplots_adjust(wspace=1.2 * cm, hspace=1.2 * cm)

ax1.set(xlabel=r'$t$ [$\mu$s]', ylabel=r'$R(t)$ [$\mu$m]')
ax1.set_xlim(xmin=600, xmax=750)
ax1.set_ylim(ymin=0, ymax=60)
ax1.grid(color='gainsboro', linestyle='-', linewidth=0.5)
ax1.plot(Bubble0[:, 1] * 1e6, Bubble0[:, 3] * 1e6, linestyle='solid', linewidth=1, color='steelblue', label=r'$R_0 = 5 \ \mu \mathrm{m}$')
ax1.plot(Bubble1[:, 1] * 1e6, Bubble1[:, 3] * 1e6, linestyle='solid', linewidth=1, color='goldenrod', label=r'$R_0 = 10 \ \mu \mathrm{m}$')
ax1.legend(ncol=1, labelspacing=0.2, markerfirst=True, loc='upper right', fontsize='x-small', facecolor='None', edgecolor='None', framealpha=1, frameon=True, bbox_to_anchor=(1, 1))

ax2.set(xlabel=r'$t$ [$\mu$s]', ylabel=r'$\dot{R}(t)$ [m/s]')
ax2.set_xlim(xmin=600, xmax=750)
ax2.set_ylim(ymin=-400, ymax=300)
ax2.grid(color='gainsboro', linestyle='-', linewidth=0.5)
ax2.plot(Bubble0[:, 1] * 1e6, Bubble0[:, 4], linestyle='solid', linewidth=1, color='steelblue')
ax2.plot(Bubble1[:, 1] * 1e6, Bubble1[:, 4], linestyle='solid', linewidth=1, color='goldenrod')

ax3.set_yscale('log')
ax3.set(xlabel=r'$t$ [$\mu$s]', ylabel=r'$p_\mathrm{G}(t)$ [Pa]')
ax3.set_xlim(xmin=600, xmax=750)
ax3.set_ylim(ymin=1e1, ymax=1e10)
ax3.grid(color='gainsboro', linestyle='-', linewidth=0.5)
ax3.plot(Bubble0[:, 1] * 1e6, Bubble0[:, 5], linestyle='solid', linewidth=1.0, color='steelblue')
ax3.plot(Bubble1[:, 1] * 1e6, Bubble1[:, 5], linestyle='solid', linewidth=1.0, color='goldenrod')

ax1.xaxis.set_label_coords(0.5, -0.24)
ax2.xaxis.set_label_coords(0.5, -0.24)
ax3.xaxis.set_label_coords(0.5, -0.24)
ax1.yaxis.set_label_coords(-0.25, 0.5)
ax2.yaxis.set_label_coords(-0.25, 0.5)
ax3.yaxis.set_label_coords(-0.25, 0.5)

fig1.savefig('binaryinteraction.pdf', bbox_inches='tight', pad_inches=0.035)
polycfd/apecss
examples/binaryinteraction/plot_result.py
plot_result.py
py
2,384
python
en
code
13
github-code
6
36702590099
# import os and csv
import os
import csv

# variables to be used later
total_months = 0
old_profit = 0
month_change = []
profit_change_list = []
max_increase = ['', 0]
max_decrease = ['', 10000000000000000000000000000000000000]
total_profit = 0

# open csv file and create csv reader
csvpath = os.path.join("/Users/tincho/Desktop/Challenge Reps/Python_Challenge/PyBank/resources/budget_data.csv")
with open(csvpath) as csv_file:
    reader = csv.DictReader(csv_file)

    for row in reader:
        # calc total months and total profit
        total_months = total_months + 1
        total_profit = total_profit + int(row['Profit/Losses'])

        # calculate profit change
        profit_change = int(row['Profit/Losses']) - old_profit
        old_profit = int(row['Profit/Losses'])
        profit_change_list = profit_change_list + [profit_change]
        month_change = month_change + [row['Date']]

        # calculate max increase
        if (profit_change > max_increase[1]):
            max_increase[0] = row['Date']
            max_increase[1] = profit_change

        # calculate max decrease
        if (profit_change < max_decrease[1]):
            max_decrease[0] = row['Date']
            max_decrease[1] = profit_change

# calculate average profit change
profit_change = round(sum(profit_change_list) / len(profit_change_list), 2)

# create output chart
output = (
    f'\nFinancial Analysis\n'
    f'--------------------------\n'
    f'Total Months: {total_months}\n'
    f'Total Profit: ${total_profit}\n'
    f'Average Profit Change: ${profit_change}\n'
    f'Biggest Increase in Profit: {max_increase[0]} (${max_increase[1]})\n'
    f'Biggest Decrease in Profit: {max_decrease[0]} (${max_decrease[1]})\n'
)

print(output)

# create text file
with open("PyBank_Output", 'w') as txt_file:
    txt_file.write(output)
mbedino99/Python_Challenge
Pybank/main.py
main.py
py
1,834
python
en
code
0
github-code
6
70547983867
""" Python-Rightscale A stupid wrapper around rightscale's HTTP API """ import types from .actions import RS_DEFAULT_ACTIONS, COLLECTIONS from .httpclient import HTTPClient from .util import get_rc_creds, HookList # magic strings from the 1.5 api DEFAULT_API_PREPATH = '/api' # authenticate here OAUTH2_RES_PATH = '/'.join((DEFAULT_API_PREPATH, 'oauth2')) # start hypermedia searches here ROOT_RES_PATH = '/'.join((DEFAULT_API_PREPATH, 'sessions')) # these *should* be discoverable from the '/api/sessions' route above, but they # are not. there is an open ticket filed with rightscale. until it gets # addressed, it's just more magic: ACCOUNT_INFO_RES_PATH = '/'.join((DEFAULT_API_PREPATH, 'sessions/accounts')) HEALTH_CHECK_RES_PATH = '/'.join((DEFAULT_API_PREPATH, 'health-check')) COLLECTION_TYPE = 'type=collection' def get_resource_method(name, template): """ Creates a function that is suitable as a method for ResourceCollection. """ def rsr_meth(self, **kwargs): http_method = template['http_method'] extra_path = template.get('extra_path') if extra_path: fills = {'res_id': kwargs.pop('res_id', '')} path = self.path + (extra_path % fills) else: path = self.path response = self.client.request(http_method, path, **kwargs) loc = response.headers.get('location', None) if loc: # If the returned code is a 201, then there should be a location # header in the response that we can use to re-get the newly created # resource. loc = response.headers.get('location') response = self.client.get(loc, **kwargs) # At this point, we better have a valid JSON response object try: obj = response.json() except: # The response had no JSON ... not a resource object return if COLLECTION_TYPE in response.content_type: ret = HookList( [Resource(r, path, response, self.client) for r in obj], response=response ) else: ret = Resource(obj, path, response, self.client) return ret rsr_meth.__name__ = name return rsr_meth class Resource(object): """ A single resource. :param dict soul: The essence of the resource as returned by the RightScale API. This is the dictionary of attributes originally returned as the JSON body of the HTTP response from RightScale. :param str path: The path portion of the URL. E.g. ``/api/clouds/1``. :param rightscale.httpclient.HTTPResponse response: The raw response object returned by :meth:`HTTPClient.request`. 
""" def __init__(self, soul=None, path='', response=None, client=None): if soul is None: soul = {} self.soul = soul self.path = path self.collection_actions = {} self.response = response self.client = client self._links = None def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self.soul) def __str__(self): return str(self.soul) def __cmp__(self, other): return cmp(self.soul, other.soul) @property def content_type(self): if self.response: return self.response.content_type[0] return '' def _get_rel_hrefs(self): rel_hrefs = self.soul.get('links', []) return dict((raw['rel'], raw['href']) for raw in rel_hrefs) @property def href(self): return self._get_rel_hrefs().get('self', '') @property def links(self): # only initialize once, not if empty if self._links is None: _links = self._get_rel_hrefs() collection_actions = COLLECTIONS.get(self.content_type, {}) self.collection_actions = collection_actions for name, action in collection_actions.iteritems(): if action is None and name in _links: del _links[name] continue if name not in _links: _links[unicode(name)] = unicode( '%s/%s' % (self.path, name) ) self._links = _links return self._links def __dir__(self): return self.links.keys() def __getattr__(self, name): path = self.links.get(name) if not path: raise AttributeError('%s object has no attribute %s' % ( self.__class__.__name__, name, )) actions = RS_DEFAULT_ACTIONS.copy() tpl = self.collection_actions.get(name) if tpl: actions.update(tpl) return ResourceCollection(path, self.client, actions) class ResourceCollection(object): def __init__(self, path, client, actions): self.path = path self.client = client for name, template in actions.items(): if not template: continue method = get_resource_method(name, template) setattr(self, name, types.MethodType(method, self, self.__class__)) class RightScale(Resource): def __init__( self, path=DEFAULT_API_PREPATH, refresh_token=None, api_endpoint=None, ): """ Creates and configures the API object. :param str refresh_token: The refresh token provided by Rightscale when API access is enabled. :param api_endpoint: The rightscale subdomain to be hit with API requests. :param str path: The path portion of the URL. E.g. ``/api``. """ super(RightScale, self).__init__({}, path) self.auth_token = None rc_creds = get_rc_creds() # prevent dumb leakage from the environment by only grabbing creds from # rc file if they are not specified to the constructor. if api_endpoint is None: api_endpoint = rc_creds[0] if not api_endpoint: raise ValueError("Can't login with no api endpoint.") self.api_endpoint = api_endpoint if refresh_token is None: refresh_token = rc_creds[1] if not refresh_token: raise ValueError("Can't login. Need refresh token!") self.client = HTTPClient( api_endpoint, {'X-API-Version': '1.5'}, OAUTH2_RES_PATH, refresh_token, ) def health_check(self): # only in 1.5 api docs, not discoverable via href return self.client.get(HEALTH_CHECK_RES_PATH).json() @property def links(self): if not self.soul: try: response = self.client.get(ROOT_RES_PATH) self.response = response self.soul = response.json() except: self.soul = {} return super(RightScale, self).links
brantai/python-rightscale
rightscale/rightscale.py
rightscale.py
py
7,065
python
en
code
7
github-code
6
72851324029
import tensorflow as tf
import numpy as np


class Estimator:
    """Estimator class implements the function approximation for DQN.

    The Estimator class defines a NN that is used by DQN to estimate the
    Q-function values. It takes the classification state as input, followed by
    a fully connected layer with sigmoid activation of dimensionality 10. The
    output is then concatenated with action representations, followed by a
    fully connected layer with sigmoid activation of size 5 and then a last
    linear fully connected layer with 1 output.

    Attributes:
        classifier_placeholder: A TF placeholder of shape any x classifier_state_length
            for the classification state.
        action_placeholder: A TF placeholder of shape any x action_state_length
            for the action (datapoint) state.
        predictions: A tensor of size any x 1 that contains the Q-network's
            predicted approximation.
        summaries: A TF summary object that will contain the stats for result analysis.
    """

    def __init__(self, classifier_state_length, action_state_length, is_target_dqn, var_scope_name, bias_average):
        """Initialises the estimator.

        Builds a computational graph that computes Q-values starting from the
        classification state and the action state.

        Args:
            classifier_state_length: An integer indicating the number of features in classifier state.
            action_state_length: An integer indicating the number of features in action state.
            is_target_dqn: A boolean indicating if the Estimator is a target network.
                Only the normal (not target) network is trained; the other one is a lagging copy.
            var_scope_name: A string, can be "dqn" or "target_dqn", for example.
            bias_average: A float that is used to initialize the bias in the last layer.
        """
        self.classifier_placeholder = tf.placeholder(
            tf.float32, shape=[None, classifier_state_length], name="X_classifier")
        self.action_placeholder = tf.placeholder(
            tf.float32, shape=[None, action_state_length], name="X_datapoint")

        with tf.variable_scope(var_scope_name):
            # A fully connected layer with classifier_placeholder as input
            fc1 = tf.contrib.layers.fully_connected(
                inputs=self.classifier_placeholder,
                num_outputs=10,
                activation_fn=tf.nn.sigmoid,
                trainable=not is_target_dqn,
                variables_collections=[var_scope_name],
            )
            # Concatenate the output of the first fully connected layer with action_placeholder
            fc2concat = tf.concat([fc1, self.action_placeholder], 1)
            # A fully connected layer with fc2concat as input
            fc3 = tf.contrib.layers.fully_connected(
                inputs=fc2concat,
                num_outputs=5,
                activation_fn=tf.nn.sigmoid,
                trainable=not is_target_dqn,
                variables_collections=[var_scope_name]
            )
            # The last linear fully connected layer.
            # The bias on the last layer is initialized to some value,
            # normally the negative average episode duration / 2,
            # so that the NN finds the optimum more easily even when the mean is not 0.
            self.predictions = tf.contrib.layers.fully_connected(
                inputs=fc3,
                num_outputs=1,
                biases_initializer=tf.constant_initializer(bias_average),
                activation_fn=None,
                trainable=not is_target_dqn,
                variables_collections=[var_scope_name],
            )

            # Summaries for tensorboard.
            # Weights could be written out to check how they develop,
            # but that takes more space and is used only for debugging:
            # tf.summary.histogram("estimator/fc1", fc1)
            # tf.summary.histogram("estimator/fc2", fc2)
            # tf.summary.histogram("estimator/fc3", fc3)
            tf.summary.histogram("estimator/q_values", self.predictions)
            self.summaries = tf.summary.merge_all()
ksenia-konyushkova/LAL-RL
estimator.py
estimator.py
py
4,139
python
en
code
11
github-code
6
3955950488
# -*- coding: utf-8 -*-
"""check cache status

* check cache status
* this file runs standalone

"""

import sys
import os
import json
import time
import subprocess
import configparser
from glob import glob

from state_list import get_error_message, DONE

config = configparser.ConfigParser()
config.read("config.ini")
MAIL_ADDRESS = config.get("general", "mail_address")

CACHE_PATH = "./cache"


def create_filter_caches(span):
    """filter caches down to those created within the given span

    Args:
        span (int): cache check span

    Returns:
        filtered_cache_paths (list): cache paths in span
    """
    all_caches = glob(CACHE_PATH + "/*.json")
    if not all_caches:
        return False

    sorted_caches = sorted(all_caches, key=lambda x: os.path.getctime(x), reverse=True)

    filtered_cache_paths = []
    current_time = time.time()
    for cache in sorted_caches:
        cache_time = os.path.getctime(cache)
        if current_time - cache_time < span:
            filtered_cache_paths.append(cache)
        else:
            break

    return filtered_cache_paths


def create_cache_statuses(cache_paths):
    """create status list from cache list

    Args:
        cache_paths (list): cache paths in span

    Returns:
        statuses (list): status list
    """
    statuses = []
    try:
        for cache in cache_paths:
            ret = json.load(open(cache))
            status = ret[-1]
            statuses.append(str(status))
    except FileNotFoundError:
        # cache not found
        return False

    return statuses


def create_count_statuses(statuses):
    """count how many times each status appeared

    Args:
        statuses (list): status list

    Returns:
        status_counts (dict): status key with appearance count
    """
    status_counts = {}
    for status in statuses:
        if status in status_counts:
            # already found, count up
            status_counts[status] += 1
        else:
            status_counts[status] = 1

    return status_counts


def create_messages(counts):
    """status counts to messages

    Args:
        counts (dict): status key with appearance count

    Returns:
        messages (list): messages
    """
    messages = []
    sorted_counts = sorted(counts.items(), key=lambda x: x[1], reverse=True)
    sorted_keys = [key[0] for key in sorted_counts]

    if str(DONE) in sorted_keys:
        messages.append("Good")
    else:
        messages.append("*Careful*")

    for key in sorted_keys:
        error_message = get_error_message(int(key), 2)
        messages.append(key + " : " + str(counts[key]) + " " + error_message + "\n")

    return messages


def create_mail_title(span, condition):
    """span and server condition to mail title

    Args:
        span (int): cache check span
        condition (str): server condition

    Returns:
        title (string): mail title built from condition and span in hours
    """
    span_hour = str(int(span / 3600))
    return condition + " " + span_hour + "h"


def create_mail_body(messages):
    """messages to mail body

    Args:
        messages (list): messages

    Returns:
        body (string): statuses joined into the mail body
    """
    body_message = messages[1:]
    body = "".join(body_message)
    return body


def create_mail(span):
    """create mail from caches which were made within span

    Args:
        span (int): cache check span

    Returns:
        title (string): mail title
        body (string): mail body
    """
    cache_paths = create_filter_caches(span)
    if not cache_paths:
        title = create_mail_title(span, "***Bad***")
        body = "no cache available"
        return title, body

    statuses = create_cache_statuses(cache_paths)
    if not statuses:
        title = create_mail_title(span, "***Bad***")
        body = "cache may have been moved"
        return title, body

    counts = create_count_statuses(statuses)
    messages = create_messages(counts)

    title = create_mail_title(span, messages[0])
    body = create_mail_body(messages)

    return title, body


def main():
    """main function"""
    # if no args, then exit
    if len(sys.argv) < 2 or not sys.argv[1]:
        return

    span = int(sys.argv[1])
    title, body = create_mail(span)

    cmd = 'echo "' + body + '" | mail -s "' + title + '" ' + MAIL_ADDRESS
    subprocess.call(cmd, shell=True)


if __name__ == "__main__":
    main()
Neilsaw/PriLog_web
watchdog_status.py
watchdog_status.py
py
4,426
python
en
code
30
github-code
6
35848057912
import matplotlib.pyplot as plt


def visualize(n, x, y, file_name):
    plt.scatter(x, y)
    plt.xlabel('x coordinate')
    plt.ylabel('y coordinate')
    plt.title(file_name + ': Number of points: %d' % n)
    plt.show()


def read_from_file(file_name):
    coord_x = []
    coord_y = []
    with open(file_name, 'r') as file:
        n = int(file.readline())
        for i in range(n):
            line = [float(value) for value in file.readline().split()]
            coord_x.append(line[0])
            coord_y.append(line[1])
    visualize(n, coord_x, coord_y, file_name)


if __name__ == "__main__":
    files = ['001.dat', '002.dat', '003.dat', '004.dat', '005.dat']
    for file_name in files:
        read_from_file('data_1tsk/' + file_name)
klauchek/Python_3sem
matplotlib_lab/task1.py
task1.py
py
744
python
en
code
0
github-code
6
33850689871
# Problem 0012: given the sensitive-word file filtered_words.txt,
# replace any sensitive word in the user's input with asterisks (*)


def replaceword(path):
    word_list = []
    with open(path, 'r') as f:
        for word in f.read().split():
            word_list.append(word)
    # print(word_list)
    inp = input('Please enter a sentence: ')
    for i in word_list:
        if i in inp:
            print(inp.replace(i, '*'))


if __name__ == '__main__':
    replaceword('filted_words.txt')
akenYu/learnpy
showme/12/replaceword.py
replaceword.py
py
433
python
en
code
0
github-code
6
19271968449
import MDAnalysis

import satoshi_pca as SAS

path = "/lustre7/home/lustre3/satoshi/MED"
TRR = ["/aff4/test_all.trr", "/eaf1/test_all.trr", "/taf7/test_all.trr",
       "/aff4_kai/run_all.trr", "/eaf1_kai/run_all.trr", "/taf7_kai/run_all.trr"]
PDB = ["/aff4/HEN.pdb", "/eaf1/HEN.pdb", "/taf7/HEN.pdb",
       "/aff4_kai/aff4kai.pdb", "/eaf1_kai/eaf1kai.pdb", "/taf7_kai/taf7kai.pdb"]
PROB = ["/aff4/prob.txt", "/eaf1/prob.txt", "/taf7/prob.txt",
        "/aff4_kai/prob.dat", "/eaf1_kai/prob.dat", "/taf7_kai/prob.dat"]


def PDB_cal(num1):
    num_pdb = []
    RESIDUE = ["N", "C"]
    for i in open(path + PDB[num1]):
        f = i.split()
        if f[2] in RESIDUE:
            num_pdb.append(int(f[1]))
    print(len(num_pdb))
    return num_pdb


def PROB_cal(num1):
    num_prob = []
    num2 = 0
    for i in open(path + PROB[num1], "r"):
        if float(i) != 0:
            num_prob.append(num2)
        num2 += 1
    return num_prob


def TRR_cal():
    kai_zahyou = []
    for trr in range(6):
        num_pdb = PDB_cal(trr)
        num_prob = PROB_cal(trr)
        u = MDAnalysis.Universe(path + TRR[trr])
        frm = u.trajectory
        frm_itr = iter(frm)
        del frm, u
        print(len(num_prob))
        """
        for i in num_prob:
            kai = []
            x = float(frm[i][0][0])
            y = float(frm[i][0][1])
            z = float(frm[i][0][2])
            for j in num_pdb:
                kai.append(str(float(frm[i][j][0]) - x))
                kai.append(str(float(frm[i][j][1]) - y))
                kai.append(str(float(frm[i][j][2]) - z))
            kai_zahyou.append(kai)
            print("kai", len(kai), " kai_zahyou", len(kai_zahyou), "/", len(num_prob), " num_pdb", len(num_pdb))

        num2 = 0
        while True:
            try:
                kai = []
                FRM = next(frm_itr)
                if num2 in num_prob:
                    x = float(FRM[0][0])
                    y = float(FRM[0][1])
                    z = float(FRM[0][2])
                    for j in num_pdb:
                        kai.append(str(float(FRM[j][0]) - x))
                        kai.append(str(float(FRM[j][1]) - y))
                        kai.append(str(float(FRM[j][2]) - z))
                    kai_zahyou.append(kai)
                    print("kai", len(kai), " kai_zahyou", len(kai_zahyou), "/", len(num_prob), " num_pdb", len(num_pdb))
                del x, y, z, FRM
            except StopIteration:
                break
            num2 += 1
        """
        for i in safe_mem(frm_itr, num_prob, num_pdb):
            kai_zahyou.append(i)
            print("kai_zahyou", len(kai_zahyou), "/", len(num_prob), " num_pdb", len(num_pdb))
        del frm_itr, num_prob, num_pdb
    return kai_zahyou


def safe_mem(frm_itr, num_prob, num_pdb):
    num2 = 0
    while True:
        try:
            kai = []
            FRM = next(frm_itr)
            if num2 in num_prob:
                x = float(FRM[0][0])
                y = float(FRM[0][1])
                z = float(FRM[0][2])
                for j in num_pdb:
                    kai.append(str(float(FRM[j][0]) - x))
                    kai.append(str(float(FRM[j][1]) - y))
                    kai.append(str(float(FRM[j][2]) - z))
                yield kai
                # del x, y, z, FRM, kai
        except StopIteration:
            del kai
            break
        num2 += 1


def PPP():
    kai = [0]
    num1 = 0
    for i in range(6):
        for i in open(path + PROB[i], "r"):
            if float(i) != 0:
                num1 += 1
        kai.append(num1)
    return kai


if __name__ == '__main__':
    kai = SAS.pca(TRR_cal())
    kai = kai.tolist()
    path1 = "/lustre7/home/lustre3/satoshi/ALL_PCA/txt/"
    ttt = "_20201207_2.txt"
    ligands = ["aff", "eaf", "taf", "affkai", "eafkai", "tafkai"]
    num = PPP()
    for i in range(6):
        f = open(path1 + ligands[i] + ttt, "w")
        for j in range(int(num[i]), int(num[i + 1])):
            f.write(str(kai[j][0]))
            f.write(" ")
            f.write(str(kai[j][1]))
            f.write("\n")
        f.close()
satoshi-python/Desktop
all_pca_kai.py
all_pca_kai.py
py
4,342
python
en
code
0
github-code
6
37857057401
#! /usr/bin/env python3
from uuid import uuid4 as uuid
import argparse

import pika

from protos import events

if __name__ == '__main__':
    p = argparse.ArgumentParser()
    p.add_argument("--exchange", required=True)
    p.add_argument("--pretty-print", default=False, action='store_true')
    args = p.parse_args()

    conn = pika.BlockingConnection(
        pika.ConnectionParameters('localhost')
    )
    channel = conn.channel()
    channel.exchange_declare(exchange=args.exchange, exchange_type='fanout')

    # bind a fresh, exclusive queue to the fanout exchange
    queue_name = f"manual_{uuid().hex}"
    queue = channel.queue_declare(queue=queue_name, exclusive=True)
    channel.queue_bind(exchange=args.exchange, queue=queue.method.queue)

    def print_message(ch, method, properties, body):
        event: events.Event = events.Event().FromString(body)
        print(event)

    channel.basic_consume(queue=queue_name, auto_ack=True, on_message_callback=print_message)
    channel.start_consuming()
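For reference, a matching publisher-side sketch. It assumes the repo's protos.events messages are standard protobuf classes exposing SerializeToString(), and the exchange name (illustrative here) must match the consumer's --exchange flag.

import pika
from protos import events

conn = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = conn.channel()
# declaring is idempotent, so publisher and consumer can both declare
channel.exchange_declare(exchange='my-exchange', exchange_type='fanout')

event = events.Event()  # populate fields per the repo's proto schema
channel.basic_publish(exchange='my-exchange', routing_key='',
                      body=event.SerializeToString())
conn.close()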
cjmcgraw/studious-carnival
rabbitmq/read-from-exchange.py
read-from-exchange.py
py
959
python
en
code
0
github-code
6
34321388716
from datetime import datetime, timedelta
import os

from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators import (StageToRedshiftOperator, LoadFactOperator, DataQualityOperator)
from airflow.operators.subdag_operator import SubDagOperator

from subdags.subdag_for_dimensions import load_dimension_subdag
from helpers import SqlQueries
from quality_checks.sql_queries import QualityChecks

AWS_KEY = os.environ.get('AWS_KEY')
AWS_SECRET = os.environ.get('AWS_SECRET')

# set default args
default_args = {
    'owner': 'udacity',
    'start_date': datetime(2018, 1, 1),
    'end_date': datetime(2018, 12, 1),
    'email_on_retry': False,
    'retries': 3,
    'catchup': False,
    'retry_delay': timedelta(minutes=5),
    'depends_on_past': False,
    'wait_for_downstream': True
}

# dag definition
dag = DAG('udac_example_dag',
          default_args=default_args,
          description='Load and transform data in Redshift with Airflow',
          schedule_interval='@hourly'
          )

# dummy start node
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)

# stage events
stage_events_to_redshift = StageToRedshiftOperator(
    task_id='Stage_events',
    dag=dag,
    redshift_conn_id="redshift",
    aws_credentials_id="aws_default",
    table="staging_events",
    s3_bucket="udacity-dend",
    s3_key="log_data",
    sql_stmt=SqlQueries.log_copy_command,
    provide_context=True,
    json_format="s3://udacity-dend/log_json_path.json"
)

# stage songs
stage_songs_to_redshift = StageToRedshiftOperator(
    task_id='Stage_songs',
    dag=dag,
    redshift_conn_id="redshift",
    aws_credentials_id="aws_default",
    table="staging_songs",
    s3_bucket="udacity-dend",
    s3_key="song_data",
    sql_stmt=SqlQueries.song_copy_command,
    json_format="auto"
)

# load dimensions
load_dimension_subdag_task = SubDagOperator(
    subdag=load_dimension_subdag(
        parent_dag_name="udac_example_dag",
        task_id="load_dimensions",
        redshift_conn_id="redshift",
        start_date=datetime(2018, 1, 1)
    ),
    task_id="load_dimensions",
    dag=dag,
)

# load fact
load_songplays_table = LoadFactOperator(
    task_id='Load_songplays_fact_table',
    dag=dag,
    redshift_conn_id="redshift",
    table="songplays",
    sql_stmt=SqlQueries.songplay_table_insert
)

# run quality check
run_quality_checks = DataQualityOperator(
    task_id='Run_data_quality_checks',
    dag=dag,
    redshift_conn_id="redshift",
    sql_stmt=QualityChecks.count_check,
    tables=['songs', 'time', 'users', 'artists', 'songplays'],
)

# dummy end node
end_operator = DummyOperator(task_id='Stop_execution', dag=dag)

"""
An overview of the implemented DAG:

          --> stage_events -->
         //                   \\
start -->                       --> load_facts --> load_dimensions --> quality_check --> end
         \\                   //
          --> stage_songs -->
"""

# sequence of airflow operations
start_operator >> stage_events_to_redshift
start_operator >> stage_songs_to_redshift
stage_events_to_redshift >> load_songplays_table
stage_songs_to_redshift >> load_songplays_table
load_songplays_table >> load_dimension_subdag_task
load_dimension_subdag_task >> run_quality_checks
run_quality_checks >> end_operator
supratim94336/SparkifyDataPipelineWithAirflow
airflow/dags/udacity_dag.py
udacity_dag.py
py
3,401
python
en
code
0
github-code
6
13879858183
import math

N = int(input())
muscle = list(map(int, input().split()))
muscle = sorted(muscle, reverse=False)

max_muscle = 0
pt = math.ceil(N / 2)
i = 0
ans_list = []

if N % 2 != 0:  # odd number of members
    max_muscle = muscle[N - 1]
    while i <= N - i - 2:
        ans_list.append(muscle[i] + muscle[N - i - 2])
        i += 1
    ans_list.append(max_muscle)
else:
    while i <= N - i - 1:
        ans_list.append(muscle[i] + muscle[N - i - 1])
        i += 1

print(max(ans_list))
codusl100/algorithm
백준/그리디/서강근육맨.py
서강근육맨.py
py
456
python
en
code
0
github-code
6
40687860073
import wsgiserver
from flask import Flask, jsonify


def check_quota_response(**kwargs):
    response = kwargs['response']
    return jsonify({
        'version': 1,
        'payload': {
            'emptyWallet': not response,
        },
    })


def setup_flask_server(json_response):
    app = Flask(__name__)
    app.add_url_rule(
        '/', 'index', check_quota_response,
        defaults={'response': json_response},
    )
    app.add_url_rule(
        '/<path:dummy>', 'index', check_quota_response,
        defaults={'response': json_response},
    )
    return app


def run_flask(ip, port, response, exit_callback):
    app = setup_flask_server(response)
    server = wsgiserver.WSGIServer(app, host=ip, port=port)
    try:
        server.start()
    finally:
        # When the flask server finishes running, do any other cleanup
        exit_callback()
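A minimal invocation sketch for run_flask() above; the address, port, and callback are illustrative, and the boolean controls the emptyWallet flag served to every request.

if __name__ == '__main__':
    # serve a "has quota" response on all interfaces until interrupted
    run_flask('0.0.0.0', 9999, True, lambda: print('server stopped'))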
magma/magma
lte/gateway/python/magma/pipelined/check_quota_server.py
check_quota_server.py
py
868
python
en
code
1,605
github-code
6
32144143111
from datetime import date

from twilio.rest import TwilioRestClient

# To find these visit https://www.twilio.com/user/account
ACCOUNT_SID = "AC937af250fc201a2c44aad667cf309fa4"
AUTH_TOKEN = "6a8accce5860c8f18391bf4ec809d84b"

client = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN)

for message in client.messages.list():
    message.body  # body is accessed but unused as written


def filtering(to_txt):
    messages = client.messages.list(
        to=to_txt,
        date_sent=date(2015, 1, 8),
    )
    for message in messages:
        return message.body

# filtering(mbl_no)
suparna-ghanvatkar/EBMP
show_text.py
show_text.py
py
515
python
en
code
0
github-code
6
39227052944
# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""
import os

import numpy as np
import scipy.io.wavfile as wav
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.constraints import maxnorm
from sklearn.model_selection import StratifiedKFold
from python_speech_features import mfcc
from python_speech_features import delta

from prepare_data import serialize_data
from prepare_data import load_data_from_npy

npy_path = './Audio Data/npy data'
train_wav_npy_filename = 'train_wav.npy'
train_tg_npy_filename = 'train_label.npy'
test_wav_npy_filename = 'test_wav.npy'
test_tg_npy_filename = 'test_label.npy'
val_wav_npy_filename = 'val_wav.npy'
val_tg_npy_filename = 'val_label.npy'

x_train = load_data_from_npy(os.path.join(npy_path, train_wav_npy_filename))
y_train = load_data_from_npy(os.path.join(npy_path, train_tg_npy_filename))
x_test = load_data_from_npy(os.path.join(npy_path, test_wav_npy_filename))
y_test = load_data_from_npy(os.path.join(npy_path, test_tg_npy_filename))
x_val = load_data_from_npy(os.path.join(npy_path, val_wav_npy_filename))
y_val = load_data_from_npy(os.path.join(npy_path, val_tg_npy_filename))

model = Sequential()
model.add(Dropout(0.2, input_shape=(39 * 41,)))
model.add(Dense(39 * 41, init='normal', activation='relu', W_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Dense(128, activation='relu', W_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Dense(128, activation='relu', W_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Dense(5, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Fit the model
history = model.fit(x_train, y_train, epochs=10, batch_size=512, validation_data=(x_val, y_val))

# the log file must be opened in write mode
with open('./log_history.txt', 'w') as log:
    log.write(str(history.history))

model.save('./Audio Data/model/model-test1.h5')

# plot training and validation loss
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.savefig('./Training and validation loss.png')
plt.clf()

acc = history.history['acc']
val_acc = history.history['val_acc']

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.savefig('Training and validation accuracy')

results = model.evaluate(x_test, y_test)
print(results)
MakerFace/voice-activation-system
mfcc-model.py
mfcc-model.py
py
2,823
python
en
code
0
github-code
6
32839631812
import torch
from diffusers import DiffusionPipeline


def torch_device():
    if torch.cuda.is_available():
        return "cuda"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        return "mps"
    return "cpu"


def pipeline(model="stabilityai/stable-diffusion-xl-base-0.9", device=torch_device()):
    torch_dtype = torch.float16
    variant = "fp16"

    # macOS (mps) can only use fp32; fp32 weights are the repo default,
    # so no variant suffix is requested
    if device == "mps":
        torch_dtype = torch.float32
        variant = None

    pipe = DiffusionPipeline.from_pretrained(
        model,
        torch_dtype=torch_dtype,
        use_safetensors=True,
        variant=variant,
    )

    if device == "cpu":
        pipe.enable_model_cpu_offload()
    else:
        pipe.to(device)

    # 20-30% inference speed up for torch >= 2.0
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

    return pipe
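A minimal usage sketch, assuming the file above is importable as utils; the prompt text and output filename are illustrative, while .images[0] is the standard diffusers pipeline output.

from utils import pipeline

pipe = pipeline()
image = pipe(prompt="a photo of an astronaut riding a horse").images[0]
image.save("astronaut.png")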
2132660698/sdxl-demos
utils.py
utils.py
py
906
python
en
code
null
github-code
6
43589412425
#!/usr/bin/env python
# coding: utf-8

# In[1]:

df = None

# In[2]:

from reaktoro import *
import numpy as np
from natsort import natsorted
from tqdm.notebook import tqdm
import os

from bokeh.io import show, output_notebook
from bokeh.layouts import column
from bokeh.plotting import figure
from bokeh.models import Range1d, ColumnDataSource
from bokeh.layouts import gridplot

# In[3]:

second = 1
minute = 60
hour = 60 * minute
day = 24 * hour
week = 7 * day
year = 365 * day

# In[4]:

xl = 0.0
xr = 0.5
ncells = 100
nsteps = 200
dx = (xr - xl) / ncells
dt = 60 * minute

# 7el  Cl H N Na O Z
# D = [2.032*1.0e-9, 9.311*10.0e-9, 1.902*1.0e-9, 1.334*1.0e-9, 1.0e-9, 1.0e-9]
D = 1.0e-09
v = 1.0 / week
T = 60.0 + 273.15
P = 100 * 1e5
phi = 0.1

# In[5]:

xcells = np.linspace(xl, xr, ncells + 1)

# In[6]:

dirichlet = False

# In[7]:

output_quantities = """
    pH
    speciesMolality(H+)
    speciesMolality(Cl-)
    speciesMolality(Na+)
    speciesMolality(NO3-)
    speciesMolarity(OH-)
""".split()

# In[8]:

column_quantities = """
    pH
    Hcation
    Clanion
    Nacation
    NO3anion
    OHanion
""".split()

# In[9]:

# Create the list of columns stored in dataframes
columns = ['step', 'x'] + column_quantities
import pandas as pd

# In[10]:

# Initialize dataframes with above defined columns
df = pd.DataFrame(columns=columns)

# In[11]:

folder_results = 'results-rt-calcite-dolomite'


def make_results_folders():
    os.system('mkdir -p ' + folder_results)

# In[12]:

def simulate():
    # Construct the chemical system with its phases and species
    system = define_chemical_system()

    # Define the initial condition of the reactive transport modeling problem
    state_ic = define_initial_condition(system)

    # Define the boundary condition of the reactive transport modeling problem
    state_bc = define_boundary_condition(system)

    # Generate indices of partitioning fluid and solid species
    nelems, ifluid_species, isolid_species = partition_indices(system)

    # Partitioning fluid and solid species
    b, bfluid, bsolid, b_bc = partition_elements_in_mesh_cell(ncells, nelems, state_ic, state_bc)

    # Create a list of chemical states for the mesh cells (one for each cell, initialized to state_ic)
    states = [state_ic.clone() for _ in range(ncells + 1)]

    # Create the equilibrium solver object for the repeated equilibrium calculation
    solver = EquilibriumSolver(system)

    # Running the reactive transport simulation loop
    step = 0  # the current step number
    t = 0.0  # the current time (in seconds)

    # Output the initial state of the reactive transport calculation
    outputstate_df(step, system, states)

    with tqdm(total=nsteps, desc="Reactive transport simulations") as pbar:
        while step <= nsteps:
            # Perform transport calculations
            bfluid, bsolid, b = transport(states, bfluid, bsolid, b, b_bc, nelems, ifluid_species, isolid_species)

            # Perform reactive chemical calculations
            states = reactive_chemistry(solver, states, b)

            # Increment time step and number of time steps
            t += dt
            step += 1

            # Output the current state of the reactive transport calculation
            outputstate_df(step, system, states)

            # Update a progress bar
            pbar.update(1)

# In[13]:

def define_chemical_system():
    # Construct the chemical system with its phases and species
    db = Database('supcrt98.xml')

    editor = ChemicalEditor(db)
    editor.addAqueousPhaseWithElements('H N O Na Cl') \
        .setChemicalModelPitzerHMW() \
        .setActivityModelDrummondCO2()

    system = ChemicalSystem(editor)

    return system

# In[14]:

def define_initial_condition(system):
    problem_ic = EquilibriumProblem(system)
    problem_ic.setTemperature(T)
    problem_ic.setPressure(P)
    problem_ic.add('H2O', 0.001, 'kg')
    problem_ic.add('NaCl', 1e-4, 'mol')
    problem_ic.add('HNO3', 1e-4, 'mol')

    # Calculate the equilibrium states for the initial conditions
    state_ic = equilibrate(problem_ic)

    # Scale the volumes of the phases in the initial condition
    state_ic.scalePhaseVolume('Aqueous', 1.0, 'm3')

    return state_ic

# In[15]:

def define_boundary_condition(system):
    # Define the boundary condition of the reactive transport modeling problem
    problem_bc = EquilibriumProblem(system)
    problem_bc.setTemperature(T)
    problem_bc.setPressure(P)
    problem_bc.add('H2O', 0.001, 'kg')
    problem_bc.add('NaCl', 1e-4, 'mol')
    problem_bc.add('HNO3', 1e-6, 'mol')

    # Calculate the equilibrium states for the boundary conditions
    state_bc = equilibrate(problem_bc)

    # Scale the boundary condition state to 1 m3
    state_bc.scaleVolume(1.0, 'm3')

    return state_bc

# In[16]:

def partition_indices(system):
    nelems = system.numElements()
    els = system.elements()
    for el in els:
        print('elements', el.name())
    ifluid_species = system.indicesFluidSpecies()
    isolid_species = system.indicesSolidSpecies()

    return nelems, ifluid_species, isolid_species

# In[17]:

def partition_elements_in_mesh_cell(ncells, nelems, state_ic, state_bc):
    # The concentrations of each element in each mesh cell (in the current time step)
    b = np.zeros((ncells, nelems))

    # Initialize the concentrations (mol/m3) of the elements in each mesh cell
    b[:] = state_ic.elementAmounts()

    # The concentrations (mol/m3) of each element in the fluid partition, in each mesh cell
    bfluid = np.zeros((ncells, nelems))

    # The concentrations (mol/m3) of each element in the solid partition, in each mesh cell
    bsolid = np.zeros((ncells, nelems))

    # Initialize the concentrations (mol/m3) of each element on the boundary
    b_bc = state_bc.elementAmounts()

    return b, bfluid, bsolid, b_bc

# In[18]:

def transport(states, bfluid, bsolid, b, b_bc, nelems, ifluid_species, isolid_species):
    # Collect the amounts of elements from fluid and solid partitions
    for icell in range(ncells):
        bfluid[icell] = states[icell].elementAmountsInSpecies(ifluid_species)
        bsolid[icell] = states[icell].elementAmountsInSpecies(isolid_species)

    # Get the porosity of the boundary cell
    bc_cell = 0
    phi_bc = states[bc_cell].properties().fluidVolume().val / states[bc_cell].properties().volume().val

    # print(nelems)
    # Transport each element in the fluid phase
    for j in range(nelems):
        transport_fullimplicit(bfluid[:, j], dt, dx, v, D, phi_bc * b_bc[j])

    # Update the amounts of elements in both fluid and solid partitions
    b[:] = bsolid + bfluid

    return bfluid, bsolid, b

# In[19]:

def transport_fullimplicit(u, dt, dx, v, D, ul):
    # Number of DOFs
    n = len(u)
    alpha = D * dt / dx ** 2
    beta = v * dt / dx

    # Upwind finite volume scheme
    a = np.full(n, -beta - alpha)
    b = np.full(n, 1 + beta + 2 * alpha)
    c = np.full(n, -alpha)

    # Set the boundary condition on the left cell
    if dirichlet:
        # Use Dirichlet BC boundary conditions
        b[0] = 1.0
        c[0] = 0.0
        u[0] = ul
    else:
        # Flux boundary conditions (implicit scheme for the advection)
        # Left boundary
        b[0] = 1 + alpha + beta
        c[0] = -alpha  # stays the same as it is defined -alpha
        u[0] += beta * ul  # = dt/dx * v * g, the flux we prescribe is equal to v * ul

    # Right boundary is free
    a[-1] = -beta
    b[-1] = 1 + beta

    # Solve a tridiagonal matrix equation
    thomas(a, b, c, u)

# In[20]:

def thomas(a, b, c, d):
    n = len(d)
    c[0] /= b[0]
    for i in range(1, n - 1):
        c[i] /= b[i] - a[i] * c[i - 1]
    d[0] /= b[0]
    for i in range(1, n):
        d[i] = (d[i] - a[i] * d[i - 1]) / (b[i] - a[i] * c[i - 1])
    x = d
    for i in reversed(range(0, n - 1)):
        x[i] -= c[i] * x[i + 1]
    return x

# In[21]:

def reactive_chemistry(solver, states, b):
    for icell in range(ncells):
        solver.solve(states[icell], T, P, b[icell])
    return states

# In[22]:

def outputstate_df(step, system, states):
    quantity = ChemicalQuantity(system)
    values = [None] * len(columns)
    for state, x in zip(states, xcells):
        values[0] = step
        values[1] = x
        quantity.update(state)
        for quantity_name, i in zip(output_quantities, range(2, len(states))):
            values[i] = quantity.value(quantity_name) * (100 / (1 - phi) if "phaseVolume" in quantity_name else 1)
        df.loc[len(df)] = values

# In[23]:

def titlestr(t):
    t = t / minute
    h = int(t) / 60
    m = int(t) % 60
    return 'Time: %2dh %2dm' % (h, m)

# In[24]:

def plot_figures_ph(steps, files):
    plots = []
    for i in steps:
        t = i * dt
        source = ColumnDataSource(df[df['step'] == i])

        p = figure(plot_width=600, plot_height=250)
        p.line(source.data['x'], source.data['pH'], color='teal', line_width=2, legend_label='pH')
        p.x_range = Range1d(-0.001, 1.001)
        p.y_range = Range1d(2.5, 12.0)
        p.xaxis.axis_label = 'Distance [m]'
        p.yaxis.axis_label = 'pH'
        p.legend.location = 'bottom_right'
        p.title.text = titlestr(t)

        plots.append([p])

    grid = gridplot(plots)
    show(grid)

# In[25]:

def plot_figures_aqueous_species(steps, files):
    plots = []
    for i in steps:
        t = i * dt
        source = ColumnDataSource(df[df['step'] == i])

        p = figure(plot_width=600, plot_height=300)
        p.line(source.data['x'], source.data['Nacation'], color='orange', line_width=2, legend_label='Na')
        p.line(source.data['x'], source.data['NO3anion'], color='green', line_width=2, legend_label='NO3-')
        p.line(source.data['x'], source.data['Clanion'], color='red', line_width=2, legend_label='Cl')
        p.line(source.data['x'], source.data['Hcation'], color='darkviolet', line_width=2, legend_label='H+')
        # p.x_range = Range1d(-0.001, 1.0)
        # p.y_range = Range1d(1e-9, 1e-2)
        p.xaxis.axis_label = 'Distance [m]'
        p.yaxis.axis_label = 'Concentration [molal]'
        p.legend.location = 'top_right'
        p.title.text = titlestr(t)
        p.legend.click_policy = 'mute'

        plots.append([p])

    grid = gridplot(plots)
    show(grid)

# In[26]:

make_results_folders()

# In[27]:

simulate()

# In[28]:

step = 0
df_step = df[df['step'] == step].loc[:, ['x'] + column_quantities]
df_step

# In[29]:

df.shape

# In[30]:

selected_steps_to_plot = [10, 20]
assert all(step <= nsteps for step in selected_steps_to_plot), \
    f"Make sure that selected steps are less than " \
    f"total amount of steps {nsteps}"

# In[31]:

print("Collecting files...")
files = [file for file in natsorted(os.listdir(folder_results))]

# In[32]:

output_notebook()

# In[33]:

plot_figures_ph(selected_steps_to_plot, files)

# In[34]:

plot_figures_aqueous_species(selected_steps_to_plot, files)
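As a quick sanity check of the Thomas solver defined in In[20], the sketch below builds the same tridiagonal system densely and compares against numpy's general solver. The coefficients are arbitrary test values, and copies are passed because thomas() modifies its arguments in place.

import numpy as np

a = np.array([0.0, -1.0, -1.0, -1.0])  # sub-diagonal (a[0] unused)
b = np.array([2.0, 2.0, 2.0, 2.0])     # main diagonal
c = np.array([-1.0, -1.0, -1.0, 0.0])  # super-diagonal (c[-1] unused)
d = np.array([1.0, 0.0, 0.0, 1.0])     # right-hand side

A = np.diag(b) + np.diag(a[1:], k=-1) + np.diag(c[:-1], k=1)
expected = np.linalg.solve(A, d)

x = thomas(a.copy(), b.copy(), c.copy(), d.copy())
assert np.allclose(x, expected)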
nimaamp/Reactive-transport
Nima Benchmark!.py
Nima Benchmark!.py
py
11,252
python
en
code
0
github-code
6
35227671174
#! /usr/bin/python

import argparse


def find_max_profit(prices):
    # brute force: check every buy/sell pair (O(n^2))
    max_profit = 0
    for i, price in enumerate(prices):
        for j in range(i + 1, len(prices)):
            profit = prices[j] - price
            if max_profit == 0 or profit > max_profit:
                max_profit = profit
    return max_profit


stocks = [1050, 270, 1540, 3800, 2]
print(find_max_profit(stocks))
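For comparison, a linear-time variant of the same idea: track the cheapest buy price seen so far instead of checking every pair. The function name is illustrative; semantics match the version above, including a possibly negative result when prices only fall.

def find_max_profit_linear(prices):
    min_price = prices[0]
    best = None
    for price in prices[1:]:
        profit = price - min_price
        if best is None or profit > best:
            best = profit
        min_price = min(min_price, price)
    return best


print(find_max_profit_linear(stocks))  # 3530, same as the quadratic version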
glweems/python-algorithms
stock_prices/stock_prices.py
stock_prices.py
py
354
python
en
code
0
github-code
6
648716471
import glob
import logging
import os

import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
from tqdm import tqdm

logfilepath = ""  # if not set, logs go to the terminal
if os.path.isfile(logfilepath):
    os.remove(logfilepath)
logging.basicConfig(filename=logfilepath, level=logging.INFO)


class DetectionDataset(Dataset):
    """
    Parameters
    ----------
    path : str(jpg)
        Path to input image directory.
    transform : object
    """
    CLASSES = ['ng', 'ok']

    def __init__(self, path='Dataset/train', transform=None, sequence_number=1, test=False):
        super(DetectionDataset, self).__init__()

        if sequence_number < 1 and isinstance(sequence_number, float):
            logging.error(f"{sequence_number} Must be greater than 0")
            return

        self._name = os.path.basename(path)
        self._sequence_number = sequence_number
        self._class_path_List = sorted(glob.glob(os.path.join(path, "*")), key=lambda path: self.key_func(path))
        self._transform = transform
        self._items = []
        self._itemname = []
        self._test = test
        self._make_item_list()

    def key_func(self, path):
        return path

    def _make_item_list(self):
        if self._class_path_List:
            for path in self._class_path_List:
                class_name = os.path.basename(path)
                image_path_list = sorted(glob.glob(os.path.join(path, "*")), key=lambda path: self.key_func(path))
                for i in tqdm(range(len(image_path_list) - (self._sequence_number - 1))):
                    image_path = image_path_list[i:i + self._sequence_number]
                    self._items.append((image_path, class_name))
                    base_image = os.path.basename(image_path[-1])
                    self._itemname.append(base_image)
        else:
            logging.info("The dataset does not exist")

    def __getitem__(self, idx):
        images = []
        image_sequence_path, label = self._items[idx]
        for image_path in image_sequence_path:
            image = cv2.imread(image_path, flags=-1)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            images.append(image)
        # stack the sequence along the channel axis
        images = np.concatenate(images, axis=-1)
        origin_images = images.copy()

        if self._transform:
            one_hot_label = self._one_hot(label)
            result = self._transform(images, one_hot_label, self._itemname[idx])
            if self._test:
                return result[0], result[1], result[2], torch.as_tensor(origin_images)
            else:
                return result[0], result[1], result[2]
        else:
            return origin_images, label, self._itemname[idx]

    def _one_hot(self, label):
        unit_matrix = np.eye(len(self.CLASSES))
        if label == 'ng':
            label = unit_matrix[0]
        elif label == 'ok':
            label = unit_matrix[1]
        return label

    @property
    def classes(self):
        return self.CLASSES

    @property
    def num_class(self):
        """Number of categories."""
        return len(self.CLASSES)

    def __str__(self):
        return self._name + " " + "dataset"

    def __len__(self):
        return len(self._items)


# test
if __name__ == "__main__":
    import random
    from core.utils.util.utils import plot_bbox

    sequence_number = 1
    root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
    dataset = DetectionDataset(path=os.path.join(root, 'Dataset', 'train'), sequence_number=sequence_number)

    length = len(dataset)
    sequence_image, label, file_name = dataset[random.randint(0, length - 1)]
    print('images length:', length)
    print('sequence image shape:', sequence_image.shape)

    if sequence_number > 1:
        sequence_image = sequence_image[:, :, 3 * (sequence_number - 1):]
        file_name = file_name[-1]

    plot_bbox(sequence_image, score=None, label=label,
              class_names=dataset.classes, colors=None, reverse_rgb=True,
              image_show=True, image_save=False, image_save_path="result",
              image_name=os.path.basename(file_name), gt=True)
    '''
    images length: 1499
    sequence image shape: (720, 1280, 9)
    '''
DeepFocuser/PyTorch-Detector-alpha
classification/core/utils/dataprocessing/dataset.py
dataset.py
py
4,393
python
en
code
4
github-code
6
41267173090
import csv
from getpass import getpass
import math

import controle


def _processa_csv(csv_f, delimiter):
    # File format: data (date), aula (class), carga_horaria (workload, optional)
    aulas = []
    with open(csv_f, encoding='utf-8-sig') as csvfile:
        aulas_reader = csv.reader(csvfile, delimiter=delimiter)
        for linha in aulas_reader:
            aulas.append(linha)
    return aulas


def conv(dat):
    if '-' in dat:
        a, m, d = dat.split('-')
        return '/'.join([d, m, a])
    return dat


def processa_csv(codigo, turma, csv_f, delimiter=','):
    data = controle.base(codigo, turma, 'ProfessorTurmaAulasConfirmar')
    aulas = _processa_csv(csv_f, delimiter)
    data["numAulas"] = str(len(aulas))
    n = 0
    for aula in aulas:
        d = aula[0]
        a = aula[1]
        ch = "2" if len(aula) == 2 else aula[2]
        data["d_" + str(n + 1)] = conv(str(d))  # 03/05/2017
        data["h_" + str(n + 1)] = ch
        data["a_" + str(n + 1)] = a.encode('iso8859-15')
        n += 1
    return data


def main():
    # ICC: 1411001
    # LP2: 1411181
    # P2 : 1411168
    # DevWeb: 1411335
    # turma = "01"
    login_ = input("LOGIN: ")
    senha_ = getpass("SENHA: ")
    disc = input("DISCIPLINA: ")
    turma = input("TURMA: ")
    arq = input("ARQUIVO: ")
    jsessionid = controle.login(login_, senha_)
    data = processa_csv(disc, turma, arq)
    res = open("res.html", "wb")
    res.write(controle.chamada(data, jsessionid).read())
    res.close()
    print("PROCESSAMENTO ENCERRADO... VERIFIQUE CONTROLE ACADEMICO OU ARQUIVO res.html POR ERROS")


if __name__ == '__main__':
    main()
matheusgr/scripts-controle-academico-ufcg
aulas.py
aulas.py
py
1,624
python
en
code
0
github-code
6
44254934565
# GUI Notepad
from tkinter import *
from tkinter.messagebox import showinfo
from tkinter.filedialog import askopenfilename, asksaveasfilename
import os


def newFile():
    global file
    root.title("Untitled - Notepad")
    file = None
    TextArea.delete(1.0, END)


def openFile():
    global file
    file = askopenfilename(defaultextension=".txt",
                           filetypes=[("All files", "*.*"), ("Text Documents", "*.txt")])
    if file == "":
        file = None
    else:
        root.title(os.path.basename(file) + " - Notepad")
        TextArea.delete(1.0, END)
        f = open(file, "r")
        TextArea.insert(1.0, f.read())
        f.close()


def saveFile():
    global file
    if file is None:
        file = asksaveasfilename(initialfile="Untitled.txt", defaultextension=".txt",
                                 filetypes=[("All files", "*.*"), ("Text Documents", "*.txt")])
        if file == "":
            file = None
        else:
            # Save it as a New File
            f = open(file, "w")
            f.write(TextArea.get(1.0, END))
            f.close()
            root.title(os.path.basename(file) + " - Notepad")
    else:
        f = open(file, "w")
        f.write(TextArea.get(1.0, END))
        f.close()


def quitApp():
    root.destroy()


def cut():
    TextArea.event_generate("<<Cut>>")


def copy():
    TextArea.event_generate("<<Copy>>")


def paste():
    TextArea.event_generate("<<Paste>>")


def about():
    showinfo("About Notepad", "Notepad by Sajid Majeed")


if __name__ == '__main__':
    # ---------------- Basic tkinter setup ----------------
    # Creating an instance of tkinter
    root = Tk()
    # Adding title
    root.title("Notepad")
    # Setting icon
    # root.iconbitmap("icon.ico")
    # Setting default size
    root.geometry("644x588")
    # Setting minimum size
    root.minsize(600, 500)

    # ---------------- Creating a Menu Bar ----------------
    MenuBar = Menu(root)
    root.config(menu=MenuBar)

    # File Menu
    FileMenu = Menu(MenuBar, tearoff=0)
    # To open a New File
    FileMenu.add_command(label="New", command=newFile)
    # To open already existing File
    FileMenu.add_command(label="Open", command=openFile)
    # To save the current file
    FileMenu.add_command(label="Save", command=saveFile)
    # To add a separating line
    FileMenu.add_separator()
    # To quit the notepad
    FileMenu.add_command(label="Exit", command=quitApp)
    MenuBar.add_cascade(label="File", menu=FileMenu)

    # Edit Menu
    EditMenu = Menu(MenuBar, tearoff=0)
    # To give a feature of Cut, Copy, Paste
    EditMenu.add_command(label="Cut", command=cut)
    EditMenu.add_command(label="Copy", command=copy)
    EditMenu.add_command(label="Paste", command=paste)
    MenuBar.add_cascade(label="Edit", menu=EditMenu)

    # Help Menu
    HelpMenu = Menu(MenuBar, tearoff=0)
    HelpMenu.add_command(label="About Notepad", command=about)
    MenuBar.add_cascade(label="Help", menu=HelpMenu)

    # ---------------- Creating a Text Area ----------------
    # Text area for writing text
    TextArea = Text(root, font="lucida 13")
    file = None
    TextArea.pack(expand=True, fill=BOTH)

    # Adding Scrollbar using rules from tkinter
    scroll = Scrollbar(TextArea)
    scroll.pack(side=RIGHT, fill=Y)
    scroll.config(command=TextArea.yview)
    TextArea.config(yscrollcommand=scroll.set)

    # ---------------- Creating a Bottom Status Bar ----------------
    # line_col = StringVar()
    # line_col.set("Ln 1, Col 1")
    statusbar = Frame(root, bd=1, relief=SUNKEN)
    statusbar.pack(side=BOTTOM, fill=X)
    Label(statusbar, text="UTF-8", width=20).pack(side=RIGHT)
    Label(statusbar, text="Windows(CRLF)", width=20).pack(side=RIGHT)
    # Label(statusbar, textvariable=line_col, width=20).pack(side=RIGHT)

    root.mainloop()
SajidMajeed92/Python_Tkinter
Tkinter/Notepad.py
Notepad.py
py
3,839
python
en
code
11
github-code
6
25793507679
# -*-coding:utf-8-*-
__author__ = 'BING'

from django.http import HttpResponse
from django.shortcuts import render, render_to_response
from zhihupaper import apiUse, singelNews, latestNews, beforeNews
import re
from getPic import GetPic


def home(request):
    api = apiUse()
    news = latestNews(api)
    count = news.getnum()
    stories = news.getstories()
    return render_to_response('home.html', {'stories': stories})


def mobile(request):
    api = apiUse()
    news = latestNews(api)
    topstories = news.gettopstories()
    stories = news.getmobilestories()
    return render_to_response('mobile.html', {'topstories': topstories, 'stories': stories})


def story(request, id):
    api = apiUse()
    singelnews = singelNews(api, int(id))
    title = singelnews.gettitle()
    body = singelnews.getbody()
    image = singelnews.getimage()
    source = singelnews.getsource()
    body = replaceUrl(body)
    body = replaceImg(body, image, title, source)
    return render_to_response('story.html', {'title': title, 'body': body})


def ajax_morestory(request, date):
    api = apiUse()
    beforenews = beforeNews(api, date)
    stories = beforenews.getstories()
    return render_to_response('ajax_morestory.html', {'stories': stories})


def m_ajax_morestory(request, date):
    api = apiUse()
    beforenews = beforeNews(api, date)
    stories = beforenews.getmobilestories()
    return render_to_response('m_ajax_morestory.html', {'stories': stories})


def replaceImg(body, image, title, source):
    pattern = re.compile('<div class=\"img-place-holder\"><\/div>', re.DOTALL)
    replaceStr = r'<div class="img-wrap"><h1 class="headline-title">%s</h1><span class="img-source">%s</span><img src="/imgurl/url=%s" alt></div>' % (title, source, image)
    return pattern.sub(replaceStr, body)


def replaceUrl(body):
    pattern = re.compile(r'src=\"', re.DOTALL)
    replaceStr = r'src="/imgurl/url='
    return pattern.sub(replaceStr, body)


def get_pic(request, url):
    url = url[4:]
    getpic = GetPic(url)
    req = getpic.get_pic()
    pic = req.read()
    return HttpResponse(pic)
codeBing/zhihudaily
paper/views.py
views.py
py
2,087
python
en
code
4
github-code
6
38120413849
# Simplex step using row operations
import numpy as np
from numpy.linalg import norm, inv
import sys


def pivot_step(A, i, j):  # i = row number, j = column number
    A[i] = A[i] / A[i, j]
    for k in range(len(A[:])):  # zero out the rest of column j
        if k != i:
            A[k] = A[k] - A[k, j] * A[i]


def simplex_step(A, b, c, iB, iN, xB, Binv, irule):
    coefficient_base = []
    for i in iB:
        coefficient_base.append(c[0, i - 1])  # cB^T, the costs of the basic variables
    w = np.dot(coefficient_base, Binv)  # getting w
    z = np.matrix(np.copy(c))
    for i in iN:
        z[0, i - 1] = np.dot(w, A[:, i - 1])

    if irule == 0:  # irule = 0 indicates that the smallest-coefficient rule should be used
        reduced_cost = 0
        for i in iN:  # find the most negative reduced cost among the non-basic variables
            if c[0, i - 1] - z[0, i - 1] < reduced_cost:
                reduced_cost = c[0, i - 1] - z[0, i - 1]
                entering_variable = i
        if reduced_cost == 0:  # if there is none, we are at optimality
            istatus = -1  # at optimality
            print("OPTIMAL")
            return [istatus, iB, iN, xB, Binv]
        y = np.dot(Binv, A[:, entering_variable - 1])  # entering column
        current_ratio = float('inf')
        for i in range(y.size):  # find the minimum ratio
            if y[i, :] > sys.float_info.epsilon:  # greater than epsilon
                if (xB[i, :] / y[i, :] >= 0) and (xB[i, :] / y[i, :] < current_ratio):
                    current_ratio = xB[i, :] / y[i, :]
                    leaving_index = i
        if current_ratio == float('inf'):  # if there is no ratio >= 0, the program is unbounded
            print("Program is unbounded")
            istatus = 16  # program is unbounded
            return [istatus, iB, iN, xB, Binv]
    elif irule == 1:
        sorted_iN = iN
        sorted_iN.sort()
        reduced_cost = 0
        for i in sorted_iN:  # take the first negative reduced cost among the non-basic variables
            if c[0, i - 1] - z[0, i - 1] < 0:
                reduced_cost = c[0, i - 1] - z[0, i - 1]
                entering_variable = i
                break
        if reduced_cost == 0:  # if there is none, we are at optimality
            istatus = -1  # at optimality
            return [istatus, iB, iN, xB, Binv]
        y = np.dot(Binv, A[:, entering_variable - 1])  # entering column
        current_ratio = float('inf')
        for i in range(y.size):  # find the minimum ratio, ties broken by smallest variable index
            if y[i, :] > sys.float_info.epsilon:  # greater than epsilon
                if (xB[i, :] / y[i, :] >= 0) and (xB[i, :] / y[i, :] < current_ratio):
                    current_ratio = xB[i, :] / y[i, :]
                    leaving_index = i
                    current_variable = iB[i]
                elif (xB[i, :] / y[i, :] >= 0) and (xB[i, :] / y[i, :] == current_ratio):
                    if current_variable < iB[i]:
                        current_ratio = xB[i, :] / y[i, :]
                        leaving_index = i
                        current_variable = iB[i]
        if current_ratio == float('inf'):  # if there is no ratio >= 0, the program is unbounded
            istatus = 16  # program is unbounded
            print("Program is unbounded")
            return [istatus, iB, iN, xB, Binv]

    entering_index = iN.index(entering_variable)
    leaving_variable = iB[leaving_index]
    print("leaving index = " + str(leaving_index))
    print("entering index = " + str(entering_index))
    print("leaving variable = " + str(leaving_variable))
    print("entering variable = " + str(entering_variable))
    iB.remove(leaving_variable)  # remove the leaving variable from iB
    iB.insert(leaving_index, entering_variable)  # insert the entering variable into iB
    iN.remove(entering_variable)  # remove the entering variable from iN
    iN.insert(entering_index, leaving_variable)  # insert the leaving variable into iN
    if np.linalg.det(A[:, [index_B - 1 for index_B in iB]]) == 0:
        print("infeasible")
        istatus = 16  # infeasible
        return istatus, iB, iN, xB, A[:, [index_B - 1 for index_B in iB]]
    else:
        Binv = np.concatenate((y, Binv), axis=1)
        pivot_step(Binv, leaving_index, 0)
        Binv = Binv[:, 1:]
        xB = np.dot(Binv, b)
        istatus = 0
        return [istatus, iB, iN, xB, Binv]
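
# --- Hedged usage sketch (an assumption, not part of the original file) ---
# One smallest-coefficient step on a tiny LP, using the conventions above
# (1-indexed variable lists, np.matrix inputs, Binv = inverse of the basis):
#   min -x1 - 2*x2   s.t.   x1 + x2 + x3 = 4,   x1 + x4 = 2,   x >= 0
# starting from the slack basis {x3, x4}, so Binv = I and xB = b.
if __name__ == '__main__':
    A = np.matrix([[1., 1., 1., 0.],
                   [1., 0., 0., 1.]])
    b = np.matrix([[4.], [2.]])
    c = np.matrix([[-1., -2., 0., 0.]])
    # x2 should enter (reduced cost -2) and x3 should leave (ratio 4/1).
    istatus, iB, iN, xB, Binv = simplex_step(A, b, c, [3, 4], [1, 2],
                                             b.copy(), np.matrix(np.eye(2)), irule=0)
    print(istatus, iB, iN)  # expected: 0 [2, 4] [1, 3]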
yohanesusanto/Revised-Simplex-Algo
simplex_step1.py
simplex_step1.py
py
4,051
python
en
code
0
github-code
6
17435427439
import pytest import pdb from typing import List class Solution: def fullJustify(self, words: List[str], maxWidth: int) -> List[str]: """ """ def intertwine(ws, sps): # one word + one sps appended t = [] for w, s in zip(ws, sps): t.extend([w, s]) t.append(ws[-1]) return ''.join(t) # 1. Greedy pick words for each line cur_count = 0 buffer = [] lines = [] for w in words: if (cur_count + len(w) + (1 if cur_count else 0)) <= maxWidth: cur_count += len(w) + (1 if cur_count else 0) else: lines.append(buffer) cur_count = len(w) buffer = [] buffer.append(w) if buffer: lines.append(buffer) # 2. Justify spaces using divmod for i in range(len(lines)): remaining = maxWidth - (sum(len(w)+1 for w in lines[i]) - 1) # Left justify last line if i == len(lines)-1: lines[i] = ' '.join(lines[i]) + ' '*remaining # Full justify else: if len(lines[i]) > 1: interval, left = divmod(remaining, len(lines[i])-1) spaces = [' ' *(1+interval+ (1 if j < left else 0)) for j, w in enumerate(lines[i])][:-1] lines[i] = intertwine(lines[i], spaces) else: # Left justfiy if only 1 word per line lines[i] = ' '.join(lines[i]) + ' '*remaining return lines @pytest.mark.parametrize('input, length, output', [ (["This", "is", "an", "example", "of", "text", "justification."],16,["This    is    an", "example  of text", "justification.  "]), (["Listen","to","many,","speak","to","a","few."], 6,["Listen","to ","many ,","speak ","to a","few. "]) ]) def test(input, length, output): Solution().fullJustify(input, length) == output if __name__ == '__main__': sys.exit(pytest.main(['-s', '-v'] + sys.argv))
naubull2/codingtests
leetcode/quick-prep/68_Text_Justification/solution.py
solution.py
py
2,224
python
en
code
0
github-code
6
16866363316
import pytorch_lightning as pl import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.optim.lr_scheduler as lr_scheduler import torch from torchmetrics import Accuracy from loss import create_criterion class Backbone(nn.Module): def __init__(self): super(Backbone, self).__init__() self.feature = nn.Sequential( nn.Conv2d(1, 32, (3, 3), (1, 1)), nn.BatchNorm2d(32), nn.Dropout(0.5), nn.ReLU(), nn.Conv2d(32, 64, (3, 3), (1, 1)), nn.BatchNorm2d(64), nn.Dropout(0.5), nn.ReLU(), ) self.avg = nn.AdaptiveAvgPool2d((1, 1)) self.classifier = nn.Linear(64, 10) def forward(self, x): x = self.feature(x) x = self.avg(x) x = torch.flatten(x, 1) x = self.classifier(x) return x class MNISTModel(pl.LightningModule): def __init__(self, loss, lr): super(MNISTModel, self).__init__() self.net = Backbone() self._criterion = create_criterion(loss) self.acc = Accuracy() self.learning_rate = lr self.save_hyperparameters(ignore="model") def forward(self, x): x = self.net(x) x = F.softmax(x, dim=1) return x def training_step(self, batch, batch_idx): preds, loss, acc, labels = self.__share_step(batch, 'train') self.log("train_loss", loss) self.log("train_accuracy", acc) return {"loss": loss, "pred": preds.detach(), 'labels': labels.detach()} def validation_step(self, batch, batch_idx): preds, loss, acc, labels = self.__share_step(batch, 'val') self.log("val_loss", loss) self.log("val_accuracy", acc) return {"loss": loss, "pred": preds, 'labels': labels} def __share_step(self, batch, mode): x, y = batch y_hat = self.net(x) loss = self._criterion(y_hat, y) acc = self.acc(y_hat, y) return y_hat, loss, acc, y def configure_optimizers(self): optimizer = optim.Adam( self.parameters(), lr=self.learning_rate) scheduler = lr_scheduler.StepLR( optimizer, step_size=10, gamma=0.5 ) return [optimizer], [scheduler]
KyubumShin/MNIST_pl
model.py
model.py
py
2,306
python
en
code
0
github-code
6
4404685461
def users_list(users): user_list = [] for user in users: user_info = {'id': user.id, 'name': user.username, 'advertisements': len(user.advertisements)} user_list.append(user_info) return user_list def advertisements_list(advertisements): adv_list = [] for advertisement in advertisements: adv = {'id': advertisement.id, 'title': advertisement.title, 'created': advertisement.created} adv_list.append(adv) return adv_list
gchernousov/advertisements_api_flask
functions.py
functions.py
py
481
python
en
code
0
github-code
6
19400224964
import urllib.request import re import os from datetime import datetime contador = 1 def download(link, curso): link_video = link diretorio = get_diretorio(link_video, curso) urllib.request.urlretrieve(link_video, diretorio) def get_diretorio(link_video,curso): padrao = "\w{1,50}.mp4" curso = curso.replace(".txt", "") #cria a pasta do video caso não exista if not os.path.isdir(f'./video/{curso}'): os.mkdir(f'./video/{curso}') return f'./video/{curso}/Aula {contador} - '+re.search(padrao, link_video).group() # faz download dos videos for file in os.listdir("./links"): if file.endswith(".txt"): with open('./links/'+os.path.join(file), 'r', encoding='utf-8') as arquivo: for link in arquivo: download(link, file) contador = contador + 1 contador = 1
jonassantos1000/robo_maximo_ibm
main.py
main.py
py
865
python
pt
code
0
github-code
6
71958574589
from __future__ import unicode_literals import frappe import os from frappe.custom.doctype.custom_field.custom_field import create_custom_fields def setup(company=None, patch=True): if not patch: update_address_template() make_custom_fields() add_custom_roles_for_reports() def make_custom_fields(): custom_fields = { 'Company': [ dict(fieldname='siren_number', label='SIREN Number', fieldtype='Data', insert_after='website') ] } create_custom_fields(custom_fields) def add_custom_roles_for_reports(): report_name = 'Fichier des Ecritures Comptables [FEC]' if not frappe.db.get_value('Custom Role', dict(report=report_name)): frappe.get_doc(dict( doctype='Custom Role', report=report_name, roles= [ dict(role='Accounts Manager') ] )).insert() def update_address_template(): """ Read address template from file. Update existing Address Template or create a new one. """ dir_name = os.path.dirname(__file__) template_path = os.path.join(dir_name, 'address_template.html') with open(template_path, 'r') as template_file: template_html = template_file.read() address_template = frappe.db.get_value('Address Template', 'France') if address_template: frappe.db.set_value('Address Template', 'France', 'template', template_html) else: # make new html template for France frappe.get_doc(dict( doctype='Address Template', country='France', template=template_html )).insert()
ektai/erpnext
erpnext/regional/france/setup.py
setup.py
py
1,447
python
en
code
0
github-code
6
30575157530
from Domain.BuildEnumMethods import BuildEnumMethods from .UtilityScriptBase import UtilityScriptBase import logging from .Exceptions.ArgNotFoundException import ArgNotFoundException import json from mongoengine import connect import pandas from Domain.EquityCorporateData import EquityCorporateData class LoadNasdaqTickers(UtilityScriptBase): def __init__(self): UtilityScriptBase.__init__( self ) logging.debug("In Nasdaq Utility Script") ##Change Description self.description = "Loads tickers from nasdaq csv" ##Init args self.args["DB_CONNECTION"] = None self.args["DB_HOST"] = None self.args["DB_PORT"] = None self.args["pathToCsv"] = None def run(self): logging.debug("Prompting for value") self.queryArg("pathToCsv", self.args, "What is the path to the nasdaq csv??\nValue: \t") self.queryArg("DB_CONNECTION", self.args, "What DB Connection?\nValue: \t") self.queryArg("DB_HOST", self.args, "What DB Host?\nValue: \t") self.queryArg("DB_PORT", self.args, "What DB Port?\nValue: \t") connect(self.args["DB_CONNECTION"], host=self.args["DB_HOST"], port=int(self.args["DB_PORT"])) nasdaqDF = self.fileToDf(self.args["pathToCsv"]) equities = EquityCorporateData.build(BuildEnumMethods.DF, DF=nasdaqDF) equitiesInSystem = self.getEquityObjects() for i in equities: i.save() def runWithArgFile(self, argFile): self.parseArgFile(argFile) self.validateArgs() self.run() def parseArgFile(self, argFile): f = open(argFile) data = json.load(f) for i in data: self.args[i] = data[i] def validateArgs(self): if(self.args["pathToCsv"] == None): raise ArgNotFoundException("pathToCsv") if(self.args["DB_CONNECTION"] == None): raise ArgNotFoundException("DB_CONNECTION") if(self.args["DB_PORT"] == None): raise ArgNotFoundException("DB_PORT") if(self.args["DB_HOST"] == None): raise ArgNotFoundException("DB_HOST") def fileToDf(self, filePath : str) -> pandas.DataFrame: return pandas.read_csv(filePath) def getEquityObjects(self) -> [EquityCorporateData]: return EquityCorporateData.objects
jminahan/backtest_framework
UtilityRunner/UtilityScripts/LoadNasdaqTickers.py
LoadNasdaqTickers.py
py
2,352
python
en
code
0
github-code
6
234182160
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import math


class ListDock(QDockWidget):
    def __init__(self, title, parent=None):
        super().__init__(title, parent)
        self.setMinimumWidth(150)
        self.listwidget = Listbox(self)
        self.listwidget.setDragEnabled(True)
        self.layout_vbox = QVBoxLayout(self.listwidget)
        self.layout_vbox.setContentsMargins(0, 0, 0, 0)
        self.setWidget(self.listwidget)


class Listbox(QListWidget):
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setSelectionMode(QAbstractItemView.SingleSelection)
        self.setDragEnabled(True)

        item = QListWidgetItem("test item", self)
        item.setTextAlignment(Qt.AlignHCenter)
        item.setSizeHint(QSize(80, 115))
        item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsDragEnabled)
        item.setData(Qt.UserRole + 1, "test item")

    def startDrag(self, *args, **kwargs):
        print("DragListbox::startDrag")
        try:
            item = self.currentItem()
            name = item.data(Qt.UserRole + 1)
            print("dragging item", name, item)

            pixmap = QPixmap(item.data(Qt.UserRole))

            itemData = QByteArray()
            dataStream = QDataStream(itemData, QIODevice.WriteOnly)
            dataStream << pixmap
            dataStream.writeQString(name)
            dataStream.writeQString(item.text())

            mimeData = QMimeData()
            mimeData.setData("text/plain", itemData)

            drag = QDrag(self)
            drag.setMimeData(mimeData)
            drag.setHotSpot(QPoint(0, 0))
            drag.setPixmap(pixmap)
            drag.exec_(Qt.MoveAction)
        except Exception as e:
            pass


class Dock(QDockWidget):
    def __init__(self, title, parent=None):
        super().__init__(title, parent)
        self.dockwidget = QWidget()
        self.layout_vbox = QVBoxLayout(self.dockwidget)
        self.layout_vbox.setContentsMargins(0, 0, 0, 0)
        self.grScene = DockGraph()
        self.view = DockView(self.grScene, self)
        self.layout_vbox.addWidget(self.view)
        self.setWidget(self.dockwidget)


class DockView(QGraphicsView):
    def __init__(self, grScene, parent=None):
        super().__init__(parent)
        self.grScene = grScene
        self.initUI()
        self.setScene(self.grScene)
        self.setAcceptDrops(True)

        self.zoomInFactor = 1.25
        self.zoomClamp = True
        self.zoom = 10
        self.zoomStep = 1
        self.zoomRange = [0, 10]

    def dragEnterEvent(self, event):
        print("dragEnter")
        print(event.mimeData().text())
        if event.mimeData().hasFormat('text/plain'):
            print("accept item")
            event.accept()  # event.acceptProposedAction()
        else:
            print("ignore item")
            event.ignore()

    def dragMoveEvent(self, event):
        event.accept() if event.mimeData().hasText() else event.ignore()

    def dropEvent(self, event):
        print("drop")
        print(event.mimeData().text())

    def initUI(self):
        self.setRenderHints(QPainter.Antialiasing | QPainter.HighQualityAntialiasing |
                            QPainter.TextAntialiasing | QPainter.SmoothPixmapTransform)
        self.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)

    def mousePressEvent(self, event):
        if event.button() == Qt.MiddleButton:
            self.middleMouseButtonPress(event)
        elif event.button() == Qt.LeftButton:
            self.leftMouseButtonPress(event)
        elif event.button() == Qt.RightButton:
            self.rightMouseButtonPress(event)
        else:
            super().mousePressEvent(event)

    def mouseReleaseEvent(self, event):
        if event.button() == Qt.MiddleButton:
            self.middleMouseButtonRelease(event)
        elif event.button() == Qt.LeftButton:
            self.leftMouseButtonRelease(event)
        elif event.button() == Qt.RightButton:
            self.rightMouseButtonRelease(event)
        else:
            super().mouseReleaseEvent(event)

    def middleMouseButtonPress(self, event):
        releaseEvent = QMouseEvent(QEvent.MouseButtonRelease, event.localPos(), event.screenPos(),
                                   Qt.LeftButton, Qt.NoButton, event.modifiers())
        super().mouseReleaseEvent(releaseEvent)
        self.setDragMode(QGraphicsView.ScrollHandDrag)
        fakeEvent = QMouseEvent(event.type(), event.localPos(), event.screenPos(),
                                Qt.LeftButton, event.buttons() | Qt.LeftButton, event.modifiers())
        super().mousePressEvent(fakeEvent)

    def middleMouseButtonRelease(self, event):
        fakeEvent = QMouseEvent(event.type(), event.localPos(), event.screenPos(),
                                Qt.LeftButton, event.buttons() & ~Qt.LeftButton, event.modifiers())
        super().mouseReleaseEvent(fakeEvent)
        self.setDragMode(QGraphicsView.NoDrag)

    def leftMouseButtonPress(self, event):
        return super().mousePressEvent(event)

    def leftMouseButtonRelease(self, event):
        return super().mouseReleaseEvent(event)

    def rightMouseButtonPress(self, event):
        return super().mousePressEvent(event)

    def rightMouseButtonRelease(self, event):
        return super().mouseReleaseEvent(event)

    def wheelEvent(self, event):
        # calculate our zoom factor
        zoomOutFactor = 1 / self.zoomInFactor

        # calculate zoom
        if event.angleDelta().y() > 0:
            zoomFactor = self.zoomInFactor
            self.zoom += self.zoomStep
        else:
            zoomFactor = zoomOutFactor
            self.zoom -= self.zoomStep

        clamped = False
        if self.zoom < self.zoomRange[0]:
            self.zoom, clamped = self.zoomRange[0], True
        if self.zoom > self.zoomRange[1]:
            self.zoom, clamped = self.zoomRange[1], True

        # set scene scale
        if not clamped or self.zoomClamp is False:
            self.scale(zoomFactor, zoomFactor)


class DockGraph(QGraphicsScene):
    def __init__(self):
        super().__init__()

        # settings
        self.gridSize = 20
        self.gridSquares = 5

        self._color_background = QColor("#393939")
        self._color_light = QColor("#2f2f2f")
        self._color_dark = QColor("#292929")

        self._pen_light = QPen(self._color_light)
        self._pen_light.setWidth(1)
        self._pen_dark = QPen(self._color_dark)
        self._pen_dark.setWidth(2)

        self.setBackgroundBrush(self._color_background)

        self.scene_width = 64000
        self.scene_height = 64000
        self.setSceneRect(-self.scene_width / 2, -self.scene_height / 2, self.scene_width, self.scene_height)

    def drawBackground(self, painter, rect):
        super().drawBackground(painter, rect)

        # here we create our grid
        left = int(math.floor(rect.left()))
        right = int(math.ceil(rect.right()))
        top = int(math.floor(rect.top()))
        bottom = int(math.ceil(rect.bottom()))

        first_left = left - (left % self.gridSize)
        first_top = top - (top % self.gridSize)

        # compute all lines to be drawn
        lines_light, lines_dark = [], []
        for x in range(first_left, right, self.gridSize):
            if x % (self.gridSize * self.gridSquares) != 0:
                lines_light.append(QLine(x, top, x, bottom))
            else:
                lines_dark.append(QLine(x, top, x, bottom))

        for y in range(first_top, bottom, self.gridSize):
            if y % (self.gridSize * self.gridSquares) != 0:
                lines_light.append(QLine(left, y, right, y))
            else:
                lines_dark.append(QLine(left, y, right, y))

        # draw the lines
        painter.setPen(self._pen_light)
        painter.drawLines(*lines_light)
        painter.setPen(self._pen_dark)
        painter.drawLines(*lines_dark)
glace158/Pyside_Test
test/dock.py
dock.py
py
8,154
python
en
code
0
github-code
6
21998644026
from typing import List class Trie: def __init__(self): self.L = 30 self.left = None self.right = None def insert(self, val: int): node = self for i in range(self.L, -1, -1): bit = (val >> i) & 1 if bit == 0: if not node.left: node.left = Trie() node = node.left else: if not node.right: node.right = Trie() node = node.right def get_max_xor(self, val: int) -> int: ans, node = 0, self for i in range(self.L, -1, -1): bit = (val >> i) & 1 check = False if bit == 0: if node.right: node = node.right check = True else: node = node.left else: if node.left: node = node.left check = True else: node = node.right if check: ans |= 1 << i return ans class Solution: def maximizeXor(self, nums: List[int], queries: List[List[int]]) -> List[int]: n, q = len(nums), len(queries), nums.sort() queries = [(x, m, i) for i, (x, m) in enumerate(queries)] queries.sort(key=lambda query: query[1]) ans = [0] * q t = Trie() idx = 0 for x, m, qid in queries: while idx < n and nums[idx] <= m: t.insert(nums[idx]) idx += 1 if idx == 0: ans[qid] = -1 else: ans[qid] = t.get_max_xor(x) return ans
hangwudy/leetcode
1700-1799/1707. 与数组中元素的最大异或值.py
1707. 与数组中元素的最大异或值.py
py
1,735
python
en
code
0
github-code
6
27577609762
import random def coin(): if random.randint(0, 1) == 1: return "heads" else: return "tails" if __name__ == '__main__': heads_tally = 0 tails_tally = 0 count = 0 for toss in range(10000): if coin() == "heads": heads_tally = heads_tally + 1 elif coin() == "tails": tails_tally = tails_tally + 1 break count = count + 1 print(f"{heads_tally} {tails_tally} {count}")
Micjohn01/C13-Python-Exercise
Practice/coin_simulation.py
coin_simulation.py
py
479
python
en
code
null
github-code
6
27259780990
"""We are the captains of our ships and we stay 'till the end. We see our stories through. """ """701. Insert into a Binary Search Tree [Recursive] """ class TreeNode: def __init__(self, val): self.val = val self.left = None self.right = None class Solution: def insertIntoBST(self, root, val): if not root: return TreeNode(val) if val < root.val: root.left = self.insertIntoBST(root.left, val) else: root.right = self.insertIntoBST(root.right, val) return root
asperaa/back_to_grind
Trees/701. Insert into a Binary Search Tree_clean.py
701. Insert into a Binary Search Tree_clean.py
py
566
python
en
code
1
github-code
6
73769261309
import time

from Pages.CCTVCheckout import CCTV_Checkout
from TestData.Data import Testdata
from Utilities.BaseClass import BaseClass


class Test_two(BaseClass):

    def test_CCTVCheckOut(self):
        log = self.getlogger()  # for the log file
        CCTV = CCTV_Checkout(self.driver)  # call the page class
        log.info("Click CCTV menu button")
        CCTV.get_CCTV_homeButton()
        # CCTV.get_scroll()
        log.info("Click CCTV View dates")
        CCTV.get_ViewDate()
        time.sleep(5)
        CCTV.get_scroll()
        time.sleep(5)
        log.info("Click ON CCTV Book Button")
        CCTV.get_ButtonBook()
        time.sleep(9)
        CCTV.get_scroll()
        time.sleep(9)
        log.info("Click on CCTV Package")
        CCTV.get_select_package()
        CCTV.get_Name()
        CCTV.get_Email()
        CCTV.get_Mobile()
        log.info("Click on Next button for address")
        time.sleep(5)
        CCTV.get_Next()
        # CCTV.get_postcode()
        # CCTV.get_address()
        CCTV.get_city()
        time.sleep(3)
        CCTV.get_AutoList()
        time.sleep(5)
        CCTV.get_Next2()
        CCTV.get_stripe()
azmul94/Get-Licensed-web
Tests/test_2_CCTVCheckout.py
test_2_CCTVCheckout.py
py
1,161
python
en
code
0
github-code
6
21471880105
import Picklizer
import sys
import time

# /home/nickshiell/storage/TestSet
inputCSVDir = ''
outputPKLDir = ''

# Make sure that the command line args are present
if len(sys.argv) == 3:
    inputCSVDir = sys.argv[1]
    outputPKLDir = sys.argv[2]
else:
    print('ERROR: invalid command line args: ', sys.argv)
    exit(0)

startTime = time.time()
Picklizer.PickleProductionRun(inputCSVDir, outputPKLDir)
executionTime = (time.time() - startTime)

print('Execution time for ', inputCSVDir, ': ' + str(executionTime), '[s]')
ResearchComputingServices/NewspaperSortByTicker
Pickling/productionScript.py
productionScript.py
py
519
python
en
code
0
github-code
6
10972428124
#
# train.py
# @author [email protected]
# @description
# @created 2020-12-09T16:35:56.524Z+05:30
# @last-modified 2020-12-11T20:05:30.671Z+05:30
#

########### Help ###########
'''
python train.py \
    --data_dir /Users/aman.gupta/Documents/eagleview/utilities/onsite_data_fetch/fetched_images/annotated_combined_thumbnail_after_may_2020/splitted_letterbox_training_data \
    --log_dir ./logs \
    --epochs 1 \
    --save_interval 5 \
    --print_interval 1 \
    --batch_size 64 \
    --name exp0
'''
#############################

import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import argparse
import os
from utils import (load_split_train_test, plot_classes_preds, save_checkpoint)
from torch.utils.tensorboard import SummaryWriter
import time
from model import Model
import sys
import configs

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="this script trains the classification model")
    parser.add_argument("--data_dir", required=True, help="training data path")
    parser.add_argument("--log_dir", required=False, default="./logs", type=str, help="dir to save logs")
    parser.add_argument("--epochs", default=10, type=int, help="number of epochs to train a model")
    parser.add_argument("--save_interval", default=100, type=int, help="interval to save model")
    parser.add_argument("--print_interval", default=10, type=int, help="interval to print log")
    parser.add_argument("--lr", default=0.003, type=float, help="learning rate")
    parser.add_argument("--batch_size", default=4, type=int, help="batch size")
    parser.add_argument("--test_split", default=0.2, type=float, help="test split out of 1.0")
    parser.add_argument("--name", default="exp0", type=str, help="experiment name")
    args = parser.parse_args()

    os.makedirs(args.log_dir, exist_ok=True)

    # tensorboard writer
    # default `log_dir` is "runs" - we'll be more specific here
    writer = SummaryWriter(args.log_dir)

    # load data
    data_dir = args.data_dir
    trainloader, testloader = load_split_train_test(data_dir, args.batch_size)
    print(trainloader.dataset.classes)
    # sys.exit()

    # load model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    output_layers = len(configs.CLASSES)
    model_obj = Model(output_layers, device, args.lr)
    model, optimizer, criterion = model_obj.model, model_obj.optimizer, model_obj.criterion

    # training loop
    epochs = args.epochs
    steps = 0
    running_loss = 0
    print_every = args.print_interval
    train_losses, test_losses = [], []

    try:
        print("Training Started")
        for epoch in range(epochs):
            for inputs, labels in trainloader:
                steps += 1
                inputs, labels = inputs.to(device), labels.to(device)
                optimizer.zero_grad()
                logps = model.forward(inputs)
                loss = criterion(logps, labels)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()

                if steps % print_every == 0:
                    test_loss = 0
                    accuracy = 0
                    model.eval()
                    with torch.no_grad():
                        for inputs, labels in testloader:
                            inputs, labels = inputs.to(device), labels.to(device)
                            logps = model.forward(inputs)
                            batch_loss = criterion(logps, labels)
                            test_loss += batch_loss.item()

                            ps = torch.exp(logps)
                            top_p, top_class = ps.topk(1, dim=1)
                            equals = top_class == labels.view(*top_class.shape)
                            accuracy += torch.mean(equals.type(torch.FloatTensor)).item()

                    train_losses.append(running_loss / len(trainloader))
                    test_losses.append(test_loss / len(testloader))

                    # ...log the running loss
                    writer.add_scalar('loss/training_loss', running_loss / print_every,
                                      global_step=epoch * len(trainloader) + steps)
                    # ...log the test loss
                    writer.add_scalar('loss/test_loss', test_loss / len(testloader),
                                      global_step=epoch * len(trainloader) + steps)
                    # ...log the test Accuracy
                    writer.add_scalar('test Accuracy', accuracy / len(testloader),
                                      global_step=epoch * len(trainloader) + steps)
                    # ...log a Matplotlib Figure showing the model's predictions on a
                    # random mini-batch
                    writer.add_figure('predictions vs. actuals',
                                      plot_classes_preds(model, inputs, labels),
                                      global_step=epoch * len(trainloader) + steps)

                    print(f"Epoch {epoch+1}/{epochs}.. "
                          f"Step :{steps}.. "
                          f"Train loss: {running_loss/print_every:.3f}.. "
                          f"Test loss: {test_loss/len(testloader):.3f}.. "
                          f"Test accuracy: {accuracy/len(testloader):.3f}")
                    running_loss = 0
                    model.train()

                if steps % args.save_interval == 0:
                    path = os.path.join(args.log_dir, "checkpoints", args.name, f"epochs-{epochs}-steps-{steps}")
                    save_checkpoint(path, epoch, model, optimizer, train_losses)
                    print(f"checkpoint saved at :{path}")

        path = os.path.join(args.log_dir, "checkpoints", args.name, "last")
        save_checkpoint(path, epoch, model, optimizer, train_losses)
        print(f"checkpoint saved at :{path}")
    except KeyboardInterrupt:
        path = os.path.join(args.log_dir, "checkpoints", args.name, "last")
        save_checkpoint(path, epoch, model, optimizer, train_losses)
        print(f"Training interrupted checkpoint saved at :{path}")
aman0044/resnet-classifier
train.py
train.py
py
6,456
python
en
code
0
github-code
6
7160221934
# Answer to Lift queries
# https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/lift-queries

A, B = 0, 7
for _ in range(int(input())):
    n = int(input())
    if abs(n - A) < abs(n - B):
        A = n
    elif abs(n - A) > abs(n - B):
        B = n
    elif A < B:
        A = n
    else:
        B = n
    print('A' if A == n else 'B')
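
# --- Hedged worked example (an assumption, not part of the original file) ---
# Lifts start at floors A=0 and B=7. For a request at floor n=3:
#   |3 - 0| = 3 < |3 - 7| = 4, so lift A moves: A becomes 3 and 'A' is printed.
# A later request at n=5 ties (|5 - 3| = |5 - 7| = 2); since A(3) < B(7),
# the tie goes to the lower lift, so A moves again and 'A' is printed.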
CompetitiveCode/HackerEarth
Basic Programming/Input Output/Lift queries.py
Lift queries.py
py
402
python
en
code
1
github-code
6
2140106274
import numpy as np
from pyqtgraph.Qt import QtGui, QtCore

import init_multi as im
import func_multi as fm

'''
BEGIN PROGRAM
'''


# frame-drawing function; uses convert() to fetch data from the fifo
def showTrack(i):
    x1, y1 = fm.ellipseCreate(20 + 5 * i, 30, 50, 70 - 3 * i, 0.05 * i)
    im.ellipse1.setData(x1, y1)
    x2, y2 = fm.ellipseCreate(40 - 5 * i, 20, 40, 80 + 3 * i, 0.07 * i)
    im.ellipse2.setData(x2, y2)
    tx, ty = fm.ellipseCross(x1, y1, x2, y2)
    '''
    tx = np.zeros((1))
    ty = np.zeros((1))
    tx[0] = 10 + i*5
    ty[0] = 20 + i*6
    '''
    fm.plot2track(tx, ty)
    im.cross.setData(tx, ty)


def showCfar(i):
    data_target = fm.getFifoCfar()
    print("\tdata_target", data_target.shape)
    data_target[:, im.FNSAMPLES // 2] = 1
    im.img1.setImage(data_target)
    im.img2.setImage(data_target)


def showAf(i):
    data_af = fm.getFifoAf(2 * im.AF_SIZE)
    print("\tdata_af", data_af.shape)
    # Z = np.sin(i*im.d_gl) / im.d2_gl
    # im.plot_gl.setData(z=Z)
    im.plot_gl.setData(z=data_af)


def updateData():
    global i
    showTrack(i)
    # showCfar(i)
    if im.AF_UDP == 1:
        print("[%d]" % (i))
        showAf(i)
    # timer programming: schedule this same function to run again
    # im.QtCore.QTimer.singleShot(0.1, updateData)
    if i < im.frames - 1:
        i += 1
        t.start(100)


i = 0
t = QtCore.QTimer()
t.timeout.connect(updateData)
t.setSingleShot(True)
t.start(0)

if __name__ == '__main__':
    print("Start")
    # initial call to kick off the timer
    # updateData()

    ## Start Qt event loop unless running in interactive mode.
    import sys
    if (sys.flags.interactive != 1) or not hasattr(im.QtCore, 'PYQT_VERSION'):
        im.QtGui.QApplication.instance().exec_()
    im.fifo.close()
    exit(0)

'''
'''
NazimAliev/public
embedded/passive-radar-fpga-dsp-arm/multix86/main_multi.py
main_multi.py
py
1,879
python
ru
code
0
github-code
6
19346831151
import tensorflow as tf
import numpy as np
import pickle
import os
import re
import text2emotion as te


class EmotionRecognitionModel():
    def __init__(self) -> None:
        self.model = tf.keras.models.load_model(
            os.path.join(os.path.dirname(__file__), './trained_model/emotion_model.h5'))
        self.index_to_classes = {0: 'fear', 1: 'anger', 2: 'sadness', 3: 'surprise', 4: 'joy', 5: 'love'}
        self.classes_to_index = {'anger': 1, 'fear': 0, 'joy': 4, 'love': 5, 'sadness': 2, 'surprise': 3}

    def preprocess(self, tweets):
        whitelist = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ$')
        cleaned = []
        for tweet in tweets:
            tweet = tweet.lower()
            tweet = re.sub('http[s]?://\S+', '', tweet)
            # keep whitelisted characters only and collect the cleaned tweet
            cleaned.append(''.join(filter(whitelist.__contains__, tweet)))
        return cleaned

    def get_sequences(self, tokenizer, tweets):
        sequences = tokenizer.texts_to_sequences(tweets)
        padded_sequences = tf.keras.preprocessing.sequence.pad_sequences(
            sequences, truncating='post', maxlen=50, padding='post')
        return padded_sequences

    def create_tokenizer(self):
        path = os.path.join(os.path.dirname(__file__), './trained_model/tokenizer.pickle')
        with open(path, 'rb') as handle:
            tokenizer = pickle.load(handle)
        return tokenizer

    def recognise_emotion(self, tweet):
        tweet = self.preprocess(tweet)
        tokenizer = self.create_tokenizer()
        tweet_sequence = self.get_sequences(tokenizer, tweet)
        predict_x = self.model(np.expand_dims(tweet_sequence, axis=-1))
        classes_x = np.argmax(predict_x)
        emotion = self.index_to_classes.get(classes_x)
        tweet_and_emotion = {'tweet': tweet[0], 'emotion': emotion}
        return tweet_and_emotion

    # second method
    def preprocess_text2emotion(self, tweet):
        whitelist = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ$')
        tweet = tweet.lower()
        tweet = re.sub('http[s]?://\S+', '', tweet)
        answer = ''.join(filter(whitelist.__contains__, tweet))
        return answer

    def recognize_emotion_text2emotion(self, text):
        res_tweet = self.preprocess_text2emotion(text)
        emotions = te.get_emotion(res_tweet)
        return emotions
Socialet/web-backend
app/toolkit/EmotionRecognition/model.py
model.py
py
2,317
python
en
code
0
github-code
6
38447711574
import sys
import os
import json
from parse import validate_file

if __name__ == '__main__':
    outputs_dir = sys.argv[1]
    submission_name = sys.argv[2]
    submission = {}
    for input_path in os.listdir("inputs"):
        graph_name = input_path.split('.')[0]
        output_file = f'{outputs_dir}/{graph_name}.out'
        if os.path.exists(output_file) and validate_file(output_file):
            output = open(f'{outputs_dir}/{graph_name}.out').read()
            submission[input_path] = output
    with open(submission_name, 'w') as f:
        f.write(json.dumps(submission))
Sea-Snell/170project
prepare_submission.py
prepare_submission.py
py
588
python
en
code
7
github-code
6
22094537095
import logging
import datetime
import sqlite3

import voluptuous as vol
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv

_LOGGER = logging.getLogger(__name__)
DOMAIN = 'hasentinel'

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        'entities': vol.All(cv.ensure_list, [{
            vol.Required('entity_id'): cv.entity_id,
            vol.Required('urgency'): cv.string
        }])
    })
}, extra=vol.ALLOW_EXTRA)


def setup(hass, config):
    """Set up the HASentinel component."""
    conf = config[DOMAIN]
    entities = conf.get('entities')

    urgency_to_minutes = {
        'low': 7 * 24 * 60,  # 1 week
        'medium': 48 * 60,   # 48 hours
        'high': 60           # 1 hour (for testing purposes; change back to 24*60 for production)
    }

    conn = sqlite3.connect('/config/hasentinel.db')
    cursor = conn.cursor()
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS entity_states (
            entity_id TEXT PRIMARY KEY,
            device_id TEXT,
            last_seen TEXT,
            urgency_level TEXT,
            reported INTEGER
        )
    ''')

    def check_entities(now):
        """Routine to check entities' availability."""
        for entity in entities:
            entity_id = entity['entity_id']
            urgency = entity['urgency']
            state = hass.states.get(entity_id)
            if not state:
                _LOGGER.error(f"Error fetching state for {entity_id}")
                continue

            last_seen_attribute = state.attributes.get("last_seen")
            current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

            cursor.execute("SELECT last_seen, reported FROM entity_states WHERE entity_id = ?", (entity_id,))
            record = cursor.fetchone()

            if last_seen_attribute:
                last_seen_dt = datetime.datetime.fromisoformat(last_seen_attribute.replace('Z', '+00:00'))
            elif record:
                last_seen_dt = datetime.datetime.strptime(record[0], '%Y-%m-%d %H:%M:%S')
            else:
                last_seen_dt = datetime.datetime.now()

            delta = datetime.datetime.now() - last_seen_dt

            if state.state != "unavailable" or (last_seen_attribute and delta.total_seconds() <= urgency_to_minutes[urgency] * 60):
                if record:
                    cursor.execute("UPDATE entity_states SET last_seen = ?, reported = 0 WHERE entity_id = ?",
                                   (current_time, entity_id))
                else:
                    cursor.execute(
                        "INSERT INTO entity_states (entity_id, device_id, last_seen, urgency_level, reported) VALUES (?, ?, ?, ?, 0)",
                        (entity_id, state.attributes.get("device_id", ""), current_time, urgency))
            else:
                if record and delta.total_seconds() > urgency_to_minutes[urgency] * 60 and record[1] == 0:
                    cursor.execute("UPDATE entity_states SET reported = 1 WHERE entity_id = ?", (entity_id,))

            conn.commit()

    # Set up the routine to run every minute
    async_track_time_interval(hass, check_entities, datetime.timedelta(minutes=1))

    return True
dennis-bell/HASentinel
custom_components/hasentinel/__init__.py
__init__.py
py
3,269
python
en
code
0
github-code
6
17433146324
from sikuli import *
from test_helper import *
import os


def open_handler(event):
    Debug.user("Successfully opened flex.")
    event.stopObserver()
    wait(45)
    # Don't stop observer, to give it time to open before
    # the next script runs.


# Open Flex from the start screen
def open_new_project(project_name="hello"):
    wait("Createanewpr.png", 300)
    click("Createanewpr.png")
    type(project_name)
    click("OK.png")
    if exists(Pattern("OK-2.png").similar(0.88)):
        click(Pattern("OK-1.png").similar(0.86))
    else:
        click(Pattern("Qpen.png").similar(0.80))
    onAppear("1435347136957.png", open_handler)
    observe(300)
sillsdev/FwIntegrationTests
general_tests/helpers/open_new_project.sikuli/open_new_project.py
open_new_project.py
py
661
python
en
code
1
github-code
6
10915507732
'''
problem:
You are given a string S and width w.
Your task is to wrap the string into a paragraph of width w.

Input Format--
The first line contains a string, S.
The second line contains the width, w.

Constraints--
0 < len(S) < 1000
0 <= w <= len(S)

Output Format--
Print the text wrapped paragraph.

Sample Input 0
ABCDEFGHIJKLIMNOQRSTUVWXYZ
4

Sample Output 0
ABCD
EFGH
IJKL
IMNO
QRST
UVWX
YZ
'''
# code is here
import textwrap


def wrap(string, max_width):
    # textwrap.fill breaks the string into lines of at most max_width characters
    return textwrap.fill(string, max_width)


if __name__ == '__main__':
    string, max_width = input(), int(input())
    result = wrap(string, max_width)
    print(result)
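
# --- Hedged usage sketch (an assumption, not part of the original file) ---
# textwrap.fill breaks long words by default (break_long_words=True), so a
# 26-character string at width 4 comes out in chunks of 4:
#   wrap("ABCDEFGHIJKLIMNOQRSTUVWXYZ", 4)
#   -> "ABCD\nEFGH\nIJKL\nIMNO\nQRST\nUVWX\nYZ"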
harshitbansal373/python
text-wrap.py
text-wrap.py
py
737
python
en
code
15
github-code
6