id (stringlengths 1-265) | text (stringlengths 6-5.19M) | dataset_id (stringclasses, 7 values) |
---|---|---|
3361790 | <reponame>nearj/mpvr-motionfiltering
# coding: utf-8
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import config.definitions as defs
time_tag = 'Time'
incidence_tag = 'DizzinessRange'
res = [f for f in glob.glob(defs.DATA_RAW_MOTION_DIR + "*.csv") if "3DI" in f and "3DI_0" not in f]
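# Load every 3DI motion CSV, convert the wall-clock timestamps to seconds relative to
# the first retained row, and collect (time, incidence) pairs per participant in tmp.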
tmp = []
for f in res:
print(f)
df = pd.read_csv(f)
times = pd.to_datetime(df[time_tag].str.split().str[1]).astype(int) / 10 ** 9
times -= times[1]
tmp.append(pd.DataFrame(df[incidence_tag].values[1:], times.values[1:]))
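# _sampling_time() snaps the irregular timestamps onto a fixed 1/3-second grid over 0-105 s
# (316 grid points); a sample is kept when it lies within ~61 ms of a grid point.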
def _sampling_time(times):
ret_dts = [] # delta times
ret_ind = [] # sampled indices
grid = np.linspace(0, 105, 3*105 + 1)
grid_idx = 0
for i in range(0, 941):
diff = times[i] - grid[grid_idx]
if np.abs(diff) < 0.061:
ret_dts.append(times[i])
ret_ind.append(i + 1)
grid_idx += 1
if grid_idx >= 316:
return np.array(ret_ind, dtype=int), np.array(ret_dts)
return None
res = []
for t in tmp:
ind, ts = _sampling_time(t.index.values)
res.append(ts)
res.append(ind)
df = pd.DataFrame(np.asarray(res).T)
k = 1
for t in tmp:
res = []
ind, ts = _sampling_time(t.index.values)
res.append(ts)
res.append(ind)
df = pd.DataFrame(np.asarray(res).T)
df.columns = ['Time', 'Index']
df.to_csv('./data/raw/timestamp/3DI_{:02}.csv'.format(k))
incidence = []
j = 0
for i in range(len(ind)):
incidence.append(np.sum(t.values[j:i+1]))
j = i
df = pd.DataFrame(np.asarray(incidence))
df.to_csv('./data/raw/incidence/3DI_{:02}.csv'.format(k))
k += 1
columns = []
for i in range(30):
columns.append('3DI_{:02}_Time'.format(i))
columns.append('3DI_{:02}_Incidence'.format(i))
df.columns = columns
for i in range(5):
plt.clf()
plt.figure(figsize=(18, 6))
axes = [plt.subplot(321+j) for j in range(6)]
for j in range(6):
axes[j].bar(df.iloc[:,6*i+3*j], df.iloc[:,6*i+3*j+2], width=0.1)
axes[j].set_ylim([0,3])
axes[j].set_xlabel('3DI_{:02}'.format(6*i+j+1), fontsize=7)
plt.tight_layout()
plt.savefig('./data/raw/test/3DI_{:02}-{:02}.png'.format(6*i+1, 6*i+6))
tot = np.sum(df.iloc[:,2::3], axis=1)
plt.clf()
plt.figure(figsize=(18,6))
plt.bar(df.iloc[:,0], tot, width=0.1)
plt.xlabel('3DI_total', fontsize=7)
plt.tight_layout()
plt.savefig('./data/raw/test/3DI_total.png')
def save_fig(scenario, data, incidence, time):
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
directory = './data/processed/result/3di_2018/graph/MPentropy/{:s}.png'.format(scenario)
ylim = [-250000, 250000]
plt.close()
x = []
y = []
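# Walk the series and insert an artificial point (with a tiny +/- epsilon y-value) at every
# sign change so the LineCollection below switches color exactly at the zero crossing.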
for i in range(len(data)-1):
x.append(time[i])
y.append(data[i])
if data[i] * data[i + 1] <= 0:
x.append((time[i] + time[i+1]) / 2)
if data[i] > 0:
y.append(-0.0000000001)
else:
y.append(0.000000001)
i += 1
x.append(time[i])
y.append(data[i])
points = np.array([x, y]).T.reshape(-1,1,2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
fig, axes = plt.subplots(2, 1, sharex=True)
fig.set_size_inches(18, 6)
cmap = ListedColormap(['C1', 'C0'])
norm = BoundaryNorm([-10000000, 0, 10000000], cmap.N)
lc = LineCollection(segments, cmap=cmap, norm=norm)
lc.set_array(np.array([1 if t > 0 else -1 for t in y]))
axes[0].add_collection(lc)
axes[1].bar(time, incidence, width=0.1)
axes[0].set_xlabel('time', fontsize = 18)
axes[0].set_ylabel('MP Entropy', fontsize = 18)
axes[0].set_xlim(x[0], x[-1])
axes[0].set_ylim(ylim)
axes[0].set_xticks(np.arange(int(time[0]), int(time[-1])+1, 2))
axes[0].grid(axis='x')
axes[1].set_ylabel('Incidence', fontsize = 18)
axes[1].set_ylim([0,3])
axes[1].set_xticks(np.arange(int(time[0]), int(time[-1])+1, 2))
axes[1].grid(axis='x')
plt.tight_layout()
plt.savefig(directory)
def save_fig_total(scenario, datas, incidence, time):
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
plt.close()
ylim = [-250000, 250000]
fig, axes = plt.subplots(2, 1, sharex=True)
fig.set_size_inches(18, 6)
for data in datas:
x = []
y = []
for i in range(len(data)-1):
x.append(time[i])
y.append(data[i])
if data[i] * data[i + 1] <= 0:
x.append((time[i] + time[i+1]) / 2)
if data[i] > 0:
y.append(-0.0000000001)
else:
y.append(0.000000001)
i += 1
x.append(time[i])
y.append(data[i])
points = np.array([x, y]).T.reshape(-1,1,2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
cmap = ListedColormap(['C1', 'C0'])
norm = BoundaryNorm([-10000000, 0, 10000000], cmap.N)
lc = LineCollection(segments, cmap=cmap, norm=norm)
lc.set_array(np.array([1 if t > 0 else -1 for t in y]))
axes[0].add_collection(lc)
axes[1].bar(time, incidence, width=0.1)
axes[0].set_xlabel('time', fontsize = 18)
axes[0].set_ylabel('MP Entropy', fontsize = 18)
axes[0].set_xlim(x[0], x[-1])
axes[0].set_ylim(ylim)
axes[0].set_xticks(np.arange(int(time[0]), int(time[-1])+1, 2))
axes[0].grid(axis='x')
axes[1].set_ylabel('Incidence', fontsize = 18)
axes[1].set_ylim([0,3])
axes[1].set_xticks(np.arange(int(time[0]), int(time[-1])+1, 2))
axes[1].grid(axis='x')
plt.tight_layout()
directory = './data/processed/result/3di_2018/graph/MPentropy/{:s}.png'.format(scenario)
plt.savefig(directory)
| StarcoderdataPython |
3306374 | <reponame>iharthi/cargo-scanner<filename>cargoscan.py
#!/usr/bin/env python2
from __future__ import print_function
import serial_led
import random
import pygame as pg
import argparse
import draw
import os
ports = serial_led.SerialLedController.port_list()
default_alarm = os.path.join(os.path.split(os.path.realpath(__file__))[0], 'alarm.wav')
parser = argparse.ArgumentParser(
description='Scan for explosives and guns! The station will be safe with your awesome scanning gear, officer.')
parser.add_argument('-p','--serial_port', required=False, help='serial port for LED controller')
parser.add_argument('-s','--speed', type=int, default=9600, required=False,
help='serial port speed')
parser.add_argument('-F','--fullscreen', action='store_true', default=False, required=False,
help='Run in fullscreen')
parser.add_argument('-f','--fps', type=int, default=30, required=False,help='Aim for FPS')
parser.add_argument('-a', '--alarm', default=default_alarm, required=False, help='Alarm sound file')
args = parser.parse_args()
if args.serial_port is None:
led = serial_led.DummyLedController()
else:
ports = serial_led.SerialLedController.port_list()
if args.serial_port not in ports:
print("{} is not a valid serial port. Valid serial ports are: {}".format(
str(args.serial_port),
", ".join(ports)
))
exit(1)
led = serial_led.SerialLedController(args.serial_port, args.speed)
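# scan_segments() returns one boolean per body segment (True = clear, False = flagged).
# Named modes produce fixed patterns; otherwise each segment is flagged at random with a
# probability set by 'percent' (NORMAL ~40%, HIGH ~80%, LOW ~20%).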
def scan_segments(mode=None):
result = []
n_segments = 10
if mode == "CLEAR":
return [True] * n_segments
elif mode == "RED":
return [False] * n_segments
elif mode == "LAST":
return [True] * (n_segments-1) + [False]
elif mode == "BOTTOM":
return [False] * 4 + [True] * (n_segments-4)
elif mode == "TOP":
return [True] * (n_segments-4) + [False] * 4
elif mode == "MIDDLE":
return [True] * 3 + [False] * 4 + [True] * 3
elif mode == "FUNERAL":
return [True, True, False, False, True, False, False, False, False, False]
percent = 40
if mode == "HIGH":
percent = 80
if mode == "LOW":
percent = 20
for i in range(n_segments):
if random.randint(0,100) >= percent:
result.append(True)
else:
result.append(False)
return result
def color_for_segment(segment):
return (255,0,0) if not segment else (0,255,0)
def frame_color_for_segment(segment):
return (255,200,200) if not segment else (200,200,255)
def colors_for_segments(segments):
return [color_for_segment(x) for x in segments]
def random_mode():
return "".join([str(random.randint(0,1)) for x in range(8)])
try:
pg.init()
pg.display.set_caption("Scanner")
pg.mouse.set_visible(False)
pg.mixer.music.load(args.alarm)
if args.fullscreen:
pg.display.set_mode((0, 0), pg.FULLSCREEN)
else:
pg.display.set_mode((800, 600))
clock = pg.time.Clock()
fps = 30.0
screen = pg.display.get_surface()
s = 1
segments = scan_segments("CLEAR")
running = False
treshold = 3
speed = 0.03
speed_mappings = {
ord('q'): 0.01,
ord('w'): 0.02,
ord('e'): 0.03,
ord('r'): 0.04,
ord('t'): 0.05,
}
mode_mappings = {
ord('o'): 'NORMAL',
ord('p'): 'HIGH',
ord('i'): 'LOW',
ord('u'): 'TOP',
ord('j'): 'MIDDLE',
ord('m'): 'BOTTOM',
ord('['): 'FUNERAL',
}
mode_bits = {
'NORMAL':'00111010',
'HIGH':'11011111',
'LOW':'00001000',
'TOP':'10000000',
'MIDDLE':'00110000',
'BOTTOM':'00000001',
'FUNERAL': '01010110',
}
mode = 'NORMAL'
rm1 = random_mode()
rm2 = random_mode()
rm3 = random_mode()
ticks = 0
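# Main loop: while 'running', advance the scan position s by 'speed' each frame, refresh the
# pseudo-random mode read-outs, and handle keyboard input (quit, start scan, threshold digits,
# speed and mode keys).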
while True:
set_leds = False
if running:
s += speed
if s >= 1:
s = 1
running = False
set_leds = True
ticks += 1
rm3 = random_mode()
if ticks % 10 == 0:
rm2 = random_mode()
if ticks == 100:
rm1 = random_mode()
ticks = 0
for event in pg.event.get():
if event.type == pg.QUIT:
led.stop_all()
exit()
if (event.type == pg.KEYDOWN):
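# key code 291 is F10 (pygame.K_F10), used as the exit key; 32 is the space bar, which starts a scan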
if event.key == 291:
led.stop_all()
exit()
if event.key == 32:
if not running:
running = True
s = 0
segments = scan_segments(mode)
if event.key >= ord('0') and event.key <= ord('9'):
treshold = event.key - ord('0')
if not running:
set_leds = True
if event.key in speed_mappings:
speed = speed_mappings[event.key]
if event.key in mode_mappings:
mode = mode_mappings[event.key]
active_segment = int(s*10)
reds = len([x for x in segments[:active_segment] if not x])
screen.fill((0, 0, 0))
if running:
draw.frame(screen, frame_color_for_segment(segments[active_segment]))
else:
if reds >= treshold:
draw.frame(screen, (255, 0, 0))
else:
draw.frame(screen, (0,255,0))
if running:
if segments[active_segment]:
led.set_mode(0b011)
else:
led.set_mode(0b101)
draw.ray(screen, (255, 255, 255), s)
draw.progress(screen, (255, 255, 255), colors_for_segments(segments), s)
draw.reds(screen, reds, treshold)
draw.tresh(screen, treshold)
draw.mode(screen, ["00001011","01000110", mode_bits[mode], rm1, rm2, rm3 ])
if set_leds:
if reds >= treshold:
led.set_mode(0b100)
pg.mixer.music.play()
else:
led.set_mode(0b010)
pg.display.flip()
clock.tick(args.fps)
finally:
led.stop_all()
| StarcoderdataPython |
43574 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
from collections import defaultdict
import numpy as np
from scipy.spatial import distance
from tqdm import tqdm
np.set_printoptions(threshold=np.inf, suppress=True)
def main(args):
num_batches = args.num_batches
bert_data = defaultdict(list)
s_or_e_bert_data = defaultdict(list)
print('loading data...')
for para_idx in range(num_batches):
bert_filename = os.path.join(args.in_dir, 'bert_b{}.npz'.format(para_idx + 1))
bert_outputs = np.load(bert_filename)
for k, v in bert_outputs.items():
bert_data[k].append(v)
sbert_filename = os.path.join(args.in_dir, '{}_b{}.npz'.format(args.model, para_idx + 1))
sbert_outputs = np.load(sbert_filename)
for k, v in sbert_outputs.items():
s_or_e_bert_data[k].append(v)
print('stacking all examples of both bert and {}...'.format(args.model))
for k, v in s_or_e_bert_data.items():
s_or_e_bert_data[k] = np.concatenate(v) # stack along batch dim
for k, v in bert_data.items():
bert_data[k] = np.concatenate(v) # stack along batch dim
print('begin computing...')
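# For each of the 500 examples, compare the per-layer token embeddings of plain BERT against the
# chosen model (sbert/ebert): mean cosine distance is computed separately over question tokens and
# paragraph tokens, then averaged per layer at the end.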
all_para_distances = [[] for _ in range(12)]
all_q_distances = [[] for _ in range(12)]
# 500 examples paragraphs
for para_idx in tqdm(range(500)):
in_ids = bert_data['input_ids'][para_idx]
seg_ids = bert_data['segment_ids'][para_idx]
feature_ids = bert_data['feature_id'][para_idx]
q_ids = s_or_e_bert_data["question_ids"][para_idx]
c_ids = s_or_e_bert_data["context_ids"][para_idx]
q_length = np.sum(q_ids.astype(np.bool))
c_length = np.sum(c_ids.astype(np.bool))
sequence_length = np.sum(in_ids.astype(np.bool))
second_length = np.sum(seg_ids.astype(np.bool))
first_length = sequence_length - second_length
if not (c_length == second_length):
print('shifted paragraphs:', feature_ids, c_length, second_length)
continue
if not (q_length == first_length):
print('shifted questions:', feature_ids, q_length, first_length)
continue
for l in range(12):
b_layer_vectors = bert_data['layer{}'.format(l)][para_idx]
s_layer_vectors = s_or_e_bert_data['layer{}'.format(l)][para_idx]
# b_pvs / s_pvs are this layer's paragraph token vectors from BERT and from the compared model
b_pvs = b_layer_vectors[first_length:second_length]
s_pvs = s_layer_vectors[len(q_ids):len(q_ids) + c_length]
# mean cosine distance between the BERT and {s,e}BERT paragraph token vectors at this layer
p_dist = np.mean([distance.cosine(b_p, s_p) for b_p, s_p in zip(b_pvs, s_pvs)])
all_para_distances[l].append(p_dist)
# b_qvs / s_qvs are this layer's question token vectors from BERT and from the compared model
b_qvs = b_layer_vectors[:first_length]
s_qvs = s_layer_vectors[:q_length]
q_dist = np.mean([distance.cosine(b_q, s_q) for b_q, s_q in zip(b_qvs, s_qvs)])
all_q_distances[l].append(q_dist)
# all_para_distances has 12 lists (one per layer), each holding the per-example mean distances
all_para_mean_variances = [np.mean(v) for v in all_para_distances]
all_q_mean_variances = [np.mean(v) for v in all_q_distances]
print(all_para_mean_variances)
print(all_q_mean_variances)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('in_dir', type=str, default=None)
parser.add_argument('-n', '--num_batches', type=int, default=20)
parser.add_argument('-m', '--model', type=str, default='sbert', choices=('ebert', 'sbert'),
help='choose which model compare distance')
main(parser.parse_args())
| StarcoderdataPython |
3225008 | from rest_framework import status
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ReadOnlyModelViewSet
from .models import ExchangeRate
from .serializers import ExchangeRateSerializer
from .tasks import update_exchange_rates
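# UpdateRatesAPIView only enqueues the Celery task (update_exchange_rates.delay()) and returns
# immediately; ExchangeRatesViewSet serves the stored rates read-only, looked up by currency code.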
class UpdateRatesAPIView(APIView):
permission_classes = (AllowAny,)
authentication_classes = []
def post(self, request, *args, **kwargs):
update_exchange_rates.delay()
return Response(status=status.HTTP_200_OK)
class ExchangeRatesViewSet(ReadOnlyModelViewSet):
queryset = ExchangeRate.objects.all()
serializer_class = ExchangeRateSerializer
permission_classes = (AllowAny,)
authentication_classes = []
lookup_field = 'currency'
| StarcoderdataPython |
170803 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals
from __future__ import absolute_import
import numpy as np, time, sys, smbus
# default addresses and ChipIDs of Bosch BMP 085/180 and BMP/E 280 sensors
BMP_I2CADDR = 0x77
BMP_I2CADDR2 = 0x76
#BMP_I2CADDR = 0x76 # alternative device I2C address
BMP180_CHIPID = 0x55
BMP280_CHIPID = 0x58
BME280_CHIPID = 0x60
# ID register:
REG_ID = 0xD0
# code of driver classes included below
class BMPx80Config(object):
'''Bosch BMPx80 (BMP085/180/280, BME280) sensor configuration and interface'''
def __init__(self, confdict = None):
self.BMP_I2CADDR = BMP_I2CADDR
if confdict==None: confdict={}
if 'I2CADDR' in confdict:
self.BMP_I2CADDR = confdict['I2CADDR']
print("BMPx80: I2C address set to %x "%(self.BMP_I2CADDR) )
if 'NChannels' in confdict:
self.NChannels = confdict["NChannels"]
else:
self.NChannels = 2
self.ChanLims = [[-40., 85.],[300., 1100.], [0., 100.]]
self.ChanNams = ['T','P', 'H']
self.ChanUnits= ['°C','hPa', '%']
def init(self):
try:
# set up I2C bus
busnum = 1
bus = smbus.SMBus(busnum) # Rev 2 Pi, Pi 2 & Pi 3 uses bus 1
# Rev 1 Pi uses bus 0
except Exception as e:
print("BMPx80: Error initialising I2C bus - exit")
print(str(e))
sys.exit(1)
try:
try:
# find out which sensor we have:
(self.chipID,) = bus.read_i2c_block_data(self.BMP_I2CADDR, REG_ID, 1)
except:
# try secondary address (BMP280)
print("BMPx80: trying secondary address %x "%(BMP_I2CADDR2) )
(self.chipID,) = bus.read_i2c_block_data(BMP_I2CADDR2, REG_ID, 1)
self.BMP_I2CADDR = BMP_I2CADDR2
# set up sensor
print("BMPx80: ChipID %x "%(self.chipID) )
if self.chipID == BMP180_CHIPID:
self.sensor = BMP085(address=self.BMP_I2CADDR, busnum=busnum, i2c_interface=smbus.SMBus)
elif self.chipID == BMP280_CHIPID:
self.sensor = BMP280(address=self.BMP_I2CADDR, busnum=busnum, i2c_interface=smbus.SMBus)
elif self.chipID == BME280_CHIPID:
self.sensor = BME280(address=self.BMP_I2CADDR, i2c = bus)
else:
print("BMPx80: unknown chip ID - exiting")
sys.exit(1)
except Exception as e:
print("BMPx80: Error setting up device - exit")
print(str(e))
sys.exit(1)
def acquireData(self, buf):
if self.chipID == BME280_CHIPID:
buf[0], p, h = self.sensor.readAll() # temp., press., hum.
if self.NChannels > 1:
buf[1] = p
if self.NChannels > 2:
buf[2] = h
else:
buf[0] = self.sensor.read_temperature() # in degC
if self.NChannels > 1:
buf[1] = self.sensor.read_pressure()/100. # in hPa
def closeDevice(self):
# nothing to do here
pass
## ----- driver section -----------
# driver code for BMP085/180,
# adapted from original code by <NAME>, (c) Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Operating Modes
BMP085_ULTRALOWPOWER = 0
BMP085_STANDARD = 1
BMP085_HIGHRES = 2
BMP085_ULTRAHIGHRES = 3
# BMP085 Registers
BMP085_CAL_AC1 = 0xAA # R Calibration data (16 bits)
BMP085_CAL_AC2 = 0xAC # R Calibration data (16 bits)
BMP085_CAL_AC3 = 0xAE # R Calibration data (16 bits)
BMP085_CAL_AC4 = 0xB0 # R Calibration data (16 bits)
BMP085_CAL_AC5 = 0xB2 # R Calibration data (16 bits)
BMP085_CAL_AC6 = 0xB4 # R Calibration data (16 bits)
BMP085_CAL_B1 = 0xB6 # R Calibration data (16 bits)
BMP085_CAL_B2 = 0xB8 # R Calibration data (16 bits)
BMP085_CAL_MB = 0xBA # R Calibration data (16 bits)
BMP085_CAL_MC = 0xBC # R Calibration data (16 bits)
BMP085_CAL_MD = 0xBE # R Calibration data (16 bits)
BMP085_CONTROL = 0xF4
BMP085_TEMPDATA = 0xF6
BMP085_PRESSUREDATA = 0xF6
# Commands
BMP085_READTEMPCMD = 0x2E
BMP085_READPRESSURECMD = 0x34
class BMP085(object):
def __init__(self, mode=BMP085_STANDARD, address=BMP_I2CADDR, i2c=None, busnum=1, i2c_interface=None):
# Check that mode is valid.
if mode not in [BMP085_ULTRALOWPOWER, BMP085_STANDARD, BMP085_HIGHRES, BMP085_ULTRAHIGHRES]:
raise ValueError(('Unexpected mode value {0}. Set mode to one of '
'BMP085_ULTRALOWPOWER, BMP085_STANDARD, BMP085_HIGHRES, or BMP085_ULTRAHIGHRES').format(mode))
self._mode = mode
# Create I2C device.
if i2c is None:
import Adafruit_GPIO.I2C as I2C
i2c = I2C
self._device = i2c.get_i2c_device(address, busnum=busnum, i2c_interface=i2c_interface)
# Load calibration values.
self._load_calibration()
def _load_calibration(self):
self.cal_AC1 = self._device.readS16BE(BMP085_CAL_AC1) # INT16
self.cal_AC2 = self._device.readS16BE(BMP085_CAL_AC2) # INT16
self.cal_AC3 = self._device.readS16BE(BMP085_CAL_AC3) # INT16
self.cal_AC4 = self._device.readU16BE(BMP085_CAL_AC4) # UINT16
self.cal_AC5 = self._device.readU16BE(BMP085_CAL_AC5) # UINT16
self.cal_AC6 = self._device.readU16BE(BMP085_CAL_AC6) # UINT16
self.cal_B1 = self._device.readS16BE(BMP085_CAL_B1) # INT16
self.cal_B2 = self._device.readS16BE(BMP085_CAL_B2) # INT16
self.cal_MB = self._device.readS16BE(BMP085_CAL_MB) # INT16
self.cal_MC = self._device.readS16BE(BMP085_CAL_MC) # INT16
self.cal_MD = self._device.readS16BE(BMP085_CAL_MD) # INT16
def _load_datasheet_calibration(self):
# Set calibration from values in the datasheet example. Useful for debugging the
# temp and pressure calculation accuracy.
self.cal_AC1 = 408
self.cal_AC2 = -72
self.cal_AC3 = -14383
self.cal_AC4 = 32741
self.cal_AC5 = 32757
self.cal_AC6 = 23153
self.cal_B1 = 6190
self.cal_B2 = 4
self.cal_MB = -32767
self.cal_MC = -8711
self.cal_MD = 2868
def read_raw_temp(self):
"""Reads the raw (uncompensated) temperature from the sensor."""
self._device.write8(BMP085_CONTROL, BMP085_READTEMPCMD)
time.sleep(0.005) # Wait 5ms
raw = self._device.readU16BE(BMP085_TEMPDATA)
return raw
def read_raw_pressure(self):
"""Reads the raw (uncompensated) pressure level from the sensor."""
self._device.write8(BMP085_CONTROL, BMP085_READPRESSURECMD + (self._mode << 6))
if self._mode == BMP085_ULTRALOWPOWER:
time.sleep(0.005)
elif self._mode == BMP085_HIGHRES:
time.sleep(0.014)
elif self._mode == BMP085_ULTRAHIGHRES:
time.sleep(0.026)
else:
time.sleep(0.008)
msb = self._device.readU8(BMP085_PRESSUREDATA)
lsb = self._device.readU8(BMP085_PRESSUREDATA+1)
xlsb = self._device.readU8(BMP085_PRESSUREDATA+2)
raw = ((msb << 16) + (lsb << 8) + xlsb) >> (8 - self._mode)
return raw
def read_temperature(self):
"""Gets the compensated temperature in degrees celsius."""
UT = self.read_raw_temp()
# Datasheet value for debugging:
#UT = 27898
# Calculations below are taken straight from section 3.5 of the datasheet.
X1 = ((UT - self.cal_AC6) * self.cal_AC5) >> 15
X2 = (self.cal_MC << 11) // (X1 + self.cal_MD)
B5 = X1 + X2
temp = ((B5 + 8) >> 4) / 10.0
return temp
def read_pressure(self):
"""Gets the compensated pressure in Pascals."""
UT = self.read_raw_temp()
UP = self.read_raw_pressure()
# Datasheet values for debugging:
#UT = 27898
#UP = 23843
# Calculations below are taken straight from section 3.5 of the datasheet.
# Calculate true temperature coefficient B5.
X1 = ((UT - self.cal_AC6) * self.cal_AC5) >> 15
X2 = (self.cal_MC << 11) // (X1 + self.cal_MD)
B5 = X1 + X2
# Pressure Calculations
B6 = B5 - 4000
X1 = (self.cal_B2 * (B6 * B6) >> 12) >> 11
X2 = (self.cal_AC2 * B6) >> 11
X3 = X1 + X2
B3 = (((self.cal_AC1 * 4 + X3) << self._mode) + 2) // 4
X1 = (self.cal_AC3 * B6) >> 13
X2 = (self.cal_B1 * ((B6 * B6) >> 12)) >> 16
X3 = ((X1 + X2) + 2) >> 2
B4 = (self.cal_AC4 * (X3 + 32768)) >> 15
B7 = (UP - B3) * (50000 >> self._mode)
if B7 < 0x80000000:
p = (B7 * 2) // B4
else:
p = (B7 // B4) * 2
X1 = (p >> 8) * (p >> 8)
X1 = (X1 * 3038) >> 16
X2 = (-7357 * p) >> 16
p = p + ((X1 + X2 + 3791) >> 4)
return p
def read_altitude(self, sealevel_pa=101325.0):
"""Calculates the altitude in meters."""
# Calculation taken straight from section 3.6 of the datasheet.
pressure = float(self.read_pressure())
altitude = 44330.0 * (1.0 - pow(pressure / sealevel_pa, (1.0/5.255)))
return altitude
def read_sealevel_pressure(self, altitude_m=0.0):
"""Calculates the pressure at sealevel when given a known altitude in
meters. Returns a value in Pascals."""
pressure = float(self.read_pressure())
p0 = pressure / pow(1.0 - altitude_m/44330.0, 5.255)
return p0
# driver code for BMP280 (Guenter Quast, 2018)
# adapted vom code by <NAME> <<EMAIL>>
#
# Based on the Adafruit BMP280 Driver C++ driver and the BMP085 python lib.
# - https://github.com/adafruit/Adafruit_BMP280_Library
# - https://github.com/adafruit/Adafruit_Python_BMP
#
# Datasheet: https://www.adafruit.com/datasheets/BST-BMP280-DS001-11.pdf
# BMP280 Registers
BMP280_DIG_T1 = 0x88 # R Unsigned Calibration data (16 bits)
BMP280_DIG_T2 = 0x8A # R Signed Calibration data (16 bits)
BMP280_DIG_T3 = 0x8C # R Signed Calibration data (16 bits)
BMP280_DIG_P1 = 0x8E # R Unsigned Calibration data (16 bits)
BMP280_DIG_P2 = 0x90 # R Signed Calibration data (16 bits)
BMP280_DIG_P3 = 0x92 # R Signed Calibration data (16 bits)
BMP280_DIG_P4 = 0x94 # R Signed Calibration data (16 bits)
BMP280_DIG_P5 = 0x96 # R Signed Calibration data (16 bits)
BMP280_DIG_P6 = 0x98 # R Signed Calibration data (16 bits)
BMP280_DIG_P7 = 0x9A # R Signed Calibration data (16 bits)
BMP280_DIG_P8 = 0x9C # R Signed Calibration data (16 bits)
BMP280_DIG_P9 = 0x9E # R Signed Calibration data (16 bits)
BMP280_CONTROL = 0xF4
BMP280_RESET = 0xE0
BMP280_CONFIG = 0xF5
BMP280_PRESSUREDATA = 0xF7
BMP280_TEMPDATA = 0xFA
class BMP280(object):
def __init__(self, address=BMP_I2CADDR, i2c=None, busnum=1, i2c_interface= None):
# Adadfruit I2C interface
if i2c is None:
import Adafruit_GPIO.I2C as I2C
i2c = I2C
self._device = i2c.get_i2c_device(address,
busnum = busnum, i2c_interface = i2c_interface)
if self._device.readU8(REG_ID) != BMP280_CHIPID:
raise Exception('Unsupported chip')
# Load calibration values.
self._load_calibration()
self._device.write8(BMP280_CONTROL, 0x3F)
def _load_calibration(self):
self.cal_t1 = int(self._device.readU16(BMP280_DIG_T1)) # UINT16
self.cal_t2 = int(self._device.readS16(BMP280_DIG_T2)) # INT16
self.cal_t3 = int(self._device.readS16(BMP280_DIG_T3)) # INT16
self.cal_p1 = int(self._device.readU16(BMP280_DIG_P1)) # UINT16
self.cal_p2 = int(self._device.readS16(BMP280_DIG_P2)) # INT16
self.cal_p3 = int(self._device.readS16(BMP280_DIG_P3)) # INT16
self.cal_p4 = int(self._device.readS16(BMP280_DIG_P4)) # INT16
self.cal_p5 = int(self._device.readS16(BMP280_DIG_P5)) # INT16
self.cal_p6 = int(self._device.readS16(BMP280_DIG_P6)) # INT16
self.cal_p7 = int(self._device.readS16(BMP280_DIG_P7)) # INT16
self.cal_p8 = int(self._device.readS16(BMP280_DIG_P8)) # INT16
self.cal_p9 = int(self._device.readS16(BMP280_DIG_P9)) # INT16
def _load_datasheet_calibration(self):
# Set calibration from values in the datasheet example. Useful for debugging the
# temp and pressure calculation accuracy.
self.cal_t1 = 27504
self.cal_t2 = 26435
self.cal_t3 = -1000
self.cal_p1 = 36477
self.cal_p2 = -10685
self.cal_p3 = 3024
self.cal_p4 = 2855
self.cal_p5 = 140
self.cal_p6 = -7
self.cal_p7 = 15500
self.cal_p8 = -14500
self.cal_p9 = 6000
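# Raw temperature/pressure readings are 20-bit values: two MSB-first bytes plus the upper
# nibble of a third byte, hence the extra shift/OR in read_raw() below.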
def read_raw(self, register):
"""Reads the raw (uncompensated) temperature or pressure from the sensor."""
raw = self._device.readU16BE(register)
raw <<= 8
raw = raw | self._device.readU8(register + 2)
raw >>= 4
return raw
def _compensate_temp(self, raw_temp):
""" Compensate temperature """
t1 = (((raw_temp >> 3) - (self.cal_t1 << 1)) *
(self.cal_t2)) >> 11
t2 = (((((raw_temp >> 4) - (self.cal_t1)) *
((raw_temp >> 4) - (self.cal_t1))) >> 12) *
(self.cal_t3)) >> 14
return t1 + t2
def read_temperature(self):
"""Gets the compensated temperature in degrees celsius."""
raw_temp = self.read_raw(BMP280_TEMPDATA)
compensated_temp = self._compensate_temp(raw_temp)
temp = float((compensated_temp * 5 + 128) >> 8) / 100.0
return temp
def read_pressure(self):
"""Gets the compensated pressure in Pascals."""
raw_temp = self.read_raw(BMP280_TEMPDATA)
compensated_temp = self._compensate_temp(raw_temp)
raw_pressure = self.read_raw(BMP280_PRESSUREDATA)
p1 = compensated_temp - 128000
p2 = p1 * p1 * self.cal_p6
p2 += (p1 * self.cal_p5) << 17
p2 += self.cal_p4 << 35
p1 = ((p1 * p1 * self.cal_p3) >> 8) + ((p1 * self.cal_p2) << 12)
p1 = ((1 << 47) + p1) * (self.cal_p1) >> 33
if 0 == p1:
return 0
p = 1048576 - raw_pressure
p = (((p << 31) - p2) * 3125) // p1
p1 = (self.cal_p9 * (p >> 13) * (p >> 13)) >> 25
p2 = (self.cal_p8 * p) >> 19
p = ((p + p1 + p2) >> 8) + ((self.cal_p7) << 4)
return float(p // 256)
def read_altitude(self, sealevel_pa=101325.0):
"""Calculates the altitude in meters."""
# Calculation taken straight from section 3.6 of the datasheet.
pressure = float(self.read_pressure())
altitude = 44330.0 * (1.0 - pow(pressure / sealevel_pa, (1.0 / 5.255)))
return altitude
def read_sealevel_pressure(self, altitude_m=0.0):
"""Calculates the pressure at sealevel when given a known altitude in
meters. Returns a value in Pascals."""
pressure = float(self.read_pressure())
p0 = pressure / pow(1.0 - altitude_m / 44330.0, 5.255)
return p0
# driver code for BMP/E280,
# adapted from original code by <NAME>
#--------------------------------------
## import smbus
from ctypes import c_short
from ctypes import c_byte
from ctypes import c_ubyte
# some helper functions
def getShort(data, index):
# return two bytes from data as a signed 16-bit value
return c_short((data[index+1] << 8) + data[index]).value
def getUShort(data, index):
# return two bytes from data as an unsigned 16-bit value
return (data[index+1] << 8) + data[index]
def getChar(data,index):
# return one byte from data as a signed char
result = data[index]
if result > 127:
result -= 256
return result
def getUChar(data,index):
# return one byte from data as an unsigned char
result = data[index] & 0xFF
return result
# Register Addresses
REG_DATA = 0xF7
REG_CONTROL = 0xF4
REG_CONFIG = 0xF5
REG_CONTROL_HUM = 0xF2
REG_HUM_MSB = 0xFD
REG_HUM_LSB = 0xFE
# Oversample setting - page 27
OVERSAMPLE_TEMP = 2
OVERSAMPLE_PRES = 2
MODE = 1
# Oversample setting for humidity register - page 26
OVERSAMPLE_HUM = 2
class BME280(object):
"""Class to represent the Bosch BMP280 temperature and pressure sensor
"""
def __init__(self, address=BMP_I2CADDR, i2c=None):
self.DEVICE = address
if i2c==None:
self.bus = smbus.SMBus(1) # Rev 2 Pi, Pi 2 & Pi 3 uses bus 1
# Rev 1 Pi uses bus 0
else:
self.bus = i2c
# initialise calibration constants from device
self.init()
def init(self):
self.bus.write_byte_data(self.DEVICE, REG_CONTROL_HUM, OVERSAMPLE_HUM)
control = OVERSAMPLE_TEMP<<5 | OVERSAMPLE_PRES<<2 | MODE
self.bus.write_byte_data(self.DEVICE, REG_CONTROL, control)
# Read blocks of calibration data from EEPROM
# See Page 22 data sheet
cal1 = self.bus.read_i2c_block_data(self.DEVICE, 0x88, 24)
cal2 = self.bus.read_i2c_block_data(self.DEVICE, 0xA1, 1)
cal3 = self.bus.read_i2c_block_data(self.DEVICE, 0xE1, 7)
# Convert byte data to word values
self.dig_T1 = getUShort(cal1, 0)
self.dig_T2 = getShort(cal1, 2)
self.dig_T3 = getShort(cal1, 4)
self.dig_P1 = getUShort(cal1, 6)
self.dig_P2 = getShort(cal1, 8)
self.dig_P3 = getShort(cal1, 10)
self.dig_P4 = getShort(cal1, 12)
self.dig_P5 = getShort(cal1, 14)
self.dig_P6 = getShort(cal1, 16)
self.dig_P7 = getShort(cal1, 18)
self.dig_P8 = getShort(cal1, 20)
self.dig_P9 = getShort(cal1, 22)
self.dig_H1 = getUChar(cal2, 0)
self.dig_H2 = getShort(cal3, 0)
self.dig_H3 = getUChar(cal3, 2)
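# dig_H4/dig_H5 are 12-bit signed calibration values packed across registers 0xE4-0xE6;
# shifting left by 24 and back right by 20 sign-extends the byte before OR-ing in the shared nibble.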
self.dig_H4 = getChar(cal3, 3)
self.dig_H4 = (self.dig_H4 << 24) >> 20
self.dig_H4 = self.dig_H4 | (getChar(cal3, 4) & 0x0F)
self.dig_H5 = getChar(cal3, 5)
self.dig_H5 = (self.dig_H5 << 24) >> 20
self.dig_H5 = self.dig_H5 | (getUChar(cal3, 4) >> 4 & 0x0F)
self.dig_H6 = getChar(cal3, 6)
# Wait in ms (Datasheet Appendix B: Measurement time and current calculation)
wait_time = 1.25 + (2.3 * OVERSAMPLE_TEMP) + ((2.3 * OVERSAMPLE_PRES) + 0.575) + ((2.3 * OVERSAMPLE_HUM)+0.575)
time.sleep(wait_time/1000) # Wait the required time
def readAll(self):
# Read temperature/pressure/humidity
data = self.bus.read_i2c_block_data(self.DEVICE, REG_DATA, 8)
pres_raw = (data[0] << 12) | (data[1] << 4) | (data[2] >> 4)
temp_raw = (data[3] << 12) | (data[4] << 4) | (data[5] >> 4)
hum_raw = (data[6] << 8) | data[7]
#Refine temperature
var1 = ((((temp_raw>>3)-(self.dig_T1<<1)))*(self.dig_T2)) >> 11
var2 = (((((temp_raw>>4) - (self.dig_T1)) * ((temp_raw>>4) - (self.dig_T1))) >> 12) * (self.dig_T3)) >> 14
t_fine = var1+var2
t = float(((t_fine * 5) + 128) >> 8)
# Refine pressure and adjust for temperature
var1 = t_fine / 2.0 - 64000.0
var2 = var1 * var1 * self.dig_P6 / 32768.0
var2 = var2 + var1 * self.dig_P5 * 2.0
var2 = var2 / 4.0 + self.dig_P4 * 65536.0
var1 = (self.dig_P3 * var1 * var1 / 524288.0 + self.dig_P2 * var1) / 524288.0
var1 = (1.0 + var1 / 32768.0) * self.dig_P1
if var1 == 0:
p=0
else:
p = 1048576.0 - pres_raw
p = ((p - var2 / 4096.0) * 6250.0) / var1
var1 = self.dig_P9 * p * p / 2147483648.0
var2 = p * self.dig_P8 / 32768.0
p = p + (var1 + var2 + self.dig_P7) / 16.0
# Refine humidity
h = t_fine - 76800.0
h = (hum_raw - (self.dig_H4 * 64.0 + self.dig_H5 / 16384.0 * h))
h = h * (self.dig_H2 / 65536.0 * (1.0 + self.dig_H6 / 67108864.0 * h * (1.0 + self.dig_H3 / 67108864.0 * h)))
h = h * (1.0 - self.dig_H1 * h / 524288.0)
if h > 100:
h = 100
elif h < 0:
h = 0.
return t/100.0, p/100, h
| StarcoderdataPython |
1766306 | <filename>src/python/detectors/hashlib_constructor/hashlib_constructor.py
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# {fact [email protected] defects=1}
def constructor_noncompliant():
import hashlib
text = "ExampleString"
# Noncompliant: uses the new() constructor instead of the hashlib
# constructor, which is slower.
result = hashlib.new('sha256', text.encode())
print("The hexadecimal equivalent of SHA256 is : ")
print(result.hexdigest())
# {/fact}
# {fact [email protected] defects=0}
def constructor_compliant():
import hashlib
text = "ExampleString"
# Compliant: uses the hashlib constructor over the new(), which is faster.
result = hashlib.sha256(text.encode())
print("The hexadecimal equivalent of SHA256 is : ")
print(result.hexdigest())
# {/fact}
| StarcoderdataPython |
3222772 | <reponame>DPBayes/robust-private-lr<filename>python/synthdata/test_rlr.py
#!/bin/env python3
# Differentially private Bayesian linear regression
# <NAME> 2016-2017
# University of Helsinki Department of Computer Science
# Helsinki Institute of Information Technology HIIT
# Synthetic data
# Precision measure: MSE
# Run: python3 test.py param
# where
# - param = 0 runs test cases with fixed n and varying eps
# - param = 1 runs test cases with fixed eps and varying n
# This program does 50-fold cross-validation.
import sys
import os
# Create a new Theano compile directory on local node (used in cluster computing)
if len(sys.argv) > 1:
v1 = int(sys.argv[1])
mypath1 = "theano"
mypath2 = mypath1+"/theano-tmp-"+str(v1)
if not os.path.exists(mypath1):
os.mkdir(mypath1)
if not os.path.exists(mypath2):
os.mkdir(mypath2)
os.environ["THEANO_FLAGS"] = "base_compiledir="+mypath1+",compiledir="+mypath2
import diffpri as dp
import numpy as np
import csv
# Import data
datapath = '/scratch/work/niemina7/cliptest/' # TODO: set path for input and output data
f = open(datapath+'x_data.csv','rt')
reader = csv.reader(f,delimiter=',')
x = np.array(list(reader)).astype(float)
f.close()
f = open(datapath+'y_data.csv','rt')
reader = csv.reader(f,delimiter=',')
y = np.array(list(reader)).astype(float)
f.close()
# Arguments
if len(sys.argv) > 1:
param = int(sys.argv[1])
else: # default
param = 0
# Test cases
eps = [0.1,0.2,0.5,1.0,2.0,5.0,10.0]
pv_size = [0,100,200,300,400,500,600,700,800]
pv_max = max(pv_size)
d = 10
n_npv = 10
n_test = 100
mcmc = True # use priors instead of fixed values for precision parameter lambda,lambda_0
n_cv = 50
drugid = 0
# Fetch clipping threshold omegas
if param == 0:
t = 'A'
else:
t = 'B'
f = open(datapath+t+'-WX.csv','rt')
reader = csv.reader(f,delimiter=',')
WX = np.array(list(reader)).astype(float)
f.close()
f = open(datapath+t+'-WY.csv','rt')
reader = csv.reader(f,delimiter=',')
WY = np.array(list(reader)).astype(float)
f.close()
if param == 0:
# Cross-validation
for seed in range(n_cv):
S = np.zeros(len(eps),dtype=np.float64)
for i in range(len(eps)):
e = eps[i]
n_pv = 500
w_x = WX[i]
w_y = WY[i]
# Process data
nxx_pv,nxx_npv,nxy_pv,nxy_npv,nyy_pv,nyy_npv,x_test,y_test,B_x,B_y,n_train,private = dp.processData(x,y,d,n_test,n_pv,n_npv,pv_max,w_x,w_y,drugid,seed)
private = False # modification: rlr
# Fit model
if mcmc:
pred = dp.predictMCMC(n_train,nxx_pv,nxx_npv,nxy_pv,nxy_npv,nyy_pv,nyy_npv,B_x,B_y,e,x_test,private)
else:
pred = dp.predictL(nxx_pv,nxx_npv,nxy_pv,nxy_npv,B_x,B_y,e,x_test,private)
# Evaluate
S[i] = dp.precision(pred,y_test)
# Save results into file
csvpath = datapath+'synth-rlr-'+t+'-'+str(seed)+'.csv'
np.savetxt(csvpath,S,delimiter=',')
if param == 1:
# Cross-validation
for seed in range(n_cv):
S = np.zeros(len(pv_size),dtype=np.float64)
for i in range(len(pv_size)):
n_pv = pv_size[i]
e = 2.0
w_x = WX[i]
w_y = WY[i]
# Process data
nxx_pv,nxx_npv,nxy_pv,nxy_npv,nyy_pv,nyy_npv,x_test,y_test,B_x,B_y,n_train,private = dp.processData(x,y,d,n_test,n_pv,n_npv,pv_max,w_x,w_y,drugid,seed)
private = False # modification: rlr
# Fit model
if mcmc:
pred = dp.predictMCMC(n_train,nxx_pv,nxx_npv,nxy_pv,nxy_npv,nyy_pv,nyy_npv,B_x,B_y,e,x_test,private)
else:
pred = dp.predictL(nxx_pv,nxx_npv,nxy_pv,nxy_npv,B_x,B_y,e,x_test,private)
# Evaluate
S[i] = dp.precision(pred,y_test)
# Save results into file
csvpath = datapath+'synth-rlr-'+t+'-'+str(seed)+'.csv'
np.savetxt(csvpath,S,delimiter=',')
| StarcoderdataPython |
1689164 | <filename>examples/countries/src/main.py
import time
from jobs import count_eur_currency_countries
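# count_eur_currency_countries is a distributed job: distribute() farms the country codes out to
# workers, wait_results() blocks until all of them finish, and .results holds the returned
# country names as bytes (decoded below).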
if __name__ == '__main__':
codes = []
with open('countries.txt', 'r') as f:
for line in f.readlines():
codes.append(line.split()[0])
start_time = time.time()
count_eur_currency_countries.distribute(codes)
count_eur_currency_countries.wait_results()
countries = map(
lambda country: country.decode('utf-8'),
count_eur_currency_countries.results)
countries = sorted(countries)
print('Work completed in {:.2f} s'.format(time.time() - start_time))
print('{} countries use EUR currency:'.format(len(countries)))
print(', '.join(countries))
| StarcoderdataPython |
1734736 | from app import db, login_manager
from flask_login import UserMixin
from datetime import datetime
@login_manager.user_loader
def load_user(id):
return User.query.get(int(id))
class Marking(db.Model):
__tablename__ = "marking"
marker_id = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key=True)
marked_id = db.Column(db.Integer, db.ForeignKey('incidence.id'), primary_key=True)
timestamp = db.Column(db.DateTime, default=datetime.utcnow)
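# Marking is an association object linking a User (marker) to an Incidence (marked) with a
# timestamp; User.marked and Incidence.markers expose the two directions of the relationship.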
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True, unique=True)
name = db.Column(db.String(), nullable=False)
number = db.Column(db.Integer(), nullable=False)
gender = db.Column(db.String(), nullable=False)
password = db.Column(db.String(), nullable=False)
profile_image = db.Column(db.String(), nullable=False, default="<PASSWORD>")
user_type = db.Column(db.String(), nullable=False, default="citizen")
location = db.Column(db.String(), nullable=True)
date_joined = db.Column(db.DateTime(), nullable=False, default=datetime.utcnow)
account_pend = db.Column(db.Boolean(), nullable=True)
date_pended = db.Column(db.DateTime(), nullable=True)
incidence = db.relationship("Incidence", backref="author", lazy=True)
safe = db.relationship("Safe", backref="marker", lazy=True)
comment = db.relationship("Comment", backref="comm_ent", lazy=True)
message_sent = db.relationship("Message", backref="author", lazy=True)
marked = db.relationship('Marking', foreign_keys=[Marking.marker_id],\
backref=db.backref('marker', lazy='joined'), lazy='dynamic',\
cascade='all, delete-orphan')
notify_carrier = db.relationship('Notifications',\
foreign_keys='Notifications.carrier_id',\
backref='n_carrier', lazy='dynamic')
notify_reciever = db.relationship('Notifications',\
foreign_keys='Notifications.reciever_id',\
backref='n_reciever', lazy='dynamic')
def __repr__(self):
return f" {self.name}, {self.number}, {self.profile_image}, {self.date_joined}"
def mark(self, user):
if not self.is_marking(user):
f = Marking(marker=self, marked=user)
db.session.add(f)
def unmark(self, user):
f = self.marked.filter_by(marked_id=user.id).first()
if f:
db.session.delete(f)
def is_marking(self, user):
return self.marked.filter_by(
marked_id=user.id).first() is not None
class Incidence(db.Model):
id = db.Column(db.Integer, primary_key=True, unique=True)
name = db.Column(db.String(), nullable=False)
location = db.Column(db.String(), nullable=False)
date_added = db.Column(db.DateTime(), nullable=False, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
safes = db.relationship("Safe", backref="marked_incidence", lazy=True)
comments = db.relationship("Comment", backref="commenter", lazy=True)
markers = db.relationship('Marking', foreign_keys=[Marking.marked_id],\
backref=db.backref('marked', lazy='joined'), lazy='dynamic',\
cascade='all, delete-orphan')
def is_marked_by(self, pst):
return self.markers.filter_by(
marker_id=pst.id).first() is not None
def __repr__(self):
return f" {self.name}, {self.date_added}"
class Safe(db.Model):
id = db.Column(db.Integer, primary_key=True, unique=True)
incidence_id = db.Column(db.Integer, db.ForeignKey('incidence.id'), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
date_added = db.Column(db.DateTime(), nullable=False, default=datetime.utcnow)
class Message(db.Model):
id = db.Column(db.Integer, primary_key=True)
sender_id = db.Column(db.Integer, db.ForeignKey('user.id'))
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
return f'<Message {self.body}>'
class Notifications(db.Model):
id = db.Column(db.Integer, primary_key=True)
carrier_name = db.Column(db.String(50))
carrier_id = db.Column(db.Integer, db.ForeignKey('user.id'))
reciever_id = db.Column(db.Integer, db.ForeignKey('user.id'))
action_type = db.Column(db.String(140))
date_carried = db.Column(db.DateTime, index=True, default=datetime.utcnow)
class Comment(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
incidence_id = db.Column(db.Integer, db.ForeignKey('incidence.id'), nullable=False)
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime(), default=datetime.utcnow)
def __repr__(self):
return f'<Message {self.body}>' | StarcoderdataPython |
4833151 | import re
from collections import defaultdict
from datetime import date, timedelta
from artifactory import ArtifactoryPath
from teamcity.messages import TeamcityServiceMessages
from artifactory_cleanup.rules.base import Rule
TC = TeamcityServiceMessages()
class RuleForDocker(Rule):
"""
Parent class for Docker rules
"""
def get_docker_images_list(self, docker_repo):
_href = "{}/api/docker/{}/v2/_catalog".format(self.artifactory_server, docker_repo)
r = self.artifactory_session.get(_href)
r.raise_for_status()
content = r.json()
return content['repositories']
def get_docker_tags_list(self, docker_repo, docker_image):
_href = "{}/api/docker/{}/v2/{}/tags/list".format(self.artifactory_server, docker_repo, docker_image)
r = self.artifactory_session.get(_href)
r.raise_for_status()
content = r.json()
return content['tags']
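# The size of a deleted image is the sum of all layer blobs stored under '<path>/<name>';
# a single AQL query fetches the layers of every affected repo at once.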
def _collect_docker_size(self, new_result):
docker_repos = list(set(x['repo'] for x in new_result))
if docker_repos:
aql = ArtifactoryPath(self.artifactory_server, session=self.artifactory_session)
args = ['items.find', {"$or": [{"repo": repo} for repo in docker_repos]}]
artifacts_list = aql.aql(*args)
for artifact in new_result:
artifact['size'] = sum([docker_layer['size'] for docker_layer in artifacts_list if
docker_layer['path'] == '{}/{}'.format(artifact['path'], artifact['name'])])
def filter_result(self, result_artifacts):
""" Determines the size of deleted images """
new_result = super(RuleForDocker, self).filter_result(result_artifacts)
self._collect_docker_size(new_result)
return new_result
class delete_docker_images_older_than(RuleForDocker):
""" Removes Docker image older than ``days`` days"""
def __init__(self, *, days):
self.days = timedelta(days=days)
def _aql_add_filter(self, aql_query_list):
today = date.today()
older_than_date = today - self.days
older_than_date_txt = older_than_date.isoformat()
print('Delete docker images older than {}'.format(older_than_date_txt))
update_dict = {
"modified": {
"$lt": older_than_date_txt,
},
"name": {
"$match": 'manifest.json',
}
}
aql_query_list.append(update_dict)
return aql_query_list
def _filter_result(self, result_artifact):
for artifact in result_artifact:
artifact['path'], docker_tag = artifact['path'].rsplit('/', 1)
artifact['name'] = docker_tag
return result_artifact
class delete_docker_images_not_used(RuleForDocker):
""" Removes Docker image not downloaded ``days`` days """
def __init__(self, *, days):
self.days = timedelta(days=days)
def _aql_add_filter(self, aql_query_list):
last_day = date.today() - self.days
print('Delete docker images not used from {}'.format(last_day.isoformat()))
update_dict = {
"name": {
"$match": 'manifest.json',
},
"$or": [
{"stat.downloaded": {"$lte": last_day.isoformat()}}, # Скачивались давно
{"$and": [
{"stat.downloads": {"$eq": None}}, # Не скачивались
{"created": {"$lte": last_day.isoformat()}
}]},
],
}
aql_query_list.append(update_dict)
return aql_query_list
def _filter_result(self, result_artifact):
for artifact in result_artifact:
artifact['path'], docker_tag = artifact['path'].rsplit('/', 1)
artifact['name'] = docker_tag
return result_artifact
class keep_latest_n_version_images_by_property(Rule):
r"""
Keeps the latest ``count`` Docker images per version group.
Grouping is by major version by default; set ``number_of_digits_in_version`` to 2 to group by major.minor, or to 3 to group by major.minor.patch.
:param custom_regexp: regular expression used to extract the version.
Defaults to ``r'(^\d*\.\d*\.\d*.\d+$)'``. The version is looked up in the ``properties`` of the ``manifest.json`` file.
"""
def __init__(self, count, custom_regexp=r'(^\d*\.\d*\.\d*.\d+$)', number_of_digits_in_version=1):
self.count = count
self.custom_regexp = custom_regexp
self.property = r'docker.manifest'
self.number_of_digits_in_version = number_of_digits_in_version
def _filter_result(self, result_artifact):
artifacts_by_path_and_name = defaultdict(list)
for artifact in result_artifact[:]:
property = artifact['properties'][self.property]
version = re.findall(self.custom_regexp, property)
if len(version) == 1:
version_splitted = version[0].split('.')
key = artifact['path'] + '/' + version_splitted[0]
key += ".".join(version_splitted[:self.number_of_digits_in_version])
artifacts_by_path_and_name[key].append([version_splitted[0], artifact])
for artifactory_with_version in artifacts_by_path_and_name.values():
artifactory_with_version.sort(key=lambda x: [int(x) for x in x[0].split('.')])
good_artifact_count = len(artifactory_with_version) - self.count
if good_artifact_count < 0:
good_artifact_count = 0
good_artifacts = artifactory_with_version[good_artifact_count:]
for artifact in good_artifacts:
self.remove_artifact(artifact[1], result_artifact)
return result_artifact
class delete_docker_image_if_not_contained_in_properties(RuleForDocker):
"""
.. warning::
Multiscanner project specific rule https://wiki.ptsecurity.com/x/koFIAg
Remove Docker image, if it is not found in the properties of the artifact repository.
"""
def __init__(self, docker_repo, properties_prefix, image_prefix=None, full_docker_repo_name=None):
self.docker_repo = docker_repo
self.properties_prefix = properties_prefix
self.image_prefix = image_prefix
self.full_docker_repo_name = full_docker_repo_name
def get_properties_dict(self, result_artifact):
properties_dict = defaultdict(dict)
for artifact in result_artifact:
if artifact.get('properties'):
properties_with_image = [x for x in artifact['properties'].keys()
if x.startswith(self.properties_prefix)]
for i in properties_with_image:
# Create a dictionary with a property key, without a prefix.
# Property = docker.image, prefix = docker. -> key = image
properties_dict[i[len(self.properties_prefix):]].setdefault(artifact['properties'][i], True)
return properties_dict
def _filter_result(self, result_artifact):
images = self.get_docker_images_list(self.docker_repo)
properties_dict = self.get_properties_dict(result_artifact)
result_docker_images = []
for image in images:
# legacy
image_legacy = None
if self.image_prefix and image.startswith(self.image_prefix):
# Remove the prefix from the image name
image_legacy = image[len(self.image_prefix):]
elif not self.image_prefix:
continue
if image in properties_dict.keys() or image_legacy in properties_dict.keys():
tags = self.get_docker_tags_list(self.docker_repo, image)
for tag in tags:
docker_name = '{}:{}'.format(image, tag)
docker_name_legacy = None
if self.full_docker_repo_name:
docker_name_legacy = '{}/{}'.format(self.full_docker_repo_name, docker_name)
# If this docker tag is not found in the metadata properties, then add it to the list for deletion
if not properties_dict[image].pop(docker_name, None) \
and not properties_dict[image_legacy].pop(docker_name, None) \
and not properties_dict[image_legacy].pop(docker_name_legacy, None):
result_docker_images.append({'repo': self.docker_repo,
'path': image,
'name': tag,
})
return result_docker_images
class delete_docker_image_if_not_contained_in_properties_value(RuleForDocker):
"""
Remove Docker image, if it is not found in the properties of the artifact repository
.. warning::
Multiscanner project specific rule https://wiki.ptsecurity.com/x/koFIAg
"""
def __init__(self, docker_repo, properties_prefix, image_prefix=None, full_docker_repo_name=None):
self.docker_repo = docker_repo
self.properties_prefix = properties_prefix
self.image_prefix = image_prefix
self.full_docker_repo_name = full_docker_repo_name
def get_properties_values(self, result_artifact):
""" Creates a list of artifact property values if the value starts with self.properties_prefix"""
properties_values = set()
for artifact in result_artifact:
properties_values |= set((artifact['properties'].get(x) for x in artifact.get('properties', {})
if x.startswith(self.properties_prefix)))
return properties_values
def _filter_result(self, result_artifact):
images = self.get_docker_images_list(self.docker_repo)
properties_values = self.get_properties_values(result_artifact)
result_docker_images = []
for image in images:
if not image.startswith(self.image_prefix):
continue
# For debag output all properties that begin as image
values_with_image_name = [x for x in properties_values if x.startswith(image)]
TC.blockOpened('Values of properties with name as image {}'.format(image))
for value in values_with_image_name:
print(value)
TC.blockClosed('Values of properties with name as image {}'.format(image))
tags = self.get_docker_tags_list(self.docker_repo, image)
TC.blockOpened('Checking image {}'.format(image))
for tag in tags:
docker_name = '{}:{}'.format(image, tag)
print('INFO - Checking docker with name {}'.format(docker_name))
# If this Docker tag is not found in the metadata properties, then add it to the list for deletion
if docker_name not in properties_values:
result_docker_images.append({'repo': self.docker_repo,
'path': image,
'name': tag,
})
TC.blockClosed('Checking image {}'.format(image))
return result_docker_images
| StarcoderdataPython |
3244301 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class DataTag(object):
def __init__(self):
self._aggregate = None
self._alias = None
self._code = None
@property
def aggregate(self):
return self._aggregate
@aggregate.setter
def aggregate(self, value):
self._aggregate = value
@property
def alias(self):
return self._alias
@alias.setter
def alias(self, value):
self._alias = value
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
def to_alipay_dict(self):
params = dict()
if self.aggregate:
if hasattr(self.aggregate, 'to_alipay_dict'):
params['aggregate'] = self.aggregate.to_alipay_dict()
else:
params['aggregate'] = self.aggregate
if self.alias:
if hasattr(self.alias, 'to_alipay_dict'):
params['alias'] = self.alias.to_alipay_dict()
else:
params['alias'] = self.alias
if self.code:
if hasattr(self.code, 'to_alipay_dict'):
params['code'] = self.code.to_alipay_dict()
else:
params['code'] = self.code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = DataTag()
if 'aggregate' in d:
o.aggregate = d['aggregate']
if 'alias' in d:
o.alias = d['alias']
if 'code' in d:
o.code = d['code']
return o
| StarcoderdataPython |
1760286 | numero=int(input("Enter the number: "))
contador=0
for i in range(1, numero+1):
    if numero%i==0:
        contador=contador+1
if contador==2:
    print("the number is prime")
else:
    print("the number is not prime") | StarcoderdataPython |
97043 | <reponame>Neural-Plasma/deepDiffusion<gh_stars>0
import numpy as np
from os.path import join as pjoin
import config
def dataxy(f,m,u):
f["/%d"%int(m)] = u
# f["/%d"%int(t)+"/energy"] = KE
# dsetE.resize(dsetE.shape[0]+1, axis=0)
# dsetE[-1:] = KE
#
# dsetQ.resize(dsetQ.shape[0]+1, axis=0)
# dsetQ[-1:] = Qcollect
#
# with open(pjoin(path,'energy.txt'),"ab") as f:
# np.savetxt(f, np.column_stack([t, KE]))
return 0
def attributes(f,lx,ly,nx,ny,nsteps):
f.attrs["lx"] = lx
f.attrs["ly"] = ly
# f.attrs["dx"] = config.dx
# f.attrs["dy"] = config.dy
f.attrs["nx"] = nx
f.attrs["ny"] = ny
f.attrs["nsteps"] = nsteps
return 0
def dustDiagn(f,fduration):
f["/fall_duration"] = fduration
# f["/%d"%int(t)+"/energy"] = KE
return 0
| StarcoderdataPython |
1792646 | <filename>revitron/export.py
"""
The ``export`` submodule hosts all classes related to sheet export such as **DWG** and **PDF**.
For example sending the currently active sheet to a PDF printer in the network works as follows::
exporter = revitron.PDFExporter(printerAddress, printerPath)
exporter.printSheet(revitron.ACTIVEVIEW,
'A0',
'Landscape',
'C:/pdf',
'{Sheet Number}-{Sheet Title}')
Please check out the
`export tool <https://github.com/revitron/revitron-ui/blob/master/Revitron.tab/Revitron.panel/Export.pulldown/Export%20Sheets%20as%20PDF.pushbutton/Export%20Sheets%20as%20PDF_script.py>`_
of the **Revitron UI** extension to learn how to export a selection of sheets
with a felxible configuration stored in a document.
"""
#-*- coding: UTF-8 -*-
import os, shutil, time, sys, glob, re
# from pyrevit import script
from System.Collections.Generic import List
class DWGExporter:
"""
Export sheets as DWG named by a file naming template.
"""
def __init__(self, setupName):
"""
Inits a new DWGExporter instance.
Args:
setupName (string): The name of a stored export setup
"""
import revitron
self.options = revitron.DB.DWGExportOptions().GetPredefinedOptions(revitron.DOC, setupName)
def exportSheet(self, sheet, directory, template = False):
"""
Exports a sheet.
Args:
sheet (object): A Revit sheet
directory (string): The export directory
template (string, optional): A name template. Defaults to '{Sheet Number}-{Sheet Name}'.
Returns:
bool: False on error, True on success
"""
import revitron
if revitron.Element(sheet).getClassName() != 'ViewSheet':
print(':face_with_rolling_eyes: Element is not a sheet!')
return False
if not directory:
directory = self.output
if not template:
template = '{Sheet Number}-{Sheet Name}'
fullPath = os.path.join(directory, revitron.ParameterTemplate(sheet, template).render() + '.dwg')
path = os.path.dirname(fullPath)
file = os.path.basename(fullPath)
if not os.path.exists(path):
os.makedirs(path)
db = revitron.DB
self.options.MergedViews = True
self.options.TargetUnit = db.ExportUnit.Default
success = revitron.DOC.Export(path, file, List[db.ElementId]([sheet.Id]), self.options)
if success:
return fullPath
return False
class PDFExporter:
"""
Export sheets as PDF named by a file naming template.
"""
def __init__(self, printer, output):
"""
Inits a new PDFExporter instance.
Args:
printer (string): The printer network adress
output (string): The printer output directory
"""
import revitron
self.printer = printer
self.output = output
self.manager = revitron.DOC.PrintManager
self.sizes = dict()
if self.manager.PrinterName.lower() != self.printer.lower():
print('Setting current printer to: ' + self.printer)
print('Please submit your sheets to be exported again ...')
self.manager.SelectNewPrintDriver(self.printer)
self.manager.Apply()
sys.exit()
self.manager.PrintRange = revitron.DB.PrintRange.Select
self.manager.PrintToFile = True
self.manager.CombinedFile = False
self.manager.Apply()
for size in self.manager.PaperSizes:
self.sizes[size.Name] = size
def printSheet(self, sheet, size, orientation = 'Landscape', directory = False, template = False):
"""
Prints a sheet.
Args:
sheet (object): A Revit sheet
size (string): A size name like A0 or A4
orientation (string, optional): The orientation, 'Landscape' or 'Portrait'. Defaults to 'Landscape'.
directory (string, optional): A custom output directory. Defaults to False.
template (string, optional): A name template. Defaults to '{Sheet Number}-{Sheet Name}'.
Returns:
bool: False on error, True on success
"""
import revitron
if revitron.Element(sheet).getClassName() != 'ViewSheet':
print(':face_with_rolling_eyes: Element is not a sheet!')
return False
if not directory:
directory = self.output
if not template:
template = '{Sheet Number}-{Sheet Name}'
path = os.path.join(directory, revitron.ParameterTemplate(sheet, template).render() + '.pdf')
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
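# Printing uses throw-away '_temp_' view-sheet-set and print-setup entries created inside a
# transaction that is rolled back at the end, so the document itself is left unchanged.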
transaction = revitron.Transaction()
viewSet = revitron.DB.ViewSet()
viewSet.Insert(sheet)
viewSheetSetting = self.manager.ViewSheetSetting
viewSheetSetting.CurrentViewSheetSet.Views = viewSet
viewSheetSetting.SaveAs("_temp_")
self.manager.PrintSetup.SaveAs("_temp_")
self.manager.Apply()
orientation = getattr(revitron.DB.PageOrientationType, orientation)
# Set current print page settings.
printParameters = self.manager.PrintSetup.CurrentPrintSetting.PrintParameters
printParameters.ZoomType = revitron.DB.ZoomType.Zoom
printParameters.Zoom = 100
printParameters.PaperPlacement = revitron.DB.PaperPlacementType.Center
printParameters.PageOrientation = orientation
printParameters.PaperSize = self.sizes[size]
printParameters.RasterQuality = revitron.DB.RasterQualityType.High
# Set in-session print settings.
printParameters = self.manager.PrintSetup.InSession.PrintParameters
printParameters.ZoomType = revitron.DB.ZoomType.Zoom
printParameters.Zoom = 100
printParameters.PaperPlacement = revitron.DB.PaperPlacementType.Center
printParameters.PageOrientation = orientation
printParameters.PaperSize = self.sizes[size]
printParameters.RasterQuality = revitron.DB.RasterQualityType.High
# Again save settings.
try:
self.manager.PrintSetup.Save()
except:
self.manager.PrintSetup.SaveAs("_temp2_")
self.manager.Apply()
self.manager.SubmitPrint(sheet)
viewSheetSetting.Delete()
transaction.rollback()
# Move file from temp output to directory.
timePassed = time.time()
moved = False
while (time.time() - timePassed) < 30 and not moved:
time.sleep(0.5)
tempFiles = glob.glob(self.tempOutputPattern(sheet))
if tempFiles:
tempFile = tempFiles[0]
time.sleep(2)
if os.access(tempFile, os.W_OK):
try:
shutil.move(tempFile, path)
moved = True
except:
pass
if moved:
return path
return False
def tempOutputPattern(self, sheet):
"""
Create a glob pattern to identify a printed PDF in the system output directory to be able to
move it to its correct location and rename it according to the given template.
Please note that the PDF network printer has to be configured to save PDFs following the below naming scheme::
[Revit File] - Sheet - [Sheet Number] - [Sheet Name].pdf
For example::
Project1 - Sheet - A101 - Unnamed.pdf
Args:
sheet (object): A Revit sheet object
Returns:
string: The generated glob pattern
"""
import revitron
nr = re.sub(r'[^a-zA-Z0-9]+', '*', revitron.Element(sheet).get('Sheet Number'))
name = re.sub(r'[^a-zA-Z0-9]+', '*', revitron.Element(sheet).get('Sheet Name'))
rvt = re.sub(r'\.rvt$', '', os.path.basename(revitron.DOC.PathName))
return '{}/{}*Sheet*{}*{}*.pdf'.format(self.output, rvt, nr, name)
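# --- Usage sketch (illustrative only, not part of the module above) ---
# A hedged example of driving both exporters from a pyRevit/revitron script. The setup
# name, printer share and output directories below are placeholder assumptions.
def _exportSheetExample(sheet):
    dwg = DWGExporter('DWG Export Setup')  # assumed name of a stored DWG export setup
    dwgPath = dwg.exportSheet(sheet, 'C:/exports/dwg')
    pdf = PDFExporter('\\\\printserver\\PDF', 'C:/pdf-spool')  # assumed printer share and spool directory
    pdfPath = pdf.printSheet(sheet, 'A1', 'Landscape', 'C:/exports/pdf')
    return dwgPath, pdfPath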
| StarcoderdataPython |
103231 | # Generated by Django 3.0.7 on 2020-06-29 03:46
from django.db import migrations
import jutil.modelfields
class Migration(migrations.Migration):
dependencies = [
("jacc", "0021_auto_20191209_2231"),
]
operations = [
migrations.AlterField(
model_name="account",
name="currency",
field=jutil.modelfields.SafeCharField(
blank=True,
choices=[("EUR", "EUR"), ("USD", "USD")],
default="EUR",
max_length=3,
verbose_name="currency",
),
),
migrations.AlterField(
model_name="account",
name="name",
field=jutil.modelfields.SafeCharField(blank=True, db_index=True, default="", max_length=64, verbose_name="name"),
),
migrations.AlterField(
model_name="accountentry",
name="description",
field=jutil.modelfields.SafeCharField(blank=True, default="", max_length=256, verbose_name="description"),
),
migrations.AlterField(
model_name="accountentrysourcefile",
name="name",
field=jutil.modelfields.SafeCharField(blank=True, db_index=True, default="", max_length=255, verbose_name="name"),
),
migrations.AlterField(
model_name="accounttype",
name="code",
field=jutil.modelfields.SafeCharField(db_index=True, max_length=32, unique=True, verbose_name="code"),
),
migrations.AlterField(
model_name="accounttype",
name="name",
field=jutil.modelfields.SafeCharField(db_index=True, max_length=64, unique=True, verbose_name="name"),
),
migrations.AlterField(
model_name="contract",
name="name",
field=jutil.modelfields.SafeCharField(blank=True, db_index=True, default="", max_length=128, verbose_name="name"),
),
migrations.AlterField(
model_name="entrytype",
name="code",
field=jutil.modelfields.SafeCharField(db_index=True, max_length=64, unique=True, verbose_name="code"),
),
migrations.AlterField(
model_name="entrytype",
name="identifier",
field=jutil.modelfields.SafeCharField(blank=True, db_index=True, default="", max_length=40, verbose_name="identifier"),
),
migrations.AlterField(
model_name="entrytype",
name="name",
field=jutil.modelfields.SafeCharField(blank=True, db_index=True, default="", max_length=128, verbose_name="name"),
),
migrations.AlterField(
model_name="invoice",
name="filename",
field=jutil.modelfields.SafeCharField(blank=True, db_index=True, default="", max_length=255, verbose_name="filename"),
),
migrations.AlterField(
model_name="invoice",
name="notes",
field=jutil.modelfields.SafeTextField(blank=True, default="", verbose_name="notes"),
),
migrations.AlterField(
model_name="invoice",
name="number",
field=jutil.modelfields.SafeCharField(blank=True, db_index=True, default="", max_length=32, verbose_name="invoice number"),
),
migrations.AlterField(
model_name="invoice",
name="state",
field=jutil.modelfields.SafeCharField(
blank=True,
choices=[("N", "Not due yet"), ("D", "Due"), ("L", "Late"), ("P", "Paid")],
db_index=True,
default="",
max_length=1,
verbose_name="state",
),
),
migrations.AlterField(
model_name="invoice",
name="type",
field=jutil.modelfields.SafeCharField(
blank=True,
choices=[("I1", "Invoice"), ("I2", "Credit Note")],
db_index=True,
default="I1",
max_length=2,
verbose_name="type",
),
),
]
| StarcoderdataPython |
3261691 | # -*- encoding: utf-8 -*-
'''
@project : LeetCode
@File : replaceWords.py
@Contact : <EMAIL>
@Desc :
In English we have the concept of a root, which can be followed by some other word to form a longer word - we call that longer word a successor. For example, the root "an", followed by the word "other", forms the new word "another".
Given a dictionary consisting of many roots and a sentence, replace every successor in the sentence with the root that forms it. If a successor can be formed by several roots, replace it with the shortest root.
Return the sentence after the replacement.
Example 1:
Input: dict = ["cat", "bat", "rat"]
sentence = "the cattle was rattled by the battery"
Output: "the cat was rat by the bat"
Notes:
The input contains only lowercase letters.
1 <= number of dictionary words <= 1000
1 <= number of words in the sentence <= 1000
1 <= root length <= 100
1 <= length of each sentence word <= 1000
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/replace-words
@Modify Time @Author @Version @Description
------------ ------- -------- -----------
2020-03-13 zhan 1.0 None
'''
from collections import defaultdict
from typing import List
class TriNode:
def __init__(self):
self.is_word = False
self.children = defaultdict(TriNode)
class Trie:
def __init__(self):
self.root = TriNode()
def insert(self,word):
'''
Insert the word into the trie.
:param word: str
:return:
'''
curNode = self.root
for char in word:
curNode = curNode.children[char]
curNode.is_word = True
def search(self,word):
'''
Return True if the word is stored in the trie.
:param word: str
:return: bool
'''
curNode = self.root
for char in word:
# test membership explicitly; indexing the defaultdict would silently create empty nodes
if char not in curNode.children: return False
curNode = curNode.children[char]
return curNode.is_word
def startswith(self, prefix):
'''
Returns if there is any word in the trie that starts with the given prefix.
:param prefix:str类型
:return:
'''
curNode = self.root
for char in prefix:
if char not in curNode.children: return False
curNode = curNode.children[char]
return True
def get_prefix(self,word):
'''
Return the shortest root stored in the trie that is a prefix of word, or '' if there is none.
:param word: str
:return: str
'''
curNode = self.root
for i, letter in enumerate(word):
# stop early instead of letting the defaultdict grow empty branches
if letter not in curNode.children: return ''
curNode = curNode.children[letter]
if curNode.is_word:
return word[:i+1]
return ''
class Solution:
def replaceWords(self, dict: List[str], sentence: str) -> str:
trie = Trie()
for word in dict:
trie.insert(word)
ans = ''
for word in sentence.split():
rStr = trie.get_prefix(word)
if rStr: ans += rStr + ' '
else:ans += word + ' '
return ans[:-1]
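# Complexity note: building the trie costs O(total length of the roots) and each sentence
# word is matched in O(len(word)), so the replacement runs in time roughly linear in the
# combined size of the dictionary and the sentence.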
if __name__ == '__main__':
dict = ["cat", "bat", "rat"]
sentence = "the cattle was rattled by the battery"
ans = Solution().replaceWords(dict, sentence)
print(ans)
| StarcoderdataPython |
97071 | from cytoolz import cons
from cytoolz import merge
from merlin import specs
def test_only():
registry = [{'key': 1, 'ubid': 'a'}, {'key': 2, 'ubid': 'b'},
{'key': 3, 'ubid': 'c'}, {'key': 4, 'ubid': 'd'}]
expected = tuple([{'key': 1, 'ubid': 'a'}, {'key': 4, 'ubid': 'd'}])
result = specs.only(ubids=['a', 'd'], specs=registry)
assert expected == result
expected = tuple([{'key': 1, 'ubid': 'a'}, {'key': 4, 'ubid': 'd'}])
result = specs.only(ubids=['d', 'a'], specs=registry)
assert expected == result
expected = tuple()
result = specs.only(ubids=['z'], specs=registry)
assert expected == result
def test_mapped():
registry = [{'key': 1, 'ubid': 'a'}, {'key': 2, 'ubid': 'b'},
{'key': 3, 'ubid': 'c'}, {'key': 4, 'ubid': 'd'}]
ubids = {'red': ['a', 'd'], 'blue': ['b', 'c']}
expected = {'red': ({'key': 1, 'ubid': 'a'}, {'key': 4, 'ubid': 'd'}),
'blue': ({'key': 2, 'ubid': 'b'}, {'key': 3, 'ubid': 'c'})}
result = specs.mapped(ubids=ubids, specs=registry)
assert expected == result
def test_exist():
registry = [{'key': 1, 'ubid': 'a'}, {'key': 2, 'ubid': 'b'},
{'key': 3, 'ubid': 'c'}, {'key': 4, 'ubid': 'd'}]
ubids = ['a', 'd']
assert True==specs.exist(ubids=ubids, specs=registry)
ubids = ['a', 'b', 'x']
assert False==specs.exist(ubids=ubids, specs=registry)
def test_index():
registry = [{'key': 1, 'ubid': 'a'}, {'key': 2, 'ubid': 'b'},
{'key': 3, 'ubid': 'c'}, {'key': 4, 'ubid': 'd'}]
results = specs.index(registry)
# check that dicts were rekeyed into a new dict
assert all(map(lambda r: r in results, ['a', 'b', 'c', 'd']))
# check structure of new dict values
assert all(map(lambda r: 'ubid' in r and 'key' in r, results.values()))
def test_ubids():
registry = [{'key': 1, 'ubid': 'a'}, {'key': 2, 'ubid': 'b'},
{'key': 3, 'ubid': 'c'}, {'key': 4, 'ubid': 'd'}]
data = list(cons({'nope': 'z'}, registry))
good = filter(lambda f: 'ubid' in f, data)
assert set(map(lambda u: u['ubid'], good)) == set(specs.ubids(data))
def test_ubids_count():  # renamed so it does not shadow test_ubids above, which would otherwise never run
ubids = [{'ubid': 1}, {'ubid': 2}, {'ubid': 3}, {'ubid': 4}]
assert len(specs.ubids(ubids)) == 4
def test_refspec():
specmap = {'red': [{'key': 1, 'ubid': 'a'}, {'key': 2, 'ubid': 'b'}],
'blue': [{'key': 3, 'ubid': 'c'}, {'key': 4, 'ubid': 'd'}]}
assert type(specs.refspec(specmap)) is dict
| StarcoderdataPython |
34056 | # Author: btjanaka (<NAME>)
# Problem: (UVa) 247
import sys
from collections import defaultdict
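# Kosaraju's algorithm in two DFS passes: the first pass records the vertices of g in order
# of finishing time; the second pass walks that order in reverse on the reversed graph g_rev,
# and every DFS tree grown there is one strongly connected component (one calling circle).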
def kosaraju(g, g_rev):
order = []
visited = set()
def visit(u):
visited.add(u)
for v in g[u]:
if v not in visited:
visit(v)
order.append(u)
for u in g:
if u not in visited: visit(u)
components = []
visited.clear()
def build_comp(u):
components[-1].append(u)
visited.add(u)
for v in g_rev[u]:
if v not in visited:
build_comp(v)
for u in order[::-1]:
if u not in visited:
components.append([])
build_comp(u)
return components
def main():
case = 1
while True:
# input
n, m = map(int, input().split())
if n == 0 and m == 0: break
g, g_rev = defaultdict(set), defaultdict(set)
for _ in range(m):
u, v = input().strip().split()
g[u].add(v)
g[v]
g_rev[v].add(u)
g_rev[u]
# output
if case != 1: print()
print(f"Calling circles for data set {case}:")
for c in kosaraju(g, g_rev):
print(", ".join(c))
case += 1
main()
| StarcoderdataPython |
1787748 | import uuid
def generate_vehicle_id():
return 'veh_' + uuid.uuid4().hex | StarcoderdataPython |
53600 | from noval import GetApp,_
import noval.iface as iface
import noval.plugin as plugin
import tkinter as tk
from tkinter import ttk,messagebox
import noval.preference as preference
from noval.util import utils
import noval.ui_utils as ui_utils
import noval.consts as consts
MAX_WINDOW_MENU_NUM_ITEMS = 30
##class WindowMenuService(wx.lib.pydocview.WindowMenuService):
## """description of class"""
## def InstallControls(self, frame, menuBar=None, toolBar=None, statusBar=None, document=None):
## wx.lib.pydocview.WindowMenuService.InstallControls(self,frame,menuBar,toolBar,statusBar,document)
## windowMenu = menuBar.GetWindowsMenu()
## windowMenu.Append(constants.ID_CLOSE_ALL,_("Close All"),_("Close all open documents"))
## wx.EVT_MENU(frame, constants.ID_CLOSE_ALL, frame.ProcessEvent)
##
## if wx.GetApp().GetUseTabbedMDI():
## windowMenu.Append(constants.ID_RESTORE_WINDOW_LAYOUT,_("&Restore Default Layout"),_("Restore default layout of main frame"))
## wx.EVT_MENU(frame, constants.ID_RESTORE_WINDOW_LAYOUT, frame.ProcessEvent)
## wx.EVT_MENU(frame, self.SELECT_MORE_WINDOWS_ID, frame.ProcessEvent)
##
## def ProcessEvent(self, event):
## """
## Processes a Window menu event.
## """
## id = event.GetId()
## if id == constants.ID_RESTORE_WINDOW_LAYOUT:
## ret = wx.MessageBox(_("Are you sure want to restore the default window layout?"), wx.GetApp().GetAppName(),
## wx.YES_NO | wx.ICON_QUESTION,wx.GetApp().MainFrame)
## if ret == wx.YES:
## wx.GetApp().MainFrame.LoadDefaultPerspective()
## return True
## elif id == constants.ID_CLOSE_ALL:
## wx.GetApp().MainFrame.OnCloseAllDocs(event)
## return True
## else:
## return wx.lib.pydocview.WindowMenuService.ProcessEvent(self,event)
##
##
## def BuildWindowMenu(self, currentFrame):
## """
## Builds the Window menu and adds menu items for all of the open documents in the DocManager.
## """
## if wx.GetApp().GetUseTabbedMDI():
## currentFrame = wx.GetApp().GetTopWindow()
##
## windowMenuIndex = currentFrame.GetMenuBar().FindMenu(_("&Window"))
## windowMenu = currentFrame.GetMenuBar().GetMenu(windowMenuIndex)
##
## if wx.GetApp().GetUseTabbedMDI():
## notebook = wx.GetApp().GetTopWindow()._notebook
## numPages = notebook.GetPageCount()
##
## for id in self._selectWinIds:
## item = windowMenu.FindItemById(id)
## if item:
## windowMenu.DeleteItem(item)
##
## if windowMenu.FindItemById(self.SELECT_MORE_WINDOWS_ID):
## windowMenu.Remove(self.SELECT_MORE_WINDOWS_ID)
## if numPages == 0 and self._sep:
## windowMenu.DeleteItem(self._sep)
## self._sep = None
##
## if numPages > len(self._selectWinIds):
## for i in range(len(self._selectWinIds), numPages):
## self._selectWinIds.append(wx.NewId())
## wx.EVT_MENU(currentFrame, self._selectWinIds[i], self.OnCtrlKeySelect)
##
## for i in range(0, min(numPages,utils.ProfileGetInt("WindowMenuDisplayNumber",wx.lib.pydocview.WINDOW_MENU_NUM_ITEMS))):
## if i == 0 and not self._sep:
## self._sep = windowMenu.AppendSeparator()
## if i < 9:
## menuLabel = "%s\tCtrl+%s" % (notebook.GetPageText(i), i+1)
## else:
## menuLabel = notebook.GetPageText(i)
## windowMenu.Append(self._selectWinIds[i], menuLabel)
##
## if numPages > wx.lib.pydocview.WINDOW_MENU_NUM_ITEMS: # Add the more items item
## if not windowMenu.FindItemById(self.SELECT_MORE_WINDOWS_ID):
## windowMenu.Append(self.SELECT_MORE_WINDOWS_ID, _("&More Windows..."))
##
##
## def _GetWindowMenuFrameList(self, currentFrame=None):
## """
## Returns the Frame associated with each menu item in the Window menu.
## """
## frameList = []
## # get list of windows for documents
## for doc in self._docManager.GetDocuments():
## for view in doc.GetViews():
## if hasattr(view,"GetType"):
## frame = view.GetFrame()
## if frame not in frameList:
## if frame == currentFrame and len(frameList) >= WINDOW_MENU_NUM_ITEMS:
## frameList.insert(WINDOW_MENU_NUM_ITEMS - 1, frame)
## else:
## frameList.append(frame)
## return frameList
##
## def OnSelectMoreWindows(self, event):
## """
## Called when the "Window/Select More Windows..." menu item is selected and enables user to
## select from the Frames that do not in the Window list. Useful when there are more than
## 10 open frames in the application.
## """
## frames = self._GetWindowMenuFrameList() # TODO - make the current window the first one
## strings = map(lambda frame: frame.GetTitle(), frames)
## # Should preselect the current window, but not supported by wx.GetSingleChoice
## res = wx.GetSingleChoiceIndex(_("Select a window to show:"),
## _("Select Window"),
## strings,
## wx.GetApp().MainFrame)
## if res == -1:
## return
## frames[res].SetFocus()
##
class WindowsOptionPanel(ui_utils.CommonOptionPanel):
"""
"""
def __init__(self, parent):
ui_utils.CommonOptionPanel.__init__(self, parent)
self._loadLayoutCheckVar = tk.IntVar(value=utils.profile_get_int("LoadLastPerspective", True))
loadLayoutCheckBox = ttk.Checkbutton(self.panel, text=_("Load the last window layout at start up"),variable=self._loadLayoutCheckVar)
loadLayoutCheckBox.pack(fill=tk.X)
## self._window_menu_display_number_ctrl = wx.TextCtrl(self, -1, str(config.ReadInt("WindowMenuDisplayNumber",wx.lib.pydocview.WINDOW_MENU_NUM_ITEMS)), size=(30,-1),\
## validator=NumValidator(_("Window Menu Display Number"),1,MAX_WINDOW_MENU_NUM_ITEMS))
## lsizer.AddMany([(wx.StaticText(self, label=_("Number of Window menus displayed") + "(%d-%d): " % \
## (1,MAX_WINDOW_MENU_NUM_ITEMS)),
## 0, wx.ALIGN_CENTER_VERTICAL), ((5, 5), 0),
## (self._window_menu_display_number_ctrl,
## 0, wx.ALIGN_CENTER_VERTICAL)])
self._hideMenubarCheckVar = tk.IntVar(value=utils.profile_get_int("HideMenubarFullScreen", False))
hideMenubarCheckBox = ttk.Checkbutton(self.panel, text= _("Hide menubar When full screen display"),variable=self._hideMenubarCheckVar)
hideMenubarCheckBox.pack(fill=tk.X)
self._useCustommenubarCheckVar = tk.IntVar(value=utils.profile_get_int("USE_CUSTOM_MENUBAR", False))
useCustommenubarCheckBox = ttk.Checkbutton(self.panel, text= _("Use custom menubar"),variable=self._useCustommenubarCheckVar)
useCustommenubarCheckBox.pack(fill=tk.X)
row = ttk.Frame(self.panel)
self._scaling_label = ttk.Label(row, text=_("UI scaling factor:"))
self._scaling_label.pack(fill=tk.X,side=tk.LEFT)
self._scaleVar = tk.StringVar(value=utils.profile_get('UI_SCALING_FACTOR',''))
scalings = sorted({0.5, 0.75, 1.0, 1.25, 1.33, 1.5, 2.0, 2.5, 3.0, 4.0})
combobox = ttk.Combobox(
row,
exportselection=False,
textvariable=self._scaleVar,
state="readonly",
height=15,
values=tuple(scalings),
)
combobox.pack(fill=tk.X,side=tk.LEFT)
row.pack(fill=tk.X)
clear_window_layout_btn = ttk.Button(self.panel, text=_("Clear Window layout configuration information"),command=self.ClearWindowLayoutConfiguration)
clear_window_layout_btn.pack(anchor=tk.W,pady=consts.DEFAUT_HALF_CONTRL_PAD_Y)
def OnOK(self, optionsDialog):
if utils.profile_get('UI_SCALING_FACTOR','') != self._scaleVar.get():
messagebox.showinfo(GetApp().GetAppName(),_("Scale changes will not appear until the application is restarted."),parent=self)
if utils.profile_get_int('USE_CUSTOM_MENUBAR',0) != self._useCustommenubarCheckVar.get():
messagebox.showinfo(GetApp().GetAppName(),_("Menubar changes will not appear until the application is restarted."),parent=self)
utils.profile_set("LoadLastPerspective", self._loadLayoutCheckVar.get())
utils.profile_set("HideMenubarFullScreen", self._hideMenubarCheckVar.get())
utils.profile_set("USE_CUSTOM_MENUBAR", self._useCustommenubarCheckVar.get())
scale = self._scaleVar.get()
if not scale:
scale = "default"
utils.profile_set("UI_SCALING_FACTOR", scale)
# config.WriteInt("WindowMenuDisplayNumber", int(self._window_menu_display_number_ctrl.GetValue()))
return True
def ClearWindowLayoutConfiguration(self):
config = GetApp().GetConfig()
config.DeleteEntry("DefaultPerspective")
config.DeleteEntry("LastPerspective")
messagebox.showinfo(GetApp().GetAppName(),_("Already Clear Window layout configuration information"))
class WindowServiceLoader(plugin.Plugin):
plugin.Implements(iface.CommonPluginI)
def Load(self):
preference.PreferenceManager().AddOptionsPanelClass(preference.ENVIRONMENT_OPTION_NAME,"Appearance",WindowsOptionPanel)
| StarcoderdataPython |
3280168 | <reponame>mikando/salvia-blockchain
from typing import Any, Dict, List, Optional
import aiohttp
from salvia.cmds.units import units
from salvia.consensus.block_record import BlockRecord
from salvia.rpc.farmer_rpc_client import FarmerRpcClient
from salvia.rpc.full_node_rpc_client import FullNodeRpcClient
from salvia.rpc.wallet_rpc_client import WalletRpcClient
from salvia.util.config import load_config
from salvia.util.default_root import DEFAULT_ROOT_PATH
from salvia.util.ints import uint16
from salvia.util.misc import format_bytes
from salvia.util.misc import format_minutes
from salvia.util.network import is_localhost
SECONDS_PER_BLOCK = (24 * 3600) / 4608
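# The chain targets 4608 blocks per day, so this constant is the expected average block time
# in seconds; it is used as a fallback whenever the real average cannot be computed.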
async def get_harvesters(farmer_rpc_port: Optional[int]) -> Optional[Dict[str, Any]]:
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
plots = await farmer_client.get_harvesters()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'harvester' {e}")
return None
farmer_client.close()
await farmer_client.await_closed()
return plots
async def get_blockchain_state(rpc_port: Optional[int]) -> Optional[Dict[str, Any]]:
blockchain_state = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
blockchain_state = await client.get_blockchain_state()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node is running at {rpc_port}")
else:
print(f"Exception from 'full node' {e}")
client.close()
await client.await_closed()
return blockchain_state
async def get_average_block_time(rpc_port: Optional[int]) -> float:
try:
blocks_to_compare = 500
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
blockchain_state = await client.get_blockchain_state()
curr: Optional[BlockRecord] = blockchain_state["peak"]
if curr is None or curr.height < (blocks_to_compare + 100):
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
while curr is not None and curr.height > 0 and not curr.is_transaction_block:
curr = await client.get_block_record(curr.prev_hash)
if curr is None:
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
past_curr = await client.get_block_record_by_height(curr.height - blocks_to_compare)
while past_curr is not None and past_curr.height > 0 and not past_curr.is_transaction_block:
past_curr = await client.get_block_record(past_curr.prev_hash)
if past_curr is None:
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
client.close()
await client.await_closed()
return (curr.timestamp - past_curr.timestamp) / (curr.height - past_curr.height)
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node is running at {rpc_port}")
else:
print(f"Exception from 'full node' {e}")
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
async def get_wallets_stats(wallet_rpc_port: Optional[int]) -> Optional[Dict[str, Any]]:
amounts = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if wallet_rpc_port is None:
wallet_rpc_port = config["wallet"]["rpc_port"]
wallet_client = await WalletRpcClient.create(self_hostname, uint16(wallet_rpc_port), DEFAULT_ROOT_PATH, config)
amounts = await wallet_client.get_farmed_amount()
#
# Don't catch any exceptions, the caller will handle it
#
finally:
wallet_client.close()
await wallet_client.await_closed()
return amounts
async def is_farmer_running(farmer_rpc_port: Optional[int]) -> bool:
is_running = False
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
await farmer_client.get_connections()
is_running = True
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'farmer' {e}")
farmer_client.close()
await farmer_client.await_closed()
return is_running
async def get_challenges(farmer_rpc_port: Optional[int]) -> Optional[List[Dict[str, Any]]]:
signage_points = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
signage_points = await farmer_client.get_signage_points()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'farmer' {e}")
farmer_client.close()
await farmer_client.await_closed()
return signage_points
async def challenges(farmer_rpc_port: Optional[int], limit: int) -> None:
signage_points = await get_challenges(farmer_rpc_port)
if signage_points is None:
return None
signage_points.reverse()
if limit != 0:
signage_points = signage_points[:limit]
for signage_point in signage_points:
print(
(
f"Hash: {signage_point['signage_point']['challenge_hash']} "
f"Index: {signage_point['signage_point']['signage_point_index']}"
)
)
async def summary(
rpc_port: Optional[int],
wallet_rpc_port: Optional[int],
harvester_rpc_port: Optional[int],
farmer_rpc_port: Optional[int],
) -> None:
all_harvesters = await get_harvesters(farmer_rpc_port)
blockchain_state = await get_blockchain_state(rpc_port)
farmer_running = await is_farmer_running(farmer_rpc_port)
wallet_not_ready: bool = False
wallet_not_running: bool = False
amounts = None
try:
amounts = await get_wallets_stats(wallet_rpc_port)
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
wallet_not_running = True
else:
wallet_not_ready = True
print("Farming status: ", end="")
if blockchain_state is None:
print("Not available")
elif blockchain_state["sync"]["sync_mode"]:
print("Syncing")
elif not blockchain_state["sync"]["synced"]:
print("Not synced or not connected to peers")
elif not farmer_running:
print("Not running")
else:
print("Farming")
if amounts is not None:
print(f"Total salvia farmed: {amounts['farmed_amount'] / units['salvia']}")
print(f"User transaction fees: {amounts['fee_amount'] / units['salvia']}")
print(f"Block rewards: {(amounts['farmer_reward_amount'] + amounts['pool_reward_amount']) / units['salvia']}")
print(f"Last height farmed: {amounts['last_height_farmed']}")
class PlotStats:
total_plot_size = 0
total_plots = 0
if all_harvesters is not None:
harvesters_local: dict = {}
harvesters_remote: dict = {}
for harvester in all_harvesters["harvesters"]:
ip = harvester["connection"]["host"]
if is_localhost(ip):
harvesters_local[harvester["connection"]["node_id"]] = harvester
else:
if ip not in harvesters_remote:
harvesters_remote[ip] = {}
harvesters_remote[ip][harvester["connection"]["node_id"]] = harvester
def process_harvesters(harvester_peers_in: dict):
for harvester_peer_id, plots in harvester_peers_in.items():
total_plot_size_harvester = sum(map(lambda x: x["file_size"], plots["plots"]))
PlotStats.total_plot_size += total_plot_size_harvester
PlotStats.total_plots += len(plots["plots"])
print(f" {len(plots['plots'])} plots of size: {format_bytes(total_plot_size_harvester)}")
if len(harvesters_local) > 0:
print(f"Local Harvester{'s' if len(harvesters_local) > 1 else ''}")
process_harvesters(harvesters_local)
for harvester_ip, harvester_peers in harvesters_remote.items():
print(f"Remote Harvester{'s' if len(harvester_peers) > 1 else ''} for IP: {harvester_ip}")
process_harvesters(harvester_peers)
print(f"Plot count for all harvesters: {PlotStats.total_plots}")
print("Total size of plots: ", end="")
print(format_bytes(PlotStats.total_plot_size))
else:
print("Plot count: Unknown")
print("Total size of plots: Unknown")
if blockchain_state is not None:
print("Estimated network space: ", end="")
print(format_bytes(blockchain_state["space"]))
else:
print("Estimated network space: Unknown")
minutes = -1
if blockchain_state is not None and all_harvesters is not None:
proportion = PlotStats.total_plot_size / blockchain_state["space"] if blockchain_state["space"] else -1
minutes = int((await get_average_block_time(rpc_port) / 60) / proportion) if proportion else -1
if all_harvesters is not None and PlotStats.total_plots == 0:
print("Expected time to win: Never (no plots)")
else:
print("Expected time to win: " + format_minutes(minutes))
if amounts is None:
if wallet_not_running:
print("For details on farmed rewards and fees you should run 'salvia start wallet' and 'salvia wallet show'")
elif wallet_not_ready:
print("For details on farmed rewards and fees you should run 'salvia wallet show'")
else:
print("Note: log into your key using 'salvia wallet show' to see rewards for each key")
| StarcoderdataPython |
183013 | <reponame>bovarysme/advent
def pairs(digits):
for i in range(len(digits) - 1):
yield digits[i], digits[i+1]
yield digits[-1], digits[0]
def halfway(digits):
half = len(digits) // 2
for i in range(half):
yield digits[i], digits[half+i]
for i in range(half, len(digits)):
yield digits[i], digits[i-half]
def solve(iterator, digits):
return sum(int(x) for x, y in iterator(digits) if x == y)
def part_one(digits):
return solve(pairs, digits)
def part_two(digits):
return solve(halfway, digits)
if __name__ == '__main__':
assert part_one('1122') == 3
assert part_one('1111') == 4
assert part_one('1234') == 0
assert part_one('91212129') == 9
assert part_two('1212') == 6
assert part_two('1221') == 0
assert part_two('123425') == 4
assert part_two('123123') == 12
assert part_two('12131415') == 4
with open('inputs/day1.txt', 'r') as f:
digits = f.read().rstrip()
print('Answer for part one:', part_one(digits))
print('Answer for part two:', part_two(digits))
| StarcoderdataPython |
1603417 | <gh_stars>0
"""
Class :py:class:`CMWMain` is a QWidget for interactive image
============================================================
Usage ::
import sys
from PyQt5.QtWidgets import QApplication
from psana.graphqt.CMWMain import CMWMain
app = QApplication(sys.argv)
w = CMWMain(None, app)
w.show()
app.exec_()
See:
- :class:`CMWMain`
- :class:`CMWMainTabs`
- :class:`CMConfigParameters`
- `graphqt documentation <https://lcls-psana.github.io/graphqt/py-modindex.html>`_.
Created on 2017-02-01 by <NAME>
Adopted for LCLS2 on 2018-02-26 by <NAME>
"""
#import os
#import math
#------------------------------
import logging
logger = logging.getLogger(__name__)
#------------------------------
from math import floor
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout, QSplitter, QTextEdit
from PyQt5.QtGui import QPen, QBrush
from PyQt5.QtCore import Qt, QPoint
from psana.graphqt.CMConfigParameters import cp
from psana.graphqt.QWLoggerStd import QWLoggerStd#, QWFilter
from psana.pyalgos.generic.Utils import print_kwargs, print_parser, is_in_command_line
#from psana.graphqt.QWUtils import selectFromListInPopupMenu
from psana.graphqt.Frame import Frame
from psana.graphqt.QWIcons import icon
from psana.graphqt.Styles import style
#------------------------------
#class CMWMain(Frame) :
class CMWMain(QWidget) :
_name = 'CMWMain'
def __init__(self, parser=None) : # **dict_opts) :
#Frame.__init__(self, parent=None, mlw=1)
QWidget.__init__(self, parent=None)
#self._name = self.__class__.__name__
cp.cmwmain = self
self.proc_parser(parser)
self.main_win_width = cp.main_win_width
self.main_win_height = cp.main_win_height
self.main_win_pos_x = cp.main_win_pos_x
self.main_win_pos_y = cp.main_win_pos_y
#icon.set_icons()
from psana.graphqt.CMWMainTabs import CMWMainTabs
self.wtab = CMWMainTabs()
#self.wlog = QWLogger(log, cp, show_buttons=False)
self.wlog = QWLoggerStd(cp, show_buttons=False)
self.wtmp = QTextEdit('Some text')
#self.vbox = QVBoxLayout()
#self.vbox.addWidget(self.wtab)
#self.vbox.addStretch(1)
#self.wrig = QWidget()
#self.wrig.setLayout(self.vbox)
self.vspl = QSplitter(Qt.Vertical)
#self.vspl.addWidget(self.wrig)
self.vspl.addWidget(self.wtab)
self.vspl.addWidget(self.wlog)
#self.hspl = QSplitter(Qt.Horizontal)
#self.hspl.addWidget(self.vspl)
#self.hspl.addWidget(self.wtmp)
#self.hspl.addWidget(self.wrig)
self.mbox = QHBoxLayout()
self.mbox.addWidget(self.vspl)
self.setLayout(self.mbox)
self.set_style()
#self.set_tool_tips()
self.connect_signals_to_slots()
#self.move(self.pos()) # + QPoint(self.width()+5, 0))
def connect_signals_to_slots(self) :
pass
#self.connect(self.wbut.but_reset, QtCore.SIGNAL('clicked()'), self.on_but_reset)
#self.connect(self.wbut.but_save, QtCore.SIGNAL('clicked()'), self.on_but_save)
#------------------------------
def proc_parser(self, parser=None) :
self.parser=parser
if parser is None :
return
(popts, pargs) = parser.parse_args()
self.args = pargs
self.opts = vars(popts)
self.defs = vars(parser.get_default_values())
host = popts.host # self.opts['host']
port = popts.port # self.opts['port']
cp.user = popts.user
cp.upwd = popts.upwd
exp = popts.experiment
det = popts.detector
loglevel = popts.loglevel.upper()
logdir = popts.logdir
#if host != self.defs['host'] : cp.cdb_host.setValue(host)
#if port != self.defs['port'] : cp.cdb_port.setValue(port)
#if exp != self.defs['experiment'] : cp.exp_name.setValue(exp)
#if det != self.defs['detector'] : cp.data_source.setValue(det)
#if loglevel != self.defs['loglevel'] : cp.log_level.setValue(loglevel)
#if logdir != self.defs['logdir'] : cp.log_prefix.setValue(logdir)
if is_in_command_line(None, '--host') : cp.cdb_host.setValue(host)
if is_in_command_line(None, '--port') : cp.cdb_port.setValue(port)
if is_in_command_line('-e', '--experiment') : cp.exp_name.setValue(exp)
if is_in_command_line('-d', '--detector') : cp.data_source.setValue(det)
if is_in_command_line('-l', '--loglevel') : cp.log_level.setValue(loglevel)
if is_in_command_line('-L', '--logdir') : cp.log_prefix.setValue(logdir)
if loglevel == 'DEBUG' :
print(40*'_')
print_parser(parser)
print_kwargs(self.opts)
#------------------------------
def set_tool_tips(self) :
pass
#self.butStop.setToolTip('Not implemented yet...')
def set_style(self) :
#self.setGeometry(50, 50, 500, 600)
self.setGeometry(self.main_win_pos_x .value(),\
self.main_win_pos_y .value(),\
self.main_win_width .value(),\
self.main_win_height.value())
w_height = self.main_win_height.value()
self.setMinimumSize(500, 400)
w = self.main_win_width.value()
self.layout().setContentsMargins(0,0,0,0)
spl_pos = cp.main_vsplitter.value()
self.vspl.setSizes((spl_pos,w_height-spl_pos,))
#self.wrig.setMinimumWidth(350)
#self.wrig.setMaximumWidth(450)
#self.wrig.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Ignored)
#self.hspl.moveSplitter(w*0.5,0)
#self.setFixedSize(800,500)
#self.setMinimumSize(500,800)
#self.setStyleSheet("background-color:blue; border: 0px solid green")
#self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
#self. setStyleSheet(style.styleBkgd)
#self.butSave.setStyleSheet(style.styleButton)
#self.butExit.setStyleSheet(style.styleButton)
#self.butELog.setStyleSheet(style.styleButton)
#self.butFile.setStyleSheet(style.styleButton)
#self.butELog .setVisible(False)
#self.butFBrowser.setVisible(False)
#self.but1.raise_()
def closeEvent(self, e) :
logger.debug('%s.closeEvent' % self._name)
#try : self.wspe.close()
#except : pass
self.wtab.close()
self.on_save()
QWidget.closeEvent(self, e)
def resizeEvent(self, e):
#logger.debug('resizeEvent', self._name)
#logger.info('CMWMain.resizeEvent: %s' % str(self.size()))
pass
def moveEvent(self, e) :
#logger.debug('moveEvent', self._name)
#self.position = self.mapToGlobal(self.pos())
#self.position = self.pos()
#logger.debug('moveEvent - pos:' + str(self.position), __name__)
#logger.info('CMWMain.moveEvent - move window to x,y: ', str(self.mapToGlobal(QPoint(0,0))))
#self.wimg.move(self.pos() + QPoint(self.width()+5, 0))
pass
def key_usage(self) :
return 'Keys:'\
'\n V - view/hide tabs'\
'\n'
if __name__ == "__main__" :
def keyPressEvent(self, e) :
#print('keyPressEvent, key=', e.key())
if e.key() == Qt.Key_Escape :
self.close()
elif e.key() == Qt.Key_V :
self.wtab.view_hide_tabs()
else :
logger.info(self.key_usage())
def on_save(self):
point, size = self.mapToGlobal(QPoint(-5,-22)), self.size() # Offset (-5,-22) for frame size.
x,y,w,h = point.x(), point.y(), size.width(), size.height()
msg = 'Save main window x,y,w,h : %d, %d, %d, %d' % (x,y,w,h)
logger.info(msg) #, self._name)
#print(msg)
#Save main window position and size
self.main_win_pos_x .setValue(x)
self.main_win_pos_y .setValue(y)
self.main_win_width .setValue(w)
self.main_win_height.setValue(h)
spl_pos = self.vspl.sizes()[0]
msg = 'Save main v-splitter position %d' % spl_pos
logger.debug(msg)
cp.main_vsplitter.setValue(spl_pos)
cp.printParameters()
cp.saveParametersInFile() # moved to PSConfigParameters
if cp.save_log_at_exit.value() :
pass
# ?????
#log.saveLogInFile(cp.log_file.value())
#print('Saved log file: %s' % cp.log_file.value())
#log.saveLogTotalInFile(fnm.log_file_total())
#------------------------------
def calibman(parser=None) :
import sys
#sys.stdout = sys.stderr = open('/dev/null', 'w') # open('%s-stdout-stderr' % cp.log_file.value(), 'w')
from PyQt5.QtWidgets import QApplication
#logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s: %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG)
app = QApplication(sys.argv)
w = CMWMain(parser)
w.show()
app.exec_()
del w
del app
#------------------------------
if __name__ == "__main__" :
calibman()
#------------------------------
| StarcoderdataPython |
3384277 | <filename>mysite/domain/article/value_objects.py<gh_stars>1-10
from dataclasses import dataclass
from typing import (
Optional,
)
from domain.common.file import (
File,
)
ArticleId = int
Cursor = str
@dataclass(frozen=True)
class ArticleInput:
title: str
description: str
id: Optional[int] = None
image: Optional[File] = None
@dataclass(frozen=True)
class QueryOption:
'''
:ivar limit: query limit
:ivar cursor: cursor value or `None`.
'''
limit: int
cursor: Optional[Cursor] = None
| StarcoderdataPython |
143036 | #!/usr/bin/env python3
# -*- encoding:utf-8 -*-
import os
import logging
from flask import Flask, redirect, url_for
from flask_mdict import __version__, init_app, mdict_query2
logger = logging.getLogger(__name__)
def create_app(mdict_dir='content'):
logging.basicConfig(
level=20,
format='%(message)s',
)
mdict_dir = os.path.realpath(mdict_dir)
app = Flask(__name__, template_folder=None, static_folder=None)
app.config['MDICT_DIR'] = mdict_dir
app.config['MDICT_CACHE'] = False
app.config['SECRET_KEY'] = "<KEY>"
app.config['APP_DB'] = os.path.join(mdict_dir, 'flask_mdict.db')
app.config['WFD_DB'] = os.path.join(mdict_dir, 'ecdict_wfd.db')
app.config['INDEX_DIR'] = None
app.config['APP_NAME'] = 'Flask Mdict'
init_app(app, url_prefix='/')
logger.info(' * app db: %s' % app.config['APP_DB'])
wfd_db = app.config['WFD_DB']
if os.path.exists(wfd_db):
logger.info(f' * Word Frequency Database: {wfd_db}')
else:
logger.error(f' * Could not find "Word Frequency Database" - {wfd_db}!')
@app.route('/favicon.ico')
def favicon():
return redirect(url_for('mdict.static', filename='logo.ico'))
return app
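# --- Usage sketch ---
# Minimal local run; the 'content' directory holding the mdict files and the host/port
# values are assumptions for illustration only.
if __name__ == '__main__':
    create_app('content').run(host='127.0.0.1', port=5000)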
| StarcoderdataPython |
1725452 | <reponame>FusionSolutions/python-fssignal
# Builtin modules
from __future__ import annotations
import traceback, signal as _signal
from threading import Event
from time import monotonic, sleep
from typing import Callable, Dict, Any, Iterator, Iterable, Optional, Union, Type
# Third party modules
# Local modules
# Program
class KillSignal(Exception): pass
class SignalIterator(Iterator[Any]):
__slots__ = ("event", "it", "checkDelay", "lastCheck")
event:Event
it:Iterator[Any]
checkDelay:float
lastCheck:float
def __init__(self, event:Event, it:Iterable[Any], checkDelay:float=1.0):
self.event = event
self.it = it.__iter__()
self.checkDelay = checkDelay
self.lastCheck = monotonic()
def __iter__(self) -> Iterator[Any]:
return self
def __next__(self) -> Any:
m = monotonic()
if m-self.lastCheck > self.checkDelay:
self.lastCheck = m
if self.event.is_set():
raise KillSignal
return self.it.__next__()
class BaseSignal:
_force:bool
@classmethod
def get(self) -> bool:
if isinstance(Signal._handler, Signal):
return Signal._handler._get(self._force)
return False
@classmethod
def getSoft(self) -> bool:
if isinstance(Signal._handler, Signal):
return Signal._handler._get(False)
return False
@classmethod
def getHard(self) -> bool:
if isinstance(Signal._handler, Signal):
return Signal._handler._get(True)
return False
@classmethod
def check(self) -> None:
if isinstance(Signal._handler, Signal):
return Signal._handler._check(self._force)
@classmethod
def checkSoft(self) -> None:
if isinstance(Signal._handler, Signal):
return Signal._handler._check(False)
@classmethod
def checkHard(self) -> None:
if isinstance(Signal._handler, Signal):
return Signal._handler._check(True)
@classmethod
def sleep(self, seconds:Union[int, float], raiseOnKill:bool=False) -> None:
if isinstance(Signal._handler, Signal):
return Signal._handler._sleep(seconds, raiseOnKill, self._force)
return sleep(seconds)
@classmethod
def signalSoftKill(self, *args:Any, **kwargs:Any) -> None:
if isinstance(Signal._handler, Signal):
return Signal._handler._signalSoftKill(*args, **kwargs)
@classmethod
def signalHardKill(self, *args:Any, **kwargs:Any) -> None:
if isinstance(Signal._handler, Signal):
return Signal._handler._signalHardKill(*args, **kwargs)
@classmethod
def iter(self, it:Iterable[Any], checkDelay:float=1.0) -> Iterable[Any]:
if isinstance(Signal._handler, Signal):
return Signal._handler._iter(it, checkDelay, self._force)
return it
@classmethod
def softKill(self) -> None:
if isinstance(Signal._handler, Signal):
return Signal._handler._softKill()
@classmethod
def hardKill(self) -> None:
if isinstance(Signal._handler, Signal):
return Signal._handler._hardKill()
@classmethod
def reset(self) -> None:
if isinstance(Signal._handler, Signal):
return Signal._handler._reset()
@classmethod
def getSoftSignal(self) -> Type[BaseSignal]:
return SoftSignal
@classmethod
def getHardSignal(self) -> Type[BaseSignal]:
return HardSignal
@classmethod
def isActivated(self) -> bool:
return isinstance(Signal._handler, Signal)
class SoftSignal(BaseSignal):
_force:bool = False
class HardSignal(BaseSignal):
_force:bool = True
class Signal(HardSignal):
_handler:Optional[Signal] = None
softKillFn:Optional[Callable[..., Any]]
hardKillFn:Optional[Callable[..., Any]]
forceKillCounterFn:Optional[Callable[[int, int], Any]]
counter:int
forceCounter:int
eSoft:Event
eHard:Event
def __init__(self, softKillFn:Optional[Callable[..., Any]]=None, hardKillFn:Optional[Callable[..., Any]]=None,
forceKillCounterFn:Optional[Callable[[int, int], Any]]=None, forceCounter:int=10):
self.softKillFn = softKillFn
self.hardKillFn = hardKillFn
self.forceKillCounterFn = forceKillCounterFn
self.counter = 0
self.forceCounter = forceCounter
self.eSoft = Event()
self.eHard = Event()
Signal._handler = self
self._activate()
def __getstate__(self) -> Dict[str, Any]:
return {
"softKillFn":self.softKillFn,
"hardKillFn":self.hardKillFn,
"forceCounter":self.forceCounter,
"forceKillCounterFn":self.forceKillCounterFn,
"eSoft":self.eSoft,
"eHard":self.eHard,
}
def __setstate__(self, states:Dict[str, Any]) -> None:
self.softKillFn = states["softKillFn"]
self.hardKillFn = states["hardKillFn"]
self.forceCounter = states["forceCounter"]
self.forceKillCounterFn = states["forceKillCounterFn"]
self.eSoft = states["eSoft"]
self.eHard = states["eHard"]
self._activate()
def _activate(self) -> None:
_signal.signal(_signal.SIGINT, Signal.signalSoftKill)
_signal.signal(_signal.SIGTERM, Signal.signalHardKill)
def _get(self, force:bool=True) -> bool:
if force:
return self.eHard.is_set()
return self.eSoft.is_set()
def _check(self, force:bool=True) -> None:
if (force and self.eHard.is_set()) or (not force and self.eSoft.is_set()):
raise KillSignal
return None
def _sleep(self, seconds:Union[int, float], raiseOnKill:bool=False, force:bool=True) -> None:
if (self.eHard if force else self.eSoft).wait(float(seconds)) and raiseOnKill:
raise KillSignal
return None
def _iter(self, it:Iterable[Any], checkDelay:float=1.0, force:bool=True) -> Iterator[Any]:
return SignalIterator(self.eHard if force else self.eSoft, it, checkDelay)
def _signalSoftKill(self, *args:Any, **kwargs:Any) -> None:
self._softKill()
if not self.eHard.is_set():
self.counter += 1
if callable(self.forceKillCounterFn):
try:
self.forceKillCounterFn(self.counter, self.forceCounter)
except:
traceback.print_exc()
if self.counter >= self.forceCounter:
self._hardKill()
def _signalHardKill(self, *args:Any, **kwargs:Any) -> None:
self._softKill()
self._hardKill()
def _softKill(self) -> None:
if not self.eSoft.is_set():
self.eSoft.set()
if callable(self.softKillFn):
try:
self.softKillFn()
except:
traceback.print_exc()
def _hardKill(self) -> None:
if not self.eHard.is_set():
self.eHard.set()
if callable(self.hardKillFn):
try:
self.hardKillFn()
except:
traceback.print_exc()
def _reset(self) -> None:
self.eSoft.clear()
self.eHard.clear()
self.counter = 0
T_Signal = Union[Signal, Type[BaseSignal]]
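# --- Usage sketch (illustrative only) ---
# A hedged, minimal example of how the module is meant to be driven; the worker loop
# below is an assumption for illustration and not part of the library.
def _example_worker():
    Signal()                     # install the SIGINT/SIGTERM handlers once, in the main process
    while not Signal.getSoft():  # leave the loop after the first Ctrl+C (soft kill)
        Signal.sleep(1.0)        # interruptible sleep that returns early once a kill arrives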
| StarcoderdataPython |
3330176 | <reponame>GolemZ2K/SimpleNTP
#!/usr/bin/env python3
#coding:utf-8
'''
Simple time synchronizer, to synchronization time in a lan.
usage: python3 sntp.py [-h] [-s] [-i interval]
SimpleNtp server and client
optional arguments:
-h, --help show this help message and exit
-s run as ntp server
-i interval interval of broadcasting (default: 5 seconds)
'''
import socket, traceback, json
import time, datetime, os, sys
import argparse
import ipaddress
import socket
import threading
ip = socket.gethostbyname(socket.gethostname())
net = ipaddress.IPv4Network(ip + '/24', False)
ADDR = net.broadcast_address.compressed # or just '192.168.1.255'
PORT = 6666
TIME_FORMAT = '%(year)s-%(month)s-%(day)s %(hour)s:%(minute)s:%(second)s.%(microsecond)s'
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
localtimezone = -time.timezone//3600
print('Local timezone:', localtimezone)
isLinux = 'linux' in sys.platform
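# Note: the client adjusts the system clock ('date'/'hwclock' on Linux, 'time'/'date' on
# Windows), which normally requires root/administrator privileges.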
def procCmdline():
'''command line'''
parser = argparse.ArgumentParser(description='SimpleNtp server and client', add_help=True)
parser.add_argument('-s', action='store_true', default=False, help='run as ntp server')
parser.add_argument('-i', type=int, default=5, metavar='interval', help='interval of broadcasting (default: 5 seconds)')
args = parser.parse_args()
args.i = args.i if args.i >= 1 else 5
return args
def run(args):
if args.s:
server(args)
else:
client(args)
def toTimeDict(curtime):
return {
'year' : curtime.year,
'month' : curtime.month,
#'weekday' :curtime.weekday()+1,
'day' : curtime.day,
'hour' : curtime.hour,
'minute' : curtime.minute,
'second' : curtime.second,
'microsecond' : curtime.microsecond
}
def server(args):
print ("MyNtp Server. ")
print ("press Ctrl + c to stop ")
dest = (ADDR, PORT)
print ('Broadcasting time to %s every %d seconds'%(dest, args.i))
while True:
try:
curtime = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
timedict = toTimeDict(curtime)
data = {'time' : timedict}
msg = json.dumps(data)
s.sendto(msg.encode('utf-8'), dest)
curtime = curtime.astimezone(datetime.timezone(datetime.timedelta(hours=localtimezone)))
timedict = toTimeDict(curtime)
timestr = TIME_FORMAT % timedict
print('Broadcasting time: %s'%timestr)
except (KeyboardInterrupt, SystemExit):
return
except:
traceback.print_exc()
try:
time.sleep(args.i)
except (KeyboardInterrupt, SystemExit):
print('')
return
def client(args):
print ("MyNtp Client. ")
print ("press Ctrl + c to stop ")
dest = ('', PORT)
s.bind(dest)
print ('Listening for broadcast at', s.getsockname())
while True:
try:
message, addr = s.recvfrom(8192)
data = json.loads(message.decode('utf-8'))
timedict = data['time']
curtime = datetime.datetime(**timedict).replace(tzinfo=datetime.timezone.utc)
curtime = curtime.astimezone(datetime.timezone(datetime.timedelta(hours=localtimezone)))
timedict = toTimeDict(curtime)
timestr = TIME_FORMAT % timedict
print('Got time: %s from %s'%(timestr, addr))
if isLinux:
os.system("date -s '%s';hwclock -w"%timestr)
else:
timedict['microsecond'] = str(timedict['microsecond'])[:2] # for Windows, keep only the first two digits of the microseconds
os.system('time %(hour)s:%(minute)s:%(second)s.%(microsecond)s'%timedict)
os.system('date %(year)s-%(month)s-%(day)s'%timedict)
except (KeyboardInterrupt, SystemExit):
print('')
return
except:
traceback.print_exc()
def main():
args = procCmdline()
t = threading.Thread(target=run, args=(args,))
t.setDaemon(True)
t.start()
while True:
try:
time.sleep(1)
except (KeyboardInterrupt, SystemExit):
print('')
return
if __name__ == '__main__':
main()
| StarcoderdataPython |
1764743 | # Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from akg.utils import kernel_exec as utils
from tests.common.test_op.ascend import SecondOrder_diag_combine_matrix
from tests.common.gen_random import random_gaussian
def diag_combine_matrix_run(shape, dtype, attrs):
"""
ops run func.
"""
if len(shape) == 1:
shape = shape[0]
mod = utils.op_build_test(SecondOrder_diag_combine_matrix.diag_combine_matrix_1, [shape], [dtype], kernel_name='diag_combine_matrix', attrs=attrs)
exp_output, inputs, output = gen_data1(dtype, shape)
acu_output = utils.mod_launch(mod, (inputs, output), expect=exp_output)
TestCase_Result=np.allclose(acu_output, exp_output, rtol=5e-03, equal_nan=True)
return inputs,acu_output,exp_output,TestCase_Result
else:
print(len(shape))
input_shape = []
input_dtype = []
for val in shape:
input_shape.append(val)
input_dtype.append(dtype)
exp_output, inputs, output = gen_data2(input_dtype, input_shape)
input1, input2 = inputs
mod = utils.op_build_test(SecondOrder_diag_combine_matrix.diag_combine_matrix_2, input_shape, input_dtype, kernel_name='diag_combine_matrix', attrs=attrs)
acu_output = utils.mod_launch(mod, (input1, input2, output), expect=exp_output)
TestCase_Result=np.allclose(acu_output, exp_output, rtol=5e-03, equal_nan=True)
return input1,acu_output,exp_output,TestCase_Result
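# gen_data1/gen_data2 build the expected result on the host: each input batch is laid out as a
# block on the diagonal of a larger square matrix, which is what the diag_combine_matrix
# kernels are expected to produce.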
def gen_data1(dtype, shape):
"""
generate data.
"""
inputs = random_gaussian(shape, miu=1, sigma=10.0).astype(dtype)
batch_dim = shape[1]
batch_size = shape[0]
matrix_dim = batch_size * batch_dim
exp_output = np.zeros((matrix_dim, matrix_dim)).astype(dtype)
for i in range(batch_size):
for j in range(batch_dim):
for k in range(batch_dim):
exp_output[i * batch_dim + j, i * batch_dim + k] = inputs[i, j, k]
output = np.full((matrix_dim,matrix_dim), np.nan, dtype)
return exp_output, inputs, output
def gen_data2(dtype, shape):
"""
generate data.
"""
input_matrix_num = len(dtype)
inputs = []
matrix_dim = 0
for i in range(input_matrix_num):
shape_ = shape[i]
dtype_ = dtype[i]
matrix_dim += shape_[1] * shape_[0]
inputs_tmp = random_gaussian(shape_, miu=1, sigma=10.0).astype(dtype_)
inputs.append(inputs_tmp)
output = np.full((matrix_dim,matrix_dim), np.nan, dtype[0])
dim = 0
exp_output = np.zeros((matrix_dim, matrix_dim)).astype(dtype[0])
for i in range(input_matrix_num):
batch_size = shape[i][0]
batch_dim = shape[i][1]
for j in range(batch_size):
for m in range(batch_dim):
for n in range(batch_dim):
exp_output[dim + j * batch_dim + m, dim + j * batch_dim + n] = inputs[i][j,m,n]
dim += batch_dim * batch_size
return exp_output, inputs, output
| StarcoderdataPython |
3359549 | from .dataset import Dataset
from .repository import Repository, StaticRepository
__all__ = [
'Dataset',
'Repository',
'StaticRepository',
]
| StarcoderdataPython |
4803164 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Check if pickle can serialize CoTeDe's dataset.
This is critical to allow running CoTeDe with multiprocessing.
"""
import pickle
import numpy as np
from cotede.qc import ProfileQC
from data import DummyData
def test_serialize_ProfileQC():
""" Serialize ProfileQC
"""
profile = DummyData()
pqc = ProfileQC(profile)
pqc2 = pickle.loads(pickle.dumps(pqc))
assert sorted(pqc.data.keys()) == sorted(pqc2.data.keys())
for v in pqc.data:
assert np.allclose(pqc[v], pqc2[v])
assert sorted(pqc.attributes.keys()) == sorted(pqc2.attributes.keys())
for v in pqc.attributes:
assert pqc.attributes[v] == pqc2.attributes[v]
assert sorted(pqc.flags.keys()) == sorted(pqc2.flags.keys())
for v in pqc.flags:
for f in pqc.flags[v]:
assert np.allclose(pqc.flags[v][f], pqc2.flags[v][f])
| StarcoderdataPython |
3208446 | <reponame>jasonsbrooks/ARTIST
#!/usr/bin/env python
"""
Train an ngram model
$ python -m ngram.train -o outdir [-t poolsize] [-k key] [-u username] [-p password]
where:
- `outdir` is where the trained models will be saved
- `poolsize` is the number of databases
- `key` is the key to save the models in
- `username` is the database username
- `password` is the database password
"""
import numpy as np
import sys, os, re, music21
from optparse import OptionParser
from multiprocessing import Process
from collections import deque
from sqlalchemy import desc, asc
from db import Song, Track, Note, get_sessions
from ngram_helper import key_transpose_pitch
from exceptions import InvalidKeySignature
NUM_NOTES = 128
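# Each roman numeral gets its own 128 x 128 x 128 count tensor: counts[a, b, c] is the number
# of times the pitch trigram (a, b, c) was observed, after transposing the notes into the
# requested key.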
class RomanTrainer(object):
"""
A RomanTrainer is the model trainer
1. for a given process / database, and
2. for a given roman numeral.
"""
def __init__(self,p_id,rt_id,counts,options):
"""
Initialize the RomanTrainer
Args:
p_id: process id
rt_id: roman numeral id
counts: counts matrix
options: options passed into script
"""
self.p_id = p_id
self.rt_id = rt_id
self.counts = counts
self.triple = deque()
self.options = options
# assume the user has specified a major key
self.dest_key = (music21.key.Key(options.key).sharps,0)
def transposed_triple(self):
"""
Transpose a triple into the appropriate key
Returns:
int[]: the transposed triple
"""
res = []
notes = list(self.triple)
for note in notes:
src_key = (note.track.key_sig_top,note.track.key_sig_bottom)
res.append(key_transpose_pitch(note.pitch,src_key,self.dest_key))
return res
def train(self,note):
"""
Train this RomanTrained on a given note
Args:
note: the note to train on
"""
self.triple.append(note)
if len(self.triple) > 3:
# remove the old note
old_note = self.triple.popleft()
try:
# increment the matrix, where appropriate
np.add.at(self.counts, tuple(self.transposed_triple()), 1)
except InvalidKeySignature, e:
# remove the bad note, append the old note.
self.triple.pop()
self.triple.appendleft(old_note)
def write(self):
"""
Write the numpy counts matrix out to file.
"""
with open(os.path.join(self.options.outdir,str(self.p_id),str(self.rt_id) + ".npy"), 'w') as outfile:
np.save(outfile, self.counts)
class TrackTrainer(Process):
"""
Separate process to train ngram models, all music sourcing from one database
"""
def __init__(self,p_id,session,options):
"""
Initialize the TrackTrainer process
Args:
p_id: process id
session: the database session to load from
options (dict): options passed to script
"""
Process.__init__(self)
self.session = session
self.options = options
self.rts = []
matrix_size = (NUM_NOTES, NUM_NOTES, NUM_NOTES)
# construct the roman trainers
for i in xrange(7):
rt = RomanTrainer(p_id,i + 1,np.zeros(matrix_size, dtype=np.int16),options)
self.rts.append(rt)
def run(self):
"""
Start the process, training on each track separately
"""
# iterate through all the tracks
for trk in self.session.query(Track).all():
self.train(trk)
# write all the rts
for rt in self.rts:
rt.write()
def train(self,trk):
"""
Train the ngram model on a specific track
Args:
trk: the track on which to train
"""
print os.path.basename(trk.song.title), ":", trk.instr_name
# skip percurssion tracks
regexp = re.compile(r'drum|cymbal', re.IGNORECASE)
if trk.channel == 9 or regexp.search(trk.instr_name) is not None:
# print 'skipped percussion track'
return
# skip bass tracks
regexp = re.compile(r'bass', re.IGNORECASE)
if (trk.channel >= 32 and trk.channel <= 39) or regexp.search(trk.instr_name) is not None:
# print 'skipped bass track'
return
# and through all the notes in a track
for note in trk.notes:
if note.pitch < 0 or note.pitch >= NUM_NOTES:
continue  # skip pitches outside the MIDI range
# train using the appropriate rt
if note.roman:
self.rts[note.roman-1].train(note)
def main():
parser = OptionParser()
parser.add_option("-o", "--outdir", dest="outdir")
parser.add_option("-t", "--poolsize", dest="pool_size", default=8, type="int")
parser.add_option("-k", "--key", dest="key", default="C")
parser.add_option("-u", "--username", dest="db_username", default="postgres")
parser.add_option("-p", "--password", dest="db_password", default="<PASSWORD>")
(options, args) = parser.parse_args()
# make the process output directory if not there already
for p_id in xrange(options.pool_size):
print options.outdir
pt = os.path.join(options.outdir,str(p_id) + "/")
print pt
if not os.path.exists(pt):
os.mkdir(pt)
sessions = get_sessions(options.pool_size,options.db_username,options.db_password)
processes = []
# construct and start the threads
for i in xrange(options.pool_size):
p = TrackTrainer(str(i),sessions[i],options)
processes.append(p)
p.start()
# wait for processes to complete
for p in processes:
p.join()
# construct cumulative counts matrices
matrix_size = (NUM_NOTES, NUM_NOTES, NUM_NOTES)
cumulative_counts = []
for i in xrange(7):
cumulative_counts.append(np.zeros(matrix_size, dtype=np.int16))
for p_id in xrange(options.pool_size):
for rt_id in xrange(7):
            with open(os.path.join(options.outdir + "/",str(p_id) + "/",str(rt_id + 1) + ".npy"), 'rb') as f:
counts = np.load(f)
cumulative_counts[rt_id] = np.add(cumulative_counts[rt_id],counts)
for i in xrange(7):
        with open(os.path.join(options.outdir + "/",str(i+1) + ".npy"), "wb") as f:
np.save(f,cumulative_counts[i])
if __name__ == '__main__':
main()
| StarcoderdataPython |
1675887
from lwr.lwr_client.action_mapper import from_dict
def preprocess(job_directory, setup_actions):
for setup_action in setup_actions:
name = setup_action["name"]
input_type = setup_action["type"]
action = from_dict(setup_action["action"])
path = job_directory.calculate_path(name, input_type)
action.write_to_path(path)
__all__ = ['preprocess']
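# Illustrative sketch (an assumption, not from the original module): the shape
# of `setup_actions` that preprocess() expects -- a list of dicts whose
# "action" entries are deserialised by lwr.lwr_client.action_mapper.from_dict.
# The exact contents of the "action" dict depend on the LWR action mapper and
# are left as a placeholder here.
#
#   setup_actions = [
#       {"name": "input.fasta",   # dataset name
#        "type": "input",         # input type passed to calculate_path()
#        "action": {...}},        # dict understood by from_dict()
#   ]
#   preprocess(job_directory, setup_actions)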
| StarcoderdataPython |
1752060
<filename>tests/test_rfc6962.py
from ctutlz import rfc6962
def test_parse_log_entry_type_0():
tdf = b'\x00\x00'
parse, offset = rfc6962._parse_log_entry_type(tdf)
assert offset == 2
assert parse == {
'tdf': b'\x00\x00',
'val': 0,
}
def test_parse_log_entry_type_1():
tdf = b'\x00\x01'
parse, offset = rfc6962._parse_log_entry_type(tdf)
assert offset == 2
assert parse == {
'tdf': b'\x00\x01',
'val': 1,
}
def test_log_entry_type_0_from_tdf():
tdf = b'\x00\x00anything'
log_entry_type = rfc6962.LogEntryType(arg=tdf)
assert log_entry_type.is_x509_entry is True
assert log_entry_type.is_precert_entry is False
assert log_entry_type.tdf == b'\x00\x00'
assert str(log_entry_type) == 'x509_entry'
assert log_entry_type._parse == {
'tdf': b'\x00\x00',
'val': 0,
}
def test_log_entry_type_0_from_parse():
parse = {
'tdf': b'\x00\x00',
'val': 0,
}
log_entry_type = rfc6962.LogEntryType(arg=parse)
assert log_entry_type.is_x509_entry is True
assert log_entry_type.is_precert_entry is False
assert log_entry_type.tdf == b'\x00\x00'
assert str(log_entry_type) == 'x509_entry'
assert log_entry_type._parse == {
'tdf': b'\x00\x00',
'val': 0,
}
def test_log_entry_type_1_from_tdf():
tdf = b'\x00\x01'
log_entry_type = rfc6962.LogEntryType(arg=tdf)
assert log_entry_type.is_x509_entry is False
assert log_entry_type.is_precert_entry is True
assert log_entry_type.tdf == b'\x00\x01'
assert str(log_entry_type) == 'precert_entry'
assert log_entry_type._parse == {
'tdf': b'\x00\x01',
'val': 1,
}
def test_log_entry_type_1_from_parse():
parse = {
'tdf': b'\x00\x01',
'val': 1,
}
log_entry_type = rfc6962.LogEntryType(arg=parse)
assert log_entry_type.is_x509_entry is False
assert log_entry_type.is_precert_entry is True
assert log_entry_type.tdf == b'\x00\x01'
assert str(log_entry_type) == 'precert_entry'
assert log_entry_type._parse == {
'tdf': b'\x00\x01',
'val': 1,
}
def test_signature_type_0_from_tdf():
tdf = b'\x00\x01\x02\x03\x04\x05\x06\x07\x89'
signature_type = rfc6962.SignatureType(arg=tdf)
assert signature_type.is_certificate_timestamp is True
assert signature_type.is_tree_hash is False
assert signature_type._parse == {
'tdf': b'\x00',
'val': 0,
}
def test_signature_type_0_from_parse():
parse = {
'tdf': b'\x00',
'val': 0,
}
signature_type = rfc6962.SignatureType(arg=parse)
assert signature_type.is_certificate_timestamp is True
assert signature_type.is_tree_hash is False
assert signature_type._parse == {
'tdf': b'\x00',
'val': 0,
}
def test_signature_type_1_from_tdf():
tdf = b'\x01'
signature_type = rfc6962.SignatureType(arg=tdf)
assert signature_type.is_certificate_timestamp is False
assert signature_type.is_tree_hash is True
assert signature_type._parse == {
'tdf': b'\x01',
'val': 1,
}
def test_signature_type_1_from_parse():
parse = {
'tdf': b'\x01',
'val': 1,
}
signature_type = rfc6962.SignatureType(arg=parse)
assert signature_type.is_certificate_timestamp is False
assert signature_type.is_tree_hash is True
assert signature_type._parse == {
'tdf': b'\x01',
'val': 1,
}
def test_version_from_tdf():
tdf = b'\x00anything'
version = rfc6962.Version(tdf)
assert version.is_v1 is True
assert version._parse == {
'tdf': b'\x00',
'val': 0,
}
# invalid version number
invalid_tdf = b'\x10'
version = rfc6962.Version(invalid_tdf)
assert version.is_v1 is False
assert version._parse == {
'tdf': b'\x10',
'val': 16,
}
def test_version_from_parse():
parse = {
'val': 0,
'tdf': b'\x00',
}
version = rfc6962.Version(arg=parse)
assert version.is_v1 is True
assert version._parse == {
'tdf': b'\x00',
'val': 0,
}
def test_SignedCertificateTimestamp_from_tdf():
tdf = (b'\x00\xeeK\xbd\xb7u\xce`\xba\xe1Bi\x1f\xab\xe1\x9ef\xa3\x0f~_\xb0r'
b'\xd8\x83\x00\xc4{\x89z\xa8\xfd\xcb\x00\x00\x01]\xe7\x11\xf5\xf7'
b'\x00\x00\x04\x03\x00F0D\x02 ph\xa0\x08\x96H\xbc\x1b\x11\x0e\xd0'
b'\x98\x02\xa8\xac\xb8\x19-|,\xe5\x0e\x9e\xf8/_&\xf7b\x88\xb4U\x02 X'
b'\xbc\r>jFN\x0e\xda\x0b\x1b\xb5\xc0\x1a\xfd\x90\x91\xb0&\x1b\xdf'
b'\xdc\x02Z\xd4zd\xd7\x80c\x0f\xd5')
sct = rfc6962.SignedCertificateTimestamp(arg=tdf)
assert sct.log_id.tdf == (b'\xeeK\xbd\xb7u\xce`\xba\xe1Bi\x1f\xab\xe1\x9ef'
b'\xa3\x0f~_\xb0r\xd8\x83\x00\xc4{\x89z\xa8\xfd'
b'\xcb')
assert sct.tdf == tdf
| StarcoderdataPython |
1756802
import pandas as pd
def kinase_rank(heatmap, df1, df2, predictor_idx, predictor_col):
"""
    Sort the Jaccard's Index for each kinase from the two compared predictors (index, columns) in the given heatmap.
Parameters
----------
heatmap: dataframe
Jaccard's Index matrix
df1, df2: dataframe
prediction of the two compared predictors
    predictor_idx: str
        predictor represented by the index of the heatmap
    predictor_col: str
        predictor represented by the columns of the heatmap
Returns
--------
kin_rank: dataframe
'Kinase': Kinase Name
'number of overlapped kinase'
'Rank in ' + predictor_col: predictor_idx kinase rank in predictor_col
'Rank in ' + predictor_idx: predictor_col kinase rank in predictor_idx
'number of kinase in '+ predictor_col
'number of kinase in '+ predictor_idx
"""
    # get list of unique kinases from the compared predictors
kinases1 = df1['Kinase Name'].drop_duplicates().tolist()
kinases2 = df2['Kinase Name'].drop_duplicates().tolist()
    # get the number of kinases predicted by the two compared predictors
num_idx_kin = len(kinases1)
num_col_kin = len(kinases2)
# get the overlapped kinases
overlap_kin = list(set(kinases1) & set(kinases2))
# get the number of overlapped kinases
num_overlap_kin = len(overlap_kin)
# create a dataframe for the ranking summary
kin_rank = pd.DataFrame(columns=[
'Kinase', # Kinase Name
'number of overlapped kinase', # of the two predictors
'Rank in ' + predictor_col, # predictor_idx kinase rank in predictor_col
'Rank in ' + predictor_idx, # predictor_col kinase rank in predictor_idx
'number of kinase in '+ predictor_col,
'number of kinase in '+ predictor_idx,
]
)
# fill the dataframe columns
kin_rank['Kinase'] = overlap_kin
kin_rank['number of overlapped kinase'] = num_overlap_kin
kin_rank['number of kinase in '+ predictor_col] = num_col_kin
kin_rank['number of kinase in '+ predictor_idx] = num_idx_kin
# iterate through the heatmap indexes
for i in heatmap.index:
# sort values of each row
li = heatmap.loc[i , : ].sort_values(ascending=False)
li = li.reset_index()
li = li.rename(columns={'index': 'kin'})
        # if the kinase represented by the index is present in both compared predictors
if li[i].nunique() != 1:
# iterate through the row,
for index, row in li.iterrows():
# find the same kinase
if li.at[index,'kin'] == i:
# get the position in the sorted list
rank = index+1
# fill the rank in the dataframe
kin_rank.loc[kin_rank.Kinase == i, ['Rank in ' + predictor_col]] = rank
# iterate through the heatmap cols
for j in heatmap.columns:
# sort values of each col
li = heatmap[j].sort_values(ascending=False)
li = li.reset_index()
li = li.rename(columns={'index': 'kin'})
        # if the kinase represented by the column is present in both compared predictors
if li[j].nunique() != 1:
# iterate through the col
for index, row in li.iterrows():
# find the same kinase
if li.at[index,'kin'] == j:
# get the position in the sorted list
rank = index+1
# fill the rank in the dataframe
kin_rank.loc[kin_rank.Kinase == j, ['Rank in ' + predictor_idx]] = rank
print (len(kin_rank.index), 'overlapped kinases \n',
predictor_idx, ': ', num_idx_kin, 'kinases','\n',
predictor_col, ': ',num_col_kin, 'kinases', '\n')
return kin_rank
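# Illustrative usage (hypothetical data, not from the original module):
# `heatmap` is a kinase-by-kinase Jaccard's Index matrix and df1/df2 hold the
# predictions of the two predictors being compared.
#
#   heatmap = pd.DataFrame([[0.9, 0.1], [0.2, 0.8]],
#                          index=['AKT1', 'CDK1'], columns=['AKT1', 'CDK1'])
#   df1 = pd.DataFrame({'Kinase Name': ['AKT1', 'CDK1']})
#   df2 = pd.DataFrame({'Kinase Name': ['AKT1', 'CDK1', 'GSK3B']})
#   ranks = kinase_rank(heatmap, df1, df2, 'PredictorA', 'PredictorB')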
| StarcoderdataPython |
1737264
import numpy as np
def gini(labels):
total_size = len(labels)
label_quantities = np.unique(labels, return_counts=True)
sum_of_probs_sq = 0
for num_of_elem in label_quantities[1]:
sum_of_probs_sq += (num_of_elem / total_size) ** 2
return 1 - sum_of_probs_sq
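# Illustrative examples (not part of the original module): an even split of two
# labels gives the maximal binary impurity of 0.5, while a pure set gives 0.
#
#   >>> gini(['a', 'a', 'b', 'b'])
#   0.5
#   >>> gini(['a', 'a', 'a'])
#   0.0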
| StarcoderdataPython |
81331
<filename>src/BeautifulSoupExample.py
from bs4 import BeautifulSoup
import requests
URL_BASE = "http://jarroba.com/"
MAX_PAGES = 20
counter = 0
for i in range(1, MAX_PAGES):
    # Build the URL
if i > 1:
url = "%spage/%d/" % (URL_BASE, i)
else:
url = URL_BASE
    # Make the request to the website
req = requests.get(url)
    # Check that the request returns a Status Code = 200
statusCode = req.status_code
if statusCode == 200:
        # Load the page's HTML content into a BeautifulSoup() object
html = BeautifulSoup(req.text, "html.parser")
        # Get all the divs that contain the posts
entradas = html.find_all('div', {'class': 'col-md-4 col-xs-12'})
        # Loop over all the posts to extract the title, author and date
for entrada in entradas:
counter += 1
titulo = entrada.find('span', {'class': 'tituloPost'}).getText()
autor = entrada.find('span', {'class': 'autor'}).getText()
fecha = entrada.find('span', {'class': 'fecha'}).getText()
            # Print the title, author and date of the posts
print("%d - %s | %s | %s" % (counter, titulo, autor, fecha))
else:
        # If the page no longer exists and the request returns a 400
        break
| StarcoderdataPython |
3307530
"""
python -c "import cyth, doctest; print(doctest.testmod(cyth.cyth_macros))"
"""
from __future__ import absolute_import, division, print_function
import parse
import re
from functools import partial
from .cyth_decorators import macro
def parens(str_):
return '(' + str_ + ')'
def is_constant(expr, known_constants=[]):
if re.match(r'\d+', expr) or expr in known_constants:
return True
return False
def get_slicerange(slice_, dim_name='dim0'):
"""
slice_ = 'start:stop:stride'
"""
tup = slice_.split(':')
if len(tup) == 0:
raise AssertionError('')
if len(tup) == 1:
if is_constant(tup[0]):
slicerange = ('1',)
else:
raise NotImplementedError('custom input')
elif len(tup) == 2:
start = '0' if tup[0] == '' else tup[0]
stop = dim_name if tup[1] == '' else tup[1]
slicerange = (start, stop)
elif len(tup) == 3:
start = '0' if tup[0] == '' else tup[0]
stop = dim_name if tup[1] == '' else tup[1]
stride = '1' if tup[2] == '' else tup[2]
slicerange = (start, stop, stride)
else:
raise AssertionError('??')
return slicerange
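# Illustrative examples (derived from the code above, not from the original
# module): a full slice keeps the symbolic dimension name, while a bare
# constant collapses to a one-element range.
#
#   >>> get_slicerange(':', 'dim0')
#   ('0', 'dim0')
#   >>> get_slicerange('2:', 'dim1')
#   ('2', 'dim1')
#   >>> get_slicerange('1')
#   ('1',)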
def make_gensym_function(suffix='gensym'):
gensym_dict = {}
def gensym(prefix):
number = gensym_dict.get(prefix, 0)
gensym_dict[prefix] = number + 1
return '%s__%s%s' % (prefix, suffix, number)
return gensym
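# Illustrative example (not part of the original module): each prefix gets its
# own counter, so repeated calls yield unique, predictable names.
#
#   >>> gensym = make_gensym_function()
#   >>> gensym('dim0_idx')
#   'dim0_idx__gensym0'
#   >>> gensym('dim0_idx')
#   'dim0_idx__gensym1'
#   >>> gensym('dim1')
#   'dim1__gensym0'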
@macro
def numpy_fancy_index_macro(gensym, lines):
return map(partial(numpy_fancy_index_assign, gensym), lines)
def numpy_fancy_index_assign(gensym, line):
"""
    Input is guaranteed to be one numpy array assigning to another.
Still in development. May break for complicated cases.
>>> from cyth.cyth_macros import *
>>> gensym = make_gensym_function()
>>> line = '_iv21s = invVR_mats[:, 1, :, 2:42, ::-1, , 3:, :]'
>>> line = '_iv21s = invVR_mats[:, 1, :]'
>>> block1 = numpy_fancy_index_assign(gensym, '_iv11s = invVR_mats[:, 0, 0]')
>>> block2 = numpy_fancy_index_assign(gensym, '_iv12s = invVR_mats[:, :, 1]')
>>> block3 = numpy_fancy_index_assign(gensym, '_iv21s = invVR_mats[:, 1, :]')
>>> block4 = numpy_fancy_index_assign(gensym, '_iv22s = invVR_mats[:, 1, 1]')
>>> print('\n'.join((block1, block2, block3, block4)))
A shape is a tuple of array dimensions
"""
# Split at assignment
LHS, RHS = line.split('=')
# Parse out the fancy index slice
_parseresult = parse.parse('{arr2}[{fancy_slice}]', RHS.strip())
_fancy_slice = _parseresult.named['fancy_slice']
arr1 = LHS.strip()
arr2 = _parseresult.named['arr2']
# Break fancy slices into individual slices
slice_list = [_.strip() for _ in _fancy_slice.split(',')]
dim_ix_idx_fmt = gensym('dim{ix}_idx')
dim_ix_fmt = gensym('dim{ix}')
defdim_fmt = 'cdef size_t {dim_ix_fmt} = {{arr2}}.shape[{{ix}}]'.format(dim_ix_fmt=dim_ix_fmt)
#alloc_ouput_fmt = 'cdef np.ndarray {arr1} = np.ndarray(({LHS_shape}), {arr2}.dtype)'
alloc_ouput_fmt = '{arr1} = np.ndarray(({LHS_shape}), {arr2}.dtype)'
for_fmt = '{indent}for {dim_x} in range({dimsize}):'
assign_index_fmt = '{indent}{arr1}[{LHS_index}] = {arr2}[{RHS_index}]'
RHS_slicerange_list = [get_slicerange(slice_, dim_ix_fmt.format(ix=ix)) for ix, slice_ in enumerate(slice_list)]
LHS_slicerange_list = [slicerange for slicerange in RHS_slicerange_list if
slicerange[0] != '1']
LHS_dimsize_list = [slicerange[0]
if len(slicerange) == 1 else
slicerange[1] + ' - ' + slicerange[0]
for slicerange in LHS_slicerange_list]
RHS_dimsize_list = [slicerange[0]
if len(slicerange) == 1 else
slicerange[1] + ' - ' + slicerange[0]
for slicerange in RHS_slicerange_list ]
#LHS_shape_list = [', '.join(slicerange) for slicerange in LHS_slicerange_list]
LHS_shape = parens(', '.join(LHS_dimsize_list))
#RHS_shape = parens(', '.join(RHS_dimsize_list))
indent = ''
for_list = []
# indexes into the array shapes
LHS_shapex_list = []
RHS_shapex_list = []
# So hacky and special cased
for ix, dimsize in enumerate(RHS_dimsize_list):
rhs_slicerange = RHS_slicerange_list[ix]
dim_x = dim_ix_idx_fmt.format(ix=ix)
if len(rhs_slicerange) == 1:
RHS_shapex_list.append(dimsize)
#LHS_shapex_list.append('0')
else:
LHS_shapex_list.append(dim_x)
RHS_shapex_list.append(dim_x)
for_list.append(for_fmt.format(indent=indent, dim_x=dim_x,
dimsize=dimsize))
indent += ' '
RHS_defdim_list = [defdim_fmt.format(ix=ix, arr2=arr2) for ix in range(len(slice_list))]
# Make index expression
LHS_index = ', '.join(LHS_shapex_list)
RHS_index = ', '.join(RHS_shapex_list)
alloc_output_line = alloc_ouput_fmt.format(
arr1=arr1, arr2=arr2, LHS_shape=LHS_shape)
defdim_assign = '\n'.join(RHS_defdim_list)
for_lines = '\n'.join(for_list)
assign_index_line = assign_index_fmt.format(
indent=indent, arr1=arr1, LHS_index=LHS_index, arr2=arr2, RHS_index=RHS_index)
output_lines = [defdim_assign, alloc_output_line, for_lines, assign_index_line]
output_block = '\n' + '\n'.join(output_lines) + '\n\n'
print(output_block)
return output_block
def numpy_fancy_index_assign1(gensym, line):
"""
    Input is guaranteed to be one numpy array assigning to another.
Still in development. May break for complicated cases.
>>> from cyth.cyth_macros import *
>>> gensym = make_gensym_function()
>>> line = '_iv21s = invVR_mats[:, 1, :]'
>>> block1 = numpy_fancy_index_assign(gensym, '_iv11s = invVR_mats[:, 0, 0]')
>>> block2 = numpy_fancy_index_assign(gensym, '_iv12s = invVR_mats[:, :, 1]')
>>> block3 = numpy_fancy_index_assign(gensym, '_iv21s = invVR_mats[:, 1, :]')
>>> block4 = numpy_fancy_index_assign(gensym, '_iv22s = invVR_mats[:, 1, 1]')
>>> print('\n'.join((block1, block2, block3, block4)))
A shape is a tuple of array dimensions
"""
# Split at assignment
LHS, RHS = line.split('=')
# Parse out the fancy index slice
_parseresult = parse.parse('{arr2}[{fancy_slice}]', RHS.strip())
_fancy_slice = _parseresult.named['fancy_slice']
arr1 = LHS.strip()
arr2 = _parseresult.named['arr2']
# Break fancy slices into individual slices
slice_list = [_.strip() for _ in _fancy_slice.split(',')]
dim_ix_idx_fmt = gensym('dim{ix}_idx')
dim_ix_fmt = gensym('dim{ix}')
defdim_fmt = 'cdef Py_ssize_t {dim_ix_fmt} = {{arr2}}.shape[{{ix}}]'.format(dim_ix_fmt=dim_ix_fmt)
#alloc_ouput_fmt = 'cdef np.ndarray {arr1} = np.ndarray(({LHS_shape}), {arr2}.dtype)'
alloc_ouput_fmt = '{arr1} = np.empty(({LHS_shape}))'
#, {arr2}.dtype
for_fmt = '{indent}for {dim_x} in range({dimsize}):'
assign_index_fmt = '{indent}{arr1}[{LHS_index}] = {arr2}[{RHS_index}]'
RHS_slicerange_list = [get_slicerange(slice_, dim_ix_fmt.format(ix=ix))
for ix, slice_ in enumerate(slice_list)]
LHS_slicerange_list = [slicerange for slicerange in RHS_slicerange_list if
slicerange[0] != '1']
LHS_dimsize_list = [slicerange[0]
if len(slicerange) == 1 else
slicerange[1] + ' - ' + slicerange[0]
for slicerange in LHS_slicerange_list]
RHS_dimsize_list = [slicerange[0]
if len(slicerange) == 1 else
slicerange[1] + ' - ' + slicerange[0]
for slicerange in RHS_slicerange_list ]
#LHS_shape_list = [', '.join(slicerange) for slicerange in LHS_slicerange_list]
LHS_shape = parens(', '.join(LHS_dimsize_list))
#RHS_shape = parens(', '.join(RHS_dimsize_list))
indent = ''
for_list = []
# indexes into the array shapes
LHS_shapex_list = []
RHS_shapex_list = []
# So hacky and special cased
for ix, dimsize in enumerate(RHS_dimsize_list):
rhs_slicerange = RHS_slicerange_list[ix]
dim_x = dim_ix_idx_fmt.format(ix=ix)
if len(rhs_slicerange) == 1:
RHS_shapex_list.append(dimsize)
#LHS_shapex_list.append('0')
else:
LHS_shapex_list.append(dim_x)
RHS_shapex_list.append(dim_x)
for_list.append(for_fmt.format(indent=indent, dim_x=dim_x,
dimsize=dimsize))
indent += ' '
RHS_defdim_list = [defdim_fmt.format(ix=ix, arr2=arr2) for ix in range(len(slice_list))]
# Make index expression
LHS_index = ', '.join(LHS_shapex_list)
RHS_index = ', '.join(RHS_shapex_list)
alloc_output_line = alloc_ouput_fmt.format(
arr1=arr1, arr2=arr2, LHS_shape=LHS_shape)
defdim_assign = '\n'.join(RHS_defdim_list)
for_lines = '\n'.join(for_list)
assign_index_line = assign_index_fmt.format(
indent=indent, arr1=arr1, LHS_index=LHS_index, arr2=arr2, RHS_index=RHS_index)
output_lines = [defdim_assign, alloc_output_line, for_lines, assign_index_line]
output_block = '\n' + '\n'.join(output_lines) + '\n\n'
print(output_block)
return output_block
| StarcoderdataPython |
3325999
<reponame>z3r0zh0u/pyole
#!/usr/bin/env python
"""
pyvba example: file classification based on file format
currently supports files in the following formats:
* ole format
* openxml format
* mhtml format
* base64 encoded mhtml format
"""
import os
import re
import sys
import time
import zlib
import shutil
import base64
import zipfile
import hashlib
import argparse
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from pyvba import *
def vba_info(filename):
try:
vbafile = VBAFile(filename)
if vbafile.PROJECT != None:
print '###### VBA Project Properties ######\n'
print '[Project Property]'
for key, value in vbafile.PROJECT.Property.iteritems():
print key + ' = ' + value
print '\n[Host Extenders]'
for key, value in vbafile.PROJECT.HostExtenders.iteritems():
print key + ' = ' + value
print '\n[Workspace]'
for key, value in vbafile.PROJECT.Workspace.iteritems():
print key + ' = ' + value
print '\n###### VBA Project Records ######\n'
print '[Information Record]'
SysKind = vbafile.dir.InformationRecord.SysKindRecord.SysKind
if SysKind == 0x00:
print 'SysKind: ' + str(hex(SysKind)) + ' (16-bit Windows Platforms)'
elif SysKind == 0x01:
print 'SysKind: ' + str(hex(SysKind)) + ' (32-bit Windows Platforms)'
elif SysKind == 0x02:
print 'SysKind: ' + str(hex(SysKind)) + ' (Macintosh Platforms)'
elif SysKind == 0x03:
print 'SysKind: ' + str(hex(SysKind)) + ' (64-bit Windows Platforms)'
print 'CodePage: ' + str(hex(vbafile.dir.InformationRecord.CodePageRecord.CodePage))
print 'ProjectName: ' + vbafile.dir.InformationRecord.NameRecord.ProjectName
print 'DocString: ' + vbafile.dir.InformationRecord.DocStringRecord.DocString
print 'HelpFilePath1: ' + vbafile.dir.InformationRecord.HelpFilePathRecord.HelpFile1
print 'HelpFilePath2: ' + vbafile.dir.InformationRecord.HelpFilePathRecord.HelpFile2
print 'HelpContext: ' + str(hex(vbafile.dir.InformationRecord.HelpContextRecord.HelpContext))
print 'MajorVersion: ' + str(hex(vbafile.dir.InformationRecord.VersionRecord.MajorVersion))
print 'MinorVersion: ' + str(hex(vbafile.dir.InformationRecord.VersionRecord.MinorVersion))
print 'Constants: ' + vbafile.dir.InformationRecord.ConstantsRecord.Constants
print '\n[Reference Record]'
for ReferenceRecord in vbafile.dir.ReferencesRecord.ReferenceArray:
if ReferenceRecord[0] is not None:
print 'Name: ' + ReferenceRecord[0].Name
if isinstance(ReferenceRecord[1], ReferenceControlRecord):
print 'Type: ControlRecord'
elif isinstance(ReferenceRecord[1], ReferenceRegisteredRecord):
print 'Type: RegisteredRecord'
print 'Libid: ' + ReferenceRecord[1].Libid
elif isinstance(ReferenceRecord[1], ReferenceProjectRecord):
print 'Type: ProjectRecord'
print 'LibidAbsolute: ' + ReferenceRecord[1].LibidAbsolute
print 'LibidRelative: ' + ReferenceRecord[1].LibidRelative
print 'MajorVersion: ' + str(hex(ReferenceRecord[1].MajorVersion))
print 'MinorVersion: ' + str(hex(ReferenceRecord[1].MinorVersion))
else:
print 'Unknown reference record type.'
print '-------------------------'
print '\n[Module Record]'
print 'ModuleCookie: ' + str(hex(vbafile.dir.ModulesRecord.CookieRecord.Cookie))
for ModuleRecord in vbafile.dir.ModulesRecord.ModuleArray:
print '-------------------------'
print 'ModuleName: ' + ModuleRecord.NameRecord.ModuleName
print 'SizeOfModuleName: ' + str(hex(ModuleRecord.NameRecord.SizeOfModuleName))
print 'ModuleNameUnicode: ' + ModuleRecord.NameUnicodeRecord.ModuleNameUnicode
print 'SizeOfModuleNameUnicode: ' + str(hex(ModuleRecord.NameUnicodeRecord.SizeOfModuleNameUnicode))
print 'StreamName: ' + ModuleRecord.StreamNameRecord.StreamName
print 'DocString: ' + ModuleRecord.DocStringRecord.DocString
print 'TextOffset: ' + str(hex(ModuleRecord.OffsetRecord.TextOffset))
print 'HelpContext: ' + str(hex(ModuleRecord.HelpContextRecord.HelpContext))
print 'Cookie: ' + str(hex(ModuleRecord.CookieRecord.Cookie))
print 'Type: ' + str(hex(ModuleRecord.TypeRecord.Id))
if ModuleRecord.ReadOnlyRecord is not None:
print 'ReadOnly: True'
if ModuleRecord.PrivateRecord is not None:
print 'Private: True'
codepage = 'cp' + str(vbafile.dir.InformationRecord.CodePageRecord.CodePage)
if codepage == 'cp10000':
modulename = ModuleRecord.NameRecord.ModuleName.decode('mac_roman')
else:
modulename = ModuleRecord.NameRecord.ModuleName.decode(codepage)
moduledata = vbafile.OLE.find_object_by_name(modulename)
if moduledata is not None and len(moduledata) > ModuleRecord.OffsetRecord.TextOffset:
code = moduledata[ModuleRecord.OffsetRecord.TextOffset:]
print 'SourceCodeSize:', str(hex(len(code)))
code = vbafile._decompress(code)
print 'SourceCode:'
print code
except Exception as e:
print e
return False
def find_unique_vba(file_list):
vba_list = list()
vba_hash_list = list()
cookie_list = list()
for filename in file_list:
ole_file = extract_ole_file(filename)
if ole_file is not None:
try:
ole_hash = hashlib.md5(open(ole_file, 'rb').read()).hexdigest()
if ole_hash not in vba_hash_list:
vba_hash_list.append(ole_hash)
vbafile = VBAFile(ole_file)
cookie = str(vbafile.dir.ModulesRecord.CookieRecord.Cookie)
if cookie not in cookie_list:
cookie_list.append(cookie)
vba_list.append(filename)
except Exception as e:
print filename + ': ' + str(e)
if ole_file[0x00:0x07] == 'tmpole_':
os.remove(ole_file)
return vba_list
def parse_vba_info(file_list, unique):
if unique:
vba_list = find_unique_vba(file_list)
print 'Unique VBA: ' + str(len(vba_list))
else:
vba_list = file_list
print 'Total VBA: ' + str(len(vba_list))
i = 0
for filename in vba_list:
print '<---------- VBA #' + str(i) + ' ---------->'
print 'File: ' + filename + '\n'
ole_file = extract_ole_file(filename)
if ole_file is not None:
vba_info(ole_file)
if ole_file[0x00:0x07] == 'tmpole_':
os.remove(ole_file)
else:
print 'Unsupport file format.'
i += 1
def classify_files(filedir):
file_lists = dict()
ole_list = list()
openxml_list = list()
mhtml_list = list()
base64_list = list()
other_list = list()
for root, dirs, files in os.walk(filedir):
for file in files:
filename = os.path.join(root, file)
data = open(filename, 'rb').read()
if data[0x00:0x08] == '\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1':
ole_list.append(filename)
continue
if data[0x00:0x04] == '\x50\x4b\x03\x04':
openxml_list.append(filename)
continue
if data[0x00:0x08] == 'IE1JTUUt':
base64_list.append(filename)
continue
if data.find('MIME-Version') != -1 or \
data.find('<?mso-application progid="Word.Document"?>') != -1:
mhtml_list.append(filename)
continue
other_list.append(filename)
file_lists['ole'] = ole_list
file_lists['openxml'] = openxml_list
file_lists['base64'] = base64_list
file_lists['mhtml'] = mhtml_list
file_lists['other'] = other_list
return file_lists
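# Illustrative sketch (not part of the original script; the directory is a
# placeholder): classify_files() returns a dict keyed by format, so callers can
# report or process each group separately.
#
#   file_lists = classify_files('/path/to/samples')
#   for fmt in ('ole', 'openxml', 'mhtml', 'base64', 'other'):
#       print fmt, len(file_lists[fmt])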
def mov_unsupported_files(file_list, out_dir):
support_files = list()
for filename in file_list:
ole_file = extract_ole_file(filename)
if ole_file is not None:
try:
vbafile = VBAFile(ole_file)
support_files.append(filename)
except Exception as e:
print filename + ': Unable to parse the VBA structure.'
if False == os.path.isdir(out_dir):
os.makedirs(out_dir)
name = os.path.basename(filename)
newfile = os.path.join(out_dir, name)
shutil.move(filename, newfile)
print 'Unsupported file moved to:', newfile
if ole_file[0x00:0x07] == 'tmpole_':
os.remove(ole_file)
else:
print filename + ': Unsupported file.'
if False == os.path.isdir(out_dir):
os.makedirs(out_dir)
name = os.path.basename(filename)
newfile = os.path.join(out_dir, name)
shutil.move(filename, newfile)
print 'Unsupported file moved to:', newfile
return support_files
def parse_files(filedir, action, vbainfo, unique, move_unsupport):
file_lists = classify_files(filedir)
if action:
out_dir = 'classified_' + time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
if move_unsupport:
unsupport_dir = 'classified_unsupported_' + time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
if len(file_lists['ole']) > 0:
print '######################################'
print 'Files in OLE format:'
print '######################################'
for filename in file_lists['ole']:
name = os.path.basename(filename)
print name
        print 'Total number: ' + str(len(file_lists['ole']))
if move_unsupport:
unsupport_ole_dir = os.path.join(unsupport_dir, 'ole')
support_files = mov_unsupported_files(file_lists['ole'], unsupport_ole_dir)
file_lists['ole'] = support_files
if vbainfo:
parse_vba_info(file_lists['ole'], unique)
if action:
ole_dir = os.path.join(out_dir, 'ole')
os.makedirs(ole_dir)
for filename in file_lists['ole']:
name = os.path.basename(filename)
newfile = os.path.join(ole_dir, name)
if 1 == action:
shutil.copy2(filename, newfile)
elif 2 == action:
shutil.move(filename, newfile)
if len(file_lists['openxml']) > 0:
print '######################################'
print 'Files in OPEN XML format:'
print '######################################'
for filename in file_lists['openxml']:
name = os.path.basename(filename)
print name
        print 'Total number: ' + str(len(file_lists['openxml']))
if move_unsupport:
unsupport_openxml_dir = os.path.join(unsupport_dir, 'openxml')
support_files = mov_unsupported_files(file_lists['openxml'], unsupport_openxml_dir)
file_lists['openxml'] = support_files
if vbainfo:
parse_vba_info(file_lists['openxml'], unique)
if action:
openxml_dir = os.path.join(out_dir, 'openxml')
os.makedirs(openxml_dir)
for filename in file_lists['openxml']:
name = os.path.basename(filename)
newfile = os.path.join(openxml_dir, name)
if 1 == action:
shutil.copy2(filename, newfile)
elif 2 == action:
shutil.move(filename, newfile)
if len(file_lists['mhtml']) > 0:
print '######################################'
print 'Files in MHTML format:'
print '######################################'
for filename in file_lists['mhtml']:
name = os.path.basename(filename)
print name
        print 'Total number: ' + str(len(file_lists['mhtml']))
if move_unsupport:
unsupport_mhtml_dir = os.path.join(unsupport_dir, 'mhtml')
support_files = mov_unsupported_files(file_lists['mhtml'], unsupport_mhtml_dir)
file_lists['mhtml'] = support_files
if vbainfo:
parse_vba_info(file_lists['mhtml'], unique)
if action:
mhtml_dir = os.path.join(out_dir, 'mhtml')
os.makedirs(mhtml_dir)
for filename in file_lists['mhtml']:
name = os.path.basename(filename)
newfile = os.path.join(mhtml_dir, name)
if 1 == action:
shutil.copy2(filename, newfile)
elif 2 == action:
shutil.move(filename, newfile)
if len(file_lists['base64']) > 0:
print '######################################'
print 'Files in base64 encoded MHTML format:'
print '######################################'
for filename in file_lists['base64']:
name = os.path.basename(filename)
print name
        print 'Total number: ' + str(len(file_lists['base64']))
if move_unsupport:
unsupport_b64mhtml_dir = os.path.join(unsupport_dir, 'b64mhtml')
support_files = mov_unsupported_files(file_lists['base64'], unsupport_b64mhtml_dir)
file_lists['base64'] = support_files
if vbainfo:
parse_vba_info(file_lists['base64'], unique)
if action:
b64mhtml_dir = os.path.join(out_dir, 'b64mhtml')
os.makedirs(b64mhtml_dir)
for filename in file_lists['base64']:
name = os.path.basename(filename)
newfile = os.path.join(b64mhtml_dir, name)
if 1 == action:
shutil.copy2(filename, newfile)
elif 2 == action:
shutil.move(filename, newfile)
if len(file_lists['other']) > 0:
print '######################################'
print 'Files in unsupport file format:'
print '######################################'
for filename in file_lists['other']:
name = os.path.basename(filename)
print name
        print 'Total number: ' + str(len(file_lists['other']))
if action:
other_dir = os.path.join(out_dir, 'other')
os.makedirs(other_dir)
for filename in file_lists['other']:
name = os.path.basename(filename)
newfile = os.path.join(other_dir, name)
if 1 == action:
shutil.copy2(filename, newfile)
elif 2 == action:
shutil.move(filename, newfile)
if action:
print '######################################'
if 1 == action:
print 'The classified files are copied to: ' + out_dir
elif 2 == action:
print 'The classified files are moved to: ' + out_dir
def extract_ole_file(filename):
data = open(filename, 'rb').read()
tmp_file = 'tmpole_' + time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
if data[0x00:0x08] == '\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1':
return filename
if data[0x00:0x04] == '\x50\x4b\x03\x04':
try:
zf = zipfile.ZipFile(filename, 'r')
for name in zf.namelist():
if name[-14:] == 'vbaProject.bin':
data = zf.read(name)
open(tmp_file, 'wb').write(data)
return tmp_file
            print filename + ': No vbaProject.bin found in zip archive.'
except Exception as e:
print filename + ': ' + str(e)
return None
if data[0x00:0x08] == 'IE1JTUUt':
m = re.search('IE1JTU[0-9a-zA-Z/+=\x0d\x0a]{1000,}', data)
if m is not None:
b64data = m.group(0)
data = base64.b64decode(b64data)
if data.find('MIME-Version') != -1 or \
data.find('<?mso-application progid="Word.Document"?>') != -1:
m = re.search('Q[\x0d\x0a]*W[\x0d\x0a]*N[\x0d\x0a]*0[\x0d\x0a]*a[\x0d\x0a]*X[0-9a-zA-Z/+=\x0d\x0a\x20]{1000,}', data)
if m is not None:
b64data = m.group(0)
data = base64.b64decode(b64data)
try:
data = zlib.decompress(data[0x32:])
open(tmp_file, 'wb').write(data)
return tmp_file
except Exception as e:
print filename + ': ' + str(e)
return None
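# Illustrative sketch (not part of the original script): extract_ole_file()
# returns the input path if it is already OLE, a temporary 'tmpole_*' copy of
# the embedded vbaProject.bin for the other supported formats, or None for
# unsupported input, so callers remove the temporary file when done.
#
#   ole_file = extract_ole_file(filename)
#   if ole_file is not None:
#       vba_info(ole_file)
#       if ole_file[0x00:0x07] == 'tmpole_':
#           os.remove(ole_file)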
if __name__ == '__main__':
init_logging(False)
parser = argparse.ArgumentParser(description='Macro file classification based on file format')
parser.add_argument('directory', action='store', help='path to the sample directory')
parser.add_argument('-vi', '--vba-info', action='store_true', help='extract VBA information')
parser.add_argument('-u', '--unique', action='store_true', help='remove duplicated VBA information')
parser.add_argument('-mu', '--move-unsupport', action='store_true', help='move unsupported files to a separate folder')
group = parser.add_mutually_exclusive_group()
group.add_argument('-c', '--copy', action='store_true', help='copy files in each group to a separate folder')
group.add_argument('-m', '--move', action='store_true', help='move files in each group to a separate folder')
args = parser.parse_args()
action = 0
unique = False
vbainfo = False
if False == os.path.isdir(args.directory):
print 'Invalid directory:', args.directory
exit(0)
if args.vba_info:
vbainfo = True
if vbainfo and args.unique:
unique = True
if args.copy:
action = 1
elif args.move:
action = 2
parse_files(args.directory, action, vbainfo, unique, args.move_unsupport)
| StarcoderdataPython |
3390649 | <filename>level3.py
from star import Star
from levelbase import LevelBase
import jngl
class Level(LevelBase):
def __init__(self):
LevelBase.__init__(self)
self.stars = []
for x in range(170, 770, 160):
for y in range(250, 430, 160):
self.stars.append(Star(x, y, "blue"))
def drawHints(self):
jngl.print("Blue stars are worth 10 points.", 180, 170)
jngl.print("You can press M to turn off the music.", 160, 325) | StarcoderdataPython |
11987 | # Generated by Django 2.2.4 on 2019-08-14 09:13
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("order", "0071_order_gift_cards")]
operations = [
migrations.RenameField(
model_name="order",
old_name="shipping_price_gross",
new_name="shipping_price_gross_amount",
),
migrations.RenameField(
model_name="order",
old_name="shipping_price_net",
new_name="shipping_price_net_amount",
),
migrations.RenameField(
model_name="order", old_name="total_gross", new_name="total_gross_amount"
),
migrations.RenameField(
model_name="order", old_name="total_net", new_name="total_net_amount"
),
migrations.RenameField(
model_name="orderline",
old_name="unit_price_gross",
new_name="unit_price_gross_amount",
),
migrations.RenameField(
model_name="orderline",
old_name="unit_price_net",
new_name="unit_price_net_amount",
),
migrations.AddField(
model_name="order",
name="currency",
field=models.CharField(
default=settings.DEFAULT_CURRENCY,
max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,
),
),
migrations.AddField(
model_name="orderline",
name="currency",
field=models.CharField(
default=settings.DEFAULT_CURRENCY,
max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,
),
),
]
| StarcoderdataPython |
4831684
<gh_stars>1-10
#!/usr/bin/env python3
from olctools.accessoryFunctions.accessoryFunctions import combinetargets, MetadataObject, make_path, \
run_subprocess, SetupLogging
from olctools.databasesetup import enterobase_api_download, get_mlst, get_rmlst
from datetime import datetime
from argparse import ArgumentParser
from subprocess import call
import urllib.request
from glob import glob
import logging
import tarfile
import zipfile
import shutil
import click
import gzip
import ssl
import os
__author__ = 'adamkoziol'
class DatabaseSetup(object):
def cowbat(self):
"""
Run all the methods
"""
logging.info('Beginning COWBAT database downloads')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'genesippr')):
self.sipprverse_targets(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'coregenome')):
self.cowbat_targets(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'ConFindr')):
self.confindr_targets()
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'rMLST')):
self.rmlst(databasepath=self.databasepath,
credentials=self.credentials)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'mash')):
self.mash(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'MLST')):
if self.enterobase:
self.enterobase_mlst(databasepath=self.databasepath)
self.mlst(databasepath=self.databasepath)
else:
self.mlst(databasepath=self.databasepath,
genera=('Bacillus', 'Campylobacter', 'Cronobacter', 'Escherichia', 'Listeria',
'Salmonella', 'Staphylococcus', 'Vibrio', 'Yersinia'))
# if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'cgMLST')):
# self.enterobase_cgmlst(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'univec')):
self.univec(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'resfinder')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='resfinder',
dbname='resfinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'virulence')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='virulence',
dbname='virulencefinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'serosippr')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='serosippr',
dbname='serotypefinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'pointfinder')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='pointfinder',
dbname='pointfinder_db')
# if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'clark')):
# self.clark(databasepath=self.databasepath)
self.download_date()
def sipprverse_full(self):
"""
Run a subset of the methods - only the targets used in the sipprverse are required here
"""
logging.info('Beginning sipprverse full database downloads')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'genesippr')):
self.sipprverse_targets(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'ConFindr')):
self.confindr_targets()
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'mash')):
self.mash(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'MLST')):
self.mlst(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'rMLST')):
self.rmlst(databasepath=self.databasepath,
credentials=self.credentials)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'resfinder')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='resfinder',
dbname='resfinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'virulence')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='virulence',
dbname='virulencefinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'serosippr')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='serosippr',
dbname='serotypefinder_db')
def sipprverse_method(self):
"""
Reduced subset again. Only sipprverse, MASH, and confindr targets are required
"""
logging.info('Beginning sipprverse method database downloads')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'genesippr')):
self.sipprverse_targets(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'ConFindr')):
self.confindr_targets()
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'mash')):
self.mash(databasepath=self.databasepath)
def rmlst_method(self):
"""
Run only the rMLST download
"""
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'rMLST')):
self.rmlst(databasepath=self.databasepath,
credentials=self.credentials)
def sipprverse_targets(self, databasepath, database_name='sipprverse', download_id='18130808'):
"""
Download OLC-specific sipprverse targets
:param databasepath: path to use to save the database
:param database_name: name of current database
:param download_id: figshare identifier of .tar.gz file
"""
self.custom_databases(databasepath=databasepath,
database_name=database_name,
download_id=download_id)
def cowbat_targets(self, databasepath, database_name='COWBAT', download_id='25319129'):
"""
Download OLC-specific COWBAT targets
:param databasepath: path to use to save the database
:param database_name: name of current database
:param download_id: figshare identifier of .tar.gz file
"""
self.custom_databases(databasepath=databasepath,
database_name=database_name,
download_id=download_id)
def confindr_targets(self, database_name='ConFindr'):
"""
Download OLC-specific ConFindr targets
:param database_name: name of current database
"""
logging.info('Downloading ConFindr databases.')
# NOTE: Need ConFindr >= 0.5.0 for this to work.
secret_file = os.path.join(self.credentials, 'secret.txt')
        confindr_download = 'confindr_database_setup -s {secret} -o {output}'\
            .format(secret=secret_file,
                    output=os.path.join(self.databasepath, database_name))
        # Use subprocess.call rather than run_subprocess, as there is a prompt for input from the user
        call(confindr_download, shell=True)
@staticmethod
def mlst(databasepath, genera=('Escherichia', 'Salmonella', 'Yersinia',
'Campylobacter', 'Cronobacter', 'Listeria',
'Bacillus', 'Staphylococcus', 'Vibrio')):
"""
Download the necessary up-to-date MLST profiles and alleles from pubmlst
:param databasepath: path to use to save the database
:param genera: default genera for which alleles and profiles should be downloaded
"""
logging.info('Downloading MLST databases from PubMLST')
for genus in genera:
# Create an object to pass to the get_mlst script
args = MetadataObject()
# Populate the object with the necessary attributes
args.genus = genus
args.repository_url = 'http://pubmlst.org/data/dbases.xml'
args.force_scheme_name = False
args.path = os.path.join(databasepath, 'MLST', genus)
# Create the name of the file to be used to determine if the database download and setup was successful
completefile = os.path.join(args.path, 'complete')
# Only download the files if the download was not previously successful
if not os.path.isfile(completefile):
# Run the download
get_mlst.main(args)
# Create and populate the complete.txt file
with open(completefile, 'w') as complete:
complete.write('\n'.join(glob(os.path.join(args.path, '*'))))
def enterobase_mlst(self, databasepath, genera=('Escherichia', 'Salmonella', 'Yersinia')):
"""
Download the necessary up-to-date MLST profiles and alleles from Enterobase
:param databasepath: path to use to save the database
:param genera: default genera for which alleles and profiles should be downloaded
"""
logging.info('Downloading MLST databases from Enterobase')
for genus in genera:
self.enterobase_download(scheme='MLST_Achtman',
databasepath=databasepath,
analysis='MLST',
genus=genus)
def enterobase_download(self, scheme, databasepath, analysis, genus):
"""
Download the appropriate scheme (MLST_Achtman/cgMLST) for the requested genus
:param scheme: Scheme to download
:param databasepath: path to use to save the database
:param analysis: MLST or cgMLST
:param genus: Genus for which alleles and profiles should be downloaded
"""
enterobase_genus = self.genus_dict[genus]
enterobase_cmd = 'python -m olctools.databasesetup.enterobase_api_download -o {eg} -s {scheme} -d {db}' \
.format(eg=enterobase_genus,
scheme=scheme,
db=databasepath)
if not os.path.isdir(os.path.join(databasepath, analysis, genus)):
#call(enterobase_cmd, shell=True)
out, err = run_subprocess(enterobase_cmd)
print(out, err)
@staticmethod
def rmlst(databasepath, credentials):
"""
Get the most up-to-date profiles and alleles from pubmlst. Note that you will need the necessary access token
and secret for this to work
:param databasepath: path to use to save the database
        :param credentials: path to folder containing access token and secret.txt files
"""
logging.info('Downloading rMLST database')
# Set the name of the file to be used to determine if the database download and set-up was successful
completefile = os.path.join(databasepath, 'rMLST', 'complete')
if not os.path.isfile(completefile):
# Create an object to send to the rMLST download script
args = MetadataObject()
# Add the path and start time attributes
args.path = databasepath
args.logging = logging
args.credentials = credentials
# Run the rMLST download
get_rmlst.Get(args)
# Create and populate the complete.txt file
with open(completefile, 'w') as complete:
complete.write('\n'.join(glob(os.path.join(databasepath, 'rMLST', '*'))))
def enterobase_cgmlst(self, databasepath, genera=('Escherichia', 'Yersinia')):
"""
Download the necessary up-to-date cgMLST profiles and alleles from Enterobase
:param databasepath: path to use to save the database
:param genera: default genera for which alleles and profiles should be downloaded
"""
        logging.info('Downloading cgMLST databases from Enterobase')
for genus in genera:
self.enterobase_download(scheme='cgMLST',
databasepath=databasepath,
analysis='cgMLST',
genus=genus)
def clark(self, databasepath):
"""
Download and set-up the CLARK database using the set_targets.sh script. Use defaults of bacteria for database
type, and species for taxonomic level
:param databasepath: path to use to save the database
"""
# Determine the location of the CLARK scripts
self.clarkpath = os.path.dirname(shutil.which('CLARK'))
if self.clarkpath is not None:
self.clarkpath = os.path.join(self.clarkpath)
if self.clarkpath:
logging.info('Downloading CLARK database')
# Create the folder in which the database is to be stored
databasepath = self.create_database_folder(databasepath, 'clark')
# Set the call to create the database - use the --light option, as we don't require the full database
targetcall = 'cd {clarkpath} && ../opt/clark/set_targets.sh {dbpath} bacteria --species --light'\
.format(clarkpath=self.clarkpath,
dbpath=databasepath)
# Download the database
self.database_clone(targetcall, databasepath)
else:
logging.warning('No CLARK scripts detected in $PATH. Cannot download database.')
def mash(self, databasepath):
"""
Download the pre-computed sketch of the RefSeq database, and compress it with gzip
:param databasepath: path to use to save the database
"""
logging.info('Downloading pre-computed RefSeq MASH sketches')
# Create the folder in which the database is to be stored
databasepath = self.create_database_folder(databasepath=databasepath,
database='mash')
output_file = os.path.join(databasepath, 'assembly_summary_refseq.txt')
# Download the assembly summary RefSeq document
if not os.path.isfile(output_file):
self.database_download(output_file=output_file,
database_path=databasepath,
target_url='ftp://ftp.ncbi.nih.gov/genomes/ASSEMBLY_REPORTS/'
'assembly_summary_refseq.txt'
)
# Set the call to create the database
output_file = os.path.join(databasepath, 'RefSeqSketchesDefaults.msh')
# Download the database
if not os.path.isfile(output_file):
self.database_download(output_file=output_file,
database_path=databasepath,
target_url='https://obj.umiacs.umd.edu/marbl_publications/mash/refseq.genomes.k21s1000.msh',
complete=True)
def univec(self, databasepath):
"""
Download the UniVec core database
:param databasepath: path to use to save the database
"""
logging.info('Downloading univec database')
databasepath = self.create_database_folder(databasepath, 'univec')
# Set the name of the output file
outputfile = os.path.join(databasepath, 'UniVec_core.tfa')
target_url = 'ftp://ftp.ncbi.nlm.nih.gov/pub/UniVec/UniVec_Core'
self.database_download(output_file=outputfile,
target_url=target_url,
database_path=databasepath)
# Create a copy of the file with a .fasta extension
if os.path.isfile(outputfile):
renamed = os.path.splitext(outputfile)[0] + '.fasta'
shutil.copy(outputfile, renamed)
@staticmethod
def url_request(target_url, output_file, context):
"""
Use urllib to download the requested file from the target URL. Use the click progress bar to print download
progress
:param target_url: URL from which the file is to be downloaded
:param output_file: Name and path of local copy of file
:param context: ssl.create_default_context() with check_hostname set to False, and verify_mode
set to ssl.CERT_NONE
"""
# Create the request
request = urllib.request.urlopen(target_url, context=context)
# Open the destination file to write
with open(output_file, 'wb') as targets:
# Calculate the total file size - will be used by the progress bar
total_length = int(request.headers.get('content-length'))
# Create a click progress bar using the total length calculated above
with click.progressbar(length=total_length,
label='Downloading files') as bar:
while True:
# Break up the download into chunks of 4096 bytes
data = request.read(4096)
# Break the loop when the download finishes/errors
if not data:
break
# Write the chunk to file
targets.write(data)
# Update the progress bar
bar.update(len(data))
def custom_databases(self, databasepath, database_name, download_id, f_type='files', post_id=None,
compression='tar', nested=False, complete=False):
"""
Download and extract a .tar.gz file from figshare
:param databasepath: Name and path of where the database files are to be downloaded
:param database_name: Name of the database e.g. sipprverse
:param download_id: Figshare ID of the targets file
:param f_type: STR MOB-suite databases have the 'articles' keyword in the figshare URL, while OLC databases
all have the 'files' keyword
:param post_id: STR MOB-suite databases have 'versions/1' appended at the end of the figshare URL.
:param compression: STR MOB-suite databases are .zip files, while OLC databases are .tar.gz
:param nested: Boolean of whether the targets file has nested folders that must be accounted for
:param complete: Boolean of whether the completefile should be completed
"""
logging.info('Downloading {} databases'.format(database_name))
# ConFindr has a nested 'databases' folder
if nested:
databasepath = os.path.join(databasepath, database_name)
# Set the name and path of the file that is created when the download is successful
completefile = os.path.join(databasepath, 'complete')
# Create the database folder if necessary
make_path(databasepath)
# Set the name of the targets file
tar_file = os.path.join(databasepath, download_id)
# Create the target download call
target_url = 'https://ndownloader.figshare.com/{type}/{id}'.format(type=f_type,
id=download_id)
if post_id:
target_url += '/{post}'.format(post=post_id)
logging.debug(target_url)
if not os.path.isfile(completefile):
self.url_request(target_url=target_url,
output_file=tar_file,
context=self.context)
# Decompress the file
self.decompress(databasepath=databasepath,
database_name=database_name,
compression=compression,
compressed_file=tar_file)
# Create the completefile
if complete:
with open(completefile, 'w') as complete:
complete.write('')
@staticmethod
def decompress(databasepath, database_name, compression, compressed_file):
"""
Decompress the provided file using the appropriate library
:param databasepath: Name and path of where the database files are to be downloaded
:param database_name: Name of the database e.g. sipprverse
:param compression: STR MOB-suite databases are .zip files, while OLC databases are .tar.gz
:param compressed_file: Compressed file to process
"""
# Extract the databases from the archives
if os.path.isfile(compressed_file):
if compression == 'tar':
logging.info('Extracting {dbname} from archives'.format(dbname=database_name))
with tarfile.open(compressed_file, 'r') as tar:
# Decompress the archive
tar.extractall(path=databasepath)
elif compression == 'gz':
with gzip.open(compressed_file, 'rb') as gz:
file_name = os.path.basename(os.path.splitext(compressed_file)[0])
output_file = os.path.join(databasepath,
database_name,
file_name)
logging.info('Extracting {file_name} from archives'.format(file_name=file_name))
with open(output_file, 'wb') as output:
shutil.copyfileobj(gz, output)
else:
logging.info('Extracting {dbname} from archives'.format(dbname=database_name))
with zipfile.ZipFile(compressed_file, 'r') as zip_file:
zip_file.extractall(path=databasepath)
# Delete the archive file
os.remove(compressed_file)
def cge_db_downloader(self, databasepath, analysistype, dbname, extension_in='fsa', extension_out='tfa'):
"""
Clones CGE databases into appropriate folder. Creates properly formatted file with non-redundant sequences
:param databasepath: path to use to save the database
:param analysistype: The name of the database folder to create
:param dbname: The name of the database repository on bitbucket
:param extension_in: The file extension of the FASTA files in the database
:param extension_out: The desired extension for the FASTA files
"""
logging.info('Downloading {} database'.format(analysistype))
if analysistype == 'serosippr':
databasepath = os.path.join(databasepath, analysistype, 'Escherichia')
else:
databasepath = os.path.join(databasepath, analysistype)
targetcall = 'git clone https://bitbucket.org/genomicepidemiology/{db}.git {atype}'\
.format(db=dbname,
atype=databasepath)
# Download the database
self.database_clone(targetcall, databasepath)
# Create a variable to use in creating the combined targets file
extension = extension_in
# If the extension_out is different than extension_in, rename the files to have the appropriate extension
if extension_in != extension_out:
# Create a list of all the FASTA files with the input extension
fastafiles = glob(os.path.join(databasepath, '*.{ext}'.format(ext=extension_in)))
for fasta in fastafiles:
# Split the extension
filename = os.path.splitext(fasta)[0]
# Rename the files
os.rename(fasta, '{fn}.{ex}'.format(fn=filename,
ex=extension_out))
# Update the variable to use when creating the combined targets file
extension = extension_out
# Create the combined targets file to use in the OLC pipelines
if not os.path.isfile(os.path.join(databasepath, 'combinedtargets.fasta')):
# Create the combinedtargets.fasta file - this will combine all the FASTA files in the downloaded database
# into a properly-formatted, non-redundant FASTA database
databasefiles = glob(os.path.join(databasepath, '*.{ext}'.format(ext=extension)))
combinetargets(databasefiles, databasepath)
@staticmethod
def create_database_folder(databasepath, database):
"""
Create an appropriately named folder in which the database is to be stored
:param databasepath: path to use to save the database
:param database: the name of the database folder to create
:return: the absolute path of the folder
"""
logging.info('Setting up {} database'.format(database))
# Define the path to store the database files
databasepath = os.path.join(databasepath, database)
# Create the path as required
make_path(databasepath)
return databasepath
def database_download(self, output_file, target_url, database_path, complete=False):
"""
Check to see if the download has previously been completed. Run the download if necessary. Create the
completefile if required
:param output_file: Name and path of local copy of downloaded target
:param target_url: URL of the target to download
:param database_path: Path on the local filesystem in which the file is to be downloaded
:param complete: Boolean to determine whether a completefile should be created
"""
# Create a file to store the logs; it will be used to determine if the database was downloaded and set-up
completefile = os.path.join(database_path, 'complete')
if not os.path.isfile(completefile):
self.url_request(target_url=target_url,
output_file=output_file,
context=self.context)
if complete:
# Create the completefile
with open(completefile, 'w') as complete:
complete.write('')
@staticmethod
def database_clone(targetcall, databasepath, complete=False):
"""
Checks to see if the database has already been downloaded. If not, runs the system call to
download the database, and writes stdout and stderr to the logfile
:param targetcall: system call to download, and possibly set-up the database
:param databasepath: absolute path of the database
:param complete: boolean variable to determine whether the complete file should be created
"""
# Create a file to store the logs; it will be used to determine if the database was downloaded and set-up
completefile = os.path.join(databasepath, 'complete')
# Run the system call if the database is not already downloaded
if not os.path.isfile(completefile):
out, err = run_subprocess(targetcall)
if complete:
# Create the database completeness assessment file and populate it with the out and err streams
with open(completefile, 'w') as complete:
complete.write(out)
complete.write(err)
def download_date(self):
"""
Write the current date to file.
"""
with open(os.path.join(self.databasepath, 'download_date'), 'w') as download:
download.write('{:%Y-%m-%d}'.format(datetime.today()))
def __init__(self, databasepath=None, debug=False, credentials=None, overwrite=False, enterobase=False):
# Initialise the custom logging handler
SetupLogging(debug)
# Create class variables from arguments
if databasepath.startswith('~'):
self.databasepath = os.path.abspath(os.path.expanduser(os.path.join(databasepath)))
else:
self.databasepath = os.path.abspath(os.path.join(databasepath))
make_path(self.databasepath)
assert os.path.isdir(self.databasepath)
if credentials:
if credentials.startswith('~'):
self.credentials = os.path.abspath(os.path.expanduser(os.path.join(credentials)))
else:
self.credentials = os.path.abspath(os.path.join(credentials))
self.overwrite = overwrite
assert type(self.overwrite) is bool, 'Overwrite variable must be a Boolean. You provided "{var}" with ' \
'type {type}'.format(var=self.overwrite,
type=type(self.overwrite))
self.clarkpath = str()
# Enterobase
self.enterobase = enterobase
self.genus_dict = {
'Escherichia': 'ecoli',
'Salmonella': 'senterica',
'Yersinia': 'yersinia'
}
        # Create a context to allow disabling SSL verification
self.context = ssl.create_default_context()
self.context.check_hostname = False
self.context.verify_mode = ssl.CERT_NONE
# If the script is called from the command line, then call the argument parser
if __name__ == '__main__':
# Parser for arguments
parser = ArgumentParser(description='Downloads and sets up required databases')
parser.add_argument('-d', '--databasepath',
required=True,
help='Absolute path to location to store database files. Include any version numbers if '
'required.')
    parser.add_argument('-c', '--credentials',
help='Name and path of folder containing required rMLST credentials.')
parser.add_argument('-o', '--overwrite',
action='store_true',
help='Optionally allow for the overwriting of database files in the databasepath. Defaults to '
'False, so if the output folder exists, that part of the download will be skipped.')
parser.add_argument('-s', '--sipprverse_full',
action='store_true',
help='Optionally only download the databases used in the sipprverse. These include: genesippr, '
                             'GDCS, sixteenS, ConFindr, MASH, MLST, rMLST, ResFinder, VirulenceFinder, '
'and SerotypeFinder')
parser.add_argument('-m', '--sipprverse_method',
action='store_true',
help='Optionally only download the databases used by the sipprverse method: genesippr, '
'sixteenS, GDCS, MASH, and ConFindr')
parser.add_argument('-e', '--enterobase',
action='store_true',
help='Use Enterobase to download MLST definitions for Escherichia, Salmonella, and Yersinia, '
'as well as cgMLST schemes for Escherichia and Yersinia. Disabled by default')
parser.add_argument('-v', '--verbose',
action='store_true',
help='Option to include debug level logging messages. Default is false')
parser.add_argument('-r', '--rmlst',
action='store_true',
help='Optionally only download the rMLST database')
parser.add_argument('-res', '--resfinder',
action='store_true',
help='Only download the Resfinder database')
# Get the arguments into an object
arguments = parser.parse_args()
# Create an object
pipeline = DatabaseSetup(databasepath=arguments.databasepath,
debug=arguments.verbose,
credentials=arguments.credentials,
overwrite=arguments.overwrite,
enterobase=arguments.enterobase)
# Run the appropriate analyses
if arguments.resfinder:
pipeline.cge_db_downloader(
databasepath=pipeline.databasepath,
analysistype='resfinder',
dbname='resfinder_db'
)
raise SystemExit
if not arguments.credentials:
logging.error('Please provide the name and path of the folder containing your rMLST credentials with the -c '
'argument.')
raise SystemExit
if arguments.sipprverse_full:
pipeline.sipprverse_full()
elif arguments.sipprverse_method:
pipeline.sipprverse_method()
elif arguments.rmlst:
pipeline.rmlst_method()
else:
pipeline.cowbat()
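# Example invocations (the script name and paths are hypothetical; the flags are
# the ones defined by the parser above):
#   python database_setup.py -d /data/databases -c /data/rmlst_credentials
#   python database_setup.py -d /data/databases -res        # ResFinder only, then exit
#   python database_setup.py -d /data/databases -c /data/rmlst_credentials -m -v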
| StarcoderdataPython |
180723 | """Command models to wait for target temperature of a Temperature Module."""
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from typing_extensions import Literal, Type
from pydantic import BaseModel, Field
from ..command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
if TYPE_CHECKING:
from opentrons.protocol_engine.state import StateView
from opentrons.protocol_engine.execution import EquipmentHandler
AwaitTemperatureCommandType = Literal["temperatureModule/awaitTemperature"]
class AwaitTemperatureParams(BaseModel):
"""Input parameters to wait for a Temperature Module's target temperature."""
moduleId: str = Field(..., description="Unique ID of the Temperature Module.")
class AwaitTemperatureResult(BaseModel):
"""Result data from waiting for a Temperature Module's target temperature."""
class AwaitTemperatureImpl(
AbstractCommandImpl[AwaitTemperatureParams, AwaitTemperatureResult]
):
"""Execution implementation of a Temperature Module's await temperature command."""
def __init__(
self,
state_view: StateView,
equipment: EquipmentHandler,
**unused_dependencies: object,
) -> None:
self._state_view = state_view
self._equipment = equipment
async def execute(self, params: AwaitTemperatureParams) -> AwaitTemperatureResult:
"""Wait for a Temperature Module's target temperature."""
# Allow propagation of ModuleNotLoadedError and WrongModuleTypeError.
module_substate = self._state_view.modules.get_temperature_module_substate(
module_id=params.moduleId
)
# Raises error if no target temperature
target_temp = module_substate.get_plate_target_temperature()
# Allow propagation of ModuleNotAttachedError.
temp_hardware_module = self._equipment.get_module_hardware_api(
module_substate.module_id
)
if temp_hardware_module is not None:
await temp_hardware_module.await_temperature(
awaiting_temperature=target_temp
)
return AwaitTemperatureResult()
class AwaitTemperature(BaseCommand[AwaitTemperatureParams, AwaitTemperatureResult]):
"""A command to set a Temperature Module's target temperature."""
commandType: AwaitTemperatureCommandType = "temperatureModule/awaitTemperature"
params: AwaitTemperatureParams
result: Optional[AwaitTemperatureResult]
_ImplementationCls: Type[AwaitTemperatureImpl] = AwaitTemperatureImpl
class AwaitTemperatureCreate(BaseCommandCreate[AwaitTemperatureParams]):
"""A request to create a Temperature Module's set temperature command."""
commandType: AwaitTemperatureCommandType
params: AwaitTemperatureParams
_CommandCls: Type[AwaitTemperature] = AwaitTemperature
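# Construction sketch (the module ID is hypothetical): the pydantic models above
# can be instantiated directly, e.g. when assembling a command request by hand.
#   params = AwaitTemperatureParams(moduleId="temperature-module-1")
#   request = AwaitTemperatureCreate(
#       commandType="temperatureModule/awaitTemperature", params=params
#   )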
| StarcoderdataPython |
1652892 | nome = str(input('Digite o seu nome: ')).lower().strip()
if nome == 'talyson':
print('Que nome bonito!')
#elif nome == 'andré' or nome =='alves':
# print('Nome muito bonito também!')
elif nome in '<NAME>':
print('Nome daora, parça')
print(f'Tenha um ótimo dia, {nome}!')
| StarcoderdataPython |
94984 | <reponame>prateeksingh0001/FlexNeuART<filename>scripts/py_flexneuart/utils.py<gh_stars>0
"""Misc FlexNeuART utils."""
from jnius import autoclass
JHashMap = autoclass('java.util.HashMap')
def dict_to_hash_map(dict_obj):
"""Convert a Python dictionary to a Java HashMap object. Caution:
values in the dictionary need to be either simple types, or
proper Java object references created through jnius autoclass.
:param dict_obj: a Python dictionary whose values and keys are either simple types
or Java objects creates via jnius autoclass
:return: a Java HashMap
"""
res = JHashMap()
for k, v in dict_obj.items():
res.put(k, v)
return res
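# Usage sketch: keys/values must be simple types or jnius-created Java objects,
# and importing jnius starts a JVM, so this only runs where pyjnius is configured.
if __name__ == '__main__':
    query_params = dict_to_hash_map({'query': 'protein binding', 'topK': 10})
    print(query_params.get('topK'))  # prints 10, read back through the Java HashMap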
| StarcoderdataPython |
3277250 | from utils.db.mongo_orm import *
# The class name defines the collection
class TestReportDetail(Model):
class Meta:
database = db
collection = 'testReportDetail'
    # Fields
_id = ObjectIdField() # reportDetailId
reportId = ObjectIdField()
projectId = ObjectIdField()
testSuiteId = ObjectIdField()
testCaseId = ObjectIdField()
resultDetail = DictField()
createAt = DateField()
def __str__(self):
return "reportId:{} - testSuiteId:{} - testCaseId:{}".format(self.reportId, self.testSuiteId, self.testCaseId)
if __name__ == '__main__':
pass
| StarcoderdataPython |
84197 | <reponame>f1uzz/shadow<filename>shadow/__main__.py
from shadow import main
main()
| StarcoderdataPython |
1702544 | <reponame>Epistoteles/Modergator<filename>ocr-api/text_scene_detection_utility.py
from model.craft import CRAFT
import model.craft_utils as craft_utils
import torch
from collections import OrderedDict
import torch.backends.cudnn as cudnn
import time
import numpy as np
import model.imgproc as imgproc
import cv2
from torch.autograd import Variable
from operator import itemgetter
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def build_model():
# load net
net = CRAFT() # initialize
# Load weights
net.load_state_dict(copyStateDict(torch.load('ocr-api/model/craft_mlt_25k.pth', map_location='cpu')))
net = net.to(device)
net = torch.nn.DataParallel(net)
cudnn.benchmark = False
return net
def text_detection(net, image, text_threshold, link_threshold, low_text, cuda, poly, refine_net=None):
t0 = time.time()
# resize
img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, square_size=1280, interpolation=cv2.INTER_LINEAR, mag_ratio=1.5)
ratio_h = ratio_w = 1 / target_ratio
# preprocessing
x = imgproc.normalizeMeanVariance(img_resized)
x = torch.from_numpy(x).permute(2, 0, 1) # [h, w, c] to [c, h, w]
x = Variable(x.unsqueeze(0)) # [c, h, w] to [b, c, h, w]
#if cuda:
# x = x.cuda()
# forward pass
with torch.no_grad():
y, feature = net(x)
# make score and link map
score_text = y[0,:,:,0].cpu().data.numpy()
score_link = y[0,:,:,1].cpu().data.numpy()
# refine link
if refine_net is not None:
with torch.no_grad():
y_refiner = refine_net(y, feature)
score_link = y_refiner[0,:,:,0].cpu().data.numpy()
t0 = time.time() - t0
t1 = time.time()
# Post-processing
boxes, polys = craft_utils.getDetBoxes(score_text, score_link, text_threshold, link_threshold, low_text, poly)
# coordinate adjustment
boxes = craft_utils.adjustResultCoordinates(boxes, ratio_w, ratio_h)
polys = craft_utils.adjustResultCoordinates(polys, ratio_w, ratio_h)
for k in range(len(polys)):
if polys[k] is None: polys[k] = boxes[k]
t1 = time.time() - t1
# render results (optional)
render_img = score_text.copy()
render_img = np.hstack((render_img, score_link))
ret_score_text = imgproc.cvt2HeatmapImg(render_img)
return boxes, polys, ret_score_text
def copyStateDict(state_dict):
if list(state_dict.keys())[0].startswith("module"):
start_idx = 1
else:
start_idx = 0
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = ".".join(k.split(".")[start_idx:])
new_state_dict[name] = v
return new_state_dict
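# Minimal usage sketch: the image path is hypothetical, the CRAFT weight file must
# exist at the path hard-coded in build_model(), and RGB input is assumed (as in
# the upstream CRAFT repository).
if __name__ == '__main__':
    net = build_model()
    image = cv2.cvtColor(cv2.imread('meme.jpg'), cv2.COLOR_BGR2RGB)
    boxes, polys, heatmap = text_detection(net, image, text_threshold=0.7,
                                           link_threshold=0.4, low_text=0.4,
                                           cuda=False, poly=False)
    print('detected {} text regions'.format(len(boxes)))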
| StarcoderdataPython |
3310007 | <filename>LeetCodeSolutions/python/49_Group_Anagrams.py
class Solution(object):
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
d = {}
for s in sorted(strs):
key = tuple(sorted(s))
d[key] = d.get(key, []) + [s]
        return list(d.values()) | StarcoderdataPython |
161285 | <reponame>biothings/mychem.info<filename>src/hub/dataload/sources/unii/unii_upload.py
import os
import glob
from .unii_parser import load_data
from hub.dataload.uploader import BaseDrugUploader
import biothings.hub.dataload.storage as storage
from biothings.hub.datatransform import DataTransformMDB
from hub.datatransform.keylookup import MyChemKeyLookup
SRC_META = {
"url": 'https://fdasis.nlm.nih.gov/srs/',
"license": "public domain",
"license_url" : "https://www.nlm.nih.gov/copyright.html",
"license_url_short": "http://bit.ly/2Pg8Oo9"
}
class UniiUploader(BaseDrugUploader):
name = "unii"
storage_class = storage.IgnoreDuplicatedStorage
__metadata__ = {"src_meta" : SRC_META}
keylookup = MyChemKeyLookup([('inchikey', 'unii.inchikey'),
('pubchem', 'unii.pubchem'),
('unii', 'unii.unii')],
copy_from_doc=True,
)
def load_data(self,data_folder):
self.logger.info("Load data from '%s'" % data_folder)
record_files = glob.glob(os.path.join(data_folder,"*Records*.txt"))
assert len(record_files) == 1, "Expecting one record.txt file, got %s" % repr(record_files)
input_file = record_files.pop()
assert os.path.exists(input_file), "Can't find input file '%s'" % input_file
# disable keylookup - unii is a base collection used for drugname lookup
# and should be loaded first, (keylookup commented out)
# return self.keylookup(load_data)(input_file)
return load_data(input_file)
def post_update_data(self,*args,**kwargs):
for field in ("unii.unii","unii.preferred_term"):
self.logger.info("Indexing '%s'" % field)
self.collection.create_index(field,background=True)
@classmethod
def get_mapping(klass):
mapping = {
"unii": {
"properties": {
"unii": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
'copy_to': ['all'],
},
"preferred_term": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"registry_number": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"ec": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"ncit": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"rxcui": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"itis": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"ncbi": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"plants": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"grin": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"inn_id": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"molecular_formula": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"inchikey": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"smiles": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"unii_type": {
"type": "text"
},
"pubchem": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"mpns": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
}
}
}
}
return mapping
| StarcoderdataPython |
3353516 | import cProfile
from agents.agent_mcts import generate_move
from main import human_vs_agent
cProfile.run(
"human_vs_agent(generate_move, generate_move)", "mmab.dat"
)
import pstats
# from pstats import SortKey
with open("output_time.txt", "w") as f:
p = pstats.Stats("mmab.dat", stream=f)
p.sort_stats("time").print_stats()
with open("output_calls.txt", "w") as f:
p = pstats.Stats("mmab.dat", stream=f)
p.sort_stats("calls").print_stats()
| StarcoderdataPython |
53612 | <gh_stars>0
#!/usr/bin/env python
import sys, os, time, urllib, urllib.request, shutil, re, lxml, threading, queue, multiprocessing
import hashlib
from bs4 import BeautifulSoup
from urllib.parse import urlparse
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.expected_conditions import staleness_of
# 2015Webcast.py gets all the information for 2016 webcasts in the following format:
# name, youtubeUp, sha256, dir, vidlink
# and stores the information into a csv format in a text file
#TODO youtube-Upload: unlisted, no votes, comments disabled... anything else?
#url = "http://matterhorn2-player-1.lt.ucsc.edu:8080/engage/ui/index.html" #2015
# init()
def init():
# 2016 webcast gallery link
root_url = "http://matterhorn-player-1.lt.ucsc.edu:8080/engage/ui/index.html"
# load chrome webdriver
chrome = webdriver.Chrome()
chrome.get(root_url)
return chrome
# Open next page
def next_page(chrome, wait):
try:
# next button clickable
wait.until(EC.element_to_be_clickable((By.LINK_TEXT, 'Next')))
chrome.find_element_by_link_text("Next").click() # click next button
except Exception as e:
print("next_page error:", e, flush=True)
return 1
else:
return 0
def get_webcast_list(src):
webcastlist = []
html = BeautifulSoup(src, 'lxml')
for table in html.find_all('table'):
for a_tag in table.find_all('a'):
if a_tag.text != "":
# adds (title, link) to webcastlist
title = a_tag.text
link = real_link(a_tag.get('href'))
info = (title, link)
webcastlist.append(info)
return webcastlist
def real_link(suffix):
base_url = "http://matterhorn-player-1.lt.ucsc.edu:8080/engage/ui/"
return base_url + suffix
# main
def main(argv):
chrome = init()
# max time out = 60 seconds
wait = WebDriverWait(chrome, 60)
time.sleep(5)
file = open('links.txt', 'a')
while True:
info = get_webcast_list(chrome.page_source)
for title, link in info:
file.write(title + ', ' + link + '\n')
print('writing...\n' + title + ', ' + link + '\n', flush=True)
if next_page(chrome, wait) == 1:
            break
else:
time.sleep(3)
file.close()
chrome.close()
if __name__ == "__main__":
main(sys.argv[1:])
| StarcoderdataPython |
1775803 | <filename>backend/api/auth/serializers.py
from django.db.migrations import serializer
from rest_framework import serializers
from account.models import User
class SerializerUser(serializers.ModelSerializer):
"""
    Serializer class for the User model
"""
class Meta:
model = User
exclude = ('password', 'user_permissions', 'groups', 'is_staff', 'is_active', 'is_superuser')
class SerializerUserCreateUpdate(serializers.ModelSerializer):
"""
    Serializer class for the User model, used by the user creation API
"""
password = serializers.CharField(
write_only=True
)
class Meta:
model = User
exclude = ('user_permissions', 'groups', 'is_staff', 'is_active', 'is_superuser')
    def update(self, instance, validated_data):
        # Apply the non-password fields normally, then hash the password if one was sent
        password = validated_data.pop('password', None)
        instance = super(SerializerUserCreateUpdate, self).update(instance, validated_data)
        if password:
            instance.set_password(password)
            instance.save()
        return instance
def create(self, validated_data):
user = super(SerializerUserCreateUpdate, self).create(validated_data)
if 'password' in validated_data:
user.set_password(validated_data['password'])
user.save()
return user | StarcoderdataPython |
4832591 | # -*- coding:utf-8 -*-
"""
Simple static page generator.
Uses Jinja2 to compile templates.
"""
from __future__ import absolute_import, print_function
import inspect
import logging
import os
import re
import shutil
import warnings
from jinja2 import Environment, FileSystemLoader
from .reloader import Reloader
def _has_argument(func):
"""Test whether a function expects an argument.
:param func:
The function to be tested for existence of an argument.
"""
if hasattr(inspect, 'signature'):
# New way in python 3.3
sig = inspect.signature(func)
return bool(sig.parameters)
else:
# Old way
return bool(inspect.getargspec(func).args)
class Site(object):
"""The Site object.
:param environment:
A :class:`jinja2.Environment`.
:param searchpath:
A string representing the name of the directory to search for
templates.
:param contexts:
A list of `regex, context` pairs. Each context is either a dictionary
or a function that takes either no argument or or the current template
as its sole argument and returns a dictionary. The regex, if matched
against a filename, will cause the context to be used.
:param rules:
A list of `regex, function` pairs used to override template
compilation. `regex` must be a regex which if matched against a
filename will cause `function` to be used instead of the default.
`function` must be a function which takes a Jinja2 Environment, the
filename, and the context and renders a template.
:param encoding:
The encoding of templates to use.
:param logger:
A logging.Logger object used to log events.
:param staticpaths:
List of directory names to get static files from (relative to
searchpath).
:param mergecontexts:
A boolean value. If set to ``True``, then all matching regex from the
contexts list will be merged (in order) to get the final context.
Otherwise, only the first matching regex is used. Defaults to
``False``.
"""
def __init__(self,
environment,
searchpath,
outpath,
encoding,
logger,
contexts=None,
rules=None,
staticpaths=None,
mergecontexts=False,
):
self._env = environment
self.searchpath = searchpath
self.outpath = outpath
self.encoding = encoding
self.logger = logger
self.contexts = contexts or []
self.rules = rules or []
self.staticpaths = staticpaths
self.mergecontexts = mergecontexts
@property
def template_names(self):
return self._env.list_templates(filter_func=self.is_template)
@property
def templates(self):
"""Generator for templates."""
for template_name in self.template_names:
yield self.get_template(template_name)
@property
def static_names(self):
return self._env.list_templates(filter_func=self.is_static)
def get_template(self, template_name):
"""Get a :class:`jinja2.Template` from the environment.
:param template_name: A string representing the name of the template.
"""
return self._env.get_template(template_name)
def get_context(self, template):
"""Get the context for a template.
If no matching value is found, an empty context is returned.
Otherwise, this returns either the matching value if the value is
dictionary-like or the dictionary returned by calling it with
*template* if the value is a function.
If several matching values are found, the resulting dictionaries will
be merged before being returned if mergecontexts is True. Otherwise,
only the first matching value is returned.
:param template: the template to get the context for
"""
context = {}
for regex, context_generator in self.contexts:
if re.match(regex, template.name):
if inspect.isfunction(context_generator):
if _has_argument(context_generator):
context.update(context_generator(template))
else:
context.update(context_generator())
else:
context.update(context_generator)
if not self.mergecontexts:
break
return context
def get_rule(self, template_name):
"""Find a matching compilation rule for a function.
Raises a :exc:`ValueError` if no matching rule can be found.
:param template_name: the name of the template
"""
for regex, render_func in self.rules:
if re.match(regex, template_name):
return render_func
raise ValueError("no matching rule")
def is_static(self, filename):
"""Check if a file is a static file (which should be copied, rather
than compiled using Jinja2).
A file is considered static if it lives in any of the directories
specified in ``staticpaths``.
:param filename: the name of the file to check
"""
if self.staticpaths is None:
# We're not using static file support
return False
for path in self.staticpaths:
if filename.startswith(path):
return True
return False
def is_partial(self, filename):
"""Check if a file is a partial.
Partial files are not rendered, but they are used in rendering
templates.
A file is considered a partial if it or any of its parent directories
are prefixed with an ``'_'``.
:param filename: the name of the file to check
"""
return any((x.startswith("_") for x in filename.split(os.path.sep)))
def is_ignored(self, filename):
"""Check if a file is an ignored file.
Ignored files are neither rendered nor used in rendering templates.
A file is considered ignored if it or any of its parent directories
are prefixed with an ``'.'``.
:param filename: the name of the file to check
"""
return any((x.startswith(".") for x in filename.split(os.path.sep)))
def is_template(self, filename):
"""Check if a file is a template.
A file is a considered a template if it is neither a partial nor
ignored.
:param filename: the name of the file to check
"""
if self.is_partial(filename):
return False
if self.is_ignored(filename):
return False
if self.is_static(filename):
return False
return True
def _ensure_dir(self, template_name):
"""Ensure the output directory for a template exists."""
head = os.path.dirname(template_name)
if head:
file_dirpath = os.path.join(self.outpath, head)
if not os.path.exists(file_dirpath):
os.makedirs(file_dirpath)
def render_template(self, template, context=None, filepath=None):
"""Render a single :class:`jinja2.Template` object.
If a Rule matching the template is found, the rendering task is
delegated to the rule.
:param template:
A :class:`jinja2.Template` to render.
:param context:
Optional. A dictionary representing the context to render
*template* with. If no context is provided, :meth:`get_context` is
used to provide a context.
:param filepath:
Optional. A file or file-like object to dump the complete template
stream into. Defaults to to ``os.path.join(self.outpath,
template.name)``.
"""
self.logger.info("Rendering %s..." % template.name)
if context is None:
context = self.get_context(template)
try:
rule = self.get_rule(template.name)
except ValueError:
self._ensure_dir(template.name)
if filepath is None:
filepath = os.path.join(self.outpath, template.name)
template.stream(**context).dump(filepath, self.encoding)
else:
rule(self, template, **context)
def render_templates(self, templates, filepath=None):
"""Render a collection of :class:`jinja2.Template` objects.
:param templates:
A collection of Templates to render.
:param filepath:
Optional. A file or file-like object to dump the complete template
stream into. Defaults to to ``os.path.join(self.outpath,
template.name)``.
"""
for template in templates:
            self.render_template(template, filepath=filepath)
def copy_static(self, files):
for f in files:
input_location = os.path.join(self.searchpath, f)
output_location = os.path.join(self.outpath, f)
print("Copying %s to %s." % (f, output_location))
self._ensure_dir(f)
shutil.copy2(input_location, output_location)
def get_dependencies(self, filename):
"""Get a list of files that depends on the file named *filename*.
:param filename: the name of the file to find dependencies of
"""
if self.is_partial(filename):
return self.templates
elif self.is_template(filename):
return [self.get_template(filename)]
elif self.is_static(filename):
return [filename]
else:
return []
def render(self, use_reloader=False):
"""Generate the site.
:param use_reloader: if given, reload templates on modification
"""
self.render_templates(self.templates)
self.copy_static(self.static_names)
if use_reloader:
self.logger.info("Watching '%s' for changes..." %
self.searchpath)
self.logger.info("Press Ctrl+C to stop.")
Reloader(self).watch()
def __repr__(self):
return "Site('%s', '%s')" % (self.searchpath, self.outpath)
class Renderer(Site):
def __init__(self, *args, **kwargs):
warnings.warn("Renderer was renamed to Site.")
        super(Renderer, self).__init__(*args, **kwargs)
def run(self, use_reloader=False):
return self.render(use_reloader)
def make_site(searchpath="templates",
outpath=".",
contexts=None,
rules=None,
encoding="utf8",
extensions=None,
staticpaths=None,
filters=None,
env_globals=None,
env_kwargs=None,
mergecontexts=False):
"""Create a :class:`Site <Site>` object.
:param searchpath:
A string representing the absolute path to the directory that the Site
should search to discover templates. Defaults to ``'templates'``.
If a relative path is provided, it will be coerced to an absolute path
by prepending the directory name of the calling module. For example, if
you invoke staticjinja using ``python build.py`` in directory ``/foo``,
then *searchpath* will be ``/foo/templates``.
:param outpath:
A string representing the name of the directory that the Site
should store rendered files in. Defaults to ``'.'``.
:param contexts:
A list of *(regex, context)* pairs. The Site will render templates
whose name match *regex* using *context*. *context* must be either a
dictionary-like object or a function that takes either no arguments or
a single :class:`jinja2.Template` as an argument and returns a
dictionary representing the context. Defaults to ``[]``.
:param rules:
A list of *(regex, function)* pairs. The Site will delegate
rendering to *function* if *regex* matches the name of a template
during rendering. *function* must take a :class:`jinja2.Environment`
object, a filename, and a context as parameters and render the
template. Defaults to ``[]``.
:param encoding:
A string representing the encoding that the Site should use when
rendering templates. Defaults to ``'utf8'``.
:param extensions:
A list of :ref:`Jinja extensions <jinja-extensions>` that the
:class:`jinja2.Environment` should use. Defaults to ``[]``.
:param staticpaths:
List of directories to get static files from (relative to searchpath).
Defaults to ``None``.
:param filters:
A dictionary of Jinja2 filters to add to the Environment.
Defaults to ``{}``.
:param env_globals:
A mapping from variable names that should be available all the time to
their values. Defaults to ``{}``.
:param env_kwargs:
A dictionary that will be passed as keyword arguments to the
jinja2 Environment. Defaults to ``{}``.
:param mergecontexts:
A boolean value. If set to ``True``, then all matching regex from the
contexts list will be merged (in order) to get the final context.
Otherwise, only the first matching regex is used. Defaults to
``False``.
"""
# Coerce search to an absolute path if it is not already
if not os.path.isabs(searchpath):
# TODO: Determine if there is a better way to write do this
calling_module = inspect.getmodule(inspect.stack()[-1][0])
# Absolute path to project
project_path = os.path.realpath(os.path.dirname(
calling_module.__file__))
searchpath = os.path.join(project_path, searchpath)
if env_kwargs is None:
env_kwargs = {}
env_kwargs['loader'] = FileSystemLoader(searchpath=searchpath,
encoding=encoding,
followlinks=True)
env_kwargs.setdefault('extensions', extensions or [])
environment = Environment(**env_kwargs)
if filters:
        environment.filters.update(filters)
if env_globals:
environment.globals.update(env_globals)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
return Site(environment,
searchpath=searchpath,
outpath=outpath,
encoding=encoding,
logger=logger,
rules=rules,
contexts=contexts,
staticpaths=staticpaths,
mergecontexts=mergecontexts,
)
def make_renderer(*args, **kwargs):
warnings.warn("make_renderer was renamed to make_site.")
return make_site(*args, **kwargs)
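# Example usage sketch (directory and context values are hypothetical): build a
# site from ./templates into ./build, giving every HTML template the same context.
if __name__ == "__main__":
    site = make_site(searchpath="templates",
                     outpath="build",
                     contexts=[(r".*\.html", {"title": "My site"})])
    site.render(use_reloader=False)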
| StarcoderdataPython |
157275 | import os
from pathlib import Path
import torch
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import logging
from data_aug.utils import set_seeds
from data_aug.models import ResNet18
from data_aug.datasets import get_cifar10
from bnn_priors.third_party.calibration_error import ece
@torch.no_grad()
def test_bma(net, data_loader, samples_dir, nll_criterion=None, device=None):
net.eval()
ens_logits = []
ens_nll = []
for sample_path in tqdm(Path(samples_dir).rglob('*.pt'), leave=False):
net.load_state_dict(torch.load(sample_path))
all_logits = []
all_Y = []
all_nll = torch.tensor(0.0).to(device)
for X, Y in tqdm(data_loader, leave=False):
X, Y = X.to(device), Y.to(device)
_logits = net(X)
all_logits.append(_logits)
all_Y.append(Y)
if nll_criterion is not None:
all_nll += nll_criterion(_logits, Y)
all_logits = torch.cat(all_logits)
all_Y = torch.cat(all_Y)
ens_logits.append(all_logits)
ens_nll.append(all_nll)
ens_logits = torch.stack(ens_logits)
ens_nll = torch.stack(ens_nll)
ce_nll = - torch.distributions.Categorical(logits=ens_logits)\
.log_prob(all_Y).sum(dim=-1).mean(dim=-1)
nll = ens_nll.mean(dim=-1)
prob_pred = ens_logits.softmax(dim=-1).mean(dim=0)
acc = (prob_pred.argmax(dim=-1) == all_Y).sum().item() / all_Y.size(0)
ece_val = ece(all_Y.cpu().numpy(), prob_pred.cpu().numpy(), num_bins=30)
return { 'acc': acc, 'nll': nll, 'ce_nll': ce_nll, 'ece': ece_val }
def main(seed=None, device=0, data_dir=None, samples_dir=None, batch_size=2048):
if data_dir is None and os.environ.get('DATADIR') is not None:
data_dir = os.environ.get('DATADIR')
assert Path(samples_dir).is_dir()
torch.backends.cudnn.benchmark = True
set_seeds(seed)
device = f"cuda:{device}" if (device >= 0 and torch.cuda.is_available()) else "cpu"
train_data, test_data = get_cifar10(root=data_dir, seed=seed, augment=False)
train_loader = DataLoader(train_data, batch_size=batch_size, num_workers=2)
test_loader = DataLoader(test_data, batch_size=batch_size, num_workers=2)
net = ResNet18(num_classes=10).to(device).eval()
train_metrics = test_bma(net, train_loader, samples_dir, device=device)
train_metrics = { f'train/{k}': v for k, v in train_metrics.items() }
test_metrics = test_bma(net, test_loader, samples_dir, device=device)
test_metrics = { f'test/{k}': v for k, v in test_metrics.items() }
logging.info(train_metrics)
logging.info(test_metrics)
return train_metrics, test_metrics
def main_sweep(sweep_dir=None):
import yaml
import pickle
results = []
for d in tqdm(os.listdir(sweep_dir)):
samples_dir = Path(sweep_dir) / d / 'samples'
if not samples_dir.is_dir():
continue
logging.info(f'{samples_dir}')
config_file = Path(sweep_dir) / d / 'config.yaml'
with open(config_file, 'r') as f:
config = yaml.safe_load(f)
config['run_id'] = d
train_metrics, test_metrics = main(samples_dir=samples_dir)
results.append({ **config, **train_metrics, **test_metrics })
with open(f'{sweep_dir.split("/")[-1]}.pkl', 'wb') as f:
pickle.dump(results, f)
if __name__ == '__main__':
import fire
logging.getLogger().setLevel(logging.INFO)
fire.Fire(dict(run=main, sweep=main_sweep))
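# Example CLI invocations (the file name and paths are hypothetical): `run` scores
# one directory of posterior samples, `sweep` aggregates a whole sweep directory.
#   python test_bma.py run --samples_dir=runs/cifar10/samples --device=0
#   python test_bma.py sweep --sweep_dir=runs/augmentation_sweep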
| StarcoderdataPython |
3879 | <filename>src/cms/forms/languages/language_form.py
from django import forms
from ...models import Language
class LanguageForm(forms.ModelForm):
"""
Form for creating and modifying language objects
"""
class Meta:
model = Language
fields = [
"code",
"english_name",
"native_name",
"text_direction",
]
| StarcoderdataPython |
187093 | <reponame>Ali-Parandeh/Data_Science_Playground
# Create database engine for data.db
engine = create_engine('sqlite:///data.db')
# Write query to get date, tmax, and tmin from weather
query = """
SELECT date,
tmax,
tmin
FROM weather;
"""
# Make a data frame by passing query and engine to read_sql()
temperatures = pd.read_sql(query, engine)
# View the resulting data frame
print(temperatures)
'''
script.py> output:
date tmax tmin
0 12/01/2017 52 42
1 12/02/2017 48 39
2 12/03/2017 48 42
3 12/04/2017 51 40
...
119 03/30/2018 62 44
120 03/31/2018 58 39
[121 rows x 3 columns]
Selecting columns is useful when you only want a few columns from a table.
If you want most of the columns, it may be easier to load them all and then use pandas to drop unwanted columns.
''' | StarcoderdataPython |
48454 | # -*- coding: utf-8 -*-
# Generated by Django 1.11a1 on 2017-05-11 08:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='allBook',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('book_id', models.BigIntegerField()),
('ISBN', models.CharField(max_length=20)),
('name', models.CharField(max_length=20)),
('price', models.IntegerField()),
],
),
migrations.CreateModel(
name='favor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.BigIntegerField()),
('book_id', models.BigIntegerField()),
],
),
migrations.CreateModel(
name='message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.TimeField()),
('date', models.DateField(auto_now_add=True)),
('book_id', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='student_users',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.BigIntegerField(unique=True)),
('user_name', models.CharField(max_length=20)),
('name', models.CharField(max_length=20)),
('phone_number', models.CharField(max_length=15)),
('mail', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='subscribeBooks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.BigIntegerField()),
('book_id', models.BigIntegerField()),
],
),
migrations.CreateModel(
name='Test',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
],
),
]
| StarcoderdataPython |
3265355 | from homebrain import Agent, Event, Dispatcher
class IDFilter(Agent):
"""Listens to a certain trigger event, and emits a target event once the specified count has been reached."""
autostart = False
def __init__(self, ID, target=None):
Agent.__init__(self)
self.target = target if target is not None else self.identifier
self.dispatcher = Dispatcher()
self.id = ID
def handle_event(self, event):
if "id" in event and event["id"] == self.id:
outgoing_event = Event(type=self.target, data=event["data"])
self.dispatcher.put_event(outgoing_event)
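# Usage sketch (assumes Event accepts arbitrary keyword fields, consistent with the
# id/data access above): forward readings from one sensor under a new event type.
#   id_filter = IDFilter("sensor-42", target="livingroom_temperature")
#   id_filter.handle_event(Event(type="temperature", id="sensor-42", data=21.5))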
| StarcoderdataPython |
1638333 | from datetime import datetime
import base64
import fudge
from nose.tools import eq_ as eq
from onelogin.saml.test.util import assert_raises
from onelogin.saml import (
Response,
ResponseValidationError,
ResponseNameIDError,
ResponseConditionError,
)
test_response = """<samlp:Response
xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
ID="identifier_2"
InResponseTo="identifier_1"
Version="2.0"
IssueInstant="2004-12-05T09:22:05Z"
Destination="https://sp.example.com/SAML2/SSO/POST">
<saml:Issuer>https://idp.example.org/SAML2</saml:Issuer>
<samlp:Status>
<samlp:StatusCode
Value="urn:oasis:names:tc:SAML:2.0:status:Success"/>
</samlp:Status>
<saml:Assertion
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
ID="identifier_3"
Version="2.0"
IssueInstant="2004-12-05T09:22:05Z">
<saml:Issuer>https://idp.example.org/SAML2</saml:Issuer>
<ds:Signature
xmlns:ds="http://www.w3.org/2000/09/xmldsig#">foo signature</ds:Signature>
<saml:Subject>
<saml:NameID
Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient">
3f7b3dcf-1674-4ecd-92c8-1544f346baf8
</saml:NameID>
<saml:SubjectConfirmation
Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
<saml:SubjectConfirmationData
InResponseTo="identifier_1"
Recipient="https://sp.example.com/SAML2/SSO/POST"
NotOnOrAfter="2004-12-05T09:27:05Z"/>
</saml:SubjectConfirmation>
</saml:Subject>
<saml:Conditions
NotBefore="2004-12-05T09:17:05Z"
NotOnOrAfter="2004-12-05T09:27:05Z">
<saml:AudienceRestriction>
<saml:Audience>https://sp.example.com/SAML2</saml:Audience>
</saml:AudienceRestriction>
</saml:Conditions>
<saml:AuthnStatement
AuthnInstant="2004-12-05T09:22:00Z"
SessionIndex="identifier_3">
<saml:AuthnContext>
<saml:AuthnContextClassRef>
urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport
</saml:AuthnContextClassRef>
</saml:AuthnContext>
</saml:AuthnStatement>
</saml:Assertion>
</samlp:Response>
"""
class TestResponse(object):
def setUp(self):
fudge.clear_expectations()
@fudge.with_fakes
def test__init__(self):
fake_base64 = fudge.Fake('base64')
fake_base64.remember_order()
decode = fake_base64.expects('b64decode')
decode.with_args('foo response')
decode.returns('foo decoded response')
fake_etree = fudge.Fake('etree')
fake_etree.remember_order()
from_string = fake_etree.expects('fromstring')
from_string.with_args('foo decoded response')
from_string.returns('foo document')
res = Response(
response='foo response',
signature='foo signature',
_base64=fake_base64,
_etree=fake_etree,
)
eq(res._document, 'foo document')
eq(res._signature, 'foo signature')
@fudge.with_fakes
def test_get_name_id_simple(self):
encoded_response = base64.b64encode(test_response)
res = Response(
response=encoded_response,
signature=None,
)
name_id = res.name_id
eq('3f7b3dcf-1674-4ecd-92c8-1544f346baf8', name_id)
@fudge.with_fakes
def test_get_name_id_multiple(self):
response = """<samlp:Response
xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
ID="identifier_2"
InResponseTo="identifier_1"
Version="2.0"
IssueInstant="2004-12-05T09:22:05Z"
Destination="https://sp.example.com/SAML2/SSO/POST">
<saml:Issuer>https://idp.example.org/SAML2</saml:Issuer>
<samlp:Status>
<samlp:StatusCode
Value="urn:oasis:names:tc:SAML:2.0:status:Success"/>
</samlp:Status>
<saml:Assertion
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
ID="identifier_3"
Version="2.0"
IssueInstant="2004-12-05T09:22:05Z">
<saml:Issuer>https://idp.example.org/SAML2</saml:Issuer>
<ds:Signature
xmlns:ds="http://www.w3.org/2000/09/xmldsig#">...</ds:Signature>
<saml:Subject>
<saml:NameID
Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient">
3f7b3dcf-1674-4ecd-92c8-1544f346baf8
</saml:NameID>
<saml:NameID
Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient">
foo-copy
</saml:NameID>
<saml:SubjectConfirmation
Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
<saml:SubjectConfirmationData
InResponseTo="identifier_1"
Recipient="https://sp.example.com/SAML2/SSO/POST"
NotOnOrAfter="2004-12-05T09:27:05Z"/>
</saml:SubjectConfirmation>
</saml:Subject>
<saml:Conditions
NotBefore="2004-12-05T09:17:05Z"
NotOnOrAfter="2004-12-05T09:27:05Z">
<saml:AudienceRestriction>
<saml:Audience>https://sp.example.com/SAML2</saml:Audience>
</saml:AudienceRestriction>
</saml:Conditions>
<saml:AuthnStatement
AuthnInstant="2004-12-05T09:22:00Z"
SessionIndex="identifier_3">
<saml:AuthnContext>
<saml:AuthnContextClassRef>
urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport
</saml:AuthnContextClassRef>
</saml:AuthnContext>
</saml:AuthnStatement>
</saml:Assertion>
</samlp:Response>
"""
encoded_response = base64.b64encode(response)
res = Response(
response=encoded_response,
signature=None,
)
msg = assert_raises(
ResponseNameIDError,
res._get_name_id,
)
eq(
str(msg),
('There was a problem getting the name ID: Found more than one '
+ 'name ID'
),
)
@fudge.with_fakes
def test_get_name_id_none(self):
response = """<samlp:Response
xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
ID="identifier_2"
InResponseTo="identifier_1"
Version="2.0"
IssueInstant="2004-12-05T09:22:05Z"
Destination="https://sp.example.com/SAML2/SSO/POST">
<saml:Issuer>https://idp.example.org/SAML2</saml:Issuer>
<samlp:Status>
<samlp:StatusCode
Value="urn:oasis:names:tc:SAML:2.0:status:Success"/>
</samlp:Status>
<saml:Assertion
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
ID="identifier_3"
Version="2.0"
IssueInstant="2004-12-05T09:22:05Z">
<saml:Issuer>https://idp.example.org/SAML2</saml:Issuer>
<ds:Signature
xmlns:ds="http://www.w3.org/2000/09/xmldsig#">...</ds:Signature>
<saml:Subject>
<saml:SubjectConfirmation
Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
<saml:SubjectConfirmationData
InResponseTo="identifier_1"
Recipient="https://sp.example.com/SAML2/SSO/POST"
NotOnOrAfter="2004-12-05T09:27:05Z"/>
</saml:SubjectConfirmation>
</saml:Subject>
<saml:Conditions
NotBefore="2004-12-05T09:17:05Z"
NotOnOrAfter="2004-12-05T09:27:05Z">
<saml:AudienceRestriction>
<saml:Audience>https://sp.example.com/SAML2</saml:Audience>
</saml:AudienceRestriction>
</saml:Conditions>
<saml:AuthnStatement
AuthnInstant="2004-12-05T09:22:00Z"
SessionIndex="identifier_3">
<saml:AuthnContext>
<saml:AuthnContextClassRef>
urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport
</saml:AuthnContextClassRef>
</saml:AuthnContext>
</saml:AuthnStatement>
</saml:Assertion>
</samlp:Response>
"""
encoded_response = base64.b64encode(response)
res = Response(
response=encoded_response,
signature=None,
)
msg = assert_raises(
ResponseNameIDError,
res._get_name_id,
)
eq(
str(msg),
('There was a problem getting the name ID: Did not find a name '
+ 'ID'
),
)
@fudge.with_fakes
def test_is_valid_not_before_missing(self):
response = """<samlp:Response
xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
ID="identifier_2"
InResponseTo="identifier_1"
Version="2.0"
IssueInstant="2004-12-05T09:22:05Z"
Destination="https://sp.example.com/SAML2/SSO/POST">
<saml:Issuer>https://idp.example.org/SAML2</saml:Issuer>
<samlp:Status>
<samlp:StatusCode
Value="urn:oasis:names:tc:SAML:2.0:status:Success"/>
</samlp:Status>
<saml:Assertion
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
ID="identifier_3"
Version="2.0"
IssueInstant="2004-12-05T09:22:05Z">
<saml:Issuer>https://idp.example.org/SAML2</saml:Issuer>
<ds:Signature
xmlns:ds="http://www.w3.org/2000/09/xmldsig#">foo signature</ds:Signature>
<saml:Subject>
<saml:NameID
Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient">
3f7b3dcf-1674-4ecd-92c8-1544f346baf8
</saml:NameID>
<saml:SubjectConfirmation
Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
<saml:SubjectConfirmationData
InResponseTo="identifier_1"
Recipient="https://sp.example.com/SAML2/SSO/POST"
NotOnOrAfter="2004-12-05T09:27:05Z"/>
</saml:SubjectConfirmation>
</saml:Subject>
<saml:Conditions
NotOnOrAfter="2004-12-05T09:27:05Z">
<saml:AudienceRestriction>
<saml:Audience>https://sp.example.com/SAML2</saml:Audience>
</saml:AudienceRestriction>
</saml:Conditions>
<saml:AuthnStatement
AuthnInstant="2004-12-05T09:22:00Z"
SessionIndex="identifier_3">
<saml:AuthnContext>
<saml:AuthnContextClassRef>
urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport
</saml:AuthnContextClassRef>
</saml:AuthnContext>
</saml:AuthnStatement>
</saml:Assertion>
</samlp:Response>
"""
encoded_response = base64.b64encode(response)
res = Response(
response=encoded_response,
signature=None,
)
msg = assert_raises(
ResponseConditionError,
res.is_valid,
)
eq(str(msg),
('There was a problem validating a condition: Did not find NotBefore '
+ 'condition'
),
)
@fudge.with_fakes
def test_is_valid_not_on_or_after_missing(self):
response = """<samlp:Response
xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
ID="identifier_2"
InResponseTo="identifier_1"
Version="2.0"
IssueInstant="2004-12-05T09:22:05Z"
Destination="https://sp.example.com/SAML2/SSO/POST">
<saml:Issuer>https://idp.example.org/SAML2</saml:Issuer>
<samlp:Status>
<samlp:StatusCode
Value="urn:oasis:names:tc:SAML:2.0:status:Success"/>
</samlp:Status>
<saml:Assertion
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
ID="identifier_3"
Version="2.0"
IssueInstant="2004-12-05T09:22:05Z">
<saml:Issuer>https://idp.example.org/SAML2</saml:Issuer>
<ds:Signature
xmlns:ds="http://www.w3.org/2000/09/xmldsig#">foo signature</ds:Signature>
<saml:Subject>
<saml:NameID
Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient">
3f7b3dcf-1674-4ecd-92c8-1544f346baf8
</saml:NameID>
<saml:SubjectConfirmation
Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
<saml:SubjectConfirmationData
InResponseTo="identifier_1"
Recipient="https://sp.example.com/SAML2/SSO/POST"
NotOnOrAfter="2004-12-05T09:27:05Z"/>
</saml:SubjectConfirmation>
</saml:Subject>
<saml:Conditions
NotBefore="2004-12-05T09:27:05Z">
<saml:AudienceRestriction>
<saml:Audience>https://sp.example.com/SAML2</saml:Audience>
</saml:AudienceRestriction>
</saml:Conditions>
<saml:AuthnStatement
AuthnInstant="2004-12-05T09:22:00Z"
SessionIndex="identifier_3">
<saml:AuthnContext>
<saml:AuthnContextClassRef>
urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport
</saml:AuthnContextClassRef>
</saml:AuthnContext>
</saml:AuthnStatement>
</saml:Assertion>
</samlp:Response>
"""
encoded_response = base64.b64encode(response)
res = Response(
response=encoded_response,
signature=None,
)
msg = assert_raises(
ResponseConditionError,
res.is_valid,
)
eq(str(msg),
('There was a problem validating a condition: Did not find '
+ 'NotOnOrAfter condition'
),
)
@fudge.with_fakes
def test_is_valid_current_time_earlier(self):
encoded_response = base64.b64encode(test_response)
res = Response(
response=encoded_response,
signature=None,
)
def fake_clock():
return datetime(2004, 12, 05, 9, 16, 45, 462796)
msg = assert_raises(
ResponseValidationError,
res.is_valid,
_clock=fake_clock,
)
eq(str(msg),
('There was a problem validating the response: Current time is '
+ 'earlier than NotBefore condition'
),
)
@fudge.with_fakes
def test_is_valid_current_time_on_or_after(self):
encoded_response = base64.b64encode(test_response)
res = Response(
response=encoded_response,
signature=None,
)
def fake_clock():
return datetime(2004, 12, 05, 9, 30, 45, 462796)
msg = assert_raises(
ResponseValidationError,
res.is_valid,
_clock=fake_clock,
)
eq(str(msg),
('There was a problem validating the response: Current time is '
+ 'on or after NotOnOrAfter condition'
),
)
@fudge.with_fakes
def test_is_valid_simple(self):
encoded_response = base64.b64encode(test_response)
res = Response(
response=encoded_response,
signature='foo signature',
)
def fake_clock():
return datetime(2004, 12, 05, 9, 18, 45, 462796)
fake_verifier = fudge.Fake(
'verifier',
callable=True,
)
fake_verifier.times_called(1)
fake_verifier.with_args(res._document, 'foo signature')
fake_verifier.returns(True)
msg = res.is_valid(
_clock=fake_clock,
_verifier=fake_verifier,
)
eq(msg, True)
| StarcoderdataPython |
1644730 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Homework 5.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1wEyRe0MurtqVNBeqe0w4Sp20QUJktjHa
"""
import pandas as pd
"""Take 10 candidates and find what packs commited to them and where those packs are located.
FEC Dataset
Also merge practice files from w3
"""
HCrime = pd.read_csv("hate_crime.csv")
HCrime.head()
HCrime.describe()
zips = pd.read_json("us-zips.json")
zips.head()
zips.fields.count()
candidates = pd.read_csv("ccl.txt", sep="|", header=None)
c_headers = pd.read_csv("ccl_header_file.csv")
c_headers = list(c_headers.columns.values)
candidates.columns = c_headers
candidates.head()
candidates.dtypes
cms = pd.read_csv("cm.txt", sep="|", header=None)
cm_headers = pd.read_csv("cm_header_file.csv")
cm_headers = list(cm_headers.columns.values)
cms.columns = cm_headers
cms.head()
cms.drop(['CAND_ID'], axis = 1, inplace = True)
candidates.drop(['CMTE_DSGN','CMTE_TP'], axis = 1, inplace= True)
candidates_merged = candidates.merge(cms,on = 'CMTE_ID', how = 'left')
candidates_merged.head(10)
c_names = pd.read_csv("cn.txt", sep="|", header=None)
cn_headers = pd.read_csv("cn_header_file.csv")
cn_headers = list(cn_headers.columns.values)
c_names.columns = cn_headers
c_names.head(10)
c_names_new = c_names[['CAND_ID','CAND_NAME','CAND_PTY_AFFILIATION','CAND_OFFICE_ST']]
cand_names_merge = candidates_merged.merge(c_names_new,on = 'CAND_ID', how = 'left')
cand_names_merge.head(10)
cand_names_merge.dropna(subset=['CAND_NAME'], inplace = True)
cand_names_merge.head(10)
# Here we are grouping by the locations of the committees and finding how many candidates are in each location
cands_per_cmtearea = cand_names_merge.groupby(['CMTE_CITY','CMTE_ST']).agg({'CAND_ID': "nunique"})
cands_per_cmtearea
cands_per_cmtearea.sort_values(by = 'CAND_ID', ascending = False, inplace = True)
cands_per_cmtearea.head(10)
# The largest number of candidates are followed by committees based out of DC, Alexandria, and Houston
#This calculation is very similar except it just located the number of unique committees in each area
cmte_per_cmtearea = cand_names_merge.groupby(['CMTE_CITY','CMTE_ST']).agg({'CMTE_ID': "nunique"})
cmte_per_cmtearea.sort_values(by = 'CMTE_ID', ascending = False, inplace = True)
cmte_per_cmtearea.head(10)
# im curious how often committee IDs and Candidate IDs are repeated/ if they are repeated.
C_ids = cand_names_merge.CAND_ID.value_counts()
C_ids.head(10)
CM_ids = cand_names_merge.CMTE_ID.value_counts()
CM_ids.head(10)
cand_names_merge[cand_names_merge['CAND_ID']== 'P60022118']
# candidate has 10 entries, 4 with different committees and 6 without committees
# I could change the join type to prevent these no committee entries
cand_cmte_merged = candidates.merge(cms,on = 'CMTE_ID', how = 'inner')
cand_cmte_merged = cand_cmte_merged.merge(c_names_new,on = 'CAND_ID', how = 'left')
cand_cmte_merged.dropna(subset=['CAND_NAME'], inplace = True)
C_ids2 = cand_cmte_merged.CAND_ID.value_counts()
C_ids2.head(10)
cand_cmte_merged[cand_cmte_merged['CAND_ID']== 'P60022118']
cand_cmte_merged[cand_cmte_merged['CAND_ID']== 'S2FL00441']
# still some duplicate entries exist which leaves questions
# I think I want to eliminate any entries where CAND_ID, CMTE_ID and CAND_ELECTION_YR are identical
unique_combos = cand_cmte_merged.drop_duplicates(subset=["CAND_ID","CMTE_ID",'CAND_ELECTION_YR'])
C_ids2 = unique_combos.CAND_ID.value_counts()
C_ids2.head(10)
cand_cmte_merged[cand_cmte_merged['CAND_ID']== 'S4IA00129']
# now it appears every entry is a unique combo of CMTE CAND and YEAR
CMTEParties = unique_combos.CMTE_PTY_AFFILIATION.value_counts()
print(CMTEParties.head(10))
CANDParties = unique_combos.CAND_PTY_AFFILIATION.value_counts()
print(CANDParties.head(10))
diffParty = unique_combos[unique_combos['CAND_PTY_AFFILIATION'] != unique_combos['CMTE_PTY_AFFILIATION']]
diffParty.head(10)
diffParty.count()
# 686 times the candidate and committee have different party affiliations
CMTEParties = diffParty.CMTE_PTY_AFFILIATION.value_counts()
print(CMTEParties.head(10))
CANDParties = diffParty.CAND_PTY_AFFILIATION.value_counts()
print(CANDParties.head(10)) | StarcoderdataPython |
1640435 | <reponame>andreped/GSI-RADS<gh_stars>0
from segmentation.src.Utils.configuration_parser import *
from segmentation.src.PreProcessing.pre_processing import run_pre_processing
from segmentation.src.Inference.predictions import run_predictions
from segmentation.src.Inference.predictions_reconstruction import reconstruct_post_predictions
from segmentation.src.Utils.io import dump_predictions
MODELS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../', 'resources/models')
sys.path.insert(1, MODELS_PATH)
def predict(input_filename, output_path, selected_model, brain_mask_filename=None):
"""
"""
pre_processing_parameters = PreProcessingParser(model_name=selected_model)
valid_extensions = ['.h5', '.hd5', '.hdf5', '.hdf', '.ckpt']
model_path = ''
for e, ext in enumerate(valid_extensions):
model_path = os.path.join(MODELS_PATH, selected_model, 'model' + ext)
if os.path.exists(model_path):
break
if not os.path.exists(model_path):
raise ValueError('Could not find any model on Docker image matching the requested type \'{}\'.'.format(selected_model))
nib_volume, resampled_volume, data, crop_bbox = run_pre_processing(filename=input_filename,
pre_processing_parameters=pre_processing_parameters,
storage_prefix=output_path,
brain_mask_filename=brain_mask_filename)
predictions = run_predictions(data=data, model_path=model_path, parameters=pre_processing_parameters)
final_predictions = reconstruct_post_predictions(predictions=predictions, parameters=pre_processing_parameters,
crop_bbox=crop_bbox, nib_volume=nib_volume, resampled_volume=resampled_volume)
dump_predictions(predictions=final_predictions, parameters=pre_processing_parameters, nib_volume=nib_volume,
storage_prefix=output_path)
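# Example invocation sketch (paths and model name are hypothetical; the selected
# model folder must exist under resources/models with a matching weight file):
#   predict(input_filename='/data/patient01_t1.nii.gz',
#           output_path='/data/output/patient01_',
#           selected_model='MRI_HGGlioma',
#           brain_mask_filename=None)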
| StarcoderdataPython |
1657430 | from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.core.exceptions import MultipleObjectsReturned
from mighty import functions
from mighty.apps import MightyConfig as conf
from mighty.applications.logger.apps import LoggerConfig
import datetime, sys, logging
logger = logging.getLogger(__name__)
class BaseCommand(BaseCommand):
help = 'Command Base override by Mighty'
position = 0
prefix_bar = 'Percent'
current_info = ''
errors = []
def get_total(self):
return self.total if self.total else 0
def set_position(self, pos=1):
self.position+=pos
def get_current_info(self):
return self.current_info
def progress_bar(self, bar_length=20):
if self.verbosity > 0:
percent = self.position / self.get_total()
if self.progressbar:
arrow = '-' * int(round(percent * bar_length)-1) + '>'
spaces = ' ' * (bar_length - len(arrow))
sys.stdout.write("\r{0}: [{1}] {2}% ({3}/{4}) {5}".format(
self.prefix_bar,
arrow + spaces,
int(round(percent * 100)),
self.position,
self.get_total(),
self.get_current_info(),
)
)
sys.stdout.flush()
else:
sys.stdout.write("\r{0}: {1}% ({2}/{3}) {4}".format(
self.prefix_bar,
int(round(percent * 100)),
self.position,
self.get_total(),
self.get_current_info())
)
print()
if self.position == self.get_total(): print()
def create_parser(self, prog_name, subcommand, **kwargs):
self.subcommand = subcommand
return super().create_parser(prog_name, subcommand)
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument('--total', default=0)
parser.add_argument('--encoding', default='utf8')
parser.add_argument('--logfile', default="%s_%s.log" % (str(self.subcommand).lower(), f"{datetime.datetime.now():%Y%m%d_%H%M%S_%f}"))
parser.add_argument('--progressbar', action="store_true")
def handle(self, *args, **options):
self.encoding = options.get('encoding')
self.logfile = options.get('logfile')
self.progressbar = options.get('progressbar')
        self.verbosity = options.get('verbosity', 0)
        # Consume --total so progress_bar has a denominator even if a subclass
        # never sets self.total itself
        self.total = int(options.get('total', 0))
logger.debug('start')
self.makeJob()
self.showErrors()
logger.debug('end')
def makeJob(self):
self.do()
def showErrors(self):
for error in self.errors:
logger.info(error)
def do(self):
raise NotImplementedError("Command should implement method do(self)")
class ModelBaseCommand(BaseCommand):
    help = 'Command Model Base'
manager = 'objects'
label = None
model = None
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument('--create', action="store_true")
parser.add_argument('--label', default=None)
parser.add_argument('--model', default=None)
parser.add_argument('--filter', default=None)
parser.add_argument('--manager', default='objects')
parser.add_argument('--search', action="store_true")
def handle(self, *args, **options):
self.create = options.get('create')
self.label = options.get('label', self.label)
self.model = options.get('model', self.model)
self.manager = options.get('manager', self.manager)
self.filter = options.get('filter')
self.search = options.get('search')
super().handle(*args, **options)
def get_queryset(self, *args, **kwargs):
label = kwargs.get('label', self.label)
model = kwargs.get('model', self.model)
manager = kwargs.get('manager', self.manager)
model = functions.get_model(label, model)
return getattr(model, manager).filter(**dict(x.split(',') for x in self.filter.split(';')) if self.filter else {})
def do(self):
self.each_objects()
def each_objects(self):
qs = self.get_queryset()
self.total = len(qs)
        for obj in qs:  # reuse the queryset evaluated above instead of querying again
self.current_object = obj
self.set_position()
self.progress_bar()
self.on_object(obj)
    def on_object(self, obj):
raise NotImplementedError("Command should implement method on_object(self, obj)")
| StarcoderdataPython |
24719 | <filename>spec/python/test_cast_nested.py<gh_stars>10-100
# Autogenerated from KST: please remove this line if doing any edits by hand!
import unittest
from cast_nested import CastNested
class TestCastNested(unittest.TestCase):
def test_cast_nested(self):
with CastNested.from_file('src/switch_opcodes.bin') as r:
self.assertEqual(r.opcodes_0_str.value, u"foobar")
self.assertEqual(r.opcodes_0_str_value, u"foobar")
self.assertEqual(r.opcodes_1_int.value, 66)
self.assertEqual(r.opcodes_1_int_value, 66)
| StarcoderdataPython |
1716359 | <filename>lib/figures.py
"""
@author: <NAME>
https://github.com/diegoscastanho
"""
import matplotlib.pyplot as plt
import numpy as np
class PlotGraphics:
"""
Plot graphics
"""
def __init__(self, obj):
self.database = obj.database_radiobutton.isChecked()
self.real_prediction_test = obj.rxp_test_radiobutton.isChecked()
self.real_prediction_training = obj.rxp_training_radiobutton.isChecked()
self.global_best_fit_evolution = obj.gb_fitness_radiobutton.isChecked()
self.global_best_cost_evolution = obj.gb_cost_radiobutton.isChecked()
self.boxplot_fitness = obj.boxplot_fitness_radiobutton.isChecked()
self.boxplot_cost = obj.boxplot_cost_radiobutton.isChecked()
self.data = obj.json_data
def run(self):
"""
Generate graphics
"""
if self.database:
self.generate_database()
# self.generate_test_figure()
if self.real_prediction_test:
self.generate_test_figure()
if self.real_prediction_training:
self.generate_training_figure()
if self.global_best_fit_evolution:
self.generate_global_best_fit_evolution()
if self.global_best_cost_evolution:
self.generate_global_best_cost_evolution()
if self.boxplot_fitness:
self.generate_boxplot_fitness()
if self.boxplot_cost:
self.generate_boxplot_cost()
def generate_database(self):
"""
Generate database
"""
real = self.data[0]["obj"].hospit
# title = "PSO_training_hospitalizations_delay " + str(delay_day)
plt.plot(real, 'black', label='observado')
plt.ylabel('Número de Hospitalizações')
plt.xlabel('Dias')
plt.grid(True)
# plt.legend()
plt.show()
def generate_test_figure(self):
"""
Generate the training figure
"""
prediction = self.data[0]['best'].test_prediction
real = self.data[0]["obj"].hospit_test
plt.plot(real, 'r', label='observado')
plt.plot(prediction, 'b', label='simulado')
plt.ylabel('Número de hospitalizações')
plt.xlabel('Dias')
plt.grid(True)
plt.legend()
# plt.savefig('/home/diego/Desktop/ae_de/real_prediction_test.png', dpi=150)
plt.show()
def generate_training_figure(self):
"""
Generate the training figure
"""
prediction = self.data[0]['best'].train_prediction
real = self.data[0]["obj"].hospit_train
plt.plot(real, 'r', label='observado')
plt.plot(prediction, 'b', label='simulado')
plt.ylabel('Número de hospitalizações')
plt.xlabel('Dias')
plt.grid(True)
plt.legend()
# plt.savefig('/home/diego/Desktop/mse_pso_real_prediction_trainning.png', dpi=150)
plt.show()
def generate_global_best_fit_evolution(self):
"""
Generate global_best_fit_evolution
"""
fit_evolution = self.data[0]['best'].fit_evolution
plt.plot(fit_evolution)
plt.ylabel('Evolução do fitness')
plt.xlabel('Iterações')
plt.grid(True)
plt.legend()
# plt.savefig('/home/diego/Desktop/ae_de/global_best_fit_evolution.png', dpi=150)
plt.show()
def generate_global_best_cost_evolution(self):
"""
Generate global_best_cost_evolution
"""
cost_evolution = self.data[0]['best'].cost_evolution
cost_type = ""
if self.data[0]['obj'].type_ae_cost:
cost_type = "Erro Absoluto (AE)"
if self.data[0]['obj'].type_mse_cost:
cost_type = "Erro Quadrático Médio - MSE "
plt.plot(cost_evolution)
plt.ylabel(cost_type)
plt.xlabel('Iterações')
plt.grid(True)
plt.legend()
# plt.savefig('/home/diego/Desktop/ae_de/global_best_cost_evolution.png', dpi=150)
plt.show()
def generate_boxplot_fitness(self):
"""
Generate Boxplot fitness
"""
fit_lits, plot_labels = list(), list()
for lag in self.data:
fit_list = list()
for part in lag['best_particles']:
fit_list.append(part.fit)
fit_lits.append(fit_list)
aux = "lag_" + str(lag['obj'].day_lag)
plot_labels.append(aux)
# title = "Box Plot Fitness - " + lag['obj'].algorithm
_, ax = plt.subplots()
# ax.set_title(title)
ax.boxplot(fit_lits, labels = plot_labels)
ax.set_ylabel('Fitness')
# plt.savefig('/home/diego/Desktop/mse_pso_boxplot_fitness.png', dpi=150)
plt.show()
def generate_boxplot_cost(self):
"""
Generate boxplot_cost
"""
cost_lits, plot_labels = list(), list()
for lag in self.data:
cost_lit = list()
for part in lag['best_particles']:
cost_lit.append(part.cost)
cost_lits.append(cost_lit)
aux = "lag_" + str(lag['obj'].day_lag)
plot_labels.append(aux)
cost_type = ""
if self.data[0]['obj'].type_ae_cost:
cost_type = "Erro Absoluto (AE)"
if self.data[0]['obj'].type_mse_cost:
cost_type = "Erro Quadrático Médio - MSE "
# title = "Box Plot Cost - " + lag['obj'].algorithm
_, ax = plt.subplots()
# ax.set_title(title)
ax.boxplot(cost_lits, labels = plot_labels)
ax.set_ylabel(cost_type)
# plt.savefig('/home/diego/Desktop/ae_de/boxplot_cost.png', dpi=150)
plt.show()
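# --- Illustrative usage sketch (not part of the original module) ---
# PlotGraphics expects a Qt-style dialog object; the stub below fakes only the attributes the
# class actually reads (the *_radiobutton.isChecked() flags and json_data). The data layout
# mirrors what run()/generate_database() access; all names and values here are hypothetical.
def _example_usage():
    from types import SimpleNamespace
    checked = lambda value: SimpleNamespace(isChecked=lambda: value)
    stub = SimpleNamespace(
        database_radiobutton=checked(True),  # only the raw-database plot is enabled
        rxp_test_radiobutton=checked(False),
        rxp_training_radiobutton=checked(False),
        gb_fitness_radiobutton=checked(False),
        gb_cost_radiobutton=checked(False),
        boxplot_fitness_radiobutton=checked(False),
        boxplot_cost_radiobutton=checked(False),
        json_data=[{"obj": SimpleNamespace(hospit=[3, 5, 8, 13])}],
    )
    PlotGraphics(stub).run()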
| StarcoderdataPython |
1608628 | <reponame>patricebechard/chatbot<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@email: <EMAIL>
Created on Thu Jan 18 17:45:44 2018
Utilities for the seq2seq chatbot
"""
import time
import math
import torch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# USE_CUDA = False
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return "%s (- %s)" % (asMinutes(s), asMinutes(rs))
import matplotlib.pyplot as plt
from matplotlib import ticker
import numpy as np
def showPlot(points):
plt.figure()
    fig, ax = plt.subplots()
# this locator puts ticks at regular intervals
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
def showAttention(input_sentence, output_words, attentions):
# Set up figure with colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(attentions.numpy(), cmap='bone')
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + input_sentence.split(' ') +
['<EOS>'], rotation=90)
ax.set_yticklabels([''] + output_words)
# Show label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
def evaluateAndShowAttention(input_sentence):
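    # Note: `evaluate`, `encoder1` and `attn_decoder1` are not defined in this module; they are
    # expected to exist as globals in the training/evaluation script that imports these helpers.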
output_words, attentions = evaluate(encoder1, attn_decoder1, input_sentence)
print('input = ', input_sentence)
print('output = ', ' '.join(output_words))
showAttention(input_sentence, output_words, attentions) | StarcoderdataPython |
4838276 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Internal APIs and core implementation of weight compression API."""
from typing import List, Any, Mapping
import dataclasses
import tensorflow as tf
# Workaround to prevent MLIR from constant folding the
# compressed weights into the original weights. For instance,
# if we decompose `self.kernel` into `u` and `v`, we need to
# make sure that decompression occurs during inference, instead
# of during MLIR optimization which could multiply `u` and `v`
# given that they are constants.
#
# TODO(tfmot): make this more stable. This currently relies
# on the TensorFlow Lite MLIR converter to not constant
# fold through `tf.cond`, even though it already does
# for `tf.while`.
def _prevent_constant_folding(tensor, dummy_inputs):
tensor = tf.identity(tensor)
outputs = tf.cond(
tf.reduce_sum(dummy_inputs) > 0, lambda: tensor, lambda: tensor)
return outputs
class _TrainingWrapper(tf.keras.layers.Wrapper):
"""Represent modifications to training graph for weight compression."""
def __init__(self, layer, algorithm, compressible_weights: List[str]):
self.algorithm = algorithm
self.compressible_weights = compressible_weights
self.original_add_weight = layer.add_weight
setattr(layer, 'add_weight', self._skip_compressible_weights)
super(_TrainingWrapper, self).__init__(layer)
def _skip_compressible_weights(self, *args, **kwargs):
# Match for compressible weights based on `name` parameter.
#
# This depends on common practice where every layer's call
# to `self.add_weight` follows this form:
#
# self.`name` = self.add_weight(name=`name`)
#
# where the attribute name matches the variable name.
#
# TODO(tfmot): check if depending on this practice
# is safe for both builtin and custom Keras layers.
# Regardless, raise an exception if name is None, which
# means that the practice has not been followed.
name = None
if args:
name = args[0]
if 'name' in kwargs:
name = kwargs['name']
if name not in self.compressible_weights:
return self.original_add_weight(*args, **kwargs)
# If weight is compressible, substitute in a dummy tensor
# with the same shape as what would have been added.
# Returning an empty tensor would cause ** to fail.
shape = None
if args and len(args) > 1:
shape = args[1]
if 'shape' in kwargs:
shape = kwargs['shape']
return tf.zeros(shape)
def build(self, input_shape):
# Building nested layer via `super` must happen first
# so that the nested layer's variables
# are available to `init_training_weights_repr`.
super(_TrainingWrapper, self).build(input_shape)
# Add weights needed by algorithm during training.
self.training_weights = {}
for attr_name in self.compressible_weights:
compressible_weight = getattr(self.layer, attr_name)
# Note that as standard in `build` methods, the handling of pretrained
# weights actually occurs outside the wrapper. This only initializes
# weights with dummy values. Additionally, we don't have access to the
# actual values of the nested layer's weights since they are no longer
# variables, due to `_skip_compressible_weights` from `__init__`.
assert isinstance(compressible_weight, tf.Tensor)
weight_reprs = self.algorithm.init_training_weights_repr(
compressible_weight)
weights = []
for weight_repr in weight_reprs:
weight = self.add_weight(**dataclasses.asdict(weight_repr))
weights.append(weight)
self.training_weights[attr_name] = weights
def call(self, inputs):
for attr_name in self.compressible_weights:
# TODO(tfmot): move constant folding prevention to the inference graph
# only, since constant folding won't happen during training.
training_weight_tensors = []
for v in self.training_weights[attr_name]:
training_weight_tensors.append(
_prevent_constant_folding(v.read_value(), inputs))
weight_tensor = self.algorithm.training(*training_weight_tensors)
setattr(self.layer, attr_name, weight_tensor)
# This assumes that all changes to the forward pass happen "prior" to
# the nested layer's portion of the forward pass. This suffices since
# the scope of this API is to only optimize the weights.
return self.layer.call(inputs)
# TODO(tfmot): deduplicate code with _TrainingWrapper.
class _InferenceWrapper(tf.keras.layers.Wrapper):
"""Represent modifications to inference graph for weight compression."""
def __init__(self, layer, algorithm,
training_tensors: Mapping[str, List[tf.Tensor]]):
self.algorithm = algorithm
# training_tensors is a map from compressible attributes (e.g. 'kernel')
# to tensors (not variables to prevent model size increasing) with the
# same shape as the corresponding variables used during training.
self.training_tensors = training_tensors
self.original_add_weight = layer.add_weight
setattr(layer, 'add_weight', self._skip_compressible_weights)
super(_InferenceWrapper, self).__init__(layer)
def _skip_compressible_weights(self, *args, **kwargs):
# Match for compressible weights based on `name` parameter.
#
# This depends on common practice where every layer's call
# to `self.add_weight` follows this form:
#
# self.`name` = self.add_weight(name=`name`)
#
# where the attribute name matches the variable name.
#
# TODO(tfmot): check if depending on this practice
# is safe for both builtin and custom Keras layers.
# Regardless, raise an exception if name is None, which
# means that the practice has not been followed.
name = None
if args:
name = args[0]
if 'name' in kwargs:
name = kwargs['name']
if name not in self.training_tensors:
return self.original_add_weight(*args, **kwargs)
# If weight is compressible, substitute in a dummy tensor
# with the same shape as what would have been added.
# Returning an empty tensor would cause ** to fail.
shape = None
if args and len(args) > 1:
shape = args[1]
if 'shape' in kwargs:
shape = kwargs['shape']
return tf.zeros(shape)
def build(self, input_shape):
super(_InferenceWrapper, self).build(input_shape)
# Add weights needed by algorithm during inference.
self.compressed_weights = {}
for attr_name in self.training_tensors:
training_tensors = self.training_tensors[attr_name]
compressed_tensors = self.algorithm.compress(*training_tensors)
weights = []
for t in compressed_tensors:
weight = self.add_weight(name='TODO', shape=t.shape)
weights.append(weight)
self.compressed_weights[attr_name] = weights
def call(self, inputs, training=None):
for attr_name in self.training_tensors:
# TODO(tfmot): understand how read_value() is converted to
# inference in TensorFlow Lite.
compressed_weight_tensors = []
for v in self.compressed_weights[attr_name]:
compressed_weight_tensors.append(
_prevent_constant_folding(v.read_value(), inputs))
weight_tensor = self.algorithm.decompress(*compressed_weight_tensors)
setattr(self.layer, attr_name, weight_tensor)
# TODO(tfmot): handle training arg if needed given this is inference only.
return self.layer.call(inputs)
def _map_to_training_weights(
pretrained_weights: List[List[Any]], algorithm, layer,
compressible_weights: List[str]) -> List[List[Any]]:
"""Construct the training weight values from the layer's pretrained weights.
The weight values have the same structure as the output of
`tf.keras.layers.Layer.get_weights`.
Args:
pretrained_weights: layer's pretrained weights, retrieved via
layer.get_weights()
algorithm: weight compression algorithm
layer: layer
compressible_weights: weight attributes of layer that should be compressed
Returns:
Initial weight values for training.
"""
# Need to know for each layer that kernel is the first weight, etc.
# TODO(tfmot): consider implication on custom Keras layers. The
# user has to pass in the information that 'kernel' is the first
# variable, 'bias' is the second variable, and so on.
# TODO(tfmot): see if Keras can introduce changes to simplify this.
original_weights = []
training_weights = []
if isinstance(layer, tf.keras.layers.Conv2D) or \
isinstance(layer, tf.keras.layers.Dense):
weights = ['kernel', 'bias']
for i, weight in enumerate(weights):
pretrained_weight = pretrained_weights[i]
if weight in compressible_weights:
weight_reprs = algorithm.init_training_weights_repr(pretrained_weight)
for weight_repr in weight_reprs:
# Assumes initializer is tf.keras.initializers.Constant.
# TODO(tfmot): add check for this assumption.
# TODO(tfmot): the documentation for
# tf.keras.initializers.Constant(value)
# suggests that the `value` cannot be any arbitrary shape and
# only a single scalar value. It works in this implementation
# to make `value` any tensor - check this.
training_weights.append(weight_repr.initializer(
shape=None, dtype=weight_repr.dtype))
else:
original_weights.append(pretrained_weight)
return training_weights + original_weights
# TODO(tfmot): same TODOs as _map_to_training_weights.
def _map_to_inference_weights(training_weights, algorithm, training_tensors):
"""Construct the inference weight values from the weights after training.
The weight values have the same structure as the output of
`tf.keras.layers.Layer.get_weights`.
Args:
training_weights: layer's weights from training, retrieved via
layer.get_weights()
algorithm: weight compression algorithm
training_tensors: map from compressible weight attribute (e.g. 'kernel') to
relevant tensors.
Returns:
Initial weight values for training.
Example:
training_weights = [kernel_var1, kernel_var2, bias]
training_tensors = {'kernel': [kernel_var1, kernel_var2]}
expected output: [compress([kernel_var1, kernel_var2]), bias]
"""
compressed_weights = []
weights = ['kernel', 'bias']
layer_weights_i = 0
for weight in weights:
if weight in training_tensors:
compressed = algorithm.compress(*training_tensors[weight])
for c in compressed:
compressed_weights.append(c.numpy())
layer_weights_i += len(training_tensors[weight])
else:
compressed_weights.append(training_weights[layer_weights_i])
layer_weights_i += 1
return compressed_weights
def create_layer_for_training(layer, algorithm):
"""Internal API to create layer for training with weight compression."""
# TODO(tfmot): move these checks to public API for
# visibility.
if not isinstance(algorithm, object):
raise ValueError('`_create_layer_for_training` requires `algorithm` '
'to be an instantiated object, as opposed '
'to the class itself.')
# Currently only supports a layer being built. The non-built
# case may work fine as is, but it needs to be tested, together
# with the followup API for exporting the model when the training
# and inference graphs differ.
if not layer.built:
raise ValueError('`_create_layer_for_training` requires `layer` to '
'be built.')
pretrained_weights = layer.get_weights()
input_shape = layer.input_shape
compressible_weights = algorithm.get_compressible_weights(layer)
# Clone layer for two reasons:
#
# 1) Avoid unnecessary variable creation which undoes the benefits of
# compression. For instance, if we factorize `kernel` into `a` and `b`,
# since `a` and `b` collectively take less space than `kernel`, we
# no longer want to `kernel` to take up space as a variable.
#
# The design depends on replacing the layer's `add_weight`
# method to prevent variable creation, before `add_weight` is called
# in the layer's `build`. Since the layer is built already, we undo
# this by cloning the layer.
#
# 2) The unoptimized layer and the optimized layer are now independent
# of each other and training one will not affect the other.
#
# TODO(tfmot): consider if it's okay to avoid this complexity during training
# and only add it during inference, which is when model size really matters.
# TODO(tfmot): handle custom Keras layer case.
cloned_layer = layer.__class__.from_config(layer.get_config())
# TODO(tfmot): consider if this manner of handling build hinders
# support for subclassed models in trying to set the attributes
# that are layers while ensuring that the underlying trainable weights
# have been created already.
wrapped_layer = _TrainingWrapper(cloned_layer, algorithm,
compressible_weights)
if compressible_weights:
# Set pretrained weight values.
wrapped_layer.build(input_shape)
training_weights = _map_to_training_weights(pretrained_weights, algorithm,
layer, compressible_weights)
wrapped_layer.set_weights(training_weights)
return wrapped_layer
def create_layer_for_inference(layer: _TrainingWrapper, algorithm):
"""Internal API to create layer for inference with weight compression."""
# TODO(tfmot): move these checks to public API for
# visibility.
if not isinstance(algorithm, object):
raise ValueError('`_create_layer_for_inference` requires `algorithm` '
'to be an instantiated object, as opposed '
'to the class itself.')
if not layer.built:
raise ValueError(
'`_create_layer_for_inference` requires `layer` to be built.')
# Process layer.
nested_layer = layer.layer
input_shape = layer.input_shape
# Construct map from attribute (e.g. 'kernel') to tensor versions of
# variables used during training.
compressible_training_tensors = {}
for attr, weights in layer.training_weights.items():
compressible_training_tensors[attr] = [w.read_value() for w in weights]
# Process nested layer.
#
# TODO(tfmot): same TODOs as in _create_layer_for_training.
cloned_layer = nested_layer.__class__.from_config(nested_layer.get_config())
layer_for_inference = _InferenceWrapper(cloned_layer, algorithm,
compressible_training_tensors)
layer_for_inference.build(input_shape)
if layer.get_weights():
# Set weights of layer for inference according to what was trained.
inference_weights = _map_to_inference_weights(
layer.get_weights(), algorithm, compressible_training_tensors)
layer_for_inference.set_weights(inference_weights)
return layer_for_inference
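# --- Illustrative sketch (not part of the original file) ---
# The functions above expect `algorithm` to be a duck-typed object providing
# get_compressible_weights / init_training_weights_repr / training / compress / decompress,
# where init_training_weights_repr returns dataclasses whose fields are valid `add_weight`
# kwargs. The no-op "compression" below only illustrates that contract; the field names and
# the use of tf.keras.initializers.Constant are assumptions drawn from the comments above,
# not a verified public API.
@dataclasses.dataclass
class _ExampleWeightRepr:
  name: str
  shape: Any
  dtype: Any
  initializer: Any
class _IdentityCompressionAlgorithm(object):
  """Toy algorithm that leaves the kernel untouched end to end."""
  def get_compressible_weights(self, layer):
    # Treat only layers with a `kernel` attribute (e.g. Dense, Conv2D) as compressible.
    return ['kernel'] if hasattr(layer, 'kernel') else []
  def init_training_weights_repr(self, pretrained_weight):
    return [
        _ExampleWeightRepr(
            name='kernel_copy',
            shape=pretrained_weight.shape,
            dtype=pretrained_weight.dtype,
            initializer=tf.keras.initializers.Constant(pretrained_weight))
    ]
  def training(self, kernel_copy):
    return kernel_copy
  def compress(self, kernel_copy):
    return [kernel_copy]
  def decompress(self, kernel_copy):
    return kernel_copy
# Hypothetical usage (kept in a comment so importing this module stays side-effect free):
#   dense = tf.keras.layers.Dense(4)
#   dense.build((None, 8))
#   wrapped = create_layer_for_training(dense, _IdentityCompressionAlgorithm())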
| StarcoderdataPython |
1772683 | <gh_stars>100-1000
"""
Battle commands. They only can be used when a character is in a combat.
"""
from evennia.utils import logger
from muddery.server.commands.base_command import BaseCommand
from muddery.server.utils.localized_strings_handler import _
class CmdCombatInfo(BaseCommand):
"""
Get combat info.
Usage:
{"cmd":"combat_info",
"args":""
}
Observes your combat.
"""
key = "combat_info"
locks = "cmd:all()"
def func(self):
"""
Return the overall combat informations to the caller.
"""
caller = self.caller
if not caller.is_in_combat():
# If the caller is not in combat.
caller.msg({"msg":_("You are not in combat!")})
return
# Get combat's appearance and the character's available commands.
appearance = caller.ndb.combat_handler.get_appearance()
message = {"combat_info": appearance,
"combat_commands": caller.get_combat_commands()}
caller.msg(message)
class CmdLeaveCombat(BaseCommand):
"""
Get combat info.
Usage:
{"cmd":"leave_combat",
"args":""
}
Observes your combat.
"""
key = "leave_combat"
locks = "cmd:all()"
def func(self):
"""
Left the current combat.
"""
caller = self.caller
if not caller.is_in_combat():
# If the caller is not in combat.
caller.msg({"msg":_("You are not in combat!")})
return
caller.leave_combat()
# ------------------------------------------------------------
# cast a skill in combat
# ------------------------------------------------------------
class CmdCastCombatSkill(BaseCommand):
"""
Cast a skill when the caller is in combat.
Usage:
{
"cmd": "cast_combat_skill",
"args": <skill's key>,
}
or:
{
"cmd": "cast_combat_skill",
"args":
{
"skill": <skill's key>,
"target": <skill's target>,
}
}
"""
key = "cast_combat_skill"
locks = "cmd:all()"
    help_category = "General"
def func(self):
"Cast a skill in a combat."
caller = self.caller
args = self.args
if not caller.is_alive():
caller.msg({"alert": _("You are died.")})
return
if not caller.is_in_combat():
caller.msg({"alert": _("You can only cast this skill in a combat.")})
return
if caller.is_auto_cast_skill():
caller.msg({"alert": _("You can not cast skills manually.")})
return
if not args:
caller.msg({"alert": _("You should select a skill to cast.")})
return
# get skill and target
skill_key = None
        target_id = None  # default when the client does not specify a target
if isinstance(args, str):
# If the args is a skill's key.
skill_key = args
else:
# If the args is skill's key and target.
if not "skill" in args:
caller.msg({"alert": _("You should select a skill to cast.")})
return
skill_key = args["skill"]
# Get target
if "target" in args:
target_id = int(args["target"])
try:
# cast this skill.
caller.cast_combat_skill(skill_key, target_id)
except Exception as e:
caller.msg({"alert": _("Can not cast this skill.")})
logger.log_tracemsg("Can not cast skill %s: %s" % (skill_key, e))
return
| StarcoderdataPython |
157545 | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud domains registrations search-domains` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.domains import registrations
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.domains import resource_args
from googlecloudsdk.command_lib.domains import util
_FORMAT = """\
table(
domainName:label=DOMAIN,
availability:label=AVAILABILITY,
yearlyPrice.price():label=YEARLY_PRICE,
domainNotices.list():label=NOTICES
)
"""
class SearchDomains(base.DescribeCommand):
"""Search for available domains.
Search for available domains relevant to a specified query.
This command uses cached domain name availability information. Use the
get-register-params command to get up-to-date availability information.
## EXAMPLES
To search for domains for ``my-new-project'', run:
$ {command} my-new-project
To search for a specific domain, like ``example.com'', and get suggestions for
other domain endings, run:
$ {command} example.com
"""
@staticmethod
def Args(parser):
resource_args.AddLocationResourceArg(parser, 'to search domains in')
parser.display_info.AddTransforms({'price': util.TransformMoneyType})
parser.display_info.AddFormat(_FORMAT)
base.Argument(
'domain_query',
help=('Domain search query. '
'May be a domain name or arbitrary search terms.'),
).AddToParser(parser)
def Run(self, args):
"""Run the search domains command."""
api_version = registrations.GetApiVersionFromArgs(args)
client = registrations.RegistrationsClient(api_version)
location_ref = args.CONCEPTS.location.Parse()
    # Sending the query directly to the server (without normalization).
suggestions = client.SearchDomains(location_ref, args.domain_query)
for s in suggestions:
try:
s.domainName = util.PunycodeToUnicode(s.domainName)
except UnicodeError:
pass # Do not change the domain name.
if not suggestions:
suggestions.append(client.messages.RegisterParameters())
return suggestions
| StarcoderdataPython |
4808547 | <filename>test/end-to-end/Graphs/Python/algorithms/depth_first_search.py
from data.unweighted_node import UnweightedNode
from data.weighted_node import WeightedNode
def unweighted_depth_first_search(start):
nodes = []
visited = set()
traverse_unweighted_depth_first_search(start, nodes, visited)
return nodes
def traverse_unweighted_depth_first_search(start, nodes, visited):
nodes.append(start)
visited.add(start)
for neighbor in start.get_neighbors_in_order():
if not neighbor in visited:
traverse_unweighted_depth_first_search(neighbor, nodes, visited)
def weighted_depth_first_search(start):
nodes = []
visited = set()
traverse_weighted_depth_first_search(start, nodes, visited)
return nodes
def traverse_weighted_depth_first_search(start, nodes, visited):
nodes.append(start)
visited.add(start)
for node in start.get_neighbors_in_order():
if not node in visited:
traverse_weighted_depth_first_search(node, nodes, visited)
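# --- Illustrative sketch (not part of the original test data module) ---
# The traversal functions above only rely on nodes exposing get_neighbors_in_order(), so a
# tiny stand-in node is used here instead of guessing the real UnweightedNode constructor.
if __name__ == "__main__":
    class _StubNode:
        def __init__(self, value):
            self.value = value
            self.neighbors = []
        def get_neighbors_in_order(self):
            return self.neighbors
    a, b, c = _StubNode("a"), _StubNode("b"), _StubNode("c")
    a.neighbors = [b, c]
    b.neighbors = [c]
    print([n.value for n in unweighted_depth_first_search(a)])  # ['a', 'b', 'c']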
| StarcoderdataPython |
1633626 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 9 22:56:24 2020
@author: <NAME>
Input files: quantification tsv file
Output file: pdf files
Description: Used to plot scatter plots of technical replicates
"""
import argparse
import pandas as pd
import matplotlib
#do not use Xwindows backend
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import itertools
from scipy import stats
import math
# function to plot scatter plots
def scatter_plot(TPM_log, TPMs, name_1, name_2, lim_plot, organism):
g1 = sns.JointGrid('rep1',y='rep2', data=TPM_log, height = 6)
g1 = g1.plot_joint(plt.scatter,edgecolor="black", linewidth = 0.5)
# calculate pearson's correlation coefficient
stat = lambda a,b: stats.pearsonr(TPMs.rep1,TPMs.rep2)
# add label with pearson's correlation coefficient
g1 =g1.annotate(stat, template="{stat}: {val:.4f}", stat="Pearson's r", loc="upper left", fontsize=15) # {stat}: {val:.2f} (p = {p:.3g}) with p-value
g1.ax_marg_x.set_axis_off()
g1.ax_marg_y.set_axis_off()
plt.xlabel(r'$\mathrm{log_{10}TPM}$'+ '\n' + '\n' + name_1.split('_TPM')[0] ,fontsize=15, labelpad= 5)
plt.ylabel(name_2.split('_TPM')[0] + '\n' + '\n'+ r'$\mathrm{log_{10}TPM}$', fontsize=15, labelpad =5)
plt.xlim(-0.15, lim_plot)
plt.ylim(-0.15, lim_plot)
plt.title(organism,fontsize=15,fontweight="bold")
plt.tick_params(axis="both", labelsize=15)
plt.subplots_adjust(bottom=0.1)
# save plot
plt.savefig('scatter_plot_' + name_1 + '_' + name_2 + '_' + organism + '.pdf', dpi = 300,bbox_inches='tight')
plt.close(plt.gcf())
parser = argparse.ArgumentParser(description="""Plots scatter plots of replicates""")
parser.add_argument("-q", "--quantification_table", metavar='<quantification_table_host>', help="Path to the quantification table")
parser.add_argument("-a", "--gene_attribute", help="gene attribute")
parser.add_argument("-org", "--organism", help = 'host or pathogen')
args = parser.parse_args()
quantification_table_path = args.quantification_table
gene_attribute = args.gene_attribute
# read quantification table as data frame
col_names = pd.read_csv(quantification_table_path, sep = '\t', nrows=0).columns
types_dict = {gene_attribute: str}
types_dict.update({col: float for col in col_names if col not in types_dict})
quantification_table = pd.read_csv(quantification_table_path, sep = '\t',index_col=0,dtype=types_dict)
# extract columns with 'TPM' suffix
TMP_column = [column for column in quantification_table.columns if 'TPM' in column]
#find axes' limits
TPM_table_plus_1 = quantification_table[TMP_column] + 1
TPM_table_log = TPM_table_plus_1.apply(np.log10, axis=1)
max_TPM = TPM_table_log.max().max()
lim_plot = math.ceil(max_TPM) + 0.5
# remove '_TPM' suffix from sample names
columns_conditions_no_TPM = [column[:-4] for column in TMP_column]
# extract conditions - remove indication of which replicate it is (_1, _2 or _3)
columns_conditions = [name.rsplit('_', 1)[0] for name in columns_conditions_no_TPM]
# find positions of technical replicates in quantification table
patterns = {}
for i in range(0,len(columns_conditions)): #iterate over number of samples
prefix = columns_conditions[i] # extract condition
d1 = {prefix:[i]} # dictionary with condition and its position in columns conditions, which is equivalent to positions in quantification table
if prefix in patterns.keys(): # if condition already exist in the dictionary, then append the position
patterns[prefix].insert(patterns[prefix][0],list(d1.values())[0][0])
else: # append condition and its position
patterns.update(d1)
# create list of replicates' position lists, e.g. [[1, 2, 0], [3, 4, 5], [6, 7, 8]]
condition_list = [patterns[condition] for condition in patterns.keys()]
# plot scatter plots of technical replicates
for cond in condition_list: # iterate over list of technical replicates' positions
if len(cond) > 1:
sample_cond = [TMP_column[c] for c in cond] # extract column names for replicates in quantification table
TPMs = quantification_table[sample_cond]
combinations = list(itertools.combinations(TPMs, 2)) # create list of possible combinations of sample pairs
for com in combinations: # iterate over sample pair combinations
TPMs = pd.concat([quantification_table[com[0]], quantification_table[com[1]]], axis=1) # extract TPM values of two replicates
TPMs.columns = ['rep1','rep2']
TPM_plus1 = TPMs + 1 # add 1 to deal with 0 values
TPM_log = TPM_plus1.apply(np.log10, axis=1) # log-transformation
#plot scatter plot
scatter_plot(TPM_log,TPMs, com[0],com[1],lim_plot, args.organism)
| StarcoderdataPython |
1772373 | import inspect
class ProblemSizeCounter:
def __init__ (self, J, F, L, M, P):
self._initNumberOfVariables(J, F, L, M, P)
self._initNumberOfConstraints(J, F, L, M, P)
def _initNumberOfVariables(self, J, F, L, M, P):
self.numberOfVariablesX = P * L * F
self.numberOfVariablesY = P * F * J
self.totalNumberOfVariables = self.numberOfVariablesX + self.numberOfVariablesY
def _initNumberOfConstraints(self, J, F, L, M, P):
self.numberOfDemandConstraints = J * P
self.numberOfMachineCapacityConstraints = L * F
self.numberOfVariablesCompatibilityConstraints = P * F
self.numberOfResourcesConstraints = M * F
self.totalNumberOfConstraints = self.numberOfDemandConstraints \
+ self.numberOfMachineCapacityConstraints \
+ self.numberOfVariablesCompatibilityConstraints \
+ self.numberOfResourcesConstraints
def __str__ (self):
attributesToPrint = [
"numberOfVariablesX",
"numberOfVariablesY",
"totalNumberOfVariables",
"numberOfDemandConstraints",
"numberOfMachineCapacityConstraints",
"numberOfVariablesCompatibilityConstraints",
"numberOfResourcesConstraints",
"totalNumberOfConstraints"
]
string = "ProblemSizeCounter[\n"
for attribute in attributesToPrint:
value = getattr(self, attribute)
string += f"\t{attribute} = {value}\n"
string += "]"
return string
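# Illustrative usage (not part of the original module): the meanings of J, F, L, M and P are
# not documented here, so the values below are arbitrary small integers chosen only to show
# how the counter is constructed and printed.
if __name__ == "__main__":
    counter = ProblemSizeCounter(J=3, F=2, L=4, M=5, P=6)
    print(counter)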
| StarcoderdataPython |
3314972 | from pyutilib.component.core import *
class IPackage2Util(Interface):
"""Interface for Package2 utilities"""
class Package2Util(Plugin):
    implements(IPackage2Util)
| StarcoderdataPython |
3206104 | class LineOutput:
"""
default output class, output key and value
as (key, value) pair separated by separator
"""
@staticmethod
def collect(key, value, separator = '\t'):
"""
collect the key and value, output them to
a line separated by a separator character
@param key: key part in (key, value) pair
@type key: C{string}
@param value: value part in (key, value) pair
@type value: C{string}
@param separator: character to separate the key and value
@type separator: C{string}
"""
keystr = str(key)
valuestr = str(value)
print '%s%s%s' % (keystr, separator, valuestr)
| StarcoderdataPython |
4819967 | <filename>tests/test_falcon_idempotency.py
import falcon
import pytest
from falcon import testing
def resolve_request_method(client, request_method):
"""
Simple utility which can be used to ease the burden of
parametrizing for POST and DELETE metods
Parameters
----------
    client: falcon.testing.TestClient
request_method: str
Expecting one of: 'post', or 'delete'
Returns
-------
func
Bounded method from client
"""
if request_method == "post":
return client.simulate_post
elif request_method == "delete":
return client.simulate_delete
else:
raise ValueError("Invalid request_method parameter")
class TestSimpleIdempotency(object):
"""
Test `falcon_idempotency.middlewares.SimpleIdempotencyMiddleware`
"""
@pytest.mark.parametrize("request_method", ["delete", "post"])
def test_inapplicable_requests(self, client, request_method):
"""
Ensure that we do not consider requests which do not
include valid idempotency keys
"""
simulate_request = resolve_request_method(client, request_method)
# Send a request with no headers at all
first_request = simulate_request("/mock")
second_request = simulate_request("/mock")
first_request.headers["trans_id"] != second_request.headers["trans_id"]
# Send a request with key set to None; these should be ignored
first_request = simulate_request("/mock", headers={"Idempotency-Key": None})
second_request = simulate_request("/mock", headers={"Idempotency-Key": None})
first_request.headers["trans_id"] != second_request.headers["trans_id"]
# Send a request with key set to ''; these should be ignored
first_request = simulate_request("/mock", headers={"Idempotency-Key": ""})
second_request = simulate_request("/mock", headers={"Idempotency-Key": ""})
first_request.headers["trans_id"] != second_request.headers["trans_id"]
@pytest.mark.parametrize("request_method", ["delete", "post"])
def test_unrelated_requests(self, client, request_method):
"""
Ensure that we do not couple two unique requests' responses
if their idempotency keys do not match
"""
simulate_request = resolve_request_method(client, request_method)
first_request = simulate_request("/mock", headers={"Idempotency-Key": "ABCD"})
second_request = simulate_request("/mock", headers={"Idempotency-Key": "DEFG"})
first_request.headers["trans_id"] != second_request.headers["trans_id"]
@pytest.mark.parametrize("request_method", ["delete", "post"])
def test_related_requests(self, client, request_method):
"""
Ensure we send the first response for a second request with the
same idempotency key
"""
simulate_request = resolve_request_method(client, request_method)
first_request = simulate_request("/mock", headers={"Idempotency-Key": "ABCD"})
second_request = simulate_request("/mock", headers={"Idempotency-Key": "ABCD"})
first_request.headers["trans_id"] == second_request.headers["trans_id"]
class TestIdempotencyMiddleware(object):
"""
Tests associated with `falcon_idempotency.middlewares.IdempotencyMiddleware`.
Notes
-----
The tests are similar to that of `TestSimpleIdempotencyMiddleware`. However,
note that the implementations of `IdempotencyMiddleware` require functionality
to be explicitly enabled via the use of mixins/class attributes.
"""
@pytest.mark.parametrize(
["endpoint", "request_method"],
[
("/postonly", "post"),
("/postonly", "delete"),
("/deleteonly", "post"),
("/deleteonly", "delete"),
],
)
def test_inapplicable_requests(self, client2, endpoint, request_method):
"""
Ensure that we do not consider requests which do not
include valid idempotency keys
"""
simulate_request = resolve_request_method(client2, request_method)
# Send a request with no headers at all
first_request = simulate_request("/postonly")
second_request = simulate_request("/postonly")
first_request.headers["trans_id"] != second_request.headers["trans_id"]
# Send a request with key set to None; these should be ignored
first_request = simulate_request("/postonly", headers={"Idempotency-Key": None})
second_request = simulate_request(
"/postonly", headers={"Idempotency-Key": None}
)
first_request.headers["trans_id"] != second_request.headers["trans_id"]
# Send a request with key set to ''; these should be ignored
first_request = simulate_request("/postonly", headers={"Idempotency-Key": ""})
second_request = simulate_request("/postonly", headers={"Idempotency-Key": ""})
first_request.headers["trans_id"] != second_request.headers["trans_id"]
    def test_posts_are_idempotent(self, client2):
        """
        Ensure POST requests sent to /postonly are idempotent
"""
first_request = client2.simulate_post(
"/postonly", headers={"Idempotency-Key": "ABCD"}
)
second_request = client2.simulate_post(
"/postonly", headers={"Idempotency-Key": "ABCD"}
)
first_request.headers["trans_id"] == second_request.headers["trans_id"]
def test_deletes_are_not_idempotent(self, client2):
"""
Ensure that deletes are not idempotent regardless if the keys
are identical. This is because resource associated with /postonly
endpoint does not explicitly enable idempotent deletes
"""
first_request = client2.simulate_delete(
"/postonly", headers={"Idempotency-Key": "ABCD"}
)
second_request = client2.simulate_delete(
"/postonly", headers={"Idempotency-Key": "ABCD"}
)
first_request.headers["trans_id"] != second_request.headers["trans_id"]
def test_posts_are_not_idempotent(self, client2):
"""
Ensure that posts are not idempotent regardless if the keys
are identical. This is because resource associated with /deleteonly
endpoint does not explicitly enable idempotent posts
"""
first_request = client2.simulate_post(
"/deleteonly", headers={"Idempotency-Key": "ABCD"}
)
second_request = client2.simulate_post(
"/deleteonly", headers={"Idempotency-Key": "ABCD"}
)
first_request.headers["trans_id"] != second_request.headers["trans_id"]
def test_deletes_are_idempotent(self, client2):
"""
Ensure delete requests send to /postonly are idempotent
"""
first_request = client2.simulate_delete(
"/deleteonly", headers={"Idempotency-Key": "ABCD"}
)
second_request = client2.simulate_delete(
"/deleteonly", headers={"Idempotency-Key": "ABCD"}
)
first_request.headers["trans_id"] == second_request.headers["trans_id"]
| StarcoderdataPython |
3264835 | <reponame>bcornelusse/microgrid-bench<filename>microgrid/model/generator.py
from microgrid.model.device import Device
class Generator(Device):
def __init__(self, name, params):
"""
:param name: Cf. parent class
        :param params: dictionary of params, must include a capacity value, a steerable flag, and a min_stable_generation value
"""
super(Generator, self).__init__(name)
self.capacity = None
self.steerable = False
self.min_stable_generation = 0.0
for k in params.keys():
if k in self.__dict__.keys():
self.__setattr__(k, params[k])
assert (self.capacity is not None)
| StarcoderdataPython |
1702383 | import os
def init():
if not os.path.exists('db.json'):
import json
from collections import OrderedDict
init_data = OrderedDict()
accounts = OrderedDict()
notes = OrderedDict()
init_data['auto_named'] = 0
init_data['accounts'] = accounts
init_data['notes'] = notes
with open('db.json', 'w') as fp:
json.dump(init_data, fp)
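# Resulting db.json layout (written only when the file does not exist yet):
# {"auto_named": 0, "accounts": {}, "notes": {}}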
| StarcoderdataPython |
3326050 | from django.core.management.base import BaseCommand
from fluff.pillow import FluffPillowProcessor
from pillowtop.utils import get_pillow_by_name
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'pillow_name',
)
parser.add_argument(
'--noinput',
action='store_true',
dest='noinput',
default=False,
help='Skip important confirmation warnings.',
)
def handle(self, pillow_name, **options):
pillow = get_pillow_by_name(pillow_name)
if not options['noinput']:
confirm = input(
"""
You have requested to wipe %s table
Type 'yes' to continue, or 'no' to cancel:
""" % pillow.pillow_id
)
if confirm != 'yes':
print("\tWipe cancelled.")
return
for processor in pillow.processors:
engine = processor.get_sql_engine()
table = processor.indicator_class().table
engine.execute(table.delete())
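# Typical invocation (illustrative; the actual command name is whatever filename this module
# is saved under inside a management/commands/ package):
#   python manage.py <command_name> <pillow_name> --noinput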
| StarcoderdataPython |
1614996 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from classifier.config import configurable
from .utils import *
from .build import ARCHITECTURE_REGISTRY
@ARCHITECTURE_REGISTRY.register()
class resnet18mtl(nn.Module):
@configurable
def __init__(self, in_chan=3, out_chan=[2], pretrained=True, downsample=0):
super(resnet18mtl, self).__init__()
if type(out_chan) is not list:
out_chan = [out_chan]
self.in_chan = in_chan
self.out_chan = out_chan
self.pretrained = pretrained
self.downsample = downsample
self.model = torchvision.models.resnet18(pretrained=pretrained)
if in_chan != 3:
self.model.conv1 = nn.Conv2d(in_chan, 64, kernel_size=7, stride=2, padding=3, bias=False)
heads = []
for out_c in out_chan:
heads.append(nn.Linear(512, out_c))
self.model.outs = nn.ModuleList(heads)
if downsample >= 1:
self.model.conv1 = nn.Conv2d(in_chan, 64, kernel_size=7, stride=1, padding=3, bias=False)
if downsample >= 2:
self.model.maxpool = Identity()
if downsample >= 3:
self.model.layer2[0].conv1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.model.layer2[0].downsample[0] = nn.Conv2d(64, 128, kernel_size=1, stride=1, bias=False)
if downsample >= 4:
self.model.layer3[0].conv1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=False)
self.model.layer3[0].downsample[0] = nn.Conv2d(128, 256, kernel_size=1, stride=1, bias=False)
if downsample >= 5:
self.model.layer4[0].conv1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=False)
self.model.layer4[0].downsample[0] = nn.Conv2d(256, 512, kernel_size=1, stride=1, bias=False)
@classmethod
def from_config(cls, cfg):
return {
"in_chan": cfg.MODEL.CHANIN,
"out_chan": cfg.MODEL.CHANOUT,
"pretrained": cfg.MODEL.PRETRAINED,
"downsample": cfg.MODEL.DOWNSAMPLE,
}
def return_vis_layer(self):
return self.model.layer4[-1]
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = self.model.avgpool(x)
x = torch.flatten(x, 1)
outs = []
for fc in self.model.outs:
outs.append(fc(x))
return outs
@ARCHITECTURE_REGISTRY.register()
class resnet34mtl(nn.Module):
@configurable
def __init__(self, in_chan=3, out_chan=[2], pretrained=True, downsample=0):
super(resnet34mtl, self).__init__()
if type(out_chan) is not list:
out_chan = [out_chan]
self.model = torchvision.models.resnet34(pretrained=pretrained)
if in_chan != 3:
self.model.conv1 = nn.Conv2d(in_chan, 64, kernel_size=7, stride=2, padding=3, bias=False)
heads = []
for out_c in out_chan:
heads.append(nn.Linear(512, out_c))
self.model.outs = nn.ModuleList(heads)
if downsample >= 1:
self.model.conv1 = nn.Conv2d(in_chan, 64, kernel_size=7, stride=1, padding=3, bias=False)
if downsample >= 2:
self.model.maxpool = Identity()
if downsample >= 3:
self.model.layer2[0].conv1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.model.layer2[0].downsample[0] = nn.Conv2d(64, 128, kernel_size=1, stride=1, bias=False)
if downsample >= 4:
self.model.layer3[0].conv1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=False)
self.model.layer3[0].downsample[0] = nn.Conv2d(128, 256, kernel_size=1, stride=1, bias=False)
if downsample >= 5:
self.model.layer4[0].conv1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=False)
self.model.layer4[0].downsample[0] = nn.Conv2d(256, 512, kernel_size=1, stride=1, bias=False)
@classmethod
def from_config(cls, cfg):
return {
"in_chan": cfg.MODEL.CHANIN,
"out_chan": cfg.MODEL.CHANOUT,
"pretrained": cfg.MODEL.PRETRAINED,
"downsample": cfg.MODEL.DOWNSAMPLE,
}
def return_vis_layer(self):
return self.model.layer4[-1]
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = self.model.avgpool(x)
x = torch.flatten(x, 1)
outs = []
for fc in self.model.outs:
outs.append(fc(x))
return outs
@ARCHITECTURE_REGISTRY.register()
class resnet50mtl(nn.Module):
@configurable
def __init__(self, in_chan=3, out_chan=[2], pretrained=True, downsample=0):
super(resnet50mtl, self).__init__()
if type(out_chan) is not list:
out_chan = [out_chan]
self.model = torchvision.models.resnet50(pretrained=pretrained)
if in_chan != 3:
self.model.conv1 = nn.Conv2d(in_chan, 64, kernel_size=7, stride=2, padding=3, bias=False)
heads = []
for out_c in out_chan:
heads.append(nn.Linear(2048, out_c))
self.model.outs = nn.ModuleList(heads)
if downsample >= 1:
self.model.conv1 = nn.Conv2d(in_chan, 64, kernel_size=7, stride=1, padding=3, bias=False)
if downsample >= 2:
self.model.maxpool = Identity()
if downsample >= 3:
self.model.layer2[0].conv2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.model.layer2[0].downsample[0] = nn.Conv2d(256, 512, kernel_size=1, stride=1, bias=False)
if downsample >= 4:
self.model.layer3[0].conv2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False)
self.model.layer3[0].downsample[0] = nn.Conv2d(512, 1024, kernel_size=1, stride=1, bias=False)
if downsample >= 5:
self.model.layer4[0].conv2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
self.model.layer4[0].downsample[0] = nn.Conv2d(1024, 2048, kernel_size=1, stride=1, bias=False)
@classmethod
def from_config(cls, cfg):
return {
"in_chan": cfg.MODEL.CHANIN,
"out_chan": cfg.MODEL.CHANOUT,
"pretrained": cfg.MODEL.PRETRAINED,
"downsample": cfg.MODEL.DOWNSAMPLE,
}
def return_vis_layer(self):
return self.model.layer4[-1]
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = self.model.avgpool(x)
x = torch.flatten(x, 1)
outs = []
for fc in self.model.outs:
outs.append(fc(x))
return outs
@ARCHITECTURE_REGISTRY.register()
class resnet101mtl(nn.Module):
@configurable
def __init__(self, in_chan=3, out_chan=[2], pretrained=True, downsample=0):
super(resnet101mtl, self).__init__()
if type(out_chan) is not list:
out_chan = [out_chan]
self.model = torchvision.models.resnet101(pretrained=pretrained)
if in_chan != 3:
self.model.conv1 = nn.Conv2d(in_chan, 64, kernel_size=7, stride=2, padding=3, bias=False)
heads = []
for out_c in out_chan:
heads.append(nn.Linear(2048, out_c))
self.model.outs = nn.ModuleList(heads)
if downsample >= 1:
self.model.conv1 = nn.Conv2d(in_chan, 64, kernel_size=7, stride=1, padding=3, bias=False)
if downsample >= 2:
self.model.maxpool = Identity()
if downsample >= 3:
self.model.layer2[0].conv2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.model.layer2[0].downsample[0] = nn.Conv2d(256, 512, kernel_size=1, stride=1, bias=False)
if downsample >= 4:
self.model.layer3[0].conv2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False)
self.model.layer3[0].downsample[0] = nn.Conv2d(512, 1024, kernel_size=1, stride=1, bias=False)
if downsample >= 5:
self.model.layer4[0].conv2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
self.model.layer4[0].downsample[0] = nn.Conv2d(1024, 2048, kernel_size=1, stride=1, bias=False)
@classmethod
def from_config(cls, cfg):
return {
"in_chan": cfg.MODEL.CHANIN,
"out_chan": cfg.MODEL.CHANOUT,
"pretrained": cfg.MODEL.PRETRAINED,
"downsample": cfg.MODEL.DOWNSAMPLE,
}
def return_vis_layer(self):
return self.model.layer4[-1]
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = self.model.avgpool(x)
x = torch.flatten(x, 1)
outs = []
for fc in self.model.outs:
outs.append(fc(x))
return outs
@ARCHITECTURE_REGISTRY.register()
class resnet152mtl(nn.Module):
@configurable
def __init__(self, in_chan=3, out_chan=[2], pretrained=True, downsample=0):
super(resnet152mtl, self).__init__()
if type(out_chan) is not list:
out_chan = [out_chan]
self.model = torchvision.models.resnet152(pretrained=pretrained)
if in_chan != 3:
self.model.conv1 = nn.Conv2d(in_chan, 64, kernel_size=7, stride=2, padding=3, bias=False)
heads = []
for out_c in out_chan:
heads.append(nn.Linear(2048, out_c))
self.model.outs = nn.ModuleList(heads)
if downsample >= 1:
self.model.conv1 = nn.Conv2d(in_chan, 64, kernel_size=7, stride=1, padding=3, bias=False)
if downsample >= 2:
self.model.maxpool = Identity()
if downsample >= 3:
self.model.layer2[0].conv2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.model.layer2[0].downsample[0] = nn.Conv2d(256, 512, kernel_size=1, stride=1, bias=False)
if downsample >= 4:
self.model.layer3[0].conv2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False)
self.model.layer3[0].downsample[0] = nn.Conv2d(512, 1024, kernel_size=1, stride=1, bias=False)
if downsample >= 5:
self.model.layer4[0].conv2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
self.model.layer4[0].downsample[0] = nn.Conv2d(1024, 2048, kernel_size=1, stride=1, bias=False)
@classmethod
def from_config(cls, cfg):
return {
"in_chan": cfg.MODEL.CHANIN,
"out_chan": cfg.MODEL.CHANOUT,
"pretrained": cfg.MODEL.PRETRAINED,
"downsample": cfg.MODEL.DOWNSAMPLE,
}
def return_vis_layer(self):
return self.model.layer4[-1]
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = self.model.avgpool(x)
x = torch.flatten(x, 1)
outs = []
for fc in self.model.outs:
outs.append(fc(x))
return outs
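# --- Illustrative usage (not part of the original file) ---
# Assuming the @configurable decorator also accepts direct keyword construction (as in
# detectron2-style configurables), a multi-task ResNet-18 with two heads could be built as:
#   model = resnet18mtl(in_chan=3, out_chan=[2, 5], pretrained=False, downsample=0)
#   logits_per_task = model(torch.randn(1, 3, 224, 224))  # list of two tensors: (1, 2) and (1, 5)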
| StarcoderdataPython |
87817 | <reponame>QuintonWeenink/distributed-system
import json
import subprocess
execfile("member.py")
class Clue:
def __init__(self, id, data):
self.data = data
self.id = id
class iRummy:
def __init__(self, crewSize, port):
print json.dumps(self.wake())
print json.dumps(self.prepare())
self.images = 20
self.clues = {}
self.crew = []
self.crewSize = crewSize
self.port = port
res = self.add(str(self.crewSize))
print res['message']
crewids = res['data']
self.createMembers(crewids)
print json.dumps(self.shipout())
def getPirateClues(self):
res = self.getClues()
print res['message']
data = res['data']
for pirate in data:
for member in self.crew:
if member.res['id'] == pirate["id"]:
member.clues = pirate["data"]
    # NOTE: the two methods below reference attributes (empCount, name, salary) that are never
    # set on iRummy; they appear to be leftovers from a tutorial example class and will raise
    # AttributeError if called.
    def displayState(self):
        print "State: %d" % self.empCount
    def displayEmployee(self):
        print "Name : ", self.name, ", Salary: ", self.salary
def wake(self):
print "Rummy.wake called"
return self.reqRummy( ["-w"] )
def createMembers(self, crewids):
print "Rummy.createMembers called with " + str(crewids)
for id in crewids:
self.crew.append(Member(id))
def cleanUpMembers(self, killed):
print "Rummy.cleanUpMembers called"
for member in killed:
self.crew.remove(member)
def gather(self):
print "Rummy.gather called"
return self.reqRummy( ["-g"] )
def unlock(self):
print "Rummy.unlock called"
return self.reqRummy( ["-u"] )
def prepare(self):
print "Rummy.prepare called"
return self.reqRummy( ["-p"] )
def add(self, size):
print "Rummy.add called"
return self.reqRummy( ["-a", str(size)] )
def remove(self, pirates):
print "Rummy.remove called"
return self.reqRummy( ["-r", str(pirates)] )
def shipout(self):
print "Rummy.shipout called"
return self.reqRummy( ["-s"] )
def getClues(self):
print "Rummy.getClues called"
self.clues = self.reqRummy( ["-c"] )
return self.clues
def verify(self, clues):
#print "Rummy.verify called"
return self.reqRummy( ["-v", json.dumps(clues)] )
def reqRummy(self, commands):
args = ["python", "rummy.pyc"]
for command in commands:
args.append(command)
output = subprocess.check_output(args).strip()
obj = json.loads(str(output))
return obj
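    # reqRummy shells out to the bundled rummy.pyc (e.g. "python rummy.pyc -a 3" for add(3))
    # and parses the JSON object the subprocess prints to stdout.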
def printCrew(self):
for pirate in self.crew:
print "{"
pirate.toString()
print "}" | StarcoderdataPython |
27134 | <reponame>jdavidagudelo/tensorflow-models<filename>research/steve/toy_demo.py
from __future__ import division
from __future__ import print_function
from builtins import range
from past.utils import old_div
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
### Hyperparameters
NONTERMINAL_STATE_COUNT = 100
NOISE_AMOUNT = 0.1
TRAIN_STEPS = 10000
Q_ENSEMBLE_SIZE = 8
MODEL_ENSEMBLE_SIZE = 8
HORIZON = 5
TRIAL_N = 10
### Helper functions
initial_state = 0
terminal_state = NONTERMINAL_STATE_COUNT + 1
nonterminal_state_count = NONTERMINAL_STATE_COUNT
state_count = NONTERMINAL_STATE_COUNT + 1
final_reward = NONTERMINAL_STATE_COUNT
colors = sns.color_palette('husl', 4)
plt.rcParams["figure.figsize"] = (6, 5)
def step(state):
if state == terminal_state:
next_state = terminal_state
else:
next_state = state + 1
if state == terminal_state:
reward = 0
elif state + 1 == terminal_state:
reward = final_reward
else:
reward = -1
return next_state, reward
def noisy_step(state):
if state == terminal_state:
next_state = terminal_state
elif np.random.random([]) < NOISE_AMOUNT:
next_state = np.random.randint(0, state_count)
else:
next_state = state + 1
if state == terminal_state:
reward = 0
elif state + 1 == terminal_state:
reward = final_reward
else:
reward = -1
return next_state, reward
def get_error(Q):
losses = np.square(np.arange(state_count) - Q[:-1])
return np.mean(losses)
def downsample(array, factor):
pad_size = np.ceil(old_div(float(array.size), factor)) * factor - array.size
array_padded = np.append(array, np.zeros([pad_size.astype(np.int64)]) * np.NaN)
    return np.nanmean(array_padded.reshape(-1, factor), axis=1)  # chunk-wise mean, ignoring the NaN padding
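# Standalone sketch (added for illustration; the experiments below inline this
# logic rather than calling it): the STEVE sections combine the candidate targets
# from different rollout lengths by inverse-variance weighting, so horizons whose
# ensemble members disagree contribute little to the final target.
def steve_weighted_target(all_targets):
    """Combine per-horizon targets of shape [HORIZON + 1, ensemble_size]."""
    estimates = np.mean(all_targets, axis=1)                 # mean estimate per rollout length
    confidences = 1. / (np.var(all_targets, axis=1) + 1e-8)  # low variance -> high confidence
    coefficients = confidences / np.sum(confidences)         # normalized weights
    return np.sum(estimates * coefficients)                  # weighted combination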
######################
### Main experiments
######################
# Basic Q
if True:
print("Running basic Q-learning.")
trial_results = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [state_count + 1]).astype(np.float64)
Q[state_count] = 0
losses = []
for step_i in range(TRAIN_STEPS):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
Q[state] = reward + Q[next_state]
losses.append(get_error(Q))
trial_results.append(losses)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="Basic Q-learning", color=colors[0])
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[0])
with open('Toy-v1/baseline.csv', 'w') as f:
data = []
for frame_i in range(result.shape[0]):
for loss in result[frame_i]:
data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss))
f.write("\n".join(data))
# Ensemble Q
if True:
print("Running ensemble Q-learning.")
trial_results = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [Q_ENSEMBLE_SIZE, state_count + 1]).astype(np.float64)
Q[:, state_count] = 0
losses = []
for step_i in range(TRAIN_STEPS):
for q_ensemble_i in range(Q_ENSEMBLE_SIZE):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
Q[q_ensemble_i, state] = reward + np.mean(Q[:, next_state])
losses.append(get_error(np.mean(Q, axis=0)))
trial_results.append(losses)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="Ensemble Q-learning", color=colors[1])
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[1])
# Ensemble MVE-Oracle
if True:
print("Running ensemble oracle MVE.")
trial_results = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [Q_ENSEMBLE_SIZE, state_count + 1]).astype(np.float64)
Q[:, state_count] = 0
losses = []
for step_i in range(TRAIN_STEPS):
for q_ensemble_i in range(Q_ENSEMBLE_SIZE):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
# MVE rollout
target = reward
for _ in range(HORIZON):
next_state, reward = step(next_state)
target += reward
target += np.mean(Q[:, next_state])
Q[q_ensemble_i, state] = target
losses.append(get_error(np.mean(Q, axis=0)))
trial_results.append(losses)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="MVE-oracle", color=colors[2])
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[2])
with open('Toy-v1/mve_oracle.csv', 'w') as f:
data = []
for frame_i in range(result.shape[0]):
for loss in result[frame_i]:
data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss))
f.write("\n".join(data))
# Ensemble MVE-Noisy
if True:
print("Running ensemble noisy MVE.")
trial_results = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [Q_ENSEMBLE_SIZE, state_count + 1]).astype(np.float64)
Q[:, state_count] = 0
losses = []
for step_i in range(TRAIN_STEPS):
for q_ensemble_i in range(Q_ENSEMBLE_SIZE):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
# MVE rollout
targets = []
first_next_state, first_reward = next_state, reward
for model_ensemble_i in range(MODEL_ENSEMBLE_SIZE):
next_state, reward = first_next_state, first_reward
target = reward
for _ in range(HORIZON):
next_state, reward = noisy_step(next_state)
target += reward
target += np.mean(Q[:, next_state])
targets.append(target)
Q[q_ensemble_i, state] = np.mean(targets)
losses.append(get_error(np.mean(Q, axis=0)))
trial_results.append(losses)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="MVE-noisy", color=colors[2], linestyle='dotted')
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[2])
with open('Toy-v1/mve_noisy.csv', 'w') as f:
data = []
for frame_i in range(result.shape[0]):
for loss in result[frame_i]:
data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss))
f.write("\n".join(data))
# STEVE-Oracle
if True:
print("Running ensemble oracle STEVE.")
trial_results = []
oracle_q_estimate_errors = []
oracle_mve_estimate_errors = []
oracle_steve_estimate_errors = []
oracle_opt_estimate_errors = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [Q_ENSEMBLE_SIZE, state_count + 1]).astype(np.float64)
Q[:, state_count] = 0
losses = []
q_estimate_errors = []
mve_estimate_errors = []
steve_estimate_errors = []
opt_estimate_errors = []
steve_beat_freq = []
for step_i in range(TRAIN_STEPS):
_q_estimate_errors = []
_mve_estimate_errors = []
_steve_estimate_errors = []
_opt_estimate_errors = []
_steve_beat_freq = []
for q_ensemble_i in range(Q_ENSEMBLE_SIZE):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
# STEVE rollout
Q_est_mat = np.zeros([HORIZON + 1, Q_ENSEMBLE_SIZE])
reward_est_mat = np.zeros([HORIZON + 1, 1])
first_next_state, first_reward = next_state, reward
next_state, reward = first_next_state, first_reward
Q_est_mat[0, :] = Q[:, next_state]
reward_est_mat[0, 0] = reward
for timestep_i in range(1, HORIZON + 1):
next_state, reward = step(next_state)
Q_est_mat[timestep_i, :] = Q[:, next_state]
reward_est_mat[timestep_i, 0] = reward
all_targets = Q_est_mat + np.cumsum(reward_est_mat, axis=0)
# STEVE weight calculation
estimates = np.mean(all_targets, axis=1)
confidences = old_div(1., (np.var(all_targets, axis=1) + 1e-8))
coefficients = old_div(confidences, np.sum(confidences))
target = np.sum(estimates * coefficients)
Q[q_ensemble_i, state] = target
true_target = state + 1. if state != terminal_state else 0.
_q_estimate_errors.append(np.square(estimates[0] - true_target))
_mve_estimate_errors.append(np.square(estimates[-1] - true_target))
_steve_estimate_errors.append(np.square(np.sum(estimates * coefficients) - true_target))
_opt_estimate_errors.append(np.min(np.square(estimates - true_target)))
losses.append(get_error(np.mean(Q, axis=0)))
q_estimate_errors.append(np.mean(_q_estimate_errors))
mve_estimate_errors.append(np.mean(_mve_estimate_errors))
steve_estimate_errors.append(np.mean(_steve_estimate_errors))
opt_estimate_errors.append(np.mean(_opt_estimate_errors))
trial_results.append(losses)
oracle_q_estimate_errors.append(q_estimate_errors)
oracle_mve_estimate_errors.append(mve_estimate_errors)
oracle_steve_estimate_errors.append(steve_estimate_errors)
oracle_opt_estimate_errors.append(opt_estimate_errors)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="STEVE-oracle", color=colors[3])
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[3])
with open('Toy-v1/steve_oracle.csv', 'w') as f:
data = []
for frame_i in range(result.shape[0]):
for loss in result[frame_i]:
data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss))
f.write("\n".join(data))
# STEVE-Noisy
if True:
print("Running ensemble noisy STEVE.")
trial_results = []
noisy_q_estimate_errors = []
noisy_mve_estimate_errors = []
noisy_steve_estimate_errors = []
noisy_opt_estimate_errors = []
noisy_steve_beat_freq = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [Q_ENSEMBLE_SIZE, state_count + 1]).astype(np.float64)
Q[:, state_count] = 0
losses = []
q_estimate_errors = []
mve_estimate_errors = []
steve_estimate_errors = []
opt_estimate_errors = []
steve_beat_freq = []
for step_i in range(TRAIN_STEPS):
_q_estimate_errors = []
_mve_estimate_errors = []
_steve_estimate_errors = []
_opt_estimate_errors = []
_steve_beat_freq = []
for q_ensemble_i in range(Q_ENSEMBLE_SIZE):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
# STEVE rollout
Q_est_mat = np.zeros([HORIZON + 1, MODEL_ENSEMBLE_SIZE, Q_ENSEMBLE_SIZE])
reward_est_mat = np.zeros([HORIZON + 1, MODEL_ENSEMBLE_SIZE, 1])
first_next_state, first_reward = next_state, reward
for model_ensemble_i in range(MODEL_ENSEMBLE_SIZE):
next_state, reward = first_next_state, first_reward
Q_est_mat[0, model_ensemble_i, :] = Q[:, next_state]
reward_est_mat[0, model_ensemble_i, 0] = reward
for timestep_i in range(1, HORIZON + 1):
next_state, reward = noisy_step(next_state)
Q_est_mat[timestep_i, model_ensemble_i, :] = Q[:, next_state]
reward_est_mat[timestep_i, model_ensemble_i, 0] = reward
all_targets = Q_est_mat + np.cumsum(reward_est_mat, axis=0)
# STEVE weight calculation
all_targets = np.reshape(all_targets, [HORIZON + 1, MODEL_ENSEMBLE_SIZE * Q_ENSEMBLE_SIZE])
estimates = np.mean(all_targets, axis=1)
confidences = old_div(1., (np.var(all_targets, axis=1) + 1e-8))
coefficients = old_div(confidences, np.sum(confidences))
target = np.sum(estimates * coefficients)
# target = estimates[0]
Q[q_ensemble_i, state] = target
true_target = state + 1. if state != terminal_state else 0.
_q_estimate_errors.append(np.square(estimates[0] - true_target))
_mve_estimate_errors.append(np.square(estimates[-1] - true_target))
_steve_estimate_errors.append(np.square(np.sum(estimates * coefficients) - true_target))
_opt_estimate_errors.append(np.min(np.square(estimates - true_target)))
_steve_beat_freq.append(float(np.square(estimates[0] - true_target) > np.square(target - true_target)))
losses.append(get_error(np.mean(Q, axis=0)))
q_estimate_errors.append(np.mean(_q_estimate_errors))
mve_estimate_errors.append(np.mean(_mve_estimate_errors))
steve_estimate_errors.append(np.mean(_steve_estimate_errors))
opt_estimate_errors.append(np.mean(_opt_estimate_errors))
steve_beat_freq.append(np.mean(_steve_beat_freq))
trial_results.append(losses)
noisy_q_estimate_errors.append(q_estimate_errors)
noisy_mve_estimate_errors.append(mve_estimate_errors)
noisy_steve_estimate_errors.append(steve_estimate_errors)
noisy_opt_estimate_errors.append(opt_estimate_errors)
noisy_steve_beat_freq.append(steve_beat_freq)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="STEVE-noisy", color=colors[3], linestyle='dotted')
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[3])
with open('Toy-v1/steve_noisy.csv', 'w') as f:
data = []
for frame_i in range(result.shape[0]):
for loss in result[frame_i]:
data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss))
f.write("\n".join(data))
# ### Display results
# plt.title("Comparison of convergence rates")
# plt.legend()
# plt.savefig("comparison.pdf")
# plt.show()
#
# ### Display secondary results - error comparison
# DOWNSAMPLE = 50
# colors = sns.color_palette('husl', 8)
# for i, (error_curve, label) in enumerate([
# (oracle_q_estimate_errors, "Oracle Q error"),
# (oracle_mve_estimate_errors, "Oracle MVE error"),
# (oracle_steve_estimate_errors, "Oracle STEVE error"),
# # (oracle_opt_estimate_errors, "Oracle minimum single-estimate error"),
# ]):
# result = np.stack(error_curve, axis=1)
# means = downsample(np.mean(result, axis=1), DOWNSAMPLE)
# stdevs = downsample(np.std(result, axis=1), DOWNSAMPLE)
# plt.plot(means, label=label, color=colors[i])
# plt.fill_between(np.arange(means.shape[0]), means - stdevs, means + stdevs, alpha=.2, color=colors[i])
#
# plt.title("Comparison of errors for oracle dynamics")
# plt.legend()
# plt.show()
#
# for i, (error_curve, label) in enumerate([
# (noisy_q_estimate_errors, "Noisy Q error"),
# (noisy_mve_estimate_errors, "Noisy MVE error"),
# (noisy_steve_estimate_errors, "Noisy STEVE error"),
# # (noisy_opt_estimate_errors, "Noisy minimum single-estimate error"),
# # (trial_steve_beat_freq, "STEVE beat freq"),
# ]):
# result = np.stack(error_curve, axis=1)
# means = downsample(np.mean(result, axis=1), DOWNSAMPLE)
# stdevs = downsample(np.std(result, axis=1), DOWNSAMPLE)
# plt.plot(means, label=label, color=colors[i])
# plt.fill_between(np.arange(means.shape[0]), means - stdevs, means + stdevs, alpha=.2, color=colors[i])
#
# plt.title("Comparison of errors for noisy dynamics")
# plt.legend()
# plt.show()
<filename>cliport/eval.py
"""Ravens main training script."""
import os
import pickle
import json
import numpy as np
import hydra
from cliport import agents
from cliport import dataset
from cliport import tasks
from cliport.utils import utils
from cliport.environments.environment import Environment
@hydra.main(config_path='./cfg', config_name='eval')
def main(vcfg):
# Load train cfg
tcfg = utils.load_hydra_config(vcfg['train_config'])
# Initialize environment and task.
env = Environment(
vcfg['assets_root'],
disp=vcfg['disp'],
shared_memory=vcfg['shared_memory'],
hz=480,
record_cfg=vcfg['record']
)
# Choose eval mode and task.
mode = vcfg['mode']
eval_task = vcfg['eval_task']
if mode not in {'train', 'val', 'test'}:
raise Exception("Invalid mode. Valid options: train, val, test")
# Load eval dataset.
dataset_type = vcfg['type']
if 'multi' in dataset_type:
ds = dataset.RavensMultiTaskDataset(vcfg['data_dir'],
tcfg,
group=eval_task,
mode=mode,
n_demos=vcfg['n_demos'],
augment=False)
else:
ds = dataset.RavensDataset(os.path.join(vcfg['data_dir'], f"{eval_task}-{mode}"),
tcfg,
n_demos=vcfg['n_demos'],
augment=False)
all_results = {}
name = '{}-{}-n{}'.format(eval_task, vcfg['agent'], vcfg['n_demos'])
# Save path for results.
json_name = f"multi-results-{mode}.json" if 'multi' in vcfg['model_path'] else f"results-{mode}.json"
save_path = vcfg['save_path']
print(f"Save path for results: {save_path}")
if not os.path.exists(save_path):
os.makedirs(save_path)
save_json = os.path.join(save_path, f'{name}-{json_name}')
# Load existing results.
existing_results = {}
if os.path.exists(save_json):
with open(save_json, 'r') as f:
existing_results = json.load(f)
# Make a list of checkpoints to eval.
ckpts_to_eval = list_ckpts_to_eval(vcfg, existing_results)
# Evaluation loop
print(f"Evaluating: {str(ckpts_to_eval)}")
for ckpt in ckpts_to_eval:
model_file = os.path.join(vcfg['model_path'], ckpt)
if not os.path.exists(model_file) or not os.path.isfile(model_file):
print(f"Checkpoint not found: {model_file}")
continue
elif not vcfg['update_results'] and ckpt in existing_results:
print(f"Skipping because of existing results for {model_file}.")
continue
results = []
mean_reward = 0.0
# Run testing for each training run.
for train_run in range(vcfg['n_repeats']):
# Initialize agent.
utils.set_seed(train_run, torch=True)
agent = agents.names[vcfg['agent']](name, tcfg, None, ds)
# Load checkpoint
agent.load(model_file)
print(f"Loaded: {model_file}")
record = vcfg['record']['save_video']
n_demos = vcfg['n_demos']
# Run testing and save total rewards with last transition info.
for i in range(0, n_demos):
print(f'Test: {i + 1}/{n_demos}')
episode, seed = ds.load(i)
goal = episode[-1]
total_reward = 0
np.random.seed(seed)
# set task
if 'multi' in dataset_type:
task_name = ds.get_curr_task()
task = tasks.names[task_name]()
print(f'Evaluating on {task_name}')
else:
task_name = vcfg['eval_task']
task = tasks.names[task_name]()
task.mode = mode
env.seed(seed)
env.set_task(task)
obs = env.reset()
info = env.info
reward = 0
# Start recording video (NOTE: super slow)
if record:
video_name = f'{task_name}-{i+1:06d}'
if 'multi' in vcfg['model_task']:
video_name = f"{vcfg['model_task']}-{video_name}"
env.start_rec(video_name)
for _ in range(task.max_steps):
act = agent.act(obs, info, goal)
lang_goal = info['lang_goal']
print(f'Lang Goal: {lang_goal}')
obs, reward, done, info = env.step(act)
total_reward += reward
print(f'Total Reward: {total_reward:.3f} | Done: {done}\n')
if done:
break
results.append((total_reward, info))
mean_reward = np.mean([r for r, i in results])
print(f'Mean: {mean_reward} | Task: {task_name} | Ckpt: {ckpt}')
# End recording video
if record:
env.end_rec()
all_results[ckpt] = {
'episodes': results,
'mean_reward': mean_reward,
}
# Save results in a json file.
if vcfg['save_results']:
# Load existing results
if os.path.exists(save_json):
with open(save_json, 'r') as f:
existing_results = json.load(f)
existing_results.update(all_results)
all_results = existing_results
with open(save_json, 'w') as f:
json.dump(all_results, f, indent=4)
def list_ckpts_to_eval(vcfg, existing_results):
ckpts_to_eval = []
# Just the last.ckpt
if vcfg['checkpoint_type'] == 'last':
last_ckpt = 'last.ckpt'
ckpts_to_eval.append(last_ckpt)
# Validation checkpoints that haven't been already evaluated.
elif vcfg['checkpoint_type'] == 'val_missing':
checkpoints = sorted([c for c in os.listdir(vcfg['model_path']) if "steps=" in c])
ckpts_to_eval = [c for c in checkpoints if c not in existing_results]
# Find the best checkpoint from validation and run eval on the test set.
elif vcfg['checkpoint_type'] == 'test_best':
result_jsons = [c for c in os.listdir(vcfg['results_path']) if "results-val" in c]
if 'multi' in vcfg['model_task']:
result_jsons = [r for r in result_jsons if "multi" in r]
else:
result_jsons = [r for r in result_jsons if "multi" not in r]
if len(result_jsons) > 0:
result_json = result_jsons[0]
with open(os.path.join(vcfg['results_path'], result_json), 'r') as f:
eval_res = json.load(f)
best_checkpoint = 'last.ckpt'
best_success = -1.0
for ckpt, res in eval_res.items():
if res['mean_reward'] > best_success:
best_checkpoint = ckpt
best_success = res['mean_reward']
print(best_checkpoint)
ckpt = best_checkpoint
ckpts_to_eval.append(ckpt)
else:
print("No best val ckpt found. Using last.ckpt")
ckpt = 'last.ckpt'
ckpts_to_eval.append(ckpt)
# Load a specific checkpoint with a substring e.g: 'steps=10000'
else:
print(f"Looking for: {vcfg['checkpoint_type']}")
checkpoints = [c for c in os.listdir(vcfg['model_path']) if vcfg['checkpoint_type'] in c]
checkpoint = checkpoints[0] if len(checkpoints) > 0 else ""
ckpt = checkpoint
ckpts_to_eval.append(ckpt)
return ckpts_to_eval
if __name__ == '__main__':
main()
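# Hedged usage sketch (added; the exact override names depend on the project's
# Hydra config files): evaluation is typically launched by overriding config
# values on the command line, for example
#
#   python eval.py eval_task=<task-name> agent=<agent-name> mode=val n_demos=10 \
#       model_path=<checkpoint-dir> checkpoint_type=last update_results=True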
<reponame>ryankurte/codechecker
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Helper for thrift api calls.
"""
import sys
from thrift.protocol.TProtocol import TProtocolException
from thrift.Thrift import TApplicationException
import codechecker_api_shared
from codechecker_common.logger import get_logger
LOG = get_logger('system')
def truncate_arg(arg, max_len=100):
""" Truncate the given argument if the length is too large. """
if isinstance(arg, str) and len(arg) > max_len:
return arg[:max_len] + "..."
return arg
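# For example (illustrative only): truncate_arg('a' * 150) returns the first 100
# characters followed by '...', while short strings and non-string arguments such
# as 42 are returned unchanged.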
def ThriftClientCall(function):
""" Wrapper function for thrift client calls.
- open and close transport,
- log and handle errors
"""
funcName = function.__name__
def wrapper(self, *args, **kwargs):
self.transport.open()
func = getattr(self.client, funcName)
try:
try:
return func(*args, **kwargs)
except TApplicationException as ex:
# If the session is expired we will try to reset the token and
# call the API function again.
if "Error code 401" not in ex.message:
raise ex
# Generate a new token
self._reset_token()
return func(*args, **kwargs)
except codechecker_api_shared.ttypes.RequestFailed as reqfailure:
LOG.error('Calling API endpoint: %s', funcName)
if reqfailure.errorCode ==\
codechecker_api_shared.ttypes.ErrorCode.DATABASE:
LOG.error('Database error on server\n%s',
str(reqfailure.message))
elif reqfailure.errorCode ==\
codechecker_api_shared.ttypes.ErrorCode.AUTH_DENIED:
LOG.error('Authentication denied\n %s',
str(reqfailure.message))
elif reqfailure.errorCode ==\
codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED:
LOG.error('Unauthorized to access\n %s',
str(reqfailure.message))
LOG.error('Ask the product admin for additional access '
'rights.')
elif reqfailure.errorCode ==\
codechecker_api_shared.ttypes.ErrorCode.API_MISMATCH:
LOG.error('Client/server API mismatch\n %s',
str(reqfailure.message))
else:
LOG.error('API call error: %s\n%s', funcName, str(reqfailure))
sys.exit(1)
except TApplicationException as ex:
LOG.error("Internal server error: %s", str(ex.message))
sys.exit(1)
except TProtocolException as ex:
if ex.type == TProtocolException.UNKNOWN:
LOG.error('Unknown thrift error')
elif ex.type == TProtocolException.INVALID_DATA:
LOG.error('Thrift invalid data error.')
elif ex.type == TProtocolException.NEGATIVE_SIZE:
LOG.error('Thrift negative size error.')
elif ex.type == TProtocolException.SIZE_LIMIT:
LOG.error('Thrift size limit error.')
elif ex.type == TProtocolException.BAD_VERSION:
LOG.error('Thrift bad version error.')
LOG.error(funcName)
# Do not print the argument list if it contains sensitive
# information such as passwords.
# Also it is possible that one of the argument is too large to log
# the full content of it (for example the 'b64zip' parameter of the
# 'massStoreRun' API function). For this reason we have to truncate
# the arguments.
if funcName != "performLogin":
LOG.error([truncate_arg(arg) for arg in args])
LOG.error(kwargs)
LOG.exception("Request failed.")
sys.exit(1)
except OSError as oserr:
LOG.error("Connection failed.")
LOG.error(oserr.strerror)
LOG.error("Check if your CodeChecker server is running.")
sys.exit(1)
finally:
self.transport.close()
return wrapper
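# Hedged usage sketch (added for illustration; the class and method names below
# are made up and not part of CodeChecker). A client helper is expected to
# provide self.client (the generated Thrift client), self.transport and a
# _reset_token() method; the decorated method body is never executed, only its
# name is used to look up the matching Thrift API call on self.client.
class ExampleThriftHelper:
    def __init__(self, client, transport):
        self.client = client
        self.transport = transport

    def _reset_token(self):
        """Re-authenticate and refresh the session token (sketch only)."""

    @ThriftClientCall
    def getSomething(self, some_filter):
        pass  # body unused; the call is dispatched to self.client.getSomething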
<gh_stars>10-100
from mocap_dataset import MocapDataset
from skeleton import Skeleton
import numpy as np
import os
skeleton_H36M = Skeleton(offsets=[
[ 0. , 0. , 0. ],
[-132.948591, 0. , 0. ],
[ 0. , -442.894612, 0. ],
[ 0. , -454.206447, 0. ],
[ 0. , 0. , 162.767078],
[ 0. , 0. , 74.999437],
[ 132.948826, 0. , 0. ],
[ 0. , -442.894413, 0. ],
[ 0. , -454.20659 , 0. ],
[ 0. , 0. , 162.767426],
[ 0. , 0. , 74.999948],
[ 0. , 0.1 , 0. ],
[ 0. , 233.383263, 0. ],
[ 0. , 257.077681, 0. ],
[ 0. , 121.134938, 0. ],
[ 0. , 115.002227, 0. ],
[ 0. , 257.077681, 0. ],
[ 0. , 151.034226, 0. ],
[ 0. , 278.882773, 0. ],
[ 0. , 251.733451, 0. ],
[ 0. , 0. , 0. ],
[ 0. , 0. , 99.999627],
[ 0. , 100.000188, 0. ],
[ 0. , 0. , 0. ],
[ 0. , 257.077681, 0. ],
[ 0. , 151.031437, 0. ],
[ 0. , 278.892924, 0. ],
[ 0. , 251.72868 , 0. ],
[ 0. , 0. , 0. ],
[ 0. , 0. , 99.999888],
[ 0. , 137.499922, 0. ],
[ 0. , 0. , 0. ]
],
parents=[-1, 0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12,
16, 17, 18, 19, 20, 19, 22, 12, 24, 25, 26, 27, 28, 27, 30],
joints_left=[1, 2, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31],
    joints_right=[6, 7, 8, 9, 10, 16, 17, 18, 19, 20, 21, 22, 23])
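# Added sketch (assumption: not part of the original file): for a kinematic tree
# like the one above, where every parent index precedes its children, the
# rest-pose position of each joint is the sum of the bone offsets along its chain
# of parents. The helper works on plain offset/parent arrays and does not rely on
# the Skeleton class API.
def rest_pose_positions(offsets, parents):
    """Return an [n_joints, 3] array of joint positions with identity rotations."""
    offsets = np.asarray(offsets, dtype=np.float64)
    positions = np.zeros_like(offsets)
    for joint, parent in enumerate(parents):
        if parent == -1:
            positions[joint] = offsets[joint]  # root joint
        else:
            positions[joint] = positions[parent] + offsets[joint]
    return positions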
1625460 | """
Define a GRB object holding BATSE data associated with a single GRB trigger,
and a GRBCollection to hold GRB instances for many GRBs.
Created 2012-05-06 by <NAME>
(adapted from an earlier version for the 4B catalog)
"""
from collections import OrderedDict
from os.path import join, exists
from os import mkdir
import urllib
from PIL import Image
from locations import root, trigger_paths, trigger_url, raw_cache
from locations import ascii64_paths, ascii64_url
from drm import DRMs_DISCSC
from ascii64 import ASCII64
class GRB(object):
"""
    Provide access to BATSE data for a single GRB, both through persistent
local storage and via access to the CGRO SSC data archive.
"""
# In order of columns (after trigger column) in the flux table:
flux_attributes = [
'F1', 'F1_err', 'F2', 'F2_err', 'F3', 'F3_err', 'F4', 'F4_err', # 0-7
'F64ms', 'F64ms_err', 't64ms', # 8-10
'F256ms', 'F256ms_err', 't256ms', # 11-13
'F1024ms', 'F1024ms_err', 't1024ms', # 14-16
]
# In order of columns (after trigger column) in the duration table:
duration_attributes = [
'T50', 'T50_err', 'T50_start', # 0-2
'T90', 'T90_err', 'T90_start', # 3-5
]
def __init__(self, basic_record):
"""Define a burst from its record in the basic table."""
# Break record into the trigger/name, Ulysses flag, and rest,
# since there may be no space between the name and Uflag.
# (The Ulysses flag is only in the published 4B catalog.)
start = basic_record[:18].strip().split()
# Uflag = basic_record[16]
cols = basic_record[18:].strip().split()
self.trigger = int(start[0])
self.name = start[1] + '_' + start[2]
self.desig = start[2]
self.in_4B = start[1] == '4B'
self.TJDate = int(cols[0]) # integer date
self.secs = float(cols[1]) # seconds on that day
self.TJD = self.TJDate + self.secs/86400.
# All angles are in degrees.
self.RA = float(cols[2])
self.dec = float(cols[3])
self.long = float(cols[4])
self.lat = float(cols[5])
self.drxn_err = float(cols[6]) # error circle radius
self.geo = float(cols[7]) # angle from nadir
if cols[8] == 'Y': # overwrote previous trigger
self.overwrote = True
else:
self.overwrote = False
if cols[9] == 'Y': # overwritten by later trigger
self.incomplete = True
else:
self.incomplete = False
self.has_flux = False # bursts with gaps will have no flux data
self.has_durn = False
self.comments = [] # store (flag, comment) tuples
# Store path elements for local and remote data access.
self.group, self.dir, tail = trigger_paths(self.trigger)
self.local_dir = join(self.group, self.dir)
self.remote_dir = trigger_url + tail
self.a64_group, self.a64_rfname, tail = ascii64_paths(self.trigger)
self.a64_remote = ascii64_url + tail
def set_bright(self, trigger, cols):
"""Add peak flux & fluence data from a brightness table record."""
if trigger != self.trigger:
raise ValueError, 'Flux data for wrong trigger!'
for i, name in enumerate(self.flux_attributes):
setattr(self, name, float(cols[i]))
self.has_flux = True
def set_durn(self, trigger, cols):
"""Add duration data from a duration table record."""
if trigger != self.trigger:
raise ValueError, 'Duration data for wrong trigger!'
for i, name in enumerate(self.duration_attributes):
setattr(self, name, float(cols[i]))
self.has_durn = True
# Replaced below.
# def grb_dir(self):
# """
# Return the path to the directory in the local database holding data
# associated with this GRB, creating the directory if necessary.
# """
# group = join(root, self.group)
# if not exists(group):
# mkdir(group)
# dir_path = join(root, self.local_dir)
# if not exists(dir_path):
# mkdir(dir_path)
# return dir_path
def set_grb_dir(self):
"""
Set the path to this GRB's local data directory, creating the directory
and its containing group if needed.
This is not done in __init__ so the group and burst directories are
created only when detailed data are loaded for a burst.
"""
group = join(root, self.group)
if not exists(group):
mkdir(group)
self.grb_dir = join(root, self.local_dir)
if not exists(self.grb_dir):
mkdir(self.grb_dir)
def file_check(self, fname):
"""
Check to see if the named file exists in this bursts data directory.
"""
path = join(self.grb_dir, fname)
return exists(path)
def cached_path(self, fname):
"""
        Return the path to a persistent local copy of a remote file.
Point to a local copy of the resource if present; otherwise, fetch it
remotely and save a copy locally, then return the local path.
"""
resource = join(self.grb_dir, fname)
if not exists(resource):
remote = self.remote_dir + fname
urllib.urlretrieve(remote, resource)
return resource
def raw_cached_path(self, fname):
"""
Return the path to a local copy of a remote file; it is stored in the
raw_cache directory for temporary use.
Point to an existing local copy of the resource if present; otherwise,
fetch it remotely and save a copy locally, then return the local path.
"""
resource = join(root, raw_cache, fname)
if not exists(resource):
remote = self.remote_dir + fname
urllib.urlretrieve(remote, resource)
return resource
def open_cached(self, fname):
raise NotImplementedError()
def clear_cached(self, fname):
raise NotImplementedError()
def save_pickle(self, obj, fname):
"""
Save an object associated with this GRB as a pickle in the database.
"""
raise NotImplementedError()
def load_pickle(self, fname):
"""
        Retrieve an object associated with this GRB from a pickle in the
database.
"""
def show_lc(self):
"""
Show light curve GIF files in the default image browser, merging 4ch
and summed light curves.
"""
ch_path = '%d_4ch.gif' % self.trigger
sum_path = '%d_sum.gif' % self.trigger
ch_path = self.cached_path(ch_path)
sum_path = self.cached_path(sum_path)
ch_im = Image.open(ch_path)
# Some ch plots are higher than others, so note the height.
w, h = ch_im.size
sum_im = Image.open(sum_path)
both = Image.new("RGB", (1610, max(600, h)))
both.paste(ch_im, (0,0))
both.paste(sum_im, (810,0))
both.show()
# This version takes less space, but looks horrible due to interpolation!
# both = Image.new("RGB", (1210, 600))
# both.paste(ch_im, (0,0))
# sum_im.thumbnail((400,300), Image.BILINEAR)
# both.paste(sum_im, (810,150))
# both.show()
def load_discsc_drms(self):
"""
Load DISCSC DRM data for the triggered detectors.
"""
self.discsc_drms = DRMs_DISCSC(self)
def load_ascii64(self):
"""
Load 64 ms count data from COSSC ASCII files.
These files compile DISCLA, PREB, and DISCSC data over an interval
determined by examination of trigger and pre-trigger data by the
BATSE team.
NOTE: The DISCLA data is in 1024 ms bins; the counts are divided by
16 to distribute across the tabulated 64 ms bins. The DISCLA bins
can overlap with the 1st PREB bin, in which case the PREB overlap
is subtracted and the remaining counts divided among earlier bins.
For full documentation, see:
ftp://legacy.gsfc.nasa.gov/compton/data/batse/ascii_data/64ms/README
"""
self.ascii64 = ASCII64(self)
def __getattr__(self, name):
"""
Implement auto-loading of some data by catching attributes not yet
assigned in the instance dict.
"""
if name == 'grb_dir':
self.set_grb_dir()
return self.grb_dir
elif name == 'discsc_drms':
self.load_discsc_drms()
return self.discsc_drms
elif name == 'ascii64':
self.load_ascii64()
return self.ascii64
else:
raise AttributeError('No attribute "%s"!' % name)
def __str__(self):
s = 'Trigger: %i\n' % self.trigger
s += 'Name: %s\n' % self.name
s += 'TJD: %f\n' % self.TJD
s += '(RA, Dec) (long, lat) (deg): (%6.2f %6.2f) (%6.2f %6.2f)\n' % \
(self.RA, self.dec, self.lat, self.long)
s += 'Drxn err (deg): %5.2f\n' % self.drxn_err
s += 'Complete? %s' % (not self.incomplete)
if self.has_flux:
s += '\nCh 1-4 fluence (ergs): %7.1e %7.1e %7.1e %7.1e\n' % \
(self.F1, self.F2, self.F3, self.F4)
s += 'Ch 1-4 S/N: %7.1e %7.1e %7.1e %7.1e\n' % \
(self.F1/self.F1_err, self.F2/self.F2_err, self.F3/self.F3_err, self.F4/self.F4_err)
s += '64ms F_pk (cts/cm^2/s), S/N, t: %6.2f %6.2f %7.2f' % \
(self.F64ms, self.F64ms/self.F64ms_err, self.t64ms)
if self.has_durn:
s += '\nT50, T90: %6.2f %6.2f' % (self.T50, self.T90)
if self.comments:
for entry in self.comments:
s += '\n%s: %s' % entry
return s
class GRBCollection(OrderedDict):
"""
Store GRB objects in an ordered dict accessed by BATSE trigger number
(with the key being the integer trigger number, not a string).
GRB data may also be accessed as attributes in two ways:
.t# where # is the trigger number; returns a GRB instance
.b# where # is YYMMDD from the traditional GRBYY... designation;
returns a list of GRB instances
Note that multiple bursts may occur on the same day, so the .b# value is a
list of GRBs that occurred on that day. The 4B catalog distinguished
such bursts with designators that add a letter suffix (B, C, D after
the first burst), nominally ordered by intensity. The 5Bp bursts (even
those from 4B) are not so labeled.
"""
def __init__(self, *args, **kwds):
# Note: "*args, **kwds" are needed to support re-creating the instance
# after pickling, via OrderedDict's pickling interface.
OrderedDict.__init__(self, *args, **kwds)
# These dicts map from 't' and 'b' attributes to trigger numbers.
self.t_attrs = {}
self.b_attrs = {}
def add(self, grb):
"""
Add a GRB instance to the collection.
"""
self[grb.trigger] = grb
t_attr = 't%i' % grb.trigger
b_attr = 'b%s' % grb.desig[0:6]
self.t_attrs[t_attr] = grb.trigger
if not (b_attr in self.b_attrs):
self.b_attrs[b_attr] = [grb]
else:
self.b_attrs[b_attr].append(grb)
def __getattr__(self, name):
"""
Return values associated with .t# or .b# attributes.
"""
if name[0] == 't':
tnum = self.t_attrs[name]
return self[tnum]
elif name[0] == 'b':
return [grb for grb in self.b_attrs[name]]
else:
raise AttributeError('Illegal attribute!')
# These are experiments to support unpickling; the solution was to provide
# *args and **kwds arguments to the superclass initializer.
# def __reduce__(self):
# """
# Method used to support pickling of an OrderedDict subclass.
# """
# t = OrderedDict.__reduce__(self)
# return (t[0], ()) + t[2:]
# def __reduce__(self):
# """Return state information for pickling."""
# items = [[k, self[k]] for k in self]
# inst_dict = vars(self).copy()
# for k in vars(OrderedDict()):
# inst_dict.pop(k, None)
# if inst_dict:
# return (self.__class__, (items,), inst_dict)
# return self.__class__, (items,)
# def __reduce__(self):
# return self.__class__, (OrderedDict(self),)
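# Hedged usage sketch (added; the trigger number and designation below are
# invented for illustration):
#
#     grbs = GRBCollection()
#     grbs.add(grb)              # grb is a GRB built from a basic-table record
#     grbs[105] is grbs.t105     # items are keyed by integer trigger number
#     same_day = grbs.b910503    # list of GRBs whose designation date is YYMMDD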
<reponame>aolarchive/Hydro
__author__ = 'yanivshalev'
1678482 | """Unit tests for database components."""
import unittest
import pyodbc
from shared.utils import get_test_case_name
class TestDb(unittest.TestCase):
"""Unit tests for database components."""
@classmethod
def setUpClass(cls):
"""Execute this before the tests."""
cls.connection = TestDb.get_connection()
@staticmethod
def get_connection():
"""Return connection to mobydq database."""
connection_string = 'driver={PostgreSQL Unicode};server=db;port=5432;database=mobydq;uid=postgres;pwd=<PASSWORD>;' # Should be moved to config file
connection = pyodbc.connect(connection_string)
connection.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8')
connection.setencoding(encoding='utf-8')
return connection
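    # Illustrative note (added sketch, not used by the tests below): the same
    # connection can also run parameterized queries with pyodbc '?' placeholders,
    # which avoids interpolating values into the SQL string, e.g.
    #     cursor = connection.execute('SELECT id FROM base.user WHERE email = ?;', 'someone@example.org')
    #     row = cursor.fetchone()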
def rollback(self):
"""Rollback uncommitted database transactions."""
self.connection.execute('ROLLBACK;')
return True
def create_data_source(self, test_case_name: str):
"""Create a data source in the database and return its id and password."""
insert_data_source_query = f'''INSERT INTO base.data_source (name, connection_string, data_source_type_id) VALUES ('{test_case_name}', 'Test connection string', 1) RETURNING id, password;'''
cursor = self.connection.execute(insert_data_source_query)
row = cursor.fetchone()
data_source_id = row[0]
password = row[1]
return data_source_id, password
def create_indicator(self, test_case_name: str, indicator_group_id: int, user_group_id: int):
"""Create an indicator in the database and return its id."""
insert_indicator_query = f'''INSERT INTO base.indicator (name, flag_active, indicator_type_id, indicator_group_id, user_group_id) VALUES ('{test_case_name}', true, 1, {indicator_group_id}, '{user_group_id}') RETURNING id;'''
cursor = self.connection.execute(insert_indicator_query)
indicator_id = cursor.fetchone()[0]
return indicator_id
def create_indicator_group(self, test_case_name: str, user_group_id: int):
"""Create an indicator group in the database and return its id."""
insert_indicator_group_query = f'''INSERT INTO base.indicator_group (name, user_group_id) VALUES ('{test_case_name}', '{user_group_id}') RETURNING id;'''
cursor = self.connection.execute(insert_indicator_group_query)
indicator_group_id = cursor.fetchone()[0]
return indicator_group_id
def create_user(self, test_case_name: str):
"""Create a user in the database and return its id."""
insert_user_query = f'''INSERT INTO base.user (email, role) values ('{test_case_name}', 'admin') RETURNING id;'''
cursor = self.connection.execute(insert_user_query)
user_id = cursor.fetchone()[0]
return user_id
def create_user_group(self, test_case_name: str):
"""Create a user group in the database and return its id."""
insert_user_group_query = f'''INSERT INTO base.user_group (name) VALUES ('{test_case_name}') RETURNING id;'''
cursor = self.connection.execute(insert_user_group_query)
user_group_id = cursor.fetchone()[0]
return user_group_id
def create_user_group_membership(self, user_group_id: int, user_id: int):
"""Create a user group user in the database and return its id."""
insert_user_group_membership_query = f'''INSERT INTO base.user_group_membership (user_group_id, user_id) VALUES ({user_group_id}, {user_id}) RETURNING id;'''
cursor = self.connection.execute(insert_user_group_membership_query)
user_group_membership_id = cursor.fetchone()[0]
return user_group_membership_id
def delete_user_group(self, user_group_id: int):
"""Delete a user group from the database."""
delete_user_group_query = f'''DELETE FROM base.user_group WHERE id = {user_group_id};'''
self.connection.execute(delete_user_group_query)
return True
def delete_user_group_membership(self, user_group_id: int):
"""Delete a user group membership from the database."""
delete_user_group_membership_query = f'''DELETE FROM base.user_group_membership WHERE user_group_id = {user_group_id};'''
self.connection.execute(delete_user_group_membership_query)
return True
def update_user(self, user_id: int):
"""Update a user in the database and return its updated_by_id and updated_date."""
update_user_query = f'''UPDATE base.user SET role = 'advanced' WHERE id = {user_id} RETURNING updated_by_id, updated_date, created_date;'''
cursor = self.connection.execute(update_user_query)
row = cursor.fetchone()
updated_by_id = row[0]
updated_date = row[1]
created_date = row[2]
return updated_by_id, updated_date, created_date
def update_data_source(self, data_source_id: int):
"""Update a data source in the database and return its password."""
update_data_source_query = f'''UPDATE base.data_source SET password = '<PASSWORD>' WHERE id = {data_source_id} RETURNING password;'''
cursor = self.connection.execute(update_data_source_query)
password = cursor.fetchone()[0]
return password
def test_function_duplicate_indicator(self):
"""Unit tests for custom function duplicate_indicator."""
# Insert user group
test_case_name = get_test_case_name()
user_group_id = self.create_user_group(test_case_name)
# Insert indicator group
indicator_group_id = self.create_indicator_group(test_case_name, user_group_id)
# Insert indicator
indicator_id = self.create_indicator(test_case_name, indicator_group_id, user_group_id)
# Insert test parameter
insert_parameter_query = f'''INSERT INTO base.parameter (value, indicator_id, parameter_type_id, user_group_id) VALUES ('{test_case_name}', {indicator_id}, 1, {user_group_id});'''
self.connection.execute(insert_parameter_query)
# Call test duplicate indicator function
new_test_case_name = get_test_case_name()
call_test_duplicate_indicator_query = f'''SELECT base.duplicate_indicator({indicator_id}, '{new_test_case_name}');'''
self.connection.execute(call_test_duplicate_indicator_query)
# Get new indicator and parameter
select_new_indicator_query = f'''SELECT a.name, b.value FROM base.indicator a INNER JOIN base.parameter b ON a.id = b.indicator_id WHERE a.name = '{new_test_case_name}';'''
cursor = self.connection.execute(select_new_indicator_query)
row = cursor.fetchone()
# Assert duplicated indicator name and parameter value
indiator_name = row[0]
parameter_value = row[1]
self.assertEqual(indiator_name, new_test_case_name)
self.assertEqual(parameter_value, test_case_name)
# Rollback uncommitted data
self.rollback()
def test_function_execute_batch(self):
"""Unit tests for custom function execute_batch."""
# Insert user group
test_case_name = get_test_case_name()
user_group_id = self.create_user_group(test_case_name)
# Insert indicator group
indicator_group_id = self.create_indicator_group(test_case_name, user_group_id)
# Insert indicator
self.create_indicator(test_case_name, indicator_group_id, user_group_id)
# Call execute batch function
call_execute_batch_query = f'''SELECT base.execute_batch({indicator_group_id});'''
self.connection.execute(call_execute_batch_query)
# Get batch and indicator session
select_batch_query = f'''SELECT A.status, B.status FROM base.batch A INNER JOIN base.session B ON A.id = B.batch_id WHERE A.indicator_group_id = {indicator_group_id};'''
cursor = self.connection.execute(select_batch_query)
row = cursor.fetchone()
# Assert batch and session status are Pending
batch_status = row[0]
session_status = row[1]
self.assertEqual(batch_status, 'Pending')
self.assertEqual(session_status, 'Pending')
# Rollback uncommitted data
self.rollback()
def test_function_get_current_user_id(self):
"""Unit tests for custom function get_current_user_id."""
# Insert user
test_case_name = get_test_case_name()
user_id = self.create_user(test_case_name)
user = f'user_{user_id}'
# Change role
set_role_query = f'''SET ROLE {user};'''
self.connection.execute(set_role_query)
# Get current user Id based on current role
select_query = f'''SELECT base.get_current_user_id();'''
cursor = self.connection.execute(select_query)
current_user_id = cursor.fetchone()[0]
# Assert user Id is equal to Id extracted from role
self.assertEqual(user_id, current_user_id)
# Reverse current role to postgres
set_role_query = f'''SET ROLE postgres;'''
self.connection.execute(set_role_query)
# Rollback uncommitted data
self.rollback()
def test_function_test_data_source(self):
"""Unit tests for custom function test_data_source."""
# Insert data source
test_case_name = get_test_case_name()
data = self.create_data_source(test_case_name)
data_source_id = data[0]
# Call test data source function
call_test_data_source_query = f'''SELECT base.test_data_source({data_source_id});'''
self.connection.execute(call_test_data_source_query)
# Get data source connectivity status
select_data_source_query = f'''SELECT connectivity_status FROM base.data_source WHERE id = '{data_source_id}';'''
cursor = self.connection.execute(select_data_source_query)
connectivity_status = cursor.fetchone()[0]
# Assert connectivity status is Pending
self.assertEqual(connectivity_status, 'Pending')
# Rollback uncommitted data
self.rollback()
def test_trigger_create_user(self):
"""Unit tests for trigger function create_user."""
# Insert user
test_case_name = get_test_case_name()
user_id = self.create_user(test_case_name)
user = f'user_{user_id}'
# Get user and admin role
select_user_role_query = f'''SELECT a.rolname, c.rolname FROM pg_catalog.pg_roles a INNER JOIN pg_catalog.pg_auth_members b ON a.oid = b.member INNER JOIN pg_catalog.pg_roles c ON b.roleid = c.oid AND c.rolname = 'admin' WHERE a.rolname = '{user}';'''
cursor = self.connection.execute(select_user_role_query)
row = cursor.fetchone()
# Assert user was created and admin role granted
self.assertEqual(row[0], user)
self.assertEqual(row[1], 'admin')
# Get user and default user group
select_user_role_query = f'''SELECT a.rolname, c.rolname FROM pg_catalog.pg_roles a INNER JOIN pg_catalog.pg_auth_members b ON a.oid = b.member INNER JOIN pg_catalog.pg_roles c ON b.roleid = c.oid AND c.rolname = 'user_group_1' WHERE a.rolname = '{user}';'''
cursor = self.connection.execute(select_user_role_query)
row = cursor.fetchone()
# Assert user was created and user group granted
self.assertEqual(row[0], user)
self.assertEqual(row[1], 'user_group_1')
# Rollback uncommitted data
self.rollback()
def test_trigger_create_user_group(self):
"""Unit tests for trigger function create_user_group."""
# Insert user group
test_case_name = get_test_case_name()
user_group_id = self.create_user_group(test_case_name)
user_group = f'user_group_{user_group_id}'
# Get user group role
select_user_role_query = f'''SELECT a.rolname AS user_group FROM pg_catalog.pg_roles a WHERE a.rolname = '{user_group}';'''
cursor = self.connection.execute(select_user_role_query)
row = cursor.fetchone()
# Assert user group role was created
self.assertEqual(row[0], user_group)
# Rollback uncommitted data
self.rollback()
def test_trigger_data_source_insert_password(self):
"""Unit tests for trigger function data_source_insert_password."""
# Insert data source
test_case_name = get_test_case_name()
data = self.create_data_source(test_case_name)
password = data[1]
# Assert password is encrypted
self.assertNotEqual(password, '<PASSWORD>')
# Rollback uncommitted data
self.rollback()
def test_trigger_data_source_update_password(self):
"""Unit tests for trigger function data_source_update_password."""
# Insert data source
test_case_name = get_test_case_name()
data = self.create_data_source(test_case_name)
data_source_id = data[0]
password = data[1]
# Update data source
updated_password = self.update_data_source(data_source_id)
# Assert password is encrypted
self.assertNotEqual(updated_password, '<PASSWORD>')
        self.assertNotEqual(password, updated_password)
# Rollback uncommitted data
self.rollback()
def test_trigger_delete_children(self):
"""Unit tests for trigger function delete_children."""
# Insert user
test_case_name = get_test_case_name()
user_id = self.create_user(test_case_name)
# Insert user group
user_group_id = self.create_user_group(test_case_name)
# Insert user group membership
user_group_membership_id = self.create_user_group_membership(user_group_id, user_id)
# Delete user group
self.delete_user_group(user_group_id)
# Get user group membership
select_user_group_membership_query = f'''SELECT id FROM base.user_group_membership WHERE id = '{user_group_membership_id}';'''
cursor = self.connection.execute(select_user_group_membership_query)
row = cursor.fetchone()
# Assert user group membership has been deleted
self.assertTrue(row is None)
# Rollback uncommitted data
self.rollback()
def test_trigger_delete_user_group(self):
"""Unit tests for trigger function delete_user_group."""
# Insert user group
test_case_name = get_test_case_name()
user_group_id = self.create_user_group(test_case_name)
user_group = f'user_group_{user_group_id}'
# Delete user group
self.delete_user_group(user_group_id)
# Get user group role
select_user_role_query = f'''SELECT a.rolname AS user_group FROM pg_catalog.pg_roles a WHERE a.rolname = '{user_group}';'''
cursor = self.connection.execute(select_user_role_query)
row = cursor.fetchone()
# Assert user group role has been deleted
self.assertTrue(row is None)
# Rollback uncommitted data
self.rollback()
def test_trigger_grant_user_group(self):
"""Unit tests for trigger function grant_user_group."""
# Insert user
test_case_name = get_test_case_name()
user_id = self.create_user(test_case_name)
user = f'user_{user_id}'
# Insert user group
user_group_id = self.create_user_group(test_case_name)
user_group = f'user_group_{user_group_id}'
# Insert user group user
self.create_user_group_membership(user_group_id, user_id)
# Get user and user group roles
select_user_group_membership_query = f'''SELECT a.rolname, c.rolname FROM pg_catalog.pg_roles a INNER JOIN pg_catalog.pg_auth_members b ON a.oid = b.member INNER JOIN pg_catalog.pg_roles c ON b.roleid = c.oid WHERE a.rolname = '{user}' AND c.rolname = '{user_group}';'''
cursor = self.connection.execute(select_user_group_membership_query)
row = cursor.fetchone()
# Assert user was created and user group role granted
self.assertEqual(row[0], user)
self.assertEqual(row[1], user_group)
# Rollback uncommitted data
self.rollback()
def test_kill_execute_batch(self):
"""Unit tests for custom function kill_execute_batch."""
# Insert user group
test_case_name = get_test_case_name()
user_group_id = self.create_user_group(test_case_name)
# Insert indicator group
indicator_group_id = self.create_indicator_group(test_case_name, user_group_id)
# Insert indicator
self.create_indicator(test_case_name, indicator_group_id, user_group_id)
# Call execute batch function
call_execute_batch_query = f'''SELECT base.execute_batch({indicator_group_id});'''
self.connection.execute(call_execute_batch_query)
# Get batch
select_batch_query = f'''SELECT A.id FROM base.batch A WHERE A.indicator_group_id = {indicator_group_id};'''
cursor = self.connection.execute(select_batch_query)
row = cursor.fetchone()
batch_id = row[0]
# Call kill execute batch function
call_kill_execute_batch_query = f'''SELECT base.kill_execute_batch({batch_id});'''
self.connection.execute(call_kill_execute_batch_query)
# Get batch and session with updated status
select_batch_query = f'''SELECT A.status, B.status FROM base.batch A INNER JOIN base.session B ON A.id = B.batch_id WHERE A.id = {batch_id};'''
cursor = self.connection.execute(select_batch_query)
row = cursor.fetchone()
# Assert batch and session status are Pending
batch_status = row[0]
session_status = row[1]
self.assertEqual(batch_status, 'Killed')
self.assertEqual(session_status, 'Killed')
# Rollback uncommitted data
self.rollback()
def test_kill_test_data_source(self):
"""Unit tests for custom function kill_test_data_source."""
# Insert data source
test_case_name = get_test_case_name()
data = self.create_data_source(test_case_name)
data_source_id = data[0]
# Call test data source function
call_test_data_source_query = f'''SELECT base.test_data_source({data_source_id});'''
self.connection.execute(call_test_data_source_query)
# Call kill test data source function
call_kill_test_data_source_query = f'''SELECT base.kill_test_data_source({data_source_id});'''
self.connection.execute(call_kill_test_data_source_query)
# Get data source connectivity status
select_data_source_status_query = f'''SELECT A.connectivity_status FROM base.data_source A WHERE A.id = {data_source_id};'''
cursor = self.connection.execute(select_data_source_status_query)
row = cursor.fetchone()
# Assert batch and session status are Pending
data_source_status = row[0]
self.assertEqual(data_source_status, 'Killed')
# Rollback uncommitted data
self.rollback()
def test_trigger_revoke_user_group(self):
"""Unit tests for trigger function revoke_user_group."""
# Insert user
test_case_name = get_test_case_name()
user_id = self.create_user(test_case_name)
user = f'user_{user_id}'
# Insert user group
user_group_id = self.create_user_group(test_case_name)
user_group = f'user_group_{user_group_id}'
# Insert user group user
user_group_membership_id = self.create_user_group_membership(user_group_id, user_id)
# Delete user group user
insert_user_group_membership_query = f'''DELETE FROM base.user_group_membership WHERE id = {user_group_membership_id};'''
self.connection.execute(insert_user_group_membership_query)
# Get user and user group
select_user_group_membership_query = f'''SELECT a.rolname, c.rolname FROM pg_catalog.pg_roles a INNER JOIN pg_catalog.pg_auth_members b ON a.oid = b.member INNER JOIN pg_catalog.pg_roles c ON b.roleid = c.oid WHERE a.rolname = '{user}' AND c.rolname = '{user_group}';'''
cursor = self.connection.execute(select_user_group_membership_query)
row = cursor.fetchone()
# Assert user group role has been revoked
self.assertTrue(row is None)
# Rollback uncommitted data
self.rollback()
def test_trigger_update_updated_by_id(self):
"""Unit tests for trigger function update_updated_by_id."""
# Insert user
test_case_name = get_test_case_name()
user_id = self.create_user(test_case_name)
user = f'user_{user_id}'
# Change current role to new user
set_role_query = f'''SET ROLE {user};'''
self.connection.execute(set_role_query)
# Update user
data = self.update_user(user_id)
updated_by_id = data[0]
# Assert user Id is equal updated by Id
self.assertEqual(user_id, updated_by_id)
# Reverse current role to postgres
set_role_query = f'''SET ROLE postgres;'''
self.connection.execute(set_role_query)
# Rollback uncommitted data
self.rollback()
def test_trigger_update_updated_date(self):
"""Unit tests for trigger function update_updated_date."""
# Insert user
test_case_name = get_test_case_name()
user_id = self.create_user(test_case_name)
# Commit is necessary here for the test case to pass
        # It ensures updated_date will be greater than created_date
self.connection.commit()
# Update user
data = self.update_user(user_id)
updated_date = data[1]
created_date = data[2]
# Assert created_date is older than updated_date
self.assertLess(created_date, updated_date)
# Delete committed data
delete_user_group_membership_query = f'''DELETE FROM base.user_group_membership WHERE user_id = {user_id};'''
self.connection.execute(delete_user_group_membership_query)
delete_user_query = f'''DELETE FROM base.user WHERE id = {user_id};'''
self.connection.execute(delete_user_query)
self.connection.commit()
def test_trigger_update_user_permission(self):
"""Unit tests for trigger function update_user_permission."""
# Insert user
test_case_name = get_test_case_name()
user_id = self.create_user(test_case_name)
user = f'user_{user_id}'
# Update user role to advanced
self.update_user(user_id)
# Get user and role
select_user_role_query = f'''SELECT a.rolname AS user, c.rolname AS role FROM pg_catalog.pg_roles a INNER JOIN pg_catalog.pg_auth_members b ON a.oid = b.member INNER JOIN pg_catalog.pg_roles c ON b.roleid = c.oid AND c.rolname = 'advanced' WHERE a.rolname = '{user}';'''
cursor = self.connection.execute(select_user_role_query)
row = cursor.fetchone()
# Assert user was created and advanced role granted
self.assertEqual(row[0], user)
self.assertEqual(row[1], 'advanced')
# Rollback uncommitted data
self.rollback()
@classmethod
def tearDownClass(cls):
"""Execute this at the end of the tests."""
cls.connection.close()
if __name__ == '__main__':
unittest.main()
# coding: utf-8
""" Project Euler problem #36. """
def problem():
u""" Solve the problem.
The number, 197, is called a circular prime because all rotations of the
digits: 197, 971, and 719, are themselves prime.
There are thirteen such primes below 100: 2, 3, 5, 7, 11, 13, 17, 31, 37,
71, 73, 79, and 97.
How many circular primes are there below one million?
Answer: 872187
"""
_sum = 0
for num in range(1, 10**6):
if is_palindrome(num):
bnum = str(bin(num))[2:]
if bnum == bnum[::-1]:
_sum += num
return _sum
def is_palindrome(num):
""" Check for number is palindrome. """
num = str(num)
return num == num[::-1]
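# Worked example (illustrative): 585 is a palindrome in base 10 and its binary
# form bin(585)[2:] == '1001001001' is also a palindrome, so 585 is one of the
# numbers counted towards the answer above.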
if __name__ == '__main__':
    print(problem())
<reponame>iamdaguduizhang/Small_module
# -*- coding:utf-8 -*-
# @Time : 2019/10/10 10:57
# @Author : Dg
import datetime
import time
from multiprocessing import Process
def sleep(data):
time.sleep(data)
print("休息{}秒".format(data))
if __name__ == "__main__":
print(datetime.datetime.now())
p1 = Process(target=sleep, args=(3, ))
p2 = Process(target=sleep, args=(2, ))
p3 = Process(target=sleep, args=(3, ))
p4 = Process(target=sleep, args=(4, ))
    # p2.daemon = True  # The daemon attribute defaults to False; when True, the child process is terminated as soon as the main process exits. After p1 finishes, p2 would still need one more second, but before that second is up
    # the main process has already finished, so p2 is terminated along with it and nothing gets printed.
    p1.start()  # start() launches the process; after the four start() calls there are effectively four processes running concurrently.
# p1.join()
p2.start()
p3.start()
p4.start()
    p1.join()  # join() blocks the main process until this process has finished. Because p1 blocks the main process, p1's output is printed before the main process's output.
p4.join()
    # p1.join()  # Also, p1 has already finished before the main process prints, so p1's alive status is False.
# print(1, p1.is_alive())
# print(2, p2.is_alive())
# print(3, p3.is_alive())
    print('Main process')
print(datetime.datetime.now())
# https://www.cnblogs.com/hwlong/p/8952510.html
# from multiprocessing import Process
# def aa():
# pass
#
# p1 = Process(target=aa, args=())
# p1 = Process(target=aa, args=())
# p1 = Process(target=aa, args=())
# p1 = Process(target=aa, args=())
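# Added illustrative sketch (follows the notes above): a daemon process is killed
# as soon as the main process exits, while join() makes the main process wait.
#     p = Process(target=sleep, args=(2, ))
#     p.daemon = True   # p would be terminated when the main process exits
#     p.start()
#     p.join()          # using join() instead makes the main process wait about 2 s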
<filename>test/api/document/test_load_web_document.py
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="test_load_web_document.py">
# Copyright (c) 2020 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import os
import dateutil.parser
import asposewordscloud.models.requests
from test.base_test_context import BaseTestContext
#
# Example of how to load web document.
#
class TestLoadWebDocument(BaseTestContext):
#
# Test for loading web document.
#
def test_load_web_document(self):
requestDataSaveOptions = asposewordscloud.SaveOptionsData(file_name='google.doc', save_format='doc', dml_effects_rendering_mode='1', dml_rendering_mode='1', update_sdt_content=False, zip_output=False)
requestData = asposewordscloud.LoadWebDocumentData(loading_document_url='http://google.com', save_options=requestDataSaveOptions)
request = asposewordscloud.models.requests.LoadWebDocumentRequest(data=requestData)
result = self.words_api.load_web_document(request)
self.assertIsNotNone(result, 'Error has occurred.')
self.assertIsNotNone(result.save_result, 'Validate LoadWebDocument response')
self.assertIsNotNone(result.save_result.dest_document, 'Validate LoadWebDocument response')
self.assertEqual('google.doc', result.save_result.dest_document.href)
| StarcoderdataPython |
1705937 | <gh_stars>1-10
# Qiita API v2 support
import sys, json, requests, re, traceback
from QiitaAPI import *
class QiitaAPIv2(QiitaAPI):
    # Base URL of the v2 API
BASEURL='https://qiita.com/api/v2/'
    # Field definitions used by the v2 API
API_PROP_TITLE='title'
API_PROP_ITEMID='id'
API_PROP_URL='url'
API_PROP_TAGS='tags'
API_PROP_USER='user'
API_PROP_VIEW='page_views_count'
API_PROP_LIKE='likes_count'
API_PROP_HTML='rendered_body'
API_PROP_MARKDOWN='body'
API_PROP_CREATE_TIME='created_at'
API_PROP_UPDATE_TIME='updated_at'
API_PROP_PAGE='page'
API_PROP_PER_PAGE='per_page'
    # stock is computed by counting the stockers
API_PROP_STOCK='stock'
    # comment is computed by counting the comments
API_PROP_COMMENT='comment'
    # Limit on the maximum amount of data fetched at once
DEFAULT_RESULT_MAX=5000
    # Tags used internally
    # conf settings. The other fields are used for output, so keep them aligned with QiitaAPI
CONF_PROP_SHOW='show'
CONF_PROP_ITEM='item'
CONF_PROP_USER=QiitaAPI.COMMON_USER
CONF_PROP_TOKEN='access_token'
    # For internal state management
MNG_PROP_SHOW='show'
MNG_PROP_HAVE_LIST='have_list'
MNG_PROP_API_PROP='property'
#http header parse
HTTP_PROP_HEADER='header'
HTTP_PROP_LINK='Link'
HTTP_PROP_COUNT='Total-Count'
HTTP_PROP_BODY='body'
#private
def _parse_setting(self, data):
        # Apply the defaults
self._set_default()
        # Authentication settings
if self.CONF_PROP_TOKEN in data:
token=data[self.CONF_PROP_TOKEN]
self._headers['Authorization']=f'Bearer {token}'
        # User name setting
if QiitaAPI.COMMON_USER in data:
self._user=data[QiitaAPI.COMMON_USER]
        # Maximum result count setting
if QiitaAPI.COMMON_MAX in data:
self._result_max=data[QiitaAPI.COMMON_MAX]
        # If there is no show-related data, keep the defaults as they are
if not self.MNG_PROP_SHOW in data:
return
        # Update the item-related settings
self._set_item_config(data[self.MNG_PROP_SHOW])
        # Update the user-related settings
self._set_user_config(data[self.MNG_PROP_SHOW])
    # Apply the default configuration
def _set_default(self):
        # Set the defaults
self._headers={}
self._result_max=self.DEFAULT_RESULT_MAX
        # Item-related settings
self._item_config={
self.API_PROP_PAGE:1,
self.API_PROP_PER_PAGE:100,
QiitaAPI.ITEM_RAW:False,
            # item maps {response key: {show flag, available from the list API, mapped property name}}
self.CONF_PROP_ITEM:{
self.API_PROP_TITLE: {self.MNG_PROP_SHOW:True , self.MNG_PROP_HAVE_LIST:True , self.MNG_PROP_API_PROP:QiitaAPI.ITEM_TITLE},
self.API_PROP_URL: {self.MNG_PROP_SHOW:False, self.MNG_PROP_HAVE_LIST:True , self.MNG_PROP_API_PROP:QiitaAPI.ITEM_URL},
self.API_PROP_TAGS: {self.MNG_PROP_SHOW:True , self.MNG_PROP_HAVE_LIST:True , self.MNG_PROP_API_PROP:QiitaAPI.ITEM_TAGS},
self.API_PROP_MARKDOWN :{self.MNG_PROP_SHOW:False, self.MNG_PROP_HAVE_LIST:True , self.MNG_PROP_API_PROP:QiitaAPI.ITEM_MARKDOWN_DATA},
self.API_PROP_HTML: {self.MNG_PROP_SHOW:False, self.MNG_PROP_HAVE_LIST:True , self.MNG_PROP_API_PROP:QiitaAPI.ITEM_HTML_DATA},
self.API_PROP_CREATE_TIME: {self.MNG_PROP_SHOW:False , self.MNG_PROP_HAVE_LIST:True , self.MNG_PROP_API_PROP:QiitaAPI.ITEM_CREATED_AT},
self.API_PROP_UPDATE_TIME: {self.MNG_PROP_SHOW:False , self.MNG_PROP_HAVE_LIST:True , self.MNG_PROP_API_PROP:QiitaAPI.ITEM_UPDATED_AT},
self.API_PROP_VIEW: {self.MNG_PROP_SHOW:False, self.MNG_PROP_HAVE_LIST:False, self.MNG_PROP_API_PROP:QiitaAPI.ITEM_VIEW},
self.API_PROP_USER: {self.MNG_PROP_SHOW:False, self.MNG_PROP_HAVE_LIST:True, self.MNG_PROP_API_PROP:QiitaAPI.ITEM_USER},
self.API_PROP_LIKE: {self.MNG_PROP_SHOW:True, self.MNG_PROP_HAVE_LIST:True , self.MNG_PROP_API_PROP:QiitaAPI.ITEM_LIKE},
self.API_PROP_STOCK: {self.MNG_PROP_SHOW:False, self.MNG_PROP_HAVE_LIST:False, self.MNG_PROP_API_PROP:QiitaAPI.ITEM_STOCK},
self.API_PROP_COMMENT: {self.MNG_PROP_SHOW:False, self.MNG_PROP_HAVE_LIST:False, self.MNG_PROP_API_PROP:QiitaAPI.ITEM_COMMENT}
}
}
        # User-related settings
self._user_config={
self.API_PROP_PAGE:1,
self.API_PROP_PER_PAGE:100,
QiitaAPI.ITEM_RAW:False,
self.CONF_PROP_USER:{
            # item maps {response key: {show flag, mapped property name}}
self.API_PROP_ITEMID: {self.MNG_PROP_SHOW:True, self.MNG_PROP_API_PROP:QiitaAPI.USER_ID}
}
}
    # Update the item-related configuration
def _set_item_config(self, show_data):
        # If there is no item section, keep the defaults
if not self.CONF_PROP_ITEM in show_data:
return
for key, value in show_data[self.CONF_PROP_ITEM].items():
            # Keys other than the item table are assigned directly
if key in self._item_config:
if key != self.CONF_PROP_ITEM:
self._item_config[key]=value
            # Otherwise update the show flags in the config table
else:
self._update_item_config(key, value)
    # Update item_config
def _update_item_config(self, key, value):
self._update_show_config(key, value, self._item_config[self.CONF_PROP_ITEM])
    # Update the user-related configuration
def _set_user_config(self, show_data):
        # If there is no user section, keep the defaults
if not self.CONF_PROP_USER in show_data:
return
for key, value in show_data[self.CONF_PROP_USER].items():
            # Keys other than the user table are assigned directly
if key in self._user_config:
if key != self.CONF_PROP_USER:
self._user_config[key]=value
            # Otherwise update the show flags in the config table
else:
self._update_user_config(key, value)
    # Update user_config
def _update_user_config(self, key, value):
self._update_show_config(key, value, self._user_config[self.CONF_PROP_USER])
    # Update the show flag inside a config table
def _update_show_config(self, key, value, config):
        # Ignore invalid key/value pairs
if type(value) is not bool:
return
for index, tables in config.items():
if tables[self.MNG_PROP_API_PROP] == key:
                # Update the show flag
config[index][self.MNG_PROP_SHOW]=value
break
#get response body
def _get_api_response_body(self, extraurl):
res=self._callapi(extraurl)
res_body=json.loads(res.text)
return res_body
#get response all
def _get_api_response_all(self, extraurl, has_baseurl):
if has_baseurl:
res=self._send_get_req(extraurl)
else:
res=self._callapi(extraurl)
res_all={self.HTTP_PROP_HEADER:res.headers}
res_all[self.HTTP_PROP_BODY]=json.loads(res.text)
return res_all
def _get_next_link(self, header):
        # Return if there is no Link header
if not self.HTTP_PROP_LINK in header:
return ""
#format:
#{'Link': '<url>; rel="first",<url>; rel="prev" , <url>; rel="next", <url>; rel="last"'}
linklist=re.split(r',', header[self.HTTP_PROP_LINK])
        # Split into <url>; rel="xxx" entries
for link_raw in linklist:
            # Strip the unneeded characters, then split on ';'
link_split=re.split(r';', re.sub(r"[<>\" ]", "", link_raw))
            # Use the rel="next" entry
if link_split[1] == 'rel=next':
return link_split[0]
        # No next link was found
return ""
#update response, parse header, and return next
def _update_response_and_get_next_link(self, url, response_all):
        # Fetch the response
response=self._get_api_response_all(url, True)
        # Stop here if this would exceed the maximum result count
merged_count=len(response[self.HTTP_PROP_BODY])+len(response_all)
if self._result_max < merged_count:
return ""
        # Append the response body; it is a list, so extend is enough
response_all.extend(response[self.HTTP_PROP_BODY])
        # Stop once the maximum has been reached
if self._result_max == merged_count:
return ""
else:
            # Return the next URL
return self._get_next_link(response[self.HTTP_PROP_HEADER])
#get response with link 1st
def _get_api_response_with_link(self, extraurl, response):
next_url=self.BASEURL+extraurl
        # Keep issuing GET requests until there is no next link
        while len(next_url) != 0:
next_url=self._update_response_and_get_next_link(next_url, response)
return response
#qiita api call
def _callapi(self, extraurl):
url=self.BASEURL+extraurl
return self._send_get_req(url)
#get request direct
def _send_get_req(self, url):
try:
#print(url)
res=requests.get(url, headers=self._headers)
            # Is the response valid?
if not self._is_valid_response(res):
sys.exit()
return res
except:
print(f'Failed to get {url}')
traceback.print_exc()
sys.exit()
#check response data
def _is_valid_response(self, res):
        return res.status_code == 200
def _get_page_query(self):
return f'page={self._item_config[self.API_PROP_PAGE]}&per_page={self._item_config[self.API_PROP_PER_PAGE]}'
#get user raw result
def _get_user_items_by_api(self):
query=self._get_page_query()
if hasattr(self, '_user'):
url=f'users/{self._user}/items?{query}'
else :
url=f'authenticated_user/items?{query}'
        # user items use Link-header pagination
response=[]
self._get_api_response_with_link(url, response)
return response
#get items raw result
def _get_items_by_api(self):
query=self._get_page_query()
response=[]
self._get_api_response_with_link(f'items?{query}', response)
return response
    # Fetch the raw data for get_item
def _get_item_by_api(self, item):
return self._get_api_response_body(f'items/{item}')
    # Page views can only be obtained from the items endpoint
def _get_views(self, item):
        # Check that a token is set in the headers; without it this call is pointless
if not 'Authorization' in self._headers:
return None
        # Page views are available in the item information
res=self._get_api_response_body(f'items/{item}')
return res[self.API_PROP_VIEW]
def _get_stock(self, item):
        # The stock count comes from the stockers endpoint
        # The stock query depends on the user settings (may be unnecessary)
query=f'page={self._user_config[self.API_PROP_PAGE]}&per_page={self._user_config[self.API_PROP_PER_PAGE]}'
res=self._get_api_response_all(f'items/{item}/stockers?${query}', False)
return res[self.HTTP_PROP_HEADER][self.HTTP_PROP_COUNT]
def _get_comment(self, item):
        # The comment count comes from the comments endpoint
        # The comment query depends on the user settings (may be unnecessary)
query=f'page={self._user_config[self.API_PROP_PAGE]}&per_page={self._user_config[self.API_PROP_PER_PAGE]}'
res=self._get_api_response_all(f'items/{item}/comments?${query}', False)
return res[self.HTTP_PROP_HEADER][self.HTTP_PROP_COUNT]
def _get_extra_item_data(self, item_config, item):
if item_config[self.MNG_PROP_API_PROP] == QiitaAPI.ITEM_VIEW:
return self._get_views(item)
elif item_config[self.MNG_PROP_API_PROP] == QiitaAPI.ITEM_STOCK:
return self._get_stock(item)
elif item_config[self.MNG_PROP_API_PROP] == QiitaAPI.ITEM_COMMENT:
return self._get_comment(item)
else:
return None
def _parse_result(self, item_config, raw_result):
if item_config[self.MNG_PROP_API_PROP] == QiitaAPI.ITEM_USER:
return self._parse_raw_user(raw_result)
else:
return raw_result
def _does_show_item(self, key, config):
if not key in config:
return False
return config[key][self.MNG_PROP_SHOW]
def _parse_raw_item(self, itemid, itemdetail):
        # Pre-populate the stock field
if self._item_config[self.CONF_PROP_ITEM][self.API_PROP_STOCK][self.MNG_PROP_SHOW]:
itemdetail[self.API_PROP_STOCK]=None
        # Pre-populate the comment field
if self._item_config[self.CONF_PROP_ITEM][self.API_PROP_COMMENT][self.MNG_PROP_SHOW]:
itemdetail[self.API_PROP_COMMENT]=None
response={}
for key, value in itemdetail.items():
            # Skip hidden fields
if not self._does_show_item(key, self._item_config[self.CONF_PROP_ITEM]):
continue
this_item_config=self._item_config[self.CONF_PROP_ITEM][key]
            # Data present in the list response is parsed as-is
if this_item_config[self.MNG_PROP_HAVE_LIST]:
response[this_item_config[self.MNG_PROP_API_PROP]]=self._parse_result(this_item_config, value)
            # Otherwise fetch it separately
else:
                # If None, fetch it again; otherwise use the value as-is
if value == None:
response[this_item_config[self.MNG_PROP_API_PROP]]=self._get_extra_item_data(this_item_config, itemid)
else:
response[this_item_config[self.MNG_PROP_API_PROP]]=value
return response
def _parse_raw_user(self, raw_data):
if self._user_config[QiitaAPI.ITEM_RAW]:
return raw_data
response={}
for key, value in raw_data.items():
            # Skip hidden fields
if not self._does_show_item(key, self._user_config[self.CONF_PROP_USER]):
continue
            # No transformation is needed for now, so assign directly
this_config=self._user_config[self.CONF_PROP_USER][key]
response[this_config[self.MNG_PROP_API_PROP]]=value
return response
#parse raw items
def _parse_raw_items(self, raw_data):
        # Transform the data and return it
response={}
for itemdetail in raw_data:
itemid=itemdetail[self.API_PROP_ITEMID]
response[itemid]=self._parse_raw_item(itemid, itemdetail)
return response
#public
    # Get the items.
    # @ret dict of {itemid: {'title', other fields (depending on conf)}}
    # @note If a user is specified in the __init__ data, that user's information is used
def get_items(self):
        # Fetch the list of articles
raw_data = self._get_items_by_api()
        # If raw output is requested, return the data as-is
if self._item_config[QiitaAPI.ITEM_RAW]:
return raw_data
        # Transform the data and return it
else:
return self._parse_raw_items(raw_data)
    # Get all items of the account.
    # @ret dict of {itemid: {'title', other fields (depending on conf)}}
def get_user_items(self):
        # Fetch the user's article list
raw_data = self._get_user_items_by_api()
        # If raw output is requested, return the data as-is
if self._item_config[QiitaAPI.ITEM_RAW]:
return raw_data
        # Transform the data and return it
else:
return self._parse_raw_items(raw_data)
    # Get the information for a single item.
    # @ret dict of {itemid: {'title', other fields (depending on conf)}}
def get_item(self, item):
        # Fetch the item data
raw_data = self._get_item_by_api(item)
        # If raw output is requested, return the data as-is
if self._item_config[QiitaAPI.ITEM_RAW]:
return raw_data
        # Transform the data and return it
else:
return self._parse_raw_item(item, raw_data)
| StarcoderdataPython |
1690653 | <reponame>leonhard-s/auraxium
"""Base classes for the Auraxium object model.
These classes define shared functionality required by all object
representations of API data, and defines the basic class hierarchy used
throughout the PlanetSide 2 object model.
"""
import abc
import logging
from typing import Any, ClassVar, List, Optional, Type, TypeVar, Union
import pydantic
from .models.base import RESTPayload
from ._cache import TLRUCache
from .census import Query
from .errors import PayloadError
from ._rest import RequestClient
from .types import CensusData
from ._support import deprecated
__all__ = [
'Ps2Object',
'Cached',
'Named'
]
CachedT = TypeVar('CachedT', bound='Cached')
NamedT = TypeVar('NamedT', bound='Named')
Ps2ObjectT = TypeVar('Ps2ObjectT', bound='Ps2Object')
_log = logging.getLogger('auraxium.ps2')
class Ps2Object(metaclass=abc.ABCMeta):
"""Common base class for all PS2 object representations.
This requires that subclasses overwrite the :attr:`collection` and
:attr:`id_field` names, which are used to tie the class to its
corresponding API counterpart.
.. attribute:: collection
:type: str
The API collection linked to this type.
.. attribute:: id_field
:type: str
The field name containing the unique ID for this type.
.. note::
This will generally match the ``<type>_id`` convention, but
some collections like ``outfit_member`` or ``profile_2`` use
custom names. This attribute provides support for the latter.
"""
collection: ClassVar[str] = 'bogus'
_model: ClassVar[Type[RESTPayload]]
id_field: ClassVar[str] = 'bogus_id'
def __init__(self, data: CensusData, client: RequestClient) -> None:
"""Initialise the object.
This sets the object's :attr:`id` attribute and populates the
instance using the provided payload.
:param auraxium.types.CensusData data: The census response
dictionary to populate the object with.
:param auraxium.Client client: The client object to use for
requests performed via this object.
"""
id_ = int(str(data[self.id_field]))
_log.debug('Instantiating <%s:%d> using payload: %s',
self.__class__.__name__, id_, data)
self.id = id_
self._client = client
try:
self.data = self._model(**data)
except pydantic.ValidationError as err:
_log.warning(
'Encountered unsupported payload: %s\n'
'This message means that the Auraxium data model must '
'be updated. Please ensure you are on the latest '
'version of the Auraxium library and report this '
'message to the project maintainers.', data)
raise PayloadError(
f'Unable to instantiate {self.__class__.__name__} instance '
f'from given payload: {err}', data) from err
def __eq__(self, o: Any) -> bool:
if not isinstance(o, self.__class__):
return False
return self.id == o.id
def __getattr__(self, name: str) -> Any:
"""Fallback for missing attributes.
This allows missing attribute in the :class:`Ps2Object`
instance to fall back to its corresponding data class.
If the attribute cannot be found there either, an
:exc:`AttributeError` is raised as normal.
"""
# Re-raising or propagating the inner exception would only clutter up
# the exception traceback, so we raise one "from scratch" instead.
if hasattr(self.data, name):
return getattr(self.data, name)
raise AttributeError(name)
def __hash__(self) -> int:
return hash((self.__class__, self.id))
def __repr__(self) -> str:
"""Return the unique string representation of this object.
This will take the form of ``<Class:id>``, e.g.
``<Weapon:108>``.
"""
return f'<{self.__class__.__name__}:{self.id}>'
@classmethod
@deprecated('0.2', '0.3', replacement=':meth:`auraxium.Client.count`')
async def count(cls, client: RequestClient, **kwargs: Any) -> int:
"""Return the number of items matching the given terms.
:param auraxium.Client client: The client through which to
perform the request.
:param kwargs: Any number of query filters to apply.
:return: The number of entries entries.
"""
# NOTE: The following is a runtime-only compatibility hack and violates
# type hinting. This is scheduled for removal as per the decorator.
return await client.count(cls, **kwargs) # type: ignore
@classmethod
@deprecated('0.2', '0.3', replacement=':meth:`auraxium.Client.find`')
async def find(cls: Type[Ps2ObjectT], results: int = 10, *,
offset: int = 0, promote_exact: bool = False,
check_case: bool = True, client: RequestClient,
**kwargs: Any) -> List[Ps2ObjectT]:
"""Return a list of entries matching the given terms.
This returns up to as many entries as indicated by the results
argument. Note that it may be fewer if not enough matches are
found.
:param int results: The maximum number of results.
:param int offset: The number of entries to skip. Useful for
paginated views.
:param bool promote_exact: If enabled, exact matches to
non-exact searches will always come first in the return
list.
:param bool check_case: Whether to check case when comparing
strings. Note that case-insensitive searches are much more
expensive.
:param auraxium.Client client: The client through which to
perform the request.
:param kwargs: Any number of filters to apply.
:return: A list of matching entries.
"""
# NOTE: The following is a runtime-only compatibility hack and violates
# type hinting. This is scheduled for removal as per the decorator.
return await client.find( # type: ignore
cls, results=results, offset=offset, promote_exact=promote_exact,
check_case=check_case, **kwargs)
@classmethod
@deprecated('0.2', '0.3', replacement=':meth:`auraxium.Client.get`')
async def get(cls: Type[Ps2ObjectT], client: RequestClient,
check_case: bool = True, **kwargs: Any
) -> Optional[Ps2ObjectT]:
"""Return the first entry matching the given terms.
Like :meth:`Ps2Object.get`, but will only return one item.
:param auraxium.Client client: The client through which to
perform the request.
:param bool check_case: Whether to check case when comparing
strings. Note that case-insensitive searches are much more
expensive.
:return: A matching entry, or :obj:`None` if not found.
"""
# NOTE: The following is a runtime-only compatibility hack and violates
# type hinting. This is scheduled for removal as per the decorator.
return await client.get( # type: ignore
cls, results=1, check_case=check_case, **kwargs)
@classmethod
@deprecated('0.2', '0.3', replacement=':meth:`auraxium.Client.get`')
async def get_by_id(cls: Type[Ps2ObjectT], id_: int, *,
client: RequestClient) -> Optional[Ps2ObjectT]:
"""Retrieve an object by its unique Census ID.
:param int id\\_: The unique ID of the object.
:param auraxium.Client client: The client through which to
perform the request.
:return: The entry with the matching ID, or :obj:`None` if not
found.
"""
# NOTE: The following is a runtime-only compatibility hack and violates
# type hinting. This is scheduled for removal as per the decorator.
return await client.get_by_id(cls, id_) # type: ignore
def query(self) -> Query:
"""Return a query from the current object.
This is a utility method targeted at advanced users and
developers. It is generally not required for most use cases.
"""
query = Query(self.collection, service_id=self._client.service_id)
query.add_term(field=self.id_field, value=self.id)
return query
class Cached(Ps2Object, metaclass=abc.ABCMeta):
"""Base class for cacheable data types.
This generates a cache for each subclass that allows the storage
and retrieval of objects by ID. This cache may be customised using
keyword arguments as part of the class definition.
This customisation is done via two parameters: the cache size and
the TTU.
The cache size defines the maximum number of items the cache may
    hold before it will discard the least recently used item for every
new item added.
The TTU (time-to-use) will independently discard items that are
older than the given number of seconds to ensure data does not go
too far out of date.
"""
_cache: ClassVar[TLRUCache[int, Any]]
def __init__(self, data: CensusData, client: RequestClient) -> None:
"""Initialise the cached object.
After initialising this object via the parent class's
initialiser, this adds the current class to the cache.
:param auraxium.types.CensusData data: The API response to
instantiate the object from.
:param auraxium.Client client: The client used to retrieve the
object.
"""
super().__init__(data=data, client=client)
self._cache.add(self.id, self)
@classmethod
def __init_subclass__(cls, cache_size: int,
cache_ttu: float = 0.0) -> None:
"""Initialise a cacheable subclass.
This sets up the TLRU cache for the given subclass using the
keyword arguments provided in the class definitions.
:param int cache_size: The maximum number of items in the
cache. Once the cache reaches this number of items, it will
delete the least recently used item for every new item
added.
:param float cache_ttu: The time-to-use for cache items. If an
item is older than TTU allows, it will be re-fetched
regardless of how often it is accessed.
"""
super().__init_subclass__()
_log.debug('Setting up cache for %s (size: %d, ttu: %.1f sec.)',
cls.__name__, cache_size, cache_ttu)
cls._cache = TLRUCache(size=cache_size, ttu=cache_ttu,
name=f'{cls.__name__}_Cache')
@classmethod
def alter_cache(cls, size: int, ttu: Optional[float] = None) -> None:
"""Modify the class cache to use a new size and TTU.
This will update and clear the cache for the current class.
This allows customisation of the class depending on your
use-case.
:param int size: The new cache size.
:param float ttu: The new item TTU.
:raises ValueError: Raised if the size is less than 1.
"""
if size < 1:
raise ValueError(f'{size} is not a valid cache size')
cls._cache.clear()
cls._cache.size = size
if ttu is not None:
cls._cache.ttu = ttu
@classmethod
def _check_cache(cls: Type[CachedT], id_: int) -> Optional[CachedT]:
"""Attempt to restore an item from the cache.
If the item cannot be found, :obj:`None` will be returned
instead.
:param int id_: The unique identifier the item is cached by.
:return: An existing instance if found, or :obj:`None` if the
object has not been retrieved before or expired.
"""
return cls._cache.get(id_)
@classmethod
@deprecated('0.2', '0.3', replacement=':meth:`auraxium.Client.get`')
async def get_by_id(cls: Type[CachedT], id_: int, *, # type: ignore
client: RequestClient) -> Optional[CachedT]:
"""Retrieve an object by by ID.
This query uses caches and might return an existing instance if
the object has been recently retrieved.
:param int id\\_: The unique id of the object.
:param auraxium.Client client: The client through which to
perform the request.
:return: The object matching the given ID or :obj:`None` if no
match was found.
"""
_log.debug('<%s:%d> requested', cls.__name__, id_)
if (instance := cls._cache.get(id_)) is not None:
_log.debug('%r restored from cache', instance)
return instance # type: ignore
_log.debug('<%s:%d> not cached, generating API query...',
cls.__name__, id_)
return await super().get_by_id(id_, client=client) # type: ignore
class Named(Cached, cache_size=0, cache_ttu=0.0, metaclass=abc.ABCMeta):
"""Mix-in class for named objects.
This extends the functionality provided by
:class:`~auraxium.base.Cached` to also cache objects retrieved via
:meth:`Named.get_by_name`. The cache will also store the locale
used for the request.
"""
_cache: ClassVar[TLRUCache[Union[int, str], Any]] # type: ignore
def __init__(self, *args: Any, locale: Optional[str] = None,
**kwargs: Any) -> None:
"""Initialise the named object.
This sets the object's id attribute and adds it to the cache.
:param locale: The locale under which to cache this object.
:type locale: str or None
:param args: Any extra positional arguments are forwarded to
the :class:`~auraxium.base.Cached` class's initialiser.
:param kwargs: Any keyword arguments are forwarded to the
:class:`~auraxium.base.Cached` class's initialiser.
"""
super().__init__(*args, **kwargs)
if (locale is not None
and (name := getattr(self.name, locale, None)) is not None):
key = f'{locale}_{name.lower()}'
self._cache.add(key, self)
def __repr__(self) -> str:
"""Return the unique string representation of the faction.
This will take the form of ``<class:id:name>``, e.g.
``<Item:2:NC4 Mag-Shot>``.
"""
return (f'<{self.__class__.__name__}:{self.id}:'
f'\'{self.name}\'>')
def __str__(self) -> str:
"""Return the string representation of this object.
        This retrieves the :attr:`Named.name` attribute for the
English locale.
"""
return str(self.name)
@classmethod
@deprecated('0.2', '0.3', replacement=':meth:`auraxium.Client.get`')
async def get_by_name(cls: Type[NamedT], name: str, *, locale: str = 'en',
client: RequestClient) -> Optional[NamedT]:
"""Retrieve an object by its unique name.
If the same query has been performed recently, it may be
restored from cache instead.
This query is always case-insensitive.
:param str name: The name to search for.
:param str locale: The locale of the search key.
:param auraxium.Client client: The client through which to
perform the request.
:return: The entry with the matching name, or :obj:`None` if
not found.
"""
# NOTE: The following is a runtime-only compatibility hack and violates
# type hinting. This is scheduled for removal as per the decorator.
        return await client.get_by_name(cls, name, locale=locale) # type: ignore
class ImageMixin(Ps2Object, metaclass=abc.ABCMeta):
"""A mixin class for types supporting image access."""
def image(self) -> str:
"""Return the default image for this type."""
image_id: int = self.data.image_id # type: ignore
return self._image_url(image_id)
@staticmethod
def _image_url(image_id: int) -> str:
"""Return the URL for a given image ID."""
url = 'https://census.daybreakgames.com/files/ps2/images/static/'
return url + f'{image_id}.png'
| StarcoderdataPython |
193550 | from env_common import get_screen
from common import select_action_policy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import RMSprop
from itertools import count
import numpy as np
import gym
import visdom
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hyperparameters
printout_freq = 1
num_episodes = 300
batch_size = 5
learning_rate_policy = 0.001
learning_rate_value = 0.001
gamma = 0.99
lam = 0.99 # lambda for GAE-lambda
train_v_iters = 10
train_pi_iters = 10
clip_ratio=0.1 # how far can new policy deviate from old policy
# Initialize visualization
viz = visdom.Visdom()
loss_window = viz.line(
Y=torch.zeros((1)),
X=torch.zeros((1)),
opts=dict(xlabel='step', ylabel='Loss', title='Training loss'))
episode_length_window = viz.line(
Y=torch.zeros((1)),
X=torch.zeros((1)),
opts=dict(xlabel='step', ylabel='Episode length', title='Episode length'))
# Initialize environment and replay buffer
env = gym.make("CartPole-v0")
env.reset()
init_screen = get_screen(env)
_, _, screen_height, screen_width = init_screen.shape
num_actions = env.action_space.n
class Buffer:
def __init__(self, gamma, lam):
self.buffer = []
self.advantages = []
self.discounted_rewards = []
self.gamma = gamma
self.lam = lam
def add(self, state, action, logp, value, reward):
self.buffer.append((state, action, logp, value, reward))
def get(self, i):
"""Return state, action, log probability of action, discounted advantage and discounted reward at i.
Requires that finalize() has been called previously to calculate
discounted rewards.
"""
if i >= len(self.buffer) or i >= len(self.advantages) or i >= len(self.discounted_rewards):
return None
else:
state, action, logp, _, _ = self.buffer[i]
reward = self.discounted_rewards[i]
advantage = self.advantages[i]
return state, torch.FloatTensor([action]).to(device), logp, advantage, reward
def finalize(self):
"""Call at end of sample collection to calculate advantages and discounted rewards.
"""
_, _, _, values, rewards = zip(*self.buffer)
# Calculate advantages
self.advantages = [0] * len(self.buffer)
for i in range(len(self.advantages)-1):
if rewards[i] != 0: # if reward is zero, we ended the episode
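                # One-step TD error: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)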
delta = rewards[i] + self.gamma * values[i+1] - values[i]
self.advantages[i] = delta.item()
# Discount advantages
running_add = 0
for i in reversed(range(len(self.advantages))):
if self.advantages[i] == 0:
running_add = 0
else:
running_add = running_add * self.gamma * self.lam + self.advantages[i]
self.advantages[i] = running_add
# Normalize advantages
adv_mean = np.mean(self.advantages)
adv_std = np.std(self.advantages)
for i in range(steps):
self.advantages[i] = (self.advantages[i] - adv_mean) / adv_std
# Calculate discounted rewards
self.discounted_rewards = [0] * len(self.buffer)
running_add = 0
for i in reversed(range(len(self.discounted_rewards))):
if rewards[i] == 0:
running_add = 0
else:
running_add = running_add * self.gamma + rewards[i]
self.discounted_rewards[i] = running_add
def empty(self):
self.buffer = []
self.advantages = []
self.discounted_rewards = []
buffer = Buffer(gamma, lam)
class PolicyNet(nn.Module):
def __init__(self, h, w, outputs):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
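        # Helper below: spatial output size of a conv layer, (size - kernel_size) // stride + 1,
        # used to compute the input size of the linear head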
def conv2d_size_out(size, kernel_size = 5, stride = 2):
return (size - (kernel_size - 1) - 1) // stride + 1
convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
linear_input_size = convw * convh * 32
self.head = nn.Linear(linear_input_size, outputs)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return self.head(x.view(x.size(0), -1))
policy_network = PolicyNet(screen_height, screen_width, num_actions).to(device)
value_network = PolicyNet(screen_height, screen_width, 1).to(device)
optimizer_policy = RMSprop(policy_network.parameters(), lr=learning_rate_policy)
optimizer_value = RMSprop(value_network.parameters(), lr=learning_rate_value)
# Store duration of episodes to test performance
episode_durations = []
# Training loop
steps = 0
training_step = 0
for episode in range(num_episodes):
env.reset()
last_screen = get_screen(env)
current_screen = get_screen(env)
state = current_screen - last_screen
state = state.to(device)
for t in count():
action, logp, val = select_action_policy(state, policy_network, value_network)
_, reward, done, _ = env.step(action)
# Move to next state
last_screen = current_screen
current_screen = get_screen(env)
next_state = current_screen - last_screen
next_state = next_state.to(device)
        # To mark boundaries between episodes
if done:
reward = 0
buffer.add(state, float(action), logp, val, reward)
state = next_state
steps += 1
if done:
episode_durations.append(t + 1)
viz.line(X=torch.ones((1, 1))*episode, Y=torch.ones((1, 1)) * episode_durations[-1],
win=episode_length_window, update='append', name='Episode durations')
# Plot 50 episode averages
if len(episode_durations) >= 50:
mean = np.mean(episode_durations[-50:])
viz.line(X=torch.ones((1, 1))*episode, Y=torch.ones((1, 1)) * mean,
win=episode_length_window, update='append', name='Mean episode durations')
break
# Update policy
if episode > 0 and episode % batch_size == 0:
# Compute discounted rewards
buffer.finalize()
# Policy function learning
for i in range(train_pi_iters):
optimizer_policy.zero_grad()
for i in range(steps):
state, action, logp_old, advantage, _ = buffer.get(i)
probs = policy_network(state).squeeze(0)
m = torch.distributions.Categorical(logits=probs)
logp = m.log_prob(action) # new log probability
# PPO loss
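                    # Clipped surrogate objective: the probability ratio is clamped to
                    # [1 - clip_ratio, 1 + clip_ratio] so a single update cannot move the
                    # policy too far from the one that collected the data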
ratio = torch.exp(logp - logp_old)
clip_adv = torch.clamp(ratio, 1-clip_ratio, 1+clip_ratio) * advantage
policy_loss = -(torch.min(ratio * advantage, clip_adv))
policy_loss.backward()
optimizer_policy.step()
# Value function learning
for i in range(train_v_iters):
optimizer_value.zero_grad()
for i in range(steps):
state, action, _, _, reward = buffer.get(i)
value_loss = ((value_network(state).squeeze(0) - reward)**2).mean()
value_loss.backward()
optimizer_value.step()
if training_step % printout_freq == 0:
viz.line(X=torch.ones((1, 1)) * training_step, Y=torch.ones((1, 1)) * policy_loss.item(),
win=loss_window, update='append', name='Policy loss')
training_step = training_step + 1
buffer.empty()
steps = 0 | StarcoderdataPython |
3361623 | <filename>adjutant_ui/content/forgot_password/forms.py
# Copyright (c) 2016 Catalyst IT Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import settings
from django import forms
from django import http
from django.utils.translation import ugettext_lazy as _
from horizon import forms as hforms
from horizon.utils import functions as utils
from adjutant_ui.api import adjutant
# Username is ignored, use email address instead.
USERNAME_IS_EMAIL = True
class ForgotPasswordForm(hforms.SelfHandlingForm):
username = forms.CharField(
max_length=255, label=_("User Name"),
widget=forms.TextInput(attrs={"autofocus": "autofocus"}))
email = forms.EmailField(label=_("Email"))
def __init__(self, *args, **kwargs):
super(ForgotPasswordForm, self).__init__(*args, **kwargs)
if (hasattr(settings, 'USERNAME_IS_EMAIL') and
getattr(settings, 'USERNAME_IS_EMAIL')):
self.fields.pop('username')
self.fields['email'].widget = forms.TextInput(
attrs={"autofocus": "autofocus"})
def clean(self, *args, **kwargs):
# validate username and email?
return super(ForgotPasswordForm, self).clean(*args, **kwargs)
def handle(self, request, data):
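        # Try to submit the reset request via adjutant; on any failure fall through to the redirect below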
try:
submit_response = adjutant.forgotpassword_submit(
request, data)
if submit_response.ok:
return True
except Exception:
pass
# Send the user back to the login page.
msg = _("The password reset service is currently unavailable. "
"Please try again later.")
response = http.HttpResponseRedirect(settings.LOGOUT_URL)
utils.add_logout_reason(self.request, response, msg)
return response
| StarcoderdataPython |
1651693 | from PeptideBuilder import Geometry
import PeptideBuilder
import Bio.PDB
from Bio.PDB import calc_angle, rotaxis, Vector
from math import *
import numpy as np
def bytes2string(tbt_array):
return tbt_array.numpy().astype(dtype=np.uint8).tostring().split(b'\00')[0].decode("utf-8")
def generateAA(aaName):
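    # Build a single residue with phi/psi = 0, rotate it -90 degrees about the x axis,
    # translate the backbone N atom to the origin, then rotate 180 degrees about the CA
    # vector so the residue sits in a canonical orientation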
geo = Geometry.geometry(aaName)
geo.phi=0
geo.psi_im1=0
structure = PeptideBuilder.initialize_res(geo)
tx = -np.pi/2.0
Rx = np.array([[1,0,0], [0, cos(tx), -sin(tx)], [0, sin(tx), cos(tx)]])
for atom in structure.get_atoms():
atom.transform(Rx, np.array([0,0,0]))
nAtom = list(structure.get_atoms())[0]
nV = nAtom.get_coord()
I = np.identity(3)
for atom in structure.get_atoms():
atom.transform(I, -nV)
R = rotaxis(np.pi, list(structure.get_atoms())[1].get_vector())
for atom in structure.get_atoms():
atom.transform(R, np.array([0,0,0]))
# print(list(structure.get_atoms())[1].get_coord(), list(structure.get_atoms())[1])
out = Bio.PDB.PDBIO()
out.set_structure(structure)
out.save( "example.pdb" )
return structure[0]['A'][1]
def transform(structure):
tx = -np.pi/2.0
Rx = np.array([[1,0,0], [0, cos(tx), -sin(tx)], [0, sin(tx), cos(tx)]])
for atom in structure.get_atoms():
atom.transform(Rx, np.array([0,0,0]))
nAtom = list(structure.get_atoms())[0]
nV = nAtom.get_coord()
I = np.identity(3)
for atom in structure.get_atoms():
atom.transform(I, -nV)
R = rotaxis(np.pi, list(structure.get_atoms())[1].get_vector())
for atom in structure.get_atoms():
atom.transform(R, np.array([0,0,0]))
return structure | StarcoderdataPython |
1769069 | import copy
import falcon
import json
import os
import shutil
import subprocess
import sys
import traceback
from datetime import datetime, timedelta
from sqlalchemy import func, desc
from sqlalchemy.exc import SQLAlchemyError
import model
import endpoint
import util
from db import engine, session
from util import UserInfo
# Cache of the current year.
c_year = None
c_year_update = None
class JSONTranslator(object):
def process_request(self, req, resp):
return
def process_response(self, req, resp, endpoint):
if 'result' not in req.context:
return
resp.body = json.dumps(
req.context['result'],
sort_keys=True,
indent=4,
ensure_ascii=False,
)
class Authorizer(object):
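    # Resolve the bearer token from the Authorization header and attach the matching
    # UserInfo (or an anonymous one) to req.context['user']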
def process_request(self, req, resp):
if req.auth:
token_str = req.auth.split(' ')[-1]
try:
token = session.query(model.Token).get(token_str)
if token is not None:
if (req.relative_uri != '/auth' and
token.expire < datetime.utcnow()):
# user timeouted
req.context['user'] = UserInfo()
return
try:
req.context['user'] = UserInfo(
session.query(model.User).get(token.user),
token_str
)
return
except AttributeError:
pass
except:
session.rollback()
req.context['user'] = UserInfo()
class Year_fill(object):
# This middleware has 2 purposes:
# 1) Get current year.
# 2) Test connection with db. (this is very important!)
def process_request(self, req, resp):
if req.method == 'OPTIONS':
return
try:
if ('YEAR' in req.headers):
req.context['year'] = req.headers['YEAR']
req.context['year_obj'] = session.query(model.Year).\
get(req.context['year'])
else:
year_obj = session.query(model.Year).\
order_by(desc(model.Year.id)).first()
req.context['year_obj'] = year_obj
req.context['year'] = year_obj.id
except SQLAlchemyError:
session.rollback()
try:
if ('YEAR' in req.headers):
req.context['year'] = req.headers['YEAR']
req.context['year_obj'] = session.query(model.Year).\
get(req.context['year'])
else:
year_obj = session.query(model.Year).\
order_by(desc(model.Year.id)).first()
req.context['year_obj'] = year_obj
req.context['year'] = year_obj.id
except:
session.rollback()
raise
def log(req, resp):
try:
ip = req.env['HTTP_X_FORWARDED_FOR'].split(',')[-1].strip()
except KeyError:
ip = req.env['REMOTE_ADDR']
print('[%s] [%s] [%s] [%s] %s' %
(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), ip, req.method,
resp.status, req.relative_uri))
sys.stdout.flush()
class Logger(object):
def process_request(self, req, resp):
log(req, resp)
def log_sink(req, resp):
resp.status = falcon.HTTP_404
# Uncomment this to log sink
# log(req, resp)
class Corser(object):
def process_response(self, request, response, resource):
origin = request.get_header('Origin')
if origin in ('http://localhost:4200',
'https://ksi.fi.muni.cz',
'https://kyzikos.fi.muni.cz'):
response.set_header('Access-Control-Allow-Origin', origin)
response.set_header('Access-Control-Allow-Headers',
'authorization,content-type,year')
response.set_header('Access-Control-Allow-Methods',
'OPTIONS,PUT,POST,GET,DELETE')
def error_handler(ex, req, resp, params):
if isinstance(ex, falcon.HTTPError):
req.context['result'] = {
'errors': [ {
'status': ex.status,
'title': ex.title,
'detail': ex.description,
} ]
}
resp.status = ex.status
else:
req.context['result'] = {
'errors': [ {
'status': '500',
'title': 'Internal server error',
'detail': 'Vnitřní chyba serveru, kontaktujte správce backendu.',
} ]
}
resp.status = falcon.HTTP_500
log(req, resp)
if resp.status == falcon.HTTP_500:
dt = datetime.now().strftime('[%Y-%m-%d %H:%M:%S]')
lines = '\n'.join(
[dt + ' ' + line for line in traceback.format_exc().split('\n')]
)
print(lines)
# Add Logger() to middleware for logging
api = falcon.API(middleware=[JSONTranslator(), Authorizer(), Year_fill(),
Corser()])
api.add_error_handler(Exception, handler=error_handler)
api.req_options.auto_parse_form_urlencoded = True
# Uncomment to create the tables in the database
# model.Base.metadata.create_all(engine)
# Create /tmp/box with proper permissions (for sandbox)
if os.path.isdir(util.programming.EXEC_PATH):
shutil.rmtree(util.programming.EXEC_PATH, ignore_errors=True)
try:
os.makedirs(util.programming.EXEC_PATH)
except FileExistsError:
pass
p = subprocess.Popen(["setfacl", "-d", "-m", "group:ksi:rwx",
util.programming.EXEC_PATH])
p.wait()
if p.returncode != 0:
raise Exception("Cannot change umask to %s!" %
(util.programming.EXEC_PATH))
api.add_route('/robots.txt', endpoint.Robots())
api.add_route('/csp', endpoint.CSP())
api.add_route('/articles', endpoint.Articles())
api.add_route('/articles/{id}', endpoint.Article())
api.add_route('/achievements', endpoint.Achievements())
api.add_route('/achievements/{id}', endpoint.Achievement())
api.add_route('/posts', endpoint.Posts())
api.add_route('/posts/{id}', endpoint.Post())
api.add_route('/tasks', endpoint.Tasks())
api.add_route('/tasks/{id}', endpoint.Task())
api.add_route('/taskDetails/{id}', endpoint.TaskDetails())
api.add_route('/modules/{id}', endpoint.Module())
api.add_route('/modules/{id}/submit', endpoint.ModuleSubmit())
api.add_route('/modules/{id}/submitFiles', endpoint.ModuleSubmit()) # alias required for swagger
api.add_route('/submFiles/{id}', endpoint.ModuleSubmittedFile())
api.add_route('/threads', endpoint.Threads())
api.add_route('/threads/{id}', endpoint.Thread())
api.add_route('/threadDetails/{id}', endpoint.ThreadDetails())
api.add_route('/users', endpoint.Users())
api.add_route('/users/{id}', endpoint.User())
api.add_route('/profile/picture', endpoint.PictureUploader())
api.add_route('/profile/{id}', endpoint.OrgProfile())
api.add_route('/profile/', endpoint.Profile())
api.add_route('/basicProfile/', endpoint.BasicProfile())
api.add_route('/images/{context}/{id}', endpoint.Image())
api.add_route('/content', endpoint.Content())
api.add_route('/taskContent/{id}', endpoint.TaskContent())
api.add_route('/task-content/{id}/{view}', endpoint.TaskContent())
api.add_route('/registration', endpoint.Registration())
api.add_route('/auth', endpoint.Authorize())
api.add_route('/logout', endpoint.Logout())
api.add_route('/runCode/{id}/submit', endpoint.RunCode())
api.add_route('/feedback', endpoint.FeedbackEmail())
api.add_route('/settings/changePassword', endpoint.ChangePassword())
api.add_route('/forgottenPassword', endpoint.ForgottenPassword())
api.add_route('/waves', endpoint.Waves())
api.add_route('/waves/{id}', endpoint.Wave())
api.add_route('/years', endpoint.Years())
api.add_route('/years/{id}', endpoint.Year())
api.add_route('/feedbacks', endpoint.FeedbacksTask())
api.add_route('/feedbacks/{id}', endpoint.FeedbackTask())
"""
task-content endpoint contains: (defined in endpoint/content.py, see also
./gunicorn_cfg.py)
* /taskContent/{id}/zadani/{file_path}
* /taskContent/{id}/reseni/{file_path}
* /taskContent/[id]/icon/{file_name}
"""
api.add_route('/admin/evaluations/{id}', endpoint.admin.Evaluation())
api.add_route('/admin/corrections', endpoint.admin.Corrections())
api.add_route('/admin/corrections/{id}', endpoint.admin.Correction())
api.add_route('/admin/correctionsInfos', endpoint.admin.CorrectionsInfo())
api.add_route('/admin/correctionsInfos/{id}', endpoint.admin.CorrectionInfo())
api.add_route('/admin/correctionsEmail/{id}', endpoint.admin.CorrectionsEmail())
api.add_route('/admin/corrections/{id}/publish', endpoint.admin.CorrectionsPublish())
api.add_route('/admin/subm/eval/{eval_id}/', endpoint.admin.SubmFilesEval())
api.add_route('/admin/subm/task/{task_id}/', endpoint.admin.SubmFilesTask())
api.add_route('/admin/e-mail/', endpoint.admin.Email())
api.add_route('/admin/atasks/', endpoint.admin.Tasks())
api.add_route('/admin/atasks/{id}', endpoint.admin.Task())
api.add_route('/admin/atasks/{id}/deploy', endpoint.admin.TaskDeploy())
api.add_route('/admin/atasks/{id}/merge', endpoint.admin.TaskMerge())
api.add_route('/admin/waves/{id}/diff', endpoint.admin.WaveDiff())
api.add_route('/admin/achievements/grant', endpoint.admin.AchievementGrant())
api.add_route('/admin/user-export', endpoint.admin.UserExport())
api.add_route('/admin/evalCodes/{id}', endpoint.admin.EvalCode())
api.add_route('/admin/execs', endpoint.admin.Execs())
api.add_route('/admin/execs/{id}', endpoint.admin.Exec())
api.add_route('/admin/monitoring-dashboard', endpoint.admin.MonitoringDashboard())
api.add_route('/unsubscribe/{id}', endpoint.Unsubscribe())
api.add_sink(log_sink)
| StarcoderdataPython |
37343 | <filename>src/code/data-structures/DoublyLinkedList/DoublyLinkedList.py<gh_stars>0
class DoublyLinkedList:
""" Private Node Class """
class _Node:
def __init__(self, value):
self.value = value
self.next = None
self.prev = None
def __str__(self):
return self.value
def __init__(self):
self.head = None
self.tail = None
self.length = 0
def push_beginning(self, value):
node = self._Node(value)
if self.length == 0:
self.head = node
self.tail = node
else:
node.next = self.head
self.head.prev = node
self.head = node
self.length += 1
return True
def push_end(self, value):
node = self._Node(value)
if self.length == 0:
self.head = node
self.tail = node
else:
node.prev = self.tail
self.tail.next = node
self.tail = node
self.length += 1
return True
def push_at_index(self, value, index):
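        # Insert value at position index; boundary indices are delegated to push_beginning/push_end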
if self._is_empty():
raise IndexError("List is empty")
self._is_out_of_bounds(index)
        if index == 0:
            return self.push_beginning(value)
        if index >= self.length - 1:
            return self.push_end(value)
else:
node = self._Node(value)
i = 0
temp_node = self.head
while i < index - 1:
temp_node = temp_node.next
i += 1
node.next = temp_node.next
temp_node.next.prev = node
node.prev = temp_node
temp_node.next = node
self.length += 1
return True
def remove_beginning(self):
if self._is_empty():
raise IndexError("List is empty")
value = self.head.value
        if self.length == 1:
            self.head = None
            self.tail = None
        else:
            self.head = self.head.next
            self.head.prev.next = None
            self.head.prev = None
        self.length -= 1
return value
def remove_end(self):
if self._is_empty():
raise IndexError("List is empty")
value = self.tail.value
        if self.length == 1:
            self.head = None
            self.tail = None
        else:
            self.tail = self.tail.prev
            self.tail.next.prev = None
            self.tail.next = None
        self.length -= 1
return value
def remove_at_index(self, index):
if self._is_empty():
raise IndexError("List is empty")
self._is_out_of_bounds(index)
        if index == 0:
            return self.remove_beginning()
        if index >= self.length - 1:
            return self.remove_end()
else:
i = 0
temp_node = self.head
while i < index - 1:
temp_node = temp_node.next
i += 1
node_remove = temp_node.next
value = node_remove.value
temp_node.next = node_remove.next
node_remove.next = None
temp_node.next.prev = temp_node
node_remove.prev = None
            self.length -= 1
            return value
def get_value_at(self, index):
if self._is_empty():
raise IndexError("List is empty")
self._is_out_of_bounds(index)
i = 0
temp_node = self.head
while i < index:
temp_node = temp_node.next
i += 1
return temp_node.value
def set_value_at(self, value, index):
if self._is_empty():
raise IndexError("List is empty")
self._is_out_of_bounds(index)
i = 0
temp_node = self.head
while i < index:
temp_node = temp_node.next
i += 1
temp_node.value = value
return True
def reverse_list(self):
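        # Reverse in place by swapping node values from both ends toward the middle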
temp_node_head = self.head
temp_node_tail = self.tail
i = 0
while i < int(self.length / 2):
temp_value = temp_node_tail.value
temp_node_tail.value = temp_node_head.value
temp_node_head.value = temp_value
temp_node_tail = temp_node_tail.prev
temp_node_head = temp_node_head.next
i += 1
return True
""" Helper methods """
def size(self):
return self.length
def _is_empty(self):
return self.length == 0
def _is_out_of_bounds(self, idx):
if idx >= self.length:
raise IndexError('Index out of bounds')
def __str__(self):
temp_node = self.head
lst_str = "["
while temp_node is not None:
lst_str += str(temp_node.value)
if temp_node.next is not None:
lst_str += ","
temp_node = temp_node.next
lst_str += "]"
return lst_str
| StarcoderdataPython |
3371981 | <filename>python/multiprocessingHttpRequests.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 damian <<EMAIL>>
#
# Distributed under terms of the MIT license.
import concurrent.futures
import requests
URLS = [
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get',
'http://httpbin.org/get'
]
# Retrieve a single page and report the URL and contents
def load_url(url, timeout):
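    # Note: the timeout argument is accepted but not passed to requests.get below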
print(f"Sending request to url: {url}")
response = requests.get(url)
return response.json()
# We can use a with statement to ensure threads are cleaned up promptly
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
# Start the load operations and mark each future with its URL
future_to_url = {executor.submit(load_url, url, 60): url for url in URLS}
for future in concurrent.futures.as_completed(future_to_url):
url = future_to_url[future]
try:
data = future.result()
except Exception as exc:
print('%r generated an exception: %s' % (url, exc))
else:
print('%r page is %d bytes' % (url, len(data)))
| StarcoderdataPython |
3241519 | <reponame>FRANCISAnas/django-on-aws
import json
from os import getenv
import urllib3
http = urllib3.PoolManager()
def handler(event, context):
"""Lambda function to forward SNS notifications to Slack"""
try:
# Initialize variables
url = getenv("SLACK_WEBHOOK_URL")
text = event["Records"][0]["Sns"]["Message"]
# Check slack configuration
if not url:
print(f"Invalid slack configuration. SNS message: {text}")
return
# Post SNS message to Slack
msg = {
"channel": "#general",
"username": "SNS",
"text": text,
"icon_emoji": ":cloud:",
}
encoded_msg = json.dumps(msg).encode("utf-8")
resp = http.request("POST", url, body=encoded_msg)
print({"message": text, "status_code": resp.status, "response": resp.data})
except Exception as excpt:
print(f"Execution failed... {excpt}")
| StarcoderdataPython |
174520 | from django import forms
from django.core.exceptions import ValidationError
from .models import Process
class ProcessCancelForm(forms.ModelForm):
def __init__(self, user=None, **kwargs):
self.user = user
super(ProcessCancelForm, self).__init__(**kwargs)
def clean(self):
data = super(ProcessCancelForm, self).clean()
if not self.instance.can_cancel(user=self.user):
raise ValidationError("You can't cancel that process at this time.")
return data
class Meta:
model = Process
fields = []
| StarcoderdataPython |
3253814 | <gh_stars>1-10
from os.path import join, dirname, abspath
import sys
sys.path.append(dirname(dirname(abspath(__file__))))
try:
import jackal_navi_envs
except:
pass
import gym
import numpy as np
try:
sys.path.remove('/opt/ros/melodic/lib/python2.7/dist-packages')
except:
pass
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from tianshou.env import SubprocVectorEnv, DummyVectorEnv
from policy import DQNPolicy, DuelingDQN
from tianshou.data import Collector, ReplayBuffer, PrioritizedReplayBuffer
from collector import Collector as Fake_Collector
from offpolicy import offpolicy_trainer
sys.path.append('/opt/ros/melodic/lib/python2.7/dist-packages')
import pickle
import argparse
import json
from datetime import datetime
import os
parser = argparse.ArgumentParser(description = 'Jackal navigation simulation')
parser.add_argument('--config', dest = 'config_path', type = str, default = 'configs/dqn.json', help = 'path to the configuration file')
parser.add_argument('--save', dest = 'save_path', type = str, default = 'results/', help = 'path to the saving folder')
args = parser.parse_args()
config_path = args.config_path
save_path = args.save_path
# Load the config files
with open(config_path, 'rb') as f:
config = json.load(f)
env_config = config['env_config']
wrapper_config = config['wrapper_config']
training_config = config['training_config']
# Config logging
now = datetime.now()
dt_string = now.strftime("%Y_%m_%d_%H_%M")
save_path = os.path.join(save_path, config['section'] + "_" + dt_string)
if not os.path.exists(save_path):
os.mkdir(save_path)
writer = SummaryWriter(save_path)
with open(os.path.join(save_path, 'config.json'), 'w') as fp:
json.dump(config, fp)
# initialize the env --> num_env can only be one right now
if not config['use_container']:
wrapper_dict = jackal_navi_envs.jackal_env_wrapper.wrapper_dict
env = wrapper_dict[wrapper_config['wrapper']](gym.make('jackal_discrete-v0', **env_config), **wrapper_config['wrapper_args'])
train_envs = DummyVectorEnv([lambda: env for _ in range(1)])
state_shape = env.observation_space.shape or env.observation_space.n
action_shape = env.action_space.shape or env.action_space.n
else:
train_envs = config
Collector = Fake_Collector
state_shape = 721+len(config['env_config']['param_list']) if config['env'] == 'jackal' else 4
action_shape = 2**len(config['env_config']['param_list'])+1 if config['env'] == 'jackal' else 2
print(state_shape, action_shape, config['env_config']['param_list'], len(config['env_config']['param_list']), len(config['env_config']['param_list'])**2)
# config random seed
np.random.seed(config['seed'])
torch.manual_seed(config['seed'])
if not config['use_container']:
train_envs.seed(config['seed'])
'''
net = Net(training_config['layer_num'], state_shape, action_shape, config['device']).to(config['device'])
optim = torch.optim.Adam(net.parameters(), lr=training_config['learning_rate'])
'''
'''
class DuelingDQN(nn.Module):
def __init__(self, state_shape, action_shape, hidden_layer = [64, 64], cnn = True, feature_layer = [256]):
super().__init__()
if cnn:
self.feature = nn.Sequential(
nn.Linear(720, feature_layer[0]), nn.ReLU(inplace=True)
)
feature_shape = feature_layer[0] + int(np.log2(action_shape-1))+1
else:
self.feature = lambda x: x.view(x.shape[0], -1)
feature_shape = state_shape
layers = [np.prod(feature_shape)] + hidden_layer
self.value = []
self.advantage = []
for i, o in zip(layers[:-1], layers[1:]):
self.value.append(nn.Linear(i, o))
self.value.append(nn.ReLU(inplace=True))
self.advantage.append(nn.Linear(i, o))
self.advantage.append(nn.ReLU(inplace=True))
self.advantage.append(nn.Linear(o, np.prod(action_shape)))
self.value.append(nn.Linear(o, 1))
self.value = nn.Sequential(*self.value)
self.advantage = nn.Sequential(*self.advantage)
def forward(self, obs, state=None, info={}):
if not isinstance(obs, torch.Tensor):
obs = torch.tensor(obs, dtype=torch.float)
batch = obs.shape[0]
laser = obs.view(batch, 1, -1)[:,:,:720]
params = obs.view(batch, -1)[:, 720:]
embedding = self.feature(laser).view(batch, -1)
feature = torch.cat((embedding, params), dim = 1)
advantage = self.advantage(feature)
value = self.value(feature)
logits = value + advantage - advantage.mean(1, keepdim=True)
return logits, state
'''
net = DuelingDQN(state_shape, action_shape, hidden_layer = training_config['hidden_layer'], cnn = training_config['cnn'])
optim = torch.optim.Adam(net.parameters(), lr=training_config['learning_rate'])
policy = DQNPolicy(
net, optim, training_config['gamma'], training_config['n_step'],
grad_norm_clipping = training_config['grad_norm_clipping'],
target_update_freq=training_config['target_update_freq'])
if training_config['prioritized_replay']:
buf = PrioritizedReplayBuffer(
training_config['buffer_size'],
alpha=training_config['alpha'], beta=training_config['beta'])
else:
buf = ReplayBuffer(training_config['buffer_size'])
policy.set_eps(1)
train_collector = Collector(policy, train_envs, buf)
train_collector.collect(n_step=training_config['pre_collect'])
def delect_log():
for dirname, dirnames, filenames in os.walk('/u/zifan/.ros/log'):
for filename in filenames:
p = join(dirname, filename)
if p.endswith('.log') and dirname != '/u/zifan/.ros/log':
os.remove(p)
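# At the start of each epoch: linearly anneal epsilon down to a floor of 0.1 over the
# exploration phase and save a checkpoint of the current policy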
train_fn =lambda e: [policy.set_eps(max(0.1, 1-(e-1)/training_config['epoch']/training_config['exploration_ratio'])),
torch.save(policy.state_dict(), os.path.join(save_path, 'policy_%d.pth' %(e)))]
result = offpolicy_trainer(
policy, train_collector, training_config['epoch'],
training_config['step_per_epoch'], training_config['collect_per_step'],
training_config['batch_size'], update_per_step=training_config['update_per_step'],
train_fn=train_fn, writer=writer)
import shutil
shutil.rmtree('/u/zifan/buffer', ignore_errors=True) # a way to force all the actors to stop
train_envs.close()
| StarcoderdataPython |