id (string, lengths 1–8) | text (string, lengths 6–1.05M) | dataset_id (string, 1 class) |
---|---|---|
258478
|
# import tensorflow as tf
import oneflow as flow
import oneflow.typing as tp
from typing import Tuple
import math
import shutil
import numpy as np
import time
from core.function import train
from core.function import validate
from core.loss import JointsMSELoss
from core.make_dataset import CocoDataset
from core.hrnet import HRNet
from core.make_ground_truth import GroundTruth
from core.metric import PCK
# from test import test_during_training
from configuration.base_config import Config
from utils.tools import get_config_params
#config
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
cfg = get_config_params(Config.TRAINING_CONFIG_NAME)
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0" #指定第五张卡可见
#flow.config.gpu_device_num(5)
# loss and optimizer
loss = JointsMSELoss()
# loss_metric = tf.metrics.Mean()
pck = PCK()
# accuracy_metric = tf.metrics.Mean()
@flow.global_function(type="train",function_config=func_config)
def train_step(images: tp.Numpy.Placeholder((cfg.BATCH_SIZE, 256, 256, 3), dtype=flow.float32),
target: tp.Numpy.Placeholder((cfg.BATCH_SIZE, 64, 64, 17), dtype=flow.float32),
target_weight: tp.Numpy.Placeholder((cfg.BATCH_SIZE, 17, 1), dtype=flow.float32),
) -> Tuple[tp.Numpy, tp.Numpy, tp.Numpy]:
outputs = HRNet(images, training=True)
# if isinstance(outputs, list):
# losses = loss.call(outputs[0], target, target_weight)
# for output in outputs[1:]:
# losses += loss.call(output, target, target_weight)
# else:
# output = outputs
# losses = loss.call(output, target, target_weight)
# measure accuracy and record loss
losses = loss.call(outputs, target, target_weight)
# Set learning rate as 0.001
lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.001])
# Set Adam optimizer
flow.optimizer.Adam(lr_scheduler, do_bias_correction=False).minimize(losses)
return losses, outputs, target
@flow.global_function(function_config=func_config)
def val_step(images: tp.Numpy.Placeholder((cfg.BATCH_SIZE, 256, 256, 3), dtype=flow.float32),
target: tp.Numpy.Placeholder((cfg.BATCH_SIZE, 64, 64, 17), dtype=flow.float32),
target_weight: tp.Numpy.Placeholder((cfg.BATCH_SIZE, 17, 1), dtype=flow.float32),
) -> Tuple[tp.Numpy, tp.Numpy, tp.Numpy]:
outputs = HRNet(images, training=False)
losses = loss.call(outputs, target, target_weight)
return losses, outputs, target
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count if self.count != 0 else 0
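# Quick illustration of the weighted running average (example values, not from the original file):
#   m = AverageMeter(); m.update(1.0, n=2); m.update(4.0, n=1)
#   -> m.sum == 6.0, m.count == 3, m.avg == 2.0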
def train(epoch,dataset_train,dataset_length_train,loss, pck):
losses = AverageMeter()
acc = AverageMeter()
print("****************** train *****************")
for i, batch_data in enumerate(dataset_train):
# measure data loading time
gt = GroundTruth(cfg, batch_data)
images, target, target_weight = gt.get_ground_truth()
images = np.ascontiguousarray(images)
target = np.ascontiguousarray(target)
target_weight = np.ascontiguousarray(target_weight)
# compute output
loss, outputs, target = train_step(images, target, target_weight)
# measure accuracy and record loss
losses.update(loss.item(), images.shape[0])
_, avg_acc, cnt, pred = pck.call(network_output=outputs, target=target)
acc.update(avg_acc, cnt)
# measure elapsed time
if (i + 1) % cfg.PRINT_FREQ == 0:
print("{}th epoch, {}/{}th batch, Loss {loss.avg:.10f}, Accuracy {acc.avg:.5f}. ".format(epoch + 1, i + 1,dataset_length_train, loss=losses, acc=acc))
# if i > 100:
# print(i,'break')
# break
def validate(epoch,dataset_valid,loss,pck,best_perf):
# batch_time = AverageMeter()
losses = AverageMeter()
acc = AverageMeter()
# end = time.time()
for i, batch_data in enumerate(dataset_valid):
# compute output
gt = GroundTruth(cfg, batch_data)
images, target, target_weight = gt.get_ground_truth()
images = np.ascontiguousarray(images)
target = np.ascontiguousarray(target)
target_weight = np.ascontiguousarray(target_weight)
# compute output
loss, outputs, target = val_step(images, target, target_weight)
# measure accuracy and record loss
losses.update(loss.item(), images.shape[0])
_, avg_acc, cnt, pred = pck.call(network_output=outputs, target=target)
acc.update(avg_acc, cnt)
# if (i + 1) % cfg.PRINT_FREQ == 0:
# print("{}th epoch, Loss {loss.avg:.10f}, Accuracy {acc.avg:.5f}. ".format(epoch + 1, loss=losses, acc=acc))
print("****************** evalute *****************")
print("{}th epoch, Loss {loss.avg:.10f}, Accuracy {acc.avg:.5f}. ".format(epoch + 1, loss=losses, acc=acc))
print("The best acc: {}. ".format(best_perf))
return losses.avg,acc.avg
def main():
# Dataset
print('start load data')
coco_train = CocoDataset(config_params=cfg, dataset_type="train")
dataset_train, dataset_length_train = coco_train.generate_dataset()
coco_valid = CocoDataset(config_params=cfg, dataset_type="valid")
dataset_valid, _ = coco_valid.generate_dataset()
print('finish load data')
check_point = flow.train.CheckPoint()
if cfg.LOAD_WEIGHTS_BEFORE_TRAINING:
assert os.path.isdir(cfg.save_weights_dir)
print('start load model')
check_point.load(cfg.save_weights_dir)
print('finished load model')
best_perf = 0.
pre_epoch = -1
begin_epoch = cfg.LOAD_WEIGHTS_FROM_EPOCH
print("****************** start training *****************")
for epoch in range(begin_epoch, cfg.EPOCHS):
start = time.time()
train(epoch,dataset_train,dataset_length_train,loss, pck)
print("Time for epoch {} is {} sec.".format(epoch+ 1, time.time() - start))
# evaluate on validation set
loss_val, acc_val = validate(epoch,dataset_valid,loss, pck,best_perf)
if epoch + 1 > 1 and acc_val > best_perf:
best_perf = acc_val
if pre_epoch != -1:
# delete the previous best checkpoint
print("delete the previous best {}th epoch model".format(pre_epoch))
shutil.rmtree(os.path.join(cfg.save_weights_dir, "{}th_epoch".format(pre_epoch)))
# save parameters
print("start save the best model")
check_point.save(
os.path.join(cfg.save_weights_dir, "{}th_epoch".format(epoch + 1))
)
pre_epoch = epoch + 1
print("finished save the best epoch model")
print("*************** Train {} done *****************")
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6614857
|
<gh_stars>1-10
import os
import itertools
import json
import requests
import numpy as np
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
KEY = os.environ.get('KEY')
SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY')
SENDER = os.environ.get('SENDER')
def driving_time_and_distance(ori, dest):
"""get the dict of distance between two places
Args:
ori (str): Place A
dest (str): Place B
Returns:
dict: return a dict of distance description
"""
url = f"https://maps.googleapis.com/maps/api/distancematrix/json?key={KEY}&origins={ori}&destinations={dest}&mode=driving&language=en-EN&sensor=false"
result= json.loads(requests.get(url).text)
return {"distance_value": result["rows"][0]["elements"][0]["distance"]["value"], "distance_text": result["rows"][0]["elements"][0]["distance"]["text"], "duration_text": result["rows"][0]["elements"][0]["duration"]["text"], "duration_value": result["rows"][0]["elements"][0]["duration"]["value"]}
def distance_matrix_gcp(destinations):
"""get the pairwise distance matrix with gcp
Args:
destinations (list of dict): places to compare, each a dict with an "address" key
Returns:
dict: a dict whose "distance_matrix" key holds the pairwise driving distances in meters
"""
indice = range(len(destinations))
dis_max = np.zeros(shape=(len(destinations),len(destinations)))
for pair in itertools.combinations(indice, 2):
dis = driving_time_and_distance(destinations[pair[0]]["address"], destinations[pair[1]]["address"])['distance_value']
dis_max[pair[0]][pair[1]] = dis
dis_max[pair[1]][pair[0]] = dis
return {"distance_matrix": dis_max.tolist()}
def send_email(dest_mail, subject, text):
"""Send email to carriers
Args:
dest_mail (str): target email
subject (str): email subject
text (str): email content
Returns:
"""
message = Mail(
from_email=SENDER,
to_emails=dest_mail,
subject=subject,
html_content=text)
try:
sg = SendGridAPIClient(f"{SENDGRID_API_KEY}")
response = sg.send(message)
print(response.status_code)
print(response.body)
print(response.headers)
except Exception as e:
print(str(e))
|
StarcoderdataPython
|
6656331
|
"""
File: DaqDevDiscovery02.py
Library Call Demonstrated: mcculw.ul.get_net_device_descriptor()
mcculw.ul.create_daq_device()
mcculw.ul.release_daq_device()
Purpose: Discovers a Network DAQ device and assigns board
number to the detected device.
Demonstration: Displays the detected DAQ device and flashes the
LED of the selected device.
Other Library Calls: mcculw.ul.ignore_instacal()
mcculw.ul.flash_led()
"""
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
import tkinter as tk
from mcculw import ul
from mcculw.ul import ULError
try:
from ui_examples_util import (UIExample, show_ul_error,
validate_positive_int_entry)
except ImportError:
from .ui_examples_util import (UIExample, show_ul_error,
validate_positive_int_entry)
class DaqDevDiscovery02(UIExample):
def __init__(self, master):
super(DaqDevDiscovery02, self).__init__(master)
ul.ignore_instacal()
self.board_num = 0
self.device_created = False
self.create_widgets()
def discover_device(self):
host = self.host_entry.get()
port = self.get_port()
timeout_ms = 5000
try:
# Release any previously created device
if self.device_created:
ul.release_daq_device(self.board_num)
self.device_created = False
descriptor = ul.get_net_device_descriptor(host, port, timeout_ms)
if descriptor is not None:
# Create the DAQ device from the descriptor
ul.create_daq_device(self.board_num, descriptor)
self.device_created = True
self.status_label["text"] = "DAQ Device Discovered"
self.flash_led_button["state"] = "normal"
self.device_name_label["text"] = descriptor.product_name
self.device_id_label["text"] = descriptor.unique_id
else:
self.status_label["text"] = "No Device Discovered"
self.flash_led_button["state"] = "disabled"
self.device_name_label["text"] = ""
self.device_id_label["text"] = ""
except ULError as e:
self.status_label["text"] = "No Device Discovered"
self.flash_led_button["state"] = "disabled"
self.device_name_label["text"] = ""
self.device_id_label["text"] = ""
show_ul_error(e)
def flash_led(self):
try:
# Flash the device LED
ul.flash_led(self.board_num)
except ULError as e:
show_ul_error(e)
def get_port(self):
try:
return int(self.port_entry.get())
except ValueError:
return 0
def create_widgets(self):
'''Create the tkinter UI'''
main_frame = tk.Frame(self)
main_frame.pack(fill=tk.X, anchor=tk.NW)
positive_int_vcmd = self.register(validate_positive_int_entry)
settings_grid_frame = tk.Frame(main_frame)
settings_grid_frame.pack(anchor=tk.NW)
curr_row = 0
host_label = tk.Label(settings_grid_frame)
host_label["text"] = "Hostname or IP Address:"
host_label.grid(row=curr_row, column=0, sticky=tk.W, padx=3, pady=3)
self.host_entry = tk.Entry(settings_grid_frame)
self.host_entry.grid(
row=curr_row, column=1, sticky=tk.W, padx=3, pady=3)
curr_row += 1
port_label = tk.Label(settings_grid_frame)
port_label["text"] = "Port:"
port_label.grid(row=curr_row, column=0, sticky=tk.W, padx=3, pady=3)
self.port_entry = tk.Entry(
settings_grid_frame, validate="key",
validatecommand=(positive_int_vcmd, "%P"))
self.port_entry.grid(
row=curr_row, column=1, sticky=tk.W, padx=3, pady=3)
self.port_entry.insert(0, "54211")
discover_button = tk.Button(main_frame)
discover_button["text"] = "Discover DAQ Device"
discover_button["command"] = self.discover_device
discover_button.pack(padx=3, pady=3)
self.status_label = tk.Label(main_frame)
self.status_label["text"] = "Status"
self.status_label.pack(anchor=tk.NW, padx=3, pady=3)
results_group = tk.LabelFrame(self, text="Discovered Device")
results_group.pack(fill=tk.X, anchor=tk.NW, padx=3, pady=3)
device_info_frame = tk.Frame(results_group)
device_info_frame.pack(anchor=tk.NW)
curr_row = 0
device_name_left_label = tk.Label(device_info_frame)
device_name_left_label["text"] = "Device Identifier:"
device_name_left_label.grid(
row=curr_row, column=0, sticky=tk.W, padx=3, pady=3)
self.device_name_label = tk.Label(device_info_frame)
self.device_name_label.grid(
row=curr_row, column=1, sticky=tk.W, padx=3, pady=3)
curr_row += 1
device_id_left_label = tk.Label(device_info_frame)
device_id_left_label["text"] = "Device Identifier:"
device_id_left_label.grid(row=curr_row, sticky=tk.W, padx=3, pady=3)
self.device_id_label = tk.Label(device_info_frame)
self.device_id_label.grid(
row=curr_row, column=1, sticky=tk.W, padx=3, pady=3)
self.flash_led_button = tk.Button(results_group)
self.flash_led_button["text"] = "Flash LED"
self.flash_led_button["command"] = self.flash_led
self.flash_led_button["state"] = "disabled"
self.flash_led_button.pack(padx=3, pady=3)
button_frame = tk.Frame(self)
button_frame.pack(fill=tk.X, side=tk.RIGHT, anchor=tk.SE)
quit_button = tk.Button(button_frame)
quit_button["text"] = "Quit"
quit_button["command"] = self.master.destroy
quit_button.grid(row=0, column=1, padx=3, pady=3)
# Start the example if this module is being run
if __name__ == "__main__":
# Start the example
DaqDevDiscovery02(master=tk.Tk()).mainloop()
|
StarcoderdataPython
|
3566609
|
<reponame>threefoldtech/jumpscale_core9<gh_stars>0
from jumpscale import j
import base64
from .SerializerBase import SerializerBase
class SerializerBase64(SerializerBase):
def __init__(self):
SerializerBase.__init__(self)
def dumps(self, s):
if j.data.types.string.check(s):
b = s.encode()
else:
b = s
return base64.b64encode(b).decode()
def loads(self, b):
if j.data.types.string.check(b):
b = b.encode()
return base64.b64decode(b).decode()
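# Illustrative round trip (assumes the jumpscale `j` sandbox is importable):
#   s = SerializerBase64()
#   s.dumps("hello")      # -> 'aGVsbG8='
#   s.loads('aGVsbG8=')   # -> 'hello'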
|
StarcoderdataPython
|
1686706
|
<gh_stars>1-10
import os
__author__ = "<NAME>"
__version__ = 1.0
def xmlMarkup(games, team_ab, team_name, team_record):
'''Markup the RSS feed using the data obtained.
:param games: list of games that the team played this season
:type games: list of GameData
:param team_ab: the team's abbreviated name
:type team_ab: string
:param team_name: the team's name
:type team_name: string'''
file_name = team_ab + "_feed.xml"
'''Used code from http://stackoverflow.com/questions/7935972/
writing-to-a-new-directory-in-python-without-changing-directory'''
script_dir = os.path.dirname(os.path.abspath(__file__))
dest_dir = os.path.join(script_dir, "feeds", team_ab)
try:
os.makedirs(dest_dir)
except OSError:
pass
path = os.path.join(dest_dir, file_name)
with open(path, 'w') as xml:
xml.write('<?xml version="1.0" encoding="UTF-8" ?>\n')
xml.write("<rss version='2.0'>\n")
xml.write("<channel>\n")
xml.write("<title>%s - %s</title>\n" % (team_name, team_record))
xml.write("<description>Latest %s scores</description>\n" % team_name)
xml.write("<link>http://espn.go.com/nhl/team/schedule/_/name/%s</link>\n"
% team_ab)
for game in games:
xml.write("<item>\n")
xml.write("<title>%s</title>\n" % game.headline)
xml.write("<link>%s</link>\n" % game.link)
xml.write("</item>\n")
xml.write("</channel>\n</rss>")
xml.close()
|
StarcoderdataPython
|
4841614
|
import math, inspect, os, sys, time
import pymel.core as pm
import maya.cmds as cmds
from zMayaTools.menus import Menu
from zMayaTools import maya_logging, maya_helpers
log = maya_logging.get_log()
# Notes:
#
# - This doesn't handle inbetween targets. There's no good way to export those to
# use them with game engines, so I don't use them.
# - The source and destination blend shapes should have one target each. Multi-target
# blend shapes aren't supported.
# - If you request a retargetted blend shape, one will always be created. We don't
# check to see if the source blend shape actually has any effect on the target, so if
# you retarget an elbow corrective on the body to the shoes, it'll just create an empty
# target. That's harmless, and checking if the mesh has actually changed would be
# slower.
#
# A "set" button to set the selection to the source or destination blend shape
# would be nice, as well as a "swap" button, but Maya's GUI system is so awful
# to use I just don't want to figure out how to make a clean UI for it.
def duplicate_mesh_from_plug(shape_attr, name):
"""
Given a mesh output plug, create a new mesh whose geometry is copied from that plug.
Return the transform.
"""
# Create a new mesh, and copy the base mesh of src_node.
transform_node = pm.createNode('transform', n=name)
# Copy the mesh.
mesh_node = pm.createNode('mesh', n=name + 'OrigShape', p=transform_node)
shape_attr.connect(mesh_node.attr('inMesh'))
# Access vertex data on the copy, to force the data to copy. Otherwise it won't be copied, since
# we're disconnecting it before the display will refresh.
transform_node.vtx[0]
shape_attr.disconnect(mesh_node.attr('inMesh'))
return transform_node
def duplicate_base_mesh(node):
"""
Given a deformed mesh, create a new mesh that's a clean copy of its base mesh.
Return the transform.
"""
# Create a new mesh, and copy the base mesh of src_node.
transform_node = duplicate_mesh_from_plug(node.getShapes()[-1].attr('worldMesh[0]'), name=node.nodeName() + 'Copy')
# Copy the world space transform.
pm.xform(transform_node, ws=True, matrix=pm.xform(node, q=True, ws=True, matrix=True))
return transform_node
#def attach_deformer(mesh, deformer_node):
# """
# Given a shape and a deformer node, attach the deformer to the shape.
#
# Maya does this when you create a mesh with pm.deformer, but not if
# Given a transform node with a shape,
# """
# group_parts = pm.createNode('groupParts')
# group_parts.attr('inputComponents').set(1, 'vtx[*]', type='componentList')
# group_parts.attr('ihi').set(False)
# mesh.attr('worldMesh[0]').connect(group_parts.attr('inputGeometry'))
#
# group_id = pm.createNode('groupId')
# group_id.attr('groupId').connect(group_parts.attr('groupId'))
# group_id.attr('ihi').set(False)
#
# group_parts.attr('outputGeometry').connect(deformer_node.attr('input[0]').attr('inputGeometry'))
# group_id.attr('groupId').connect(deformer_node.attr('input[0]').attr('groupId'))
#
# src_mesh_output = pm.createNode('mesh', n=mesh.getTransform().nodeName() + 'CopyShape', p=mesh.getTransform())
# deformer_node.attr('outputGeometry[0]').connect(src_mesh_output.attr('inMesh'))
# mesh.attr('intermediateObject').set(1)
def prep_for_retargetting(blend_shape, restores):
"""
Prepare blend_shape for being used as the source for retargetting.
- We need to be able to set blend shape weights, so disconnect anything connected
to all weights.
- Make sure all weights and groups are visible, and all groups have a weight of 1, so
they don't prevent us from enabling weights.
- Set all blend shape weights to 0, so we can enable them one at a time.
Changes will be added to restores, so they'll be reverted by maya_helpers.restores()
when we're done.
"""
# Make sure the blendShape itself is turned on.
restores.append(maya_helpers.SetAndRestoreAttr(blend_shape.envelope, 1))
for directory_entry in blend_shape.targetDirectory:
restores.append(maya_helpers.SetAndRestoreAttr(directory_entry.directoryVisibility, 1))
# Setting directoryParentVisibility will ensure that the directory isn't hidden from its
# parent being hidden, or from a blendShape group on the shapeEditorManager being hidden.
restores.append(maya_helpers.SetAndRestoreAttr(directory_entry.directoryParentVisibility, 1))
restores.append(maya_helpers.SetAndRestoreAttr(directory_entry.directoryWeight, 1))
for target_visibility in blend_shape.targetVisibility:
print(target_visibility)
restores.append(maya_helpers.SetAndRestoreAttr(target_visibility, 1))
# XXX: untested
for inbetween_info_group in blend_shape.inbetweenInfoGroup:
for inbetween_info in inbetween_info_group.inbetweenInfo:
restores.append(maya_helpers.SetAndRestoreAttr(inbetween_info.inbetweenVisibility, 1))
for weight in blend_shape.weight:
# This will also disconnect anything connected to the weight.
restores.append(maya_helpers.SetAndRestoreAttr(weight, 0))
def redst_blend_shapes(src_node, dst_node, src_blend_shape_node, dst_blend_shape_node, blend_shape_indices, connect_weights, use_cvwrap):
try:
pm.waitCursor(state=True)
# Remember the selection, so we can restore it.
old_selection = pm.ls(sl=True)
# Delete any nodes created while doing this when we're done.
with maya_helpers.temporary_namespace():
with maya_helpers.restores() as restores:
prep_for_retargetting(src_blend_shape_node, restores)
src_to_dst_weights = redst_blend_shapes_inner(src_node, dst_node, src_blend_shape_node, dst_blend_shape_node, blend_shape_indices, use_cvwrap=use_cvwrap)
# Copy or connect weights. Do this after we finish the above, since we need to let maya_helpers.restores()
# restore the original weights before we copy them, or they'll all be set to 0.
for src_weight, dst_weight in src_to_dst_weights.items():
if connect_weights:
# Connect the source blend shape's weight to the target.
src_weight.connect(dst_weight)
else:
# Copy the source weight.
dst_weight.set(src_weight.get())
return src_to_dst_weights
finally:
pm.waitCursor(state=False)
pm.select(old_selection)
def add_blend_shape_index_to_directory(directory_entry, idx):
"""
Add idx to a blendShape.targetDirectory child list if it's not already present, and
remove it from all other directories.
"""
# Add idx to directory_entry.
child_indices = directory_entry.childIndices.get() or []
if idx not in child_indices:
child_indices.append(idx)
directory_entry.childIndices.set(child_indices, type='Int32Array')
# Remove idx from all others.
for other_directory_entry in directory_entry.array():
if other_directory_entry == directory_entry:
continue
child_indices = other_directory_entry.childIndices.get() or []
if idx in child_indices:
child_indices.remove(idx)
other_directory_entry.childIndices.set(child_indices, type='Int32Array')
if idx >= 0:
# For blend shape targets, set parentDirectory to match.
directory_entry.node().parentDirectory[idx].set(directory_entry.index())
def find_blend_shape_directory_by_blend_shape_idx(blend_shape, idx):
"""
Find the target directory of the given blend shape index.
If it's not found, return the root directory.
"""
for directory in blend_shape.targetDirectory:
# childIndices will be None if there are no entries.
if idx in (directory.childIndices.get() or []):
return directory
# If we can't find it, return the root directory.
return blend_shape.targetDirectory[0]
def find_directory_entry_by_name(blend_shape, name):
"""
Find and return the targetDirectory with the given name, or None if it doesn't exist.
"""
for directory_entry in blend_shape.targetDirectory:
if directory_entry.directoryName.get() == name:
return directory_entry
return None
def recursively_create_hierarchy(src_directory_entry, dst_blend_shape):
"""
Create a matching blend shape directory hierarchy for dst_directory_entry in
dst_blend_shape.
Return the corresponding targetDirectory in the destination blendShape node.
"""
# If this is the root, stop.
if src_directory_entry.index() == 0:
return dst_blend_shape.targetDirectory[0]
# Recursively create the parent. dst_directory_parent is the target directory of the parent.
parent_index = src_directory_entry.parentIndex.get()
parent_directory = src_directory_entry.array()[parent_index]
dst_directory_parent = recursively_create_hierarchy(parent_directory, dst_blend_shape)
# If a directory with the same name already exists in dst_blend_shape, use it.
dst_directory_entry = find_directory_entry_by_name(dst_blend_shape, src_directory_entry.directoryName.get())
if dst_directory_entry is not None:
return dst_directory_entry
# Create the directory, copying attributes from the source.
new_shape_directory_index = max(dst_blend_shape.targetDirectory.get(mi=True)) + 1
dst_directory_entry = dst_blend_shape.targetDirectory[new_shape_directory_index]
dst_directory_entry.directoryName.set(src_directory_entry.directoryName.get())
dst_directory_entry.directoryVisibility.set(dst_directory_parent.directoryVisibility.get())
dst_directory_entry.directoryParentVisibility.set(dst_directory_parent.directoryParentVisibility.get())
dst_directory_entry.directoryWeight.set(dst_directory_parent.directoryWeight.get())
dst_directory_entry.parentIndex.set(dst_directory_parent.index())
# Add the new directory to the childIndices list of the parent. Groups are stored as
# the inverse of their index. (Do this even if we're not newly creating the directory to
# make sure it's present, but use the existing directory index.)
add_blend_shape_index_to_directory(dst_directory_parent, -dst_directory_entry.index())
return dst_directory_entry
def create_matching_blend_shape_directory(src_blend_shape, src_blend_shape_index, dst_blend_shape, dst_blend_shape_index):
"""
Create a blend shape directory hierarchy in dst_blend_shape for dst_blend_shape_index,
matching the grouping of src_blend_shape_index in src_blend_shape, and add
dst_blend_shape_index to it.
"""
# Find the target directory for the source blend shape.
src_directory = find_blend_shape_directory_by_blend_shape_idx(src_blend_shape, src_blend_shape_index)
# Create the directory hierarchy, and move the blend shape into it.
dst_directory_entry = recursively_create_hierarchy(src_directory, dst_blend_shape)
add_blend_shape_index_to_directory(dst_directory_entry, dst_blend_shape_index)
def redst_blend_shapes_inner(src_node, dst_node, src_blend_shape_node, dst_blend_shape_node, blend_shape_indices,
use_cvwrap=True):
# Duplicate the destination mesh.
dst_node_copy = duplicate_base_mesh(dst_node)
# Wrap dst_node_copy to src_node, so the destination mesh follows blend shapes on the source mesh.
wrap_deformer(src_node, dst_node_copy, auto_weight_threshold=True, falloff_mode=0, use_cvwrap_if_available=use_cvwrap)
# Find all blend shape names. We require that blend shapes have a name, and always give
# blend shapes the same name as their source.
dst_blend_shape_name_to_index = {}
for idx in dst_blend_shape_node.weightIndexList():
src_weight = dst_blend_shape_node.attr('w').elementByLogicalIndex(idx)
target_name = pm.aliasAttr(src_weight, q=True)
if target_name is None:
log.warning('Warning: destination blend shape has an unused blend shape %i' % idx)
continue
dst_blend_shape_name_to_index[target_name] = idx
def get_unused_blend_shape_index(preferred_idx):
# Return an index that isn't in use in dst_blend_shape_name_to_index.
index_list = sorted(dst_blend_shape_name_to_index.values())
if preferred_idx not in index_list:
return preferred_idx
for idx in xrange(0, len(index_list)+1):
if idx not in index_list:
return idx
raise RuntimeError('Not reachable')
# Do the actual retargetting.
src_to_dst_weights = {}
for idx in blend_shape_indices:
# Make sure that we aren't connected backwards, or this would create a cycle.
src_weight = src_blend_shape_node.attr('weight').elementByLogicalIndex(idx)
# Each index is a weight attribute on the source blend shape deformer. Get the name of the blend shape.
target_name = pm.aliasAttr(src_weight, q=True)
if target_name is None:
log.warning('Error: blend shape index %i has no name' % idx)
continue
# Find the blend shape in the target with this name. If it already exists, we'll just update
# it. Otherwise, we'll pick a new index and create a new one.
new_idx = dst_blend_shape_name_to_index.get(target_name)
if new_idx is None:
# There's no existing blend shape by this name. Find an unused one, and record that we've
# used it.
#
# If it's available, use the same blend shape index, so the new blendShape deformer is compatible
# with the old one in reference edits.
new_idx = get_unused_blend_shape_index(preferred_idx=idx)
dst_blend_shape_name_to_index[target_name] = new_idx
dst_weight = dst_blend_shape_node.attr('weight').elementByLogicalIndex(new_idx)
if dst_weight.isConnectedTo(src_weight):
log.warning('Warning: the destination target %s is a source for the source %s. Are the blend shapes selected backwards?' % (
dst_weight, src_weight))
continue
# Enable the blend shape target on the source object. This will deform dst_node_copy through
# the wrap deformer.
weight = src_blend_shape_node.attr('w').elementByLogicalIndex(idx)
weight.set(1)
# Duplicate dst_node_copy in its deformed state.
new_blend_shape_target = pm.duplicate(dst_node_copy, n='DeformedTarget')[0]
# Disconnect the source weight from the destination weight before updating the blend shape
# weight, or it'll print an unexplained "Problems occurred with dependency graph setup".
# (Come on, guys, "there were problems" isn't an error message.)
if src_weight.isConnectedTo(dst_weight):
src_weight.disconnect(dst_weight)
# Set the blend shape.
pm.blendShape(dst_blend_shape_node, e=True, t=(dst_node, new_idx, new_blend_shape_target, 1.0))
# Rename the blend shape target to match the source. Don't do this if we have no
# alias, since that causes a crash.
if target_name:
old_alias = pm.aliasAttr(dst_weight, q=True)
if old_alias:
pm.aliasAttr(dst_blend_shape_node.attr(old_alias), rm=True)
pm.aliasAttr(target_name, dst_weight)
# Disable the target.
weight.set(0)
# Create a matching blend shape directory, and add the new blend shape to it.
create_matching_blend_shape_directory(src_blend_shape_node, idx, dst_blend_shape_node, new_idx)
# We don't need the copied target. Once we delete this, the blend shape will be baked into
# the deformer.
pm.delete(new_blend_shape_target)
src_to_dst_weights[src_weight] = dst_weight
return src_to_dst_weights
def _create_wrap(control_object, target,
threshold=0,
max_distance=0,
influence_type=2, # 1 for point, 2 for face
exclusive=False,
auto_weight_threshold=False,
falloff_mode=0): # 0 for volume, 1 for surface
old_selection = pm.ls(sl=True)
pm.select(target)
pm.select(control_object, add=True)
cmd = 'doWrapArgList "7" { "1", "%(threshold)s", "%(max_distance)s", "%(influence_type)s", "%(exclusive)s", "%(auto_weight_threshold)s", ' \
'"%(render_influences)s", "%(falloff_mode)s" };' % {
'threshold': threshold,
'max_distance': max_distance,
'influence_type': influence_type,
'exclusive': 1 if exclusive else 0,
'auto_weight_threshold': 1 if auto_weight_threshold else 0,
'render_influences': 1, # Never set this to 0.
'falloff_mode': falloff_mode,
}
deformer_node = pm.mel.eval(cmd)[0]
# Restore the old selection.
pm.select(old_selection)
return pm.PyNode(deformer_node)
def _create_cvwrap(control_object, target):
"""
Create a wrap deformer with cvwrap, if available. If the cvwrap plugin isn't available,
return None.
"""
if not maya_helpers.load_plugin('cvwrap.mll', required=False):
return None
old_selection = pm.ls(sl=True)
pm.select(target)
pm.select(control_object, add=True)
deformer_node = cmds.cvWrap()
# Restore the old selection.
pm.select(old_selection)
return pm.PyNode(deformer_node)
def wrap_deformer(control_mesh, target,
use_cvwrap_if_available=False,
threshold=0,
max_distance=0,
influence_type=2, # 1 for point, 2 for face
exclusive=False,
auto_weight_threshold=False,
falloff_mode=0): # 0 for volume, 1 for surface
# If any nodes are meshes, move up to the transform.
selection = target.getParent() if target.nodeType() == 'mesh' else target
control_transform = control_mesh.getParent() if control_mesh.nodeType() == 'mesh' else control_mesh
deformer_node = None
if use_cvwrap_if_available:
deformer_node = _create_cvwrap(control_transform, selection)
if deformer_node is None:
log.warning('The cvwrap plugin isn\'t available.')
if deformer_node is None:
deformer_node = _create_wrap(control_transform, selection, threshold, max_distance, influence_type,
exclusive, auto_weight_threshold, falloff_mode)
return deformer_node
def copy_attr(src_attr, dst_attr):
"""
Copying attributes is tricky. If we just attr.set(attr2.get()), it'll be very slow
since PyMel is very inefficient at large data sets. Using cmds instead introduces
new problems: we can't set large data sets all at once with it, so we'd have to break
it apart. Instead, use MPlug.getSetAttrCmds to get the commands that would be used to
set up the node in an .MA file, and run them on our copy.
"""
# Get the command list to set the values on the attribute.
command_list = []
src_attr.__apimplug__().getSetAttrCmds(command_list)
# The command list operates on the selected node, so select the destination node.
old_selection = pm.ls(sl=True)
pm.select(dst_attr.node())
# Run the commands.
for cmd in command_list:
pm.mel.eval(cmd)
pm.select(old_selection)
#class WrappedBlendShapes(object):
# """
# Work with wrapped blend shapes. A wrapped blend shape is one that receives retargetted
# blend shapes from a source.
# """
# @classmethod
# def get_blend_shape_wrap_target(cls, follower_blend_shape):
# """
# Return a list of all blendShape nodes that receive retargets from blend_shape.
# """
# assert isinstance(follower_blend_shape, pm.nodetypes.BlendShape), 'Expected a blendShape, got: %s' % follower_blend_shape.type()
# if not pm.attributeQuery('wrappingBlendShape', node=follower_blend_shape.nodeName(), exists=True):
# return None
#
# dst_attr = follower_blend_shape.attr('wrappingBlendShape')
#
# # There can only be zero or one incoming connections.
# connections = dst_attr.connections(s=True, d=False)
# assert len(connections) <= 1
# if not connections:
# return None
# return connections[0]
#
# @classmethod
# def find_following_blend_shapes(cls, blend_shape):
# """
# Return a list of all blendShape nodes that receive retargets from blend_shape.
# """
# cls._assert_is_blend_shape(blend_shape)
#
# # Find all blend shapes that are retargetted from this one.
# connections = blend_shape.attr('message').listConnections(type='blendShape', p=True, s=False, d=True)
# connections = [c.node() for c in connections if c.attrName() == 'wrappingBlendShape']
# return connections
#
# @classmethod
# def add_wrap(cls, follower_blend_shape, blend_shape_node_to_follow):
# cls._assert_is_blend_shape(follower_blend_shape)
# cls._assert_is_blend_shape(blend_shape_node_to_follow)
#
# # An array of message attributes would be more useful, but there's no way to create that with addAttr.
# if not pm.attributeQuery('wrappingBlendShape', node=follower_blend_shape.nodeName(), exists=True):
# pm.addAttr(follower_blend_shape, ln='wrappingBlendShape', at='message')
#
# src_attr = blend_shape_node_to_follow.attr('message')
# dst_attr = follower_blend_shape.attr('wrappingBlendShape')
# if not src_attr.isConnectedTo(dst_attr):
# src_attr.connect(dst_attr)
#
# @classmethod
# def remove_wrap(cls, follower_blend_shape):
# cls._assert_is_blend_shape(follower_blend_shape)
#
# # Just delete the wrappingBlendShape attribute.
# if not pm.attributeQuery('wrappingBlendShape', node=follower_blend_shape.nodeName(), exists=True):
# return
#
# pm.deleteAttr(follower_blend_shape.attr('wrappingBlendShape'))
#
# @classmethod
# def find_dst_blend_shape_node(cls, src_blend_shape_node, dst_mesh):
# """
# Given a source blend shape and a target mesh, find the blend shape on the target
# mesh which receives retargetting from the source blend shape.
#
# The SourceBlendShape attribute is used to track this. It's very inconvenient to
# select multiple blend shape nodes and specific attributes on them, so this allows
# only having to select the meshes.
# """
# cls._assert_is_blend_shape(src_blend_shape_node)
#
# connections = cls.find_following_blend_shapes(src_blend_shape_node)
#
# # One of the blend shapes should be in the history of dst_mesh.
# # XXX: look at futures of the base mesh
# dst_mesh_history = dst_mesh.listHistory()
# for conn in connections:
# if conn.node() in dst_mesh_history:
# return conn.node()
# return None
#
# @classmethod
# def _assert_is_blend_shape(cls, node):
# assert isinstance(node, pm.nodetypes.BlendShape), 'Expected a blendShape, got: %s' % node.type()
class UI(maya_helpers.OptionsBox):
title = 'Retarget Blend Shapes'
def get_src_blend_shape_name(self):
return pm.optionMenuGrp('sourceBlendShapeList', q=True, v=True)
def get_dst_blend_shape_name(self):
return pm.optionMenuGrp('dstBlendShapeList', q=True, v=True)
def option_box_load(self):
"""
Load saved settings from optionVars to the UI.
"""
connect_weights_to_source = self.optvars['zBlendShapeRetargettingConnectWeightsToSource'] == 1
pm.checkBoxGrp('connectWeightsToSource', edit=True, value1=connect_weights_to_source)
use_cvwrap = self.optvars['zBlendShapeRetargettingUseCvwrap'] == 1
pm.checkBoxGrp('useCvWrap', edit=True, value1=use_cvwrap)
# optionMenuGrp will throw RuntimeError if the value doesn't exist, eg. the saved blendShape
# node doesn't exist in the scene.
# def set_option_from_blend_shape_list(blend_shape_list, option_var):
# menu_items = pm.optionMenuGrp(blend_shape_list, q=True, itemListLong=True)
# all_menu_items = [pm.menuItem(item, q=True, label=True) for item in menu_items]
# src_blend_shape = pm.optionVar(q=option_var)
# if src_blend_shape in all_menu_items:
# pm.optionMenuGrp(blend_shape_list, e=True, v=src_blend_shape)
# set_option_from_blend_shape_list('sourceBlendShapeList', 'zBlendShapeRetargettingSourceBlendShape')
# set_option_from_blend_shape_list('dstBlendShapeList', 'zBlendShapeRetargettingTargetBlendShape')
def option_box_save(self):
self.optvars['zBlendShapeRetargettingConnectWeightsToSource'] = pm.checkBoxGrp('connectWeightsToSource', q=True, value1=False)
self.optvars['zBlendShapeRetargettingUseCvwrap'] = pm.checkBoxGrp('useCvWrap', q=True, value1=False)
def option_box_apply(self):
# Get the selected blendShapes.
src_blend_shape = pm.optionMenuGrp('sourceBlendShapeList', q=True, v=True)
dst_blend_shape = pm.optionMenuGrp('dstBlendShapeList', q=True, v=True)
src_blend_shape = pm.ls(src_blend_shape)[0]
dst_blend_shape = pm.ls(dst_blend_shape)[0]
if not src_blend_shape:
pm.warning('No source blend shape is selected')
return
if not dst_blend_shape:
pm.warning('No target blend shape is selected')
return
if src_blend_shape == dst_blend_shape:
pm.warning('The source and destination blend shapes are the same')
return
# These were selected from the UI, so unless the scene changed while the dialog was
# open these should always be blendShape nodes.
assert isinstance(src_blend_shape, pm.nodetypes.BlendShape), 'Node %s isn\'t a blend shape' % src_blend_shape.nodeName()
assert isinstance(dst_blend_shape, pm.nodetypes.BlendShape), 'Node %s isn\'t a blend shape' % dst_blend_shape.nodeName()
# Get the selected blend shape targets to retarget.
blend_shape_targets = self.get_selected_src_blend_shape_targets()
blend_shape_indices = [target.index() for target in blend_shape_targets]
if not blend_shape_indices:
pm.warning('No blend shape targets are selected')
return
src_blend_shape_targets = pm.ls(pm.blendShape(src_blend_shape, q=True, g=True))
dst_blend_shape_targets = pm.ls(pm.blendShape(dst_blend_shape, q=True, g=True))
# Make sure that both blend shapes have just one target.
assert len(src_blend_shape_targets) == 1, 'Blend shape %s must have one target, has %i: %s' % (
src_blend_shape.nodeName(),
len(src_blend_shape_targets), ', '.join(src_blend_shape_targets))
assert len(dst_blend_shape_targets) == 1, 'Blend shape %s must have one target, has %i: %s' % (
dst_blend_shape.nodeName(),
len(dst_blend_shape_targets), ', '.join(dst_blend_shape_targets))
# Find the transforms for the source and destination node.
src_node = src_blend_shape_targets[0].getTransform()
dst_node = dst_blend_shape_targets[0].getTransform()
# Check the selected nodes.
assert isinstance(src_node, pm.nodetypes.Transform), 'The source node %s isn\'t a transform' % src_node.nodeName()
assert isinstance(dst_node, pm.nodetypes.Transform), 'The destination node %s isn\'t a transform' % dst_node.nodeName()
assert src_node.getShape() is not None, 'The source node %s isn\'t a mesh' % src_node.nodeName()
assert dst_node.getShape() is not None, 'The destination node %s isn\'t a mesh' % dst_node.nodeName()
# # Find all blendShapes that are following this one.
# following_blend_shapes = WrappedBlendShapes.find_following_blend_shapes(src_blend_shape)
#
# # Find a blend shape node on dst_node that's in following_blend_shapes. We can either
# # look in the history of the output mesh, or in the future of the base mesh, and both can
# # have wrong matches.
# for node in dst_node.getShapes()[-1].listFuture():
# if node in following_blend_shapes:
# dst_blend_shape = node
# break
# else:
# raise RuntimeError('Couldn\'t find a blend shape node on %s which is following %s' % (dst_node.name(), src_blend_shape.name()))
connect_weights = pm.checkBoxGrp('connectWeightsToSource', q=True, value1=False)
use_cvwrap = pm.checkBoxGrp('useCvWrap', q=True, value1=False)
redst_blend_shapes(src_node, dst_node, src_blend_shape, dst_blend_shape, blend_shape_indices, connect_weights=connect_weights, use_cvwrap=use_cvwrap)
def options_box_setup(self):
self.optvars.add('zBlendShapeRetargettingConnectWeightsToSource', 'int', 0)
self.optvars.add('zBlendShapeRetargettingUseCvwrap', 'int', 1)
parent = pm.columnLayout(adjustableColumn=True)
def add_blend_shape_selector(name, label, refresh_on_change):
pm.optionMenuGrp(name, label=label)
# Create a list of blendShapes.
bnArray = pm.ls(type='blendShape')
for entry in bnArray:
pm.menuItem(label=entry)
if not bnArray:
pm.menuItem(label='No Blend Shape Selected')
# When the source selection is changed, update the source blend shape list.
def changed(value):
self.refresh_src_blend_shape_list()
if refresh_on_change:
pm.optionMenuGrp(name, edit=True, changeCommand=changed)
add_blend_shape_selector('sourceBlendShapeList', 'Source blendShape', True)
add_blend_shape_selector('dstBlendShapeList', 'Target blendShape', False)
pm.separator()
pm.textScrollList('blendShapeTargetList', numberOfRows=10, allowMultiSelection=True)
# showIndexedItem=4 )
pm.separator()
pm.checkBoxGrp('connectWeightsToSource', numberOfCheckBoxes=1, value1=False, label='Connect weights to source')
pm.checkBoxGrp('useCvWrap', numberOfCheckBoxes=1, value1=False, label='Use cvwrap instead of wrap')
self.refresh_src_blend_shape_list()
def refresh_src_blend_shape_list(self):
pm.textScrollList('blendShapeTargetList', edit=True, removeAll=True)
src_blend_shape = self.get_src_blend_shape_name()
if src_blend_shape is None:
return
if src_blend_shape == 'No Blend Shape Selected':
return
# The blend shape array is sparse, so keep a mapping from list indices to blend
# shape weight indices. Note that for some reason, these are 1-based.
self.src_blend_shape_map = {}
# Add the blend shape targets in the source blend shape to the list.
src_blend_shape = pm.ls(src_blend_shape)[0]
weight_idx_list = src_blend_shape.weightIndexList()
src_weights = src_blend_shape.attr('weight')
for weight_idx in weight_idx_list:
weight = src_weights.elementByLogicalIndex(weight_idx)
target_name = pm.aliasAttr(weight, q=True)
pm.textScrollList('blendShapeTargetList', edit=True, append=target_name)
idx = pm.textScrollList('blendShapeTargetList', q=True, numberOfItems=True)
self.src_blend_shape_map[idx] = weight
def get_selected_src_blend_shape_targets(self):
selection = pm.textScrollList('blendShapeTargetList', q=True, selectIndexedItem=True)
return [self.src_blend_shape_map[idx] for idx in selection]
|
StarcoderdataPython
|
8182583
|
from abc import ABCMeta, abstractmethod
class StrategyBase(metaclass=ABCMeta):
def __init__(self, n, m, k):
self.loads = [0] * n
self.n = n
self.m = m
self.k = k
self.choices_left = k
@abstractmethod
def decide(self, bin): # TODO: might add a choices_left as an input
pass
@abstractmethod
def note(self, bin):
pass
@abstractmethod
def reset(self):
pass
def decide_(self, bin):
decision = self.decide(bin)
if not decision:
self.choices_left -= 1
return decision
def note_(self, bin):
self.loads[bin] += 1
self.choices_left = self.k
self.note(bin)
def reset_(self):
self.loads = [0] * self.n
self.reset()
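# Minimal illustrative subclass (not part of the original module; the class name is
# hypothetical). It only demonstrates the decide/note/reset contract that
# StrategyBase expects from concrete strategies.
class AlwaysAcceptStrategy(StrategyBase):
    def decide(self, bin):
        # Always accept the offered bin, so decide_() never spends a choice.
        return True

    def note(self, bin):
        # No bookkeeping beyond the load counter maintained by note_().
        pass

    def reset(self):
        pass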
|
StarcoderdataPython
|
12830990
|
<gh_stars>1-10
"""exact-astro setup.py."""
import io
import pathlib
import re
from setuptools import setup
__version__ = re.search(
r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]', # It excludes inline comment too
io.open('exact/__init__.py', encoding='utf_8_sig').read(),
).group(1)
install_requires = ['numpy', 'scipy']
packages = ['exact']
description = 'Exact solutions to astrophysical problems.'
long_description = (pathlib.Path(__file__).parent / 'README.md').read_text()
setup(
name='exact',
version=__version__,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/dmentipl/exact-astro',
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
packages=packages,
license='MIT',
install_requires=install_requires,
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Astronomy",
],
)
|
StarcoderdataPython
|
11213443
|
# _*_ coding: utf-8 _*_
"""
Created by Alimazing on 2018/6/17.
"""
from sqlalchemy import Column, Integer, String, SmallInteger
from app.models.base import Base
__author__ = 'Alimazing'
class Image(Base):
id = Column(Integer, primary_key=True, autoincrement=True)
_url = Column('url', String(255))
_from = Column('from', SmallInteger, default=1)  # 1 means the image is stored locally, 2 means it is hosted on the network
def keys(self):
self.hide('id', '_url','_from').append('url')
return self.fields
@property
def url(self):
return self.get_url(self._url)
@staticmethod
def get_img_by_id(id):
return Image.query.filter_by(id=id).first_or_404()
|
StarcoderdataPython
|
365164
|
"""Custom User Manager."""
from django.contrib.auth.models import BaseUserManager
from django.utils.translation import ugettext_lazy as _
class UserManager(BaseUserManager):
"""
Custom user manager model where email is the unique identifier
for authentication instead of username.
"""
use_in_migrations = True
def create_user(self, email, password, **extra_fields):
"""Create and save user with given email and password."""
fields_names = ["email", ]
values = [email, ]
field_value_map = dict(zip(fields_names, values))
for field, value in field_value_map.items():
if not value:
raise ValueError(_(f"The {field} must be set!"))
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password, **extra_fields):
"""
Create and save superuser with email,
password and correct defaults.
"""
extra_fields.setdefault("is_active", True)
extra_fields.setdefault("is_staff", True)
extra_fields.setdefault("is_superuser", True)
return self.create_user(email, password, **extra_fields)
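# Sketch of how this manager is typically wired into a custom user model
# (hypothetical model, not part of this file):
#
#   class User(AbstractBaseUser, PermissionsMixin):
#       email = models.EmailField(_("email address"), unique=True)
#       USERNAME_FIELD = "email"
#       REQUIRED_FIELDS = []
#       objects = UserManager()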
|
StarcoderdataPython
|
6549634
|
"""Decodes a binary string to a json representation of the intervals
after the merge overlapping intervals turing machine have processed them. Reads
json from the command line and outputs the initial tape."""
import json
import sys
from vim_turing_machine.constants import BITS_PER_NUMBER
from vim_turing_machine.constants import BLANK_CHARACTER
def decode_intervals(intervals, num_bits=BITS_PER_NUMBER):
result = []
clean_intervals = intervals.replace(BLANK_CHARACTER, '').replace(' ', '')
index = 0
while index < len(clean_intervals):
begin = clean_intervals[index:index + num_bits]
begin = int(begin, 2)
index += num_bits
end = clean_intervals[index:index + num_bits]
end = int(end, 2)
index += num_bits
result.append([begin, end])
return result
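# Worked example (illustrative): with num_bits=4, the tape '00010011' decodes to
# [[1, 3]] -- begin '0001' -> 1, end '0011' -> 3.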
if __name__ == '__main__':
print(json.dumps(decode_intervals(sys.argv[1], int(sys.argv[2]))))
|
StarcoderdataPython
|
1698969
|
from __future__ import unicode_literals
from decimal import Decimal
from django.db import models
class Movie(models.Model):
title = models.CharField(
verbose_name='Movie Title',
max_length=255
)
genre = models.CharField(
verbose_name='Genre',
max_length=255
)
rating = models.DecimalField(
verbose_name='Rating out of 10',
default=Decimal(0),
max_digits=5,
decimal_places=2
)
description = models.TextField(
verbose_name='Description',
max_length=1000
)
casting = models.CharField(
verbose_name='Castings for the Movie',
max_length=1000
)
class Meta:
db_table = 'movie'
app_label = 'movies'
verbose_name = 'Movie'
verbose_name_plural = 'Movies'
|
StarcoderdataPython
|
1949772
|
<filename>parrot/settings.py<gh_stars>0
import environ
from pathlib import Path
env = environ.Env()
BASE_DIR = Path(__file__).parent.parent
environ.Env.read_env(str(BASE_DIR / '.env'))
SECRET_KEY = env('PARROT_SECRET_KEY')
DEBUG = env.bool('DEBUG', False)
ALLOWED_HOSTS = env.list('PARROT_ALLOWED_HOSTS', default=['127.0.0.1'])
INSTALLED_APPS = [
'simpleui',
'django_extensions',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.postgres',
'http_stubs',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
if DEBUG:
INSTALLED_APPS.append('debug_toolbar')
MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware')
ROOT_URLCONF = 'parrot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / Path('templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': env.str('PARROT_DB_NAME', 'parrot'),
'USER': env.str('PARROT_DB_USER', 'parrot'),
'PASSWORD': env.str('PARROT_DB_PASSWORD', '<PASSWORD>'),
'HOST': env.str('PARROT_DB_HOST', 'parrot-database'),
},
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.'
'UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = False
USE_L10N = False
USE_TZ = True
STATICFILES_DIRS = [
BASE_DIR / Path('static'),
]
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR / Path('static_build')
# Admin UI settings
SIMPLEUI_HOME_INFO = False
SIMPLEUI_STATIC_OFFLINE = True
SIMPLEUI_DEFAULT_THEME = 'layui.css'
SIMPLEUI_ANALYSIS = False
SIMPLEUI_LOGO = STATIC_URL + 'parrot_icon.png'
SIMPLEUI_ICON = {
'HTTP Stubs': 'fas fa-feather-alt',
'Stubs': 'fas fa-feather-alt',
'Logs': 'fas fa-layer-group',
}
INTERNAL_IPS = env.list('PARROT_INTERNAL_HOSTS', default=['127.0.0.1'])
|
StarcoderdataPython
|
8050833
|
<reponame>ahme0307/Ynet
from __future__ import print_function
import os
import numpy as np
import pdb
import cv2
from fnmatch import fnmatch
from skimage.io import imsave, imread
import pickle
import pylab
import imageio
import matplotlib.pyplot as plt
#Prepare training and test set
def create_train_data(param):
filenames_img = []
filenames_mask = []
if os.path.exists('imgs_trainPath.npy')==True and os.path.exists('imgs_mask_trainPath.npy')==True :
print('Training set already exists and loaded from file')
return
data_path=param.data_path
Gpaths=[x for x in next(os.walk(data_path))][1]
Gpaths=[os.path.join(data_path,x) for x in Gpaths]
images = os.listdir(data_path)
total =sum(len(os.listdir(os.path.join(y,'GT'))) for y in (Gpaths))
i = 0
print('-'*30)
print('Creating trainig images...')
print('-'*30)
img_mask=[]
#pdb.set_trace()
for video_number in range(len(images)):
for imagename in os.listdir(os.path.join(Gpaths[video_number],images[video_number])):
if os.path.exists(os.path.join(Gpaths[video_number],images[video_number], imagename)):
mask_nameRoot,ext =os.path.splitext(imagename)
else:
print("Wrong Format!")
pdb.set_trace()
if os.path.exists(os.path.join(Gpaths[video_number],'GT', '%s%s%s' %(mask_nameRoot,'_mask',ext))):
temp=os.path.join(Gpaths[video_number],'GT', '%s%s%s' %(mask_nameRoot,'_mask',ext))
elif os.path.exists(os.path.join(Gpaths[video_number],'GT', '%s%s' %(mask_nameRoot,ext))):
temp=os.path.join(Gpaths[video_number],'GT', '%s%s' %(mask_nameRoot,ext))
elif os.path.exists(os.path.join(Gpaths[video_number],'GT', '%s%s%s' %('p',mask_nameRoot,ext))):
temp=os.path.join(Gpaths[video_number],'GT', '%s%s%s' %('p',mask_nameRoot,ext))
elif os.path.exists(os.path.join(Gpaths[video_number],'GT', '%s%s%s' %(mask_nameRoot,'_GT',ext))):
temp = os.path.join(Gpaths[video_number],'GT', '%s%s%s' %(mask_nameRoot,'_GT',ext))
else:
print("Ground Truth Image not found")
pdb.set_trace()
try:
filenames_img.append(os.path.join(Gpaths[video_number],images[video_number], imagename))
filenames_mask.append(temp)
except ValueError:
pdb.set_trace()
if i % 1000 == 0:
print('Done: {0}/{1} images'.format(i, total))
i += 1
if i == total:
print('Loading done.')
np.save('imgs_trainPath.npy', filenames_img)
np.save('imgs_mask_trainPath.npy', filenames_mask)
print('Saving to .npy files done.')
print('Loading done.')
return
def load_train_data():
imgs_train = np.load('imgs_trainPath.npy')
imgs_mask_train = np.load('imgs_mask_trainPath.npy')
return imgs_train, imgs_mask_train
def create_test_data(param):
filenames_img = []
filenames_mask = []
if os.path.exists('imgs_test.npy')==True and os.path.exists('imgs_id_test.npy')==True :
print('Test set already exists and loaded from file')
return
data_path_test=param.data_path_test
Gpaths=[x for x in next(os.walk(data_path_test))][1]
Gpaths=[os.path.join(data_path_test,x) for x in Gpaths]
# pdb.set_trace()
images = os.listdir(data_path_test)
total =sum(len(os.listdir(os.path.join(y,'GT'))) for y in (Gpaths))
i = 0
print('-'*30)
print('Creating test images...')
print('-'*30)
for video_number in range(len(images)):
for imagename in os.listdir(os.path.join(Gpaths[video_number],images[video_number])):
if os.path.exists(os.path.join(Gpaths[video_number],images[video_number], imagename)):
mask_name,ext =os.path.splitext(imagename)
#pdb.set_trace()
mask_name ='%s%s' %(mask_name,'.tif')
if not os.path.exists(os.path.join(Gpaths[video_number],'GT', mask_name)):
print("Mask not Found")
try:
filenames_img.append(os.path.join(Gpaths[video_number],images[video_number], imagename))
filenames_mask.append(os.path.join(Gpaths[video_number],'GT', mask_name))
except ValueError:
pdb.set_trace()
if i % 1000 == 0:
print('Done: {0}/{1} images'.format(i, total))
i += 1
if i == total:
print('Loading done.')
np.save('imgs_test.npy', filenames_img)
np.save('imgs_id_test.npy', filenames_mask)
print('Saving to .npy files done.')
print('Loading done.')
return
def load_test_data():
imgs_test = np.load('imgs_test.npy')
#imgs_test = np.memmap('imgs_test.npy', mode='r')
imgs_id = np.load('imgs_id_test.npy')
#imgs_id = np.memmap('imgs_id_test.npy', mode='r')
return imgs_test, imgs_id
def plot_imagesT(images, cls_true, cls_pred=None, smooth=True, filename='test.png'):
#pdb.set_trace()
assert len(images) == len(cls_true)
fig, axes = plt.subplots(4, 4,figsize=(60, 60))
if cls_pred is None:
hspace = 0.6
else:
hspace = 0.9
fig.subplots_adjust(hspace=hspace, wspace=0.3)
# Interpolation type.
if smooth:
interpolation = 'spline16'
else:
interpolation = 'nearest'
count1 =0
count2 =0
for i, ax in enumerate(axes.flat):
if i < len(images)*2:
# Plot image.
if i % 2 ==0:
ax.imshow(np.uint8(images[count1]),interpolation=interpolation)
count1+= 1
else:
ax.imshow(np.uint8(cls_true[count2]),interpolation=interpolation,cmap=plt.get_cmap('gray'))
count2+= 1
ax.set_xticks([])
ax.set_yticks([])
# plt.rcParams["figure.figsize"] = (60,60)
plt.savefig(filename,dpi=100)
plt.show()
|
StarcoderdataPython
|
5098613
|
<reponame>stonebig/opt_einsum<gh_stars>0
"""
Support for random optimizers, including the random-greedy path.
"""
import functools
import heapq
import math
import numbers
import random
import time
from collections import deque
from . import helpers, paths
__all__ = ["RandomGreedy", "random_greedy", "random_greedy_128"]
class RandomOptimizer(paths.PathOptimizer):
"""Base class for running any random path finder that benefits
from repeated calling, possibly in a parallel fashion. Custom random
optimizers should subclass this, and the ``setup`` method should be
implemented with the following signature::
def setup(self, inputs, output, size_dict):
# custom preparation here ...
return trial_fn, trial_args
Where ``trial_fn`` itself should have the signature::
def trial_fn(r, *trial_args):
# custom computation of path here
return ssa_path, cost, size
Where ``r`` is the run number and could for example be used to seed a
random number generator. See ``RandomGreedy`` for an example.
Parameters
----------
max_repeats : int, optional
The maximum number of repeat trials to have.
max_time : float, optional
The maximum amount of time to run the algorithm for.
minimize : {'flops', 'size'}, optional
Whether to favour paths that minimize the total estimated flop-count or
the size of the largest intermediate created.
parallel : {bool, int, or executor-pool like}, optional
Whether to parallelize the random trials, by default ``False``. If
``True``, use a ``concurrent.futures.ProcessPoolExecutor`` with the same
number of processes as cores. If an integer is specified, use that many
processes instead. Finally, you can supply a custom executor-pool which
should have an API matching that of the python 3 standard library
module ``concurrent.futures``. Namely, a ``submit`` method that returns
``Future`` objects, themselves with ``result`` and ``cancel`` methods.
pre_dispatch : int, optional
If running in parallel, how many jobs to pre-dispatch so as to avoid
submitting all jobs at once. Should also be more than twice the number
of workers to avoid under-subscription. Default: 128.
Attributes
----------
path : list[tuple[int]]
The best path found so far.
costs : list[int]
The list of each trial's costs found so far.
sizes : list[int]
The list of each trial's largest intermediate size so far.
See Also
--------
RandomGreedy
"""
def __init__(self, max_repeats=32, max_time=None, minimize='flops', parallel=False, pre_dispatch=128):
if minimize not in ('flops', 'size'):
raise ValueError("`minimize` should be one of {'flops', 'size'}.")
self.max_repeats = max_repeats
self.max_time = max_time
self.minimize = minimize
self.better = paths.get_better_fn(minimize)
self.parallel = parallel
self.pre_dispatch = pre_dispatch
self.costs = []
self.sizes = []
self.best = {'flops': float('inf'), 'size': float('inf')}
self._repeats_start = 0
@property
def path(self):
"""The best path found so far.
"""
return paths.ssa_to_linear(self.best['ssa_path'])
@property
def parallel(self):
return self._parallel
@parallel.setter
def parallel(self, parallel):
# shutdown any previous executor if we are managing it
if getattr(self, '_managing_executor', False):
self._executor.shutdown()
self._parallel = parallel
self._managing_executor = False
if parallel is False:
self._executor = None
return
if parallel is True:
from concurrent.futures import ProcessPoolExecutor
self._executor = ProcessPoolExecutor()
self._managing_executor = True
return
if isinstance(parallel, numbers.Number):
from concurrent.futures import ProcessPoolExecutor
self._executor = ProcessPoolExecutor(parallel)
self._managing_executor = True
return
# assume a pool-executor has been supplied
self._executor = parallel
def _gen_results_parallel(self, repeats, trial_fn, args):
"""Lazily generate results from an executor without submitting all jobs at once.
"""
self._futures = deque()
# the idea here is to submit at least ``pre_dispatch`` jobs *before* we
# yield any results, then do both in tandem, before draining the queue
for r in repeats:
if len(self._futures) < self.pre_dispatch:
self._futures.append(self._executor.submit(trial_fn, r, *args))
continue
yield self._futures.popleft().result()
while self._futures:
yield self._futures.popleft().result()
def _cancel_futures(self):
if self._executor is not None:
for f in self._futures:
f.cancel()
def setup(self, inputs, output, size_dict):
raise NotImplementedError
def __call__(self, inputs, output, size_dict, memory_limit):
# start a timer?
if self.max_time is not None:
t0 = time.time()
trial_fn, trial_args = self.setup(inputs, output, size_dict)
r_start = self._repeats_start + len(self.costs)
r_stop = r_start + self.max_repeats
repeats = range(r_start, r_stop)
# create the trials lazily
if self._executor is not None:
trials = self._gen_results_parallel(repeats, trial_fn, trial_args)
else:
trials = (trial_fn(r, *trial_args) for r in repeats)
# assess the trials
for ssa_path, cost, size in trials:
# keep track of all costs and sizes
self.costs.append(cost)
self.sizes.append(size)
# check if we have found a new best
found_new_best = self.better(cost, size, self.best['flops'], self.best['size'])
if found_new_best:
self.best['flops'] = cost
self.best['size'] = size
self.best['ssa_path'] = ssa_path
# check if we have run out of time
if (self.max_time is not None) and (time.time() > t0 + self.max_time):
break
self._cancel_futures()
return self.path
def __del__(self):
# if we created the parallel pool-executor, shut it down
if getattr(self, '_managing_executor', False):
self._executor.shutdown()
def thermal_chooser(queue, remaining, nbranch=8, temperature=1, rel_temperature=True):
"""A contraction 'chooser' that weights possible contractions using a
Boltzmann distribution. Explicitly, given costs ``c_i`` (with ``c_0`` the
smallest), the relative weights, ``w_i``, are computed as:
w_i = exp( -(c_i - c_0) / temperature)
Additionally, if ``rel_temperature`` is set, scale ``temperature`` by
``abs(c_0)`` to account for likely fluctuating cost magnitudes during the
course of a contraction.
Parameters
----------
queue : list
The heapified list of candidate contractions.
remaining : dict[str, int]
Mapping of remaining inputs' indices to the ssa id.
temperature : float, optional
When choosing a possible contraction, its relative probability will be
proportional to ``exp(-cost / temperature)``. Thus the larger
``temperature`` is, the further random paths will stray from the normal
'greedy' path. Conversely, if set to zero, only paths with exactly the
same cost as the best at each step will be explored.
rel_temperature : bool, optional
Whether to normalize the ``temperature`` at each step to the scale of
the best cost. This is generally beneficial as the magnitude of costs
can vary significantly throughout a contraction.
nbranch : int, optional
How many potential paths to calculate probability for and choose from
at each step.
Returns
-------
cost, k1, k2, k12
"""
n = 0
choices = []
while queue and n < nbranch:
cost, k1, k2, k12 = heapq.heappop(queue)
if k1 not in remaining or k2 not in remaining:
continue # candidate is obsolete
choices.append((cost, k1, k2, k12))
n += 1
if n == 0:
return None
if n == 1:
return choices[0]
costs = [choice[0][0] for choice in choices]
cmin = costs[0]
# adjust by the overall scale to account for fluctuating absolute costs
if rel_temperature:
temperature *= max(1, abs(cmin))
# compute relative probability for each potential contraction
if temperature == 0.0:
energies = [1 if c == cmin else 0 for c in costs]
else:
# shift by cmin for numerical reasons
energies = [math.exp(-(c - cmin) / temperature) for c in costs]
# randomly choose a contraction based on energies
chosen, = random.choices(range(n), weights=energies)
cost, k1, k2, k12 = choices.pop(chosen)
    # put the other choices back in the heap
for other in choices:
heapq.heappush(queue, other)
return cost, k1, k2, k12
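# A quick worked example of the weighting above: with rel_temperature disabled and
# temperature=2, candidate costs [10, 11, 14] give energies exp(0), exp(-0.5) and
# exp(-2) ~ 1.00, 0.61, 0.14, so the cheapest contraction is roughly seven times
# more likely to be chosen than the most expensive one.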
def ssa_path_compute_cost(ssa_path, inputs, output, size_dict):
"""Compute the flops and max size of an ssa path.
"""
inputs = list(map(frozenset, inputs))
output = frozenset(output)
remaining = set(range(len(inputs)))
total_cost = 0
max_size = 0
for i, j in ssa_path:
k12, flops12 = paths.calc_k12_flops(inputs, output, remaining, i, j, size_dict)
remaining.discard(i)
remaining.discard(j)
remaining.add(len(inputs))
inputs.append(k12)
total_cost += flops12
max_size = max(max_size, helpers.compute_size_by_dict(k12, size_dict))
return total_cost, max_size
def _trial_greedy_ssa_path_and_cost(r, inputs, output, size_dict, choose_fn, cost_fn):
"""A single, repeatable, greedy trial run. Returns ``ssa_path`` and cost.
"""
if r == 0:
# always start with the standard greedy approach
choose_fn = None
else:
random.seed(r)
ssa_path = paths.ssa_greedy_optimize(inputs, output, size_dict, choose_fn, cost_fn)
cost, size = ssa_path_compute_cost(ssa_path, inputs, output, size_dict)
return ssa_path, cost, size
class RandomGreedy(RandomOptimizer):
"""
Parameters
----------
cost_fn : callable, optional
A function that returns a heuristic 'cost' of a potential contraction
with which to sort candidates. Should have signature
``cost_fn(size12, size1, size2, k12, k1, k2)``.
temperature : float, optional
When choosing a possible contraction, its relative probability will be
proportional to ``exp(-cost / temperature)``. Thus the larger
``temperature`` is, the further random paths will stray from the normal
'greedy' path. Conversely, if set to zero, only paths with exactly the
same cost as the best at each step will be explored.
rel_temperature : bool, optional
Whether to normalize the ``temperature`` at each step to the scale of
the best cost. This is generally beneficial as the magnitude of costs
can vary significantly throughout a contraction. If False, the
algorithm will end up branching when the absolute cost is low, but
stick to the 'greedy' path when the cost is high - this can also be
beneficial.
nbranch : int, optional
How many potential paths to calculate probability for and choose from
at each step.
kwargs
Supplied to RandomOptimizer.
See Also
--------
RandomOptimizer
"""
def __init__(self, cost_fn='memory-removed-jitter', temperature=1.0,
rel_temperature=True, nbranch=8, **kwargs):
self.cost_fn = cost_fn
self.temperature = temperature
self.rel_temperature = rel_temperature
self.nbranch = nbranch
super().__init__(**kwargs)
@property
def choose_fn(self):
"""The function that chooses which contraction to take - make this a
property so that ``temperature`` and ``nbranch`` etc. can be updated
between runs.
"""
if self.nbranch == 1:
return None
return functools.partial(thermal_chooser, temperature=self.temperature,
nbranch=self.nbranch, rel_temperature=self.rel_temperature)
def setup(self, inputs, output, size_dict):
fn = _trial_greedy_ssa_path_and_cost
args = (inputs, output, size_dict, self.choose_fn, self.cost_fn)
return fn, args
def random_greedy(inputs, output, idx_dict, memory_limit=None, **optimizer_kwargs):
"""
"""
optimizer = RandomGreedy(**optimizer_kwargs)
return optimizer(inputs, output, idx_dict, memory_limit)
random_greedy_128 = functools.partial(random_greedy, max_repeats=128)
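# Minimal usage sketch, assuming this module is the one shipped with opt_einsum so
# that RandomGreedy is accepted wherever a PathOptimizer instance is expected.
if __name__ == "__main__":
    import numpy as np
    import opt_einsum as oe

    ops = [np.random.rand(8, 8) for _ in range(3)]
    # Run up to 16 random-greedy trials and keep the best contraction path found.
    path, info = oe.contract_path("ab,bc,cd->ad", *ops,
                                  optimize=RandomGreedy(max_repeats=16))
    print(path)
    print(info.speedup)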
# ===== StarcoderdataPython sample 361398 =====
# Enter your code here. Read input from STDIN. Print output to STDOUT
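# Prints True only when every entry is a positive integer and at least one entry
# is a palindrome (reads the same forwards and backwards); otherwise prints False.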
n_input = input()
int_input = input().split()
print(all([int(i) > 0 for i in int_input]) and any([j == j[::-1] for j in int_input]))
# ===== StarcoderdataPython sample 9771732 | file: Main/Articles/urls.py =====
from django.urls import path
from .views import (
PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView
)
urlpatterns = [
path("",PostListView.as_view(), name="art_list"),
path("art_add/",PostCreateView.as_view(), name="art_add"),
path("art_desc/<int:pk>/",PostDetailView.as_view(), name="art_detail"),
path("art_desc/<int:pk>/update/",PostUpdateView.as_view(), name="art_update"),
path("art_desc/<int:pk>/delete/",PostDeleteView.as_view(), name="art_delete"),
]
# ===== StarcoderdataPython sample 87253 | file: sutils/applications/cancel/cancel.py | 0 stars =====
import sys
from . import core
def run(options):
if options['all']:
run_all(force=options['force'])
elif options['last'] is not None:
run_last(options['last'], force=options['force'])
elif options['first'] is not None:
run_first(options['first'], force=options['force'])
else:
sys.exit(0) # end of program
def run_all(force=False):
core.cancel_all(force=force)
def run_last(N, force=False):
core.cancel_last(N, force=force)
def run_first(N, force=False):
core.cancel_first(N, force=force)
# ===== StarcoderdataPython sample 142959 | repo: Bpowers4/turicreate | 1-10 stars =====
# -*- coding: utf-8 -*-
# Copyright © 2019 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import os as _os
import turicreate as _tc
import turicreate.toolkits.libtctensorflow
from turicreate.toolkits._main import ToolkitError as _ToolkitError
import numpy as _np
import tempfile
import coremltools as _coremltools
from copy import copy as _copy
from array import array as _array
import sys as _sys
from . import util as test_util
import unittest
import pytest
def _build_bitmap_data():
"""
Build an SFrame from 10 saved drawings.
"""
from os.path import join as _join, realpath as _realpath
from os.path import splitext as _splitext, basename as _basename
from os.path import dirname as _dirname
drawings_dir = _join(_dirname(_realpath(__file__)), "drawings")
sf = _tc.image_analysis.load_images(drawings_dir, with_path=True)
sf = sf.rename({"image": "drawing", "path": "label"})
sf["label"] = sf["label"].apply(
lambda filepath: _splitext(_basename(filepath))[0][:-1]
# Extract the class name from the filename, "check1.png" -> "check"
# [:-1] is to get "check" out of "check1"
)
return sf
def _build_stroke_data():
"""
Build an SFrame by generating 10 random stroke-based drawings.
Each stroke is generated by doing a random walk on a canvas.
"""
num_rows_in_sframe = 10
drawings, labels = [], []
random = _np.random.RandomState(100)
def _generate_random_point(point=None):
if point is not None:
dx = random.choice([-1, 0, 1])
dy = random.choice([-1, 0, 1])
next_x, next_y = point["x"] + dx, point["y"] + dy
else:
next_x, next_y = random.randint(1000), random.randint(1000)
return {"x": next_x, "y": next_y}
for label in range(num_rows_in_sframe):
num_strokes = random.randint(10)
drawing = []
for stroke_id in range(num_strokes):
drawing.append([])
num_points = random.randint(500)
last_point = None
for point_id in range(num_points):
last_point = _generate_random_point(last_point)
drawing[-1].append(last_point)
drawings.append(drawing)
labels.append(label)
return _tc.SFrame({"drawing": drawings, "label": labels})
class DrawingClassifierTest(unittest.TestCase):
@classmethod
def setUpClass(self, warm_start=None):
self.feature = "drawing"
self.target = "label"
self.check_cross_sf = _build_bitmap_data()
self.stroke_sf = _build_stroke_data()
self.warm_start = warm_start
self.max_iterations = 10
self.check_cross_model = _tc.drawing_classifier.create(
self.check_cross_sf,
self.target,
feature=self.feature,
max_iterations=self.max_iterations,
warm_start=warm_start,
)
self.stroke_model = _tc.drawing_classifier.create(
self.stroke_sf,
self.target,
feature=self.feature,
max_iterations=1,
warm_start=warm_start,
)
self.trains = [self.check_cross_sf, self.stroke_sf]
self.models = [self.check_cross_model, self.stroke_model]
def test_create_with_missing_value_bitmap(self):
sf = self.check_cross_sf.append(
_tc.SFrame(
{
self.feature: _tc.SArray([None], dtype=_tc.Image),
self.target: ["check"],
}
)
)
try:
_tc.drawing_classifier.create(sf, self.target)
except _ToolkitError as e:
self.assertTrue("dropna" in str(e))
def test_create_with_missing_value_in_label(self):
sf = self.check_cross_sf
sf = sf.remove_column(self.target)
sf = sf.add_column(_tc.SArray([None] * len(sf), dtype=str), self.target)
try:
_tc.drawing_classifier.create(sf, self.target)
except _ToolkitError as e:
self.assertTrue("dropna" in str(e))
def test_create_with_missing_feature(self):
for sf in self.trains:
with self.assertRaises(_ToolkitError):
_tc.drawing_classifier.create(sf, self.target, feature="wrong_feature")
def test_create_with_missing_target(self):
for sf in self.trains:
with self.assertRaises(_ToolkitError):
_tc.drawing_classifier.create(sf, "wrong_target")
def test_create_with_empty_dataset(self):
for sf in self.trains:
with self.assertRaises(_ToolkitError):
_tc.drawing_classifier.create(sf[:0], self.target, feature=self.feature)
def test_create_with_missing_coordinates_in_stroke_input(self):
drawing = [[{"x": 1.0, "y": 1.0}], [{"x": 0.0}, {"y": 0.0}]]
sf = _tc.SFrame({self.feature: [drawing], self.target: ["missing_coordinates"]})
with self.assertRaises(_ToolkitError):
_tc.drawing_classifier.create(sf, self.target)
def test_create_with_wrongly_typed_coordinates_in_stroke_input(self):
drawing = [[{"x": 1.0, "y": 0}], [{"x": "string_x?!", "y": 0.1}]]
sf = _tc.SFrame({self.feature: [drawing], self.target: ["string_x_coordinate"]})
with self.assertRaises(_ToolkitError):
_tc.drawing_classifier.create(sf, self.target)
def test_create_with_None_coordinates_in_stroke_input(self):
drawing = [[{"x": 1.0, "y": None}], [{"x": 1.1, "y": 0.1}]]
sf = _tc.SFrame({self.feature: [drawing], self.target: ["none_y_coordinate"]})
with self.assertRaises(_ToolkitError):
_tc.drawing_classifier.create(sf, self.target, feature=self.feature)
def test_create_with_validation_set_None(self):
for data in self.trains:
_tc.drawing_classifier.create(
data,
self.target,
feature=self.feature,
validation_set=None,
max_iterations=1,
)
def test_create_with_verbose_False(self):
for data in self.trains:
args = [data, self.target]
kwargs = {
"feature": self.feature,
"max_iterations": 1,
}
test_util.assert_longer_verbose_logs(
_tc.drawing_classifier.create, args, kwargs
)
def test_create_with_no_validation_set(self):
for data in self.trains:
_tc.drawing_classifier.create(
data, self.target, feature=self.feature, max_iterations=1
)
def test_create_with_empty_drawing_in_stroke_input(self):
drawing = []
sf = _tc.SFrame({self.feature: [drawing], self.target: ["empty_drawing"]})
# Should not error out, it should silently ignore the empty drawing
_tc.drawing_classifier.create(
sf, self.target, feature=self.feature, max_iterations=1
)
def test_create_with_empty_stroke_in_stroke_input(self):
drawing = [[{"x": 1.0, "y": 0.0}], [], [{"x": 1.1, "y": 0.1}]]
sf = _tc.SFrame({self.feature: [drawing], self.target: ["empty_drawing"]})
# Should not error out, it should silently ignore the empty stroke
_tc.drawing_classifier.create(
sf, self.target, feature=self.feature, max_iterations=1
)
def test_create_with_fixed_random_seed(self):
for data in self.trains:
model_1 = _tc.drawing_classifier.create(
data,
self.target,
feature=self.feature,
validation_set=None,
max_iterations=3,
random_seed=86,
)
model_2 = _tc.drawing_classifier.create(
data,
self.target,
feature=self.feature,
validation_set=None,
max_iterations=3,
random_seed=86,
)
pred_1 = model_1.predict(data)
pred_2 = model_2.predict(data)
for i in range(len(pred_1)):
self.assertEqual(pred_1[i], pred_2[i])
def test_predict_with_sframe(self):
for index in range(len(self.models)):
model = self.models[index]
sf = self.trains[index]
for output_type in ["class", "probability_vector"]:
preds = model.predict(sf, output_type=output_type)
if output_type == "class":
assert preds.dtype == sf[self.target].dtype
else:
assert preds.dtype == _array
assert len(preds) == len(sf)
def test_predict_with_sarray(self):
for index in range(len(self.models)):
model = self.models[index]
sf = self.trains[index]
for output_type in ["class", "probability_vector"]:
preds = model.predict(sf[self.feature], output_type=output_type)
if output_type == "class":
assert preds.dtype == sf[self.target].dtype
else:
assert preds.dtype == _array
assert len(preds) == len(sf)
def test_predict_topk(self):
k = 2
for index in range(len(self.models)):
model = self.models[index]
sf = self.trains[index]
for output_type in ["rank", "probability"]:
preds = model.predict_topk(sf, k=k, output_type=output_type)
assert "id" in preds.column_names()
assert "class" in preds.column_names()
if output_type == "rank":
assert preds["rank"].dtype == int
assert sorted(preds["rank"].unique()) == [0, 1]
else:
assert output_type == "probability"
assert preds["probability"].dtype == float
assert len(preds) == k * len(sf)
def test_predict_output_type_probability_with_sframe(self):
for index in range(len(self.models)):
model = self.models[index]
sf = self.trains[index]
if len(sf[self.target].unique()) > 2:
with self.assertRaises(_ToolkitError):
model.predict(sf, output_type="probability")
else:
preds = model.predict(sf, output_type="probability")
assert preds.dtype == float
def test_predict_output_type_probability_with_sarray(self):
for index in range(len(self.models)):
model = self.models[index]
sf = self.trains[index]
if len(sf[self.target].unique()) > 2:
with self.assertRaises(_ToolkitError):
model.predict(sf[self.feature], output_type="probability")
else:
preds = model.predict(sf[self.feature], output_type="probability")
assert preds.dtype == float
def test_evaluate_without_ground_truth(self):
for index in range(len(self.trains)):
model = self.models[index]
sf = self.trains[index]
sf_without_ground_truth = sf.select_columns([self.feature])
with self.assertRaises(_ToolkitError):
model.evaluate(sf_without_ground_truth)
def test_evaluate_with_ground_truth(self):
all_metrics = [
"accuracy",
"auc",
"precision",
"recall",
"f1_score",
"log_loss",
"confusion_matrix",
"roc_curve",
]
for index in range(len(self.models)):
model = self.models[index]
sf = self.trains[index]
individual_run_results = dict()
for metric in all_metrics:
evaluation = model.evaluate(sf, metric=metric)
assert metric in evaluation
individual_run_results[metric] = evaluation[metric]
evaluation = model.evaluate(sf, metric="auto")
for metric in all_metrics:
if metric in ["confusion_matrix", "roc_curve"]:
test_util.SFrameComparer()._assert_sframe_equal(
individual_run_results[metric], evaluation[metric]
)
else:
assert metric in evaluation
assert individual_run_results[metric] == evaluation[metric]
def test_evaluate_with_unsupported_metric(self):
for index in range(len(self.trains)):
model = self.models[index]
sf = self.trains[index]
with self.assertRaises(_ToolkitError):
model.evaluate(sf, metric="unsupported")
def test_save_and_load(self):
for index in range(len(self.models)):
old_model, data = self.models[index], self.trains[index]
with test_util.TempDirectory() as filename:
old_model.save(filename)
new_model = _tc.load_model(filename)
old_preds = old_model.predict(data)
new_preds = new_model.predict(data)
assert (
new_preds.dtype == old_preds.dtype
and (new_preds == old_preds).all()
)
def test_export_coreml(self):
import coremltools
import platform
max_iters_ans = [str(self.max_iterations), "1"]
warm_start_ans = "" if self.warm_start is None else self.warm_start
for i, model in enumerate(self.models):
filename = tempfile.NamedTemporaryFile(suffix=".mlmodel").name
model.export_coreml(filename)
# Load the model back from the CoreML model file
coreml_model = coremltools.models.MLModel(filename)
self.assertDictEqual(
{
"com.github.apple.turicreate.version": _tc.__version__,
"com.github.apple.os.platform": platform.platform(),
"target": self.target,
"feature": self.feature,
"type": "drawing_classifier",
"warm_start": warm_start_ans,
"max_iterations": max_iters_ans[i],
"version": "2",
},
dict(coreml_model.user_defined_metadata),
)
expected_result = (
"Drawing classifier created by Turi Create (version %s)"
% (_tc.__version__)
)
            self.assertEqual(expected_result, coreml_model.short_description)
@unittest.skipIf(_sys.platform != "darwin", "Core ML only supported on Mac")
def test_export_coreml_with_predict(self):
for test_number in range(len(self.models)):
feature = self.feature
model = self.models[test_number]
sf = self.trains[test_number]
if self.warm_start:
prefix = "pretrained" + str(test_number)
else:
prefix = "scratch" + str(test_number)
filename = tempfile.NamedTemporaryFile(
prefix=prefix, suffix=".mlmodel"
).name
model.export_coreml(filename)
mlmodel = _coremltools.models.MLModel(filename)
tc_preds = model.predict(sf)
if test_number == 1:
# stroke input
sf[feature] = _tc.drawing_classifier.util.draw_strokes(sf[self.feature])
for row_number in range(len(sf)):
core_ml_preds = mlmodel.predict(
{"drawing": sf[feature][row_number]._to_pil_image()}
)
assert core_ml_preds[self.target] == tc_preds[row_number]
if test_number == 1:
sf = sf.remove_column(feature)
def test_draw_strokes_sframe(self):
sf = self.stroke_sf
sf["rendered"] = _tc.drawing_classifier.util.draw_strokes(sf[self.feature])
for index in range(len(sf["rendered"])):
rendered = sf["rendered"][index]
assert (
type(rendered) == _tc.Image
and rendered.channels == 1
and rendered.width == 28
and rendered.height == 28
)
def test_draw_strokes_single_input(self):
sf = self.stroke_sf
single_bitmap = _tc.drawing_classifier.util.draw_strokes(sf[self.feature][0])
assert (
type(single_bitmap) == _tc.Image
and single_bitmap.channels == 1
and single_bitmap.width == 28
and single_bitmap.height == 28
)
def test_repr(self):
for model in self.models:
self.assertEqual(type(str(model)), str)
self.assertEqual(type(model.__repr__()), str)
def test_summary(self):
for model in self.models:
model.summary()
def test_summary_str(self):
for model in self.models:
self.assertTrue(isinstance(model.summary("str"), str))
def test_summary_dict(self):
for model in self.models:
self.assertTrue(isinstance(model.summary("dict"), dict))
def test_summary_invalid_input(self):
for model in self.models:
with self.assertRaises(_ToolkitError):
model.summary(model.summary("invalid"))
with self.assertRaises(_ToolkitError):
model.summary(model.summary(0))
with self.assertRaises(_ToolkitError):
model.summary(model.summary({}))
class DrawingClassifierFromScratchTest(DrawingClassifierTest):
@classmethod
def setUpClass(self):
super(DrawingClassifierFromScratchTest, self).setUpClass(warm_start=None)
class DrawingClassifierUsingQuickdraw245(DrawingClassifierTest):
@classmethod
def setUpClass(self):
super(DrawingClassifierUsingQuickdraw245, self).setUpClass(
warm_start="quickdraw_245_v0"
)
# ===== StarcoderdataPython sample 5086605 | repo: ska-sa/scape =====
"""Unit test suite for scape."""
import unittest
# pylint: disable-msg=W0403
import test_stats
import test_gaincal
import test_scan
import test_scape
import test_xdmfits
def suite():
loader = unittest.TestLoader()
testsuite = unittest.TestSuite()
testsuite.addTests(loader.loadTestsFromModule(test_stats))
testsuite.addTests(loader.loadTestsFromModule(test_gaincal))
testsuite.addTests(loader.loadTestsFromModule(test_scan))
testsuite.addTests(loader.loadTestsFromModule(test_scape))
testsuite.addTests(loader.loadTestsFromModule(test_xdmfits))
return testsuite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
# ===== StarcoderdataPython sample 38929 | repo: xswz8015/infra =====
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import subprocess
import argparse
import sys
import os
import re
import json
import codecs
import platform
# Used to run commands through powershell on windows and collect the responses
# and log files that may be generated by the command run. This is meant for use
# in situations where the commands either don't return the results to STDOUT,
# instead writing to a log file, or if the commands do return results but do not
# write the logs to STDOUT. This can also be used to run regular windows
# executables as well, powershell will happily execute them for you.
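# Illustrative invocation (the wrapper's file name is hypothetical; the flags match
# the argparse options defined in main() below):
#   python psexec_wrapper.py --command C:\pkg\install.ps1 --logs C:\pkg\logs --ret_codes 0 3010
# Any extra positional arguments are forwarded to the powershell command itself.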
# ALLOWLIST filters the files that can be read to STDOUT
ALLOWLIST = [
re.compile(y) for y in [
'^.*\.log$', # Log files
]
]
# TODO(anushruth): Update list with all other possibilities.
codec_map = {
codecs.BOM_UTF16_LE: 'utf-16-le',
codecs.BOM_UTF32: 'utf-32',
codecs.BOM_UTF8: 'utf-8-sig'
}
def main(argv):
"""Runs the given powershell command and writes all the logs to stdout """
# Ensure we are running on windows. Until MS releases powershell for other
# platforms
if platform.system() != 'Windows':
print(json.dumps(gen_result('Not run on Windows')))
return
parser = argparse.ArgumentParser(
description="""Runs a powershell command,
waits for its completion and returns all
the output logs to stdout as a json""",
epilog="""Meant to be used by the powershell
recipe module to run a command that
generates multiple logs and stream the logs
back to the recipe""")
# command is the actual powershell command required to be run
parser.add_argument(
'--command', required=True, help='powershell command to execute')
parser.add_argument(
'--logs',
required=False,
nargs='*',
help='log file or dir to watch and return stuff from')
parser.add_argument(
'--ret_codes',
required=False,
type=int,
default=[0],
nargs='*',
help='return codes to treat as success')
parser.add_argument(
'args', nargs='*', help='optionals args to the powershell command')
iput = parser.parse_args(argv[1:])
logs = exec_ps(
iput.command, iput.logs, args=iput.args, ret_codes=iput.ret_codes)
print(json.dumps(logs))
def ensure_logs(logs):
""" Checks if any log dir doesn't exist and creates it"""
if not logs:
return
for log in logs:
if not os.path.exists(log) and not is_allowed(log):
# If the path doesn't exist and is not a file. Create the dir
os.makedirs(log)
def exec_ps(command, logs, args, ret_codes):
""" Executes a power shell command and waits for it to complete.
Returns all the logs on completion as a json to stdout.
command: path to script/executable/batchfile, powershell command
logs: list of log files and directories.
args: optional args to the command
ret_codes: optional success return codes
Returns dict containing keys 'results' and every '<log-file>' in logs."""
ensure_logs(logs)
# powershell command
psc = ['powershell', '-Command', command] + args
# l contains all the logs + return values
l = {}
# Attempt to execute the command
try:
output = subprocess.check_output(psc, stderr=subprocess.STDOUT)
try:
# Check if the output is a json file
jout = json.loads(output)
except Exception as j:
# It's not known if the script completed successfully
l = gen_result(
'No json object returned. Check stdout_stderr for ' +
'results. {}'.format(j), True)
# not a json return script
l['stdout_stderr'] = output
else:
# It's a json file
l['results'] = jout
except subprocess.CalledProcessError as e:
# Non zero return by the script/cmd run
l = gen_result(e.output, e.returncode in ret_codes)
l['results']['Command'] = e.cmd
l['results']['ReturnCode'] = e.returncode
l['stdout_stderr'] = e.output
except Exception as e:
# Failed to run the command for some reason
l = gen_result(str(e))
l['results']['Command'] = ' '.join(psc)
finally:
# Read all the logs to stdout
if logs:
for log in logs:
if os.path.isdir(log):
for k, v in read_logs([os.path.join(log, x) for x in os.listdir(log)
]).items():
l[k] = v
        else:
          op = read_logs([log])
          if log in op:
            l[log] = op[log]
return l
def read_logs(logs):
""" Reads all the given files to RAM and returns a dict of contents.
logs: list of log files and directories.
Returns dict containing keys for each log file and its contents
as value. """
l = {}
for log in logs:
if not os.path.isdir(log) and is_allowed(log):
f = open(log, 'r')
contents = f.read()
l[log] = contents
# Some logs may be encoded differently. Check if they have the unicode
# BOM at the start and decode them. ADKSetup is known to generate logs
# with different encodings during the same run.
for k, v in codec_map.items():
if len(contents) >= len(k) and k == contents[:len(k)]:
# See codec_map for codec to decode string mapping
l[log] = contents.decode(v)
break
f.close()
return l
def is_allowed(l):
""" Implements ALLOWLIST
l: log file to check
Returns True if l matches anything in ALLOWLIST, false otherwise. """
for d in ALLOWLIST:
if d.match(l) != None:
return True
return False
def gen_result(err, success=False):
""" gen_result returns the result dict with given params"""
return {'results': {'Success': success, 'ErrorInfo': {'Message': err,}}}
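# For reference, gen_result('boom') evaluates to
# {'results': {'Success': False, 'ErrorInfo': {'Message': 'boom'}}}.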
if __name__ == '__main__':
main(sys.argv)
# ===== StarcoderdataPython sample 1806957 | 1000+ stars =====
import unittest
class TestMixins(unittest.TestCase):
def testLocal(self):
from pulsar.utils.structures import AttributeDictionary
from pulsar.utils.log import LocalMixin
elem = LocalMixin()
el = elem.local
self.assertTrue(isinstance(el, AttributeDictionary))
self.assertEqual(id(elem.local), id(el))
self.assertEqual(elem.local.process, None)
elem.local.process = True
self.assertEqual(elem.local.process, True)
# ===== StarcoderdataPython sample 224727 | file: secure_transfer/forms.py =====
from django import forms
from django.core.exceptions import ValidationError
from .models import ProtectedItem
class ProtectedWithPasswordForm(forms.Form):
token = forms.CharField(widget=forms.HiddenInput)
password = forms.CharField(widget=forms.PasswordInput)
def clean(self):
cleaned_data = super().clean()
token = cleaned_data.get("token")
password = cleaned_data.get("password")
if token and password:
            self.item = ProtectedItem.objects.filter(token=token).first()
if not self.item or not self.item.check_password(password):
raise ValidationError("password incorrect")
if self.item.is_expired:
raise ValidationError("link expired")
# ===== StarcoderdataPython sample 114697 | 0 stars =====
def fibonacci(n):
    # Fast doubling: F(2k) = F(k) * (2*F(k+1) - F(k)) and F(2k+1) = F(k)^2 + F(k+1)^2.
    # Returns the pair (F(n), F(n+1)).
    if n == 0:
        return (0, 1)
    else:
        a, b = fibonacci(n // 2)
        c = a * (b * 2 - a)
        d = a * a + b * b
        if n % 2 == 0:
            return (c, d)
        else:
            return (d, c + d)
x = 0
y = 0
num = 1
while len(str(x)) < 1000:
x, y = fibonacci(num)
num += 1
print(len(str(y)),len(str(x)),num)
# ===== StarcoderdataPython sample 8022689 =====
from prometheus_client import make_wsgi_app, Gauge
from wsgiref.simple_server import make_server
from redis.sentinel import Sentinel
import redis,sys,os
APP_HOSTNAME = os.getenv('APP_HOSTNAME', '127.0.0.1')
APP_PORT = int(os.getenv('APP_PORT', 9000))
REDIS_HOSTNAME = os.getenv('REDIS_HOSTNAME', '127.0.0.1')
REDIS_PORT = int(os.getenv('PORT', 6379))
REDIS_PASS = os.getenv('REDIS_PASS', 'none')
ENV = os.getenv('ENV', 'local')
QLIST = os.getenv('QLIST')
SENTINEL_HOST = os.getenv('SENTINEL_HOST', 'none')
SENTINEL_REDIS_NAME = os.getenv('SENTINEL_REDIS_NAME', 'none')
SENTINEL_PORT = int(os.getenv('SENTINEL_PORT', 26379))
SENTINEL_REDIS_PASSWORD = os.getenv('SENTINEL_REDIS_PASSWORD', 'none')
if QLIST is None:
    sys.exit('Please set env: QLIST')
QLIST = QLIST.split(' ')
g = Gauge('redis_queue_length', 'Length of queues', ['env','queue_name'])
def get_metrics():
if SENTINEL_HOST == 'none':
r = redis.Redis(host=REDIS_HOSTNAME, port=REDIS_PORT, password=REDIS_PASS)
else:
hosts = [SENTINEL_HOST]
        sentinel = Sentinel([(h, SENTINEL_PORT) for h in hosts], socket_timeout=0.1)
r = sentinel.master_for(SENTINEL_REDIS_NAME, password=SENTINEL_REDIS_PASSWORD)
try:
r.ping()
except redis.ConnectionError:
print("Cannot make connection to redis")
pass
for q in QLIST:
qlen = r.llen(q)
g.labels(ENV,q).set(qlen)
metrics_app = make_wsgi_app()
def my_app(environ, start_fn):
if environ['PATH_INFO'] == '/metrics':
get_metrics()
return metrics_app(environ, start_fn)
httpd = make_server(APP_HOSTNAME, APP_PORT, my_app)
httpd.serve_forever()
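# Example deployment sketch (queue names are illustrative): export
# QLIST="emails:pending emails:failed" plus the REDIS_* variables above, start
# this process, and point Prometheus at http://<APP_HOSTNAME>:<APP_PORT>/metrics
# to collect the redis_queue_length gauge per queue.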
# ===== StarcoderdataPython sample 11220179 | file: 2021/day15.py =====
from mylib.aoc_frame import Day
import mylib.no_graph_lib as nog
class PartA(Day):
def compute(self, d): # return puzzle result, get parsing data from attributes of d
return do(d, 1)
class PartB(PartA):
def compute(self, d): # return puzzle result, get parsing data from attributes of d
return do(d, 5)
def do(d, size_factor):
levels = d.text.splitlines()
size = len(levels)
high = size * size_factor - 1
def next_edges(p, _):
for neighbor in nog.matrix_neighbors(p, ((0, high), (0, high)), no_diagonals=True):
x, y = neighbor
level = int(levels[y % size][x % size]) + y // size + x // size
while level > 9:
level -= 9
yield neighbor, level
traversal = nog.TraversalShortestPaths(next_edges)
traversal.start_from((0, 0)).go_to((high, high))
return traversal.distance
Day.do_day(day=15, year=2021, part_a=PartA, part_b=PartB)
# ===== StarcoderdataPython sample 5009798 =====
import json
import pytest
from pathlib import Path
from core import GeneratorFactory, Config, resize, make_transparent
base_dir = Path().cwd() / 'example'
background_path = base_dir / 'small_bg.jpg'
sub_image_path = base_dir / 'wings.png'
logo_first_part_path = base_dir / 'shikimori-glyph.png'
logo_second_part_path = base_dir / 'shikimori-logo.png'
header_font = base_dir / 'Noto_Serif' / 'NotoSerif-Bold.ttf'
text_font = base_dir / 'Noto_Serif' / 'NotoSerif-Regular.ttf'
subheader_font = base_dir / 'Noto_Serif_JP' / 'NotoSerifJP-Bold.otf'
sub_image = resize(make_transparent(sub_image_path), 0.45)
logo_first_part = resize(make_transparent(logo_first_part_path), 0.2)
logo_second_part = resize(make_transparent(logo_second_part_path), 0.2)
average_anime = [
{
"score": 9.15,
"name": "Fullmetal Alchemist: Brotherhood",
"japanese_synonyms": "[\"\\u92fc\\u306e\\u932c\\u91d1\\u8853\\u5e2b FULLMETAL ALCHEMIST\"]"
},
{
"score": 9.09,
"name": "Gintama°",
"japanese_synonyms": "[\"\\u9280\\u9b42\\u00b0\"]"
},
{
"score": 9.09,
"name": "Steins;Gate",
"japanese_synonyms": "[\"STEINS;GATE\"]"
},
{
"score": 9.09,
"name": "Shingeki no Kyojin Season 3 Part 2",
"japanese_synonyms": "[\"\\u9032\\u6483\\u306e\\u5de8\\u4eba Season3 Part.2\"]"
},
{
"score": 9.06,
"name": "Fruits Basket: The Final",
"japanese_synonyms": "[\"\\u30d5\\u30eb\\u30fc\\u30c4\\u30d0\\u30b9\\u30b1\\u30c3\\u30c8 The Final\"]"
},
{
"score": 9.06,
"name": "Hunter x Hunter (2011)",
"japanese_synonyms": "[\"HUNTER\\u00d7HUNTER\\uff08\\u30cf\\u30f3\\u30bf\\u30fc\\u00d7\\u30cf\\u30f3\\u30bf\\u30fc\\uff09\"]"
},
{
"score": 9.06,
"name": "Gintama'",
"japanese_synonyms": "[\"\\u9280\\u9b42'\"]"
},
{
"score": 9.05,
"name": "Gintama: The Final",
"japanese_synonyms": "[\"\\u9280\\u9b42 THE FINAL\"]"
},
{
"score": 9.04,
"name": "<NAME>",
"japanese_synonyms": "[\"\\u9280\\u6cb3\\u82f1\\u96c4\\u4f1d\\u8aac\"]"
},
{
"score": 9.04,
"name": "Gintama': Enchousen",
"japanese_synonyms": "[\"\\u9280\\u9b42' \\u5ef6\\u9577\\u6226\"]"
}
]
shortest_anime = [
{
"name": "F",
"japanese_synonyms": "[\"\\uff26-\\u30a8\\u30d5\"]",
"l": 1
},
{
"name": "X",
"japanese_synonyms": "[\"X\\u2212\\u30a8\\u30c3\\u30af\\u30b9\\u2212\"]",
"l": 1
},
{
"name": "K",
"japanese_synonyms": "[\"K\"]",
"l": 1
},
{
"name": "◯",
"japanese_synonyms": "[\"\\u25ef\"]",
"l": 1
},
{
"name": "C³",
"japanese_synonyms": "[\"\\u30b7\\u30fc\\u30ad\\u30e5\\u30fc\\u30d6\"]",
"l": 2
},
{
"name": "Yu",
"japanese_synonyms": "[\"\\u9b5a\"]",
"l": 2
},
{
"name": "TO",
"japanese_synonyms": "[\"\\u30c8\\u30a5\\u30fc\"]",
"l": 2
},
{
"name": "Ai",
"japanese_synonyms": "[\"\\u611b\"]",
"l": 2
},
{
"name": "We",
"japanese_synonyms": "[\"We\"]",
"l": 2
},
{
"name": "OZ",
"japanese_synonyms": "[\"\\u30aa\\u30ba\"]",
"l": 2
}
]
longest_anime = [
{
"name": "Kochira Katsushikaku Kameari Kouenmae Hashutsujo: Seton Tankentai! Sumidagawa no Chikai - Omoide no Shiroi Kujira wo Sagase!",
"japanese_synonyms": "[\"\\u3053\\u3061\\u3089\\u845b\\u98fe\\u533a\\u4e80\\u6709\\u516c\\u5712\\u524d\\u6d3e\\u51fa\\u6240 \\u30b7\\u30fc\\u30c8\\u30f3\\u63a2\\u691c\\u968a!\\u9685\\u7530\\u5ddd\\u306e\\u8a93\\u3044\\u301c\\u601d\\u3044\\u51fa\\u306e\\u767d\\u3044\\u9be8\\u3092\\u63a2\\u305b!\\u301c\"]",
"l": 124
},
{
"name": "<NAME>: Ryou-san to Chuuken Lucky Monogatari - Kameari Dai Houimou wo Kawase!!",
"japanese_synonyms": "[\"\\u3053\\u3061\\u3089\\u845b\\u98fe\\u533a\\u4e80\\u6709\\u516c\\u5712\\u524d\\u6d3e\\u51fa\\u6240 \\u4e21\\u3055\\u3093\\u3068\\u5fe0\\u72ac\\u30e9\\u30c3\\u30ad\\u30fc\\u7269\\u8a9e \\u301c\\u4e80\\u6709\\u5927\\u5305\\u56f2\\u7db2\\u3092\\u304b\\u308f\\u305b!!\"]",
"l": 120
},
{
"name": "<NAME>ouenmae Hashutsujo: Ryoutsu no Asakusa Renewal Daisakusen!! - Aa, Omoide no Hanayashiki",
"japanese_synonyms": "[\"\\u3053\\u3061\\u3089\\u845b\\u98fe\\u533a\\u4e80\\u6709\\u516c\\u5712\\u524d\\u6d3e\\u51fa\\u6240 \\u4e21\\u6d25\\u306e\\u6d45\\u8349\\u30ea\\u30cb\\u30e5\\u30fc\\u30a2\\u30eb\\u5927\\u4f5c\\u6226!! \\u301c\\u3042\\u3041 \\u601d\\u3044\\u51fa\\u306e\\u82b1\\u3084\\u3057\\u304d\\u301c\"]",
"l": 117
},
{
"name": "<NAME>: Ryoutsu vs. Nakimushi Idol!? Nihon 1-shuu Dai Sugoroku Game!!",
"japanese_synonyms": "[\"\\u3053\\u3061\\u3089\\u845b\\u98fe\\u533a\\u4e80\\u6709\\u516c\\u5712\\u524d\\u6d3e\\u51fa\\u6240 \\u4e21\\u6d25VS\\u6ce3\\u304d\\u866b\\u30a2\\u30a4\\u30c9\\u30eb!? \\u65e5\\u672c1\\u5468\\u5927\\u3059\\u3054\\u308d\\u304f\\u30b2\\u30fc\\u30e0!!\"]",
"l": 111
},
{
"name": "<NAME> <NAME>: Mezase! Kameari Superstar!! Ryoutsu-shiki Idol e no Michi!",
"japanese_synonyms": "[\"\\u3053\\u3061\\u3089\\u845b\\u98fe\\u533a\\u4e80\\u6709\\u516c\\u5712\\u524d\\u6d3e\\u51fa\\u6240 \\u76ee\\u6307\\u305b!\\u4e80\\u6709\\u30b9\\u30fc\\u30d1\\u30fc\\u30b9\\u30bf\\u30fc!!\\u4e21\\u6d25\\u5f0f\\u30a2\\u30a4\\u30c9\\u30eb\\u3078\\u306e\\u9053!\"]",
"l": 108
},
{
"name": "Shin no Nakama ja Nai to Yuusha no Party wo Oidasareta node, Henkyou de Slow Life suru Koto ni Shimashita",
"japanese_synonyms": "[\"\\u771f\\u306e\\u4ef2\\u9593\\u3058\\u3083\\u306a\\u3044\\u3068\\u52c7\\u8005\\u306e\\u30d1\\u30fc\\u30c6\\u30a3\\u30fc\\u3092\\u8ffd\\u3044\\u51fa\\u3055\\u308c\\u305f\\u306e\\u3067\\u3001\\u8fba\\u5883\\u3067\\u30b9\\u30ed\\u30fc\\u30e9\\u30a4\\u30d5\\u3059\\u308b\\u3053\\u3068\\u306b\\u3057\\u307e\\u3057\\u305f\"]",
"l": 105
},
{
"name": "<NAME>: Shijou Saikyou no Maou no Shiso, Tensei shite Shison-tachi no Gakkou e Kayou",
"japanese_synonyms": "[\"\\u9b54\\u738b\\u5b66\\u9662\\u306e\\u4e0d\\u9069\\u5408\\u8005 \\uff5e\\u53f2\\u4e0a\\u6700\\u5f37\\u306e\\u9b54\\u738b\\u306e\\u59cb\\u7956\\u3001\\u8ee2\\u751f\\u3057\\u3066\\u5b50\\u5b6b\\u305f\\u3061\\u306e\\u5b66\\u6821\\u3078\\u901a\\u3046\\uff5e\"]",
"l": 105
},
{
"name": "<NAME>: Ryou-san no Sushi Kui Nee! - <NAME>uro Taiketsu!!",
"japanese_synonyms": "[\"\\u3053\\u3061\\u3089\\u845b\\u98fe\\u533a\\u4e80\\u6709\\u516c\\u5712\\u524d\\u6d3e\\u51fa\\u6240 \\u4e21\\u3055\\u3093\\u306e\\u5bff\\u53f8\\u98df\\u3044\\u306d\\u3048!\\u301c\\u9802\\u4e0a\\u30de\\u30b0\\u30ed\\u5bfe\\u6c7a!!\\u301c\"]",
"l": 104
},
{
"name": "<NAME>: Washi to Ore!? - Bokura wa Asakusa Shounen Tanteidan!",
"japanese_synonyms": "[\"\\u3053\\u3061\\u3089\\u845b\\u98fe\\u533a\\u4e80\\u6709\\u516c\\u5712\\u524d\\u6d3e\\u51fa\\u6240 \\u30ef\\u30b7\\u3068\\u4ffa!?\\u301c\\u307c\\u304f\\u3089\\u306f\\u6d45\\u8349\\u5c11\\u5e74\\u63a2\\u5075\\u56e3!\\u301c\"]",
"l": 103
},
{
"name": "<NAME> OVA 2: Hamuchanzu no Takara Sagashi Daisaku - Hamuha! Suteki na Umi no Natsuyasumi",
"japanese_synonyms": "[\"\\u30cf\\u30e0\\u3061\\u3083\\u3093\\u305a\\u306e\\u5b9d\\u3055\\u304c\\u3057\\u5927\\u4f5c\\u6226\\u301c\\u306f\\u3080\\u306f\\u30fc!\\u3059\\u3066\\u304d\\u306a\\u6d77\\u306e\\u306a\\u3064\\u3084\\u3059\\u307f\\u301c\"]",
"l": 99
}
]
def get_average_anime():
for i in range(len(average_anime)):
item = average_anime[i]
japanese = json.loads(item['japanese_synonyms'])
yield {'header': item['name'], 'subheader': japanese[0] or None,
'main_text': f'#{i + 1} anime in history'}
def get_shortest_anime():
for i in range(len(shortest_anime)):
item = shortest_anime[i]
japanese = json.loads(item['japanese_synonyms'])
yield {'header': item['name'], 'subheader': japanese[0] or None,
'main_text': f'#{i + 1} аниме с самым коротким названием'}
def get_longest_anime():
for i in range(len(longest_anime)):
item = longest_anime[i]
japanese = json.loads(item['japanese_synonyms'])
yield {'header': item['name'], 'subheader': japanese[0] or None,
'main_text': f'#{i + 1} аниме с самым длинным названием'}
def test_full_case(image_similarity):
config = Config(
background=background_path,
sub_image=sub_image,
logo_first_part=logo_first_part,
logo_second_part=logo_second_part,
header_font=header_font,
text_font=text_font,
small_text_font=text_font,
subheader_font=subheader_font,
main_font=text_font,
site='anime-recommend.ru',
)
banner_factory = GeneratorFactory.banner(config)
fname = image_similarity['filename']
banner_factory.generate_file(
fname,
header='Fruits Basket: The Final',
main_text='The best anime ever!',
subheader='フルーツバスケット The Final',
small_text='based on'
)
def test_full_case_ru(image_similarity):
config = Config(
background=background_path,
sub_image=sub_image,
logo_first_part=logo_first_part,
logo_second_part=logo_second_part,
header_font=header_font,
text_font=text_font,
small_text_font=text_font,
subheader_font=subheader_font,
main_font=text_font,
site='anime-recommend.ru',
)
banner_factory = GeneratorFactory.banner(config)
fname = image_similarity['filename']
banner_factory.generate_file(
fname,
header='Fruits Basket: The Final',
main_text='Лучшее аниме в истории!',
subheader='フルーツバスケット The Final',
small_text='по данным'
)
@pytest.mark.parametrize('anime', get_average_anime())
def test_using_table(anime, image_similarity):
config = Config(
background=background_path,
sub_image=sub_image,
logo_first_part=logo_first_part,
logo_second_part=logo_second_part,
header_font=header_font,
text_font=text_font,
small_text_font=text_font,
subheader_font=subheader_font,
main_font=text_font,
site='anime-recommend.ru',
)
banner_factory = GeneratorFactory.banner(config)
fname = image_similarity['filename']
banner_factory.generate_file(
fname,
header=anime['header'],
main_text=anime['main_text'],
subheader=anime['subheader'],
small_text='по данным'
)
@pytest.mark.parametrize('s_anime', get_shortest_anime())
def test_shortest_anime(s_anime, image_similarity):
config = Config(
background=background_path,
sub_image=sub_image,
logo_first_part=logo_first_part,
logo_second_part=logo_second_part,
header_font=header_font,
text_font=text_font,
small_text_font=text_font,
subheader_font=subheader_font,
main_font=text_font,
site='anime-recommend.ru',
)
banner_factory = GeneratorFactory.banner(config)
fname = image_similarity['filename']
banner_factory.generate_file(
fname,
header=s_anime['header'],
main_text=s_anime['main_text'],
subheader=s_anime['subheader']
)
@pytest.mark.parametrize('l_anime', get_longest_anime())
def test_longest_anime(l_anime, image_similarity):
config = Config(
background=background_path,
sub_image=sub_image,
logo_first_part=logo_first_part,
logo_second_part=logo_second_part,
header_font=header_font,
text_font=text_font,
small_text_font=text_font,
subheader_font=subheader_font,
main_font=text_font,
site='anime-recommend.ru',
)
banner_factory = GeneratorFactory.banner(config)
fname = image_similarity['filename']
banner_factory.generate_file(
fname,
header=l_anime['header'],
main_text=l_anime['main_text'],
subheader=l_anime['subheader']
)
def test_without_site(image_similarity):
config = Config(
background=background_path,
sub_image=sub_image,
logo_first_part=logo_first_part,
logo_second_part=logo_second_part,
header_font=header_font,
text_font=text_font,
small_text_font=text_font,
subheader_font=subheader_font,
main_font=text_font,
)
banner_factory = GeneratorFactory.banner(config)
fname = image_similarity['filename']
banner_factory.generate_file(
fname,
header='Fruits Basket: The Final',
main_text='Лучшее аниме в истории!',
subheader='フルーツバスケット The Final',
small_text='по данным'
)
def test_without_small_text(image_similarity):
config = Config(
background=background_path,
sub_image=sub_image,
logo_first_part=logo_first_part,
logo_second_part=logo_second_part,
header_font=header_font,
text_font=text_font,
small_text_font=text_font,
subheader_font=subheader_font,
main_font=text_font,
site='anime-recommend.ru',
)
banner_factory = GeneratorFactory.banner(config)
fname = image_similarity['filename']
banner_factory.generate_file(
fname,
header='Fruits Basket: The Final',
main_text='Лучшее аниме в истории!',
subheader='フルーツバスケット The Final',
)
def test_without_subheader(image_similarity):
config = Config(
background=background_path,
sub_image=sub_image,
logo_first_part=logo_first_part,
logo_second_part=logo_second_part,
header_font=header_font,
text_font=text_font,
small_text_font=text_font,
subheader_font=subheader_font,
main_font=text_font,
site='anime-recommend.ru',
)
banner_factory = GeneratorFactory.banner(config)
fname = image_similarity['filename']
banner_factory.generate_file(
fname,
header='Fruits Basket: The Final',
main_text='Лучшее аниме в истории!',
small_text='по данным'
)
def test_without_logo(image_similarity):
config = Config(
background=background_path,
sub_image=sub_image,
header_font=header_font,
text_font=text_font,
small_text_font=text_font,
subheader_font=subheader_font,
main_font=text_font,
site='anime-recommend.ru',
)
banner_factory = GeneratorFactory.banner(config)
fname = image_similarity['filename']
banner_factory.generate_file(
fname,
header='Fruits Basket: The Final',
subheader='フルーツバスケット The Final',
main_text='Лучшее аниме в истории!',
small_text='по данным'
)
def test_without_logo_and_subimage(image_similarity):
config = Config(
background=background_path,
header_font=header_font,
text_font=text_font,
small_text_font=text_font,
subheader_font=subheader_font,
main_font=text_font,
site='anime-recommend.ru',
)
banner_factory = GeneratorFactory.banner(config)
fname = image_similarity['filename']
banner_factory.generate_file(
fname,
header='Fruits Basket: The Final',
subheader='フルーツバスケット The Final',
main_text='Лучшее аниме в истории!',
)
def test_minimal_setup(image_similarity):
config = Config(
background=background_path,
header_font=header_font,
text_font=text_font,
small_text_font=text_font,
subheader_font=subheader_font,
main_font=text_font,
site='anime-recommend.ru',
)
banner_factory = GeneratorFactory.banner(config)
fname = image_similarity['filename']
banner_factory.generate_file(
fname,
header='Fruits Basket: The Final',
main_text='Лучшее аниме в истории!',
)
# ===== StarcoderdataPython sample 8185148 | file: safe_transaction_service/contracts/migrations/0002_auto_20210119_1136.py =====
# Generated by Django 3.1.5 on 2021-01-19 11:36
from django.db import migrations, models
import safe_transaction_service.contracts.models
class Migration(migrations.Migration):
dependencies = [
("contracts", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="contract",
name="display_name",
field=models.CharField(blank=True, default="", max_length=200),
),
migrations.AddField(
model_name="contract",
name="logo",
field=models.ImageField(
default=None,
null=True,
upload_to=safe_transaction_service.contracts.models.get_contract_logo_path,
),
),
]
# ===== StarcoderdataPython sample 3232986 | file: src/day13/day13-1.py | 0 stars =====
f = open('day13.txt', 'r')
data = f.readlines()
timestamp = int(data[0])
buses = list(data[1].strip().split(','))
while 'x' in buses:
buses.remove('x')
buses = list(map(int, buses))
print(timestamp)
print(buses)
remainders = list(map(lambda x: timestamp % x, buses))
print(remainders)
next_pass = []
for i in range(len(buses)):
    next_pass.append(buses[i] - remainders[i])
print(next_pass)
print(buses[next_pass.index(min(next_pass))] * min(next_pass))
# ===== StarcoderdataPython sample 1688042 =====
import re
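# Matches Roman numerals up to 3999: up to three M's for thousands, then the
# hundreds group (CM, CD, or an optional D with up to three C's), the tens group
# (XC, XL, or an optional L with up to three X's) and the units group
# (IX, IV, or an optional V with up to three I's).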
regex_pattern = r'(?<=^)M{0,3}(C[MD]|D?C{0,3})(X[CL]|L?X{0,3})(I[VX]|V?I{0,3})(?=$)' # Do not delete 'r'.
print(str(bool(re.match(regex_pattern, input()))))
# ===== StarcoderdataPython sample 3434904 | file: seeds/utils/parsing.py | 10-100 stars =====
# -*- coding: utf-8 -*-
"""
Collection of functions that perform different types of parsing
"""
__author__ = "<NAME> <<EMAIL>>"
__credits__ = "<NAME>"
import re
from seeds.SEEDSError import *
def parse_int_rangelist(s, sorted=False):
"""Parse a list of numeric ranges. These lists are a comma-separated list
of either single numbers or ranges, specified by number-number.
Parameters:
s
A string containing a comma-separated list of integers and ranges of
integers
sorted
Whether or not to sort the resulting list (default: False)
"""
    range_pattern = r"\s*(\-?\d+)\s*\-\s*(\-?\d+)\s*"
retval = []
if s:
tokens = s.split(",")
for t in tokens:
match = re.match(range_pattern, t)
if match:
start = int(match.group(1))
end = int(match.group(2))
for i in range(start, end+1):
retval.append(i)
else:
try:
x = int(t)
retval.append(x)
except ValueError:
raise IntRangelistFormatError(s)
if sorted:
retval.sort()
return retval
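# For example, parse_int_rangelist("1, 4-6, 12") returns [1, 4, 5, 6, 12] and
# parse_int_rangelist("4-6, 1", sorted=True) returns [1, 4, 5, 6].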
def parse_version_string(s):
"""Parse a version string and return a 3-element dict with keys 'operator',
'major', and 'minor'. Input strings are of the form:
<operator><major_version>.<minor_version>
Where <operator> is one of: <, <=, =, >=, or >. Although not recommended,
when the operator is omitted, = will be used.
"""
    pattern = r'^\s*(?P<operator>[<>=]+)?\s*(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<patch>\d+))?\s*$'
match = re.match(pattern, s)
if match:
retval = {}
if match.group('operator') == None:
retval['operator'] = '='
else:
retval['operator'] = match.group('operator')
retval['major'] = int(match.group('major'))
retval['minor'] = int(match.group('minor'))
if match.group('patch'):
retval['patch'] = int(match.group('patch'))
else:
retval['patch'] = 0
retval['version'] = (int(match.group('major')), int(match.group('minor')), int(retval['patch']))
return retval
else:
raise VersionStringFormatError("'{s}' is not a valid version string".format(s=s))
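# For example, parse_version_string(">=1.2") returns {'operator': '>=', 'major': 1,
# 'minor': 2, 'patch': 0, 'version': (1, 2, 0)}, while a malformed string such as
# "abc" raises VersionStringFormatError.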
# ===== StarcoderdataPython sample 1944767 | file: neutron/tests/unit/agent/linux/test_tc_lib.py =====
# Copyright 2016 OVH SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import constants
from neutron_lib.exceptions import qos as qos_exc
from neutron_lib.services.qos import constants as qos_consts
from pyroute2.netlink import rtnl
from neutron.agent.linux import tc_lib
from neutron.common import utils
from neutron.privileged.agent.linux import tc_lib as priv_tc_lib
from neutron.tests import base
DEVICE_NAME = "tap_device"
KERNEL_HZ_VALUE = 1000
BW_LIMIT = 2000 # [kbps]
BURST = 100 # [kbit]
LATENCY = 50 # [ms]
TC_FILTERS_OUTPUT = (
'filter protocol all pref 49152 u32 \nfilter protocol all pref '
'49152 u32 fh 800: ht divisor 1 \nfilter protocol all pref 49152 u32 fh '
'fc00:e968:6179::de52:7100 order 2048 key ht 800 \n match 00000000/00000000 at 0\n '
'police 0x1e rate %(bw)skbit burst %(burst)skbit mtu 2Kb action \n'
'drop overhead 0b \n ref 1 bind 1'
) % {'bw': BW_LIMIT, 'burst': BURST}
class BaseUnitConversionTest(object):
def test_convert_to_kilobits_bare_value(self):
value = "1000"
expected_value = 8 # kbit
self.assertEqual(
expected_value,
tc_lib.convert_to_kilobits(value, self.base_unit)
)
def test_convert_to_kilobits_bytes_value(self):
value = "1000b"
expected_value = 8 # kbit
self.assertEqual(
expected_value,
tc_lib.convert_to_kilobits(value, self.base_unit)
)
def test_convert_to_kilobits_bits_value(self):
value = "1000bit"
expected_value = utils.bits_to_kilobits(1000, self.base_unit)
self.assertEqual(
expected_value,
tc_lib.convert_to_kilobits(value, self.base_unit)
)
def test_convert_to_kilobits_megabytes_value(self):
value = "1m"
expected_value = utils.bits_to_kilobits(
self.base_unit ** 2 * 8, self.base_unit)
self.assertEqual(
expected_value,
tc_lib.convert_to_kilobits(value, self.base_unit)
)
def test_convert_to_kilobits_megabits_value(self):
value = "1mbit"
expected_value = utils.bits_to_kilobits(
self.base_unit ** 2, self.base_unit)
self.assertEqual(
expected_value,
tc_lib.convert_to_kilobits(value, self.base_unit)
)
def test_convert_to_bytes_wrong_unit(self):
value = "1Zbit"
self.assertRaises(
tc_lib.InvalidUnit,
tc_lib.convert_to_kilobits, value, self.base_unit
)
class TestSIUnitConversions(BaseUnitConversionTest, base.BaseTestCase):
base_unit = constants.SI_BASE
class TestIECUnitConversions(BaseUnitConversionTest, base.BaseTestCase):
base_unit = constants.IEC_BASE
class TestHandleFromHexToString(base.BaseTestCase):
def test_run(self):
test_cases = [(0x1, '0:1'),
(0x2a003f, '2a:3f'),
(0xf0000, 'f:0'),
(0xffffffff, 'ffff:ffff'),
(0x12345678, '1234:5678')]
for _in, expected in test_cases:
self.assertEqual(expected, tc_lib._handle_from_hex_to_string(_in))
class TestTcCommand(base.BaseTestCase):
def setUp(self):
super(TestTcCommand, self).setUp()
self.tc = tc_lib.TcCommand(DEVICE_NAME, KERNEL_HZ_VALUE)
self.mock_list_tc_qdiscs = mock.patch.object(tc_lib,
'list_tc_qdiscs').start()
self.mock_add_tc_qdisc = mock.patch.object(tc_lib,
'add_tc_qdisc').start()
self.mock_delete_tc_qdisc = mock.patch.object(
tc_lib, 'delete_tc_qdisc').start()
self.mock_list_tc_filters = mock.patch.object(
tc_lib, 'list_tc_filters').start()
self.mock_add_tc_filter_policy = mock.patch.object(
tc_lib, 'add_tc_filter_policy').start()
def test_check_kernel_hz_lower_then_zero(self):
self.assertRaises(
tc_lib.InvalidKernelHzValue,
tc_lib.TcCommand, DEVICE_NAME, 0
)
self.assertRaises(
tc_lib.InvalidKernelHzValue,
tc_lib.TcCommand, DEVICE_NAME, -100
)
def test_get_filters_bw_limits(self):
self.mock_list_tc_filters.return_value = [{'rate_kbps': BW_LIMIT,
'burst_kb': BURST}]
bw_limit, burst_limit = self.tc.get_filters_bw_limits()
self.assertEqual(BW_LIMIT, bw_limit)
self.assertEqual(BURST, burst_limit)
def test_get_filters_bw_limits_no_filters(self):
self.mock_list_tc_filters.return_value = []
bw_limit, burst_limit = self.tc.get_filters_bw_limits()
self.assertIsNone(bw_limit)
self.assertIsNone(burst_limit)
def test_get_filters_bw_limits_no_rate_info(self):
self.mock_list_tc_filters.return_value = [{'other_values': 1}]
bw_limit, burst_limit = self.tc.get_filters_bw_limits()
self.assertIsNone(bw_limit)
self.assertIsNone(burst_limit)
def test_get_tbf_bw_limits(self):
self.mock_list_tc_qdiscs.return_value = [
{'qdisc_type': 'tbf', 'max_kbps': BW_LIMIT, 'burst_kb': BURST}]
self.assertEqual((BW_LIMIT, BURST), self.tc.get_tbf_bw_limits())
def test_get_tbf_bw_limits_when_wrong_qdisc(self):
self.mock_list_tc_qdiscs.return_value = [{'qdisc_type': 'other_type'}]
self.assertEqual((None, None), self.tc.get_tbf_bw_limits())
def test_set_tbf_bw_limit(self):
self.tc.set_tbf_bw_limit(BW_LIMIT, BURST, LATENCY)
self.mock_add_tc_qdisc.assert_called_once_with(
DEVICE_NAME, 'tbf', parent='root', max_kbps=BW_LIMIT,
burst_kb=BURST, latency_ms=LATENCY, kernel_hz=self.tc.kernel_hz,
namespace=self.tc.namespace)
def test_update_filters_bw_limit(self):
self.tc.update_filters_bw_limit(BW_LIMIT, BURST)
self.mock_add_tc_qdisc.assert_called_once_with(
self.tc.name, 'ingress', namespace=self.tc.namespace)
self.mock_delete_tc_qdisc.assert_called_once_with(
self.tc.name, is_ingress=True, raise_interface_not_found=False,
raise_qdisc_not_found=False, namespace=self.tc.namespace)
self.mock_add_tc_filter_policy.assert_called_once_with(
self.tc.name, tc_lib.INGRESS_QDISC_ID, BW_LIMIT, BURST,
tc_lib.MAX_MTU_VALUE, 'drop', priority=49)
def test_delete_filters_bw_limit(self):
self.tc.delete_filters_bw_limit()
self.mock_delete_tc_qdisc.assert_called_once_with(
DEVICE_NAME, is_ingress=True, raise_interface_not_found=False,
raise_qdisc_not_found=False, namespace=self.tc.namespace)
def test_delete_tbf_bw_limit(self):
self.tc.delete_tbf_bw_limit()
self.mock_delete_tc_qdisc.assert_called_once_with(
DEVICE_NAME, parent='root', raise_interface_not_found=False,
raise_qdisc_not_found=False, namespace=self.tc.namespace)
def test_get_ingress_qdisc_burst_value_burst_not_none(self):
self.assertEqual(
BURST, self.tc.get_ingress_qdisc_burst_value(BW_LIMIT, BURST)
)
def test_get_ingress_qdisc_burst_no_burst_value_given(self):
expected_burst = BW_LIMIT * qos_consts.DEFAULT_BURST_RATE
self.assertEqual(
expected_burst,
self.tc.get_ingress_qdisc_burst_value(BW_LIMIT, None)
)
def test_get_ingress_qdisc_burst_burst_value_zero(self):
expected_burst = BW_LIMIT * qos_consts.DEFAULT_BURST_RATE
self.assertEqual(
expected_burst,
self.tc.get_ingress_qdisc_burst_value(BW_LIMIT, 0)
)
class TcTestCase(base.BaseTestCase):
def setUp(self):
super(TcTestCase, self).setUp()
self.mock_add_tc_qdisc = mock.patch.object(
priv_tc_lib, 'add_tc_qdisc').start()
self.namespace = 'namespace'
def test_add_tc_qdisc_htb(self):
tc_lib.add_tc_qdisc('device', 'htb', parent='root', handle='1:',
namespace=self.namespace)
self.mock_add_tc_qdisc.assert_called_once_with(
'device', parent=rtnl.TC_H_ROOT, kind='htb', handle='1:0',
namespace=self.namespace)
self.mock_add_tc_qdisc.reset_mock()
tc_lib.add_tc_qdisc('device', 'htb', parent='root', handle='2',
namespace=self.namespace)
self.mock_add_tc_qdisc.assert_called_once_with(
'device', parent=rtnl.TC_H_ROOT, kind='htb', handle='2:0',
namespace=self.namespace)
self.mock_add_tc_qdisc.reset_mock()
tc_lib.add_tc_qdisc('device', 'htb', parent='root', handle='3:12',
namespace=self.namespace)
self.mock_add_tc_qdisc.assert_called_once_with(
'device', parent=rtnl.TC_H_ROOT, kind='htb', handle='3:0',
namespace=self.namespace)
self.mock_add_tc_qdisc.reset_mock()
tc_lib.add_tc_qdisc('device', 'htb', parent='root', handle=4,
namespace=self.namespace)
self.mock_add_tc_qdisc.assert_called_once_with(
'device', parent=rtnl.TC_H_ROOT, kind='htb', handle='4:0',
namespace=self.namespace)
self.mock_add_tc_qdisc.reset_mock()
tc_lib.add_tc_qdisc('device', 'htb', parent='root',
namespace=self.namespace)
self.mock_add_tc_qdisc.assert_called_once_with(
'device', parent=rtnl.TC_H_ROOT, kind='htb',
namespace=self.namespace)
self.mock_add_tc_qdisc.reset_mock()
tc_lib.add_tc_qdisc('device', 'htb', parent='root', handle=5)
self.mock_add_tc_qdisc.assert_called_once_with(
'device', parent=rtnl.TC_H_ROOT, kind='htb', handle='5:0',
namespace=None)
self.mock_add_tc_qdisc.reset_mock()
def test_add_tc_qdisc_tbf(self):
tc_lib.add_tc_qdisc('device', 'tbf', parent='root', max_kbps=10000,
burst_kb=1500, latency_ms=70, kernel_hz=250,
namespace=self.namespace)
burst = tc_lib._get_tbf_burst_value(10000, 1500, 70) * 1024 / 8
self.mock_add_tc_qdisc.assert_called_once_with(
'device', parent=rtnl.TC_H_ROOT, kind='tbf', rate=10000 * 128,
burst=burst, latency=70000, namespace=self.namespace)
def test_add_tc_qdisc_tbf_missing_arguments(self):
self.assertRaises(
qos_exc.TcLibQdiscNeededArguments, tc_lib.add_tc_qdisc,
'device', 'tbf', parent='root')
def test_add_tc_qdisc_wrong_qdisc_type(self):
self.assertRaises(qos_exc.TcLibQdiscTypeError, tc_lib.add_tc_qdisc,
mock.ANY, 'wrong_qdic_type_name')
def test_list_tc_qdiscs_htb(self):
qdisc = {'index': 2, 'handle': 327680, 'parent': 4294967295,
'attrs': (('TCA_KIND', 'htb'), )}
with mock.patch.object(priv_tc_lib, 'list_tc_qdiscs') as \
mock_list_tc_qdiscs:
mock_list_tc_qdiscs.return_value = tuple([qdisc])
qdiscs = tc_lib.list_tc_qdiscs('device',
namespace=self.namespace)
self.assertEqual(1, len(qdiscs))
self.assertEqual('root', qdiscs[0]['parent'])
self.assertEqual('5:0', qdiscs[0]['handle'])
self.assertEqual('htb', qdiscs[0]['qdisc_type'])
@mock.patch('pyroute2.netlink.rtnl.tcmsg.common.tick_in_usec', 15.625)
def test_list_tc_qdiscs_tbf(self):
tca_tbf_params = {'buffer': 9375000,
'rate': 320000,
'limit': 208000}
qdisc = {'index': 2, 'handle': 327681, 'parent': 4294967295,
'attrs': (
('TCA_KIND', 'tbf'),
('TCA_OPTIONS', {'attrs': (
('TCA_TBF_PARMS', tca_tbf_params), )}))
}
with mock.patch.object(priv_tc_lib, 'list_tc_qdiscs') as \
mock_list_tc_qdiscs:
mock_list_tc_qdiscs.return_value = tuple([qdisc])
qdiscs = tc_lib.list_tc_qdiscs('device',
namespace=self.namespace)
self.assertEqual(1, len(qdiscs))
self.assertEqual('root', qdiscs[0]['parent'])
self.assertEqual('5:1', qdiscs[0]['handle'])
self.assertEqual('tbf', qdiscs[0]['qdisc_type'])
self.assertEqual(2500, qdiscs[0]['max_kbps'])
self.assertEqual(1500, qdiscs[0]['burst_kb'])
self.assertEqual(50, qdiscs[0]['latency_ms'])
def test__get_tbf_burst_value_when_burst_bigger_then_minimal(self):
result = tc_lib._get_tbf_burst_value(BW_LIMIT, BURST, KERNEL_HZ_VALUE)
self.assertEqual(BURST, result)
def test__get_tbf_burst_value_when_burst_smaller_then_minimal(self):
result = tc_lib._get_tbf_burst_value(BW_LIMIT, 0, KERNEL_HZ_VALUE)
self.assertEqual(2, result)
class TcPolicyClassTestCase(base.BaseTestCase):
def setUp(self):
super(TcPolicyClassTestCase, self).setUp()
self.mock_add_tc_policy_class = mock.patch.object(
priv_tc_lib, 'add_tc_policy_class').start()
self.mock_list_tc_policy_classes = mock.patch.object(
priv_tc_lib, 'list_tc_policy_classes').start()
self.namespace = 'namespace'
def test_add_tc_policy_class(self):
tc_lib.add_tc_policy_class(
'device', 'root', '1:10', min_kbps=1000, max_kbps=2000,
burst_kb=1600, namespace=self.namespace)
self.mock_add_tc_policy_class.assert_called_once_with(
'device', rtnl.TC_H_ROOT, '1:10', 'htb', rate=1000 * 128,
ceil=2000 * 128, burst=1600 * 128, namespace=self.namespace)
@mock.patch('pyroute2.netlink.rtnl.tcmsg.common.tick_in_usec', 15.625)
def test_list_tc_policy_classes(self):
htb_params = {'buffer': 12500000, 'ceil': 256000, 'rate': 192000}
self.mock_list_tc_policy_classes.return_value = tuple([
{'index': 3, 'handle': 65537, 'parent': 4294967295,
'attrs': (
('TCA_KIND', 'htb'),
('TCA_OPTIONS', {
'attrs': tuple([('TCA_HTB_PARMS', htb_params)])}))
}])
_class = tc_lib.list_tc_policy_class('device',
namespace=self.namespace)[0]
reference = {'device': 'device',
'index': 3,
'namespace': self.namespace,
'parent': 'root',
'classid': '1:1',
'qdisc_type': 'htb',
'min_kbps': 1500,
'max_kbps': 2000,
'burst_kb': 1200}
self.assertEqual(reference, _class)
class TcFilterTestCase(base.BaseTestCase):
def test__mac_to_pyroute2_keys(self):
mac = '01:23:45:67:89:ab'
offset = 10
keys = tc_lib._mac_to_pyroute2_keys(mac, offset)
high = {'value': 0x1234567,
'mask': 0xffffffff,
'offset': 10,
'key': '0x1234567/0xffffffff+10'}
low = {'value': 0x89ab0000,
'mask': 0xffff0000,
'offset': 14,
'key': '<KEY>'}
self.assertEqual(high, keys[0])
self.assertEqual(low, keys[1])
@mock.patch.object(priv_tc_lib, 'add_tc_filter_match32')
def test_add_tc_filter_vxlan(self, mock_add_filter):
tc_lib.add_tc_filter_vxlan('device', 'parent', 'classid',
'12:34:56:78:90:ab', 52, namespace='ns')
keys = ['0x3400/0xffffff00+32', '0x12345678/0xffffffff+42',
'0x90ab0000/0xffff0000+46']
mock_add_filter.assert_called_once_with(
'device', 'parent', 1, 'classid', keys, namespace='ns')
|
StarcoderdataPython
|
12820335
|
<filename>scripts/clear_lists.py
from database import *
'''
With this script you can clear the bot's lists.
After running it, restart the bot so that
the changes take effect.
'''
if __name__ == '__main__':
if database:
delete_lists = [
"blacklisted", # чёрный список
"whitelisted", # белый список
"admin", # список администраторов
]
for list_role in delete_lists:
Role.delete().where(Role.role == list_role).execute()
|
StarcoderdataPython
|
3261523
|
<reponame>gojek/CureIAM<filename>CureIAM/models/__init__.py
"""A package for models as data store packaged with this project.
"""
|
StarcoderdataPython
|
5125532
|
<gh_stars>1-10
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = 'rakomon',
version = '0.1.2',
author = '<NAME>',
author_email = '<EMAIL>',
description = 'Dead simple, configurable monitoring service based on tornado and apscheduler.',
license = 'MIT',
keywords = 'simple stupid monitoring',
url = 'https://github.com/khvn26/rakomon',
packages=['rakomon'],
install_requires=[
'APscheduler',
'tornado'
],
long_description=read('README.md'),
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3 :: Only',
'Topic :: System :: Monitoring',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
],
)
|
StarcoderdataPython
|
356001
|
<gh_stars>1-10
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler # Extended Telegram API
import logging
import telegram
from telegram.chataction import ChatAction # pure Telegram API (send_message method) #REVIEW
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ChatAction, InlineQueryResultArticle, InputTextMessageContent, ReplyKeyboardMarkup
import requests #3rd party module
import scrape
import time
import re
# format = "%(asctime)s: %(message)s"
# logging.basicConfig(format=format, level=logging.DEBUG,
# datefmt="%H:%M:%S")
class RespondToCommands():
""" Class to initialize the telegram dispacher and respond to the commands """
def __init__(self, botToken):
self.botToken = botToken
#update.message.text
def unknown(self, update, context):
print("Enter: Unknown")
context.bot.send_message(chat_id=update.message.chat_id, text="Sorry, I cannot understand that command.")
def start(self, update, context):
print("Enter: Start")
context.bot.send_message(chat_id=update.message.chat_id, text="Hey! \nWelcome to Amazon Price Tracker bot. \nStart with adding your product:\n /add <amazon URL> \n\nSet price alert for your product:\n /alert <price> \n\n Only one product can be tracked per user. ")
def preprocessURL(self, productURL):
""" Validate and clean the amazon.in URL """
# remove everything after ?ref= , /ref= or a ? and return a valid URL else False
print("Enter: preProcessURL")
amazonURL = re.match(r'^(https://|http://)(www.)*amazon.in/[a-zA-Z0-9-]*[/]*dp/[A-Z0-9]+[/?]?',productURL)
if amazonURL:
print(f"amazonURL:{amazonURL.group(0)}")
print("preprocessURL: True")
return True, amazonURL.group(0)
else:
print("preprocessURL: False")
return False, ""
def addProduct(self, update, context):
print("Enter: addProduct")
""" Add product to tracking for the user """
        # TODO: send a ChatAction.TYPING indicator while this handler is processing
context.bot.send_message(chat_id=update.message.chat_id, text="Adding product to tracking... Please wait")
# addArgs arguments passed with /add command. Expected to be amazon url
addArgs = ''.join(context.args)
print(addArgs)
headers = {
# "User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36' }
"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36' }
isValid, validURL = self.preprocessURL(addArgs)
if isValid:
response = requests.get(validURL, headers=headers, timeout=5)
print(response.status_code)
print(type(response.status_code))
print("Before: respone.status_code")
if response.status_code == 200:
# add product to tracking list
addProduct = scrape.UpdateTrackingList(validURL, update.message.chat_id)
if addProduct.addToTrackingList():
print("Bot: After if addToTrackingList")
context.bot.send_message(chat_id=update.message.chat_id, text="Great! Product is successfully added. Now set price alerts:\n\n /alert <price> ")
else:
context.bot.send_message(chat_id=update.message.chat_id, text="Only one product per user allowed.")
else:
context.bot.send_message(chat_id=update.message.chat_id, text="Product URL unreachable. Try again later.")
else:
            context.bot.send_message(chat_id=update.message.chat_id, text="Invalid arguments. Not a valid Amazon.in URL.")
def setAlertValue(self, update, context):
""" Set price threshold for the user """
print("Enter: setAlertValue")
#context.bot.send_chat_action(chat_id=update.message.chat_id, action=telegram.ChatAction.TYPING)
updateAlert = scrape.ChangeAlert(update.message.chat_id, ''.join(context.args))
if updateAlert.priceTypeCheck():
if updateAlert.changeAlert():
print("Bot: After if chageAlert")
context.bot.send_message(chat_id=update.message.chat_id, text="Price updated to {}".format(''.join(context.args)))
else:
context.bot.send_message(chat_id=update.message.chat_id, text="You must add a product first.")
else:
context.bot.send_message(chat_id=update.message.chat_id, text="Invalid price. Ex:\n /alert 8000")
def caps(self, update, context):
""" Reply with CAPS of args. Just for debugging """
# text_caps = ' '.join(context.args)
# print(text_caps)
# print(len(text_caps))
# context.bot.send_message(chat_id=update.message.chat_id, text=text_caps)
def initializeBot(self):
""" Initialize the bot """
# creating updater and dispatcher
updater = Updater(token=self.botToken, use_context=True)
dispatcher = updater.dispatcher
# create "start" command handler
start_handler = CommandHandler('start', self.start)
dispatcher.add_handler(start_handler)
# create "add" command handler
addHandler = CommandHandler('add', self.addProduct)
dispatcher.add_handler(addHandler)
# create "alert" command handler
alertHandler = CommandHandler('alert', self.setAlertValue)
dispatcher.add_handler(alertHandler)
# create "caps" command handler
caps_handler = CommandHandler('caps', self.caps)
dispatcher.add_handler(caps_handler)
        # *** the unknown handler MUST be added LAST ***
# create "unknown" command handler
unknown_handler = MessageHandler(Filters.text, self.unknown)
dispatcher.add_handler(unknown_handler)
# Start the webhook
        # Replace "amazon-price-tracker-bot" with your own Heroku app name
# Uncomment the below code to make it work with heroku.
# also use uptimerobot service to keep the container alive
# ***********************************
# import os
# PORT = os.environ.get('PORT')
# updater.start_webhook(listen="0.0.0.0", port=int(PORT), url_path=self.botToken)
# updater.bot.setWebhook("https://{}.herokuapp.com/{}".format("amazon-price-tracker-bot", self.botToken))
# watch the requests
print("Starting: Updater Pool")
updater.start_polling()
class TelegramNotification():
""" Class to send telegram notification """
def __init__(self, botToken, chatId):
print("Enter: TelegramNotification class")
self.botToken = botToken
self.chatId = chatId
def sendNotification(self, productTitle, productPrice):
print("Enter: Notification to Telegram")
bot = telegram.Bot(token=self.botToken)
bot.send_message(chat_id=self.chatId, text="PRICE REDUCED"+" \n"+productTitle.strip()+" \n"+productPrice.strip()+" \n \n")
|
StarcoderdataPython
|
8101111
|
<gh_stars>1-10
#!/usr/bin/env python3
#============================================================
# IMSNG Pipeline
# => Processing
# Data Monitoring => Processing => Transient Search
#============================================================
#%%
# Library
#------------------------------------------------------------
import os
import glob
import numpy as np
import warnings
warnings.filterwarnings(action='ignore')
import time
st = time.time()
start_localtime = time.strftime('%Y-%m-%d %H:%M:%S (%Z)', time.localtime())
import sys
sys.path.append('/home/paek/imsngpy')
# IMSNGpy modules
from tableutil import getccdinfo
from preprocess import *
from misc import *
from phot import *
from util import *
#
# Astropy
from astropy.io import ascii
from astropy.io import fits
from astropy.table import Table
from astropy import units as u
from astropy.coordinates import SkyCoord
from ccdproc import ImageFileCollection
from astropy.time import Time
from astropy.nddata import CCDData
# Multiprocess tools
from itertools import repeat
import multiprocessing
#------------------------------------------------------------
# My library
# from tableutil import *
#============================================================
# USER SETTING
#============================================================
# Input
#------------------------------------------------------------
"""
# [0] Folder to process
try:
path_raw = sys.argv[1]
except:
path_raw = input('''# Data folder to process : ''')
# [1] Observatory_ccd
try:
obs = (sys.argv[2]).upper()
except:
obs = input('''# Observatory(_ccd) to run
--------------------
LOAO
DOAO
SOAO
CBNUO
KCT_ASI1600MM
KCT_STX16803
KHAO
RASA36
LSGT
---------------------
:''').upper()
print('# Observatory : {}'.format(obs.upper()))
# [3] The number of cores
try:
ncore = int(sys.argv[3])
except:
ncore = 8
"""
# Test setting
# path_raw = '/data6/obsdata/LOAO/1994_1026'
# path_raw = '/data6/obsdata/LOAO/1994_1003'
# path_raw = '/data6/obsdata/LOAO/1969_0119'
# path_raw = '/data6/obsdata/LOAO/test'
# path_raw = '/data6/obsdata/LOAO/test_fast'
path_raw = '/data6/obsdata/LOAO/2021_1227'
obs = 'LOAO'
fast_mode4mframe = True
ncore = 4
#------------------------------------------------------------
# PATH
#------------------------------------------------------------
path_factory = '/data3/paek/factory'
path_gal = '/data6/IMSNG/IMSNGgalaxies'
# path_config = '/home/paek/config'
path_config = '/home/paek/imsngpy/config'
path_log = '/home/paek/log'
path_bkg = '/data6/bkgdata'
path_table = '/home/paek/imsngpy/table'
path_gppy = '/home/paek/imsngpy'
#------------------------------------------------------------
path_mframe = f'{path_factory}/master_frames'
path_ref = f'{path_factory}/ref_frames/{obs.upper()}'
path_obs = f'{path_factory}/{obs.lower()}'
path_default_gphot = f'{path_config}/gphot.{obs.lower()}.config'
#------------------------------------------------------------
path_save = f'{path_bkg}/{obs.upper()}'
#------------------------------------------------------------
ccddat = f'{path_table}/obs.dat'
#------------------------------------------------------------
# Codes
path_phot = f'{path_gppy}/imsngpy/gpphot.py'
path_find = f'{path_gppy}/imsngpy/gpsearch.py'
#------------------------------------------------------------
# Table
logtbl = ascii.read(f'{path_log}/{obs.lower()}.log')
hdrtbl = ascii.read(f'{path_table}/changehdr.dat')
alltbl = ascii.read(f'{path_table}/alltarget.dat')
frgtbl = ascii.read(f'{path_table}/fringe.dat')
# ccdtbl = ascii.read(f'{path_table}/ccd.dat')
ccdtbl = ascii.read(f'{path_table}/ccd.tsv')
#------------------------------------------------------------
path_data = f'{path_obs}/{os.path.basename(path_raw)}'
print(f"""{'-'*60}\n#\tCOPY DATA\n{'-'*60}""")
# Remove former data
if os.path.exists(path_data):
rmcom = f'rm -rf {path_data}'
print(rmcom)
os.system(rmcom)
# Copy the raw data to the factory directory
cpcom = f'cp -r {path_raw} {path_data}'
print(cpcom)
os.system(cpcom)
#%%
# Identify CCD
ic0 = ImageFileCollection(path_data, keywords='*')
'''
print(f"""{'-'*60}\n#\tIDENTIFY CCD\n{'-'*60}""")
ic0 = ImageFileCollection(path_data, keywords='*')
for key, val, suf, ccd in zip((ccdtbl['key'][ccdtbl['obs']==obs]), (ccdtbl['value'][ccdtbl['obs']==obs]), (ccdtbl['suffix'][ccdtbl['obs']==obs]), (ccdtbl['ccd'][ccdtbl['obs']==obs])):
if (key.lower() in ic0.keywords) & (val == ic0.summary[key.lower()][0]):
ccdkey = key
ccdval = val
ccdtype = ccd
if suf.mask == True:
# No suffix
suffix = ''
obsccd = f'{obs}'
else:
suffix = suf
obsccd = f'{obs}_{suffix}'
print(f'OBSERVAT : {obs}\nCCD KEYWORD : {key}\nCCD HEADER VALUE : {val}\nCCD NAME : {ccdtype}\nSUFFIX : {suffix}\n==> OBS_CCD : {obsccd}')
'''
ccdkey, ccdval, ccdtype, obsccd = identify_ccdinfo(ic0, obs, ccdtbl)
# CCD INFO
indx_ccd = np.where(
(ccdtbl['obs']==obs) &
(ccdtbl['key']==ccdkey) &
(ccdtbl['value']==ccdval)
)
print(f"""{'-'*60}\n#\tCCD INFO\n{'-'*60}""")
gain = ccdtbl['gain'][indx_ccd][0]*(u.electron/u.adu)
rdnoise = ccdtbl['readnoise'][indx_ccd][0]*(u.electron)
pixscale = ccdtbl['pixelscale'][indx_ccd][0]*(u.arcsec/u.pixel)
fov = ccdtbl['foveff'][indx_ccd][0]*(u.arcmin)
print(f"""GAIN : {gain}\nREAD NOISE : {rdnoise}\nPIXEL SCALE : {pixscale}\nEffective FoV : {fov}""")
#------------------------------------------------------------
#%%
# Header correction
#------------------------------------------------------------
comment = f"""{'='*60}\n#\tHEADER CORRECTION\n{'='*60}"""
print(comment)
for i, inim in enumerate(ic0.summary['file']):
image = f'{path_data}/{inim}'
# CCD Type
'''
if i == 0:
for key in set(ccdtbl['key']):
if key.lower() in ic0.summary.keys():
ccdtype = ccdtbl[
(ccdtbl['value'] == ic0.summary[key.lower()][i]) &
(ccdtbl['obs'] == obs)
]['ccd'].item()
else:
ccdtype = 'UNKNOWN'
fits.setval(image, 'CCDNAME', value=ccdtype)
fits.setval(image, 'OBSERVAT', value=obs)'''
fits.setval(image, 'PATHPRC', value=path_data, comment='Path where data is processed')
fits.setval(image, 'CCDNAME', value=ccdtype)
fits.setval(image, 'OBSERVAT', value=obs)
fits.setval(image, 'OBSCCD', value=obsccd)
# Correction with table
for key, val, nval in zip(hdrtbl['key'], hdrtbl['val'], hdrtbl['newval']):
if ic0.summary[key.lower()][i] == val:
print(f'{inim} - {key} : {val} --> {nval}')
fits.setval(image, key, value=nval)
# DATE-OBS, JD, MJD
if 'T' not in ic0.summary['date-obs'][i]:
dateobs = f"{ic0.summary['date-obs']}'T'{ic0.summary['time-obs']}"
fits.setval(image, 'date-obs', value=dateobs)
del dateobs
else:
pass
t = Time(ic0.summary['date-obs'][i], format='isot')
fits.setval(image, 'JD', value=t.jd)
fits.setval(image, 'MJD', value=t.mjd)
del t
# OBJECT name
if 'ngc' in ic0.summary['object'][i]:
objectname = ic0.summary['object'][i]
while len(objectname)<7:
head = objectname[0:3]
tail = objectname[3:]
tail = f'0{tail}'
objectname = f'{head}{tail}'
fits.setval(image, 'OBJECT', value=objectname.upper())
del objectname
del head
del tail
print()
ic1 = ImageFileCollection(path_data, keywords='*')
t_med = np.median(ic1.filter(imagetyp='object').summary['jd']) # [JD]
#------------------------------------------------------------
#%%
# Object Master Table
#
# Write the status of processing
# Pointing the original filename and updated filename
# Each dtype is set to 'strtype' variable
#------------------------------------------------------------
strtype = 'U300'
omtbl = Table()
objtypelist = []
for obj in ic1.filter(imagetyp='object').summary['object']:
if obj in alltbl['obj']:
objtypelist.append('IMSNG')
elif 'GRB' in obj:
objtypelist.append('GRB')
elif ('GECKO' in obj) | ('GCK' in obj):
objtypelist.append('GECKO')
else:
objtypelist.append('NON-IMSNG')
omtbl['objtype'] = objtypelist
omtbl['raw'] = [inim for inim in ic1.filter(imagetyp='object').files]
omtbl['now'] = omtbl['raw']
omtbl['preprocess'] = ''
omtbl['defringe'] = ''
omtbl['cosmic_ray_removal'] = ''
omtbl['astrometry'] = ''
omtbl['final'] = [f'{path_data}/{fnamechange(inim)}' for inim in ic1.filter(imagetyp='object').files]
for key in omtbl.keys(): omtbl[key] = omtbl[key].astype(strtype)
#============================================================
#%%
# Pre-processing
#------------------------------------------------------------
print(f"""{'='*60}\n#\tPREPARE FOR PRE-PROCESS\n{'='*60}""")
mframe = dict()
#------------------------------------------------------------
# BIAS
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tBIAS\n{'-'*60}""")
if 'bias' in ic1.summary['imagetyp']:
biaslist = ic1.filter(imagetyp='bias').files
print(f"""{len(biaslist)} bias frames --> master bias""")
biasdata, biasim = master_bias(biaslist)
mframe['bias'] = biasdata
del biasdata
path_bias = f'{path_mframe}/{obs}/zero'
cpcom = f"cp {biasim} {path_bias}"
print(cpcom)
# os.system(cpcom)
else:
print('No bias frame. Borrow from previous data.')
mftype = 'zero'
biasim = get_nearest_bias(
mftype=mftype,
t_med=t_med,
ccdkey=ccdkey,
ccdval=ccdval,
keyword=f'{path_mframe}/{obs}/{mftype}/????????-{mftype}.fits',
keywords=[ccdkey.lower(), 'jd'],
fast_mode4mframe=True,
)
print(f'Borrow {os.path.basename(biasim)}')
mframe['bias'] = CCDData(fits.getdata(biasim), unit="adu", meta=fits.getheader(biasim))
'''
mftype = 'zero'
if fast_mode4mframe == True:
biaslist = np.array(
[os.path.basename(inim) for inim in sorted(glob.glob(f'{path_mframe}/{obs}/{mftype}/????????-{mftype}.fits'))]
)
deltarr = np.array(
[np.abs(date2jd(inim.split('-')[0]).jd-t_med) for inim in biaslist]
)
indx_bias = np.where(deltarr == np.min(deltarr))
biasim = f"{path_mframe}/{obs}/{mftype}/{biaslist[indx_bias].item()}"
else:
ic_bias = ImageFileCollection(
location=f'{path_mframe}/{obs}/{mftype}',
keywords=[
ccdkey.lower(),
# 'date-obs',
# 'imagetyp',
'jd',
# 'mjd',
]
)
ic_bias_avail = ic_bias.summary[
(ic_bias.summary['jd'].mask == False) &
(ic_bias.summary[ccdkey.lower()]==ccdval)
]
biaslist = ic_bias_avail['file']
deltarr = np.array(np.abs(ic_bias_avail['jd']-t_med))
indx_bias = np.where(deltarr == np.min(deltarr))
biasim = f"{path_mframe}/{obs}/{mftype}/{biaslist[indx_bias].item()}"
del ic_bias_avail
# Delete variables
del mftype
del deltarr
del indx_bias
del biasim'''
#------------------------------------------------------------
#%%
# DARK
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tDARK\n{'-'*60}""")
darkframe = dict()
if 'dark' in ic1.summary['imagetyp']:
darkexptime = np.array(list(set(ic1.filter(imagetyp='dark').summary['exptime'])))
for exptime in darkexptime:
darkdata, darkim = master_dark(ic1.filter(imagetyp='dark', exptime=exptime).files, mbias=mframe['bias'])
# darkframe[f'{int(exptime)}'] = master_dark(ic1.filter(imagetyp='dark', exptime=exptime).files, mbias=mframe['bias'])
darkframe[f'{int(exptime)}'] = darkdata
del darkdata
path_dark = f'{path_mframe}/{obs}/dark'
cpcom = f"cp {darkim} {path_dark}"
print(cpcom)
# os.system(cpcom)
else:
print('No dark frame. Borrow from previous data.')
mftype = 'dark'
darkexptime = []
for exptime in set(ic1.filter(imagetyp='object').summary['exptime']):
mftype = 'dark'
darkim, nexptime = get_nearest_dark(
t_med=t_med,
keyword=f'{path_mframe}/{obs}/{mftype}/*-????????-{mftype}.fits',
exptime=exptime,
ccdkey=ccdkey,
ccdval=ccdval,
keywords=[ccdkey.lower(), 'jd', 'exptime'],
fast_mode4mframe=True,
)
print(f'Borrow {os.path.basename(darkim)}')
# darkframe[f'{int(nexptime)}'] = CCDData(fits.getdata(darkim), unit="adu", meta=fits.getheader(darkim))
if str(nexptime) not in darkframe.keys():
darkframe[f'{int(nexptime)}'] = CCDData(fits.getdata(darkim), unit="adu", meta=fits.getheader(darkim))
darkexptime.append(int(nexptime))
else:
print(f'No available dark frame')
pass
darkexptime = np.array(darkexptime)
mframe['dark'] = darkframe
del darkframe
#------------------------------------------------------------
#%%
# FLAT
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tFLAT\n{'-'*60}""")
flatframe = dict()
# Filter list
if 'object' in ic1.summary['imagetyp']:
objfilterlist = list(ic1.filter(imagetyp='object').summary['filter'])
print(f'OBJECT FILTERS : {list(set(objfilterlist))}')
else:
objfilterlist = []
if 'flat' in ic1.summary['imagetyp']:
flatfilterlist = list(ic1.filter(imagetyp='flat').summary['filter'])
print(f'FLAT FILTERS : {list(set(flatfilterlist))}')
else:
flatfilterlist = []
filterlist = set(objfilterlist+flatfilterlist)
print(f'==> ALL FILTERS : {list(set(filterlist))}')
if 'flat' in ic1.summary['imagetyp']:
# Dark exptime should be corrected!
indx_mindark = np.where(darkexptime == np.min(darkexptime))
mdark = mframe['dark'][str(int(darkexptime[indx_mindark].item()))]
for filte in filterlist:
# print(filte)
flatdata, flatim = master_flat(ic1.filter(imagetyp='flat', filter=filte).files, mbias=mframe['bias'], mdark=mdark, filte=filte)
# flatframe[filte] = master_flat(ic1.filter(imagetyp='flat', filter=filte).files, mbias=mframe['bias'], mdark=mdark, filte=filte)
flatframe[filte] = flatdata
path_flat = f'{path_mframe}/{obs}/flat'
cpcom = f"cp {flatim} {path_flat}"
print(cpcom)
# os.system(cpcom)
del flatdata
del mdark
else:
print('No Flat frame. Borrow from previous data.')
mftype = 'flat'
for filte in set(ic1.filter(imagetyp='object').summary['filter']):
flatim = get_nearest_flat(
t_med,
keyword=f'{path_mframe}/{obs}/{mftype}/????????-n{filte}.fits',
filte=filte,
ccdkey=ccdkey,
ccdval=ccdval,
keywords=[ccdkey.lower(), 'imagetyp', 'jd', 'filter',],
fast_mode4mframe=True,
)
print(f'Borrow {os.path.basename(flatim)}')
flatframe[filte] = CCDData(fits.getdata(flatim), unit="adu", meta=fits.getheader(flatim))
mframe['flat'] = flatframe
del flatframe
#------------------------------------------------------------
#%%
# OBJECT Correction
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tOBJECT CORRECTION ({len(ic1.filter(imagetyp='object').files)})\n{'-'*60}""")
# Function for multi-process
def obj_process4mp(inim, newim, filte, exptime, darkexptime, rdnoise, mframe,):
'''
Routine for multiprocess
'''
	# Find the closest exposure time between the dark frames and the object frame
indx_closet = np.where(
np.abs(exptime-darkexptime) == np.min(np.abs(exptime-darkexptime))
)
bestdarkexptime = darkexptime[indx_closet].item()
print(f"{os.path.basename(inim)} {exptime} sec in {filte}-band <-- (scaled) DARK {int(darkexptime[indx_closet].item())} sec")
# Process
nccd = obj_process(
inim=inim,
# gain=ccdinfo['gain'],
gain=None,
readnoise=rdnoise,
mbias=mframe['bias'],
mdark=mframe['dark'][str(int(bestdarkexptime))],
mflat=mframe['flat'][filte],
)
# nccd.write(f'{os.path.dirname(inim)}/fdz{os.path.basename(inim)}', overwrite=True)
nccd.write(newim, overwrite=True)
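# Illustrative note for obj_process4mp above (added, not from the original script):
# with darkexptime = [30, 60, 120], a 100 s object frame selects the 120 s master
# dark (smallest exposure-time difference), which the log line reports as
# "(scaled) DARK 120 sec".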
# Run with multi-process
fdzimlist = add_prefix(ic1.filter(imagetyp='object').files, 'fdz')
if __name__ == '__main__':
# Fixed the number of cores (=4)
with multiprocessing.Pool(processes=4) as pool:
results = pool.starmap(
obj_process4mp,
zip(
ic1.filter(imagetyp='object').files,
fdzimlist,
ic1.filter(imagetyp='object').summary['filter'],
ic1.filter(imagetyp='object').summary['exptime'],
repeat(darkexptime),
repeat(rdnoise),
repeat(mframe),
)
)
# Image collection for pre-processed image
fdzic = ImageFileCollection(f'{path_data}', glob_include='fdzobj*', keywords='*')
omtbl['preprocess'] = fdzimlist
omtbl['now'] = fdzimlist
del fdzimlist
for key in omtbl.keys(): omtbl[key] = omtbl[key].astype(strtype)
#------------------------------------------------------------
#%%
# Defringe
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tFRINGE CORRECTION\n{'-'*60}""")
# for filte in set(frgtbl[(frgtbl['ccd'] == ccdtype) & (frgtbl['obs'] == obs)]['filter']):
for filte in set(fdzic.summary['filter']):
frgtbl_ = frgtbl[
(frgtbl['filter']==filte) &
(frgtbl['ccd'] == ccdtype) &
(frgtbl['obs'] == obs)
]
if len(frgtbl_) > 0:
if __name__ == '__main__':
with multiprocessing.Pool(processes=ncore) as pool:
results = pool.starmap(
defringe,
zip(
fdzic.filter(filter=filte).files,
repeat(frgtbl_['image'][0]),
add_prefix(fdzic.filter(filter=filte).files, 'df'),
repeat(frgtbl_['table'][0]),
repeat(10)
)
)
for i, inim in enumerate(fdzic.filter(filter=filte).files):
indx_tmp = np.where(omtbl['now'] == inim)
omtbl['now'][indx_tmp] = results[i]
omtbl['defringe'][indx_tmp] = results[i]
del indx_tmp
for key in omtbl.keys(): omtbl[key] = omtbl[key].astype(strtype)
else:
print(f'{filte} : N/A')
#------------------------------------------------------------
#%%
# FIX (TMP)
#------------------------------------------------------------
#------------------------------------------------------------
# COSMIC-RAY REMOVAL
#------------------------------------------------------------
# Seeing measurement w/ simple SE
prefix = 'simple'
path_conf = f'{path_config}/{prefix}.sex'
path_param = f'{path_config}/{prefix}.param'
path_nnw = f'{path_config}/{prefix}.nnw'
path_conv = f'{path_config}/{prefix}.conv'
# Single
'''
inim = omtbl['now'][0]
seeing, peeing = get_seeing(
inim,
gain,
pixscale,
fov,
path_conf,
path_param,
path_conv,
path_nnw,
seeing_assume=3*u.arcsec,
frac=0.68,
n_min_src=5
)
'''
if __name__ == '__main__':
# Seeing measurement
print(f"""{'-'*60}\n#\tSEEING MEASUREMENT\n{'-'*60}""")
with multiprocessing.Pool(processes=ncore) as pool:
results = pool.starmap(
get_seeing,
zip(
omtbl['now'],
repeat(gain),
repeat(pixscale),
repeat(fov),
repeat(path_conf),
repeat(path_param),
repeat(path_conv),
repeat(path_nnw),
repeat(3*u.arcsec),
repeat(0.68),
repeat(5),
)
)
print('DONE')
# Cosmic-ray removal
print(f"""{'-'*60}\n#\tCOSMIC-RAY REMOVAL\n{'-'*60}""")
cleantype = 'medmask'
if __name__ == '__main__':
with multiprocessing.Pool(processes=ncore) as pool:
results = pool.starmap(
cosmic_ray_removal,
zip(
omtbl['now'],
add_prefix(omtbl['now'], 'cr'),
add_suffix(omtbl['final'], 'mask'),
repeat(gain),
repeat(rdnoise),
[r[0] for r in results],
repeat(cleantype)
)
)
for i, inim in enumerate(omtbl['now']):
tmpim = add_prefix(omtbl['now'], 'cr')[i]
indx_tmp = np.where(omtbl['now'] == inim)
omtbl['now'][indx_tmp] = tmpim
omtbl['cosmic_ray_removal'][indx_tmp] = tmpim
del tmpim
del indx_tmp
for key in omtbl.keys(): omtbl[key] = omtbl[key].astype(strtype)
#------------------------------------------------------------
#%%
# ASTROMETRY
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tASTROMETRY\n{'-'*60}""")
# Classification IMSNG and non-IMSNG target
frac = 0.10 # Pixel scale interval ratio
tralist, tdeclist = [], []
for i, inim in enumerate(omtbl['now']):
if fits.getheader(inim)['OBJECT'] in alltbl['obj']:
indx_obj = np.where(fits.getheader(inim)['OBJECT']==alltbl['obj'])
# tra, tdec = alltbl['ra'][indx_obj].item(), alltbl['dec'][indx_obj].item()
tra, tdec = alltbl['ra'][indx_obj][0], alltbl['dec'][indx_obj][0]
else:
tra, tdec = None, None
tralist.append(tra)
tdeclist.append(tdec)
# Multi-processing
if __name__ == '__main__':
with multiprocessing.Pool(processes=ncore) as pool:
results = pool.starmap(
astrometry,
zip(
omtbl['now'],
add_prefix(omtbl['now'], 'a'),
repeat(pixscale),
repeat(frac),
tralist,
tdeclist,
repeat(fov),
repeat(30)
)
)
# Check astrometry results
print(f"""{'-'*60}\n#\tCHECK ASTROMETRY RESULTS\n{'-'*60}""")
c_all = SkyCoord(alltbl['ra'], alltbl['dec'], unit=(u.hourangle, u.deg))
for i, inim in enumerate(add_prefix(omtbl['now'], 'a')):
hdr = fits.getheader(inim)
if os.path.exists(inim):
print(f'[{i}] {os.path.basename(inim)} : Astrometry Success')
c = SkyCoord(hdr['CRVAL1'], hdr['CRVAL2'], unit=u.deg)
indx_tmp, sep, _ = c.match_to_catalog_sky(c_all)
ra_offset, dec_offset = c.spherical_offsets_to(c_all)
# Put pointing offset info.
fits.setval(inim, keyword='CNTSEP', value=round(sep[0].arcmin, 3), comment='Offset between pointing and galaxy position [arcmin]')
fits.setval(inim, keyword='CNTRAOFF', value=round(ra_offset.arcmin[indx_tmp], 3), comment='RA offset between pointing and galaxy position [arcmin]')
fits.setval(inim, keyword='CNTDEOFF', value=round(dec_offset.arcmin[indx_tmp], 3), comment='Dec offset between pointing and galaxy position [arcmin]')
print('\tCalculate accuracy')
astrometry_analysis(
inim=omtbl['now'][i],
incor=f"{os.path.splitext(omtbl['now'][i])[0]}.corr",
# outpng=f'{os.path.splitext(inim)[0]}.astrm.png',
# outdat=f'{os.path.splitext(inim)[0]}.astrm.dat'
outpng=f"{os.path.splitext(omtbl['final'][i])[0]}.astrm.png",
outdat=f"{os.path.splitext(omtbl['final'][i])[0]}.astrm.dat"
)
# Update
# omtbl['now'][i] = inim
else:
print(f'{i} {os.path.basename(inim)} : Astrometry Fail')
# Suspect of wrong object name
if omtbl['objtype'][i] == 'IMSNG':
print('\tThis is IMSNG target. Start re-astronomy.')
# Retry astrometry
astrometry(
inim=omtbl['now'][i],
outim=add_prefix(omtbl['now'], 'a')[i],
pixscale=pixscale,
frac=frac,
cpulimit=60
)
if os.path.exists(inim):
print('\tRe-astrometry SUCCESS!')
c = SkyCoord(hdr['CRVAL1'], hdr['CRVAL2'], unit=u.deg)
indx_tmp, sep, _ = c.match_to_catalog_sky(c_all)
if sep[0] < fov:
newobj = alltbl['obj'][indx_tmp]
print(f"\tCorrect OBJECT HEADER, {hdr['OBJECT']} --> {newobj} position.")
fits.setval(inim, keyword='OBJECT', value=newobj)
# Put pointing offset info.
fits.setval(inim, keyword='CENTSEP', value=round(sep[0].arcmin, 3), comment='Offset between pointing and galaxy position')
ra_offset, dec_offset = c.spherical_offsets_to(c_all)
fits.setval(inim, keyword='CNTSEP', value=round(sep[0].arcmin, 3), comment='Offset between pointing and galaxy position [arcmin]')
fits.setval(inim, keyword='CNTRAOFF', value=round(ra_offset.arcmin, 3), comment='RA offset between pointing and galaxy position [arcmin]')
fits.setval(inim, keyword='CNTDEOFF', value=round(dec_offset.arcmin, 3), comment='Dec offset between pointing and galaxy position [arcmin]')
astrometry_analysis(
inim=omtbl['now'][i],
incor=f"{os.path.splitext(omtbl['now'][i])[0]}.corr",
outpng=f"{os.path.splitext(omtbl['final'][i])[0]}.astrm.png",
outdat=f"{os.path.splitext(omtbl['final'][i])[0]}.astrm.dat"
)
# omtbl['now'][i] = inim
pass
else:
print('\tRe-astrometry Fail...')
pass
else:
print('\tNo other actions')
del hdr
#
for i, inim in enumerate(omtbl['now']):
tmpim = add_prefix(omtbl['now'], 'a')[i]
if os.path.exists(tmpim):
indx_tmp = np.where(omtbl['now'] == inim)
omtbl['now'][indx_tmp] = tmpim
omtbl['astrometry'][indx_tmp] = tmpim
del indx_tmp
del tmpim
for key in omtbl.keys(): omtbl[key] = omtbl[key].astype(strtype)
#------------------------------------------------------------
#%%
# File name change
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tFILENAME CHANGE to IMSNG FORMAT\n{'-'*60}""")
for i, inim in enumerate(omtbl['now']):
newim = f"{omtbl['final'][i]}"
com = f'cp {inim} {newim}'
os.system(com)
print(f'[{i}] {os.path.basename(inim)} --> {os.path.basename(newim)}')
fits.setval(newim, keyword='IMNAME', value=os.path.basename(newim), comment='Formatted file name by gpPy process')
ic_cal = ImageFileCollection(path_data, glob_include='Calib-*.f*', glob_exclude='*mask*')
#------------------------------------------------------------
#%%
# Photometry
#------------------------------------------------------------
print(f"""{'='*60}\n#\tPHOTOMETRY FOR SINGLE IMAGEs\n{'-'*60}""")
photcom = f"python {path_phot} '{path_data}/Calib-*0.fits' {ncore}"
print(photcom)
os.system(photcom)
#------------------------------------------------------------
#%%
# IMAGE COMBINE
#------------------------------------------------------------
print(f"""{'='*60}\n#\tPHOTOMETRY FOR COMBINED IMAGEs\n{'-'*60}""")
def group_images(objtbl, tfrac):
delt = np.array(objtbl['jd'] - np.min(objtbl['jd']))*(24*60*60) # [days] --> [sec]
tsub = delt - (np.cumsum(objtbl['exptime']*tfrac) - objtbl['exptime'][0])
indx = np.where(tsub < 0)
indx_inv = np.where(tsub > 0)
return indx, indx_inv
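# Illustrative sketch for group_images (added, not from the original script): with
# tfrac = 1.5 and three back-to-back 60 s exposures starting at t = 0, 70 and 140 s,
# tsub stays negative and all three fall into indx_com (one group); an exposure taken
# 1000 s later gives tsub > 0 and is returned in indx_inv, to be grouped on a later
# pass of the while-loop further below.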
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tGROUP IMAGES\n{'-'*60}""")
tfrac = 1.5 # Time fraction for grouping
comimlist = []
for obj in set(ic_cal.summary['object']):
for filte in set(ic_cal.filter(object=obj).summary['filter']):
print(f"{len(ic_cal.filter(object=obj).summary['filter'])} images for {obj} in {filte}")
print('-'*60)
ic_obj = ic_cal.filter(object=obj, filter=filte)
objtbl = ic_obj.summary
indx_com, indx_inv = group_images(objtbl, tfrac)
comimlist.append(objtbl[indx_com])
# Numbering
n=0
for inim in objtbl['file'][indx_com]:
print(f'[{n}] {os.path.basename(inim)}')
n+=1
print('-'*60)
while len(indx_inv[0]):
objtbl = objtbl[indx_inv]
indx_com, indx_inv = group_images(objtbl, tfrac)
comimlist.append(objtbl[indx_com])
for inim in objtbl['file'][indx_com]:
print(f'[{n}] {os.path.basename(inim)}')
n+=1
print('-'*60)
#------------------------------------------------------------
# %%
print(f"""{'-'*60}\n#\tALIGN IMAGES AND COMBINE\n{'-'*60}""")
cimlist = []
for i in range(len(comimlist)):
# Alignment
## Target image
imtbl = comimlist[i]
indx_ref = np.where(imtbl['seeing']==np.max(imtbl['seeing']))
# tgtim = imtbl['file'][imtbl['file']==imtbl['file'][indx_ref]][0]
tgtim = imtbl['file'][indx_ref][0]
## Source image
srcimlist = list(imtbl['file'][imtbl['file']!=tgtim])
# Combine
aligned_imlist = [CCDData(fits.getdata(tgtim), unit='adu', header=fits.getheader(tgtim))]
for srcim in srcimlist: aligned_imlist.append(align_astroalign(srcim, tgtim, zero=False))
comim = imcombine_ccddata(aligned_imlist, fluxscale=True, zpkey='ZP', nref=0, imlist=None,)
# comim = imcombine_ccddata(aligned_imlist, fluxscale=False, zpkey='ZP', nref=0, imlist=None,)
cimlist.append(comim)
# Seeing measurement for combined image
if __name__ == '__main__':
print(f"""{'-'*60}\n#\tSEEING MEASUREMENT\n{'-'*60}""")
with multiprocessing.Pool(processes=ncore) as pool:
results = pool.starmap(
get_seeing,
zip(
cimlist,
repeat(gain),
repeat(pixscale),
repeat(fov),
repeat(path_conf),
repeat(path_param),
repeat(path_conv),
repeat(path_nnw),
repeat(3*u.arcsec),
repeat(0.68),
repeat(5),
)
)
print('DONE')
#------------------------------------------------------------
# %%
# Photometry for combined image
#------------------------------------------------------------
photcom = f"python {path_phot} '{path_data}/Calib-*com.fits' {ncore}"
print(photcom)
os.system(photcom)
#------------------------------------------------------------
# %%
# Subtraction
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tSUBTRACTION\n{'-'*60}""")
tstbl = Table()
tstbl['sci'] = cimlist
hcimlist = []
hdimlist = []
for n, tgtim in enumerate(cimlist):
subtraction_routine(tgtim, path_ref)
outim=f"{os.path.dirname(tgtim)}/hd{os.path.basename(tgtim)}"
outconvim=f"{os.path.dirname(tgtim)}/hc{os.path.basename(tgtim)}"
if os.path.exists(outim):
print(f"[{n}] {os.path.basename(tgtim)} : Subtraction Success")
hcimlist.append(outconvim)
hdimlist.append(outim)
else:
print(f"[{n}] {os.path.basename(tgtim)} : Subtraction Fail")
tstbl['ref'] = hcimlist
tstbl['sub'] = hdimlist
tstablename = f"{path_data}/transient_search.ecsv"
tstbl.write(tstablename, format='ascii.ecsv')
tscom = f"python {path_find} '{tstablename}' {ncore}"
print(tscom)
os.system(tscom)
print(f"{(time.time()-st)/60} min")
|
StarcoderdataPython
|
11390836
|
<reponame>nolim1t/specter-diy<filename>demo_apps/__init__.py
__all__ = [
'helloworld',
]
|
StarcoderdataPython
|
4963760
|
<reponame>Rijksmuseum-Voice-Inference/voice-to-voice-translation<filename>scripts/extract_features_for_merlin.py
import os
import sys
import shutil
import glob
import time
import multiprocessing as mp
import numpy as np
import wave
if len(sys.argv)!=5:
print("Usage: ")
print("python extract_features_for_merlin.py <path_to_merlin_dir> <path_to_wav_dir> <path_to_feat_dir> <sampling rate>")
sys.exit(1)
# top merlin directory
merlin_dir = sys.argv[1]
# input audio directory
wav_dir = sys.argv[2]
# Output features directory
out_dir = sys.argv[3]
# initializations
fs = int(sys.argv[4])
# tools directory
world = os.path.join(merlin_dir, "tools/bin/WORLD")
sptk = os.path.join(merlin_dir, "tools/bin/SPTK-3.9")
reaper = os.path.join(merlin_dir, "tools/bin/REAPER")
sp_dir = os.path.join(out_dir, 'sp' )
mgc_dir = os.path.join(out_dir, 'mgc')
ap_dir = os.path.join(out_dir, 'ap' )
bap_dir = os.path.join(out_dir, 'bap')
f0_dir = os.path.join(out_dir, 'f0' )
lf0_dir = os.path.join(out_dir, 'lf0')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if not os.path.exists(sp_dir):
os.mkdir(sp_dir)
if not os.path.exists(mgc_dir):
os.mkdir(mgc_dir)
if not os.path.exists(bap_dir):
os.mkdir(bap_dir)
if not os.path.exists(f0_dir):
os.mkdir(f0_dir)
if not os.path.exists(lf0_dir):
os.mkdir(lf0_dir)
if fs == 16000:
nFFTHalf = 1024
alpha = 0.58
elif fs == 22050:
nFFTHalf = 1024
alpha = 0.65
elif fs == 44100:
nFFTHalf = 2048
alpha = 0.76
elif fs == 48000:
nFFTHalf = 2048
alpha = 0.77
else:
print("As of now, we don't support %d Hz sampling rate." %(fs))
print("Please consider either downsampling to 16000 Hz or upsampling to 48000 Hz")
sys.exit(1)
#bap order depends on sampling rate.
mcsize=59
b_use_reaper=True # If True: Reaper is used for f0 extraction. If False: The vocoder is used for f0 extraction.
def get_wav_filelist(wav_dir):
wav_files = []
for file in os.listdir(wav_dir):
whole_filepath = os.path.join(wav_dir,file)
if os.path.isfile(whole_filepath) and str(whole_filepath).endswith(".wav"):
# dont add stereo files
if wave.open(whole_filepath, 'r').getnchannels() > 1: continue
wav_files.append(whole_filepath)
elif os.path.isdir(whole_filepath):
wav_files += get_wav_filelist(whole_filepath)
wav_files.sort()
return wav_files
def read_binfile(filename, dim=60, dtype=np.float64):
'''
Reads binary file into numpy array.
'''
fid = open(filename, 'rb')
v_data = np.fromfile(fid, dtype=dtype)
fid.close()
if np.mod(v_data.size, dim) != 0:
raise ValueError('Dimension provided not compatible with file size.')
m_data = v_data.reshape((-1, dim)).astype('float64') # This is to keep compatibility with numpy default dtype.
m_data = np.squeeze(m_data)
return m_data
def write_binfile(m_data, filename, dtype=np.float64):
'''
Writes numpy array into binary file.
'''
m_data = np.array(m_data, dtype)
fid = open(filename, 'wb')
m_data.tofile(fid)
fid.close()
return
def read_reaper_f0_file(est_file, skiprows=7):
'''
Reads f0 track into numpy array from EST file generated by REAPER.
'''
v_f0 = np.loadtxt(est_file, skiprows=skiprows, usecols=[2])
v_f0[v_f0<0] = 0
return v_f0
def reaper_f0_extract(in_wavfile, f0_file_ref, f0_file_out, frame_shift_ms=5.0):
'''
Extracts f0 track using REAPER.
To keep consistency with the vocoder, it also fixes for the difference in number
of frames between the REAPER f0 track and the acoustic parameters extracted by the vocoder.
f0_file_ref: f0 extracted by the vocoder. It is used as a reference to fix the number of frames, as explained.
'''
# Run REAPER:
print("Running REAPER f0 extraction...")
cmd = "%s -a -s -x 400 -m 50 -u %1.4f -i %s -f %s" % (os.path.join(reaper, 'reaper'), frame_shift_ms / 1000.0, in_wavfile, f0_file_out + "_reaper")
os.system(cmd)
# Protection - number of frames:
v_f0_ref = read_binfile(f0_file_ref, dim=1)
v_f0 = read_reaper_f0_file(f0_file_out + "_reaper")
frm_diff = v_f0.size - v_f0_ref.size
if frm_diff<0:
v_f0 = np.r_[ v_f0, np.zeros(-frm_diff) + v_f0[-1]]
if frm_diff>0:
v_f0 = v_f0[:-frm_diff]
# Save f0 file:
write_binfile(v_f0, f0_file_out)
return
def process(filename):
'''
The function decomposes a wav file into F0, mel-cepstral coefficients, and aperiodicity
:param filename: path to wav file
:return: .lf0, .mgc and .bap files
'''
file_id = os.path.basename(filename).split(".")[0]
print('\n' + file_id)
### WORLD ANALYSIS -- extract vocoder parameters ###
### extract sp, ap ###
f0_file = os.path.join(f0_dir, file_id + '.f0')
f0_world_file = f0_file
if b_use_reaper:
f0_world_file = f0_file + "_world"
world_analysis_cmd = "%s %s %s %s %s" % (os.path.join(world, 'analysis'), \
filename,
f0_world_file, \
os.path.join(sp_dir, file_id + '.sp'), \
os.path.join(bap_dir, file_id + '.bapd'))
os.system(world_analysis_cmd)
### Extract f0 using reaper ###
if b_use_reaper:
reaper_f0_extract(filename, f0_world_file, f0_file)
### convert f0 to lf0 ###
sptk_x2x_da_cmd = "%s +da %s > %s" % (os.path.join(sptk, 'x2x'), f0_file, \
os.path.join(f0_dir, file_id + '.f0a'))
os.system(sptk_x2x_da_cmd)
sptk_x2x_af_cmd = "%s +af %s | %s > %s " % (os.path.join(sptk, 'x2x'), \
os.path.join(f0_dir, file_id + '.f0a'), \
os.path.join(sptk, 'sopr') + ' -magic 0.0 -LN -MAGIC -1.0E+10', \
os.path.join(lf0_dir, file_id + '.lf0'))
os.system(sptk_x2x_af_cmd)
### convert sp to mgc ###
sptk_x2x_df_cmd1 = "%s +df %s | %s | %s >%s" % (os.path.join(sptk, 'x2x'), \
os.path.join(sp_dir, file_id + '.sp'), \
os.path.join(sptk, 'sopr') + ' -R -m 32768.0', \
os.path.join(sptk, 'mcep') + ' -a ' + str(alpha) + ' -m ' + str(
mcsize) + ' -l ' + str(
nFFTHalf) + ' -e 1.0E-8 -j 0 -f 0.0 -q 3 ', \
os.path.join(mgc_dir, file_id + '.mgc'))
os.system(sptk_x2x_df_cmd1)
### convert bapd to bap ###
sptk_x2x_df_cmd2 = "%s +df %s > %s " % (os.path.join(sptk, "x2x"), \
os.path.join(bap_dir, file_id + ".bapd"), \
os.path.join(bap_dir, file_id + '.bap'))
os.system(sptk_x2x_df_cmd2)
def try_process(filename):
try:
process(filename)
except Exception as e:
print(e)
print("--- Feature extraction started ---")
start_time = time.time()
# get wav files list
wav_files = get_wav_filelist(wav_dir)
# do multi-processing
pool = mp.Pool(mp.cpu_count())
pool.map(try_process, wav_files)
# DEBUG:
#for nxf in xrange(len(wav_files)):
# process(wav_files[nxf])
# clean temporary files
shutil.rmtree(sp_dir, ignore_errors=True)
shutil.rmtree(f0_dir, ignore_errors=True)
for zippath in glob.iglob(os.path.join(bap_dir, '*.bapd')):
os.remove(zippath)
print("You should have your features ready in: "+out_dir)
(m, s) = divmod(int(time.time() - start_time), 60)
print(("--- Feature extraction completion time: %d min. %d sec ---" % (m, s)))
|
StarcoderdataPython
|
4980103
|
from aocd import get_data
import re
def get_first_last_winning_boards(data):
input = [s for s in re.split("\n\n|\n", data)]
numbers = [int(n) for n in input[0].split(",")]
boards = [re.split("\s+", n.lstrip()) for n in input[1:]]
winner_boards = set()
bsize = 5
draw = 5
bingo = False
number_board = 0
while draw < len(numbers):
numbers_set = set(numbers[:draw])
l = 0
for line in boards:
s = set([int(n) for n in line])
if s.issubset(numbers_set):
number_board = int(l / bsize)
if bingo == False:
winner_boards.add(number_board)
number_first_board = number_board
number_first_draw = numbers[draw - 1]
numbers_first_set = numbers_set
bingo = True
if number_board not in winner_boards:
number_last_board = number_board
number_last_draw = numbers[draw - 1]
numbers_last_set = numbers_set
winner_boards.add(number_board)
            if l % bsize == 0:  # check columns (verticals) for this board
for v in range(bsize):
col = [int(b[v]) for b in boards[l : l + bsize]]
s = set([int(b[v]) for b in boards[l : l + bsize]])
if s.issubset(numbers_set):
number_board = int(l / bsize)
if bingo == False:
winner_boards.add(number_board)
number_first_board = number_board
number_first_draw = numbers[draw - 1]
numbers_first_set = numbers_set
bingo = True
if number_board not in winner_boards:
number_last_board = number_board
number_last_draw = numbers[draw - 1]
numbers_last_set = numbers_set
winner_boards.add(number_board)
l += 1
draw += 1
# first board
bingoboard_first = set(
[
int(n)
for li in boards[number_first_board * 5 : number_first_board * 5 + bsize]
for n in li
]
)
other_first = list(
bingoboard_first - bingoboard_first.intersection(numbers_first_set)
)
# last board
bingoboard_last = set(
[
int(n)
for li in boards[number_last_board * 5 : number_last_board * 5 + bsize]
for n in li
]
)
other_last = list(bingoboard_last - bingoboard_last.intersection(numbers_last_set))
return number_first_draw * sum(other_first), number_last_draw * sum(other_last)
def part1(data):
return get_first_last_winning_boards(data)[0]
def part2(data):
return get_first_last_winning_boards(data)[1]
def test_part1():
assert part1(test_data) == 4512
def test_part2():
assert part2(test_data) == 1924
data = get_data(day=4, year=2021)
print(part1(data))
print(part2(data))
test_data = """7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
3 15 0 2 22
9 18 13 17 5
19 8 7 25 23
20 11 10 24 4
14 21 16 12 6
14 21 17 24 4
10 16 15 9 19
18 8 23 26 20
22 11 13 6 5
2 0 12 3 7"""
|
StarcoderdataPython
|
4893975
|
<reponame>ilomon10/face-mask-detector
from threading import Timer
def debounce(wait):
""" Decorator that will postpone a functions
execution until after wait seconds
have elapsed since the last time it was invoked. """
def decorator(fn):
def debounced(*args, **kwargs):
def call_it():
fn(*args, **kwargs)
try:
debounced.t.cancel()
except(AttributeError):
pass
debounced.t = Timer(wait, call_it)
debounced.t.start()
return debounced
return decorator
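# A minimal usage sketch (added, not part of the original snippet): rapid calls
# inside the wait window collapse into a single deferred execution.
if __name__ == '__main__':
    import time

    @debounce(0.5)
    def on_change(value):
        print("processed:", value)

    for v in range(5):
        on_change(v)      # each call cancels the previously scheduled Timer
        time.sleep(0.1)
    time.sleep(1.0)       # let the surviving Timer fire; prints "processed: 4"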
|
StarcoderdataPython
|
8055374
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
import filters
class LockDetector():
"""
LockDetector instances are objects that can determine whether or not
a particular tracking channel is locked or not.
For more information:
Kaplan and Hegarty pages 234-235
note to self: Consider implementing false phase lock detection (p. 235) in the
LockDetector.update() function.
"""
def __init__(self, N, k, lossthreshold, lockthreshold):
"""
Initialize the lock detector.
@type N : int
@param N : This is the number of previous samples to average over.
@type k : float
@param k : This is the scale factor (specifically, the divisor of the inphase
prompt inputs) used for comparing the inphase and quadraphase
inputs. A typical value is 1.5.
@type lossthreshold : int
@param lossthreshold : Once the loss-of-lock condition has been determined
consecutively by the amount given by lossthreshold, loss-of-lock is declared.
@type lockthreshold : int
@param lockthreshold : Once the lock condition has been determined consecutively
by the amount given by lockthreshold, lock is declared.
"""
self.k = k
self.lossthreshold = lossthreshold
self.lockthreshold = lockthreshold
self.iFilter = filters.LowPassFilter(0.0247)
self.qFilter = filters.LowPassFilter(0.0247)
self.reset()
def reset(self, iMagnitude=0, qMagnitude=0, lock=0):
"""
Reset the lock detector.
@type iMagnitude : float
@param iMagnitude: The initial state of the inphase running average filter will be set
to this value (the default is zero).
@type qMagnitude : float
@param qMagnitude: The initial state of the quadraphase running average filter will be
set to this value (the default is zero).
@type lock : int
@param lock: The initial state of the lock detector is set to this status (the
default is 0 for False).
"""
self.losscount = 0
self.lockcount = 0
self.iFilter.reset(h=iMagnitude)
self.qFilter.reset(h=qMagnitude)
self.lock = lock
def update(self, iP, qP):
"""
Update the lock detector with the latest prompt correlator outputs,
and determine the locking status.
@type iP : float
@param iP : This is the latest inphase prompt correlator output.
@type qP : float
@param qP : This is the latest quadraphase prompt correlator output.
@rtype : tuple
@return : (int, float), (self.lock, iP-qP)
This tuple carries locking status (self.lock - either 1 for True or
0 for False) and the current difference between the filtered
magnitudes of the (scaled) inphase prompt value and the quadraphase
prompt value. The latter can be used as a diagnostic aid when
reviewing data.
"""
iP = self.iFilter.update(iP.__abs__()) / self.k
qP = self.qFilter.update(qP.__abs__())
if iP > qP:
self.losscount = 0
if self.lockcount > self.lockthreshold:
self.lock = 1
# Development Notes: consider implementing false phase lock
# detection logic here.
else:
self.lockcount = self.lockcount + 1
else:
self.lockcount = 0
if self.losscount > self.lossthreshold:
self.lock = 0
else:
self.losscount = self.losscount + 1
return self.lock, iP-qP
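# A minimal usage sketch (added, not part of the original module); the constructor
# values below are illustrative, not recommended settings.
if __name__ == '__main__':
    detector = LockDetector(N=20, k=1.5, lossthreshold=50, lockthreshold=240)
    lock, margin = 0, 0.0
    for _ in range(500):
        # Strong in-phase prompt power relative to quadraphase -> lock is declared
        # once the lock counter exceeds lockthreshold.
        lock, margin = detector.update(1000.0, 10.0)
    print('lock status:', lock, 'filtered I/k - Q margin:', margin)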
|
StarcoderdataPython
|
3589835
|
import distutils.util
print(distutils.util.get_platform())
|
StarcoderdataPython
|
3401296
|
from . import get
from . import post
from . import put
|
StarcoderdataPython
|
144456
|
<filename>core/utils/mod2div.py
from core.utils.xor import xor
# Performs Modulo-2 division
def mod2div(dividend, divisor):
# Number of bits to be XORed at a time.
pick = len(divisor)
# Slicing the dividend to appropriate
# length for particular step
tmp = dividend[0: pick]
while pick < len(dividend):
if tmp[0] == '1':
# replace the dividend by the result
# of XOR and pull 1 bit down
tmp = xor(divisor, tmp) + dividend[pick]
else: # If leftmost bit is '0'
# If the leftmost bit of the dividend (or the
# part used in each step) is 0, the step cannot
# use the regular divisor; we need to use an
# all-0s divisor.
tmp = xor('0' * pick, tmp) + dividend[pick]
# increment pick to move further
pick += 1
# For the last n bits, we have to carry it out
# normally as increased value of pick will cause
# Index Out of Bounds.
if tmp[0] == '1':
tmp = xor(divisor, tmp)
else:
tmp = xor('0' * pick, tmp)
remainder = tmp
return remainder
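
# Usage sketch (not in the original file): compute a CRC remainder with the classic
# textbook example. The expected result assumes the imported xor() helper is the
# standard CRC bit-string XOR that drops the leading bit of its result.
if __name__ == '__main__':
    data = '100100'
    key = '1101'                            # generator polynomial x^3 + x^2 + 1
    dividend = data + '0' * (len(key) - 1)  # append len(key)-1 zero bits
    print(mod2div(dividend, key))           # '001' under the assumption above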
|
StarcoderdataPython
|
6610780
|
import pandas as pd
import os
import subprocess
import tempfile
from inference_results import InferenceResults
from config import Config
from dataset import get_synth_dataset
from ranked_list import RankedList
import re
from typing import List, Tuple, Any
import javalang
from source_code_utils import get_source_code, extract_method
FileName = str
MethodName = str
MethodStartLine = int
Range = Tuple[int, int]
Score = float
def dims_to_str(dims: List[Any]) -> str:
if len(dims) == 0:
return ''
return ''.join(['[]' for _ in dims])
def types_to_str(args: List[Any]) -> str:
if args is None:
return ''
def _to_str(arg):
return arg.pattern_type if arg.type is None else arg.type.name
return '<' + ','.join([_to_str(arg) for arg in args]) + '>'
def formal_to_str(p: javalang.tree.FormalParameter) -> str:
s = p.type.name
if hasattr(p.type, "arguments"):
s += types_to_str(p.type.arguments)
s += dims_to_str(p.type.dimensions)
return s
def meth_dec_to_str(decl: javalang.tree.MethodDeclaration) -> str:
return decl.name + '(' + ','.join([formal_to_str(p) for p in decl.parameters]) + ')'
def get_method_name(filename: str, method_name: str, method_start_line: int) -> str:
source_code = get_source_code(filename)
# source_code = remove_str_literals_with_brackets(source_code)
method_code = extract_method(source_code, method_name, method_start_line)
tokens = javalang.tokenizer.tokenize(method_code)
parser = javalang.parser.Parser(tokens)
method_decl = parser.parse_member_declaration()
return meth_dec_to_str(method_decl)
def prepare_semi_data(df: List[Tuple[FileName, MethodName, MethodStartLine]], dir: str) -> Tuple[pd.DataFrame, int]:
data = []
errors = 0
for fn, method_name, pos in df:
fn_full = os.path.join(dir, fn)
try:
semi_method_name = get_method_name(fn_full, method_name, pos)
data.append((fn_full, semi_method_name, pos))
except Exception:
print(fn_full, method_name, pos)
errors += 1
continue
return pd.DataFrame(data), errors
def semi_recommendation_str_to_list(s: str) -> List[Tuple[Range, Score]]:
s = s.replace(' to ', ',')
s = re.sub(r"0(\d)(\d)", '\\1\\2', s)
s = re.sub(r"0(\d)", '\\1', s)
ls = eval(s)
ls = sorted(ls, key=lambda v: v[2], reverse=True)
return list(map(lambda v: (v[0: 2], v[2]), ls))
def run_semi_algorithm(df: pd.DataFrame, config: Config) -> pd.DataFrame:
    '''Run SEMI inference using the provided executable'''
with tempfile.NamedTemporaryFile() as tmp, tempfile.NamedTemporaryFile() as semi_out:
df.to_csv(tmp.name, header=False, sep=';', index=False)
_ = subprocess.run([
'java', '-jar',
config.path_to_semi_jar,
tmp.name,
semi_out.name
], capture_output=True, text=True)
# out = subprocess.run(cmd, shell=True, capture_output=True, text=True)
semi_df = pd.read_csv(semi_out.name, sep=';', names=['filename', 'method', 'semi_recommendations'])
semi_df.semi_recommendations = semi_df.semi_recommendations.apply(semi_recommendation_str_to_list)
semi_df.filename = semi_df.filename.apply(os.path.basename)
return semi_df
def _calculate_inference_data_for_semi(data: pd.DataFrame, path_to_java_files: str) -> pd.DataFrame:
''' To read pre calculated data '''
columns = ['filename', 'target_method', 'target_method_start_line']
for c in columns:
assert c in data.columns, f"Can't find column: {c}"
# data = data.astype({'target_method_start_line': 'int32'}) # TODO: move
df, err_count = prepare_semi_data(
data.groupby(columns).size().reset_index()[columns].values,
path_to_java_files
)
print('SEMI data prepare error count:', err_count)
return df
def _get_semi_inference_from_file_or_calculate(data: pd.DataFrame, config: Config) -> pd.DataFrame:
if config.path_to_semi_inference_csv is not None:
print('Load precomputed SEMI inference.')
semi_df = pd.read_csv(config.path_to_semi_inference_csv, sep=';')
filenames_filter = [os.path.basename(fn) for fn in data.filename.unique()]
semi_df = semi_df.merge(pd.DataFrame(filenames_filter, columns=['filename']), on='filename')
else:
        print('Precomputed SEMI inference file not found. It will be calculated from scratch')
        df = _calculate_inference_data_for_semi(data, config.path_to_java_files)
        semi_df = run_semi_algorithm(df, config)
semi_df.to_csv(config.path_to_semi_inference_csv, sep=';', index=False)
return semi_df
def _preprocess_data(data: pd.DataFrame, config: Config) -> pd.DataFrame:
    semi_df = _get_semi_inference_from_file_or_calculate(data, config)
synth_dataset = get_synth_dataset(config.path_to_dataset, config.path_to_java_files)
join = semi_df.merge(synth_dataset, 'right', left_on='filename', right_on='filename')
join.semi_recommendations.fillna('[]', inplace=True)
join.semi_recommendations = join.semi_recommendations.apply(eval)
semi_data = join[['semi_recommendations', 'true_inline_range', 'filename']].values
return semi_data
def inference_semi(data: pd.DataFrame, config: Config) -> InferenceResults:
semi_data = _preprocess_data(data, config)
predictions = InferenceResults()
for recommendations, y, fn in semi_data:
recommendations = [rng for rng, score in recommendations if (score >= 0 and (rng[0] <= rng[1]))]
if len(recommendations) == 0:
predictions.add('*', RankedList(pd.DataFrame([], columns=['score', 'is_true', 'range', 'true_range'])))
continue
y = eval(y)
y = [y[0] + 1, y[1] + 1]
rl = RankedList(pd.DataFrame(
[(i + 1, x == y, str(x), str(y)) for i, x in enumerate(recommendations)],
columns=['score', 'is_true', 'range', 'true_range']
))
predictions.add('*', rl)
return predictions
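
# Rough usage sketch, kept as comments because Config construction is project
# specific and not shown here. The required column names come from the asserts in
# _calculate_inference_data_for_semi; the example row is purely hypothetical.
#
#   config = Config(...)  # must provide path_to_semi_jar, path_to_dataset,
#                         # path_to_java_files, path_to_semi_inference_csv
#   data = pd.DataFrame(
#       [("Foo.java", "bar", 12)],
#       columns=["filename", "target_method", "target_method_start_line"],
#   )
#   results = inference_semi(data, config)  # returns an InferenceResults object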
|
StarcoderdataPython
|
9692786
|
from copy import deepcopy
from typing import List, Tuple, Dict, Union, Optional
EncodeDecodeMappings = List[Tuple[int, str, str]]
Predictions = List[Dict[str, Union[str, Dict]]]
class TextEncoder:
def __init__(
self,
encoding: Dict[str, str],
model_special_tokens: Optional[List[str]] = None,
):
r"""
Examples:
```
TextEncoder(
encoding={"\n": "[NEWLINE]", "\t": "[TAB]"},
model_special_tokens=["[NEWLINE]", "[TAB]"],
)
```
Args:
encoding: mapping to special tokens
model_special_tokens: special tokens that the model was trained on
"""
self.encoding = encoding
if model_special_tokens is None:
print(
"ATTENTION! DID NOT CHECK THAT MODEL WAS TRAINED WITH SPECIAL TOKENS ACCORDING TO encoding! "
"It is recommended to provide an 'model_special_tokens' argument to check."
)
else:
assert sorted(list(set(encoding.values()))) == sorted(
list(set(model_special_tokens))
), (
f"ERROR! encoding values = {sorted(list(set(encoding.values())))} does not equal "
f"model_special_tokens = {sorted(list(set(model_special_tokens)))}"
)
def encode(
self, text_list: List[str]
) -> Tuple[List[str], List[EncodeDecodeMappings]]:
r"""
encodes list of text using self.encoding
Examples:
```
text_encoded_list, encode_decode_mappings_list = encode(text_list=["an\n example"])
# text_encoded_list = ["an[NEWLINE] example"]
# encode_decode_mappings_list = [[(2, "\n", "[NEWLINE]")]]
```
Args:
text_list: original text
Returns:
text_encoded_list: encoded text
encode_decode_mappings_list: mappings (char_start, original token, encoded token)
"""
list_of_single_encodings = [self._encode_single(text) for text in text_list]
return [elem[0] for elem in list_of_single_encodings], [
elem[1] for elem in list_of_single_encodings
]
def decode(
self,
text_encoded_list: List[str],
encode_decode_mappings_list: List[EncodeDecodeMappings],
predictions_encoded_list: List[Predictions],
) -> Tuple[List[str], List[Predictions]]:
r"""
decodes list of text_encoded and predictions_encoded using encode_decode_mappings
Examples:
```
text_list, predictions_list = decode(
text_encoded_list=["an[NEWLINE] example"],
encode_decode_mappings_list=[[(2, "\n", "[NEWLINE]")]]),
predictions_encoded_list=[[{"char_start": "12", "char_end": "19", "token": "example", "tag": "TAG"}]]
)
# text_list = ["an\n example"]
# predictions_list = [[{"char_start": "4", "char_end": "11", "token": "example", "tag": "TAG"}]]
```
Args:
text_encoded_list: encoded text
encode_decode_mappings_list: mappings (char_start, original token, encoded token)
predictions_encoded_list: encoded predictions
Returns:
text_list: original / decoded text
predictions_list: original / decoded predictions
"""
list_of_single_decodings = [
self._decode_single(
text_encoded, encode_decode_mappings, predictions_encoded
)
for text_encoded, encode_decode_mappings, predictions_encoded in zip(
text_encoded_list, encode_decode_mappings_list, predictions_encoded_list
)
]
return [elem[0] for elem in list_of_single_decodings], [
elem[1] for elem in list_of_single_decodings
]
def _encode_single(self, text: str) -> Tuple[str, EncodeDecodeMappings]:
r"""
encodes single text using self.encoding
Args:
text: e.g. "an\n example"
Returns:
text_encoded: e.g. "an[NEWLINE] example"
encode_decode_mappings: e.g. [(2, "\n", "[NEWLINE]")]
"""
text_encoded = deepcopy(text)
encode_decode_mappings = list()
for k, v in self.encoding.items():
while k in text_encoded:
index = text_encoded.find(k)
text_encoded = text_encoded.replace(k, v, 1)
encode_decode_mappings.append((index, k, v))
encode_decode_mappings.reverse()
return text_encoded, encode_decode_mappings
@staticmethod
def _decode_single(
text_encoded: str,
encode_decode_mappings: EncodeDecodeMappings,
predictions_encoded: Predictions,
) -> Tuple[str, Predictions]:
r"""
decodes single text_encoded and predictions_encoded using encode_decode_mapping
Args:
text_encoded: e.g. "an[NEWLINE] example"
encode_decode_mappings: e.g. [(2, "\n", "[NEWLINE]")]
predictions_encoded: e.g. [{"char_start": "12", "char_end": "19", "token": "example", "tag": "TAG"}]
Returns:
text: e.g. "an\n example"
predictions: e.g. [{"char_start": "4", "char_end": "11", "token": "example", "tag": "TAG"}]
"""
text = deepcopy(text_encoded)
predictions = deepcopy(predictions_encoded)
for encode_decode_mapping in encode_decode_mappings:
index, k, v = encode_decode_mapping
# print(index, k, v)
assert (
text[index : index + len(v)] == v
), f"ERROR! text[{index}:{index + len(v)}] = {text[index:index + len(v)]} != {v}"
text = text[:index] + k + text[index + len(v) :]
for prediction in predictions:
assert isinstance(prediction["char_start"], str) and isinstance(
prediction["char_end"], str
), f"ERROR! expected str, got type ({prediction['char_start']}, {prediction['char_end']})"
if int(prediction["char_start"]) == index and int(
prediction["char_end"]
) == index + len(v):
prediction["char_end"] = str(
int(prediction["char_end"]) - len(v) + len(k)
)
prediction["token"] = k
elif int(prediction["char_end"]) > index:
prediction["char_start"] = str(
int(prediction["char_start"]) - len(v) + len(k)
)
prediction["char_end"] = str(
int(prediction["char_end"]) - len(v) + len(k)
)
return text, predictions
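
# Minimal runnable sketch (not part of the original module), reusing the example
# from the docstrings above.
if __name__ == "__main__":
    encoder = TextEncoder(encoding={"\n": "[NEWLINE]", "\t": "[TAB]"})
    encoded, mappings = encoder.encode(["an\n example"])
    print(encoded)  # ["an[NEWLINE] example"]
    decoded, predictions = encoder.decode(
        encoded,
        mappings,
        [[{"char_start": "12", "char_end": "19", "token": "example", "tag": "TAG"}]],
    )
    print(decoded)      # ["an\n example"]
    print(predictions)  # [[{"char_start": "4", "char_end": "11", "token": "example", "tag": "TAG"}]]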
|
StarcoderdataPython
|
6591795
|
<reponame>wgjak47/supervisor_numplus
from supervisor.options import UnhosedConfigParser
from supervisor.datatypes import list_of_strings
from supervisor.states import SupervisorStates
from supervisor.states import STOPPED_STATES
from supervisor.xmlrpc import Faults as SupervisorFaults
from supervisor.xmlrpc import RPCError
import supervisor.loggers
import json
API_VERSION = '1.0'
class Faults:
NOT_IN_WHITELIST = 230
class GraceNamespaceRPCInterface:
""" A supervisor rpc interface that facilitates manipulation of
supervisor's configuration and state in ways that are not
normally accessible at runtime.
"""
def __init__(self, supervisord, whitelist=[]):
self.supervisord = supervisord
self._whitelist = list_of_strings(whitelist)
def _update(self, func_name):
self.update_text = func_name
state = self.supervisord.get_state()
if state == SupervisorStates.SHUTDOWN:
raise RPCError(SupervisorFaults.SHUTDOWN_STATE)
if len(self._whitelist):
if func_name not in self._whitelist:
raise RPCError(Faults.NOT_IN_WHITELIST, func_name)
# RPC API methods
def getAPIVersion(self):
""" Return the version of the RPC API used by supervisor_twiddler
@return int version version id
"""
self._update('getAPIVersion')
return API_VERSION
def getGroupNames(self):
""" Return an array with the names of the process groups.
@return array Process group names
"""
self._update('getGroupNames')
return list(self.supervisord.process_groups.keys())
def log(self, message, level=supervisor.loggers.LevelsByName.INFO):
""" Write an arbitrary message to the main supervisord log. This is
useful for recording information about your twiddling.
@param string message Message to write to the log
@param string|int level Log level name (INFO) or code (20)
@return boolean Always True unless error
"""
self._update('log')
if isinstance(level, str):
level = getattr(supervisor.loggers.LevelsByName,
level.upper(), None)
if supervisor.loggers.LOG_LEVELS_BY_NUM.get(level, None) is None:
raise RPCError(SupervisorFaults.INCORRECT_PARAMETERS)
self.supervisord.options.logger.log(level, message)
return True
def addProgramToGroup(self, group_name, program_name, program_options):
""" Add a new program to an existing process group. Depending on the
numprocs option, this will result in one or more processes being
added to the group.
@param string group_name Name of an existing process group
@param string program_name Name of the new process in the process table
@param struct program_options Program options, same as in supervisord.conf
@return boolean Always True unless error
"""
self._update('addProgramToGroup')
group = self._getProcessGroup(group_name)
# make configparser instance for program options
section_name = 'program:%s' % program_name
parser = self._makeConfigParser(section_name, program_options)
# make process configs from parser instance
options = self.supervisord.options
try:
new_configs = options.processes_from_section(parser, section_name, group_name)
except ValueError as e:
raise RPCError(SupervisorFaults.INCORRECT_PARAMETERS, e)
# check new process names don't already exist in the config
for new_config in new_configs:
for existing_config in group.config.process_configs:
if new_config.name == existing_config.name:
raise RPCError(SupervisorFaults.BAD_NAME, new_config.name)
# add process configs to group
group.config.process_configs.extend(new_configs)
for new_config in new_configs:
# the process group config already exists and its after_setuid hook
# will not be called again to make the auto child logs for this process.
new_config.create_autochildlogs()
# add process instance
group.processes[new_config.name] = new_config.make_process(group)
return True
def UpdateNumprocs(self, group_name):
""" graceful process_group numprocs without restart all process when only numprocs changed.
if numprocs increased, the operation will start (new_num - old_num) processes,
if numprocs reduced, the operation will stop the last (new_num - old_num) processes
@param string group_name Name of an existing process group
"""
try:
self.supervisord.options.process_config(do_usage=False)
except ValueError as msg:
            raise RPCError(SupervisorFaults.CANT_REREAD, msg)
group = self._getProcessGroup(group_name)
old_config = self.supervisord.process_groups[group_name].config
new_config = [ cfg
for cfg in self.supervisord.options.process_group_configs if cfg.name == group_name
][0]
if old_config == new_config:
return json.dumps({
"msg":"No need to update",
"type":"error"
})
else:
if old_config.name != new_config.name or old_config.priority != new_config.priority:
return json.dumps({
"msg":"Not only numprocs has changed: priority is difference",
"type":"error"
})
new_process_configs = new_config.process_configs
old_process_configs = old_config.process_configs
if len(old_process_configs) < len(new_process_configs):
if self._issubset(old_process_configs, new_process_configs):
return self._add_num(group_name, self._difference(new_process_configs, old_process_configs))
else:
return json.dumps({
"msg": "Not only numprocs has changed",
"type": "error"
})
elif len(old_process_configs) > len(new_process_configs):
if self._issubset(new_process_configs, old_process_configs):
return self._reduce_num(group_name, self._difference(old_process_configs, new_process_configs))
else:
return json.dumps({
"msg": "Not only numprocs has changed",
"type": "error"
})
# If other not only name, priority or numprocs has changed
# Return an error
else:
return json.dumps({
"msg": "Other settings has changed, please use update",
"type": "error"
})
# Return something for xmlrpc lib
return True
# ProcessConfig can't use set because __hash__ is not implemented
def _difference(self, listA, listB):
return [ item for item in listA if not self._has(listB, item) ]
def _has(self, the_list, A):
for item in the_list:
if A.__eq__(item):
return True
return False
def _issubset(self, A, B):
for item in A:
if not self._has(B, item):
return False
        return True
# just return the processes need to remove, let
# supervisorctl call supervisor to stop the processes
def _reduce_num(self, group_name, process_configs):
return json.dumps({
'processes_name' : ["{0}:{1}".format(group_name,p.name) for p in process_configs],
'type' : 'reduce'
})
def _add_num(self, group_name, new_configs):
group = self._getProcessGroup(group_name)
group.config.process_configs.extend(new_configs)
for new_config in new_configs:
# the process group config already exists and its after_setuid hook
# will not be called again to make the auto child logs for this process.
new_config.create_autochildlogs()
# add process instance
group.processes[new_config.name] = new_config.make_process(group)
return json.dumps({
'processes_name' : [ "{0}:{1}".format(group_name,p.name) for p in new_configs],
'type' : 'add'
})
def removeProcessFromGroup(self, group_name, process_name):
""" Remove a process from a process group. When a program is added with
        addProgramToGroup(), one or more processes for that program are added
to the group. This method removes individual processes (named by the
numprocs and process_name options), not programs.
@param string group_name Name of an existing process group
@param string process_name Name of the process to remove from group
@return boolean Always return True unless error
"""
group = self._getProcessGroup(group_name)
# check process exists and is running
process = group.processes.get(process_name)
if process is None:
raise RPCError(SupervisorFaults.BAD_NAME, process_name)
""" Change to stop process here instead of raise an error
"""
if process.pid or process.state not in STOPPED_STATES:
raise RPCError(SupervisorFaults.STILL_RUNNING, process_name)
group.transition()
# del process config from group, then del process
for index, config in enumerate(group.config.process_configs):
if config.name == process_name:
del group.config.process_configs[index]
del group.processes[process_name]
return True
def _getProcessGroup(self, name):
""" Find a process group by its name """
group = self.supervisord.process_groups.get(name)
if group is None:
raise RPCError(SupervisorFaults.BAD_NAME, 'group: %s' % name)
return group
def _makeConfigParser(self, section_name, options):
""" Populate a new UnhosedConfigParser instance with a
section built from an options dict.
"""
config = UnhosedConfigParser()
try:
config.add_section(section_name)
for k, v in dict(options).items():
config.set(section_name, k, v)
except (TypeError, ValueError):
raise RPCError(SupervisorFaults.INCORRECT_PARAMETERS)
return config
def make_grace_rpcinterface(supervisord, **config):
return GraceNamespaceRPCInterface(supervisord, **config)
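
# Sketch of enabling this interface in supervisord.conf via supervisor's standard
# rpcinterface mechanism. The section name and the dotted module path below are
# assumptions; adjust them to wherever this module is installed.
#
#   [rpcinterface:grace]
#   supervisor.rpcinterface_factory = supervisor_numplus.rpcinterface:make_grace_rpcinterface
#   whitelist = getGroupNames,addProgramToGroup,UpdateNumprocs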
|
StarcoderdataPython
|
4945082
|
<reponame>hcmus-nlp-chatbot/CRSLab
from .topic_bert import TopicBERTModel
|
StarcoderdataPython
|
136730
|
<filename>dsutils/__init__.py
from dsutils_dev.dsutils.evaluate import get_eda_plots
from dsutils_dev.dsutils.convert import DataFrameConverter
from dsutils_dev.dsutils.colab_utils import mount_drive #, get_spark_environment
__version__ = '0.0.1'
#from dsutils_dev.evaluate import get_eda_plots
|
StarcoderdataPython
|
1732928
|
# Initialize a variable with a user-specified value.
user = input( 'I am Python. What is your name? : ' )
# Output a string and a variable value.
print( 'Welcome' , user )
# Initialize another variable with a user-specified value.
lang = input( 'Favorite programming language? : ' )
# Output a string and a variable value.
print( lang , 'Is' , 'Fun' , sep = ' * ' , end = '!\n' )
|
StarcoderdataPython
|
6676908
|
"""
Server for pyglidein
"""
import json
import logging
from tornado.web import HTTPError
from rest_tools.server import (RestServer, RestHandler, RestHandlerSetup,
from_environment, role_authorization)
from . import __version__ as version
from .condor import CondorCache
from .clients import Clients
logger = logging.getLogger('server')
class BaseHandler(RestHandler):
def initialize(self, condor, clients, **kwargs):
super().initialize(**kwargs)
self.condor = condor
self.clients = clients
class StatusHandler(BaseHandler):
async def get(self):
self.write({
'condor': self.condor.get_json(),
'clients': self.clients.get_json(),
})
class APITokens(BaseHandler):
@role_authorization(roles=['admin'])
async def post(self):
data = json.loads(self.request.body)
if (not data) or 'client' not in data:
raise HTTPError(400, reason='Missing "client" in body')
token = self.auth.create_token(data['client'], type='client',
payload={'role': 'client'})
self.write({'client': data['client'], 'token': token})
class APIClient(BaseHandler):
@role_authorization(roles=['admin', 'client'])
async def put(self, client):
if self.auth_data.get('role', None) == 'client' and client != self.auth_data.get('sub', None):
raise HTTPError(403, reason='Cannot update a different client than your own')
if self.request.body:
data = json.loads(self.request.body)
self.clients.update(client, data)
else:
self.clients.update(client, {})
self.write({})
class APIClientQueue(BaseHandler):
@role_authorization(roles=['admin', 'client'])
async def post(self, client):
if self.auth_data.get('role', None) == 'client' and client != self.auth_data.get('sub', None):
raise HTTPError(403, reason='Cannot update a different client than your own')
if self.request.body:
status = json.loads(self.request.body)
self.clients.update(client, status)
try:
self.clients.get(client)
except KeyError:
raise HTTPError(400, reason='Need to provide client queue status')
ret = self.clients.match(client, self.condor)
if not ret:
self.write({})
else:
self.write({
'queues': ret,
'token': self.condor.get_startd_token()
})
def create_server():
# static_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static')
# template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
default_config = {
'HOST': 'localhost',
'PORT': 8080,
'DEBUG': False,
# 'COOKIE_SECRET': binascii.hexlify(b'secret').decode('utf-8'),
'AUTH_SECRET': '',
'AUTH_EXPIRATION': -1, # seconds for token lifetime
'CONDOR_COLLECTOR': 'localhost',
'CONDOR_CACHE_TIMEOUT': 60,
}
config = from_environment(default_config)
rest_cfg = {
'debug': config['DEBUG'],
'server_header': f'pyglidein_server {version}',
}
if config['AUTH_SECRET']:
rest_cfg['auth'] = {
'secret': config['AUTH_SECRET'],
'issuer': 'pyglidein',
}
if config['AUTH_EXPIRATION'] > 0:
rest_cfg['auth']['expiration'] = config['AUTH_EXPIRATION']
args = RestHandlerSetup(rest_cfg)
condor_args = {
'collector_address': config['CONDOR_COLLECTOR'],
'cache_timeout': config['CONDOR_CACHE_TIMEOUT'],
}
args['condor'] = CondorCache(**condor_args)
args['clients'] = Clients()
server = RestServer(debug=config['DEBUG'],
# static_path=static_path, template_path=template_path,
# cookie_secret=config['COOKIE_SECRET'],
)
server.add_route(r'/status', StatusHandler, args)
server.add_route(r'/api/tokens', APITokens, args)
server.add_route(r'/api/clients/(?P<client>\w+)', APIClient, args)
server.add_route(r'/api/clients/(?P<client>\w+)/actions/queue', APIClientQueue, args)
server.startup(address=config['HOST'], port=config['PORT'])
return server
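
# Minimal sketch of running the server; the real package may ship its own entry
# point, so treat this as an assumption. Configuration is read from the
# environment variables listed in default_config above.
if __name__ == '__main__':
    import asyncio
    logging.basicConfig(level=logging.DEBUG)
    create_server()
    asyncio.get_event_loop().run_forever()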
|
StarcoderdataPython
|
6654024
|
<reponame>pengfei-chen/algorithm_qa
"""
问题描述:给定一个二叉树的头结点head,已知其中没有重复值的节点,实现两个函数分别判断这棵二叉树是否是
搜索二叉树和完全二叉树。
"""
from binarytree.toolcls import Node
class JudgeTool:
@classmethod
def is_bst_tree(cls, head):
if head is None:
return True
res = [True]
cls.is_bst_tree_detail(head, None, '', res)
return res[0]
@classmethod
def is_bst_tree_detail(cls, head, pre, relation, res):
if head.left:
cls.is_bst_tree_detail(head.left, head, 'left', res)
if pre is not None:
if relation == 'left':
if pre.value < head.value:
res[0] = False
            else:
                if pre.value > head.value:
                    res[0] = False
if head.right:
cls.is_bst_tree_detail(head.right, head, 'right', res)
@classmethod
def is_complete_tree(cls, head):
if head is None:
return True
my_queue = list()
my_queue.append(head)
to_leaf = 0
while len(my_queue) > 0:
node = my_queue.pop(0)
if node.left is not None and node.right is not None:
if to_leaf == 1:
return False
my_queue.append(node.left)
my_queue.append(node.right)
if node.left is None and node.right is not None:
return False
if node.left is not None and node.right is None:
if to_leaf == 1:
return False
to_leaf = 1
my_queue.append(node.left)
if not node.left and not node.right:
if not to_leaf:
to_leaf = 1
return True
if __name__ == '__main__':
head = Node(4)
head.left = Node(2)
head.right = Node(6)
head.left.left = Node(1)
head.left.right = Node(3)
head.right.left = Node(5)
print(JudgeTool.is_bst_tree(head))
print(JudgeTool.is_complete_tree(head))
|
StarcoderdataPython
|
4928075
|
<gh_stars>0
# MongoDB stores data in JSON-like documents, which makes the database very flexible and scalable.
# Python needs a MongoDB driver to access the MongoDB database.
# One of the most known MongoDB driver's is "PyMongo".
from pymongo import MongoClient
import pandas as pd
# Establishes a connection with Cluster0
myclient = MongoClient("XXXX")
# Lists all the DBs in Cluster0
print(myclient.list_database_names())
# Instantiates a single DB in the Cluster
db = myclient['sample_airbnb']
# Fetches a particular Collection from the DB
col=db['listingsAndReviews']
#In MongoDB we use the find and findOne methods to find data in a collection.
#Just like the SELECT statement is used to find data in a table in a MySQL database.
#With first_one() we print the 1st register from the DB
#print(col.find_one())
# Using pandas, we print the 1st register as a DataFrame
# data = col.find_one()
# mydb = pd.DataFrame.from_dict(data,orient='index')
# print(mydb)
#Fetches all the documents from the Collection as a dictionary
data2 = col.find({})
# Ranges the dictionary as a pandas DataFrame
mydb2 = pd.DataFrame.from_dict(data2)
# Prints the first 5 registers from the DB
print(mydb2[:5])
#Finishes the connection with Cluster0
myclient.close()
|
StarcoderdataPython
|
5066204
|
<filename>old/direct/replica_count_for_rse_cx.py
import cx_Oracle, sys, uuid
from dburl import schema as oracle_schema, user, password, host, port, service
conn = cx_Oracle.connect(user, password, "%s:%s/%s" % (host, port, service))
rse_name = sys.argv[1]
c = conn.cursor()
c.execute("""select id from %(schema)s.rses where rse=:rse_name""" % {"schema":oracle_schema},
rse_name=rse_name)
rse_id=c.fetchone()[0]
rse_id = uuid.UUID(bytes=bytes(rse_id)).hex.upper()
#c.execute("""select * from %(schema)s.rses where id=:rse_id""" % {"schema":oracle_schema}, rse_id=rse_id)
#print c.fetchone()
#sys.exit(0)
print(type(rse_id), rse_id)
# print(rse_id.bytes)
c.execute("""select count(*)
from %(schema)s.replicas rep
where
rep.rse_id=:rse_id
""" %
{"schema":oracle_schema},
rse_id=rse_id
)
print "%s: %d" % (rse_name, c.fetchone()[0])
|
StarcoderdataPython
|
1783658
|
import ast
class KeywordTransformer(ast.NodeTransformer):
def visit_FunctionDef(self, node):
return node
def visit_AsyncFunctionDef(self, node):
return node
def visit_ClassDef(self, node):
return node
def visit_Return(self, node):
if node.value is None:
return node
return ast.If(
test=ast.NameConstant(
value=True,
lineno=node.lineno,
col_offset=node.col_offset
),
body=[
ast.Expr(
value=ast.Yield(
value=node.value,
lineno=node.lineno,
col_offset=node.col_offset
),
lineno=node.lineno,
col_offset=node.col_offset
),
ast.Return(
value=None,
lineno=node.lineno,
col_offset=node.col_offset
)
],
orelse=[],
lineno=node.lineno,
col_offset=node.col_offset
)
def visit_Delete(self, node):
return ast.If(
test=ast.NameConstant(
value=True,
lineno=node.lineno,
col_offset=node.col_offset
),
body=[
ast.If(
test=ast.Compare(
left=ast.Str(
s=target.id,
lineno=node.lineno,
col_offset=node.col_offset
),
ops=[
ast.In(
lineno=node.lineno,
col_offset=node.col_offset
)
],
comparators=[
self.globals_call(node)
],
lineno=node.lineno,
col_offset=node.col_offset
),
body=[
ast.Expr(
value=ast.Call(
func=ast.Attribute(
value=self.globals_call(node),
attr='pop',
ctx=ast.Load(),
lineno=node.lineno,
col_offset=node.col_offset
),
args=[
# 'x'
ast.Str(
s=target.id,
lineno=node.lineno,
col_offset=node.col_offset
)
],
keywords=[],
lineno=node.lineno,
col_offset=node.col_offset
),
lineno=node.lineno,
col_offset=node.col_offset
)
],
orelse=[
# del x
ast.Delete(
targets=[target],
lineno=node.lineno,
col_offset=node.col_offset
)
],
lineno=node.lineno,
col_offset=node.col_offset
)
if isinstance(target, ast.Name) else
ast.Delete(
targets=[target],
lineno=node.lineno,
col_offset=node.col_offset
)
for target in node.targets
],
orelse=[],
lineno=node.lineno,
col_offset=node.col_offset
)
def globals_call(self, node):
return ast.Call(
func=ast.Name(
id='globals',
ctx=ast.Load(),
lineno=node.lineno,
col_offset=node.col_offset
),
args=[],
keywords=[],
lineno=node.lineno,
col_offset=node.col_offset
)
|
StarcoderdataPython
|
4902841
|
#!/usr/bin/env python3
# -----------------------------------------------------------------------------
#
# P A G E B O T E X A M P L E S
#
# www.pagebot.io
# Licensed under MIT conditions
#
# -----------------------------------------------------------------------------
#
# E03_BabelStringMetrics.py
#
# Show some principles of FlatContext usage.
from pagebot import getContext
from pagebot.document import Document
from pagebot.constants import A3, TOP, EXPORT
from pagebot.conditions import *
from pagebot.elements import *
from pagebot.toolbox.units import *
from pagebot.toolbox.color import noColor, color
from pagebot.toolbox.transformer import path2FileName
FILENAME = path2FileName(__file__)
def draw(contextName):
context = getContext(contextName)
exportPath = '%s/%s-%s.pdf' % (EXPORT, FILENAME, contextName)
# Landscape A3.
H, W = A3
SQ = 150
P = 50
doc = Document(w=W, h=H, context=context)
page = doc[1]
# Sets the page padding, not equal to test vertical position.
page.padding = P, P, 2*P, P
style = dict(font='PageBot-Regular', fontSize=pt(100), textFill=color(0))#, w=800)
bs = context.newString('ABCD', style)#, w=800)
bs.add('EFGH', dict(fontSize=200, textFill=color(0, 1, 0)))
tw, th = bs.textSize
newText(bs, x=P, y=P, parent=page)#, conditions=Fit())
newLine(x=P, y=P, w=tw, h=0, stroke=0, parent=page)#, conditions=Fit())
'''
#print(bs.topLineXHeight)
print(bs.w)
#print(bs.topLineXHeight)
bs.add('IJKL', dict(fontSize=300))
# Parent of the element is the current page.
e = newText(bs, w=SQ, h=SQ, parent=page, conditions=Fit())
print(bs.w)
x, y, w, h = e.x, e.y, e.w, e.h
newRect(parent=page, w=w, h=h, x=x, y=y, fill=None, stroke=color(0, 0, 1),
strokeWidth=0.5)
#print(e.bs)
#print(e.bs.cs) # FormattedString (DrawBot), <FlatBabelData (Flat)
for line in bs.lines:
for run in line.runs:
print(' * run', run)
'''
# Solve conditions of all placed elements on the page
page.solve()
view = doc.view
view.showPadding = True
doc.export(exportPath)
for contextName in ('DrawBot', 'Flat'):
draw(contextName)
|
StarcoderdataPython
|
3592763
|
<filename>angr/angr/procedures/win_user32/wprintf.py
from ..libc.sprintf import sprintf as wsprintfA
|
StarcoderdataPython
|
5141471
|
<gh_stars>0
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
from mcculw import ul
from mcculw.ul import ULError
from mcculw.enums import BoardInfo, InfoType, ULRange, ErrorCode, ScanOptions
class AoInfo:
"""Provides analog output information for the device with the specified
board number.
NOTE: This class is primarily used to provide hardware information for the
library examples and may change some hardware configuration values. It is
recommended that values provided by this class be hard-coded in production
code.
Parameters
----------
board_num : int
The board number associated with the device when created with
:func:`.create_daq_device` or configured with Instacal.
"""
def __init__(self, board_num):
self._board_num = board_num
@property
def board_num(self):
return self._board_num
@property
def num_chans(self):
return ul.get_config(InfoType.BOARDINFO, self._board_num, 0,
BoardInfo.NUMDACHANS)
@property
def is_supported(self):
return self.num_chans > 0
@property
def resolution(self):
return ul.get_config(InfoType.BOARDINFO, self._board_num, 0,
BoardInfo.DACRES)
@property
def supports_scan(self):
return ScanOptions.CONTINUOUS in self.supported_scan_options
@property
def supported_scan_options(self):
try:
scan_options_supported = ScanOptions(ul.get_config(
InfoType.BOARDINFO, self._board_num, 0,
BoardInfo.DACSCANOPTIONS))
except ULError:
scan_options_supported = ScanOptions(0)
return scan_options_supported
@property
def supported_ranges(self):
result = []
# Check if the range is ignored by passing a bogus range in
try:
ul.a_out(self._board_num, 0, -5, 0)
range_ignored = True
except ULError as e:
if (e.errorcode == ErrorCode.NETDEVINUSE or
e.errorcode == ErrorCode.NETDEVINUSEBYANOTHERPROC):
raise
range_ignored = False
if range_ignored:
# Try and get the range configured in InstaCal
try:
curr_range = ULRange(ul.get_config(InfoType.BOARDINFO,
self._board_num, 0,
BoardInfo.DACRANGE))
result.append(curr_range)
except ULError as e:
if (e.errorcode == ErrorCode.NETDEVINUSE or
e.errorcode == ErrorCode.NETDEVINUSEBYANOTHERPROC):
raise
else:
for dac_range in ULRange:
try:
ul.a_out(self._board_num, 0, dac_range, 0)
result.append(dac_range)
except ULError as e:
if (e.errorcode == ErrorCode.NETDEVINUSE or
e.errorcode == ErrorCode.NETDEVINUSEBYANOTHERPROC):
raise
return result
@property
def supports_v_out(self):
ranges_supported = self.supported_ranges
v_out_supported = False
if ranges_supported:
try:
ul.v_out(self._board_num, 0, ranges_supported[0], 0)
v_out_supported = True
except ULError:
v_out_supported = False
return v_out_supported
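
# Usage sketch (not part of the library): query the analog-output capabilities of
# board 0, assuming the device was already configured with InstaCal or
# create_daq_device. Note that supported_ranges performs test writes, as warned
# in the class docstring.
if __name__ == '__main__':
    ao_info = AoInfo(0)
    if ao_info.is_supported:
        print('AO channels:', ao_info.num_chans)
        print('DAC resolution (bits):', ao_info.resolution)
        print('Supported ranges:', ao_info.supported_ranges)
    else:
        print('This board has no analog outputs.')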
|
StarcoderdataPython
|
6611697
|
<filename>ensemble_clustering.py
import sys
import copy
from munkres import Munkres
from sklearn.cluster import KMeans, AgglomerativeClustering, SpectralClustering
from increasing_cluster import tran_increase
from plot_cluster import generating_KMeans_plot, generating_Spectral_plot, generating_Agglomerative_plot, generating_Ensemble_plot
from statistic_cluster import statistic_KMeans, statistic_Spectral, statistic_Agglomerative, statistic_Ensemble
def replaced(array, u1, u2, cor):
    # Copy the array for isolation: deepcopy is needed because plain assignment
    # would only copy a reference and mutations would leak back to the caller.
replaced = copy.deepcopy(array)
#cor is the corresponding list
for row, col in cor:
#u1[row] and u2[col] is corresponded
for idx in range(len(array)):
#if the element of array is equal to u2[col]
if array[idx] == u2[col]:
#the element is corresponding to u1[row]
#so isolated list replaced is replaced by u1[row]
replaced[idx] = u1[row]
return replaced
def benefit_to_cost(matrix):
cost_matrix = []
for row in matrix:
cost_row = []
for col in row:
cost_row = cost_row + [(sys.maxsize - col)]
cost_matrix = cost_matrix + [cost_row]
return cost_matrix
def relabel(array1, array2):
#this function returns relabeled array2
if len(array1)==len(array2):
# set1 is the unique set of array1
set1 = set(array1)
# u1 is the unique list of array1
u1 = list(set1)
# set2 is the unique set of array2
set2 = set(array2)
# set2 is the unique list of array1
u2 = list(set2)
#matrix is the Corresponding matrix between u1 and u2
matrix = [[0 for i in range(len(u2))]for j in range(len(u1))]
for i in range(len(array1)):
#item_1 is the index of array1's element in u1
item_1 = u1.index(array1[i])
#item_2 is the index of array2's element in u2
item_2 = u2.index(array2[i])
#this situation means 1 correspondence between item_1 and item2 is observed
#so corresponding location in corresponding matrix is incremented
matrix[item_1][item_2] = matrix[item_1][item_2] + 1
cost_matrix = benefit_to_cost(matrix)
#Munkers library solve the cost minimization problem
#but I would like to solve benefit maximization problem
#so convert benefit matrix into cost matrix
        # create Munkres object
m = Munkres()
#get the most corresponded correspondance
indexes = m.compute(cost_matrix)
        # array2 is used as an integer array, so convert it just in case;
        # list() is needed so that replaced() can take len() and index it
        array2 = list(map(int, array2))
#call replaced function to relace array2 according to object indexes
replaced_matrix = replaced(array2, u1, u2, indexes)
return replaced_matrix
def relabel_cluster(clusters):
#use first object in list object clusters as criteria
criteria = clusters[0]
# M is the number of review in each clustering
M = len(criteria)
# N is the number of clustering
N = len(clusters)
for idx in range(1,N):
#if wrong size of clustering appears, stop the process
if len(clusters[idx]) != M:
print "Cluster "+str(idx)+" is out of size"
return -1
clusters[idx] = relabel(criteria, clusters[idx])
return clusters
def transpose(array):
#Transpose list
return list(map(list, zip(*array)))
def voting(clusters):
#Transpose Clusters
clusters = transpose(clusters)
voted = []
for row in clusters:
#Unique Set of item in the row
u = list(set(row))
#Counter corresponding to object u
counter = [0 for i in u]
# fill object counter by counting the object u in object row
for idx in range(len(u)):
counter[idx] = row.count(u[idx])
#find the index of the most appeared object in the row
max_idx = counter.index(max(counter))
#choose the most appeared object
voted = voted + [u[max_idx]]
#return the result of majority vote
return voted
def generating_ensemble_clusters(X, y, barcode, feature_names):
# KMeans Algorithm
    print('Generating KMeans cluster...')
y_KMeans = KMeans(n_clusters = 2).fit(X)
generating_KMeans_plot(2, X, y_KMeans)
statistic_KMeans(2, X, y_KMeans, barcode, feature_names)
# print y_KMeans.labels_
# SpectralClustering Algorithm
    print('Generating Spectral cluster...')
y_SC = SpectralClustering(n_clusters = 2).fit(X)
generating_Spectral_plot(2, X, y_SC)
statistic_Spectral(2, X, y_SC, barcode, feature_names)
# print y_SC.labels_
# AgglomerativeClustering Algorithm
y_AC = AgglomerativeClustering(n_clusters = 2).fit(X)
generating_Agglomerative_plot(2, X, y_AC)
statistic_Agglomerative(2, X, y_AC, barcode, feature_names)
# print y_AC.labels_
# ensembling phase
    print('Appending three different labels into a cluster...')
clusters = []
clusters.append(list(y_KMeans.labels_))
clusters.append(list(y_SC.labels_))
clusters.append(list(y_AC.labels_))
# print "=========="
# for cluster in clusters:
# print cluster
# print "=========="
    print('Relabelling the cluster...')
relabeled_clusters = relabel_cluster(clusters)
# for cluster in relabeled_clusters:
# print cluster
# print "=========="
    print('Voting the cluster...')
voting_clusters = voting(relabeled_clusters)
# print voting_clusters
    print('Transforming into increasing one...')
final_clusters = tran_increase(voting_clusters)
# print final_clusters
generating_Ensemble_plot(2, X, final_clusters)
statistic_Ensemble(2, X, final_clusters, barcode, feature_names)
return final_clusters
if __name__ == '__main__':
    clusters = [[1, 2, 3, 3, 2, 2, 1], [3, 2, 1, 1, 1, 1, 4], [2, 3, 1, 1, 1, 1, 1]]
    print("Input:")
    for cluster in clusters:
        print(cluster)
    print("========")
    # relabeling phase
    relabeled_clusters = relabel_cluster(clusters)
    print("relabeled clusters:")
    for cluster in relabeled_clusters:
        print(cluster)
    print("========")
    # voting phase
    print("Output:")
    print(voting(relabeled_clusters))
    # print(replace_by_cor([0, 1, 2], [[0, 2], [1, 1], [2, 0]]))
|
StarcoderdataPython
|
5088345
|
<gh_stars>0
# Copyright (C) 2018 Bloomberg LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <http://www.apache.org/licenses/LICENSE-2.0>
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Execution Controller
====================
An instance of the Execution controller.
All this stuff you need to make the execution service work.
Contains scheduler, execution instance, an interface to the bots
and an operations instance.
"""
import logging
from .scheduler import Scheduler
from .bots.instance import BotsInterface
from .execution.instance import ExecutionInstance
from .operations.instance import OperationsInstance
class ExecutionController:
def __init__(self, data_store, *, storage=None, action_cache=None, action_browser_url=None,
property_keys=None, bot_session_keepalive_timeout=None):
self.__logger = logging.getLogger(__name__)
scheduler = Scheduler(data_store, action_cache=action_cache, action_browser_url=action_browser_url)
self._execution_instance = ExecutionInstance(scheduler, storage, property_keys)
self._bots_interface = BotsInterface(scheduler, bot_session_keepalive_timeout=bot_session_keepalive_timeout)
self._operations_instance = OperationsInstance(scheduler)
def register_instance_with_server(self, instance_name, server):
self._execution_instance.register_instance_with_server(instance_name, server)
self._bots_interface.register_instance_with_server(instance_name, server)
self._operations_instance.register_instance_with_server(instance_name, server)
def stream_operation_updates(self, message_queue, operation_name):
operation = message_queue.get()
while not operation.done:
yield operation
operation = message_queue.get()
yield operation
def cancel_operation(self, name):
# TODO: Cancel leases
raise NotImplementedError("Cancelled operations not supported")
@property
def execution_instance(self):
return self._execution_instance
@property
def bots_interface(self):
return self._bots_interface
@property
def operations_instance(self):
return self._operations_instance
|
StarcoderdataPython
|
3330626
|
<filename>hard/Median Of Two Sorted Arrays/test_solution.py
import pytest
from median_two_sorted_arrays import Solution as Solution
def test_example_1():
### Example 1:
##Input:
nums1 = [1,3]
nums2 = [2]
##Output:
output = 2.00000
##Explanation: merged array = [1,2,3] and median is 2.
# Act
sol = Solution()
median1 = sol.findMedianSortedArrays(nums1, nums2)
# Assert
print(f"**Testing Solution, Example 1: {median1} should be {output}** \r\n")
assert(median1 == output)
def test_example_2():
### Example 2:
##Input:
nums1 = [1,2]
nums2 = [3,4]
##Output: 2.50000
output = 2.50000
##Explanation: merged array = [1,2,3,4] and median is (2 + 3) / 2 = 2.5.
# Act
sol = Solution()
median2 = sol.findMedianSortedArrays(nums1, nums2)
# Assert
print(f"**Testing Solution, Example 2: {median2} should be {output}** \r\n")
assert(median2 == output)
def test_example_3():
### Example 3:
#
#Input:
nums1 = [0,0]
nums2 = [0,0]
#Output: 0.00000
output = 0.00000
# Act
sol = Solution()
median3 = sol.findMedianSortedArrays(nums1, nums2)
# Assert
print(f"**Testing Solution, Example 3: {median3} should be {output}** \r\n")
assert(median3 == output)
def test_example_4():
### Example 4:
#
#Input:
nums1 = []
nums2 = [1]
#Output: 1.00000
output = 1.00000
# Act
sol = Solution()
median4 = sol.findMedianSortedArrays(nums1, nums2)
# Assert
print(f"**Testing Solution, Example 4: {median4} should be {output}** \r\n")
assert(median4 == output)
def test_example_5():
### Example 5:
#
#Input:
nums1 = [2]
nums2 = []
#Output: 2.00000
output = 2.00000
# Act
sol = Solution()
median5 = sol.findMedianSortedArrays(nums1, nums2)
# Assert
print(f"**Testing Solution, Example 5: {median5} should be {output}** \r\n")
assert(median5 == output)
def test_example_6():
# Setup
nums1 = [1,3]
nums2 = [2,7]
output = 2.50000
# Act
sol = Solution()
median2 = sol.findMedianSortedArrays(nums1, nums2)
# Assert
print(f"**Testing Solution, Example 2: {median2} should be {output}** \r\n")
assert(median2 == output)
|
StarcoderdataPython
|
11244083
|
timeout = 300
capture_output = True
accesslog = '/home/dockeruser/gunicorn-access.log'
errorlog = '/home/dockeruser/gunicorn-error.log'
loglevel = 'debug'
bind = "0.0.0.0:9000"
secure_scheme_headers = {
'X-FORWARDED-PROTOCOL': 'ssl',
'X-FORWARDED-PROTO': 'https',
'X-FORWARDED-SSL': 'on'
}
def post_fork(server, worker):
server.log.info('Worker spawned (pid: %s)', worker.pid)
def pre_fork(server, worker):
pass
def pre_exec(server):
server.log.info('Forked child, re-executing.')
def when_ready(server):
server.log.info('Server is ready. Spawning workers')
|
StarcoderdataPython
|
11371987
|
<reponame>manninosi/Data_Incubator_LET<filename>Data_Inc_Section1.py
import numpy as np
import pandas as pd
file = "county_lex_2020-04-14.csv.gz"
df = pd.read_csv(file, compression='gzip', header=0)
countys = df.columns.values[1:]
col_names = dict(zip(countys, ["a" + lab for lab in countys] ))
df = df.rename(columns = col_names)
df_long = pd.wide_to_long(df, stubnames="a", i=['COUNTY_PRE'], j='col')
df_long = df_long.reset_index(drop=False)
df_long = df_long.rename(columns = {"col" : "COUNTY", "a" : "LEX"})
df_long.to_csv(r'reshaped.csv',index=False)
|
StarcoderdataPython
|
44989
|
<gh_stars>10-100
from .mlm import MLMTopicClassifier
from .mnli import NLITopicClassifierWithMappingHead, NLITopicClassifier
from .nsp import NSPTopicClassifier
from .babeldomains import BabelDomainsClassifier
from .wndomains import WNDomainsClassifier
__all__ = [
"NLITopicClassifierWithMappingHead",
"NLITopicClassifier",
"MLMTopicClassifier",
"NSPTopicClassifier",
"BabelDomainsClassifier",
"WNDomainsClassifier",
]
|
StarcoderdataPython
|
9697826
|
if __name__ == '__main__':
input = [[int(y) for y in x.strip()] for x in open('input', 'r').readlines()]
oxygen = input.copy()
co2 = input.copy()
for i in range(len(input[0])):
if(len(oxygen) > 1):
oxygen = [x for x in oxygen if x[i] == int(sum([x[i] for x in oxygen]) >= len(oxygen) / 2)]
if(len(co2) > 1):
co2 = [x for x in co2 if x[i] == int(sum([x[i] for x in co2 ]) < len(co2) / 2)]
if (len(oxygen) == 1 and len(co2) == 1):
break
print(int(''.join(str(x) for x in oxygen[0]), 2) *
int(''.join(str(x) for x in co2[0]), 2))
|
StarcoderdataPython
|
6497181
|
<filename>src/leetcode_1771_maximize_palindrome_length_from_subsequences.py
# @l2g 1771 python3
# [1771] Maximize Palindrome Length From Subsequences
# Difficulty: Hard
# https://leetcode.com/problems/maximize-palindrome-length-from-subsequences
#
# You are given two strings, word1 and word2. You want to construct a string in the following manner:
#
# Choose some non-empty subsequence subsequence1 from word1.
# Choose some non-empty subsequence subsequence2 from word2.
# Concatenate the subsequences: subsequence1 + subsequence2, to make the string.
#
# Return the length of the longest palindrome that can be constructed in the described manner.
# If no palindromes can be constructed,return 0.
# A subsequence of a string s is a string that can be made by deleting some (possibly none) characters from s without changing the order of the remaining characters.
# A palindrome is a string that reads the same forward as well as backward.
#
# Example 1:
#
# Input: word1 = "cacb", word2 = "cbba"
# Output: 5
# Explanation: Choose "ab" from word1 and "cba" from word2 to make "abcba", which is a palindrome.
# Example 2:
#
# Input: word1 = "ab", word2 = "ab"
# Output: 3
# Explanation: Choose "ab" from word1 and "a" from word2 to make "aba", which is a palindrome.
# Example 3:
#
# Input: word1 = "aa", word2 = "bb"
# Output: 0
# Explanation: You cannot construct a palindrome from the described method, so return 0.
#
# Constraints:
#
# 1 <= word1.length, word2.length <= 1000
# word1 and word2 consist of lowercase English letters.
#
#
from functools import lru_cache


class Solution:
def longestPalindrome(self, word1: str, word2: str) -> int:
self.total_word = word1 + word2
@lru_cache(None)
def dfs(l, r):
if l == r:
return 1
if l > r:
return 0
if self.total_word[l] == self.total_word[r]:
return dfs(l + 1, r - 1) + 2
return max(dfs(l + 1, r), dfs(l, r - 1))
ans = 0
for letter in "abcdefghijklmnopqrstuvwxyz":
left = word1.find(letter)
right = word2.rfind(letter)
if left != -1 and right != -1:
ans = max(ans, dfs(left, len(word1) + right))
return ans
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_1771.py")])
|
StarcoderdataPython
|
3268217
|
import pytest
import json
from discopy.biclosed import Ty
from lambeq.ccg2discocat.ccg_tree import CCGTree
@pytest.fixture
def tree():
n, s = Ty('n'), Ty('s')
the = CCGTree(text='the', biclosed_type=n << n)
do = CCGTree(text='do', biclosed_type=s >> s)
do_unary = CCGTree(text='do', rule='U', biclosed_type=n, children=(do,))
return CCGTree(text='the do', rule='FA', biclosed_type=n, children=(the, do_unary))
def test_child_reqs(tree):
with pytest.raises(ValueError):
CCGTree(rule='U', biclosed_type=tree.biclosed_type, children=tree.children)
def test_json(tree):
assert CCGTree.from_json(None) is None
assert CCGTree.from_json(tree.to_json()) == tree
assert CCGTree.from_json(json.dumps(tree.to_json())) == tree
|
StarcoderdataPython
|
8033703
|
<gh_stars>0
from fishtext.api import FishTextJson, FishTextHtml, FishTextAPI
|
StarcoderdataPython
|
3242209
|
<gh_stars>1-10
import agents
import argparse
from habitat.core.challenge import Challenge
import importlib
from submit_args import fill_args
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--phase", type=str, required=False, choices=["dev", "standard", "challenge", "video"]
)
parser.add_argument(
"--agent_module",
required=False,
type=str,
default="example",
help="agent module name",
)
parser.add_argument(
"--exp_config", type=str, required=False, help="Config within habitat baselines"
)
parser.add_argument(
"--checkpoint_path", type=str, required=False, help="Path to checkpoint"
)
parser.add_argument(
'--num_episodes', type=int, required=False, default=None ,help="Number of episodes to evaluate. Only works in dev mode."
)
parser.add_argument(
"--no_fill", action='store_true', required=False, help="If Set, skips fill_args"
)
parser.add_argument(
"--external", action='store_true', required=False, help="If Set, agents are loaded from extern_agents folder"
)
parser.add_argument(
"--video_dir", type=str, default=None, help="Path where videos will be logged"
)
args = parser.parse_args()
if not args.no_fill:
args = fill_args(args)
phase = args.phase
challenge = Challenge(phase = phase)
if args.phase is None or args.phase == "dev" or args.phase == "video":
if args.num_episodes is not None:
challenge.num_episodes = args.num_episodes
if args.external:
walker = importlib.import_module(f'extern_agents.{args.agent_module}')
else:
walker = importlib.import_module(f'agents.{args.agent_module}')
agent = walker.get_agent(args.exp_config, challenge, args.checkpoint_path)
if args.video_dir is not None:
from agents.video_walker import VideoWalker
args.phase = "video"
agent = VideoWalker(agent, args.video_dir)
challenge.submit(agent)
if __name__ == "__main__":
main()
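
# Example invocation (sketch). The script name is hypothetical and the config and
# checkpoint paths are placeholders, not files known to exist in this repository:
#
#   python submit.py --phase dev --agent_module example \
#       --exp_config habitat_baselines/config/pointnav/ppo_pointnav.yaml \
#       --checkpoint_path data/checkpoints/ckpt.0.pth --num_episodes 10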
|
StarcoderdataPython
|
1805488
|
from django.test import TestCase, Client
class AboutURLTest(TestCase):
@classmethod
def setUpClass(cls):
        # Create a client instance. It is not authorized.
super().setUpClass()
cls.guest_client = Client()
def test_urls_exists_at_desired_locations(self):
about_response = AboutURLTest.guest_client.get('/about/author/')
self.assertEqual(about_response.status_code, 200)
tech_response = AboutURLTest.guest_client.get('/about/tech/')
self.assertEqual(tech_response.status_code, 200)
def test_urls_uses_correct_templates(self):
about_response = AboutURLTest.guest_client.get('/about/author/')
self.assertTemplateUsed(about_response, 'about/author.html')
tech_response = AboutURLTest.guest_client.get('/about/tech/')
self.assertTemplateUsed(tech_response, 'about/tech.html')
|
StarcoderdataPython
|
127107
|
<gh_stars>0
# Generated by Django 2.1.5 on 2020-01-03 10:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0006_orders_amount'),
]
operations = [
migrations.AlterField(
model_name='product',
name='desc',
field=models.CharField(max_length=600),
),
]
|
StarcoderdataPython
|
11201901
|
<filename>bananadbg.py<gh_stars>0
# Copyright (c) 2017 Akuli
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Simple debugging tool."""
import argparse
import code
import collections
import importlib.util
import inspect
import math
import pydoc
import shlex
import shutil
import sys
import textwrap
import traceback
try:
# Just importing readline is enough to set up handy keyboard
# shortcuts.
import readline # noqa
except ImportError:
# Probably Windows.
pass
__all__ = ['Command', 'DebugConsole', 'debug']
_NOTHING = object()
Command = collections.namedtuple('Command', 'func reqargs optargs doc')
def _wprint(string):
"""Wrap and print a string."""
print(textwrap.fill(string), end='\n\n')
def _print_list(stringlist):
maxlen = max(map(len, stringlist))
columns = shutil.get_terminal_size().columns // (maxlen + 2)
if columns < 2:
for string in stringlist:
print(string)
else:
rows = math.ceil(len(stringlist) / columns)
for y in range(rows):
line = ' '.join(
string.ljust(maxlen) for string in stringlist[y::rows])
print(line.rstrip(' '))
class _Helper:
def __init__(self, console):
self._console = console
def __repr__(self):
return textwrap.fill(
"Type help() for help about this debugging console or "
"help(something) to use Python's built-in help().")
# We can't do something=None to allow help(None).
def __call__(self, something=_NOTHING):
if something is _NOTHING:
_wprint("This is a special Python prompt for debugging large "
"projects that consist of several modules.")
_wprint("Using this prompt is easy. You can enter any Python "
"commands to run them. They will be run in the "
"current module, which is a lot like the current "
"working directory of a shell or a command prompt.")
print("Here is a list of the special commands:")
for name, command in sorted(self._console.commands.items()):
if command.doc is None:
print(name)
else:
summary = inspect.cleandoc(command.doc).split('\n')[0]
# TODO: some way to get more detailed help of the special
# commands.
print(' %-10s %s' % (name, summary))
else:
pydoc.help(something)
class DebugConsole(code.InteractiveConsole):
"""A special console for debugging purposes."""
commands = {} # {name: Command, ...}
def __init__(self, *args, verbose=False, **kwargs):
"""Initialize the console."""
super().__init__(*args, **kwargs)
self._helper = _Helper(self)
self.verbose = verbose
self.modulename = None
self.module = None
# Tell code.InteractiveConsole to work in self.module.
@property
def locals(self):
return self.module.__dict__
# code.InteractiveConsole.__init__ assigns to this.
@locals.setter
def locals(self, value):
pass
def _check_args(self, commandname, args) -> bool:
"""Check if arguments for a command are valid."""
command = self.commands[commandname]
if len(args) < len(command.reqargs):
print("Missing arguments for %s." % commandname, file=sys.stderr)
elif len(args) > len(command.reqargs) + len(command.optargs):
print("Too many arguments for %s." % commandname, file=sys.stderr)
else:
# everything's fine
return True
print("Usage:", commandname, end='')
for arg in command.reqargs:
print(' ' + arg.upper(), end='')
for arg in command.optargs:
print(' [' + arg.upper() + ']', end='')
print()
return None
def raw_input(self, prompt=''):
if prompt != sys.ps1:
# It's probably '... ', no need to support special commands.
return input(prompt)
while True:
string = input(prompt)
try:
commandname, *args = shlex.split(string)
ok = self._check_args(commandname, args)
except (ValueError, KeyError):
# Not a special command.
return string
if ok:
try:
self.run_command(commandname, args)
except Exception:
print("An exception occurred while running %s!"
% commandname, file=sys.stderr)
traceback.print_exc()
def run_command(self, commandname, args):
self.commands[commandname].func(self, *args)
@classmethod
def command(cls, func):
if 'commands' not in cls.__dict__:
# Someone is subclassing DebugConsole and cls.commands
# comes from DebugConsole or some other parent class. The
# subclass must have a command mapping that gets commands
# from the parent class, but adding a command won't add it
# to the parent class.
cls.commands = collections.ChainMap({}, cls.commands)
reqargs = []
optargs = []
signature = inspect.signature(func)
# We need to get rid of the first argument because it's the
# DebugConsole instance.
params = list(signature.parameters.items())[1:]
for name, param in params:
if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
raise TypeError("unsupported function signature: "
+ func.__name__ + str(signature))
if param.default is inspect.Parameter.empty:
reqargs.append(name)
else:
optargs.append(name)
cls.commands[func.__name__] = Command(
func, reqargs, optargs, func.__doc__)
return func
# The special help variable.
def add_helper(self):
self.locals.setdefault('help', self._helper)
def remove_helper(self):
if self.locals.get('help', object()) is self._helper:
del self.locals['help']
def _setup_commands():
# This is a separate function to avoid polluting the namespace.
@DebugConsole.command
def cd(console, modulename='__main__'):
"""Change the current module.
The new module will be imported, and it can be relative to the
old module. If no arguments are given, go to __main__.
"""
if console.verbose:
print("Importing", modulename, "and changing the current",
"module to it")
# These need to be first because the rest of this must not run
# if these fail.
modulename = importlib.util.resolve_name(modulename, console.modulename)
module = importlib.import_module(modulename)
if console.module is not None:
# Not running for the first time.
console.remove_helper()
console.module = module
console.modulename = modulename
console.add_helper()
@DebugConsole.command
def ls(console):
"""Print a list of variables in the current module.
This is equivalent to dir(), but this prints the list nicely in
multiple columns.
"""
if console.verbose:
print("Listing the variables in", console.modulename)
_print_list(sorted(dir(console.module)))
@DebugConsole.command
def pwd(console):
"""Print the name of the current module."""
if console.verbose:
print("You are currently in", end=" ")
result = repr(console.module)
if result.startswith('<') and result.endswith('>'):
result = result[1:-1]
print(result)
@DebugConsole.command
def src(console, expression):
"""Evaluate the expression and print the source code of the result."""
obj = eval(expression, console.locals)
print(inspect.getsource(obj))
_setup_commands()
def debug(module='__main__', *, consoleclass=DebugConsole, **kwargs):
print("Starting a debugging session in module", repr(module),
"on Python", '.'.join(map(str, sys.version_info[:3])))
print("Type 'help' for more info.")
    console = consoleclass(**kwargs)
console.run_command('cd', [module])
console.interact('')
print("Exiting the debugging session.")
console.remove_helper()
def _main():
parser = argparse.ArgumentParser()
parser.add_argument(
'module', help="the initial current module")
parser.add_argument(
'-v', '--verbose', action='store_true', help="explain what is done")
args = parser.parse_args()
debug(**args.__dict__)
if __name__ == '__main__':
_main()
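# Usage sketch (added for illustration; the module/file name "debugger" is an
# assumption — adjust it to wherever this file actually lives):
#
#     from debugger import debug
#     debug('mypackage.mymodule', verbose=True)   # open the console in that module
#
# or via the CLI defined in _main():
#
#     python debugger.py mypackage.mymodule --verbose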
|
StarcoderdataPython
|
5091732
|
import argparse
import cv2
import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image
from torch.autograd import Variable
from torchvision.transforms import ToTensor, ToPILImage
from tqdm import tqdm
from model import Generator
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Test Single Video')
parser.add_argument('--upscale_factor', default=4, type=int, help='super resolution upscale factor')
parser.add_argument('--video_name', type=str, help='test low resolution video name')
parser.add_argument('--model_name', default='netG_epoch_4_100.pth', type=str, help='generator model epoch name')
opt = parser.parse_args()
UPSCALE_FACTOR = opt.upscale_factor
VIDEO_NAME = opt.video_name
MODEL_NAME = opt.model_name
model = Generator(UPSCALE_FACTOR).eval()
#uncomment for AWS
#if torch.cuda.is_available():
# model = model.cuda()
# for cpu
# model.load_state_dict(torch.load('epochs/' + MODEL_NAME, map_location=lambda storage, loc: storage))
model.load_state_dict(torch.load('epochs/' + MODEL_NAME, map_location=torch.device('cpu')))
videoCapture = cv2.VideoCapture(VIDEO_NAME)
fps = videoCapture.get(cv2.CAP_PROP_FPS)
frame_numbers = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
sr_video_size = (int(videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH) * UPSCALE_FACTOR),
int(videoCapture.get(cv2.CAP_PROP_FRAME_HEIGHT)) * UPSCALE_FACTOR)
compared_video_size = (int(videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH) * UPSCALE_FACTOR * 2 + 10),
int(videoCapture.get(cv2.CAP_PROP_FRAME_HEIGHT)) * UPSCALE_FACTOR + 10 + int(
int(videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH) * UPSCALE_FACTOR * 2 + 10) / int(
10 * int(int(
videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH) * UPSCALE_FACTOR) // 5 + 1)) * int(
int(videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH) * UPSCALE_FACTOR) // 5 - 9)))
output_sr_name = 'out_srf_' + str(UPSCALE_FACTOR) + '_' + VIDEO_NAME.split('.')[0] + '.avi'
output_compared_name = 'compare_srf_' + str(UPSCALE_FACTOR) + '_' + VIDEO_NAME.split('.')[0] + '.avi'
sr_video_writer = cv2.VideoWriter(output_sr_name, cv2.VideoWriter_fourcc('M', 'P', 'E', 'G'), fps, sr_video_size)
compared_video_writer = cv2.VideoWriter(output_compared_name, cv2.VideoWriter_fourcc('M', 'P', 'E', 'G'), fps,
compared_video_size)
# read frame
success, frame = videoCapture.read()
test_bar = tqdm(range(int(frame_numbers)), desc='[processing video and saving result videos]')
for index in test_bar:
if success:
            # Variable/volatile is deprecated; run the generator under no_grad instead
            image = ToTensor()(frame).unsqueeze(0)
            if torch.cuda.is_available():
                image = image.cuda()
            with torch.no_grad():
                out = model(image)
            out = out.cpu()
            out_img = out.data[0].numpy()
            out_img = (out_img * 255.0).clip(0, 255)  # clamp before casting to avoid uint8 wrap-around
            out_img = np.uint8(out_img).transpose((1, 2, 0))
# save sr video
sr_video_writer.write(out_img)
# make compared video and crop shot of left top\right top\center\left bottom\right bottom
out_img = ToPILImage()(out_img)
crop_out_imgs = transforms.FiveCrop(size=out_img.width // 5 - 9)(out_img)
crop_out_imgs = [np.asarray(transforms.Pad(padding=(10, 5, 0, 0))(img)) for img in crop_out_imgs]
out_img = transforms.Pad(padding=(5, 0, 0, 5))(out_img)
compared_img = transforms.Resize(size=(sr_video_size[1], sr_video_size[0]), interpolation=Image.BICUBIC)(
ToPILImage()(frame))
crop_compared_imgs = transforms.FiveCrop(size=compared_img.width // 5 - 9)(compared_img)
crop_compared_imgs = [np.asarray(transforms.Pad(padding=(0, 5, 10, 0))(img)) for img in crop_compared_imgs]
compared_img = transforms.Pad(padding=(0, 0, 5, 5))(compared_img)
# concatenate all the pictures to one single picture
top_image = np.concatenate((np.asarray(compared_img), np.asarray(out_img)), axis=1)
bottom_image = np.concatenate(crop_compared_imgs + crop_out_imgs, axis=1)
bottom_image = np.asarray(transforms.Resize(
size=(int(top_image.shape[1] / bottom_image.shape[1] * bottom_image.shape[0]), top_image.shape[1]))(
ToPILImage()(bottom_image)))
final_image = np.concatenate((top_image, bottom_image))
# save compared video
compared_video_writer.write(final_image)
# next frame
success, frame = videoCapture.read()
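    # Added: release the capture and writers so the output containers are finalized.
    videoCapture.release()
    sr_video_writer.release()
    compared_video_writer.release()
    # Example invocation (added for illustration; assumes this script is saved as
    # test_video.py next to an epochs/ directory holding the generator weights):
    #
    #     python test_video.py --video_name input.mp4 --upscale_factor 4 --model_name netG_epoch_4_100.pth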
|
StarcoderdataPython
|
6423256
|
#
# Copyright 2019 FMR LLC <<EMAIL>>
#
# SPDX-License-Identifier: MIT
#
"""Test role access to the accounts specified.
## Overview
The access_report command will display the number of accounts that the IAM
role does not have access to. For example:
$ awsrun --account 100200300400 --account 200300400100 access_report
Success: 2, Failures: 0
Note: no output is generated until all accounts have been tested, so it may look
like the command is hanging when processing a large number of accounts. With the
`--verbose` option, a success message is generated for each account as soon as
it has been processed:
$ awsrun --include Env=DEV access_report --verbose
400100200300: successful
100200300400: successful
200300400100: successful
Success: 3, Failures: 2
Unsuccessful attempts:
300200100400
300100400200
## Reference
### Synopsis
$ awsrun [options] access_report [command options]
### Configuration
The following is the syntax for the options that can be specified in the user
configuration file:
Commands:
access_report:
verbose: BOOLEAN
### Command Options
Some options can be overridden on the awsrun CLI via command line flags. In
those cases, the CLI flags are specified next to the option name below:
`verbose`, `--verbose`
: Display a message as each account is tested. By default, no output is
generated until all accounts have been processed.
"""
from awsrun.config import Bool
from awsrun.runner import Command
class CLICommand(Command):
"""Test role access to the accounts specified."""
@classmethod
def from_cli(cls, parser, argv, cfg):
parser.add_argument(
"--verbose",
"-v",
action="store_true",
default=cfg("verbose", type=Bool, default=False),
help="display accounts while being processed",
)
args = parser.parse_args(argv)
return cls(**vars(args))
def __init__(self, verbose=False):
self.verbose = verbose
self.total = 0
self.no_access = []
def execute(self, session, acct):
if self.verbose:
return f"{acct}: successful\n"
return None
def collect_results(self, acct, get_result):
self.total += 1
try:
result = get_result()
except Exception: # pylint: disable=broad-except
self.no_access.append(acct)
return
if result:
print(result, end="", flush=True)
def post_hook(self):
unsuccessful = len(self.no_access)
successful = self.total - unsuccessful
print(f"Success: {successful}, Failures: {unsuccessful}")
if unsuccessful:
print(f"\nUnsuccessful attempts:")
for acct in self.no_access:
print(acct)
|
StarcoderdataPython
|
1904753
|
<filename>datasets/MNINST/dnn_3_hidden_layers_with_batch_normalization/dnn_3_hidden_layers_with_batch_normalization.py
########################################
#
# Reproduced work of <NAME> Szegedy's paper
# Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift (https://arxiv.org/abs/1502.03167)
#
########################################
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data, mnist
from tensorflow.contrib.distributions import percentile
import tqdm
from collections import defaultdict
import matplotlib.pyplot as plt
import os
dtype = tf.float32
class LineGraph(object):
def __init__(self):
self.lines = defaultdict()
self.history = defaultdict(list)
def reset_tensors(self):
self.lines = defaultdict()
def add_scalar(self, tensor, legend):
# legend: type is string
# use legend as index of line
# Note: override existing setting
self.lines[legend] = tensor
def add_summary(self, session, index, feed_dict):
self.history['index'].append(index)
for line_name, line_tensor in self.lines.items():
data = session.run(fetches=line_tensor, feed_dict=feed_dict)
self.history[line_name].append(data)
def get_index(self):
return np.array(self.history['index'])
def plot(self, plt):
# plt: matplotlib.pyplot
# iterate lines
# draw line with index
# show legend
for line_name, line in self.lines.items():
x = self.get_index()
y = np.array(self.history[line_name])
plt.plot(x, y, label=line_name)
plt.legend()
class Figures(object):
def __init__(self):
self.figures = defaultdict(LineGraph)
return
def reset_tensors(self):
        for fig_name, fig in self.figures.items():
fig.reset_tensors()
def add_scalar(self, tensor, legend, fig_name):
# fig: key of class LineGraph()
# legend: key of different lines
self.figures[fig_name].add_scalar(tensor, legend)
def add_summary(self, session, fig_name, index, feed_dict):
# iterate figures
# iterate lines
# evaluate tensor
# record the value and index
self.figures[fig_name].add_summary(session, index, feed_dict)
def show(self, plt):
# plt: matplotlib.pyplot
for fig_name, f in self.figures.items():
# draw line graph
f.plot(plt)
# decorate the figure
plt.title(fig_name)
plt.show()
figures = Figures()
def add_percentiles_to_graph(x, percentile_list, fig_name):
for p in percentile_list:
# create percentile tensor
p_tensor = percentile(x, p)
# add tensors to a figure with name=enable_bn
figures.add_scalar(tensor=p_tensor,
legend=str(p),
fig_name=fig_name)
def get_path_saved_model(prefix, suffix):
return prefix + suffix + "/tmp-save"
def get_path_summary(prefix, suffix):
return prefix + suffix + "/"
def mkdir_if_not_exits(path):
dir_path = os.path.dirname(path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def weight_variable(shape, stddev=0.1):
init = tf.truncated_normal(shape, stddev=stddev)
return tf.Variable(init, dtype=dtype)
def summary_percentiles(x, percents):
for p in percents:
name = "percentile_" + str(p)
tf.summary.scalar(name, percentile(x, p))
def batch_norm_wrapper(x, is_training, enable_bn, decay=0.999):
epsilon = 1e-8
size = x.get_shape()[-1]
run_mean = tf.Variable(tf.zeros([size]),
dtype=dtype, trainable=False)
run_var = tf.Variable(tf.ones([size]),
dtype=dtype, trainable=False)
offset = tf.Variable(tf.zeros([size]), dtype=dtype)
scale = tf.Variable(tf.ones([size]), dtype=dtype)
    if enable_bn:
        if is_training:
batch_mean, batch_var = tf.nn.moments(x, axes=[0])
update_run_mean = tf.assign(
run_mean,
decay*run_mean + (1-decay)*batch_mean
)
update_run_var = tf.assign(
run_var,
decay*run_var + (1-decay)*batch_var
)
with tf.control_dependencies([update_run_mean, update_run_var]):
return tf.nn.batch_normalization(x, batch_mean, batch_var,
offset=offset, scale=scale,
variance_epsilon=epsilon)
else:
return tf.nn.batch_normalization(x, run_mean, run_var,
offset=offset, scale=scale,
variance_epsilon=epsilon)
else:
return x
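# Note (added): batch_norm_wrapper follows the usual batch-normalization recipe —
# during training it normalizes with the batch mean/variance and updates running
# estimates via an exponential moving average (decay=0.999); at inference
# (is_training=False) it normalizes with the stored running statistics instead.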
def build_graph(is_training, enable_bn, title):
# build graph with 3 hidden layers with 100 sigmoid activations each.
# variables are initialized to small Gaussian values
shapes={'h1': [mnist.IMAGE_PIXELS, 100],
'h2': [100, 100],
'h3': [100, 100],
'output': [100, mnist.NUM_CLASSES]}
activation = tf.nn.sigmoid
x = tf.placeholder(dtype, shape=[None, mnist.IMAGE_PIXELS])
y_ = tf.placeholder(dtype, shape=[None, mnist.NUM_CLASSES])
with tf.name_scope("L1"):
# 1st hidden layer
W1 = weight_variable(shapes['h1'])
z1 = tf.matmul(x, W1)
bn1 = batch_norm_wrapper(z1, is_training, enable_bn)
l1 = activation(bn1)
tf.summary.histogram("weights", W1)
tf.summary.histogram("activation", l1)
with tf.name_scope("L2"):
# 2nd hidden layer
W2 = weight_variable(shapes['h2'])
z2 = tf.matmul(l1, W2)
bn2 = batch_norm_wrapper(z2, is_training, enable_bn)
l2 = activation(bn2)
tf.summary.histogram("weights", W2)
tf.summary.histogram("activation", l2)
with tf.name_scope("L3"):
# 3rd hidden layer
W3 = weight_variable(shapes['h3'])
z3 = tf.matmul(l2, W3)
bn3 = batch_norm_wrapper(z3, is_training, enable_bn)
l3 = activation(bn3)
tf.summary.histogram("weights", W3)
tf.summary.histogram("activation", l3)
percentile_list = [15, 50, 85]
summary_percentiles(l3, percentile_list)
add_percentiles_to_graph(l3, percentile_list, title)
with tf.name_scope("output_layer"):
# output layer
W_out = weight_variable(shapes['output'])
y = tf.matmul(l3, W_out)
tf.summary.histogram("weights", W_out)
with tf.name_scope("loss"):
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_,
logits=y)
)
with tf.name_scope("train"):
lr = 0.5
train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)
with tf.name_scope("prediction"):
prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
with tf.name_scope("accuracy"):
accuracy = tf.reduce_mean(tf.cast(prediction, dtype))
return (x, y_), train_step, accuracy, y, tf.train.Saver()
def main():
# iterate settings: {'with_bn':True, 'without_bn':False}
# start training
# build graph
# start session
# init global variables
# feed the data set
# if print_period
# save batch accuracy into history
#
# train the batch data
# save the model
# start testing
# reset the default graph
# build the graph
# start session
# init global variable
# restore model
# print accuracy
#
# plot test accuracy history
# {title: enable batch normalization}
experiment_settings = {'with_bn': True, 'without_bn': False}
FLAGS_ = { 'batch_sz': 60,
'max_epochs': 50000,
'print_period': 100,
'summary_period': 50,
'path_data_set': 'MNIST_data/',
'path_saved_model': './tmp/',
'path_summary': './tmp/log/',
}
history_acc_ = defaultdict(list)
data_set = input_data.read_data_sets(FLAGS_['path_data_set'],
one_hot=True)
for title, enable_bn in experiment_settings.items():
# start training
# build graph
(x, y_), train_step, accuracy, _, saver = build_graph(
is_training=True,
enable_bn=enable_bn,
title=title
)
path_saved_model = get_path_saved_model(
FLAGS_['path_saved_model'], title)
mkdir_if_not_exits(path_saved_model)
path_summary = get_path_summary( FLAGS_['path_summary'], title)
mkdir_if_not_exits(path_summary)
with tf.Session() as sess:
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(path_summary)
writer.add_graph(sess.graph)
sess.run(tf.global_variables_initializer())
            for i in tqdm.tqdm(range(FLAGS_['max_epochs'])):
batch = data_set.train.next_batch(FLAGS_['batch_sz'])
if i % FLAGS_['summary_period'] == 0 or \
(i+1) == FLAGS_['max_epochs']:
s = sess.run(fetches=merged_summary,
feed_dict={x: batch[0], y_: batch[1]})
writer.add_summary(s, i)
figures.add_summary(session=sess,
fig_name=title, index=i,
feed_dict={x: batch[0], y_:batch[1]})
if i % FLAGS_['print_period'] == 0 or \
(i+1) == FLAGS_['max_epochs'] :
acc = sess.run(fetches=[accuracy],
feed_dict={ x: data_set.test.images,
y_: data_set.test.labels})
history_acc_[title].append(acc[0])
# start training
sess.run(fetches=[train_step],
feed_dict={x: batch[0], y_: batch[1]})
print("training accuracy of %s: %g" %
(title, history_acc_[title][-1]) )
# save model
saver.save(sess, path_saved_model)
# start testing
tf.reset_default_graph()
(x, y_), _, accuracy, y, saver = build_graph(is_training=False,
enable_bn=enable_bn,
title=title)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, path_saved_model)
acc = sess.run(fetches=[accuracy],
feed_dict={x: data_set.test.images,
y_: data_set.test.labels})
print("testing accuracy of %s: %g" %
(title, acc[0]))
# reset default graph before new iteration
tf.reset_default_graph()
# end of iteration
# plot test accuracy history
for title, _ in experiment_settings.items():
acc = np.array(history_acc_[title])
plt.plot(acc, label=title)
plt.legend()
plt.show()
figures.show(plt)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3212799
|
<reponame>CrepeGoat/cdf-estimation
from collections import namedtuple
import numpy as np
from scipy.interpolate import PPoly
import cvxopt
cvxopt.solvers.options['show_progress'] = False
cvxopt.solvers.options['maxiters'] = 500 # seems to reduce errors (unconfirmed)
from likelihood_funcs import *
'''
TODO
- improve "smoothness" input parameter
- change to operate on a [0,1] scale (0 = exact interpolation, 1 = uniform distribution)
- characterize slope with b_mid, instead of b_0
- reduces the repetition of calculations -> reduce errors
- reduces objective matrix non-sparsity
- format tests more professionally
- treat repeat samples as discrete samples
- i.e., make cdf discontinuous around X_i
- i.e., add dirac delta portions to the pdf
'''
def expand_vars(bmid_c, X):
n = X.shape[-1]
n_mid = n // 2
bmid_c = np.asarray(bmid_c)
b_mid, c = np.split(bmid_c, (1,), axis=-1)
alt_sign = (-1) ** np.arange(n)
diffc_diffX = np.diff(c, axis=-1) / np.diff(X, axis=-1)
bpart_lower, bpart_upper = np.array_split(
-2 * alt_sign[:-1] * diffc_diffX,
(n_mid,), axis=-1)
b_cumdiff = np.concatenate([
-bpart_lower[..., ::-1].cumsum(-1)[..., ::-1],
np.zeros_like(b_mid),
np.cumsum(bpart_upper, -1),
], axis=-1)
b = alt_sign * (b_cumdiff + alt_sign[n_mid]*b_mid)
#a_diffX = diffc_diffX - b[..., :-1]
a_diffX = np.diff(b, axis=-1) / 2
return namedtuple("SplineVars", "a_diffX b c")(a_diffX, b, c)
def expand_vars_lc(X):
n = X.shape[-1]
n_mid = n // 2
bmid_c = np.diagflat(np.ones(n+1, dtype=np.int64))
b_mid, c = np.split(bmid_c, (1,), axis=0)
diffc_diffX = np.diff(c, axis=0) / np.diff(X)[:, np.newaxis]
alt_sign = (-1) ** np.arange(n)[:, np.newaxis]
bpart_lower, bpart_upper = np.array_split(
-2 * alt_sign[:-1] * diffc_diffX,
(n_mid,), axis=0)
b_cumdiff = np.concatenate([
-bpart_lower[::-1].cumsum(0)[::-1],
np.zeros_like(b_mid),
np.cumsum(bpart_upper, 0),
], axis=0)
b = alt_sign * (b_cumdiff + alt_sign[n_mid]*b_mid)
a_diffX = np.diff(b, axis=0) / 2
return namedtuple("SplineVars", "a_diffX b c")(a_diffX, b, c)
def make_obj_scale(X, smoothness_factor=1):
n = X.shape[-1]
scale_a = smoothness_factor / (X[-1] - X[0])
scale_c = -d2dp2_rlhood(n, np.arange(n)) / (n.bit_length() * n)
return namedtuple("ObjectiveScales", "scale_a scale_c")(scale_a, scale_c)
def make_P_q(X, scale_a=np.ones(1), scale_e=np.ones(1), autoscale=True):
n = X.shape[-1]
a_diffX, b, c = expand_vars_lc(X)
outer_prod = lambda col: col * col[:,np.newaxis]
P_a = np.sum(
np.apply_along_axis(outer_prod, -1, a_diffX)
* ((scale_a / np.diff(X))[:, np.newaxis, np.newaxis]),
axis=0)
q_a = np.zeros(None)
P_c = np.sum(
np.apply_along_axis(outer_prod, -1, c)
* (scale_e[:, np.newaxis, np.newaxis]),
axis=0)
q_c = np.sum(
-(scale_e * np.arange(1, 2*n, 2) / n)[:, np.newaxis] * c,
axis=0)
P = 2*(P_a + P_c)
q = q_a + q_c
if autoscale:
min_val = min(
np.min(np.abs(P[P != 0])),
np.min(np.abs(q[q != 0]))
)
max_val = max(
np.max(np.abs(P)),
np.max(np.abs(q))
)
scale = 2 ** -(
# centers exponent range on zero
np.mean((np.frexp(min_val)[1], np.frexp(max_val)[1]))
# biases range to account for sums of n values
#+ n.bit_length() / 2
)
P = P * scale
q = q * scale
res = namedtuple("QuadProgramObj", "P q")(
np.asmatrix(P),
np.asmatrix(q).T
)
return res
def make_G_h(X):
n = X.shape[-1]
a_diffX, b, c = expand_vars_lc(X)
G_b = -b
h_b = np.zeros(b.shape[0])
G_c0 = -c[:1]
h_c0 = np.zeros(1)
G_cnm1 = c[-1:]
h_cnm1 = np.ones(1)
return namedtuple("QuadProgramBounds", "G h")(
np.asmatrix(np.concatenate((G_b, G_c0, G_cnm1), axis=0)),
np.asmatrix(np.concatenate((h_b, h_c0, h_cnm1), axis=0)).T,
)
def make_A_b(X):
return namedtuple("QuadProgramBounds", "A b")(
np.zeros((0, X.shape[-1]+1)),
np.zeros(0),
)
def bmid_c_init_state(X):
n = len(X)
n_mid = n // 2
'''
# straight line from first point to last point (when a^2 == 0)
b_mid = ((n-1) / n) / (X[-1] - X[0])
c = (.5 / n) + b_mid * (X - X[0])
return np.concatenate(([b_mid], c))
'''
# interpolation through all points (when e^2 == 0)
b_mid = (2/n) / (X[n_mid+1] - X[n_mid-1])
c = np.arange(1,2*n,2) / (2*n)
return np.concatenate(([b_mid], c))
#'''
def clean_optimizer_results(bmid_c_opt, X):
n = len(X)
bmid_c_opt = np.squeeze(np.array(bmid_c_opt))
d2P_X, dP_X, P_X = expand_vars(bmid_c_opt, X)
d2P_X = d2P_X / np.diff(X)
# Add leading/trailing endpoint regions. I.e., adds:
# 1) knots X0, Xnp1 that smoothly joins curve to the constant-value regions
# P(x) = 0 as x -> -inf,
# P(x) = 1 as x -> inf
# 2) 'dead knots' Xm1, Xnp2 with zero-valued derivatives & P = {0,1},
# from which PPoly can extrapolate for x values outside of the
# sampled region
d2P_X = np.concatenate((
np.zeros(1),
dP_X[:1]**2 / (4*P_X[0]),
d2P_X,
-dP_X[-1:]**2 / (4*(1-P_X[-1])),
np.zeros(1)
))
X0 = X[:1] - 2 * P_X[0] / dP_X[0]
Xnp1 = X[-1:] + 2 * (1-P_X[-1]) / dP_X[-1]
X = np.concatenate((
X0 - (X[0] - X0), # dead knot - included for extrapolation to -inf
X0,
X,
Xnp1,
Xnp1 + (Xnp1 - X[-1]), # dead knot - included for extrapolation to inf
))
P_X = np.concatenate((np.zeros(2), P_X, np.ones(1)))
dP_X = np.concatenate((np.zeros(2), dP_X, np.zeros(1)))
return X, P_X, dP_X, d2P_X
def cdf_approx(X): #, smoothness_factor=1):
"""
Generates a ppoly spline to approximate the cdf of a random variable,
from a 1-D array of i.i.d. samples thereof.
Args:
X: a collection of i.i.d. samples from a random variable.
args, kwargs: any options to forward to the cvxopt qp solver
Returns:
scipy.interpolate.PPoly object, estimating the cdf of the random variable.
Raises:
TODO
"""
# Pre-format input as ordered numpy array
X = np.asarray(X)
diff_X = np.diff(X)
if not (diff_X > 0).all():
X.sort()
diff_X = np.diff(X)
assert(diff_X.all()) # avoids case of duplicate X-values
n = len(X)
scale_axi, scale_ei = make_obj_scale(X)#, smoothness_factor)
P, q = make_P_q(X, scale_a=scale_axi, scale_e=scale_ei)
G, h = make_G_h(X)
#A, b = make_A_b(X) # simply unnecessary
bmid_c_init = bmid_c_init_state(X)
qp_res = cvxopt.solvers.qp(
cvxopt.matrix(P),
cvxopt.matrix(q),
cvxopt.matrix(G),
cvxopt.matrix(h),
#cvxopt.matrix(A),
#cvxopt.matrix(b),
#*args, **kwargs
)
X, P_X, dP_X, d2P_X = clean_optimizer_results(np.array(qp_res['x']), X)
return PPoly.construct_fast(np.stack((d2P_X, dP_X, P_X)), X, extrapolate=True)
if __name__ == "__main__":
from cdf_est_CVXOPT_tests import run_all_tests
run_all_tests()
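# Usage sketch (added for illustration; assumes numpy, scipy and cvxopt are
# installed and likelihood_funcs is importable):
#
#     import numpy as np
#     samples = np.random.normal(size=200)
#     cdf = cdf_approx(samples)      # scipy.interpolate.PPoly instance
#     cdf(0.0)                       # estimated P(X <= 0)
#     cdf.derivative()(0.0)          # corresponding pdf estimate at 0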
|
StarcoderdataPython
|
214797
|
<filename>reviewboard/scmtools/tests/test_repository.py
# Imports restored for this excerpt (module paths are assumed from the
# Review Board code base and may differ slightly):
import os

from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.core.exceptions import ValidationError

from djblets.testing.decorators import add_fixtures

from reviewboard.scmtools.models import Repository, Tool
from reviewboard.scmtools.signals import (checked_file_exists,
                                          checking_file_exists,
                                          fetched_file,
                                          fetching_file)
from reviewboard.testing import TestCase


class RepositoryTests(TestCase):
"""Unit tests for Repository operations."""
fixtures = ["test_scmtools"]
def setUp(self):
super(RepositoryTests, self).setUp()
self.local_repo_path = os.path.join(os.path.dirname(__file__), "..", "testdata", "git_repo")
self.repository = Repository.objects.create(name="Git test repo", path=self.local_repo_path, tool=Tool.objects.get(name="Git"))
self.scmtool_cls = self.repository.get_scmtool().__class__
self.old_get_file = self.scmtool_cls.get_file
self.old_file_exists = self.scmtool_cls.file_exists
def tearDown(self):
super(RepositoryTests, self).tearDown()
cache.clear()
self.scmtool_cls.get_file = self.old_get_file
self.scmtool_cls.file_exists = self.old_file_exists
def test_archive(self):
"""Testing Repository.archive"""
self.repository.archive()
self.assertTrue(self.repository.name.startswith("ar:Git test repo:"))
self.assertTrue(self.repository.archived)
self.assertFalse(self.repository.public)
self.assertIsNotNone(self.repository.archived_timestamp)
repository = Repository.objects.get(pk=self.repository.pk)
self.assertEqual(repository.name, self.repository.name)
self.assertEqual(repository.archived, self.repository.archived)
self.assertEqual(repository.public, self.repository.public)
self.assertEqual(repository.archived_timestamp, self.repository.archived_timestamp)
def test_archive_no_save(self):
"""Testing Repository.archive with save=False"""
self.repository.archive(save=False)
self.assertTrue(self.repository.name.startswith("ar:Git test repo:"))
self.assertTrue(self.repository.archived)
self.assertFalse(self.repository.public)
self.assertIsNotNone(self.repository.archived_timestamp)
repository = Repository.objects.get(pk=self.repository.pk)
self.assertNotEqual(repository.name, self.repository.name)
self.assertNotEqual(repository.archived, self.repository.archived)
self.assertNotEqual(repository.public, self.repository.public)
self.assertNotEqual(repository.archived_timestamp, self.repository.archived_timestamp)
def test_clean_without_conflict(self):
"""Testing Repository.clean without name/path conflicts"""
with self.assertNumQueries(1):
self.repository.clean()
def test_clean_with_name_conflict(self):
"""Testing Repository.clean with name conflict"""
repository = Repository(name=self.repository.name, path="path/to/repo.git", tool=self.repository.tool)
with self.assertRaises(ValidationError) as ctx:
with self.assertNumQueries(1):
repository.clean()
self.assertEqual(ctx.exception.message_dict, {"name": ["A repository with this name already exists"]})
def test_clean_with_path_conflict(self):
"""Testing Repository.clean with path conflict"""
repository = Repository(name="New test repo", path=self.repository.path, tool=self.repository.tool)
with self.assertRaises(ValidationError) as ctx:
with self.assertNumQueries(1):
repository.clean()
self.assertEqual(ctx.exception.message_dict, {"path": ["A repository with this path already exists"]})
def test_clean_with_name_and_path_conflict(self):
"""Testing Repository.clean with name and path conflict"""
repository = Repository(name=self.repository.name, path=self.repository.path, tool=self.repository.tool)
with self.assertRaises(ValidationError) as ctx:
with self.assertNumQueries(1):
repository.clean()
self.assertEqual(ctx.exception.message_dict, {"name": ["A repository with this name already exists"], "path": ["A repository with this path already exists"]})
def test_clean_with_path_conflict_with_archived(self):
"""Testing Repository.clean with archived repositories ignored for
path conflict
"""
self.repository.archive()
repository = Repository(name="<NAME> repo", path=self.repository.path, tool=self.repository.tool)
with self.assertNumQueries(1):
repository.clean()
def test_get_file_caching(self):
"""Testing Repository.get_file caches result"""
def get_file(self, path, revision, **kwargs):
num_calls["get_file"] += 1
return b"file data"
num_calls = {"get_file": 0}
path = "readme"
revision = "e965047"
request = {}
self.scmtool_cls.get_file = get_file
data1 = self.repository.get_file(path, revision, request=request)
data2 = self.repository.get_file(path, revision, request=request)
self.assertIsInstance(data1, bytes)
self.assertIsInstance(data2, bytes)
self.assertEqual(data1, b"file data")
self.assertEqual(data1, data2)
self.assertEqual(num_calls["get_file"], 1)
def test_get_file_signals(self):
"""Testing Repository.get_file emits signals"""
def on_fetching_file(sender, path, revision, request, **kwargs):
found_signals.append(("fetching_file", path, revision, request))
def on_fetched_file(sender, path, revision, request, **kwargs):
found_signals.append(("fetched_file", path, revision, request))
found_signals = []
fetching_file.connect(on_fetching_file, sender=self.repository)
fetched_file.connect(on_fetched_file, sender=self.repository)
path = "readme"
revision = "e965047"
request = {}
self.repository.get_file(path, revision, request=request)
self.assertEqual(len(found_signals), 2)
self.assertEqual(found_signals[0], ("fetching_file", path, revision, request))
self.assertEqual(found_signals[1], ("fetched_file", path, revision, request))
def test_get_file_exists_caching_when_exists(self):
"""Testing Repository.get_file_exists caches result when exists"""
def file_exists(self, path, revision, **kwargs):
num_calls["get_file_exists"] += 1
return True
num_calls = {"get_file_exists": 0}
path = "readme"
revision = "e965047"
request = {}
self.scmtool_cls.file_exists = file_exists
exists1 = self.repository.get_file_exists(path, revision, request=request)
exists2 = self.repository.get_file_exists(path, revision, request=request)
self.assertTrue(exists1)
self.assertTrue(exists2)
self.assertEqual(num_calls["get_file_exists"], 1)
def test_get_file_exists_caching_when_not_exists(self):
"""Testing Repository.get_file_exists doesn't cache result when the
file does not exist
"""
def file_exists(self, path, revision, **kwargs):
num_calls["get_file_exists"] += 1
return False
num_calls = {"get_file_exists": 0}
path = "readme"
revision = "12345"
request = {}
self.scmtool_cls.file_exists = file_exists
exists1 = self.repository.get_file_exists(path, revision, request=request)
exists2 = self.repository.get_file_exists(path, revision, request=request)
self.assertFalse(exists1)
self.assertFalse(exists2)
self.assertEqual(num_calls["get_file_exists"], 2)
def test_get_file_exists_caching_with_fetched_file(self):
"""Testing Repository.get_file_exists uses get_file's cached result"""
def get_file(self, path, revision, **kwargs):
num_calls["get_file"] += 1
return b"file data"
def file_exists(self, path, revision, **kwargs):
num_calls["get_file_exists"] += 1
return True
num_calls = {"get_file_exists": 0, "get_file": 0}
path = "readme"
revision = "e965047"
request = {}
self.scmtool_cls.get_file = get_file
self.scmtool_cls.file_exists = file_exists
self.repository.get_file(path, revision, request=request)
exists1 = self.repository.get_file_exists(path, revision, request=request)
exists2 = self.repository.get_file_exists(path, revision, request=request)
self.assertTrue(exists1)
self.assertTrue(exists2)
self.assertEqual(num_calls["get_file"], 1)
self.assertEqual(num_calls["get_file_exists"], 0)
def test_get_file_exists_signals(self):
"""Testing Repository.get_file_exists emits signals"""
def on_checking(sender, path, revision, request, **kwargs):
found_signals.append(("checking_file_exists", path, revision, request))
def on_checked(sender, path, revision, request, **kwargs):
found_signals.append(("checked_file_exists", path, revision, request))
found_signals = []
checking_file_exists.connect(on_checking, sender=self.repository)
checked_file_exists.connect(on_checked, sender=self.repository)
path = "readme"
revision = "e965047"
request = {}
self.repository.get_file_exists(path, revision, request=request)
self.assertEqual(len(found_signals), 2)
self.assertEqual(found_signals[0], ("checking_file_exists", path, revision, request))
self.assertEqual(found_signals[1], ("checked_file_exists", path, revision, request))
def test_repository_name_with_255_characters(self):
"""Testing Repository.name with 255 characters"""
self.repository = Repository.objects.create(name="t" * 255, path=self.local_repo_path, tool=Tool.objects.get(name="Git"))
self.assertEqual(len(self.repository.name), 255)
def test_is_accessible_by_with_public(self):
"""Testing Repository.is_accessible_by with public repository"""
user = self.create_user()
repository = self.create_repository()
self.assertTrue(repository.is_accessible_by(user))
self.assertTrue(repository.is_accessible_by(AnonymousUser()))
def test_is_accessible_by_with_public_and_hidden(self):
"""Testing Repository.is_accessible_by with public hidden repository"""
user = self.create_user()
repository = self.create_repository(visible=False)
self.assertTrue(repository.is_accessible_by(user))
self.assertTrue(repository.is_accessible_by(AnonymousUser()))
def test_is_accessible_by_with_private_and_not_member(self):
"""Testing Repository.is_accessible_by with private repository and
user not a member
"""
user = self.create_user()
repository = self.create_repository(public=False)
self.assertFalse(repository.is_accessible_by(user))
self.assertFalse(repository.is_accessible_by(AnonymousUser()))
def test_is_accessible_by_with_private_and_member(self):
"""Testing Repository.is_accessible_by with private repository and
user is a member
"""
user = self.create_user()
repository = self.create_repository(public=False)
repository.users.add(user)
self.assertTrue(repository.is_accessible_by(user))
def test_is_accessible_by_with_private_and_member_by_group(self):
"""Testing Repository.is_accessible_by with private repository and
user is a member by group
"""
user = self.create_user()
group = self.create_review_group(invite_only=True)
group.users.add(user)
repository = self.create_repository(public=False)
repository.review_groups.add(group)
self.assertTrue(repository.is_accessible_by(user))
def test_is_accessible_by_with_private_and_superuser(self):
"""Testing Repository.is_accessible_by with private repository and
user is a superuser
"""
user = self.create_user(is_superuser=True)
repository = self.create_repository(public=False)
self.assertTrue(repository.is_accessible_by(user))
def test_is_accessible_by_with_private_hidden_not_member(self):
"""Testing Repository.is_accessible_by with private hidden
repository and user not a member
"""
user = self.create_user()
repository = self.create_repository(public=False, visible=False)
self.assertFalse(repository.is_accessible_by(user))
def test_is_accessible_by_with_private_hidden_and_member(self):
"""Testing Repository.is_accessible_by with private hidden
repository and user is a member
"""
user = self.create_user()
repository = self.create_repository(public=False, visible=False)
repository.users.add(user)
self.assertTrue(repository.is_accessible_by(user))
def test_is_accessible_by_with_private_hidden_and_member_by_group(self):
"""Testing Repository.is_accessible_by with private hidden
repository and user is a member
"""
user = self.create_user()
group = self.create_review_group(invite_only=True)
group.users.add(user)
repository = self.create_repository(public=False, visible=False)
repository.review_groups.add(group)
self.assertTrue(repository.is_accessible_by(user))
def test_is_accessible_by_with_private_hidden_and_superuser(self):
"""Testing Repository.is_accessible_by with private hidden
repository and superuser
"""
user = self.create_user(is_superuser=True)
repository = self.create_repository(public=False, visible=False)
self.assertTrue(repository.is_accessible_by(user))
@add_fixtures(["test_users", "test_site"])
def test_is_accessible_by_with_local_site_accessible(self):
"""Testing Repository.is_accessible_by with Local Site accessible by
user
"""
user = self.create_user()
repository = self.create_repository(with_local_site=True)
repository.local_site.users.add(user)
self.assertTrue(repository.is_accessible_by(user))
@add_fixtures(["test_users", "test_site"])
def test_is_accessible_by_with_local_site_not_accessible(self):
"""Testing Repository.is_accessible_by with Local Site not accessible
by user
"""
user = self.create_user()
repository = self.create_repository(with_local_site=True)
self.assertFalse(repository.is_accessible_by(user))
self.assertFalse(repository.is_accessible_by(AnonymousUser()))
|
StarcoderdataPython
|
12825950
|
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
import os
import mock
import pytest
from datadog_checks.citrix_hypervisor import CitrixHypervisorCheck
from . import common
@pytest.mark.usefixtures('mock_responses')
def test_collect_metadata(datadog_agent, instance):
check = CitrixHypervisorCheck('citrix_hypervisor', {}, [instance])
check.check_id = 'test:123'
version_metadata = {
'version.scheme': 'semver',
'version.major': '8',
'version.minor': '2',
'version.patch': '0',
'version.raw': '8.2.0',
}
with open(os.path.join(common.HERE, 'fixtures', 'standalone', 'version.json'), 'rb') as f:
content = json.load(f)
xenserver = common.mocked_xenserver('master')
xenserver.session.get_this_host.return_value = {'Status': 'Success', 'Value': 'hostref'}
xenserver.host.get_software_version.return_value = content
with mock.patch('six.moves.xmlrpc_client.Server', return_value=xenserver):
check.check(None)
datadog_agent.assert_metadata('test:123', version_metadata)
datadog_agent.assert_metadata_count(len(version_metadata))
|
StarcoderdataPython
|
1753989
|
def disassemble_script(inp, ip_reg):
r_names = ['A', 'B', 'C', 'D', 'E', 'F']
r_names[ip_reg] = 'IP'
for n, line in enumerate(inp):
op, *params = line.split()
a, b, c = map(int, params)
print(f'{n:>2}: ', end='')
if op == 'addr':
print(f'{op} {r_names[a]} + {r_names[b]} -> {r_names[c]}')
elif op == 'addi':
print(f'{op} {r_names[a]} + {b} -> {r_names[c]}')
elif op == 'mulr':
print(f'{op} {r_names[a]} * {r_names[b]} -> {r_names[c]}')
elif op == 'muli':
print(f'{op} {r_names[a]} * {b} -> {r_names[c]}')
elif op == 'banr':
print(f'{op} {r_names[a]} & {r_names[b]} -> {r_names[c]}')
elif op == 'bani':
print(f'{op} {r_names[a]} & {b} -> {r_names[c]}')
elif op == 'borr':
print(f'{op} {r_names[a]} | {r_names[b]} -> {r_names[c]}')
elif op == 'bori':
print(f'{op} {r_names[a]} | {b} -> {r_names[c]}')
elif op == 'setr':
print(f'{op} {r_names[a]} -> {r_names[c]}')
elif op == 'seti':
print(f'{op} {a} -> {r_names[c]}')
elif op == 'gtir':
print(f'{op} {a} > {r_names[b]} -> {r_names[c]}')
elif op == 'gtri':
print(f'{op} {r_names[a]} > {b} -> {r_names[c]}')
elif op == 'gtrr':
print(f'{op} {r_names[a]} > {r_names[b]} -> {r_names[c]}')
elif op == 'eqir':
print(f'{op} {a} == {r_names[b]} -> {r_names[c]}')
elif op == 'eqri':
print(f'{op} {r_names[a]} == {b} -> {r_names[c]}')
elif op == 'eqrr':
print(f'{op} {r_names[a]} == {r_names[b]} -> {r_names[c]}')
DEBUG = False
def bug(s: str, **kwds):
if DEBUG:
        print(s, **kwds)
def run_script(inp, ip_reg, pass1=True):
registers = [0, 0, 0, 0, 0, 0]
r_names = ['A', 'B', 'C', 'D', 'E', 'F']
r_names[ip_reg] = 'IP'
last_unique = 1e600
history = set()
    while 0 <= registers[ip_reg] < len(inp):
if registers[ip_reg] == 17:
bug(f'L16: [{" ".join([f"{n}:{r:<3}" for r, n in zip(registers, r_names)]):^30}]')
registers[1] //= 256
registers[2] = 1
registers[3] = registers[1]
registers[ip_reg] = 8
bug(f'L26: [{" ".join([f"{n}:{r:<3}" for r, n in zip(registers, r_names)]):^30}]')
if registers[ip_reg] == 27:
bug(f'L26: [{" ".join([f"{n}:{r:<3}" for r, n in zip(registers, r_names)]):^30}]')
if registers[ip_reg] == 28:
bug(f'L27: [{" ".join([f"{n}:{r:<3}" for r, n in zip(registers, r_names)]):^30}]')
f_reg = registers[5]
if pass1:
return f_reg
else:
if f_reg in history:
return last_unique
history.add(f_reg)
last_unique = f_reg
line = inp[registers[ip_reg]]
bug(f'[{" ".join([f"{n}:{r:<3}" for r, n in zip(registers, r_names)]):^30}] : ', end='')
op, *params = line.split()
a, b, c = map(int, params)
if op == 'addr':
registers[c] = registers[a] + registers[b]
bug(f'{op} {r_names[a]} + {r_names[b]} -> {r_names[c]} [{registers[c]}]')
elif op == 'addi':
registers[c] = registers[a] + b
bug(f'{op} {r_names[a]} + {b} -> {r_names[c]} [{registers[c]}]')
elif op == 'mulr':
registers[c] = registers[a] * registers[b]
bug(f'{op} {r_names[a]} * {r_names[b]} -> {r_names[c]} [{registers[c]}]')
elif op == 'muli':
registers[c] = registers[a] * b
bug(f'{op} {r_names[a]} * {b} -> {r_names[c]} [{registers[c]}]')
elif op == 'banr':
registers[c] = registers[a] & registers[b]
bug(f'{op} {r_names[a]} & {r_names[b]} -> {r_names[c]} [{registers[c]}]')
elif op == 'bani':
registers[c] = registers[a] & b
bug(f'{op} {r_names[a]} & {b} -> {r_names[c]} [{registers[c]}]')
elif op == 'borr':
registers[c] = registers[a] | registers[b]
bug(f'{op} {r_names[a]} | {r_names[b]} -> {r_names[c]} [{registers[c]}]')
elif op == 'bori':
registers[c] = registers[a] | b
bug(f'{op} {r_names[a]} | {b} -> {r_names[c]} [{registers[c]}]')
elif op == 'setr':
registers[c] = registers[a]
bug(f'{op} {r_names[a]} -> {r_names[c]} [{registers[c]}]')
elif op == 'seti':
registers[c] = a
bug(f'{op} {a} -> {r_names[c]} [{registers[c]}]')
elif op == 'gtir':
registers[c] = 1 if a > registers[b] else 0
bug(f'{op} {a} > {r_names[b]} -> {r_names[c]} [{registers[c]}]')
elif op == 'gtri':
registers[c] = 1 if registers[a] > b else 0
bug(f'{op} {r_names[a]} > {b} -> {r_names[c]} [{registers[c]}]')
elif op == 'gtrr':
registers[c] = 1 if registers[a] > registers[b] else 0
bug(f'{op} {r_names[a]} > {r_names[b]} -> {r_names[c]} [{registers[c]}]')
elif op == 'eqir':
registers[c] = 1 if a == registers[b] else 0
bug(f'{op} {a} == {r_names[b]} -> {r_names[c]} [{registers[c]}]')
elif op == 'eqri':
registers[c] = 1 if registers[a] == b else 0
bug(f'{op} {r_names[a]} == {b} -> {r_names[c]} [{registers[c]}]')
elif op == 'eqrr':
registers[c] = 1 if registers[a] == registers[b] else 0
bug(f'{op} {r_names[a]} == {r_names[b]} -> {r_names[c]} [{registers[c]}]')
registers[ip_reg] += 1
return registers
def chronal_conversion(inp, ip_reg, pass1=True):
result = run_script(inp, ip_reg, pass1)
return result
if __name__ == '__main__':
with open('input.txt') as code_file:
code_lines = code_file.read().splitlines(keepends=False)
ip_reg = -1
if code_lines[0].startswith('#ip '):
ip_reg = int(code_lines[0][4:])
disassemble_script(code_lines[1:], ip_reg)
print(f'Day 21, pass 1: {chronal_conversion(code_lines[1:], ip_reg)}')
print(f'Day 21, pass 2: {chronal_conversion(code_lines[1:], ip_reg, False)}')
# Day 21, pass 1: 12446070
# Day 21, pass 2: 13928239
|
StarcoderdataPython
|
3362475
|
# THIS IS NOT INTENDED TO RUN AS IS, THE IMPORTS WILL FAIL.
# PLEASE READ THE COMMENTS
import argparse
import slurm as om
# Note: The slurm repo has to be added as a submodule within that path
# that you will be working in. Thus you'll use something like:
# ` import slurm as om` were slurm is this package's name.
# Below is an example package layout:
# # # # # # # # # # # # # # # # # # # #
# MyPackage:
# __init__.py
# \foo:
# __init__.py
# example.py (this file)
#
# \slurm (This module as a submodule)
#
# \bar:
# __init__.py
# bar.py
#
# # # # # # # # # # # # # # # # # # # #
#
# Ensuring that the path is known:
# export PYTHONPATH=<Path to MyPackage>
#
# You can now run the example as:
# python ../foo/example.py ....
#
# Also don't forget your __init__.py's!
def main():
# Use argparse to make a parser containing your batch arguments
parser = argparse.ArgumentParser(description="This is a cool example")
parser.add_argument( 'name', type=str,
help='Your name')
parser.add_argument( 'food', type=str,
help = 'Your favorite food')
# Pass the parser to slurm so it can add its own optional parameters
# such as cpu, memory...
# Call `python example.py -h` to see these arguments
args = om.slurm.args(parser).parse_args()
# Now lets define any job-inpendent arguments (to be applied to all jobs)
# Interpreter to read the virtual job shell
interpreter = '#!/bin/sh'
# Modules to import. Note: modules are not automatically passed
# into each subprocess from the current environment.
modules = []
# For example (not used here)
modules.append('openmind/singularity/librefactor-1g397644e')
# Any variables to export in the form: ("NAME", "VALUE")
exports = []
exports.append(("NAME", args.name))
exports.append(("FOOD", args.food))
# Any command postfix-flags to be used in all batches
flags = []
# Finally the actual execution command.
# Note this could be a really complicated function call, say to
# a singularity container, matlab, python, or even better, a python
# environment inside a singularity container (hypothetically defined in
# bar.py)
func = "echo $NAME really loves $FOOD"
# These are the job dependent arguments.
# They must be a list-of-lists where each inner list are the arguments
# for `func`
batches = [["For the {0:d}th time...".format(i)] for i in range(10)]
# Constructs the batch object
batch = om.slurm.Batch(interpreter, modules, exports, func, batches, flags,
args)
# Submit the jobarray
batch.run()
# Watch the status of the jobs until completion
batch.monitor()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1632098
|
<reponame>sun1638650145/CRNN
from .utils import decode_predictions
from .utils import get_dataset_summary
from .utils import get_image_format
from .utils import visualize_train_data
from .preprocessing import character_decoder
from .preprocessing import character_encoder
from .preprocessing import create_tf_dataset
from .preprocessing import train_validation_split
|
StarcoderdataPython
|
9786281
|
<filename>pypcode_emu/ntypes.py<gh_stars>10-100
import operator
from typing import Type
import nativetypes as nt
from bidict import bidict
uint1 = nt.nint_type("uint1", 1, False)
int1 = nt.nint_type("int1", 1, True)
size2uintN = bidict({0: uint1, 1: nt.uint8, 2: nt.uint16, 4: nt.uint32, 8: nt.uint64})
size2intN = bidict({1: int1, 2: nt.int16, 4: nt.int32, 8: nt.int64})
def uintN(nbytes: int) -> Type[nt.nint]:
return size2uintN[nbytes]
def intN(nbytes: int) -> Type[nt.nint]:
return size2intN[nbytes]
def as_u(self: nt.nint):
if self.v < 0:
return nt.nint((1 << self.b) + self.v, self.b, False)
return nt.nint(self.v, self.b, False)
nt.nint.as_u = property(as_u)
def as_s(self: nt.nint):
if self.s:
return self
return nt.nint(self.v, self.b, True)
nt.nint.as_s = property(as_s)
def sext(self: nt.nint, nbits: int):
return nt.nint(self.as_s.v, nbits, True)
nt.nint.sext = sext
def zext(self: nt.nint, nbits: int):
return nt.nint(self.as_u.v, nbits, False)
nt.nint.zext = zext
def asr(self: nt.nint, nbits: int):
return nt.nint((self.as_s >> nbits).v, self.b, True)
nt.nint.asr = asr
nt.nint.CMP_MAP = {
">": "gt",
"<": "lt",
"==": "eq",
"!=": "ne",
">=": "ge",
"<=": "le",
}
def cmp(self: nt.nint, cmp: str, other: nt.nint) -> nt.uint8:
signed = cmp.startswith("s")
if signed:
a, b = self.as_s, other.as_s
else:
a, b = self.as_u, other.as_u
cmp = cmp.lstrip("s")
py_op_name = f"__{nt.nint.CMP_MAP[cmp]}__"
op_func = getattr(operator, py_op_name)
return nt.uint8(1 if op_func(a, b) else 0)
nt.nint.cmp = cmp
def strict_eq(self: nt.nint, other: nt.nint) -> bool:
return self.v == other.v and self.b == other.b and self.s == other.s
nt.nint.strict_eq = strict_eq
def comp_time_eq(self: nt.nint, other: nt.nint) -> bool:
return self.strict_eq(other)
nt.nint.comp_time_eq = comp_time_eq
def nint_hash(self: nt.nint) -> int:
return hash((self.v, self.b, self.s))
nt.nint.__hash__ = nint_hash
exported_attrs_names = list(
filter(lambda n: not n.startswith("__") and not n.endswith("__"), dir(nt))
)
exported_attrs = [getattr(nt, n) for n in exported_attrs_names]
exported_attrs = [*exported_attrs, uint1, int1]
exported_attrs_names = [*exported_attrs_names, "uint1", "int1"]
for n, a in zip(exported_attrs_names, exported_attrs):
globals()[n] = a
nint = nt.nint
uint8, int8 = nt.uint8, nt.int8
uint16, int16 = nt.uint16, nt.int16
uint32, int32 = nt.uint32, nt.int32
uint64, int64 = nt.uint64, nt.int64
__all__ = tuple(exported_attrs_names)
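# Usage sketch (added for illustration; assumes the nativetypes and bidict
# packages are installed):
#
#     x = uint8(0xF0)
#     x.sext(16)               # sign-extend (0xF0 is -16 signed, so -> 16-bit -16)
#     x.zext(16)               # zero-extend to a 16-bit 0x00F0
#     x.cmp("s<", uint8(1))    # signed compare -> uint8(1), since -16 < 1
#     uintN(4)                 # -> uint32 type for a 4-byte value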
|
StarcoderdataPython
|
6478337
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 <NAME> <EMAIL>
#
import logging
import ctypes
from haystack.reverse import config
from haystack.reverse import structure
"""
the Python classes to represent the guesswork record and field typing of
allocations.
"""
log = logging.getLogger('field')
# Field related functions and classes
class FieldType(object):
"""
Represents the type of a field.
"""
types = set()
def __init__(self, _id, _name, _signature):
self.__id = _id
self.__name = _name
self.__sig = _signature
@property
def id(self):
return self.__id
@property
def name(self):
return self.__name
@property
def signature(self):
return self.__sig
def __cmp__(self, other):
try:
return cmp(self.id, other.id)
except AttributeError as e:
return -1
def __hash__(self):
return hash(self.id)
def __str__(self):
return '<FieldType %s>' % self.name
def __repr__(self):
return '<t:%s>' % self.name
class FieldTypeStruct(FieldType):
"""
Fields that are know independent structure.
In case we reverse a Big record that has members of known record types.
"""
def __init__(self, _typename):
assert isinstance(_typename, str)
super(FieldTypeStruct, self).__init__(0x1, _typename, 'K')
def __str__(self):
return self.name
class FieldTypeArray(FieldType):
"""
An array type
"""
def __init__(self, item_type, item_size, nb_items):
super(FieldTypeArray, self).__init__(0x60, '%s*%d' % (item_type.name, nb_items), 'a')
self.nb_items = nb_items
self.item_type = item_type
self.item_size = item_size
self.size = item_size*nb_items
class RecordTypePointer(FieldType):
def __init__(self, _type):
#if typ == STRING:
# return STRING_POINTER
super(RecordTypePointer, self).__init__(_type.id + 0xa, 'ctypes.POINTER(%s)' % _type.name, 'P')
# setup all the know types that are interesting to us
UNKNOWN = FieldType(0x0, 'ctypes.c_ubyte', 'u')
STRUCT = FieldType(0x1, 'Structure', 'K')
ZEROES = FieldType(0x2, 'ctypes.c_ubyte', 'z')
STRING = FieldType(0x4, 'ctypes.c_char', 'T')
STRING16 = FieldType(0x14, 'ctypes.c_char', 'T')
STRINGNULL = FieldType(0x6, 'ctypes.c_char', 'T')
STRING_POINTER = FieldType(0x4 + 0xa, 'ctypes.c_char_p', 's')
INTEGER = FieldType(0x18, 'ctypes.c_uint', 'I')
SMALLINT = FieldType(0x8, 'ctypes.c_uint', 'i')
SIGNED_SMALLINT = FieldType(0x28, 'ctypes.c_int', 'i')
ARRAY = FieldType(0x40, 'Array', 'a')
BYTEARRAY = FieldType(0x50, 'ctypes.c_ubyte', 'a')
# ARRAY_CHAR_P = FieldType(0x9, 'array_char_p', 'ctypes.c_char_p', 'Sp')
POINTER = FieldType(0xa, 'ctypes.c_void_p', 'P')
PADDING = FieldType(0xff, 'ctypes.c_ubyte', 'X')
class Field(object):
"""
Class that represent a Field instance, a FieldType instance.
"""
def __init__(self, name, offset, _type, size, is_padding):
self.__name = name
self.__offset = offset
assert isinstance(_type, FieldType)
self.__field_type = _type
self.__size = size
self.__padding = is_padding
self.__comment = '#'
@property
def name(self):
return self.__name
@name.setter
def name(self, _name):
if _name is None:
self.__name = '%s_%s' % (self.field_type.name, self.offset)
else:
self.__name = _name
@property
def offset(self):
return self.__offset
@property
def field_type(self):
return self.__field_type
@property
def size(self):
return self.__size
@property
def padding(self):
return self.__padding
@property
def comment(self):
return self.__comment
@comment.setter
def comment(self, txt):
self.__comment = '# %s' % txt
def is_string(self): # null terminated
return self.field_type in [STRING, STRING16, STRINGNULL, STRING_POINTER]
def is_pointer(self):
# we could be a pointer or a pointer string
return issubclass(self.__class__, PointerField)
def is_zeroes(self):
return self.field_type == ZEROES
def is_array(self): # will be overloaded
return self.field_type == ARRAY or self.field_type == BYTEARRAY
def is_integer(self):
return self.field_type == INTEGER or self.field_type == SMALLINT or self.field_type == SIGNED_SMALLINT
def is_record(self):
return self.field_type == STRUCT
def is_gap(self):
return self.field_type == UNKNOWN
def get_typename(self):
if self.is_string() or self.is_zeroes():
return '%s*%d' % (self.field_type.name, len(self))
elif self.is_array():
# TODO should be in type
return '%s*%d' % (self.field_type.name, len(self) / self.nb_items)
elif self.field_type == UNKNOWN:
return '%s*%d' % (self.field_type.name, len(self))
return self.field_type.name
def __hash__(self):
return hash((self.offset, self.size, self.field_type))
def __cmp__(self, other):
# XXX : Perf... cmp sux
try:
if self.offset < other.offset:
return -1
elif self.offset > other.offset:
return 1
elif (self.offset, self.size, self.field_type) == (other.offset, other.size, other.field_type):
return 0
# last chance, expensive cmp
return cmp((self.offset, self.size, self.field_type),
(other.offset, other.size, other.field_type))
except AttributeError as e:
# if not isinstance(other, Field):
return -1
def __len__(self):
return int(self.size) # some long come and goes
def __repr__(self):
return str(self)
def __str__(self):
return '<Field offset:%d size:%s t:%s>' % (self.offset, self.size, self.field_type)
def get_signature(self):
return self.field_type, self.size
def to_string(self, value):
if value is None:
value = 0
if self.is_pointer():
comment = '# @ 0x%0.8x %s' % (value, self.comment)
elif self.is_integer():
comment = '# 0x%x %s' % (value, self.comment)
elif self.is_zeroes():
comment = '''# %s zeroes: '\\x00'*%d''' % (self.comment, len(self))
elif self.is_string():
comment = '# %s %s: %s' % (self.comment, self.field_type.name, value)
elif self.is_record():
comment = '#'
else:
# unknown
comment = '# %s else bytes:%s' % (self.comment, repr(value))
# prep the string
fstr = "( '%s' , %s ), %s\n" % (self.name, self.get_typename(), comment)
return fstr
class PointerField(Field):
"""
represent a pointer field
"""
def __init__(self, name, offset, size):
super(PointerField, self).__init__(name, offset, POINTER, size, False)
self.__pointee = None
        self.__pointer_to_ext_lib = False  # ??
self._child_addr = 0
self._child_desc = None
self._child_type = None
@property
def pointee(self):
return self.__pointee
@pointee.setter
def pointee(self, pointee_field):
self.__pointee = pointee_field
def is_pointer_to_string(self):
# if hasattr(self, '_ptr_to_ext_lib'):
# return False
return self.pointee.is_string()
def is_pointer_to_ext_lib(self):
return self.__pointer_to_ext_lib
def set_pointer_to_ext_lib(self):
self.__pointer_to_ext_lib = True
def set_pointee_addr(self, addr):
self._child_addr = addr
def set_pointee_desc(self, desc):
self._child_desc = desc
def set_pointee_ctype(self, _type):
self._child_type = _type
class ArrayField(Field):
"""
Represents an array field.
"""
# , basicTypename, basicTypeSize ): # use first element to get that info
def __init__(self, name, offset, item_type, item_size, nb_item):
size = item_size * nb_item
super(ArrayField, self).__init__(name, offset, FieldTypeArray(item_type, item_size, nb_item), size, False)
def get_typename(self):
return self.field_type.name
def is_array(self):
return True
def _get_value(self, _record, maxLen=120):
return None
def to_string(self, _record, prefix=''):
item_type = self.field_type.item_type
# log.debug('P:%s I:%s Z:%s typ:%s' % (item_type.is_pointer(), item_type.is_integer(), item_type.is_zeroes(), item_type.name))
log.debug("array type: %s", item_type.name)
#
comment = '# %s array' % self.comment
fstr = "%s( '%s' , %s ), %s\n" % (prefix, self.name, self.get_typename(), comment)
return fstr
class ZeroField(ArrayField):
"""
Represents an array field of zeroes.
"""
def __init__(self, name, offset, nb_item):
super(ZeroField, self).__init__(name, offset, ZEROES, 1, nb_item)
def is_zeroes(self):
return True
class RecordField(Field, structure.AnonymousRecord):
"""
make a record field
"""
def __init__(self, parent, offset, field_name, field_type, fields):
size = sum([len(f) for f in fields])
_address = parent.address + offset
structure.AnonymousRecord.__init__(self, parent._memory_handler, _address, size, prefix=None)
Field.__init__(self, field_name, offset, FieldTypeStruct(field_type), size, False)
structure.AnonymousRecord.set_name(self, field_name)
#structure.AnonymousRecord.add_fields(self, fields)
        _record_type = structure.RecordType(field_type, size, fields)
self.set_record_type(_record_type)
return
def get_typename(self):
return '%s' % self.field_type
@property
def address(self):
raise NotImplementedError('You cannot call address on a subrecord')
# def to_string(self, *args):
# # print self.fields
# fieldsString = '[ \n%s ]' % (''.join([field.to_string(self, '\t') for field in self.get_fields()]))
# info = 'rlevel:%d SIG:%s size:%d' % (self.get_reverse_level(), self.get_signature(), len(self))
# ctypes_def = '''
#class %s(ctypes.Structure): # %s
# _fields_ = %s
#
#''' % (self.name, info, fieldsString)
# return ctypes_def
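# Rough sketch (hypothetical field names and values, not from the original
# module) of what Field.to_string() above emits: one
#   ( 'name' , typename ), # comment
# entry per field, where typename is field_type.name, suffixed with '*<count>'
# for strings, zeroes, arrays and unknown gaps, e.g.:
#   ( 'ptr_8' , <POINTER.name> ), # @ 0x00a3f010 ...
#   ( 'zerroes_16' , <ZEROES.name>*8 ), # ... zeroes: '\x00'*8
# PointerField, ArrayField, ZeroField and RecordField only change how the
# typename and comment parts are rendered.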
|
StarcoderdataPython
|
3550051
|
<reponame>emilybache/ApprovalTools<filename>approve_all.py
#!/usr/bin/env python3
import os
import re
import shutil
import argparse
def approve_all(directory, verbose=True):
regex = re.compile(r"(.*)\.received(\..*)")
for root, dirs, files in os.walk(directory):
for filename in files:
matches = re.findall(regex, filename)
if matches:
test_name = matches[0][0]
file_extension_including_dot = matches[0][1]
new_filename = test_name + ".approved" + file_extension_including_dot
received_file = str(os.path.join(root, filename))
approved_file = str(os.path.join(root, new_filename))
shutil.copyfile(received_file,
approved_file)
if verbose:
print(f"approving {test_name}")
if __name__ == "__main__":
import sys
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="the directory to approve files in")
parser.add_argument("-q", "--quiet", action="store_true", help="suppress all output messages")
args = parser.parse_args()
directory = args.directory or os.getcwd()
verbose = not args.quiet
if not os.path.exists(directory):
print(f"directory not found {directory}")
sys.exit(-1)
if verbose:
print(f"approving all received files in folder {directory}")
approve_all(directory, verbose)
|
StarcoderdataPython
|
1906222
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-26 12:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_category'),
]
operations = [
migrations.AddField(
model_name='post',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.category'),
),
migrations.AddField(
model_name='post2',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.category'),
),
]
|
StarcoderdataPython
|
9663157
|
<reponame>nigeljyng/textacy
# -*- coding: utf-8 -*-
"""
Load, process, iterate, transform, and save text content paired with metadata
— a document.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import Counter
import os
import warnings
from cytoolz import itertoolz
import spacy.about
from spacy import attrs
from spacy.language import Language as SpacyLang
from spacy.tokens.doc import Doc as SpacyDoc
from spacy.tokens.span import Span as SpacySpan
from spacy.tokens.token import Token as SpacyToken
import textacy
from textacy.compat import unicode_
from textacy.constants import NUMERIC_NE_TYPES
from textacy import data, fileio, spacy_utils, text_utils
from textacy import network
class Doc(object):
"""
A text document parsed by spaCy and, optionally, paired with key metadata.
Transform ``Doc`` into an easily-customized list of terms, a bag-of-words or
(more general) bag-of-terms, or a semantic network; save and load parsed
content and metadata to and from disk; index, slice, and iterate through
tokens and sentences; and more.
Initialize from a text and (optional) metadata::
>>> content = '''
... The apparent symmetry between the quark and lepton families of
... the Standard Model (SM) are, at the very least, suggestive of
... a more fundamental relationship between them. In some Beyond the
... Standard Model theories, such interactions are mediated by
... leptoquarks (LQs): hypothetical color-triplet bosons with both
... lepton and baryon number and fractional electric charge.'''
>>> metadata = {
... 'title': 'A Search for 2nd-generation Leptoquarks at √s = 7 TeV',
... 'author': '<NAME>',
... 'pub_date': '2012-08-01'}
>>> doc = textacy.Doc(content, metadata=metadata)
>>> print(doc)
Doc(71 tokens; "The apparent symmetry between the quark and lep...")
Transform into other, common formats::
>>> doc.to_bag_of_words(lemmatize=False, as_strings=False)
{205123: 1, 21382: 1, 17929: 1, 175499: 2, 396: 1, 29774: 1, 27472: 1,
4498: 1, 1814: 1, 1176: 1, 49050: 1, 287836: 1, 1510365: 1, 6239: 2,
3553: 1, 5607: 1, 4776: 1, 49580: 1, 6701: 1, 12078: 2, 63216: 1,
6738: 1, 83061: 1, 5243: 1, 1599: 1}
>>> doc.to_bag_of_terms(ngrams=2, named_entities=True,
... lemmatize=True, as_strings=True)
{'apparent symmetry': 1, 'baryon number': 1, 'electric charge': 1,
'fractional electric': 1, 'fundamental relationship': 1,
'hypothetical color': 1, 'lepton family': 1, 'model theory': 1,
'standard model': 2, 'triplet boson': 1}
Doc as sequence of tokens, emulating spaCy's "sequence API"::
>>> doc[49] # spacy.Token
leptoquarks
>>> doc[:3] # spacy.Span
The apparent symmetry
Save to and load from disk::
>>> doc.save('~/Desktop', name='leptoquarks')
>>> doc = textacy.Doc.load('~/Desktop', name='leptoquarks')
>>> print(doc)
Doc(71 tokens; "The apparent symmetry between the quark and lep...")
Args:
content (str or ``spacy.Doc``): Document content as (unicode) text or an
already-parsed ``spacy.Doc``. If str, content is processed by models
loaded with a ``spacy.Language`` and assigned to :attr:`spacy_doc`.
metadata (dict): Dictionary of relevant information about content. This
can be helpful when identifying and filtering documents, as well as
when engineering features for model inputs.
lang (str or ``spacy.Language``): Language of document content. If this
is known, pass in its standard 2-letter language code or, if *not*
known, leave as None (default) and let textacy handle everything.
A language code is then used to load a ``spacy.Language`` object,
unless an already-instantiated such object is passed in here.
**Note:** Currently, spaCy only works with English ('en') and German
('de') languages! The ``spacy.Language`` object parses ``content``
(if str) and sets the :attr:`spacy_vocab` and :attr:`spacy_stringstore`
attributes.
Attributes:
lang (str): 2-letter code for language of ``Doc``.
metadata (dict): Dictionary of relevant information about content.
spacy_doc (``spacy.Doc``): https://spacy.io/docs#doc
spacy_vocab (``spacy.Vocab``): https://spacy.io/docs#vocab
spacy_stringstore (``spacy.StringStore``): https://spacy.io/docs#stringstore
"""
def __init__(self, content, metadata=None, lang=None):
self.metadata = metadata or {}
# Doc instantiated from text, so must be parsed with a spacy.Language
if isinstance(content, unicode_):
if isinstance(lang, SpacyLang):
self.lang = lang.lang
spacy_lang = lang
elif isinstance(lang, unicode_):
self.lang = lang
spacy_lang = data.load_spacy(self.lang)
elif lang is None:
self.lang = text_utils.detect_language(content)
spacy_lang = data.load_spacy(self.lang)
else:
msg = '`lang` must be {}, not "{}"'.format(
{unicode_, SpacyLang}, type(lang))
raise ValueError(msg)
self.spacy_vocab = spacy_lang.vocab
self.spacy_stringstore = self.spacy_vocab.strings
self.spacy_doc = spacy_lang(content)
# Doc instantiated from an already-parsed spacy.Doc
elif isinstance(content, SpacyDoc):
self.spacy_vocab = content.vocab
self.spacy_stringstore = self.spacy_vocab.strings
self.spacy_doc = content
self.lang = self.spacy_vocab.lang
# these checks are probably unnecessary, but in case a user
# has done something very strange, we should complain...
if isinstance(lang, SpacyLang):
if self.spacy_vocab is not lang.vocab:
msg = '`spacy.Vocab` used to parse `content` must be the same as the one associated with `lang`'
raise ValueError(msg)
elif isinstance(lang, unicode_):
if lang != self.lang:
raise ValueError('lang of spacy models used to parse `content` must be the same as `lang`')
elif lang is not None:
msg = '`lang` must be {}, not "{}"'.format(
{unicode_, SpacyLang}, type(lang))
raise ValueError(msg)
# oops, user has made some sort of mistake
else:
msg = '`Doc` must be initialized with {}, not "{}"'.format(
{unicode_, SpacyDoc}, type(content))
raise ValueError(msg)
def __repr__(self):
snippet = self.text[:50].replace('\n', ' ')
if len(snippet) == 50:
snippet = snippet[:47] + '...'
return 'Doc({} tokens; "{}")'.format(len(self.spacy_doc), snippet)
def __len__(self):
return self.n_tokens
def __getitem__(self, index):
return self.spacy_doc[index]
def __iter__(self):
for tok in self.spacy_doc:
yield tok
##########
# FILEIO #
def save(self, path, name=None):
"""
Save ``Doc`` content and metadata to disk.
Args:
path (str): Directory on disk where content + metadata will be saved.
name (str): Prepend default filenames 'spacy_doc.bin' and 'metadata.json'
with a name to identify/uniquify this particular document.
.. warning:: If the ``spacy.Vocab`` object used to save this document is
not the same as the one used to load it, there will be problems!
Consequently, this functionality is only useful as short-term but
not long-term storage.
"""
if name:
meta_fname = os.path.join(path, '_'.join([name, 'metadata.json']))
doc_fname = os.path.join(path, '_'.join([name, 'spacy_doc.bin']))
else:
meta_fname = os.path.join(path, 'metadata.json')
doc_fname = os.path.join(path, 'spacy_doc.bin')
package_info = {'textacy_lang': self.lang,
'spacy_version': spacy.about.__version__}
fileio.write_json(
dict(package_info, **self.metadata), meta_fname)
fileio.write_spacy_docs(self.spacy_doc, doc_fname)
@classmethod
def load(cls, path, name=None):
"""
Load content and metadata from disk, and initialize a ``Doc``.
Args:
path (str): Directory on disk where content and metadata are saved.
name (str): Identifying/uniquifying name prepended to the default
filenames 'spacy_doc.bin' and 'metadata.json', used when doc was
saved to disk via :meth:`Doc.save()`.
Returns:
:class:`textacy.Doc <Doc>`
.. warning:: If the ``spacy.Vocab`` object used to save this document is
not the same as the one used to load it, there will be problems!
Consequently, this functionality is only useful as short-term but
not long-term storage.
"""
if name:
meta_fname = os.path.join(path, '_'.join([name, 'metadata.json']))
docs_fname = os.path.join(path, '_'.join([name, 'spacy_doc.bin']))
else:
meta_fname = os.path.join(path, 'metadata.json')
docs_fname = os.path.join(path, 'spacy_doc.bin')
metadata = list(fileio.read_json(meta_fname))[0]
lang = metadata.pop('textacy_lang')
spacy_version = metadata.pop('spacy_version')
if spacy_version != spacy.about.__version__:
msg = """
the spaCy version used to save this Doc to disk is not the
same as the version currently installed ('{}' vs. '{}'); if the
data underlying the associated `spacy.Vocab` has changed, this
loaded Doc may not be valid!
""".format(spacy_version, spacy.about.__version__)
warnings.warn(msg, UserWarning)
spacy_vocab = data.load_spacy(lang).vocab
return cls(list(fileio.read_spacy_docs(spacy_vocab, docs_fname))[0],
lang=lang, metadata=metadata)
####################
# BASIC COMPONENTS #
@property
def tokens(self):
"""
Yield the document's tokens, as tokenized by spaCy. Equivalent to
iterating directly: ``for token in Doc: <do stuff>``
"""
for tok in self.spacy_doc:
yield tok
@property
def sents(self):
"""Yield the document's sentences, as segmented by spaCy."""
for sent in self.spacy_doc.sents:
yield sent
@property
def n_tokens(self):
"""The number of tokens in the document — including punctuation."""
return len(self.spacy_doc)
@property
def n_sents(self):
"""The number of sentences in the document."""
return sum(1 for _ in self.spacy_doc.sents)
def merge(self, spans):
"""
Merge spans *in-place* within ``Doc`` so that each takes up a single
token. Note: All cached counts on this doc are cleared after a merge.
Args:
spans (Iterable[``spacy.Span``]): for example, the results from
:func:`extract.named_entities() <textacy.extract.named_entities>`
or :func:`extract.pos_regex_matches() <textacy.extract.pos_regex_matches>`
"""
spacy_utils.merge_spans(spans)
# reset counts, since merging spans invalidates existing counts
self._counts.clear()
self._counted_ngrams = set()
_counted_ngrams = set()
_counts = Counter()
def count(self, term):
"""
Get the number of occurrences (i.e. count) of ``term`` in ``Doc``.
Args:
term (str or int or ``spacy.Token`` or ``spacy.Span``): The term to
be counted can be given as a string, a unique integer id, a
spacy token, or a spacy span. Counts for the same term given in
different forms are the same!
Returns:
int: Count of ``term`` in ``Doc``.
.. tip:: Counts are cached. The first time a single word's count is
looked up, *all* words' counts are saved, resulting in a slower
runtime the first time but orders of magnitude faster runtime for
subsequent calls for this or any other word. Similarly, if a
bigram's count is looked up, all bigrams' counts are stored — etc.
If spans are merged using :meth:`Doc.merge()`, all cached counts are
deleted, since merging spans will invalidate many counts. Better to
merge first, count second!
"""
# figure out what object we're dealing with here; convert as necessary
if isinstance(term, unicode_):
term_text = term
term_id = self.spacy_stringstore[term_text]
term_len = term_text.count(' ') + 1
elif isinstance(term, int):
term_id = term
term_text = self.spacy_stringstore[term_id]
term_len = term_text.count(' ') + 1
elif isinstance(term, SpacyToken):
term_text = term.orth_
term_id = self.spacy_stringstore[term_text]
term_len = 1
elif isinstance(term, SpacySpan):
term_text = term.orth_
term_id = self.spacy_stringstore[term_text]
term_len = len(term)
# we haven't counted terms of this length; let's do that now
if term_len not in self._counted_ngrams:
if term_len == 1:
self._counts += Counter(
word.orth
for word in textacy.extract.words(self,
filter_stops=False,
filter_punct=False,
filter_nums=False))
else:
self._counts += Counter(
self.spacy_stringstore[ngram.orth_]
for ngram in textacy.extract.ngrams(self, term_len,
filter_stops=False,
filter_punct=False,
filter_nums=False))
self._counted_ngrams.add(term_len)
return self._counts[term_id]
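    # Illustrative usage sketch (assumes `doc` is the sample Doc built in the
    # class docstring, whose text contains the word 'lepton'):
    #
    #   doc.count('lepton')   # exact-form occurrences of 'lepton' in the doc
    #
    # The same term given as a string, integer id, token or span yields the
    # same count, and repeated lookups hit the cache described above.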
###############
# DOC AS TEXT #
@property
def text(self):
"""Return the document's raw text."""
return self.spacy_doc.text_with_ws
@property
def tokenized_text(self):
"""Return text as an ordered, nested list of tokens per sentence."""
return [[token.text for token in sent]
for sent in self.spacy_doc.sents]
@property
def pos_tagged_text(self):
"""Return text as an ordered, nested list of (token, POS) pairs per sentence."""
return [[(token.text, token.pos_) for token in sent]
for sent in self.spacy_doc.sents]
#################
# TRANSFORM DOC #
def to_terms_list(self, ngrams=(1, 2, 3), named_entities=True,
normalize='lemma', lemmatize=None, lowercase=None,
as_strings=False, **kwargs):
"""
Transform ``Doc`` into a sequence of ngrams and/or named entities, which
aren't necessarily in order of appearance, where each term appears in
the list with the same frequency that it appears in ``Doc``.
Args:
ngrams (int or Set[int]): n of which n-grams to include; ``(1, 2, 3)``
(default) includes unigrams (words), bigrams, and trigrams; `2`
if only bigrams are wanted; falsy (e.g. False) to not include any
named_entities (bool): if True (default), include named entities
in the terms list; note: if ngrams are also included, named
entities are added *first*, and any ngrams that exactly overlap
with an entity are skipped to prevent double-counting
lemmatize (bool): *deprecated* if True (default), lemmatize all terms
lowercase (bool): *deprecated* if True and `lemmatize` is False, words
are lower-cased
normalize (str or callable): if 'lemma', lemmatize terms; if 'lower',
lowercase terms; if false-y, use the form of terms as they appear
in doc; if a callable, must accept a ``spacy.Token`` or ``spacy.Span``
and return a str, e.g. :func:`textacy.spacy_utils.normalized_str()`
as_strings (bool): if True, terms are returned as strings; if False
(default), terms are returned as their unique integer ids
kwargs:
- filter_stops (bool)
- filter_punct (bool)
- filter_nums (bool)
- include_pos (str or Set[str])
- exclude_pos (str or Set[str])
- min_freq (int)
- include_types (str or Set[str])
                - exclude_types (str or Set[str])
- drop_determiners (bool)
see :func:`extract.words <textacy.extract.words>`,
:func:`extract.ngrams <textacy.extract.ngrams>`,
and :func:`extract.named_entities <textacy.extract.named_entities>`
for more information on these parameters
Yields:
int or str: the next term in the terms list, either as a unique
integer id or as a string
Raises:
ValueError: if neither ``named_entities`` nor ``ngrams`` are included
.. note:: Despite the name, this is a generator function; to get an
actual list of terms, call ``list(doc.to_terms_list())``.
"""
if lemmatize is not None or lowercase is not None:
normalize = ('lemma' if lemmatize is True else
'lower' if lowercase is True else
False)
msg = '`lemmatize` and `lowercase` params are deprecated; use `normalize` instead'
with warnings.catch_warnings():
warnings.simplefilter('once', DeprecationWarning)
warnings.warn(msg, DeprecationWarning)
if not named_entities and not ngrams:
raise ValueError('either `named_entities` or `ngrams` must be included')
if isinstance(ngrams, int):
ngrams = (ngrams,)
if named_entities is True:
ne_kwargs = {
'include_types': kwargs.get('include_types'),
'exclude_types': kwargs.get('exclude_types'),
'drop_determiners': kwargs.get('drop_determiners', True),
'min_freq': kwargs.get('min_freq', 1)}
# if numeric ngrams are to be filtered, we should filter numeric entities
if ngrams and kwargs.get('filter_nums') is True:
if ne_kwargs['exclude_types']:
if isinstance(ne_kwargs['exclude_types'], (set, frozenset, list, tuple)):
ne_kwargs['exclude_types'] = set(ne_kwargs['exclude_types'])
ne_kwargs['exclude_types'].add(NUMERIC_NE_TYPES)
else:
ne_kwargs['exclude_types'] = NUMERIC_NE_TYPES
if ngrams:
ngram_kwargs = {
'filter_stops': kwargs.get('filter_stops', True),
'filter_punct': kwargs.get('filter_punct', True),
'filter_nums': kwargs.get('filter_nums', False),
'include_pos': kwargs.get('include_pos'),
'exclude_pos': kwargs.get('exclude_pos'),
'min_freq': kwargs.get('min_freq', 1)}
# if numeric entities are to be filtered, we should filter numeric ngrams
if named_entities and kwargs.get('exclude_types') and NUMERIC_NE_TYPES in kwargs['exclude_types']:
ngram_kwargs['filter_nums'] = True
terms = []
# special case: ensure that named entities aren't double-counted when
# adding words or ngrams that were already added as named entities
if named_entities is True and ngrams:
ents = tuple(textacy.extract.named_entities(self, **ne_kwargs))
ent_idxs = {(ent.start, ent.end) for ent in ents}
terms.append(ents)
for n in ngrams:
if n == 1:
terms.append(
(word for word in textacy.extract.words(self, **ngram_kwargs)
if (word.idx, word.idx + 1) not in ent_idxs))
else:
terms.append(
(ngram for ngram in textacy.extract.ngrams(self, n, **ngram_kwargs)
if (ngram.start, ngram.end) not in ent_idxs))
# otherwise, no need to check for overlaps
else:
if named_entities is True:
terms.append(textacy.extract.named_entities(self, **ne_kwargs))
else:
for n in ngrams:
if n == 1:
terms.append(textacy.extract.words(self, **ngram_kwargs))
else:
terms.append(textacy.extract.ngrams(self, n, **ngram_kwargs))
terms = itertoolz.concat(terms)
# convert token and span objects into integer ids
if as_strings is False:
if normalize == 'lemma':
for term in terms:
try:
yield term.lemma
except AttributeError:
yield self.spacy_stringstore[term.lemma_]
elif normalize == 'lower':
for term in terms:
try:
yield term.lower
except AttributeError:
yield self.spacy_stringstore[term.orth_.lower()]
elif not normalize:
for term in terms:
try:
yield term.orth
except AttributeError:
yield self.spacy_stringstore[term.orth_]
else:
for term in terms:
yield self.spacy_stringstore[normalize(term)]
# convert token and span objects into strings
else:
if normalize == 'lemma':
for term in terms:
yield term.lemma_
elif normalize == 'lower':
for term in terms:
try:
yield term.lower_
except AttributeError:
yield term.orth_.lower()
elif not normalize:
for term in terms:
yield term.orth_
else:
for term in terms:
yield normalize(term)
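    # Illustrative usage sketch (assumes `doc` is the sample Doc from the class
    # docstring); to_terms_list() is a generator, so materialize it with list()
    # as noted above:
    #
    #   terms = list(doc.to_terms_list(ngrams=1, named_entities=True,
    #                                  normalize='lemma', as_strings=True))
    #
    # This yields lemmatized unigrams plus named entities, with entities added
    # first and exactly-overlapping unigrams skipped to avoid double-counting.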
def to_bag_of_words(self, normalize='lemma', lemmatize=None, lowercase=None,
weighting='count', as_strings=False):
"""
Transform ``Doc`` into a bag-of-words: the set of unique words in ``Doc``
mapped to their absolute, relative, or binary frequency of occurrence.
Args:
normalize (str): if 'lemma', lemmatize words before counting; if
'lower', lowercase words before counting; otherwise, words are
                counted using the form with which they appear in doc
lemmatize (bool): if True, words are lemmatized before counting; for
example, 'happy', 'happier', and 'happiest' would be grouped
together as 'happy', with a count of 3 (*DEPRECATED*)
lowercase (bool): *deprecated* if True and ``lemmatize`` is False,
words are lower-cased before counting; for example, 'happy' and
'Happy' would be grouped together as 'happy', with a count of 2
weighting ({'count', 'freq', 'binary'}): Type of weight to assign to
words. If 'count' (default), weights are the absolute number of
occurrences (count) of word in doc. If 'binary', all counts are
set equal to 1. If 'freq', word counts are normalized by the
total token count, giving their relative frequency of occurrence.
Note: The resulting set of frequencies won't (necessarily) sum
to 1.0, since punctuation and stop words are filtered out after
counts are normalized.
as_strings (bool): if True, words are returned as strings; if False
(default), words are returned as their unique integer ids
Returns:
dict: mapping of a unique word id or string (depending on the value
of ``as_strings``) to its absolute, relative, or binary frequency
of occurrence (depending on the value of ``weighting``).
"""
if lemmatize is not None or lowercase is not None:
normalize = ('lemma' if lemmatize is True else
'lower' if lowercase is True else
False)
msg = '`lemmatize` and `lowercase` params are deprecated; use `normalize` instead'
with warnings.catch_warnings():
warnings.simplefilter('once', DeprecationWarning)
warnings.warn(msg, DeprecationWarning)
if weighting not in {'count', 'freq', 'binary'}:
raise ValueError('weighting "{}" is invalid'.format(weighting))
count_by = (attrs.LEMMA if normalize == 'lemma' else
attrs.LOWER if normalize == 'lower' else
attrs.ORTH)
word_to_weight = self.spacy_doc.count_by(count_by)
if weighting == 'freq':
n_tokens = self.n_tokens
word_to_weight = {id_: weight / n_tokens
for id_, weight in word_to_weight.items()}
elif weighting == 'binary':
word_to_weight = {word: 1 for word in word_to_weight.keys()}
bow = {}
if as_strings is False:
for id_, count in word_to_weight.items():
lexeme = self.spacy_vocab[id_]
if lexeme.is_stop or lexeme.is_punct or lexeme.is_space:
continue
bow[id_] = count
else:
for id_, count in word_to_weight.items():
lexeme = self.spacy_vocab[id_]
if lexeme.is_stop or lexeme.is_punct or lexeme.is_space:
continue
bow[self.spacy_stringstore[id_]] = count
return bow
def to_bag_of_terms(self, ngrams=(1, 2, 3), named_entities=True,
normalize='lemma', lemmatize=None, lowercase=None,
weighting='count', as_strings=False, **kwargs):
"""
Transform ``Doc`` into a bag-of-terms: the set of unique terms in ``Doc``
mapped to their frequency of occurrence, where "terms" includes ngrams
and/or named entities.
Args:
ngrams (int or Set[int]): n of which n-grams to include; ``(1, 2, 3)``
(default) includes unigrams (words), bigrams, and trigrams; `2`
if only bigrams are wanted; falsy (e.g. False) to not include any
named_entities (bool): if True (default), include named entities;
note: if ngrams are also included, any ngrams that exactly
overlap with an entity are skipped to prevent double-counting
lemmatize (bool): *deprecated* if True, words are lemmatized before counting;
for example, 'happy', 'happier', and 'happiest' would be grouped
together as 'happy', with a count of 3
lowercase (bool): *deprecated* if True and ``lemmatize`` is False, words are lower-
cased before counting; for example, 'happy' and 'Happy' would be
grouped together as 'happy', with a count of 2
normalize (str or callable): if 'lemma', lemmatize terms; if 'lower',
lowercase terms; if false-y, use the form of terms as they appear
in doc; if a callable, must accept a ``spacy.Token`` or ``spacy.Span``
and return a str, e.g. :func:`textacy.spacy_utils.normalized_str()`
weighting ({'count', 'freq', 'binary'}): Type of weight to assign to
terms. If 'count' (default), weights are the absolute number of
occurrences (count) of term in doc. If 'binary', all counts are
set equal to 1. If 'freq', term counts are normalized by the
total token count, giving their relative frequency of occurrence.
as_strings (bool): if True, words are returned as strings; if False
(default), words are returned as their unique integer ids
kwargs:
- filter_stops (bool)
- filter_punct (bool)
- filter_nums (bool)
- include_pos (str or Set[str])
- exclude_pos (str or Set[str])
- min_freq (int)
- include_types (str or Set[str])
                - exclude_types (str or Set[str])
- drop_determiners (bool)
See :func:`extract.words() <textacy.extract.words>`,
:func:`extract.ngrams() <textacy.extract.ngrams>`,
and :func:`extract.named_entities() <textacy.extract.named_entities>`
for more information on these parameters.
Returns:
dict: mapping of a unique term id or string (depending on the value
of ``as_strings``) to its absolute, relative, or binary frequency
of occurrence (depending on the value of ``weighting``).
See Also:
:meth:`Doc.to_terms_list() <Doc.to_terms_list>`
"""
if lemmatize is not None or lowercase is not None:
normalize = ('lemma' if lemmatize is True else
'lower' if lowercase is True else
False)
msg = '`lemmatize` and `lowercase` params are deprecated; use `normalize` instead'
with warnings.catch_warnings():
warnings.simplefilter('once', DeprecationWarning)
warnings.warn(msg, DeprecationWarning)
if weighting not in {'count', 'freq', 'binary'}:
raise ValueError('weighting "{}" is invalid'.format(weighting))
terms_list = self.to_terms_list(
ngrams=ngrams, named_entities=named_entities,
normalize=normalize, as_strings=as_strings, **kwargs)
bot = itertoolz.frequencies(terms_list)
if weighting == 'freq':
n_tokens = self.n_tokens
bot = {term: weight / n_tokens for term, weight in bot.items()}
elif weighting == 'binary':
bot = {term: 1 for term in bot.keys()}
return bot
def to_semantic_network(self, nodes='words', normalize='lemma',
edge_weighting='default', window_width=10):
"""
Transform ``Doc`` into a semantic network, where nodes are either 'words'
or 'sents' and edges between nodes may be weighted in different ways.
Args:
nodes ({'words', 'sents'}): type of doc component to use as nodes
in the semantic network
normalize (str or callable): if 'lemma', lemmatize terms; if 'lower',
lowercase terms; if false-y, use the form of terms as they appear
in doc; if a callable, must accept a ``spacy.Token`` or ``spacy.Span``
(if ``nodes`` = 'words' or 'sents', respectively) and return a
str, e.g. :func:`textacy.spacy_utils.normalized_str()`
edge_weighting (str): type of weighting to apply to edges
between nodes; if ``nodes == 'words'``, options are {'cooc_freq', 'binary'},
if ``nodes == 'sents'``, options are {'cosine', 'jaccard'}; if
'default', 'cooc_freq' or 'cosine' will be automatically used
window_width (int): size of sliding window over terms that
determines which are said to co-occur; only applicable if 'words'
Returns:
:class:`networkx.Graph <networkx.Graph>`: where nodes represent either
terms or sentences in doc; edges, the relationships between them
Raises:
ValueError: if ``nodes`` is neither 'words' nor 'sents'
See Also:
:func:`terms_to_semantic_network() <textacy.network.terms_to_semantic_network>`
:func:`sents_to_semantic_network() <textacy.network.sents_to_semantic_network>`
"""
if nodes == 'words':
if edge_weighting == 'default':
edge_weighting = 'cooc_freq'
return network.terms_to_semantic_network(
list(textacy.extract.words(self)),
normalize=normalize,
window_width=window_width,
edge_weighting=edge_weighting)
elif nodes == 'sents':
if edge_weighting == 'default':
edge_weighting = 'cosine'
return network.sents_to_semantic_network(
list(self.sents),
normalize=normalize,
edge_weighting=edge_weighting)
else:
msg = 'nodes "{}" not valid; must be in {}'.format(
nodes, {'words', 'sents'})
raise ValueError(msg)
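    # Illustrative usage sketch (assumes `doc` is a textacy.Doc as in the class
    # docstring and that networkx is installed):
    #
    #   graph = doc.to_semantic_network(nodes='words', window_width=5)
    #   graph.number_of_nodes()   # unique normalized words in `doc`
    #
    # With nodes='words' and the 'default' edge weighting, edges carry a
    # co-occurrence-frequency ('cooc_freq') weight.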
|
StarcoderdataPython
|
8080093
|
import os
import pandas as pd
RANK_DIR = "rank"
FILE_IN = "rank_unordered.tsv"
FILE_OUT = "rank_ordered.csv"
file_path_in = os.path.join(RANK_DIR, FILE_IN)
df = pd.read_csv(file_path_in, sep="\t", names=["rank"]).rename_axis("node").sort_values(by='rank', ascending=False)
file_path_out = os.path.join(RANK_DIR, FILE_OUT)
df.to_csv(file_path_out)
|
StarcoderdataPython
|
251404
|
<reponame>Maxsior/BotCom
from commands.base import Command
from messengers import Messenger
from entities import Message
from storage import Storage
import entities.keyboards as keyboards
class LangCommand(Command):
def execute(self):
sender = self.msg.sender
messenger = Messenger.get_instance(sender.messenger)
if len(self.msg.cmd.args) == 0:
messenger.send(
sender.id,
Message('MESSAGE.WRONG_ARGS').localize(sender.lang),
keyboards.ConnectKeyboard(sender)
)
return
sender.lang = self.msg.cmd.args[0]
Storage().update(sender.key, {'lang': sender.lang})
messenger.send(
sender.id,
Message('MESSAGE.LANG_CHANGED').localize(sender.lang),
keyboards.ConnectKeyboard(sender)
)
|
StarcoderdataPython
|
3315724
|
<reponame>1067511899/tornado-learn
'''
Divisors of 42 are : 1, 2, 3, 6, 7, 14, 21, 42. These divisors squared are: 1, 4, 9, 36, 49, 196, 441, 1764. The sum of the squared divisors is 2500 which is 50 * 50, a square!
Given two integers m, n (1 <= m <= n) we want to find all integers between m and n whose sum of squared divisors is itself a square. 42 is such a number.
The result will be an array of arrays or of tuples (in C an array of Pair) or a string, each subarray having two elements, first the number whose squared divisors is a square and then the sum of the squared divisors.
#Examples:
list_squared(1, 250) --> [[1, 1], [42, 2500], [246, 84100]]
list_squared(42, 250) --> [[42, 2500], [246, 84100]]
The form of the examples may change according to the language, see Example Tests: for more details.
'''
import time
def list_squared(m, n):
result = []
for x in range(m, n + 1):
sumx = 0
sqrtx = int(x ** 0.5) + 1
for i in range(1, sqrtx):
if x % i == 0:
tmp = x // i
if tmp != i:
sumx = sumx + i ** 2 + (tmp) ** 2
else:
sumx += tmp ** 2
if int(sumx ** 0.5) ** 2 == sumx:
result.append([x, sumx])
return result
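# Quick sanity check against the examples in the problem statement above:
#   list_squared(1, 250)  should return [[1, 1], [42, 2500], [246, 84100]]
#   list_squared(42, 250) should return [[42, 2500], [246, 84100]]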
if __name__ == '__main__':
beg = time.time()
print(list_squared(1, 251110))
print(time.time() - beg)
|
StarcoderdataPython
|
71386
|
<gh_stars>1-10
from spark.tests import TestCase
from nose.tools import eq_
from users.models import User, CompletedChallenge
from challenges.models import Challenge
from challenges.utils import award_hidden_badges
class AwardHiddenBadges(TestCase):
fixtures = ['boost.json', 'challenges.json', 'completed_challenges.json']
def setUp(self):
self.profile = User.objects.create(username='test_user').profile
def get_hidden_badges(self):
return CompletedChallenge.objects.filter(profile=self.profile,
date_badge_earned=None)
def test_new_level_awards_hidden_badges(self):
eq_(1, self.profile.level)
self.profile.complete_challenges(Challenge.objects.filter(pk__in=['2_1', '2_2']))
eq_(2, len(self.get_hidden_badges()))
# Fake gaining a new level
self.profile.level = 2
award_hidden_badges(self.profile)
eq_(0, len(self.get_hidden_badges()))
def test_awarding_badges_sets_new_badge_flag(self):
challenge = Challenge.objects.get(pk='2_1')
self.profile.complete_challenges([challenge])
eq_(False, self.get_hidden_badges()[0].new_badge)
self.profile.level = 2
award_hidden_badges(self.profile)
eq_(True, CompletedChallenge.objects.get(profile=self.profile,
challenge=challenge).new_badge)
|
StarcoderdataPython
|
4825421
|
from bzt.utils import SoapUIScriptConverter
from tests.unit import BZTestCase, RESOURCES_DIR, ROOT_LOGGER
class TestSoapUIConverter(BZTestCase):
def test_minimal(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
config = obj.convert_script(RESOURCES_DIR + "soapui/project.xml")
self.assertIn("execution", config)
self.assertEqual(4, len(config["execution"]))
execution = config["execution"][1]
self.assertEqual("TestSuite 1-index", execution.get("scenario"))
self.assertEqual(60, execution.get("hold-for"))
self.assertEqual(10, execution.get("concurrency"))
self.assertIn("scenarios", config)
self.assertIn("TestSuite 1-index", config["scenarios"])
scenario = config["scenarios"]["TestSuite 1-index"]
self.assertIn("requests", scenario)
self.assertEqual(3, len(scenario["requests"]))
self.assertIn("variables", scenario)
self.assertEqual("foo", scenario["variables"].get("something"))
self.assertEqual("2", scenario["variables"].get("something_else"))
self.assertEqual("json", scenario["variables"].get("route_part"))
first_req = scenario["requests"][0]
self.assertEqual("http://blazedemo.com/reserve.php", first_req["url"])
self.assertEqual("test index", first_req["label"])
self.assertIn("headers", first_req)
self.assertEqual(first_req["headers"].get("X-Custom-Header"), "Value")
self.assertIn("assert", first_req)
self.assertEqual(2, len(first_req["assert"]))
self.assertEqual("BlazeDemo", first_req["assert"][0]["contains"][0])
self.assertFalse(first_req["assert"][0]["not"])
self.assertFalse(first_req["assert"][0]["regexp"])
self.assertEqual("BlazeDemou", first_req["assert"][1]["contains"][0])
self.assertTrue(first_req["assert"][1]["not"])
self.assertTrue(first_req["assert"][1]["regexp"])
second_req = scenario["requests"][1]
self.assertEqual("http://example.com/body", second_req["url"])
self.assertEqual("posty", second_req["label"])
self.assertEqual("POST", second_req["method"])
self.assertIn("headers", second_req)
self.assertEqual(second_req["headers"].get("X-Header"), "X-Value")
self.assertEqual(second_req["headers"].get("X-Header-2"), "X-Value-2")
self.assertIn("body", second_req)
self.assertIn("answer", second_req["body"])
self.assertEqual('42', second_req["body"]["answer"])
self.assertIn("extract-xpath", second_req)
self.assertIn("something_else", second_req["extract-xpath"])
self.assertEqual("//head", second_req["extract-xpath"]["something_else"]["xpath"])
third_req = scenario["requests"][2]
self.assertEqual("http://localhost:9999/api/${route_part}", third_req["url"])
self.assertEqual("/api/json", third_req["label"])
self.assertIn("extract-jsonpath", third_req)
self.assertIn("something", third_req["extract-jsonpath"])
self.assertEqual("$.baz", third_req["extract-jsonpath"]["something"]["jsonpath"])
def test_find_test_case(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
config = obj.convert_script(RESOURCES_DIR + "soapui/project.xml")
scenarios = config["scenarios"]
self.assertEqual(len(scenarios), 4)
target_scenario = scenarios["TestSuite 1-index"]
found_name, found_scenario = obj.find_soapui_test_case("index", scenarios)
self.assertEqual(target_scenario, found_scenario)
def test_find_test_case_empty(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
self.sniff_log(obj.log)
config = obj.convert_script(RESOURCES_DIR + "soapui/project.xml")
scenarios = config["scenarios"]
self.assertEqual(len(scenarios), 4)
target_scenario = scenarios["BlazeDemo LoadTest"]
found_name, found_scenario = obj.find_soapui_test_case(None, scenarios)
self.assertEqual(target_scenario, found_scenario)
def test_skip_if_no_requests(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
self.sniff_log(obj.log)
obj.convert_script(RESOURCES_DIR + "soapui/project.xml")
self.assertIn("No requests extracted for scenario TestSuite 1-EmptyTestCase, skipping it",
self.log_recorder.warn_buff.getvalue())
def test_rest_service_name_as_base_address(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
config = obj.convert_script(RESOURCES_DIR + "soapui/youtube-sample.xml")
scenarios = config["scenarios"]
scenario = scenarios["TestSuite-TestCase"]
self.assertEqual(len(scenario["requests"]), 5)
for request in scenario["requests"]:
self.assertTrue(request["url"].startswith("http://gdata.youtube.com/"))
def test_project_suite_case_level_properties(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
config = obj.convert_script(RESOURCES_DIR + "soapui/flickr-sample.xml")
scenarios = config["scenarios"]
scenario = scenarios["TestSuite-TestCase"]
self.assertEqual(len(scenario["variables"]), 2)
self.assertIn("#Project#ApiKey", scenario["variables"])
self.assertIn("#TestCase#temp", scenario["variables"])
def test_rest_parameters(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
config = obj.convert_script(RESOURCES_DIR + "soapui/flickr-sample.xml")
scenarios = config["scenarios"]
scenario = scenarios["TestSuite-TestCase"]
self.assertEqual(len(scenario["requests"]), 4)
first = scenario["requests"][0]
self.assertIn("body", first)
self.assertEqual(len(first["body"]), 4)
self.assertTrue(all(key in first["body"] for key in ["format", "method", "nojsoncallback", "api_key"]))
def test_soap_conversion(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
config = obj.convert_script(RESOURCES_DIR + "soapui/globalweather.xml")
self.assertEqual(len(config["scenarios"]), 4)
merged = config["scenarios"]["GWSOAPMerged-Test"]
split1 = config["scenarios"]["GWSOAPSplit-GetCities"]
split2 = config["scenarios"]["GWSOAPSplit-GetWeather"]
self.assertEqual(len(merged["requests"]), 2)
self.assertEqual(merged["requests"][0]["url"], "http://www.webservicex.com/globalweather.asmx")
self.assertEqual(merged["requests"][0]["method"], "POST")
self.assertEqual(merged["requests"][0]["headers"]["Content-Type"], "text/xml; charset=utf-8")
self.assertIn("body", merged["requests"][0])
self.assertEqual(merged["requests"][1]["url"], "http://www.webservicex.com/globalweather.asmx")
self.assertEqual(len(split1["requests"]), 1)
self.assertEqual(split1["requests"][0]["url"], "http://www.webservicex.com/globalweather.asmx")
self.assertEqual(len(split2["requests"]), 1)
self.assertEqual(split2["requests"][0]["url"], "http://www.webservicex.com/globalweather.asmx")
def test_rest_templated_params_interpolation(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
config = obj.convert_script(RESOURCES_DIR + "soapui/gmaps-sample.xml")
self.assertEqual(len(config["scenarios"]), 10)
scenario = config["scenarios"]["Directions API TestSuite-Simple Tests"]
for request in scenario["requests"]:
self.assertNotIn("{format}", request["url"])
self.assertEqual(scenario["requests"][0]["url"], "http://maps.googleapis.com/maps/api/directions/json")
self.assertEqual(scenario["requests"][1]["url"], "http://maps.googleapis.com/maps/api/directions/json")
self.assertEqual(scenario["requests"][2]["url"], "http://maps.googleapis.com/maps/api/directions/xml")
|
StarcoderdataPython
|
6594948
|
<gh_stars>0
from array import *
##################################################################
# 1. Create an array and traverse.
myarray = array('i', [1,2,3,4,5,6]) # i means int
print(myarray)
myarray1 = array('d', [1.3,2.4,3.2,4.1,5.5]) # d means double
print(myarray1)
##################################################################
# 2. Insert Elements in an array.
myarray.insert(6, 7) #6 is the index and 7 is the value
print(myarray)
myarray.insert(0, 0) # 0 is the index and 0 is the value
print(myarray)
##################################################################
# 3 Traversal Operations
def traverse(array):
for i in array:
print(i)
traverse(myarray)
##################################################################
print("--------------------------------")
# 4 Accessing an element in an array
def accesselement(arr, index):
    if index >= len(arr):
        print("No such element present")
        return
    print(arr[index])
accesselement(myarray, 4)
##################################################################
print("--------------------------------")
# 5 Searching for an element
def search(arr , value):
for i in arr:
if i == value:
print("Element present at :", arr.index(value))
search(myarray, 4)
##################################################################
print("--------------------------------")
#Delete an element
myarray.remove(7) #pass value
print(myarray)
print("--------------------------------")
|
StarcoderdataPython
|
9720669
|
<gh_stars>0
"""Axis conftest."""
from typing import Optional
from unittest.mock import patch
from axis.rtsp import (
SIGNAL_DATA,
SIGNAL_FAILED,
SIGNAL_PLAYING,
STATE_PLAYING,
STATE_STOPPED,
)
import pytest
from tests.components.light.conftest import mock_light_profiles # noqa: F401
@pytest.fixture(autouse=True)
def mock_axis_rtspclient():
"""No real RTSP communication allowed."""
with patch("axis.streammanager.RTSPClient") as rtsp_client_mock:
rtsp_client_mock.return_value.session.state = STATE_STOPPED
async def start_stream():
"""Set state to playing when calling RTSPClient.start."""
rtsp_client_mock.return_value.session.state = STATE_PLAYING
rtsp_client_mock.return_value.start = start_stream
def stop_stream():
"""Set state to stopped when calling RTSPClient.stop."""
rtsp_client_mock.return_value.session.state = STATE_STOPPED
rtsp_client_mock.return_value.stop = stop_stream
def make_rtsp_call(data: Optional[dict] = None, state: str = ""):
"""Generate a RTSP call."""
axis_streammanager_session_callback = rtsp_client_mock.call_args[0][4]
if data:
rtsp_client_mock.return_value.rtp.data = data
axis_streammanager_session_callback(signal=SIGNAL_DATA)
elif state:
axis_streammanager_session_callback(signal=state)
else:
raise NotImplementedError
yield make_rtsp_call
@pytest.fixture(autouse=True)
def mock_rtsp_event(mock_axis_rtspclient):
"""Fixture to allow mocking received RTSP events."""
def send_event(
topic: str,
data_type: str,
data_value: str,
operation: str = "Initialized",
source_name: str = "",
source_idx: str = "",
) -> None:
source = ""
if source_name != "" and source_idx != "":
source = f'<tt:SimpleItem Name="{source_name}" Value="{source_idx}"/>'
event = f"""<?xml version="1.0" encoding="UTF-8"?>
<tt:MetadataStream xmlns:tt="http://www.onvif.org/ver10/schema">
<tt:Event>
<wsnt:NotificationMessage xmlns:tns1="http://www.onvif.org/ver10/topics"
xmlns:tnsaxis="http://www.axis.com/2009/event/topics"
xmlns:wsnt="http://docs.oasis-open.org/wsn/b-2"
xmlns:wsa5="http://www.w3.org/2005/08/addressing">
<wsnt:Topic Dialect="http://docs.oasis-open.org/wsn/t-1/TopicExpression/Simple">
{topic}
</wsnt:Topic>
<wsnt:ProducerReference>
<wsa5:Address>
uri://bf32a3b9-e5e7-4d57-a48d-1b5be9ae7b16/ProducerReference
</wsa5:Address>
</wsnt:ProducerReference>
<wsnt:Message>
<tt:Message UtcTime="2020-11-03T20:21:48.346022Z"
PropertyOperation="{operation}">
<tt:Source>{source}</tt:Source>
<tt:Key></tt:Key>
<tt:Data>
<tt:SimpleItem Name="{data_type}" Value="{data_value}"/>
</tt:Data>
</tt:Message>
</wsnt:Message>
</wsnt:NotificationMessage>
</tt:Event>
</tt:MetadataStream>
"""
mock_axis_rtspclient(data=event.encode("utf-8"))
yield send_event
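# Illustrative use from a test (the topic and values below are hypothetical):
# the fixture above builds an ONVIF-style MetadataStream event and feeds it
# through the mocked RTSP client, e.g.
#
#   mock_rtsp_event(
#       topic="tns1:Device/tnsaxis:Sensor/PIR",
#       data_type="state",
#       data_value="1",
#       source_name="sensor",
#       source_idx="0",
#   )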
@pytest.fixture(autouse=True)
def mock_rtsp_signal_state(mock_axis_rtspclient):
"""Fixture to allow mocking RTSP state signalling."""
def send_signal(connected: bool) -> None:
"""Signal state change of RTSP connection."""
signal = SIGNAL_PLAYING if connected else SIGNAL_FAILED
mock_axis_rtspclient(state=signal)
yield send_signal
|
StarcoderdataPython
|
63444
|
import collections
import typing
from . import tags
from .iterators import chunked_iterable
from .models import Project
def list_cluster_arns_in_account(ecs_client):
"""
Generates the ARN of every ECS cluster in an account.
"""
paginator = ecs_client.get_paginator("list_clusters")
for page in paginator.paginate():
yield from page["clusterArns"]
def list_service_arns_in_cluster(ecs_client, *, cluster):
"""
Generates the ARN of every ECS service in a cluster.
"""
paginator = ecs_client.get_paginator("list_services")
for page in paginator.paginate(cluster=cluster):
yield from page["serviceArns"]
def describe_services(session):
"""
Describe all the ECS services in an account.
"""
ecs_client = session.client("ecs")
result = []
for cluster in list_cluster_arns_in_account(ecs_client):
service_arns = list_service_arns_in_cluster(ecs_client, cluster=cluster)
# We can specify up to 10 services in a single DescribeServices API call.
for service_set in chunked_iterable(service_arns, size=10):
resp = ecs_client.describe_services(
cluster=cluster,
services=service_set,
include=["TAGS"]
)
result.extend(resp["services"])
return result
class NoMatchingServiceError(Exception):
pass
class MultipleMatchingServicesError(Exception):
pass
def find_matching_service(
service_descriptions, *, service_id, environment_id
):
"""
Given a service (e.g. bag-unpacker) and an environment (e.g. prod),
return the unique matching service.
"""
try:
return tags.find_unique_resource_matching_tags(
service_descriptions,
expected_tags={
"deployment:service": service_id,
"deployment:env": environment_id,
}
)
except tags.NoMatchingResourceError:
raise NoMatchingServiceError(
f"No matching service found for {service_id}/{environment_id}!"
)
except tags.MultipleMatchingResourcesError:
raise MultipleMatchingServicesError(
f"Multiple matching services found for {service_id}/{environment_id}!"
)
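# Illustrative sketch using the docstring's example: with service_id of
# "bag-unpacker" and environment_id of "prod", this returns the single service
# description whose AWS tags include
#   {"deployment:service": "bag-unpacker", "deployment:env": "prod"}
# and raises one of the errors above if zero or several services match.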
def find_service_arns_for_release(
*, project: Project, release, service_descriptions, environment_id
):
"""
Build a dictionary (image ID) -> list(service ARNs) for all the images
in a particular release.
"""
result = {image_id: [] for image_id in release["images"]}
for image_id in release["images"]:
try:
services = project.image_repositories[image_id].services
except KeyError:
continue
for service_id in services:
try:
matching_service = find_matching_service(
service_descriptions,
service_id=service_id,
environment_id=environment_id
)
except NoMatchingServiceError:
continue
result[image_id].append(matching_service["serviceArn"])
return result
def deploy_service(session, *, cluster_arn, service_arn, deployment_label):
"""
Triggers a deployment of a given service.
"""
ecs_client = session.client("ecs")
resp = ecs_client.update_service(
cluster=cluster_arn,
service=service_arn,
forceNewDeployment=True
)
ecs_client.tag_resource(
resourceArn=service_arn,
tags=tags.to_aws_tags({"deployment:label": deployment_label})
)
return {
"cluster_arn": resp["service"]["clusterArn"],
"service_arn": resp["service"]["serviceArn"],
"deployment_id": resp["service"]["deployments"][0]["id"]
}
def list_tasks_in_service(session, *, cluster_arn, service_name):
"""
Given the name of a service, return a list of tasks running within
the service.
"""
ecs_client = session.client("ecs")
task_arns = []
paginator = ecs_client.get_paginator("list_tasks")
for page in paginator.paginate(
cluster=cluster_arn, serviceName=service_name
):
task_arns.extend(page["taskArns"])
# If task_arns is empty we can't ask to describe them.
# TODO: This method can handle up to 100 task ARNs. It seems unlikely
# we'd ever have more than that, hence not handling it properly.
if task_arns:
resp = ecs_client.describe_tasks(
cluster=cluster_arn,
tasks=task_arns,
include=["TAGS"]
)
return resp["tasks"]
else:
return []
def find_ecs_services_for_release(
*,
project: Project,
service_descriptions: typing.List[typing.Dict],
release: str,
environment_id: str
):
"""
Returns a map (image ID) -> Dict(service ID -> ECS service description)
"""
matched_services = collections.defaultdict(dict)
for image_id, _ in release['images'].items():
# Attempt to match deployment image id to config and override service_ids
try:
matched_image = project.image_repositories[image_id]
except KeyError:
continue
for service_id in matched_image.services:
try:
service_description = find_matching_service(
service_descriptions=service_descriptions,
service_id=service_id,
environment_id=environment_id
)
matched_services[image_id] = {
service_id: service_description
}
except NoMatchingServiceError:
pass
return matched_services
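# Illustrative shape of the return value (image/service ids are hypothetical):
#   {
#       "bag_unpacker": {"bag-unpacker": {...ECS service description...}},
#   }
# Image ids with no entry in project.image_repositories, and services with no
# matching ECS service in this environment, are simply left out.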
|
StarcoderdataPython
|
1631838
|
#!/usr/bin/env python
# encoding:utf-8
from handler.public import (BasePage, BaseApi, Auth)
from fastweb import coroutine
import sys
import datetime
reload(sys)
sys.setdefaultencoding('utf-8')
# Platform home page
class LogHandler(BasePage):
@coroutine
@Auth.authUser
def get(self):
self.end('log/index.html', username=self.get_secure_cookie("user_name"))
class LogListHandler(BaseApi):
@coroutine
@Auth.authUser
def get(self):
user_id = self.get_secure_cookie("userId")
sql = '''
SELECT
id,
log_title,
`type`,
log_desc,
create_time
FROM
login_log
WHERE
user_id = %s
LIMIT 20;
'''
yield self.mysql_passhub_db.query(sql, user_id)
result = self.mysql_passhub_db.fetchall()
list = []
for r in result:
json = {}
json['id'] = r['id']
json['log_title'] = r['log_title']
json['type'] = r['type']
json['log_desc'] = r['log_desc']
json['create_time'] = datetime.datetime.strftime(r['create_time'], '%Y-%m-%d %H:%M:%S')
list.append(json)
self.end(code='SUC', log=True, **{'result': list})
|
StarcoderdataPython
|
3211739
|
<filename>linkedlist/addTwoNumbersTwo.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: <NAME>
# You are given two non-empty linked lists representing two non-negative integers. The most significant digit comes first and each of their nodes contain a single digit. Add the two numbers and return it as a linked list.
# Example:
# Input: (7 -> 2 -> 4 -> 3) + (5 -> 6 -> 4)
# Output: 7 -> 8 -> 0 -> 7
# ****************
# Final Solution *
# Using Stack *
# ****************
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def addTwoNumbers(self, n1, n2):
l1 = self.list_to_stack(n1)
l2 = self.list_to_stack(n2)
stack = self.add_helper(l1,l2)
return self.stack_to_list(stack)
def list_to_stack(self, node):
stack = []
while node:
stack.append(node.val)
node = node.next
return stack
def add_helper(self, s1, s2):
res = []
carry = 0
while s1 or s2 or carry:
num1 = s1.pop() if s1 else 0
num2 = s2.pop() if s2 else 0
total = num1 + num2 + carry
res.append(total % 10)
carry = total/10
return res
def stack_to_list(self, stack):
head = ListNode(0)
cur = head
while stack:
cur.next = ListNode(stack.pop())
cur = cur.next
return head.next
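# Illustrative trace of the stack approach on the example above:
#   list_to_stack: [7, 2, 4, 3] and [5, 6, 4]
#   add_helper pops: 3+4=7 (carry 0), 4+6=10 -> 0 (carry 1), 2+5+1=8, 7 -> 7
#   res == [7, 0, 8, 7], rebuilt back-to-front as 7 -> 8 -> 0 -> 7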
# *****************************************
# The following code is an fail attempt *
# Passed 893/1539 Test Cases *
# Failed Cases such as 1 + 99(Getting 01) *
# *****************************************
class Solution(object):
def addTwoNumbers(self, n1, n2):
#Pad The Two List
len1, len2 = self.get_length(n1), self.get_length(n2)
if len1 > len2:
#Insert Dummy Node to head
for _ in range(len1-len2):
temp = ListNode(0)
temp.next = n2
n2 = temp
else:
for _ in range(len2-len1):
temp = ListNode(0)
temp.next = n1
n1 = temp
has_carry_head = False
#Edge: Largest Carry
res = cur = ListNode(0)
guard1, guard2 = n1, n2
carry = 0
while n1 or n2 or carry:
if n1.next and n2.next:
carry = (n1.next.val + n2.next.val) / 10
result = carry + n1.val + n2.val
temp = ListNode(result % 10)
front_carry = result / 10
cur.next = temp
cur = cur.next
n1 = n1.next
n2 = n2.next
if guard1.val + guard2.val + front_carry > 10:
res.val = 1
has_carry_head = True
print has_carry_head
return res if has_carry_head else res.next
def display(self,node):
cur = node
arr = []
while cur:
arr.append(cur.val)
cur = cur.next
print arr
def get_length(self, node):
cur = node
count = 0
while cur:
count += 1
cur = cur.next
return count
# Post-mortem on the failed attempt
# Honestly, I'm still confused. I first tried writing the insert and pad logic
# as separate helper functions, but passing the lists in did not behave like
# pass-by-reference as I expected, so I ended up inlining it mechanically.
# This code passes most of the test cases; a few small ones (such as 1 + 99,
# which yields 01) still fail and I could not figure out how to fix them.
# It got very messy. The rough idea is to pad the two lists to equal length
# and then add digit by digit, the CC189 (Cracking the Coding Interview) approach.
# After looking at the solution afterwards, this problem can be solved with a stack.
|
StarcoderdataPython
|
268067
|
from beem.account import Account
from beem.amount import Amount
from beem import Steem
from beem.instance import set_shared_steem_instance
from beem.nodelist import NodeList
from beem.utils import formatTimeString
import re
import json
import os
from time import sleep
import dataset
import json
from steembi.parse_hist_op import ParseAccountHist
from steembi.storage import TrxDB, MemberDB, ConfigurationDB, AccountsDB
if __name__ == "__main__":
config_file = 'config.json'
if not os.path.isfile(config_file):
raise Exception("config.json is missing!")
else:
with open(config_file) as json_data_file:
config_data = json.load(json_data_file)
databaseConnector = config_data["databaseConnector"]
databaseConnector2 = config_data["databaseConnector2"]
mgnt_shares = config_data["mgnt_shares"]
hive_blockchain = config_data["hive_blockchain"]
db2 = dataset.connect(databaseConnector2)
# Create keyStorage
trxStorage = TrxDB(db2)
memberStorage = MemberDB(db2)
confStorage = ConfigurationDB(db2)
accStorage = AccountsDB(db2)
accounts = accStorage.get()
other_accounts = accStorage.get_transfer()
sp_share_ratio = confStorage.get()["sp_share_ratio"]
nodes = NodeList()
try:
nodes.update_nodes()
except:
print("could not update nodes")
stm = Steem(node=nodes.get_nodes(hive=hive_blockchain))
# Update current node list from @fullnodeupdate
print("check member database")
# memberStorage.wipe(True)
member_accounts = memberStorage.get_all_accounts()
data = trxStorage.get_all_data()
missing_accounts = []
member_data = {}
aborted = False
for m in member_accounts:
member_data[m] = memberStorage.get(m)
# Check wrong account names:
if False:
cnt = 0
for m in member_accounts:
if aborted:
continue
cnt += 1
if cnt % 100 == 0:
print("%d/%d scanned" % ())
try:
acc = Account(m, steem_instance=stm)
except KeyboardInterrupt:
aborted = True
except:
print("%s is not a valid account" % m)
missing_accounts.append(m)
shares = 0
bonus_shares = 0
balance_rshares = 0
for m in member_data:
shares += member_data[m]["shares"]
bonus_shares += member_data[m]["bonus_shares"]
balance_rshares += member_data[m]["balance_rshares"]
print("units: %d" % shares)
print("bonus units: %d" % bonus_shares)
print("total units: %d" % (shares + bonus_shares))
print("----------")
print("balance_rshares: %d" % balance_rshares)
print("balance_rshares: %.3f $" % stm.rshares_to_sbd(balance_rshares))
if len(missing_accounts) > 0:
print("%d not existing accounts: " % len(missing_accounts))
print(missing_accounts)
|
StarcoderdataPython
|
12863991
|
import pytest
from Door import Door
def test_find_password():
door_id = "abc"
door = Door(door_id)
assert(door.find_password() == "<PASSWORD>")
def test_find_password2():
door_id = "abc"
door = Door(door_id)
assert(door.find_password2() == "<PASSWORD>")
|
StarcoderdataPython
|
3254957
|
<gh_stars>1-10
# tests/test_py
from ttlockwrapper import TTLock,TTlockAPIError, constants
import requests_mock
import requests
import re
import pytest
FAKE_CLIENT_ID='34144ff6749ea9ced96cbd2470db12f2'
FAKE_ACCESS_TOKEN='<KEY>'
TOKEN_ERROR_CODES = [10003]
INVALID_CURRENT_TIMESTAMP_ERROR = 80000
LOCK_STATE_RESPONSE = '{"state": 1}'
LOCK_ELECTRIC_QUANTITY_RESPONSE = '{"electricQuantity": 68}'
INVALID_TOKEN_RESPONSE = '{"errcode": 10003,"errmsg": "invalid token","description": ""}'
MOCK_JSON_PATH = './tests/data/'
def response_lock_records_list_callback(request, context):
pageNo = re.compile('pageNo=\\d').search(request.url).group()[7:]
with open(MOCK_JSON_PATH+'lock_records_response_page_{}.json'.format(pageNo), 'r') as json_file:
mock_response = json_file.read()
return mock_response
def response_gateway_list_callback(request, context):
pageNo = re.compile('pageNo=\\d').search(request.url).group()[7:]
with open(MOCK_JSON_PATH+'gateway_response_page_{}.json'.format(pageNo), 'r') as json_file:
mock_response = json_file.read()
return mock_response
def test_ttlock_get_gateways_list_paginated():
    """Tests the API call that lists the gateways of an account."""
    with requests_mock.Mocker() as m:
        m.register_uri('GET', re.compile(constants.GATEWAY_LIST_RESOURCE), text=response_gateway_list_callback)
        response_generator = TTLock(clientId=FAKE_CLIENT_ID,
                                    accessToken=FAKE_ACCESS_TOKEN).get_gateway_generator(pageSize=1)
        gateways = [gateway for gateway in response_generator]
        assert len(gateways) == 2
def test_ttlock_get_gateways_stop_iteration(mocker):
    """Tests that the gateway generator stops iterating when page verification fails."""
    with requests_mock.Mocker() as m:
        mocker.patch.object(TTLock, '__verify_page__')
        TTLock.__verify_page__.return_value = False
        m.register_uri('GET', re.compile(constants.GATEWAY_LIST_RESOURCE), text=response_gateway_list_callback)
        response_generator = TTLock(clientId=FAKE_CLIENT_ID,
                                    accessToken=FAKE_ACCESS_TOKEN).get_gateway_generator(pageSize=1)
        with pytest.raises(StopIteration):
            next(response_generator)
def test_ttlock_get_locks_gateway_list():
    with open(MOCK_JSON_PATH+'gateway_lock_list_response.json', 'r') as json_file:
        mock_response = json_file.read()
    with requests_mock.Mocker() as m:
        m.register_uri('GET', re.compile(constants.LOCKS_PER_GATEWAY_RESOURCE), text=mock_response)
        response_generator = TTLock(clientId=FAKE_CLIENT_ID,
                                    accessToken=FAKE_ACCESS_TOKEN).get_locks_per_gateway_generator(gatewayId=35155)
        lock_list = [lock for lock in response_generator]
        assert isinstance(lock_list, list)
        assert lock_list[0].get('lockId') == 3879122
        assert lock_list[1].get('lockId') == 1928723


def test_ttlock_get_lock_records_list_paginated():
    with requests_mock.Mocker() as m:
        m.register_uri('GET', re.compile(constants.LOCK_RECORDS_RESOURCE), text=response_lock_records_list_callback)
        response_generator = TTLock(clientId=FAKE_CLIENT_ID,
                                    accessToken=FAKE_ACCESS_TOKEN).get_lock_records_generator(lockId=1928723, pageSize=20)
        record_list = [record for record in response_generator]
        assert len(record_list) == 80


def test_ttlock_get_lock_state():
    with requests_mock.Mocker() as m:
        m.register_uri('GET', re.compile(constants.LOCK_STATE_RESOURCE), text=LOCK_STATE_RESPONSE)
        state = TTLock(clientId=FAKE_CLIENT_ID,
                       accessToken=FAKE_ACCESS_TOKEN).lock_state(lockId=1928723)
        assert state == 1


def test_ttlock_get_lock_electric_quantity():
    with requests_mock.Mocker() as m:
        m.register_uri('GET', re.compile(constants.LOCK_ELECTRIC_QUANTITY_RESOURCE), text=LOCK_ELECTRIC_QUANTITY_RESPONSE)
        electric_quantity = TTLock(clientId=FAKE_CLIENT_ID,
                                   accessToken=FAKE_ACCESS_TOKEN).lock_electric_quantity(lockId=1928723)
        assert electric_quantity == 68
def test_ttlock_no_mock_request():
    # Without a registered mock the wrapper hits the real API with fake
    # credentials, so each call is expected to raise TTlockAPIError.
    with pytest.raises(TTlockAPIError) as ttLock_error:
        response = TTLock(clientId=FAKE_CLIENT_ID,
                          accessToken=FAKE_ACCESS_TOKEN).lock_state()
        # Only reached if the call does not raise (kept from the original test).
        assert response.status_code in constants.GOOD_HTTP_CODES
    assert ttLock_error.value.error_code != INVALID_CURRENT_TIMESTAMP_ERROR
    with pytest.raises(TTlockAPIError) as ttLock_error:
        response = TTLock(clientId=FAKE_CLIENT_ID,
                          accessToken=FAKE_ACCESS_TOKEN).lock_electric_quantity()
        assert response.status_code in constants.GOOD_HTTP_CODES
    assert ttLock_error.value.error_code != INVALID_CURRENT_TIMESTAMP_ERROR
def test_ttlock_no_mock_invalid_date_current():
    with pytest.raises(TTlockAPIError) as ttLock_error:
        # Build a gateway-list URL whose "date" parameter is 10 s in the past.
        _url_request = constants.GATEWAY_LIST_URL.format(
            constants.API_URI,
            constants.GATEWAY_LIST_RESOURCE,
            FAKE_CLIENT_ID,
            FAKE_ACCESS_TOKEN,
            1,
            20,
            TTLock().__get_current_millis__() - 10000,
        )
        TTLock(clientId=FAKE_CLIENT_ID,
               accessToken=FAKE_ACCESS_TOKEN).lock_electric_quantity()
    assert ttLock_error.value.error_code == INVALID_CURRENT_TIMESTAMP_ERROR
def test_lock():
    with requests_mock.Mocker() as m:
        m.register_uri('GET', re.compile(constants.LOCK_RESOURCE), text='{"errcode": 0,"errmsg": "none error message or means yes","description": "表示成功或是"}')
        assert TTLock(clientId=FAKE_CLIENT_ID,
                      accessToken=FAKE_ACCESS_TOKEN).lock(lockId=1928723)


def test_unlock():
    with requests_mock.Mocker() as m:
        m.register_uri('GET', re.compile(constants.UNLOCK_RESOURCE), text='{"errcode": 0,"errmsg": "none error message or means yes","description": "表示成功或是"}')
        assert TTLock(clientId=FAKE_CLIENT_ID,
                      accessToken=FAKE_ACCESS_TOKEN).unlock(lockId=1928723)
def test__send_request__():
    with pytest.raises(requests.exceptions.HTTPError):
        with requests_mock.Mocker() as m:
            m.register_uri('GET', re.compile(constants.UNLOCK_RESOURCE), text='Not Found', status_code=403)
            assert TTLock(clientId=FAKE_CLIENT_ID, accessToken=FAKE_ACCESS_TOKEN).unlock(lockId=1928723)
    with requests_mock.Mocker() as m:
        m.register_uri('GET', re.compile(constants.UNLOCK_RESOURCE), text='{"errcode": 0,"errmsg": "none error message or means yes","description": "表示成功或是"}')
        assert TTLock(clientId=FAKE_CLIENT_ID,
                      accessToken=FAKE_ACCESS_TOKEN).unlock(lockId=1928723)
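
# Hedged usage sketch (not part of the test suite): outside of the mocked
# tests, the same wrapper calls exercised above would be driven with real
# credentials. The clientId/accessToken values here are placeholders, and the
# snippet is kept commented out so importing this test module stays side-effect
# free.
#
# from ttlockwrapper import TTLock
#
# api = TTLock(clientId="your-client-id", accessToken="your-access-token")
# for gateway in api.get_gateway_generator(pageSize=20):
#     print(gateway)
# print(api.lock_state(lockId=1928723))
# api.unlock(lockId=1928723)
# api.lock(lockId=1928723)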
|
StarcoderdataPython
|