id | text | dataset_id
---|---|---
4828718
|
<filename>scripts/deepsplines_tutorial.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script exemplifies how to use DeepSplines in a network,
starting from the PyTorch CIFAR-10 tutorial:
https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
"""
import time
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# Need to import dsnn (takes the role of torch.nn for DeepSplines)
from deepsplines.ds_modules import dsnn
########################################################################
# ReLU network
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
self.num_params = sum(p.numel() for p in self.parameters())
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
########################################################################
# Deepspline network
# We now show how to replace the ReLUs with DeepSpline activations in
# the previous network.
# We can use deepspline modules of three types:
# - DeepBSpline
# - DeepBSplineExplicitLinear
# - DeepReLUSpline
# In this tutorial, we use the first as an example.
# The model needs to inherit from dsnn.DSModule. This is a wrapper around
# nn.Module that contains all the DeepSpline functionality.
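# As a quick standalone illustration (a sketch, not part of the original
# tutorial): a deepspline module is an elementwise activation, so it can be
# applied directly to a tensor whose channel dimension matches the declared
# number of channels/neurons. Assuming a 4D input with 6 channels (as used
# for conv1 below):
#
#   ds = dsnn.DeepBSpline('conv', 6, size=51, range_=4, init='leaky_relu')
#   out = ds(torch.randn(8, 6, 32, 32))  # output keeps the input's shape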
class DSNet(dsnn.DSModule):
def __init__(self):
super().__init__()
        # We put the deepsplines (ds) of the convolutional and fully-connected
        # layers in two separate nn.ModuleList() for simplicity.
self.conv_ds = nn.ModuleList()
self.fc_ds = nn.ModuleList()
# We define some optional parameters for the deepspline
# (see DeepBSpline.__init__())
opt_params = {
'size': 51,
'range_': 4,
'init': 'leaky_relu',
'save_memory': False
}
self.conv1 = nn.Conv2d(3, 6, 5)
# 1st parameter (mode): 'conv' (convolutional) / 'fc' (fully-connected)
# 2nd parameter: nb. channels (mode='conv') / nb. neurons (mode='fc')
self.conv_ds.append(dsnn.DeepBSpline('conv', 6, **opt_params))
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.conv_ds.append(dsnn.DeepBSpline('conv', 16, **opt_params))
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc_ds.append(dsnn.DeepBSpline('fc', 120, **opt_params))
self.fc2 = nn.Linear(120, 84)
self.fc_ds.append(dsnn.DeepBSpline('fc', 84, **opt_params))
self.fc3 = nn.Linear(84, 10)
self.initialization(opt_params['init'], init_type='He')
self.num_params = self.get_num_params()
def forward(self, x):
x = self.pool(self.conv_ds[0](self.conv1(x)))
x = self.pool(self.conv_ds[1](self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = self.fc_ds[0](self.fc1(x))
x = self.fc_ds[1](self.fc2(x))
x = self.fc3(x)
return x
if __name__ == "__main__":
########################################################################
# Load the data
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
batch_size = 4
trainset = torchvision.datasets.CIFAR10(root='./data',
train=True,
download=True,
transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=batch_size,
shuffle=True,
num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data',
train=False,
download=True,
transform=transform)
testloader = torch.utils.data.DataLoader(testset,
batch_size=batch_size,
shuffle=False,
num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
'ship', 'truck')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f'\nDevice: {device}')
########################################################################
# Network, optimizer, loss
net = Net() # relu network
net.to(device)
print('ReLU: nb. parameters - {:d}'.format(net.num_params))
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
dsnet = DSNet() # deepsplines network
dsnet.to(device)
print('DeepSpline: nb. parameters - {:d}'.format(dsnet.num_params))
# For the parameters of the deepsplines, an optimizer different from "SGD"
# is usually required for stability during training (Adam is recommended).
# Therefore, when using an SGD optimizer for the network parameters, we
# require an auxiliary one for the deepspline parameters.
    # Inheriting from DSModule allows us to use the parameters_deepspline()
# and parameters_no_deepspline() methods for this.
main_optimizer = optim.SGD(dsnet.parameters_no_deepspline(),
lr=0.001,
momentum=0.9)
aux_optimizer = optim.Adam(dsnet.parameters_deepspline())
criterion = nn.CrossEntropyLoss()
########################################################################
# Training the ReLU network
print('\nTraining ReLU network.')
start_time = time.time()
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data[0].to(device), data[1].to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
end_time = time.time()
print('Finished Training ReLU network. \n'
'Took {:d} seconds. '.format(int(end_time - start_time)))
########################################################################
# Training the DeepSpline network
    # Note: because the base network is small, training the DeepSpline version
    # takes noticeably longer than training the ReLU one.
# Regularization weight for the TV(2)/BV(2) regularization
# Needs to be tuned for performance
lmbda = 1e-4
# lipschitz control: if True, BV(2) regularization is used instead of TV(2)
lipschitz = False
print('\nTraining DeepSpline network.')
start_time = time.time()
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data[0].to(device), data[1].to(device)
# zero the parameter gradients
main_optimizer.zero_grad()
aux_optimizer.zero_grad()
# forward + backward + optimize
outputs = dsnet(inputs)
loss = criterion(outputs, labels)
# add regularization loss
if lipschitz is True:
loss = loss + lmbda * dsnet.BV2()
else:
loss = loss + lmbda * dsnet.TV2()
loss.backward()
main_optimizer.step()
aux_optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
end_time = time.time()
print('Finished Training DeepSpline network. \n'
'Took {:d} seconds. '.format(int(end_time - start_time)))
########################################################################
# Testing the ReLU and DeepSpline networks
for model, name in zip([net, dsnet], ['ReLU', 'DeepSpline']):
print(f'\nTesting {name} network.')
correct = 0
total = 0
# since we're not training, we don't need to calculate the gradients
# for our outputs
with torch.no_grad():
for data in testloader:
images, labels = data[0].to(device), data[1].to(device)
# calculate outputs by running images through the network
outputs = model(images)
# the class with the highest energy is what we choose
# as prediction
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f'Accuracy of the {name} network '
'on the 10000 test images: %d %%' % (100 * correct / total))
|
StarcoderdataPython
|
3271676
|
from trame.html import vuetify, Element, Div, Span
def create_project_generation(
validation_callback, validation_output, validation_check, run_variables
):
with Div(
classes="d-flex flex-column fill-height justify-space-around",
v_if="currentView == 'Project Generation'",
):
with Div(v_if=validation_check, classes="mx-6"):
with vuetify.VCard(outlined=True, classes="pa-2 my-4"):
vuetify.VCardTitle("Run variables")
vuetify.VTextField(
v_model=(run_variables["BaseUnit"], 1.0),
label="TimingInfo.BaseUnit",
)
vuetify.VTextField(
v_model=(run_variables["DumpInterval"], 1.0),
label="TimingInfo.DumpInterval",
)
vuetify.VTextField(
v_model=(run_variables["StartCount"], 0),
label="TimingInfo.StartCount",
)
vuetify.VTextField(
v_model=(run_variables["StartTime"], 0.0),
label="TimingInfo.StartTime",
)
vuetify.VTextField(
v_model=(run_variables["StopTime"], 1000.0),
label="TimingInfo.StopTime",
)
with vuetify.VCard(dark=True, outlined=True, classes="pa-2 my-4"):
vuetify.VCardTitle("Validation console output")
vuetify.VDivider()
vuetify.VTextarea(
value=(validation_output,),
dark=True,
readonly=True,
style="font-family: monospace;",
)
vuetify.VSpacer()
with Div(v_if="projGenValidation.valid", classes="mx-6"):
Span("Run Validated", classes="text-h5")
with Div(classes="d-flex justify-end ma-6"):
vuetify.VBtn(
"Validate", click=validation_callback, color="primary", classes="mx-2"
)
vuetify.VBtn(
"Generate", disabled=("!projGenValidation.valid"), color="primary"
)
|
StarcoderdataPython
|
1752464
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
import os
import sys
from multiprocessing import Pool
from time import sleep
# HACK: cross py2-py3 compatible version
try:
from queue import Queue
except ImportError:
from Queue import Queue
import click
import cv2
import numpy as np
from lmnet.common import get_color_map
from lmnet.nnlib import NNLib
from lmnet.utils.config import (
load_yaml,
build_pre_process,
build_post_process,
)
from lmnet.utils.demo import (
add_rectangle,
add_fps,
run_inference,
)
from lmnet.visualize import (
label_to_color_image,
visualize_keypoint_detection,
)
nn = None
pre_process = None
post_process = None
class MyTime:
def __init__(self, function_name):
self.start_time = time.time()
self.function_name = function_name
def show(self):
print("TIME: ", self.function_name, time.time() - self.start_time)
def init_camera(camera_width, camera_height):
if hasattr(cv2, 'cv'):
vc = cv2.VideoCapture(0)
vc.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, camera_width)
vc.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, camera_height)
vc.set(cv2.cv.CV_CAP_PROP_FPS, 10)
else:
vc = cv2.VideoCapture(1)
vc.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
vc.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
vc.set(cv2.CAP_PROP_FPS, 10)
return vc
def add_class_label(canvas,
text="Hello",
font=cv2.FONT_HERSHEY_SIMPLEX,
font_scale=0.42,
font_color=(140, 40, 200),
line_type=1,
dl_corner=(50, 50)):
cv2.putText(canvas, text, dl_corner, font, font_scale, font_color, line_type)
def _run_inference(img):
global nn, pre_process, post_process
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
result, fps, _ = run_inference(img, nn, pre_process, post_process)
return result, fps
def clear_queue(queue):
while not queue.empty():
queue.get()
return queue
def swap_queue(q1, q2):
return q2, q1 # These results are swapped
def run_object_detection(config):
global nn
# Set variables
camera_width = 320
camera_height = 240
window_name = "Object Detection Demo"
input_width = config.IMAGE_SIZE[1]
input_height = config.IMAGE_SIZE[0]
vc = init_camera(camera_width, camera_height)
pool = Pool(processes=1, initializer=nn.init)
result = False
fps = 1.0
q_save = Queue()
q_show = Queue()
grabbed, camera_img = vc.read()
q_show.put(camera_img.copy())
input_img = camera_img.copy()
# ----------- Beginning of Main Loop ---------------
while True:
m1 = MyTime("1 loop of while(1) of main()")
pool_result = pool.apply_async(_run_inference, (input_img, ))
is_first = True
while True:
grabbed, camera_img = vc.read()
if is_first:
input_img = camera_img.copy()
is_first = False
q_save.put(camera_img.copy())
if not q_show.empty():
window_img = q_show.get()
if result:
window_img = add_rectangle(
config.CLASSES,
window_img,
result,
(input_height, input_width)
)
window_img = add_fps(window_img, fps)
# ---------- END of if result != False -----------------
cv2.imshow(window_name, window_img)
key = cv2.waitKey(2) # Wait for 2ms
if key == 27: # ESC to quit
return
if pool_result.ready():
break
# -------------- END of wait loop ----------------------
q_show = clear_queue(q_show)
q_save, q_show = swap_queue(q_save, q_show)
result, fps = pool_result.get()
m1.show()
# --------------------- End of main Loop -----------------------
def run_classification(config):
global nn
camera_height = 240
camera_width = 320
window_name = "Classification Demo"
window_width = 320
window_height = 240
vc = init_camera(camera_width, camera_height)
pool = Pool(processes=1, initializer=nn.init)
grabbed, camera_img = vc.read()
pool_result = pool.apply_async(_run_inference, (camera_img, ))
result = None
fps = 1.0
loop_count = 0
while 1:
m1 = MyTime("1 loop of while(1) of main()")
key = cv2.waitKey(2) # Wait for 2ms
if key == 27: # ESC to quit
break
m2 = MyTime("vc.read()")
grabbed, camera_img = vc.read()
m2.show()
if pool_result.ready():
result, fps = pool_result.get()
pool_result = pool.apply_async(_run_inference, (camera_img, ))
if (window_width == camera_width) and (window_height == camera_height):
window_img = camera_img
else:
window_img = cv2.resize(camera_img, (window_width, window_height))
if result is not None:
result_class = np.argmax(result, axis=1)
add_class_label(window_img, text=str(result[0, result_class][0]), font_scale=0.52, dl_corner=(230, 230))
add_class_label(window_img, text=config.CLASSES[result_class[0]], font_scale=0.52, dl_corner=(230, 210))
window_img = add_fps(window_img, fps)
loop_count += 1
print("loop_count:", loop_count)
m3 = MyTime("cv2.imshow()")
cv2.imshow(window_name, window_img)
m3.show()
m1.show()
sleep(0.05)
cv2.destroyAllWindows()
def run_semantic_segmentation(config):
global nn
camera_width = 320
camera_height = 240
window_name = "Segmentation Demo"
vc = init_camera(camera_width, camera_height)
pool = Pool(processes=1, initializer=nn.init)
result = None
fps = 1.0
q_save = Queue()
q_show = Queue()
grabbed, camera_img = vc.read()
q_show.put(camera_img.copy())
input_img = camera_img.copy()
colormap = np.array(get_color_map(len(config['CLASSES'])), dtype=np.uint8)
while True:
m1 = MyTime("1 loop of while(1) of main()")
pool_result = pool.apply_async(_run_inference, (input_img,))
is_first = True
while True:
grabbed, camera_img = vc.read()
if is_first:
input_img = camera_img.copy()
is_first = False
q_save.put(camera_img.copy())
if not q_show.empty():
window_img = q_show.get()
overlay_img = window_img
if result is not None:
seg_img = label_to_color_image(result, colormap)
seg_img = cv2.resize(seg_img, dsize=(camera_width, camera_height))
overlay_img = cv2.addWeighted(window_img, 1, seg_img, 0.8, 0)
overlay_img = add_fps(overlay_img, fps)
cv2.imshow(window_name, overlay_img)
key = cv2.waitKey(2) # Wait for 2ms
if key == 27: # ESC to quit
return
if pool_result.ready():
break
q_show = clear_queue(q_show)
q_save, q_show = swap_queue(q_save, q_show)
result, fps = pool_result.get()
m1.show()
def run_keypoint_detection(config):
global nn
camera_width = 320
camera_height = 240
window_name = "Keypoint Detection Demo"
input_width = config.IMAGE_SIZE[1]
input_height = config.IMAGE_SIZE[0]
vc = init_camera(camera_width, camera_height)
pool = Pool(processes=1, initializer=nn.init)
result = None
fps = 1.0
q_save = Queue()
q_show = Queue()
grabbed, camera_img = vc.read()
q_show.put(camera_img.copy())
input_img = camera_img.copy()
while True:
m1 = MyTime("1 loop of while(1) of main()")
pool_result = pool.apply_async(_run_inference, (input_img,))
is_first = True
while True:
grabbed, camera_img = vc.read()
if is_first:
input_img = camera_img.copy()
is_first = False
q_save.put(camera_img.copy())
if not q_show.empty():
window_img = q_show.get()
drawed_img = window_img
if result is not None:
drawed_img = visualize_keypoint_detection(window_img, result[0], (input_height, input_width))
drawed_img = add_fps(drawed_img, fps)
cv2.imshow(window_name, drawed_img)
key = cv2.waitKey(2) # Wait for 2ms
# TODO(yang): Consider using another key for abort.
if key == 27: # ESC to quit
return
# TODO(yang): Busy loop is not efficient here. Improve it and change them in other tasks.
if pool_result.ready():
break
q_show = clear_queue(q_show)
q_save, q_show = swap_queue(q_save, q_show)
result, fps = pool_result.get()
m1.show()
def run(model, config_file):
global nn, pre_process, post_process
filename, file_extension = os.path.splitext(model)
supported_files = ['.so', '.pb']
if file_extension not in supported_files:
raise Exception("""
Unknown file type. Got %s%s.
Please check the model file (-m).
Only .pb (protocol buffer) or .so (shared object) file is supported.
""" % (filename, file_extension))
config = load_yaml(config_file)
pre_process = build_pre_process(config.PRE_PROCESSOR)
post_process = build_post_process(config.POST_PROCESSOR)
if file_extension == '.so': # Shared library
nn = NNLib()
nn.load(model)
elif file_extension == '.pb': # Protocol Buffer file
# only load tensorflow if user wants to use GPU
from lmnet.tensorflow_graph_runner import TensorflowGraphRunner
nn = TensorflowGraphRunner(model)
TASK_HANDLERS = {"IMAGE.CLASSIFICATION": run_classification,
"IMAGE.OBJECT_DETECTION": run_object_detection,
"IMAGE.SEMANTIC_SEGMENTATION": run_semantic_segmentation,
"IMAGE.KEYPOINT_DETECTION": run_keypoint_detection}
TASK_HANDLERS[config.TASK](config)
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option(
"-m",
"-l",
"--model",
type=click.Path(exists=True),
help=u"""
Inference Model filename
(-l is deprecated please use -m instead)
""",
default="../models/lib/lib_fpga.so",
)
@click.option(
"-c",
"--config_file",
type=click.Path(exists=True),
help=u"Config file Path",
default="../models/meta.yaml",
)
def main(model, config_file):
_check_deprecated_arguments()
run(model, config_file)
def _check_deprecated_arguments():
argument_list = sys.argv
if '-l' in argument_list:
print("Deprecated warning: -l is deprecated please use -m instead")
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3203180
|
from base import BaseWorker
|
StarcoderdataPython
|
1604422
|
from oauth2_provider.middleware import OAuth2TokenMiddleware
class AccesTokenOAuth2TokenMiddleware(OAuth2TokenMiddleware):
def process_request(self, request):
if not request.META.get('HTTP_AUTHORIZATION', '').startswith('Bearer') and \
request.GET.get('access_token'):
bearer = 'Bearer %s' % request.GET.get('access_token')
request.META['HTTP_AUTHORIZATION'] = bearer
super().process_request(request=request)
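# A hypothetical wiring sketch (the dotted path below is an assumption, not
# defined in this snippet): the middleware takes effect once it is listed in
# the Django settings (MIDDLEWARE, or MIDDLEWARE_CLASSES on older Django
# versions), after the authentication middleware, e.g.
#
#   MIDDLEWARE = [
#       # ...
#       'django.contrib.auth.middleware.AuthenticationMiddleware',
#       'myproject.middleware.AccesTokenOAuth2TokenMiddleware',
#   ]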
|
StarcoderdataPython
|
3350891
|
from __future__ import annotations
from pathlib import Path
from typing import Type, TYPE_CHECKING, Any
from pycep.typing import BicepJson
from checkov.bicep.parser import Parser
from checkov.bicep.utils import get_scannable_file_paths
from checkov.common.graph.db_connectors.db_connector import DBConnector
from checkov.common.graph.graph_manager import GraphManager
from checkov.bicep.graph_builder.local_graph import BicepLocalGraph
if TYPE_CHECKING:
from checkov.common.graph.graph_builder.local_graph import LocalGraph
class BicepGraphManager(GraphManager):
def __init__(self, db_connector: DBConnector, source: str = "Bicep") -> None:
super().__init__(db_connector=db_connector, parser=None, source=source)
def build_graph_from_source_directory(
self,
source_dir: str,
render_variables: bool = True,
local_graph_class: Type[LocalGraph] = BicepLocalGraph,
parsing_errors: dict[str, Exception] | None = None,
download_external_modules: bool = False,
excluded_paths: list[str] | None = None,
) -> tuple[LocalGraph, dict[Path, BicepJson]]:
file_paths = get_scannable_file_paths(root_folder=source_dir)
definitions, definitions_raw, parsing_errors = Parser().get_files_definitions(file_paths) # type:ignore[assignment]
local_graph = self.build_graph_from_definitions(definitions)
return local_graph, definitions
def build_graph_from_definitions(
self, definitions: dict[Path, BicepJson], render_variables: bool = True
) -> LocalGraph:
local_graph = BicepLocalGraph(definitions)
local_graph.build_graph(False)
return local_graph
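# A hypothetical usage sketch (the NetworkxConnector import path is an
# assumption based on checkov's common graph package, and "path/to/bicep" is
# a placeholder directory):
#
#   from checkov.common.graph.db_connectors.networkx.networkx_db_connector import NetworkxConnector
#
#   manager = BicepGraphManager(db_connector=NetworkxConnector())
#   local_graph, definitions = manager.build_graph_from_source_directory("path/to/bicep")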
|
StarcoderdataPython
|
102695
|
<filename>Modules/tobii/eye_tracking_io/time/sync.py
from tobii.eye_tracking_io._native import tetio
from tobii.eye_tracking_io.time.clock import Clock
from tobii.eye_tracking_io.mainloop import Mainloop, MainloopThread
from tobii.eye_tracking_io.browsing import EyetrackerInfo
class State(object):
UNSYNCHRONIZED = 0
STABILIZING = 1
SYNCHRONIZED = 2
def __init__(self, internal_state):
self._internal_state = internal_state
@property
def state_flag(self):
"""Returns either UNSYNCHRONIZED, STABILIZING or SYNCHRONIZED"""
return self._internal_state.get_state_flag()
@property
def points_in_use(self):
"""Returns a list of tuples with three values in them, where the
values in the tuple are:
local_midpoint
remote
roundtrip
"""
return self._internal_state.get_points_in_use()
@property
def error_approximation(self):
"""Returns an approximation of the current synchronization error."""
return self._internal_state.get_error_approximation()
class SyncManager(object):
def __init__(self, clock, eyetracker_info, mainloop, error_handler=None, status_handler=None):
if not isinstance(clock, Clock):
raise TypeError("clock should be of type Clock")
if not isinstance(eyetracker_info, (tetio.factory_info, EyetrackerInfo)):
raise TypeError("factory_info should be of type EyetrackerInfo")
if not isinstance(mainloop, Mainloop) and \
not isinstance(mainloop, MainloopThread):
raise TypeError("mainloop should of type Mainloop or MainloopThread")
if error_handler is not None:
if not callable(error_handler):
raise TypeError("error_handler should be callable")
if status_handler is not None:
if not callable(status_handler):
raise TypeError("status_handler should be callable")
cl = clock._clock
ml = None
if isinstance(mainloop, MainloopThread):
ml = mainloop._mainloop.mainloop
else:
ml = mainloop.mainloop
fi = None
if isinstance(eyetracker_info, EyetrackerInfo):
fi = eyetracker_info.factory_info
else:
fi = eyetracker_info
self._error_handler = error_handler
self._status_handler = status_handler
self._sync_manager = tetio.SyncManager(cl, fi, ml,
self._on_error,
self._on_status)
def convert_from_local_to_remote(self, local_usecs):
return self._sync_manager.convert_from_local_to_remote(local_usecs)
def convert_from_remote_to_local(self, remote_usecs):
return self._sync_manager.convert_from_remote_to_local(remote_usecs)
@property
def sync_state(self):
"""Returns the current sync state"""
return State(self._sync_manager.get_sync_state())
def _on_error(self, error):
if self._error_handler is None:
return
try:
self._error_handler(error)
except:
pass
def _on_status(self, state):
if self._status_handler is None:
return
try:
self._status_handler(State(state))
except:
pass
|
StarcoderdataPython
|
1659174
|
from backend.api.models import Horario
from rest_framework import serializers
class HorarioSerializer(serializers.ModelSerializer):
"""
HorarioSerializer Serializer de Horario
Args:
serializers (ModelSerializer): Serializer del modulo rest_framework
"""
class Meta:
"""
Metadatos de Horario
"""
model = Horario
fields = (
"id",
"lunes",
"martes",
"miercoles",
"jueves",
"viernes",
"sabado",
"domingo"
)
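# A minimal usage sketch (the `horario` instance below is hypothetical): like
# any DRF ModelSerializer, it turns a Horario instance into a plain dict
# limited to the fields declared above.
#
#   serializer = HorarioSerializer(horario)
#   serializer.data  # {"id": ..., "lunes": ..., ..., "domingo": ...}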
|
StarcoderdataPython
|
1726206
|
<reponame>umihai1/github_publish
#!/usr/bin/env python
"""
Main test function to execute all tests found in the current directory
"""
import sys
import logging
import xmlrunner
try:
import unittest2 as unittest
except ImportError:
import unittest
def main():
tests = unittest.TestLoader().discover('.', 'test_*.py')
testResult=xmlrunner.XMLTestRunner(output='test-reports').run(tests)
if testResult.wasSuccessful():
return 0
else:
return 1
if __name__ == '__main__':
sys.exit(main())
|
StarcoderdataPython
|
3201439
|
from decimal import Decimal
import decimal
import time
import json
import random
import argparse
import requests
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
import logging
with open('config.json','r') as conf:
configs = json.loads(conf.read())
rpc_user = configs['rpc_user']
rpc_password = configs['rpc_password']
account_pool_endpoint = configs['account_pool_endpoint']
parser = argparse.ArgumentParser(description='configs')
parser.add_argument('--speed', dest='speed', type=int, help='how many accounts per minute')
parser.add_argument('--debug', dest='debug', default=0, type=bool, help='debug')
node_name = configs['node_name']
names = configs['names']
# rpc_connection.getnewaddress(node_name)
if __name__ == "__main__":
args = parser.parse_args()
logging.basicConfig()
logger = logging.getLogger(__name__)
if args.debug:
logger.setLevel(logging.DEBUG)
while(True):
try:
            rpc_connection = AuthServiceProxy("http://%s:%s@127.0.0.1:18443" % (rpc_user, rpc_password))  # NOTE: RPC host assumed to be the local regtest node
listunspent = []
while(not listunspent):
r = requests.get(f'{account_pool_endpoint}/account/{node_name}')
sender=r.json()["address"]
logger.debug(f"sender: {str(sender)}")
listunspent = rpc_connection.listunspent(0,9999,[f"{sender}"])
logger.debug(f"unspent:{listunspent}")
unspent = random.choice(listunspent)
while(unspent["amount"]<0.1):
unspent = random.choice(listunspent)
balance = unspent['amount']
logger.debug(f"-------------")
logger.debug(f"unspent:{unspent}")
logger.debug(unspent['spendable'])
if unspent['spendable']:
can_spend = 1
txid = unspent['txid']
vout = 0
amount_to_spend = Decimal(random.uniform(0,float(balance)*0.5))
fee = Decimal(0.00001111)
change = balance-(amount_to_spend+fee)
# unspent = random.choice(listunspent)
# balance = unspent['amount']
# logger.debug("balance:",balance)
# logger.debug("spendable?:",unspent['spendable'])
# if unspent['spendable']:
# txid = unspent['txid']
# vout = 0
# amount_to_spend = Decimal(random.uniform(0,float(balance)*0.5))
# fee = balance*Decimal(0.05)
# change = balance-amount_to_spend-fee
# break
                receiver = requests.get(f'{account_pool_endpoint}/account').json()["address"]
logger.debug(f"amount_to_spend:{amount_to_spend}")
logger.debug(f"change:{change}")
logger.debug(f"fee:{fee}")
                rawtransaction = rpc_connection.createrawtransaction(
                    [{"txid": txid, "vout": 0}],
                    {receiver: amount_to_spend, sender: change}
                )
logger.debug(f"rawtransaction:{rawtransaction}")
privkey = rpc_connection.dumpprivkey(
sender
)
logger.debug(f"key:{privkey}")
signed_transaction = rpc_connection.signrawtransactionwithkey(
rawtransaction,
[privkey]
)
logger.debug(f"signed transaction:{signed_transaction}")
hex = signed_transaction["hex"]
logger.debug(f"hex:{hex}")
sent_rawtransaction = rpc_connection.sendrawtransaction(
hex
)
logger.debug(f"sent_transaction:{sent_rawtransaction}")
time.sleep(60/args.speed)
except:
pass
# ./bitcoin-cli createrawtransaction "[{\"txid\":\"2f7aa4499f94371c942d326da5127c8cb15ea97b82c31453338de7eb15844d25\",\"vout\":0}]" "{\"2NBVRHmHS3ReJ7sTQxxAmVHyak9BwWUnxzA\":1.0,\"2N91bwsns4Hd5rLLMVMUVhqut17oq3L96nN\":45.52992940}"
# 0200000001254d8415ebe78d335314c3827ba95eb18c7c12a56d322d941c37949f49a47a2f0000000000ffffffff0200e1f5050000000017a914c8218580adab134ef396f41436d5543af89ce8e887ac28610f0100000017a914acee84e3dc1719d398b82287ccd0db658561cbec8700000000
# ./bitcoin-cli signrawtransactionwithkey "0200000001254d8415ebe78d335314c3827ba95eb18c7c12a56d322d941c37949f49a47a2f0000000000ffffffff0200e1f5050000000017a914c8218580adab134ef396f41436d5543af89ce8e887ac28610f0100000017a914acee84e3dc1719d398b82287ccd0db658561cbec8700000000" "[\"cUCcoPJx2vaKL7CAAPbissa4bLuWivzyCC3JWzGTWhqVYkavc7Yc\"]"
# {
# "hex": "02000000000101254d8415ebe78d335314c3827ba95eb18c7c12a56d322d941c37949f49a47a2f000000001716001449ca9ac7497e741737f03f8f160b929de23f0924ffffffff0200e1f5050000000017a914c8218580adab134ef396f41436d5543af89ce8e887ac28610f0100000017a914acee84e3dc1719d398b82287ccd0db658561cbec8702473044022068665fa3259cd8e1fa9067ece0a05699546fa65a484d0370da7589311727092402204a5b36dbfe9d9c43df3fc09f971dd1cef28902b08b7354ea860e4f20df9e7965012103bd20c009a1d867482a4d1691d35ee7b7a228046f2c442a42a0b655336237ea0d00000000",
# "complete": true
# }
# ./bitcoin-cli sendrawtransaction 02000000000101254d8415ebe78d335314c3827ba95eb18c7c12a56d322d941c37949f49a47a2f000000001716001449ca9ac7497e741737f03f8f160b929de23f0924ffffffff0200e1f5050000000017a914c8218580adab134ef396f41436d5543af89ce8e887ac28610f0100000017a914acee84e3dc1719d398b82287ccd0db658561cbec8702473044022068665fa3259cd8e1fa9067ece0a05699546fa65a484d0370da7589311727092402204a5b36dbfe9d9c43df3fc09f971dd1cef28902b08b7354ea860e4f20df9e7965012103bd20c009a1d867482a4d1691d35ee7b7a228046f2c442a42a0b655336237ea0d00000000
# 636620655b14c862b76aeef301029d1d147c559ab1152a137a7c78d7ffb47683
# ./bitcoin-cli listunspent 0 99999 "[\"<KEY>\"]"
|
StarcoderdataPython
|
3353138
|
"""Additional file information in Database
Revision ID: ec66a0a3186b
Revises: 4b41e84f7aac
Create Date: 2022-01-20 17:00:16.089851
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "ec66a0a3186b"
down_revision = "4b41e84f7aac"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("files", sa.Column("creation_date", sa.DateTime(), nullable=False))
op.add_column("files", sa.Column("update_date", sa.DateTime(), nullable=False))
op.add_column("files", sa.Column("format", sa.String(), nullable=False))
op.drop_column("files", "registration_date")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"files",
sa.Column(
"registration_date",
postgresql.TIMESTAMP(),
autoincrement=False,
nullable=True,
),
)
op.drop_column("files", "format")
op.drop_column("files", "update_date")
op.drop_column("files", "creation_date")
# ### end Alembic commands ###
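# A usage note (the commands below are standard Alembic CLI invocations, not
# part of this file): the migration is applied or rolled back with
#
#   alembic upgrade ec66a0a3186b   # or: alembic upgrade head
#   alembic downgrade 4b41e84f7aac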
|
StarcoderdataPython
|
1672335
|
<filename>protonfixes/gamefixes/312530.py
""" Game fix for Duck Game
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" https://www.protondb.com/app/312530#bXY0Kuwwlz
"""
util.winedll_override('dinput', 'n')
util.append_argument('-nothreading')
|
StarcoderdataPython
|
180775
|
from . import common
class Devices(common.Resource):
def __init__(self, sigfox):
super().__init__(sigfox, "devices/")
def retrieve_undelivered_callbacks(self, id, query=""):
""" Retrieve a list of undelivered callbacks and errors for a
given device, in reverse chronological order (most recent
message first).
"""
response = self.sigfox.get(
"{}{}/callbacks-not-delivered/".format(self.resource, id),
query
)
return response
def retrieve_modem_certificate(self, id, query=""):
""" Retrieve the modem certificate associated with a device. """
response = self.sigfox.get(
"{}{}/certificate/modem/".format(self.resource, id),
query
)
return response
def retrieve_product_certificate(self, id, query=""):
""" Retrieve the modem certificate associated with a device. """
response = self.sigfox.get(
"{}{}/certificate/product/".format(self.resource, id),
query
)
return response
def retrieve_product_certificate_PAC(self, id, query):
""" Retrieve the product certificate associated with a given device ID
and PAC, when the device has not already been created on the
portal, only in CRA.
"""
response = self.sigfox.get(
"{}{}/product-certificate/".format(self.resource, id),
query
)
return response
def retrieve_year_consumption(self, id, year):
""" Retrieve a device's consumption for a given year."""
response = self.sigfox.get(
"{}{}/consumption/{}".format(self.resource, id, year)
)
return response
def retrieve_month_consumption(self, id, year, month):
""" Retrieve a device's consumption for a given year and month."""
response = self.sigfox.get(
"{}{}/consumption/{}/{}".format(self.resource, id, year, month)
)
return response
def disable_sequence_number(self, id):
""" Disable sequence number check for the next message. """
response = self.sigfox.post(
"{}{}/disengage/".format(self.resource, id)
)
return response
def retrieve_messages_list(self, id, query=""):
""" Retrieve a list of messages for a given device according to
request filters, with a 3-day history.
"""
response = self.sigfox.get(
"{}{}/messages".format(self.resource),
query
)
return response
def retrieve_messages_metrics(self, id, query=""):
""" Return the number of messages for a given device, for the last day,
last week and last month.
"""
response = self.sigfox.get(
"{}{}/messages/metric".format(self.resource),
query
)
return response
def retrieve_location(self, id, query=""):
""" Retrieve a list of location data of a device according to request
filters.
"""
response = self.sigfox.get(
"{}{}/locations".format(self.resource),
query
)
return response
def unsubscribe_device(self, id, body):
""" Set an unsubscription date for the device's token. """
response = self.sigfox.post(
"{}{}/unsubscribe".format(self.resource),
body
)
return response
def create_devices(self, body):
""" Create multiple new devices with asynchronous job. """
response = self.sigfox.post(
"{}bulk".format(self.resource),
body
)
return response
def update_devices(self, body):
""" Update or edit multiple devices with asynchronous job. """
response = self.sigfox.put(
"{}bulk".format(self.resource),
body
)
return response
def retrieve_job_status(self, job_id):
""" Update or edit multiple devices with asynchronous job. """
response = self.sigfox.get(
"{}bulk/{}".format(self.resource, job_id),
)
return response
def transfer_devices(self, body):
""" Transfer multiple devices to another device type with asynchronous
job.
"""
response = self.sigfox.post(
"{}bulk/transfer".format(self.resource),
body
)
return response
def replace_devices(self, body):
""" Replace multiple devices (moving tokens from one device to another)
with synchronous job.
"""
response = self.sigfox.post(
"{}bulk/replace".format(self.resource),
body
)
return response
def restart_devices(self, body):
""" Restart multiple devices with asynchronous job. """
response = self.sigfox.post(
"{}bulk/restart".format(self.resource),
body
)
return response
def retrieve_restart_job_status(self, job_id):
""" Retrieve the async job status for a restart devices action. """
response = self.sigfox.get(
"{}bulk/restart/{}".format(self.resource, job_id),
)
return response
def suspend_devices(self, body):
""" Suspend multiple devices with asynchronous job. """
response = self.sigfox.post(
"{}bulk/suspend".format(self.resource),
body
)
return response
def retrieve_suspend_job_status(self, job_id):
""" Retrieve the async job status for a restart devices action. """
response = self.sigfox.get(
"{}bulk/suspend/{}".format(self.resource, job_id),
)
return response
def resume_devices(self, body):
""" Resume multiple devices with asynchronous job. """
response = self.sigfox.post(
"{}bulk/resume".format(self.resource),
body
)
return response
def retrieve_resume_job_status(self, job_id):
""" Retrieve the async job status for a resume devices action. """
response = self.sigfox.get(
"{}bulk/resume/{}".format(self.resource, job_id),
)
return response
def unsubscribe_devices(self, body):
""" Unsubscribe multiple devices with asynchronous job. """
response = self.sigfox.post(
"{}bulk/unsubscribe".format(self.resource),
body
)
return response
def retrieve_unsubscribe_job_status(self, job_id):
""" Retrieve the async job status for a unsubscribe devices action. """
response = self.sigfox.get(
"{}bulk/unsubscribe/{}".format(self.resource, job_id),
)
return response
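# A hypothetical usage sketch (the `Sigfox` client class and the device id
# are assumptions; this module only needs an object exposing get/post/put
# calls against the Sigfox REST API):
#
#   client = Sigfox(api_login, api_password)
#   devices = Devices(client)
#   messages = devices.retrieve_messages_list("0B59", query="limit=10")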
|
StarcoderdataPython
|
3334373
|
<reponame>ZhengyangXu/Algorithm-Daily-Practice
#
# @lc app=leetcode.cn id=139 lang=python3
#
# [139] Word Break
#
# https://leetcode-cn.com/problems/word-break/description/
#
# algorithms
# Medium (49.95%)
# Likes: 992
# Dislikes: 0
# Total Accepted: 146.7K
# Total Submissions: 292.3K
# Testcase Example: '"leetcode"\n["leet","code"]'
#
# Given a non-empty string s and a list wordDict of non-empty words, determine
# whether s can be segmented into a space-separated sequence of one or more
# dictionary words.
#
# Notes:
#
#
# The same dictionary word may be reused multiple times in the segmentation.
# You may assume the dictionary does not contain duplicate words.
#
#
# Example 1:
#
# Input: s = "leetcode", wordDict = ["leet", "code"]
# Output: true
# Explanation: Return true because "leetcode" can be segmented as "leet code".
#
#
# Example 2:
#
# Input: s = "applepenapple", wordDict = ["apple", "pen"]
# Output: true
# Explanation: Return true because "applepenapple" can be segmented as
# "apple pen apple". Note that you may reuse a dictionary word.
#
#
# Example 3:
#
# Input: s = "catsandog", wordDict = ["cats", "dog", "sand", "and", "cat"]
# Output: false
#
# @lc code=start
import functools
from typing import List


class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
if not s or not wordDict:
return True
n = len(s)
@functools.lru_cache(None)
def backtrack(s):
if not s:
return True
res = False
for i in range(1,len(s)+1):
if s[:i] in wordDict:
res = backtrack(s[i:]) or res
return res
return backtrack(s)
# @lc code=end
# def wordBreak(self, s: str, wordDict: List[str]) -> bool:
# n = len(s)
# dp = [False for _ in range(n+1)]
# dp[0] = True
# for i in range(n+1):
# for j in range(i+1,n+1):
# if dp[i] and s[i:j] in wordDict:
# dp[j] = True
# return dp[-1]
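# A worked illustration of the bottom-up DP variant above (a sketch, not part
# of the original solution): for s = "leetcode", wordDict = ["leet", "code"],
# dp has length 9 and starts as [True] + [False] * 8. dp[4] becomes True
# because dp[0] is True and s[0:4] == "leet"; dp[8] then becomes True because
# dp[4] is True and s[4:8] == "code", so dp[-1] is True and the answer is True.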
|
StarcoderdataPython
|
3298590
|
<reponame>piotr-worotnicki/raspberry-pi-rgb-led-controller
# Generated by Django 2.1.2 on 2018-12-29 19:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('led', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='LedState',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField()),
('red', models.IntegerField(default=0)),
('green', models.IntegerField(default=0)),
('blue', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('hold_time', models.IntegerField()),
('fade_time', models.IntegerField()),
],
),
migrations.AddField(
model_name='ledstate',
name='profile',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='led.Profile'),
),
migrations.AddField(
model_name='currentledstate',
name='profile',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='led.Profile'),
),
]
|
StarcoderdataPython
|
1673139
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from tensorflow_addons.layers import Sparsemax
from tensorflow_addons.utils import test_utils
test_obs = 17
def _np_sparsemax(z):
z = z - np.mean(z, axis=1)[:, np.newaxis]
# sort z
z_sorted = np.sort(z, axis=1)[:, ::-1]
# calculate k(z)
z_cumsum = np.cumsum(z_sorted, axis=1)
k = np.arange(1, z.shape[1] + 1)
z_check = 1 + k * z_sorted > z_cumsum
# use argmax to get the index by row as .nonzero() doesn't
# take an axis argument. np.argmax return the first index, but the last
# index is required here, use np.flip to get the last index and
# `z.shape[axis]` to compensate for np.flip afterwards.
k_z = z.shape[1] - np.argmax(z_check[:, ::-1], axis=1)
# calculate tau(z)
tau_sum = z_cumsum[np.arange(0, z.shape[0]), k_z - 1]
tau_z = ((tau_sum - 1) / k_z).reshape(-1, 1)
# calculate p
return np.maximum(0, z - tau_z)
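# A quick sanity check of the reference implementation above (a sketch, not
# part of the original test file): a clearly dominant logit collapses to a
# one-hot distribution, e.g.
#
#   _np_sparsemax(np.array([[1.0, 0.0, -1.0]]))  # -> array([[1., 0., 0.]])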
@parameterized.parameters([np.float32, np.float64])
@test_utils.run_all_in_graph_and_eager_modes
class SparsemaxTest(tf.test.TestCase):
def test_sparsemax_layer_against_numpy(self, dtype):
"""check sparsemax kernel against numpy."""
random = np.random.RandomState(1)
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
test_utils.layer_test(
Sparsemax,
kwargs={'dtype': dtype},
input_data=z,
expected_output=_np_sparsemax(z).astype(dtype))
if __name__ == '__main__':
tf.test.main()
|
StarcoderdataPython
|
20272
|
<filename>tests/test_timeconversion.py
import unittest
from datetime import datetime, timezone
from pyfuppes import timeconversion
class TestTimeconv(unittest.TestCase):
@classmethod
def setUpClass(cls):
# to run before all tests
print("testing pyfuppes.timeconversion...")
@classmethod
def tearDownClass(cls):
# to run after all tests
pass
def setUp(self):
# to run before each test
pass
def tearDown(self):
# to run after each test
pass
def test_dtstr_2_mdns(self):
# no timezone
t = ["2012-01-01T01:00:00", "2012-01-01T02:00:00"]
f = "%Y-%m-%dT%H:%M:%S"
result = list(map(int, timeconversion.dtstr_2_mdns(t, f)))
self.assertEqual(result, [3600, 7200])
# with timezone
t = ["2012-01-01T01:00:00+02:00", "2012-01-01T02:00:00+02:00"]
f = "%Y-%m-%dT%H:%M:%S%z"
result = list(map(int, timeconversion.dtstr_2_mdns(t, f)))
self.assertEqual(result, [3600, 7200])
# zero case
t = "2012-01-01T00:00:00+02:00"
result = timeconversion.dtstr_2_mdns(t, f)
self.assertEqual(int(result), 0)
def test_dtobj_2_mdns(self):
t = [datetime(2000, 1, 1, 1), datetime(2000, 1, 1, 2)]
result = list(map(int, timeconversion.dtobj_2_mdns(t)))
self.assertEqual(result, [3600, 7200])
t = [
datetime(2000, 1, 1, 1, tzinfo=timezone.utc),
datetime(2000, 1, 1, 2, tzinfo=timezone.utc),
]
result = list(map(int, timeconversion.dtobj_2_mdns(t)))
self.assertEqual(result, [3600, 7200])
def test_posix_2_mdns(self):
t = [3600, 7200, 10800]
result = list(map(int, timeconversion.posix_2_mdns(t)))
self.assertEqual(result, t)
def test_mdns_2_dtobj(self):
t = [3600, 10800, 864000]
ref = datetime(2020, 5, 15, tzinfo=timezone.utc)
result = list(map(int, timeconversion.mdns_2_dtobj(t, ref, posix=True)))
self.assertEqual(result, [1589504400, 1589511600, 1590364800])
def test_daysSince_2_dtobj(self):
t0, off = datetime(2020, 5, 10), 10.5
result = timeconversion.daysSince_2_dtobj(t0, off)
self.assertEqual(result.hour, 12)
self.assertEqual(result.day, 20)
def test_dtstr_2_posix(self):
result = timeconversion.dtstr_2_posix("2020-05-15", "%Y-%m-%d")
self.assertAlmostEqual(
result, datetime(2020, 5, 15, tzinfo=timezone.utc).timestamp()
)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
1628847
|
<reponame>camerondphillips/MAYAN
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _, ungettext
from acls.models import AccessEntry
from documents.models import Document, DocumentType
from documents.permissions import (
PERMISSION_DOCUMENT_TYPE_EDIT, PERMISSION_DOCUMENT_VIEW
)
from documents.views import document_list
from permissions.models import Permission
from common.utils import encapsulate, generate_choices_w_labels
from common.views import assign_remove
from .api import save_metadata_list
from .forms import (
AddMetadataForm, MetadataFormSet, MetadataRemoveFormSet, MetadataTypeForm
)
from .models import DocumentMetadata, MetadataType
from .permissions import (
PERMISSION_METADATA_DOCUMENT_ADD, PERMISSION_METADATA_DOCUMENT_EDIT,
PERMISSION_METADATA_DOCUMENT_REMOVE, PERMISSION_METADATA_DOCUMENT_VIEW,
PERMISSION_METADATA_TYPE_CREATE, PERMISSION_METADATA_TYPE_DELETE,
PERMISSION_METADATA_TYPE_EDIT, PERMISSION_METADATA_TYPE_VIEW
)
def metadata_edit(request, document_id=None, document_id_list=None):
if document_id:
document_id_list = unicode(document_id)
documents = Document.objects.select_related('metadata').filter(pk__in=document_id_list.split(','))
try:
Permission.objects.check_permissions(request.user, [PERMISSION_METADATA_DOCUMENT_EDIT])
except PermissionDenied:
documents = AccessEntry.objects.filter_objects_by_access(PERMISSION_METADATA_DOCUMENT_EDIT, request.user, documents)
if not documents:
if document_id:
raise Http404
else:
messages.error(request, _('Must provide at least one document.'))
return HttpResponseRedirect(request.META.get('HTTP_REFERER', reverse('main:home')))
if len(set([document.document_type.pk for document in documents])) > 1:
messages.error(request, _('Only select documents of the same type.'))
return HttpResponseRedirect(request.META.get('HTTP_REFERER', reverse('main:home')))
if set(documents.values_list('metadata__value', flat=True)) == set([None]):
message = ungettext(
'The selected document doesn\'t have any metadata.',
'The selected documents don\'t have any metadata.',
len(documents)
)
messages.warning(request, message)
return HttpResponseRedirect(request.META.get('HTTP_REFERER', reverse('main:home')))
post_action_redirect = reverse('documents:document_list_recent')
next = request.POST.get('next', request.GET.get('next', request.META.get('HTTP_REFERER', post_action_redirect)))
metadata = {}
initial = []
for document in documents:
document.add_as_recent_document_for_user(request.user)
for item in document.metadata.all():
value = item.value
if item.metadata_type in metadata:
if value not in metadata[item.metadata_type]:
metadata[item.metadata_type].append(value)
else:
metadata[item.metadata_type] = [value] if value else []
for key, value in metadata.items():
initial.append({
'metadata_type': key,
'value': ', '.join(value) if value else '',
'required': key in document.document_type.metadata.filter(required=True),
})
formset = MetadataFormSet(initial=initial)
if request.method == 'POST':
formset = MetadataFormSet(data=request.POST, initial=initial)
if formset.is_valid():
for document in documents:
errors = []
for form in formset.forms:
if form.cleaned_data['update']:
try:
save_metadata_list([form.cleaned_data], document)
except Exception as exception:
errors.append(exception)
if errors:
for error in errors:
if settings.DEBUG:
raise
else:
                            messages.error(request, _('Error editing metadata for document %(document)s; %(exception)s.') % {
                                'document': document, 'exception': ', '.join(error.messages)})
else:
messages.success(request, _('Metadata for document %s edited successfully.') % document)
return HttpResponseRedirect(next)
context = {
'form_display_mode_table': True,
'form': formset,
'next': next,
}
if len(documents) == 1:
context['object'] = documents[0]
context['title'] = ungettext(
'Edit document metadata',
'Edit documents metadata',
len(documents)
)
return render_to_response('main/generic_form.html', context,
context_instance=RequestContext(request))
def metadata_multiple_edit(request):
return metadata_edit(request, document_id_list=request.GET.get('id_list', ''))
def metadata_add(request, document_id=None, document_id_list=None):
if document_id:
documents = [get_object_or_404(Document, pk=document_id)]
elif document_id_list:
documents = [get_object_or_404(Document.objects.select_related('document_type'), pk=document_id) for document_id in document_id_list.split(',')]
if len(set([document.document_type.pk for document in documents])) > 1:
messages.error(request, _('Only select documents of the same type.'))
return HttpResponseRedirect(request.META.get('HTTP_REFERER', reverse('main:home')))
try:
Permission.objects.check_permissions(request.user, [PERMISSION_METADATA_DOCUMENT_ADD])
except PermissionDenied:
documents = AccessEntry.objects.filter_objects_by_access(PERMISSION_METADATA_DOCUMENT_ADD, request.user, documents)
if not documents:
messages.error(request, _('Must provide at least one document.'))
return HttpResponseRedirect(request.META.get('HTTP_REFERER', reverse('main:home')))
for document in documents:
document.add_as_recent_document_for_user(request.user)
post_action_redirect = reverse('documents:document_list_recent')
next = request.POST.get('next', request.GET.get('next', request.META.get('HTTP_REFERER', post_action_redirect)))
if request.method == 'POST':
form = AddMetadataForm(data=request.POST, document_type=document.document_type)
if form.is_valid():
metadata_type = form.cleaned_data['metadata_type']
for document in documents:
try:
document_metadata, created = DocumentMetadata.objects.get_or_create(document=document, metadata_type=metadata_type.metadata_type, defaults={'value': ''})
except Exception as exception:
if getattr(settings, 'DEBUG', False):
raise
else:
messages.error(request, _('Error adding metadata type "%(metadata_type)s" to document: %(document)s; %(exception)s') % {
'metadata_type': metadata_type, 'document': document, 'exception': ', '.join(getattr(exception, 'messages', exception))})
else:
if created:
messages.success(request, _('Metadata type: %(metadata_type)s successfully added to document %(document)s.') % {
'metadata_type': metadata_type, 'document': document})
else:
messages.warning(request, _('Metadata type: %(metadata_type)s already present in document %(document)s.') % {
'metadata_type': metadata_type, 'document': document})
if len(documents) == 1:
return HttpResponseRedirect('%s?%s' % (
reverse('metadata:metadata_edit', args=[document.pk]),
urlencode({'next': next}))
)
elif len(documents) > 1:
return HttpResponseRedirect('%s?%s' % (
reverse('metadata:metadata_multiple_edit'),
urlencode({'id_list': document_id_list, 'next': next}))
)
else:
form = AddMetadataForm(document_type=document.document_type)
context = {
'form': form,
'next': next,
}
if len(documents) == 1:
context['object'] = documents[0]
context['title'] = ungettext(
'Add metadata types to document',
'Add metadata types to documents',
len(documents)
)
return render_to_response('main/generic_form.html', context,
context_instance=RequestContext(request))
def metadata_multiple_add(request):
return metadata_add(request, document_id_list=request.GET.get('id_list', []))
def metadata_remove(request, document_id=None, document_id_list=None):
if document_id:
document_id_list = unicode(document_id)
documents = Document.objects.select_related('metadata').filter(pk__in=document_id_list.split(','))
try:
Permission.objects.check_permissions(request.user, [PERMISSION_METADATA_DOCUMENT_REMOVE])
except PermissionDenied:
documents = AccessEntry.objects.filter_objects_by_access(PERMISSION_METADATA_DOCUMENT_REMOVE, request.user, documents)
if not documents:
if document_id:
raise Http404
else:
messages.error(request, _('Must provide at least one document.'))
return HttpResponseRedirect(request.META.get('HTTP_REFERER', reverse('main:home')))
if len(set([document.document_type.pk for document in documents])) > 1:
messages.error(request, _('Only select documents of the same type.'))
return HttpResponseRedirect(request.META.get('HTTP_REFERER', reverse('main:home')))
if set(documents.values_list('metadata__value', flat=True)) == set([None]):
message = ungettext(
'The selected document doesn\'t have any metadata.',
            'The selected documents don\'t have any metadata.',
len(documents)
)
messages.warning(request, message)
return HttpResponseRedirect(request.META.get('HTTP_REFERER', reverse('main:home')))
post_action_redirect = reverse('documents:document_list_recent')
next = request.POST.get('next', request.GET.get('next', request.META.get('HTTP_REFERER', post_action_redirect)))
metadata = {}
for document in documents:
document.add_as_recent_document_for_user(request.user)
for item in document.metadata.all():
value = item.value
if item.metadata_type in metadata:
if value not in metadata[item.metadata_type]:
metadata[item.metadata_type].append(value)
else:
metadata[item.metadata_type] = [value] if value else ''
initial = []
for key, value in metadata.items():
initial.append({
'metadata_type': key,
'value': ', '.join(value)
})
formset = MetadataRemoveFormSet(initial=initial)
if request.method == 'POST':
formset = MetadataRemoveFormSet(request.POST)
if formset.is_valid():
for document in documents:
for form in formset.forms:
if form.cleaned_data['update']:
metadata_type = get_object_or_404(MetadataType, pk=form.cleaned_data['id'])
try:
document_metadata = DocumentMetadata.objects.get(document=document, metadata_type=metadata_type)
document_metadata.delete()
                            messages.success(request, _('Successfully removed metadata type "%(metadata_type)s" from document: %(document)s.') % {
'metadata_type': metadata_type, 'document': document})
except Exception as exception:
messages.error(request, _('Error removing metadata type "%(metadata_type)s" from document: %(document)s; %(exception)s') % {
'metadata_type': metadata_type, 'document': document, 'exception': ', '.join(exception.messages)})
return HttpResponseRedirect(next)
context = {
'form_display_mode_table': True,
'form': formset,
'next': next,
}
if len(documents) == 1:
context['object'] = documents[0]
context['title'] = ungettext(
'Remove metadata types from the document',
'Remove metadata types from the documents',
len(documents)
)
return render_to_response('main/generic_form.html', context,
context_instance=RequestContext(request))
def metadata_multiple_remove(request):
return metadata_remove(request, document_id_list=request.GET.get('id_list', []))
def metadata_view(request, document_id):
document = get_object_or_404(Document, pk=document_id)
try:
Permission.objects.check_permissions(request.user, [PERMISSION_METADATA_DOCUMENT_VIEW])
except PermissionDenied:
AccessEntry.objects.check_access(PERMISSION_METADATA_DOCUMENT_VIEW, request.user, document)
return render_to_response('main/generic_list.html', {
'title': _('Metadata for document: %s') % document,
'object_list': document.metadata.all(),
'extra_columns': [
{'name': _('Value'), 'attribute': 'value'},
{'name': _('Required'), 'attribute': encapsulate(lambda x: x.metadata_type in document.document_type.metadata.filter(required=True))}
],
'hide_link': True,
'object': document,
}, context_instance=RequestContext(request))
def documents_missing_required_metadata(request):
pre_object_list = Document.objects.filter(document_type__metadata__required=True, metadata__value__isnull=True)
try:
Permission.objects.check_permissions(request.user, [PERMISSION_DOCUMENT_VIEW])
except PermissionDenied:
        # If the user doesn't have the global permission, get a list of the
        # documents for which they do have access and use it to filter the
        # provided object_list
object_list = AccessEntry.objects.filter_objects_by_access(
PERMISSION_DOCUMENT_VIEW, request.user, pre_object_list)
else:
object_list = pre_object_list
context = {
'object_list': object_list,
'title': _('Documents missing required metadata'),
'hide_links': True,
}
return document_list(
request,
extra_context=context
)
# Setup views
def setup_metadata_type_list(request):
Permission.objects.check_permissions(request.user, [PERMISSION_METADATA_TYPE_VIEW])
context = {
'object_list': MetadataType.objects.all(),
'title': _('Metadata types'),
'hide_link': True,
'extra_columns': [
{
'name': _('Internal name'),
'attribute': 'name',
},
]
}
return render_to_response('main/generic_list.html', context,
context_instance=RequestContext(request))
def setup_metadata_type_edit(request, metadatatype_id):
Permission.objects.check_permissions(request.user, [PERMISSION_METADATA_TYPE_EDIT])
metadata_type = get_object_or_404(MetadataType, pk=metadatatype_id)
if request.method == 'POST':
form = MetadataTypeForm(instance=metadata_type, data=request.POST)
if form.is_valid():
try:
form.save()
messages.success(request, _('Metadata type edited successfully'))
return HttpResponseRedirect(reverse('metadata:setup_metadata_type_list'))
except Exception as exception:
messages.error(request, _('Error editing metadata type; %s') % exception)
else:
form = MetadataTypeForm(instance=metadata_type)
return render_to_response('main/generic_form.html', {
'title': _('Edit metadata type: %s') % metadata_type,
'form': form,
'object': metadata_type,
}, context_instance=RequestContext(request))
def setup_metadata_type_create(request):
Permission.objects.check_permissions(request.user, [PERMISSION_METADATA_TYPE_CREATE])
if request.method == 'POST':
form = MetadataTypeForm(request.POST)
if form.is_valid():
form.save()
messages.success(request, _('Metadata type created successfully'))
return HttpResponseRedirect(reverse('metadata:setup_metadata_type_list'))
else:
form = MetadataTypeForm()
return render_to_response('main/generic_form.html', {
'title': _('Create metadata type'),
'form': form,
}, context_instance=RequestContext(request))
def setup_metadata_type_delete(request, metadatatype_id):
Permission.objects.check_permissions(request.user, [PERMISSION_METADATA_TYPE_DELETE])
metadata_type = get_object_or_404(MetadataType, pk=metadatatype_id)
post_action_redirect = reverse('metadata:setup_metadata_type_list')
previous = request.POST.get('previous', request.GET.get('previous', request.META.get('HTTP_REFERER', post_action_redirect)))
next = request.POST.get('next', request.GET.get('next', request.META.get('HTTP_REFERER', post_action_redirect)))
if request.method == 'POST':
try:
metadata_type.delete()
messages.success(request, _('Metadata type: %s deleted successfully.') % metadata_type)
except Exception as exception:
messages.error(request, _('Metadata type: %(metadata_type)s delete error: %(error)s') % {
'metadata_type': metadata_type, 'error': exception})
return HttpResponseRedirect(next)
context = {
'delete_view': True,
'next': next,
'previous': previous,
'object': metadata_type,
'title': _('Are you sure you wish to delete the metadata type: %s?') % metadata_type,
}
return render_to_response('main/generic_confirm.html', context,
context_instance=RequestContext(request))
def setup_document_type_metadata(request, document_type_id):
Permission.objects.check_permissions(request.user, [PERMISSION_DOCUMENT_TYPE_EDIT])
document_type = get_object_or_404(DocumentType, pk=document_type_id)
return assign_remove(
request,
left_list=lambda: generate_choices_w_labels(set(MetadataType.objects.all()) - set(MetadataType.objects.filter(id__in=document_type.metadata.values_list('metadata_type', flat=True))), display_object_type=False),
right_list=lambda: generate_choices_w_labels(document_type.metadata.filter(required=False), display_object_type=False),
add_method=lambda x: document_type.metadata.create(metadata_type=x, required=False),
remove_method=lambda x: x.delete(),
extra_context={
'document_type': document_type,
'navigation_object_name': 'document_type',
'main_title': _('Optional metadata types for document type: %s') % document_type,
},
decode_content_type=True,
)
def setup_document_type_metadata_required(request, document_type_id):
Permission.objects.check_permissions(request.user, [PERMISSION_DOCUMENT_TYPE_EDIT])
document_type = get_object_or_404(DocumentType, pk=document_type_id)
return assign_remove(
request,
left_list=lambda: generate_choices_w_labels(set(MetadataType.objects.all()) - set(MetadataType.objects.filter(id__in=document_type.metadata.values_list('metadata_type', flat=True))), display_object_type=False),
right_list=lambda: generate_choices_w_labels(document_type.metadata.filter(required=True), display_object_type=False),
add_method=lambda x: document_type.metadata.create(metadata_type=x, required=True),
remove_method=lambda x: x.delete(),
extra_context={
'document_type': document_type,
'navigation_object_name': 'document_type',
'main_title': _('Required metadata types for document type: %s') % document_type,
},
decode_content_type=True,
)
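# --- Hedged wiring sketch (not part of the original module) ---
# The reverse() calls above assume URL names inside a 'metadata' namespace.
# The actual routes live in the app's urls.py, which is not shown here, so the
# patterns below are illustrative guesses only.
#
# urlpatterns = [
#     url(r'^setup/metadata_types/$', setup_metadata_type_list, name='setup_metadata_type_list'),
#     url(r'^setup/metadata_types/(?P<metadatatype_id>\d+)/edit/$', setup_metadata_type_edit, name='setup_metadata_type_edit'),
#     url(r'^setup/metadata_types/(?P<metadatatype_id>\d+)/delete/$', setup_metadata_type_delete, name='setup_metadata_type_delete'),
# ]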
|
StarcoderdataPython
|
3220483
|
<gh_stars>1-10
from . import redshift_query
import logging
def main():
logging.basicConfig()
redshift_query.query({})
return 0
|
StarcoderdataPython
|
1778424
|
<filename>src/token_auth/settings.py<gh_stars>1-10
# Django settings for cms project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('<NAME>', '<EMAIL>'),
)
CACHE_BACKEND = 'locmem:///'
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
MEDIA_URL = '/media/uploaded/'
ADMIN_MEDIA_PREFIX = '/media/admin/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '*xq7m@)*f2awoj!spa0(jibsrz9%c0d=e(g)v*!17y(vx0ue_3'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.auth",
"django.core.context_processors.i18n",
"django.core.context_processors.debug",
"django.core.context_processors.request",
"django.core.context_processors.media",
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.contrib.csrf.middleware.CsrfMiddleware',
'token_auth.middleware.ProtectedURLMiddleware',
)
ROOT_URLCONF = 'token_auth.urls'
INSTALLED_APPS = (
'token_auth',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.sites',
)
|
StarcoderdataPython
|
1688325
|
from azureml.core import Run
from mlapp.main import MLApp
from mlapp.handlers.wrappers.file_storage_wrapper import file_storage_instance
from mlapp.integrations.aml.utils.run_class import load_config_from_string, tag_and_log_run, tag_and_log_outputs
import argparse
from config import settings
from mlapp.managers.flow_manager import FlowManager
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, dest='config', help='configuration')
args = parser.parse_args()
run = Run.get_context()
# pre-processing
config = load_config_from_string(args.config)
tag_and_log_run(config)
# init mlapp
MLApp(settings)
# run config
_, output_ids, output_data = FlowManager(Run.get_context().id, config).run()
# post-processing
tag_and_log_outputs(output_ids)
# file storage post-processing
file_storage_instance.postprocessing()
|
StarcoderdataPython
|
83482
|
class Calculator:
def __init__(self):
self.calculation = 0
self.operation = None
def plus(self, num):
self.calculation += num
def minus(self, num):
self.calculation -= num
def multiply(self, num):
self.calculation *= num
def divide(self, num):
self.calculation /= num
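
# --- Hedged usage sketch (not part of the original file) ---
# A minimal example of chaining the mutating operations; the running total is
# kept in the `calculation` attribute.
if __name__ == "__main__":
    calc = Calculator()
    calc.plus(10)
    calc.multiply(3)
    calc.minus(5)
    print(calc.calculation)  # expected: 25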
|
StarcoderdataPython
|
1744104
|
<filename>guess/urls.py<gh_stars>0
from django.conf.urls import url
from .views import GuessResultList, CachedGuessResultList
urlpatterns = [
url(r'^guessresult/(?P<pagepath>.+)/(?P<effectivetype>.+)/$', GuessResultList.as_view(), name='guess_result'),
url(r'^guessresult/(?P<pagepath>.+)/$', GuessResultList.as_view(), name='guess_result_pagepath_only'),
url(r'^cachedguessresult/(?P<pagepath>.+)/(?P<effectivetype>.+)/$', CachedGuessResultList.as_view(), name='cached_guess_result'),
url(r'^cachedguessresult/(?P<pagepath>.+)/$', CachedGuessResultList.as_view(), name='cached_guess_result_pagepath_only'),
]
|
StarcoderdataPython
|
3326684
|
<gh_stars>0
import itertools
import types
import numpy as np
import hierarchy as hrcy
def test_get_transition_rate():
capacities = [3, 1]
state_in = [[2, 1], [1, 0]]
state_out = [[2, 0], [1, 0]]
r = 1.1
lmbda = [2, 3]
mu = [[0.2, 0.1], [1.2, 1.1]]
assert (
hrcy.transitions.get_rate(
state_in=state_in,
state_out=state_out,
capacities=capacities,
r=r,
lmbda=lmbda,
mu=mu,
)
== 0.1
)
state_out = [[1, 1], [1, 0]]
assert (
hrcy.transitions.get_rate(
state_in=state_in,
state_out=state_out,
capacities=capacities,
r=r,
lmbda=lmbda,
mu=mu,
)
== 0.2
)
state_out = [[1, 0], [1, 0]]
assert (
hrcy.transitions.get_rate(
state_in=state_in,
state_out=state_out,
capacities=capacities,
r=r,
lmbda=lmbda,
mu=mu,
)
== 0
)
state_out = [[1, 2], [1, 0]]
assert (
hrcy.transitions.get_rate(
state_in=state_in,
state_out=state_out,
capacities=capacities,
r=r,
lmbda=lmbda,
mu=mu,
)
== 0
)
def test_get_transition_matrix():
capacities = [2, 1]
r = 1.1
lmbda = [2, 3]
mu = [[0.2, 0.1], [1.2, 1.1]]
matrix = hrcy.transitions.get_transition_matrix(
capacities=capacities, r=r, lmbda=lmbda, mu=mu
)
assert np.array_equal(matrix.shape, np.array([5, 5]))
expected_matrix = np.array(
[
[-0.1, 0.0, 0.0, 0.1, 0.0],
[0.0, -0.3, 0.0, 0.2, 0.1],
[0.0, 0.0, -0.2, 0.0, 0.2],
[3.0, 2.0, 0.0, -5.0, 0.0],
[0.0, 3.0, 2.0, 0.0, -5.0],
]
)
assert np.allclose(matrix, expected_matrix)
def test_get_transition_matrix_example_two():
capacities = [4, 2, 1]
r = 1.1
lmbda = [2, 3]
mu = [[0.2, 0.1], [1.2, 1.1], [1.5, 1.7]]
matrix = hrcy.transitions.get_transition_matrix(
capacities=capacities, r=r, lmbda=lmbda, mu=mu
)
assert np.array_equal(matrix.shape, np.array([45, 45]))
assert np.allclose(np.sum(matrix, axis=1), 0)
def test_get_potential_states_hire():
capacities = [4, 2, 1]
state_in = [[2, 1], [2, 0], [1, 0]]
potential_states = hrcy.transitions.get_potential_states(
state_in=state_in, capacities=capacities
)
expected_states = ([[3, 1], [2, 0], [1, 0]], [[2, 2], [2, 0], [1, 0]])
assert all(
np.array_equal(potential, expected)
for potential, expected in zip(potential_states, expected_states)
)
def test_get_potential_states_promotion():
capacities = [4, 2, 1]
state_in = [[2, 2], [1, 0], [1, 0]]
potential_states = hrcy.transitions.get_potential_states(
state_in=state_in, capacities=capacities
)
expected_states = ([[1, 2], [2, 0], [1, 0]], [[2, 1], [1, 1], [1, 0]])
assert len(expected_states) == len(potential_states)
assert all(
np.array_equal(potential, expected)
for potential, expected in zip(potential_states, expected_states)
)
def test_get_potential_states_promotion_all_capacities_one():
capacities = [1, 1, 1]
state_in = [[0, 1], [0, 0], [0, 1]]
potential_states = hrcy.transitions.get_potential_states(
state_in=state_in, capacities=capacities
)
expected_states = ([[0, 0], [0, 1], [0, 1]],)
assert len(expected_states) == len(potential_states)
assert all(
np.array_equal(potential, expected)
for potential, expected in zip(potential_states, expected_states)
)
def test_get_potential_states_retirement():
capacities = [4, 2, 1]
state_in = [[2, 2], [1, 1], [1, 0]]
potential_states = hrcy.transitions.get_potential_states(
state_in=state_in, capacities=capacities
)
expected_states = (
[[1, 2], [1, 1], [1, 0]],
[[2, 1], [1, 1], [1, 0]],
[[2, 2], [0, 1], [1, 0]],
[[2, 2], [1, 0], [1, 0]],
)
assert len(expected_states) == len(potential_states)
assert all(
np.array_equal(potential, expected)
for potential, expected in zip(potential_states, expected_states)
)
def test_is_full():
capacities = [4, 2, 1]
state_in = [[2, 2], [2, 0], [1, 0]]
assert hrcy.transitions.is_full(state_in=state_in, capacities=capacities)
state_in = [[2, 2], [1, 0], [1, 0]]
assert not hrcy.transitions.is_full(
state_in=state_in, capacities=capacities
)
def test_find_free_levels():
capacities = [4, 2, 1]
state_in = [[2, 2], [2, 0], [1, 0]]
assert (
list(
hrcy.transitions.find_free_levels(
state_in=state_in, capacities=capacities
)
)
== []
)
state_in = [[2, 2], [1, 0], [1, 0]]
assert list(
hrcy.transitions.find_free_levels(
state_in=state_in, capacities=capacities
)
) == [1]
def test_promotion_is_affected_by_top_level():
capacities = [4, 3, 2, 1]
lmbda = [1, 1]
r = 5
mu = [[1, 1], [1, 1], [1, 1]]
state_in = np.array([[1.0, 3.0], [2.0, 1.0], [0.0, 1.0], [1.0, 0.0]])
state_out = np.array([[1.0, 3.0], [1.0, 1.0], [1.0, 1.0], [1.0, 0.0]])
assert (
hrcy.transitions.get_rate(state_in, state_out, capacities, r, lmbda, mu)
== r
)
|
StarcoderdataPython
|
3252114
|
import time
from turtle import Screen
from player import Player
from car_manager import CarManager
from scoreboard import Scoreboard
screen = Screen()
screen.setup(width=600, height=600)
screen.tracer(0)
player=Player()
car_manager=CarManager()
scoreboard=Scoreboard()
screen.listen()
screen.onkeypress(player.move_forwards,"Up")
game_is_on = True
while game_is_on:
time.sleep(0.1)
screen.update()
car_manager.create_car()
car_manager.move_cars()
for car in car_manager.all_cars:
if car.distance(player)<20:
game_is_on=False
scoreboard.game_over()
if player.ycor()>=280:
scoreboard.point()
scoreboard.update_score()
player.start()
car_manager.level_up()
screen.exitonclick()
|
StarcoderdataPython
|
1613875
|
<filename>src/visualizers/error.py
from pathlib import Path
def check_directory_exists(dir_path: Path, dir_type: str):
if not dir_path.exists():
print(f"{dir_type} directory doesn't exists: {dir_path}")
exit(1)
|
StarcoderdataPython
|
169574
|
<filename>tests/test_eval_metrics.py
import unittest
from citeomatic.eval_metrics import precision_recall_f1_at_ks, average_results
class TestEvalMetrics(unittest.TestCase):
def test_precision_recall_f1_at_ks(self):
gold_y = ['1', '2', '3']
pred_y = ['1', '4', '3']
scores_y = [1.0, 0.1, 0.5]
k = [1, 2, 3]
results = precision_recall_f1_at_ks(gold_y, pred_y, scores=None, k_list=k)
assert results['precision'] == [1.0, 0.5, 2/3]
assert results['recall'] == [1/3, 1/3, 2/3]
assert results['f1'] == [1/2, 2/5, 2/3]
assert results['mrr'] == 1.0
results_2 = precision_recall_f1_at_ks(gold_y, pred_y, scores_y, k)
assert results_2['precision'] == [1.0, 1.0, 2/3]
assert results_2['recall'] == [1/3, 2/3, 2/3]
assert results_2['f1'] == [1/2, 4/5, 2/3]
assert results_2['mrr'] == 1.0
def test_average_results(self):
r1 = {
'precision': [1.0, 0.5, 2/3],
'recall': [1.0, 0.5, 2/3],
'f1': [1.0, 0.5, 2/3],
'mrr': 1.0,
}
r2 = {
'precision': [3.0, 1.0, 4/3],
'recall': [3.0, 1.0, 4/3],
'f1': [3.0, 1.0, 4/3],
'mrr': 0.5,
}
averaged_results = average_results([r1, r2])
assert averaged_results['precision'] == [2.0, 0.75, 1.0]
assert averaged_results['mrr'] == 0.75
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1648612
|
<gh_stars>0
import sqlite3
import pandas as pd
class DBConnect:
def __init__(self, db_path):
self.con = sqlite3.connect(db_path)
self.cur = self.con.cursor()
def initialize_db(self):
self.cur.execute("DROP TABLE IF EXISTS officer_payment")
self.cur.execute("DROP TABLE IF EXISTS schedule_j")
self.cur.execute("DROP TABLE IF EXISTS irs_dashboard")
self.cur.execute("DROP TABLE IF EXISTS grants")
self.cur.execute(officer_payment_sql)
self.cur.execute(schedule_j_sql)
self.cur.execute(irs_dashboard_sql)
self.cur.execute(grants_sql)
def saveDF(self, df_pandas, table, insert="replace", index=False):
df_pandas.to_sql(table, con=self.con, if_exists=insert, index=index)
def query(self, sqlStr):
df = pd.read_sql(sqlStr, con=self.con)
return df
irs_dashboard_sql = """
CREATE TABLE irs_dashboard (
EIN text,
URL text,
LastUpdated text,
OrganizationName text,
TaxPeriod text,
TaxPeriodBeginDt text,
TaxPeriodEndDt text,
TaxYr text,
StateAbbr text,
Mission text,
TotalEmployee text,
ObjectId text,
NTEECommonCode text,
Foundation text,
OfficerName text,
OfficerTitle text,
OfficerCompensationPart9 float,
GrantDesc text,
GrantMoneyTotal float,
ProgramExpenses float,
PYTotalRevenue float,
CYTotalRevenue float,
PYRevenuesLessExpenses float,
CYRevenuesLessExpenses float,
PYSalaryBenefitsPaid float,
CYSalaryBenefitsPaid float,
TotalAssetsBOY float,
TotalAssetsEOY float,
TotalLiabilitiesBOY float,
TotalLiabilitiesEOY float,
TotalExpenses float,
CYTotalExpenses float,
PYTotalExpenses float,
Part9_1GrantsGov float,
Part9_2GrantsIndv float,
Part9_3GrantsForGov float,
Part9_4Benefits float,
Part9_5OfficerComp float,
Part9_6DisqComp float,
Part9_7OtherSalary float,
Part9_8Pension float,
Part9_9OtherBenefits float,
WorkingCapital float,
LiabilitiesToAsset float,
SurplusMargin float,
ProgramExp float,
ScheduleA text,
ScheduleJ text,
ScheduleI text,
ScheduleO text)"""
officer_payment_sql = """
CREATE TABLE officer_payment (
EIN text PRIMARY KEY,
ObjectId text,
OrganizationName text,
TaxYr text,
StateAbbr text,
PersonNm text,
TitleTxt text,
AverageHoursPerWeekRt float,
ReportableCompFromOrgAmt float,
OtherCompensationAmt float,
ReportableCompFromRltdOrgAmt float,
AverageHoursPerWeekRltdOrgRt float,
IndividualTrusteeOrDirectorInd bool,
OfficerInd bool,
HighestCompensatedEmployeeInd bool,
FormerOfcrDirectorTrusteeInd bool,
KeyEmployeeInd bool,
InstitutionalTrusteeInd bool,
TotalCompFromOrgAmt float)"""
schedule_j_sql = """
CREATE TABLE schedule_j (
EIN text,
ObjectId text,
OrganizationName text,
StateAbbr text,
TaxYr text,
PersonNm text,
TitleTxt text,
TotalCompensationFilingOrgAmt float,
BaseCompensationFilingOrgAmt float,
BonusFilingOrganizationAmount float,
OtherCompensationFilingOrgAmt float,
DeferredCompensationFlngOrgAmt float,
NontaxableBenefitsFilingOrgAmt float,
TotalCompensationRltdOrgsAmt float,
OtherCompensationRltdOrgsAmt float,
BonusRelatedOrganizationsAmt float,
CompensationBasedOnRltdOrgsAmt float,
DeferredCompRltdOrgsAmt float,
NontaxableBenefitsRltdOrgsAmt float,
CompReportPrior990FilingOrgAmt float,
CompReportPrior990RltdOrgsAmt float,
SeverancePaymentInd bool,
TravelForCompanionsInd text)"""
grants_sql = """
CREATE TABLE grants (
EIN text,
ObjectId text,
OrganizationName text,
TaxYr text,
Address text,
City text,
StateAbbr text,
RecipientEIN text,
RecipientBusinessName_BusinessNameLine1Txt text,
PurposeOfGrantTxt text,
CashGrantAmt float,
NonCashAssistanceAmt float,
NonCashAssistanceDesc text,
IRCSectionDesc text,
USAddress_CityNm text,
USAddress_StateAbbreviationCd text,
ForeignAddress_AddressLine1Txt text,
ForeignAddress_CountryCd text)"""
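
# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of how this helper might be used. The database path and
# the sample row below are illustrative placeholders, not values taken from
# the original project.
if __name__ == "__main__":
    db = DBConnect("example_irs.db")
    db.initialize_db()
    sample = pd.DataFrame([{
        "EIN": "000000000",
        "ObjectId": "1",
        "OrganizationName": "Example Org",
        "TaxYr": "2020",
        "StateAbbr": "NY",
        "PersonNm": "Jane Doe",
        "TitleTxt": "CEO",
        "TotalCompFromOrgAmt": 0.0,
    }])
    # append so the schema created by initialize_db() is kept
    db.saveDF(sample, "officer_payment", insert="append")
    print(db.query("SELECT OrganizationName, TitleTxt FROM officer_payment LIMIT 5"))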
|
StarcoderdataPython
|
1681470
|
"""/**
* @author [<NAME>]
* @email [<EMAIL>]
* @create date 2020-05-14 11:49:34
* @modify date 2020-05-14 11:49:34
* @desc [
Data module for statistics.
Prefix SUF indicates "sufficient". Used for data mins.
]
*/
"""
##########
# Sufficient Data Checks
##########
SUF_TABLE_DATA = 5
SUF_ERR_LIST = 5
SUF_ANSWERED_TABLES = 15
|
StarcoderdataPython
|
1726570
|
from django.http import JsonResponse, HttpResponseBadRequest, HttpResponse
from src.tipboard.app.applicationconfig import getRedisPrefix
from src.tipboard.app.properties import BASIC_CONFIG, REDIS_DB, DEBUG, ALLOWED_TILES
from src.tipboard.app.cache import MyCache, save_tile
from src.tipboard.app.utils import checkAccessToken
from src.tipboard.app.parser import getConfigNames
def project_info(request):
""" Return info of server tipboard """
cache = MyCache()
return JsonResponse(dict(is_redis_connected=cache.isRedisConnected,
last_update=cache.getLastUpdateTime(),
first_start=cache.getFirstTimeStarter(),
project_default_config=BASIC_CONFIG,
dashboard_list=getConfigNames(),
redis_db=REDIS_DB))
def get_tile(request, tile_key):
""" Return Json from redis for tile_key """
if not checkAccessToken(method='GET', request=request, unsecured=True):
return HttpResponse('API KEY incorrect', status=401)
redis = MyCache().redis
if redis.exists(getRedisPrefix(tile_key)):
return HttpResponse(redis.get(tile_key))
return HttpResponseBadRequest(f'{tile_key} key does not exist.')
def delete_tile(request, tile_key):
""" Delete in redis """
if not checkAccessToken(method='DELETE', request=request, unsecured=True):
return HttpResponse('API KEY incorrect', status=401)
redis = MyCache().redis
if redis.exists(getRedisPrefix(tile_key)):
redis.delete(tile_key)
return HttpResponse('Tile\'s data deleted.')
return HttpResponseBadRequest(f'{tile_key} key does not exist.')
def tile_rest(request, tile_key):
""" Handles reading and deleting of tile's data """
if request.method == 'DELETE':
return delete_tile(request, tile_key)
if request.method == 'GET':
return get_tile(request, tile_key)
def sanity_push_api(request, unsecured):
""" Test token, all data present, correct tile_template and tile_id present in cache """
if not checkAccessToken(method='POST', request=request, unsecured=unsecured):
return False, HttpResponse('API KEY incorrect', status=401)
HttpData = request.POST
if not HttpData.get('tile_id', None) or not HttpData.get('tile_template', None) or \
not HttpData.get('data', None):
return False, HttpResponseBadRequest('Missing data')
if HttpData.get('tile_template', None) not in ALLOWED_TILES:
tile_template = HttpData.get('tile_template', None)
        return False, HttpResponseBadRequest(f'tile_template: {tile_template} is unknown')
cache = MyCache()
tilePrefix = getRedisPrefix(HttpData.get('tile_id', None))
if not cache.redis.exists(tilePrefix) and not DEBUG:
        return False, HttpResponseBadRequest(f'tile_id: {tilePrefix} is unknown')
return True, HttpData
def push_api(request, unsecured=False):
""" Update the content of a tile (widget) """
if request.method == 'POST':
state, HttpData = sanity_push_api(request, unsecured)
if state:
tile_id = HttpData.get('tile_id', None)
tile_template = HttpData.get('tile_template', None)
tile_data = HttpData.get('data', None)
tile_meta = HttpData.get('meta', None)
if save_tile(tile_id=tile_id, template=tile_template, data=tile_data, meta=tile_meta):
return HttpResponse(f'{tile_id} data updated successfully.')
HttpData = HttpResponse(f'Error while saving tile with tile_id: {tile_id}')
return HttpData
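
# --- Hedged client-side sketch (not part of the original module) ---
# push_api expects a POST with 'tile_id', 'tile_template' and 'data' fields
# (see sanity_push_api above). The endpoint URL and tile values below are
# placeholders; the real route depends on the project's urls.py and the
# configured ALLOWED_TILES, which are not shown here.
#
# import json
# import requests
# requests.post("http://localhost:8000/api/push", data={
#     "tile_id": "example_tile",
#     "tile_template": "text",
#     "data": json.dumps({"text": "hello"}),
# })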
|
StarcoderdataPython
|
3385110
|
import os
import numpy as np
import gensim
class YearTextEmbeddings:
"""
Representation of the year using average base embeddings of entities
"""
def __init__(self, entity_embeddings, years_folder, output_file, tfidf = False):
self.entity_embeddings = entity_embeddings
self.years_folder = years_folder
self.output_file = output_file
self.tfidf = tfidf
self.entities_inside_year = dict()
self.model = None
def get_entities_of_years(self, year):
"""
For a given year, retrieves the list of the entities contained inside that year. This method will load
all the entities for all the years once for all.
:param year:
:return:
"""
        if not self.entities_inside_year:
for file_name in os.listdir(self.years_folder):
path = (os.path.join(self.years_folder, file_name))
with open(path, "r") as filino:
read_entities = filino.readlines()
entities = read_entities[0].split()
self.entities_inside_year[file_name] = entities
return self.entities_inside_year[year]
def fit(self):
with open(self.output_file, "w") as file_with_output_model:
folders = os.listdir(self.years_folder)
num_files = len(folders)
            dimensions = self.entity_embeddings.vector_size  # dimensionality of the entity embeddings
file_with_output_model.write(str(num_files) + " " + str(dimensions) + " " + "\n")
for name in folders:
path = (os.path.join(self.years_folder, name))
with open(path, "r") as filino:
coso = filino.readlines()
entities = (coso[0].split())
collect_embeddings = []
for entity in set(entities):
try:
time_array = self.entity_embeddings[entity]
collect_embeddings.append(time_array)
except:
continue
collect_embeddings = np.average(collect_embeddings, axis=0)
embedding_list = collect_embeddings.tolist()
string_to_save = ' '.join(map(str, embedding_list))
                    file_with_output_model.write(str(name) + " " + string_to_save + "\n")
self.model = gensim.models.KeyedVectors.load_word2vec_format(self.output_file)
|
StarcoderdataPython
|
1780375
|
"""This module is part of the CFSAN SNP Pipeline. It contains the code to
calculate the pairwise SNP distances between samples.
"""
from __future__ import print_function
from __future__ import absolute_import
import itertools
from snppipeline import utils
from snppipeline.utils import verbose_print
def calculate_snp_distances(args):
"""Calculate pairwise sample SNP distances.
Calculate pairwise SNP distances from the multi-fasta SNP matrix.
Generate a file of pairwise distances and a file containing a matrix
of distances.
This function expects, or creates '(*)', the following files:
snpma.fasta
snp_distance_pairwise.tsv*
snp_distance_matrix.tsv*
The files are used as follows:
1. The snpma.fasta input file contains the snp matrix for all samples
2. The snp_distance_pairwise.tsv output file contains a three column
tab-separated table of distances between all pairs of samples
    3. The snp_distance_matrix.tsv output file contains a matrix of
distances between all samples.
Parameters
----------
args : Namespace
inputFile: File path (not just file name) for the snp matrix in fasta format
pairwiseFile: File path (not just file name) of the output pairwise distance file
matrixFile: File path (not just file name) for the output distance matrix file
Raises:
Examples:
args = argparse.Namespace
args.inputFile = 'snpma.fasta'
args.pairwiseFile = 'snp_distance_pairwise.tsv'
args.matrixFile = 'snp_distance_matrix.tsv'
calculate_snp_distances(args)
"""
utils.print_log_header()
utils.print_arguments(args)
#==========================================================================
# Validate arguments
#==========================================================================
input_file = args.inputFile
pairwise_file = args.pairwiseFile
matrix_file = args.matrixFile
force_flag = args.forceFlag
bad_file_count = utils.verify_existing_input_files("SNP matrix file", [input_file])
if bad_file_count > 0:
utils.global_error("Error: cannot calculate sequence distances without the snp matrix file.")
if not pairwise_file and not matrix_file:
utils.global_error("Error: no output file specified.")
#==========================================================================
# Check freshness
#==========================================================================
rebuild_pairwise_file = pairwise_file and utils.target_needs_rebuild([input_file], pairwise_file)
rebuild_matrix_file = matrix_file and utils.target_needs_rebuild([input_file], matrix_file)
if force_flag or rebuild_pairwise_file or rebuild_matrix_file:
#------------------------------
# Read in snp matrix file
#------------------------------
seqs = {}
with open(input_file) as ifile:
for line in ifile:
line = line.rstrip('\n')
if line.startswith('>'):
curr_sample = line.lstrip('>')
seqs[curr_sample] = ''
else:
seqs[curr_sample] += str(line)
#------------------------------
# Count mismatches
#------------------------------
verbose_print("# %s %s" % (utils.timestamp(), "Calculating all pairwise distances"))
ids = sorted(seqs.keys())
pairwise_mismatches = dict() # tuple (seq1 id, seq2 id) -> int
for id1, id2 in itertools.combinations(ids, 2):
mismatches = utils.calculate_sequence_distance(seqs[id1], seqs[id2])
pairwise_mismatches[(id1, id2)] = mismatches
pairwise_mismatches[(id2, id1)] = mismatches
#------------------------------
# Print distance files
#------------------------------
if pairwise_file:
with open(pairwise_file, 'w') as p_out:
p_out.write('%s\n' % '\t'.join(['Seq1', 'Seq2', 'Distance']))
for id1, id2 in itertools.product(ids, ids):
mismatches = pairwise_mismatches.get((id1, id2), 0) # zero when id1=id2
p_out.write("%s\t%s\t%i\n" % (id1, id2, mismatches))
if matrix_file:
with open(matrix_file, 'w') as m_out:
m_out.write('\t%s\n' % '\t'.join(ids)) # matrix header
# write table of mismatches
for id1 in ids:
mismatches = [pairwise_mismatches.get((id1, id2), 0) for id2 in ids]
mismatch_strs = map(str, mismatches)
m_out.write("%s\t%s\n" % (id1, '\t'.join(mismatch_strs)))
else:
utils.verbose_print("Distance files have already been freshly built. Use the -f option to force a rebuild.")
|
StarcoderdataPython
|
1721091
|
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import HybridBlock
from ceecnet.utils.get_norm import *
class Conv2DNormed(HybridBlock):
"""
Convenience wrapper layer for 2D convolution followed by a normalization layer
All other keywords are the same as gluon.nn.Conv2D
"""
def __init__(self, channels, kernel_size, strides=(1, 1),
padding=(0, 0), dilation=(1, 1), activation=None,
weight_initializer=None, in_channels=0, _norm_type = 'BatchNorm', norm_groups=None, axis =1 , groups=1, **kwards):
super().__init__(**kwards)
with self.name_scope():
self.conv2d = gluon.nn.Conv2D(channels, kernel_size = kernel_size,
strides= strides,
padding=padding,
dilation= dilation,
activation=activation,
use_bias=False,
weight_initializer = weight_initializer,
groups=groups,
in_channels=0)
self.norm_layer = get_norm(_norm_type, axis=axis, norm_groups= norm_groups)
def hybrid_forward(self,F,_x):
x = self.conv2d(_x)
x = self.norm_layer(x)
return x
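
# --- Hedged usage sketch (illustrative only, not part of the original module) ---
# Wrapping a 3x3 convolution with the default 'BatchNorm' normalisation; the
# input shape and channel counts below are arbitrary.
if __name__ == "__main__":
    layer = Conv2DNormed(channels=16, kernel_size=3, padding=(1, 1))
    layer.initialize()
    out = layer(mx.nd.random.uniform(shape=(1, 3, 32, 32)))
    print(out.shape)  # expected: (1, 16, 32, 32)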
|
StarcoderdataPython
|
1697682
|
from permutation import Permutation
from itertools import permutations
from time import time
import numpy as np
import math
"""
This has several perfect hash functions to give each position of the cube
a unique coordinate. It can also reverse the hash function to give the
relevant cube information back from a coordinate.
"""
def co_ori(co_ori):
'''
    co_ori is an 8-digit ternary (base-3) sequence; only the first 7 digits are used, since the last one is fixed by the orientation parity
'''
rank = 0
for i in range(7):
rank += co_ori[i]*(3**(6-i))
return rank
def co_ori_inv(rank):
'''
0 <= rank < 3^7
'''
rank = np.base_repr(rank, base=3)
co_ori = bytearray([0]*8)
start = 7-len(rank)
for i in range(start, 7):
co_ori[i] = int(rank[i-start])
co_ori[7] = (3-sum(co_ori)%3)%3
return co_ori
def eg_ori(eg_ori):
'''
eg_ori is a 12-bit binary, but only the first 11 bits are used
'''
rank = 0
for i in range(11):
rank += eg_ori[i]*(2**(10-i))
return rank
def eg_ori_inv(rank):
'''
0 <= rank < 2^11
'''
rank = np.base_repr(rank, base=2)
eg_ori = bytearray([0]*12)
start = 11-len(rank)
for i in range(start, 11):
eg_ori[i] = int(rank[i-start])
eg_ori[11] = (2-sum(eg_ori)%2)%2
return eg_ori
def ud_edges(egs):
'''
egs is a set of 12 numbers ranging from 0 to 11
we are only interested in entries that are bigger than 7
'''
start = False
k = -1
sum = 0
for n, eg in enumerate(egs):
if eg >= 8:
start = True
k += 1
elif start:
sum += math.comb(n, k)
return sum
def ud_edges_inv(rank):
k = 3
egs = [0]*12
for n in reversed(range(12)):
n_choose_k = math.comb(n, k)
if rank-n_choose_k >= 0:
rank -= n_choose_k
else:
egs[n] = 8+k
k -= 1
if k < 0:
break
return egs
def co_perm(co_perm):
'''
co_perm is a permutation of 0-7
'''
return Permutation(*[i+1 for i in co_perm]).lehmer(8)
def co_perm_inv(rank):
return [i-1 for i in (Permutation.from_lehmer(rank, 8)).to_image(8)]
def eg_perm(eg_perm):
'''
eg_perm is a permutation of 0-7, so same as corners
'''
return Permutation(*[i+1 for i in eg_perm]).lehmer(8)
def eg_perm_inv(rank):
return [i-1 for i in (Permutation.from_lehmer(rank, 8)).to_image(8)]
def ud_perm(ud_perm):
'''
We treat ud_perm as a permutation of 0-3
'''
return Permutation(*[i-7 for i in ud_perm]).lehmer(4)
def ud_perm_inv(rank):
return [i+7 for i in (Permutation.from_lehmer(rank, 4)).to_image(4)]
if __name__ == "__main__":
print(co_ori([2, 1, 2, 1, 1, 0, 0, 2]))
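    # Hedged round-trip sanity checks (added for illustration; the inputs are
    # arbitrary): encoding an orientation or permutation and then decoding the
    # resulting rank should reproduce the original sequence.
    print(list(co_ori_inv(co_ori([2, 1, 2, 1, 1, 0, 0, 2]))))   # -> [2, 1, 2, 1, 1, 0, 0, 2]
    print(co_perm_inv(co_perm([0, 1, 2, 3, 4, 5, 6, 7])))       # -> [0, 1, 2, 3, 4, 5, 6, 7]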
pass
|
StarcoderdataPython
|
3246164
|
<reponame>Elzei/show-off
#!/usr/bin/env python2.7
def dividers(value):
answer = []
for i in xrange(1, (value // 2 ) + 1):
if (value % i) == 0:
answer.append(i)
return answer
def dividers2(value):
return filter(lambda x: (value % x) == 0, xrange(1, (value // 2) + 1))
def dividers_gen(value):
half_range = (value // 2) + 1
for i in xrange(1, half_range):
if (value % i) == 0:
yield i
def dividers2_gen(value):
half_range = (value // 2) + 1
return (i for i in xrange(1, half_range) if (value % i) == 0)
def perfect(value):
answer = []
for i in xrange(2, value + 1):
if i == reduce(lambda x, y: x + y, dividers_gen(i)):
answer.append(i)
return answer
def perfect2(value):
return filter(lambda x: x == reduce(lambda x, y: x + y, dividers_gen(x)),
xrange(2, value + 1)
)
def perfect_gen(value):
for i in xrange(2, value + 1):
if i == reduce(lambda x, y: x + y, dividers_gen(i)):
yield i
def perfect2_gen(value):
return (i for i in xrange(2, value + 1) if i == reduce(lambda x, y: x + y, dividers_gen(i)))
if __name__ == '__main__':
print dividers(1024)
print dividers2(1024)
print "-" * 10
for i in dividers_gen(1024):
print i,
print "\n", "-" * 10
for i in dividers2_gen(1024):
print i,
print "\n", "-" * 10
print perfect2(100)
print "-" * 10
for i in perfect_gen(9000):
print i,
print "\n", "-" * 10
for i in perfect2_gen(9000):
print i,
|
StarcoderdataPython
|
1766898
|
from algoliasearch_django import AlgoliaIndex
from algoliasearch_django.decorators import register
from products.models import Product
@register(Product)
class ProductIndex(AlgoliaIndex):
# should_index = "is_expensive_item"
fields = ["user", "title", "content", "price", "is_public"]
tags = 'get_random_model_tag'
settings = {
"searchableAttributes": ["title", "content"],
"attributesForFacetting": ["user", "is_public"],
}
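# Hedged note: with algoliasearch-django, an index declared like the one above
# is typically rebuilt with the library's management command (command name
# assumed from the package's documentation, not from this file):
#
#   python manage.py algolia_reindex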
|
StarcoderdataPython
|
1657048
|
<reponame>focusunsink/study_python<filename>np/8_assure_quality_with_testing/7_to_8_all_close.py<gh_stars>0
# -*- coding:utf-8 -*-
"""
Project : numpy
File Name : 7_to_8_all_close
Author : Focus
Date : 8/23/2021 9:02 AM
Keywords : assert_allclose, assert_array_equal,
Abstract : |a - b| <= (atol + rtol * |b|)
Param :
Usage : py 7_to_8_all_close
Reference :
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
l0 = [0, 0.123456789, np.nan]
l1 = [0, 0.123456780, np.nan]
print("Pass", np.testing.assert_allclose(l0, l1, rtol=1e-7, atol=0))
print("Fail", np.testing.assert_array_equal(l0, l1))
|
StarcoderdataPython
|
1766553
|
<filename>primetest.py
import time
NUM_PRIMES = 50000
array = list(range(0, NUM_PRIMES))
isPrime = True
t1 = time.time()
i = 2
idx = 0
while (idx < NUM_PRIMES):
isPrime = True;
y = 0
for y in range(0, idx):
if (i % array[y] == 0):
isPrime = False
break
if (isPrime):
array[idx] = i
idx += 1
i += 1
t2 = time.time()
for x in range(0, 100):
print(array[x], end="\t")
elapsedMsec = (t2 - t1) * 1000.0;
print("\nTime needed for process:", elapsedMsec, "Milliseconds.");
input()
|
StarcoderdataPython
|
1626222
|
<reponame>mikemartino/cookbook
from recipebook.cookbook import Cookbook
from recipebook.ingredient import Ingredient
from recipebook.recipe import Recipe, Time
def main():
cookbook = Cookbook()
print(cookbook.table_of_contents.pretty_print())
# cookbook.recipes.append(Recipe("Chickpea Burgers", Time(45, Time.Unit.MINUTES)))
# cookbook.recipes.append(Recipe("Black Bean Burgers", Time(45, Time.Unit.MINUTES)))
# cookbook.recipes.append(Recipe("Homemade Sesame Seed Hamburger Buns", Time(2, Time.Unit.HOURS)))
save_a_recipe(cookbook)
print(cookbook.table_of_contents.pretty_print())
for recipe in cookbook.recipes:
print(recipe)
def save_a_recipe(cookbook: Cookbook) -> Cookbook:
recipe = Recipe("Sweet Potato Hash and Poached Egg", Time(20, Time.Unit.MINUTES))
recipe.ingredients = [
Ingredient("Olive oil", 1, Ingredient.UnitOfMeasurement.Tablespoons),
Ingredient("Large sweet potato", 0.5, Ingredient.UnitOfMeasurement.NA),
Ingredient("Medium onion", 0.5, Ingredient.UnitOfMeasurement.NA),
Ingredient("Egg", 1, Ingredient.UnitOfMeasurement.NA),
Ingredient("Garlic powder", 0.5, Ingredient.UnitOfMeasurement.Teaspoons),
Ingredient("Smoked paprika", 0.5, Ingredient.UnitOfMeasurement.Teaspoons),
Ingredient("Black pepper", 0.25, Ingredient.UnitOfMeasurement.Teaspoons),
Ingredient("Water", 1, Ingredient.UnitOfMeasurement.Tablespoons)
]
recipe.instructions = [
"Put the burner on medium heat.",
"Put the olive oil in the pan.",
"Shred the onion and sweet potato using a cheese grater.",
"Cook the onion and sweet potato mix for 5 to 7 minutes.",
"Form the hash into a circle, making the edges slightly higher than the centre.",
"Crack the egg into the centre of the hash.",
"Sprinkle garlic powder, smoked paprika, and black pepper over the mix.",
"Add a little water to the pan to steam the egg. Cover pan with lid and cook for 5 minutes.",
"Continue to cook egg and hash until it's reached a consistency of your liking."
]
cookbook.recipes.append(recipe)
return cookbook
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
4804958
|
<gh_stars>1-10
from django import forms
from djangoProject1.polls.models import Profile
class AddProfileForm(forms.ModelForm):
MAX_LENGTH = 30
first_name = forms.CharField(max_length=15, label="First Name", widget=forms.TextInput(attrs={
'id': "id_first_name", "type": "text", "name": "first_name", "max_length": 15
}))
second_name = forms.CharField(max_length=15, label="Last Name", widget=forms.TextInput(attrs={
'id': "id_last_name", "type": "text", "name": "last_name", "max_length": 15
}))
age = forms.IntegerField(label="Age", widget=forms.NumberInput(attrs={
'id': "id_age", "type": "number", "name": "age",
}))
image_url = forms.URLField(label="Link to Profile Image", widget=forms.URLInput(attrs={
'id': "id_image", "type": "url", "name": "profile_image",
}))
class Meta:
model = Profile
fields = "__all__"
|
StarcoderdataPython
|
3223447
|
<reponame>mazurbeam/django-cities
from django.contrib.gis.db import models
class AlternativeNameManager(models.Manager):
def get_queryset(self):
return super(AlternativeNameManager, self).get_queryset().exclude(kind='link')
|
StarcoderdataPython
|
3246205
|
def wordBreakCount(dictionary, txt):
txtl = len(txt)
maxw = 0
d = dict()
lens = set()
for word in dictionary:
wlen = len(word)
maxw = max(wlen, maxw)
if wlen not in d:
d[wlen] = []
d[wlen].append(word)
lens.add(wlen)
print(txtl, d, lens, maxw)
dp = [0] * (txtl + 1)
dp[0] = 1
dp[1] = 1
for i in range(2, txtl + 1):
count = 0
for j in lens:
if i - j - 1 >= 0:
print(i, j, i - j, txt[i - j - 1:i - 1])
if txt[i - j - 1:i - 1] in d[j]:
count += dp[i - j]
print("*", count)
dp[i] = count
print(dp)
return dp[txtl]
import pytest
@pytest.mark.parametrize("dictionary,txt,expected", [
([
"kick",
"start",
"kickstart",
"is",
"awe",
"some",
"awesome",
],
"kickstartisawesome", 4),
])
def test_edit_distance(dictionary, txt, expected):
assert(wordBreakCount(dictionary, txt) == expected)
pytest.main()
|
StarcoderdataPython
|
3346722
|
<reponame>Feuoy/campus-network-login<filename>signIn.py
# coding:utf-8
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from PIL import Image
import pandas as pd
import time
import identifyRandcode
# import getValidAccount
class Cn_SignIn():
"""
    This class simulates signing in to the campus network.
"""
def __init__(self, username, password, current_ip):
""" __init__
:param username:
:param password:
:param randcode:
        :param current_ip: current IP of this machine (check with cmd // ipconfig), e.g. 10.19.170.01
        :param url: login URL, built using current_ip
"""
self.username = username
self.password = password
self.randcode = 1000
self.current_ip = current_ip
self.url = ''
def __main__(self):
"""
__main__
"""
# self.username = input("请输入学号:")
# self.password = input("请输入密码:")
self.randcode = 1000
# self.current_ip = input("请输入当前IP地址:")
self.url = "http://enet.10000.gd.cn:10001/login.jsp?wlanuserip=" \
+ self.current_ip + "&wlanacip=192.168.127.12"
# self.username = ''
# self.password = ''
# self.randcode = 1000
# self.current_ip = ''
# self.url = ''
bool_sign_in = self.sign_in()
return bool_sign_in
def cut_picture(self, img1_path, img2_path):
"""
        Crop the image.
        :param img1_path: path of the image to be cropped
        :param img2_path: path where the cropped image is saved
"""
        # pixel boundaries of the captcha area on the page
left = int(800)
top = int(225)
right = int(885)
bottom = int(255)
        # crop the image
im = Image.open(img1_path)
im = im.crop((left, top, right, bottom))
im.save(img2_path)
def transfer_picture_format(self, img1_path, img2_path):
"""
        Convert the image format.
        :param img1_path: path of the original PNG image
        :param img2_path: path where the converted JPG image is saved
"""
        # convert PNG to JPG
img = Image.open(img1_path)
bg = Image.new("RGB", img.size, (255, 255, 255))
bg.paste(img, img)
bg.save(img2_path)
def sign_in(self):
"""通过webdriver模拟登入
"""
        # start the webdriver
        driver = webdriver.Chrome()
        # open the campus network login page first
        driver.get(self.url)
        # optionally open Baidu in another window to check that the connection works
        # driver.execute_script('window.open("https://www.baidu.com/")')
        # take a screenshot of the whole campus network login page
        driver.get_screenshot_as_file('image/screenshot_PNG.png')
        # crop the screenshot
        self.cut_picture('image/screenshot_PNG.png', 'image/screenshot_randcode_PNG.png')
        # convert the image format
        self.transfer_picture_format("image/screenshot_randcode_PNG.png", "image/screenshot_randcode_JPG.jpg")
        # recognize the image to get the captcha
        self.randcode = identifyRandcode.identify_randcode('image/screenshot_randcode_JPG.jpg',
                                                           'image/screenshot_randcode_JPG_adjusted.jpg')[0:4]
        # fill in the account, password and captcha
        driver.find_elements_by_xpath('//*[@id="userName1"]')[0].send_keys(self.username)
        driver.find_elements_by_xpath('//*[@id="password1"]')[0].send_keys(self.password)
        driver.find_elements_by_xpath('//*[@id="rand"]')[0].send_keys(self.randcode)
        # click the login button
        driver.find_element_by_xpath('//*[@id="login1"]/table/tbody/tr/td/table[1]/tbody/tr[4]/td[2]/img').click()
        time.sleep(1)
        # check whether the login succeeded
if driver.current_url == 'http://enet.10000.gd.cn:10001/success.jsp':
            print(self.username + ' logged in successfully')
else:
driver.close()
            print(self.username + ' login failed')
return 'login failure'
        # # close the window
# driver.close()
return driver
def sign_out(self, driver):
if type(driver) == type("aa"):
return
        # click the logout button
        driver.find_element_by_xpath('//*[@id="logout"]').click()
        # # press Enter to confirm
        # driver.find_element_by_id('kw').send_keys(Keys.ENTER)
        # handle the confirmation popup
        alert = driver.switch_to_alert()
        time.sleep(1)  # must wait here
        print(alert.text)  # print the content of the alert dialog
alert.accept()
time.sleep(1)
        # check whether the logout succeeded
if driver.current_url == 'http://enet.10000.gd.cn:10001/logoutsuccess.jsp':
            print(self.username + ' logged out successfully')
driver.close()
else:
            print(self.username + ' logout failed')
driver.close()
return "end"
if __name__ == '__main__':
    username = input("Enter student ID: ")
    password = input("Enter password: ")
    current_ip = input("Enter current IP address: ")
    signIn = Cn_SignIn(username, password, current_ip)
    driver = signIn.__main__()
    end = signIn.sign_out(driver)
    print(end)
|
StarcoderdataPython
|
93997
|
from setuptools import setup
setup(
name="medicus",
version="0.1",
packages=["medicus"]
)
|
StarcoderdataPython
|
151039
|
<filename>home/hairygael/InMoov2.minimalTorso.py
#file : InMoov2.minimalTorso.py
# this will run with versions of MRL 1.0.107
# a very minimal script for InMoov
# although this script is very short you can still
# do voice control of a right Arm
# for any command which you say - you will be required to say a confirmation
# e.g. you say -> test stomach, InMoov will ask -> "Did you say test stomach?", you will need to
# respond with a confirmation ("yes","correct","yeah","ya")
from java.lang import String
from org.myrobotlab.service import Runtime
import urllib2
import os
# To set a directory
# Modify this line according to your directory and version of MRL
os.chdir("C:/myrobotlab/myrobotlab.1.0.107/audioFile/google/en_gb/audrey")
# the name of the local file
# remove the file if it already exist in the Audiofile directory
soundfilename="starting mouth.mp3";
try:
mp3file = urllib2.urlopen('http://www.inmoov.fr/wp-content/uploads/2015/05/starting-mouth.mp3')
output = open(soundfilename,'wb')
output.write(mp3file.read())
output.close()
except IOError:
print "Check access right on the directory"
except Exception:
print "Can't get the sound File ! Check internet Connexion"
leftPort = "COM20" #modify port according to your board
i01 = Runtime.createAndStart("i01", "InMoov")
i01.startEar()
mouth = Runtime.createAndStart("mouth","Speech")
i01.startMouth()
##############
torso = i01.startTorso("COM20") #modify port according to your board
# tweaking default torso settings
torso.topStom.setMinMax(0,180)
torso.topStom.map(0,180,67,110)
torso.midStom.setMinMax(0,180)
torso.midStom.map(0,180,60,120)
#torso.lowStom.setMinMax(0,180)
#torso.lowStom.map(0,180,60,110)
#torso.topStom.setRest(90)
#torso.midStom.setRest(90)
#torso.lowStom.setRest(90)
#################
# verbal commands
ear = i01.ear
ear.addCommand("attach everything", "i01", "attach")
ear.addCommand("disconnect everything", "i01", "detach")
ear.addCommand("attach torso", "i01.torso", "attach")
ear.addCommand("disconnect torso", "i01.torso", "detach")
ear.addCommand("rest", "python", "rest")
ear.addCommand("capture gesture", ear.getName(), "captureGesture")
ear.addCommand("manual", ear.getName(), "lockOutAllGrammarExcept", "voice control")
ear.addCommand("voice control", ear.getName(), "clearLock")
ear.addCommand("test your stomach", "python", "teststomach")
ear.addComfirmations("yes","correct","ya","yeah", "yes please", "yes of course")
ear.addNegations("no","wrong","nope","nah","no thank you", "no thanks")
ear.startListening()
def teststomach():
i01.setTorsoSpeed(0.75,0.55,0.75)
i01.moveTorso(90,90,90)
sleep(2)
i01.moveTorso(45,90,90)
sleep(4)
i01.moveTorso(90,90,90)
sleep(2)
i01.moveTorso(135,90,90)
sleep(4)
i01.moveTorso(90,90,90)
sleep(2)
i01.moveTorso(90,45,90)
sleep(3)
i01.moveTorso(90,135,90)
sleep(3)
i01.moveTorso(90,90,45)
sleep(3)
i01.moveTorso(90,90,135)
sleep(3)
|
StarcoderdataPython
|
3399520
|
<reponame>k1lgor/linux-update
#!/bin/python3
from time import sleep
import os
def update():
"""Update method"""
print('''
=====================
Updating has begun...
=====================
''')
os.system("apt update && apt dist-upgrade -y")
sleep(2)
os.system("apt autoclean && apt autoremove -y")
sleep(2)
# removes all leftover config files
os.system("apt purge $(dpkg -l | grep '^rc' | awk '{print$2}')")
sleep(2)
os.system("echo 3 > /proc/sys/vm/drop_caches")
if __name__ == '__main__':
if os.geteuid() != 0:
exit('''
ERROR ==============
ERROR Run it as root
ERROR ==============
''')
else:
update()
print('''
========================
Updating has finished...
========================
''')
|
StarcoderdataPython
|
3208705
|
<reponame>KinmanCovey/fifth-row-py<filename>fifth-row-test.py
#!/usr/bin/env python
import unittest, fifthrow
from fifthrow.fifthrow import *
class FifthRowTest(unittest.TestCase):
def test_sandboxed_url(self):
'''
FifthRow object's url should be the sandboxed url.
'''
self.assertEqual(FifthRow(sandbox=True).url, 'sandbox.the5throw.com')
def test_sandboxed_token(self):
'''
A sandboxed FifthRow should have a token of 'sandbox'
regardless of token parameter.
'''
self.assertEqual(FifthRow(sandbox=True, token=5555).token, 'sandbox')
def test_none_token(self):
'''
TokenError should be raised if token != an integer.
'''
self.assertRaises(TokenError, FifthRow, sandbox=False, token=None)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
55094
|
from components.base.ecu.types.impl_ecu_simple import SimpleECU
from components.base.ecu.software.ecu_software import ECUSoftware
from components.security.ecu.software.impl_app_layer_secure import SecureApplicationLayer
from layers.impl_comm_module_my_protocol import MyProtocolCommModule
class MyProtocolECU(SimpleECU):
def __init__(self, sim_env=None, ecu_id=None, data_rate=None, size_sending_buffer=None, size_receive_buffer=None):
''' Constructor
Input: sim_env simpy.Environment environment in which this ECU lives
ecu_id string id of this ECU component
data_rate integer data_rate of the connected bus
size_sending_buffer float size of the sending buffer of this ECU
size_receive_buffer float size of the receiving buffer of this ECU
Output: -
'''
# set settings
self.set_settings()
if sim_env == None: return
# set SW and HW
SimpleECU.__init__(self, sim_env, ecu_id, data_rate, size_sending_buffer, size_receive_buffer)
self.ecuSW = ECUSoftware(sim_env, MyProtocolCommModule(sim_env, ecu_id), SecureApplicationLayer(sim_env, ecu_id))
# connect
self._connect_hw_sw()
def add_sending(self, start_time, interval, message_id, data, data_len):
''' this method adds a new sending action to the application layer of this
ECU. Then the message will start sending messages in the defined interval
starting at the specified start_time
Input: start_time float time at which the first message is sent
interval float period within which the messages are sent
message_id integer message identifier of the messages that are sent
data object/.. content of the messages that are sent
data_length float size of one message
Output: -
'''
self.ecuSW.app_lay.add_sending(start_time, interval, message_id, data, data_len)
def get_type_id(self):
''' returns the id of this ECU type
Input: -
Output: ecu_type string type of this ECU; e.g.'TLSECU'
'''
return "MyProtocolECU"
def add_stream(self, new_stream):
''' this method adds a new stream that is allowed to the TESLA environment.
This stream will then be legal and the ECUs will send according to those
streams.
Input: new_stream MessageStream message stream that is added to the environment
Output: -
'''
# push to communication module
self.ecuSW.comm_mod.add_stream(new_stream)
# add HW filter
if self.ecu_id in new_stream.receivers and \
new_stream.message_id not in self._allowed_streams:
self._allowed_streams += [new_stream.message_id]
self.ecuHW.transceiver.install_filter(self._allowed_streams)
def set_max_message_number(self, nr_messages):
''' sets the number of messages that are sent by this ecu per
stream
Input: nr_messages int number of messages sent
Output: -
'''
self.ecuSW.app_lay.set_max_message_number(nr_messages)
def set_settings(self):
''' sets the initial setting association between the settings variables
and the actual parameter
Input: -
Output: -
'''
self.settings = {}
return self.settings
def monitor_update(self):
''' returns a list of monitor inputs
Input: -
Output: list list list of MonitorInput objects
'''
return self.ecuSW.comm_mod.monitor_update()
'''class StdTLSECUTimingFunctions(object):
def __init__(self, main_library_tag='CyaSSL'):
self.available_tags = ['CyaSSL', 'Crypto_Lib_HW', 'Crypto_Lib_SW']
self.library_tag = main_library_tag # e.g. CyaSSL, or CryptoLib
self.function_map = {}
# Record Layer
self.function_map['t_tls_record_compression'] = self.c_t_tls_record_compression
self.function_map['t_tls_record_decompression'] = self.c_t_tls_record_decompression
def get_function_map(self):
return self.function_map
def c_t_timing_function_1(self, msg_size, compr_alg):
if compr_alg == CompressionMethod.NULL:
return 0
return 0
def c_t_timing_function_2(self, compressed_msg_size, compr_alg):
if compr_alg == CompressionMethod.NULL:
return 0
return 0
'''
|
StarcoderdataPython
|
4813804
|
#################################################################################################
# Visual object tracking in panoramic video
# Master thesis at Brno University of Technology - Faculty of Information Technology
# Author: <NAME> (<EMAIL>)
# Supervisor: Doc. Ing. <NAME>, Ph.D.
# Module: evaluation.py
# Description: Evaluation of single object trackers in custom groundtruth dataset.
# Drawing groundtruth+result bounding boxes or computing metrics.
#################################################################################################
from cv2 import cv2
import numpy as np
import sys
import glob
import os
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from code.boundingbox import Parser
from code.boundingbox import BoundingBox
class Evaluation:
"""
Evaluation of single object trackers in custom groundtruth dataset.
Drawing groundtruth+result bounding boxes or computing metrics.
"""
def __init__(self, path: str, groundtruthPath: str, resultPath: str):
# list of annotated groundtruth bounding box objects
self.gt_bounding_boxes = []
        # list of tracker result bounding boxes
        self.result_bounding_boxes = []
# path of video file or directory with *.jpg images
self.path = path
# path of file with groundtruth data
self.groundtruth_path = groundtruthPath
        # path of file with the tracking result data
self.result_path = resultPath
# enable parsing/creating methods
self.parser = Parser()
self.video = None
self.video_width = None
self.video_height = None
# constants for sizes and positions of opencv rectangles and texts
self.RECTANGLE_BORDER_PX = 2
self.FONT_SCALE = 0.75
self.FONT_WEIGHT = 2
self.TEXT_ROW1_POS = (20,30)
self.TEXT_ROW2_POS = (20,60)
self.TEXT_ROW2_POS2 = (280,60)
self.TEXT_ROW3_POS = (20,90)
self.TEXT_ROW3_POS2 = (280,90)
self.WINDOW_NAME = "Evaluation"
def loadInit(self):
"""Method for loading video, groundtruth and result data"""
# Read video
self.video = cv2.VideoCapture(self.path)
# Exit if video not opened.
if not self.video.isOpened():
print("Error - Could not open video")
sys.exit(-1)
# store video width/height to variables
self.video_width = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH))
self.video_height = int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Read and parse existing groundtruth file
if not(os.path.exists(self.groundtruth_path)):
print("Error - Could not read a groundtruth file")
sys.exit(-1)
# Read and parse existing tracking result file
if not(os.path.exists(self.result_path)):
print("Error - Could not read a tracking result file")
sys.exit(-1)
# list of annotated bounding box objects
self.gt_bounding_boxes = []
# list of tracking result bounding box objects
self.result_bounding_boxes = []
# parsing groundtruth and result files
self.gt_bounding_boxes = self.parser.parseGivenDataFile(self.groundtruth_path, self.video_width)
self.result_bounding_boxes = self.parser.parseGivenDataFile(self.result_path, self.video_width)
############################################################
################ Intersection over Union ###################
############################################################
def computeIntersectionOverUnion(self):
"""
Method for computing IoU metric between groundtruth and result bounding boxes
Intersection over Union is an evaluation metric used to measure the accuracy/success of an object tracker/detector
"""
if len(self.gt_bounding_boxes) == len(self.result_bounding_boxes):
iou_string = ""
# loop in bounding_boxes lists
for idx in range(len(self.gt_bounding_boxes)):
gt_bbox = self.gt_bounding_boxes[idx]
result_bbox = self.result_bounding_boxes[idx]
# check if ground truth is not nan (occlusion) -> ignore occluded frames
if gt_bbox.point1 and gt_bbox.point2:
iou = self.intersectionOverUnion(gt_bbox, result_bbox)
# store iou results to list
iou_string += str(iou) + "\n"
# saving file on drive
saveFilePath = self.result_path.replace(".txt", "-iou.txt")
newFile = open(saveFilePath, "w")
newFile.write(iou_string)
newFile.close()
print("File '" + saveFilePath + "' has been created.")
self.video.release()
# inspired and modified from https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
def intersectionOverUnion(self, bboxA: BoundingBox, bboxB: BoundingBox):
"""Method for computing IoU metric between 2 given bounding boxes"""
if bboxA.point1 and bboxA.point2 and bboxB.point1 and bboxB.point2:
# bboxA and bboxB have valid coordinates
# determine the (x,y)-coordinates of the intersection rectangle
left_top_x = max(bboxA.get_point1_x(), bboxB.get_point1_x())
left_top_y = max(bboxA.get_point1_y(), bboxB.get_point1_y())
# not using point2 directly for right_bottom
# because point1 could be on right border, and point2 could be on left border of image
right_bottom_x = min(bboxA.get_point1_x() + bboxA.get_width(), bboxB.get_point1_x() + bboxB.get_width())
right_bottom_y = min(bboxA.get_point1_y() + bboxA.get_height(), bboxB.get_point1_y() + bboxB.get_height())
# compute the area of intersection rectangle (inc +1 because of zero indexing in pixels coordinates)
intersection_area = max(0, right_bottom_x - left_top_x + 1) * max(0, right_bottom_y - left_top_y + 1)
# compute the area of both the prediction and ground-truth rectangles
bboxA_area = bboxA.get_width() * bboxA.get_height()
bboxB_area = bboxB.get_width() * bboxB.get_height()
# compute the intersection over union by taking the intersection area
# and dividing it by the sum of result + ground-truth areas - the interesection area
iou = intersection_area / float(bboxA_area + bboxB_area - intersection_area)
# possible fix because of previous float rounding - max iou is 1.0
if iou > 1.0:
iou = 1.0
return iou
else:
# tracker failures
return 0.0
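    # Worked example (illustrative): ignoring the +1 inclusive-pixel correction
    # applied above, two 100x100 boxes offset by 50 px horizontally intersect in
    # a 50x100 region, so IoU = 5000 / (10000 + 10000 - 5000) ≈ 0.33.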
############################################################
################ Euclidian distance (L2 norm) ##############
############################################################
def computeCenterError(self):
"""
Method for computing Location error metric between groundtruth and result bounding boxes centers
Location error is an evaluation metric used to measure the accuracy of an object tracker/detector
"""
if len(self.gt_bounding_boxes) == len(self.result_bounding_boxes):
center_error_string = ""
# loop in bounding_boxes lists
for idx in range(len(self.gt_bounding_boxes)):
gt_bbox = self.gt_bounding_boxes[idx]
result_bbox = self.result_bounding_boxes[idx]
# check if ground truth is not Nan (occlusion) -> ignore occluded frames
if gt_bbox.point1 and gt_bbox.point2:
center_error = self.centerError(gt_bbox, result_bbox)
center_error_string += str(center_error) + "\n"
# saving file on drive
saveFilePath = self.result_path.replace(".txt", "-centererror.txt")
newFile = open(saveFilePath, "w")
newFile.write(center_error_string)
newFile.close()
print("File '" + saveFilePath + "' has been created.")
self.video.release()
def centerError(self, bboxA: BoundingBox, bboxB: BoundingBox):
"""
Method for computing Euclidian distance between 2 given bounding boxes
This method also normalizes distance according to video height (our dataset varies a lot in resolution -smallest 720p, highest 2160p)
"""
if bboxA.point1 and bboxA.point2 and bboxB.point1 and bboxB.point2:
# centers for distance from left point to right (modulo by video width)
centerA1 = np.array([(bboxA.get_point1_x() + bboxA.get_width()/2) % self.video_width, bboxA.get_point1_y() + bboxA.get_height()/2])
centerB1 = np.array([(bboxB.get_point1_x() + bboxB.get_width()/2) % self.video_width, bboxB.get_point1_y() + bboxB.get_height()/2])
# centers for distance from right point to left (possible in equirectangular panorama)
# center bboxA x could be on video_width-100, center bboxB x could be on 100 (distance is 200 in equirectangular)
centerA2 = np.array([])
centerB2 = np.array([])
if centerA1[0] < centerB1[0]:
# e.g. center bboxA x is 100, center bboxB x is video_width - 100
centerA2 = np.array([self.video_width + centerA1[0], centerA1[1]])
centerB2 = np.array([centerB1[0], centerB1[1]])
else:
# e.g. center bboxA x is video_width - 100, center bboxB x is 100
centerA2 = np.array([centerA1[0], centerA1[1]])
centerB2 = np.array([self.video_width + centerB1[0], centerB1[1]])
            ################################## Stack Overflow attribution #######################################
            # https://stackoverflow.com/questions/1401712/how-can-the-euclidean-distance-be-calculated-with-numpy
            # Asked by <NAME>: https://stackoverflow.com/users/1084/nathan-fellman
            # Answered by u0b34a0f6ae: https://stackoverflow.com/users/137317/u0b34a0f6ae
            # Euclidean distance is the L2 norm
euclidian_dist1 = np.linalg.norm(centerA1 - centerB1)
euclidian_dist2 = np.linalg.norm(centerA2 - centerB2)
euclidian_dist = euclidian_dist1 if euclidian_dist1 < euclidian_dist2 else euclidian_dist2
# our dataset varies a lot in resolution (smallest 720p, highest 2160p)
SMALLEST_VIDEO_HEIGHT = 720
            correct_ratio = self.video_height / SMALLEST_VIDEO_HEIGHT
# normalize it for correct plots
euclidian_dist = euclidian_dist / correct_ratio
# possible fix because of previous float rounding - min center error is 0
if euclidian_dist < 0:
euclidian_dist = 0
            # return the normalized center error value
return euclidian_dist
else:
# precision plots got X range (0,51) - 100px should define tracker failure quite well
MAX_ERROR = 100
return MAX_ERROR
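    # Worked example of the equirectangular wrap-around handling above (hypothetical
    # numbers): with video_width = 2000, a groundtruth center at x = 1950 and a result
    # center at x = 50 are only 100 px apart across the left/right seam, not 1900 px:
    #   direct distance:  |1950 - 50|          = 1900
    #   wrapped distance: |(2000 + 50) - 1950| = 100
    # The smaller of the two is kept and then divided by the resolution ratio.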
############################################################
############## Displaying results + groundtruth ############
############################################################
def runVideo(self):
"""Method for running video and drawing groundtruth + result bounding boxes"""
        # resize window (let's define the max width as 1600px)
if self.video_width < 1600:
cv2.namedWindow(self.WINDOW_NAME)
else:
cv2.namedWindow(self.WINDOW_NAME, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
whRatio = self.video_width / self.video_height
if whRatio == 2:
# pure equirectangular 2:1
cv2.resizeWindow(self.WINDOW_NAME, 1600, 800)
else:
# default 16:9
cv2.resizeWindow(self.WINDOW_NAME, 1600, 900)
scaleFactor = self.video_width / 1600
self.RECTANGLE_BORDER_PX = int(self.RECTANGLE_BORDER_PX * scaleFactor)
self.FONT_SCALE = self.FONT_SCALE * scaleFactor
self.FONT_WEIGHT = int(self.FONT_WEIGHT * scaleFactor) + 1
self.TEXT_ROW1_POS = (int(self.TEXT_ROW1_POS[0] * scaleFactor), int(self.TEXT_ROW1_POS[1] * scaleFactor))
self.TEXT_ROW2_POS = (int(self.TEXT_ROW2_POS[0] * scaleFactor), int(self.TEXT_ROW2_POS[1] * scaleFactor))
self.TEXT_ROW2_POS2 = (int(self.TEXT_ROW2_POS2[0] * scaleFactor), int(self.TEXT_ROW2_POS2[1] * scaleFactor))
self.TEXT_ROW3_POS = (int(self.TEXT_ROW3_POS[0] * scaleFactor), int(self.TEXT_ROW3_POS[1] * scaleFactor))
self.TEXT_ROW3_POS2 = (int(self.TEXT_ROW3_POS2[0] * scaleFactor), int(self.TEXT_ROW3_POS2[1] * scaleFactor))
# prints just basic guide and info
print("----------------------------------------------------")
print("This script shows groundtruth and also tracker results bounding boxes of particular objects for purpose of visual object tracking evaluation")
print("Press 'Esc' or 'Q' key to exit")
print("----------------------------------------------------")
# FPS according to the original video
fps = self.video.get(cv2.CAP_PROP_FPS)
# fps = 30
        # calculate the interval between frames.
interval = int(1000/fps)
# counter of frames
currentFrame = 0
# Just read first frame for sure
ok, frame = self.video.read()
if not ok:
print("Error - Could not read a video file")
self.video.release()
cv2.destroyAllWindows()
sys.exit(-1)
# keep looping until end of video, or until 'q' or 'Esc' key pressed
while True:
if currentFrame > 0:
# Read a new frame
ok, frame = self.video.read()
if not ok:
break
# increment counter of frames
currentFrame += 1
# video might be longer than groundtruth annotations
if currentFrame <= len(self.gt_bounding_boxes):
gt_bb = self.gt_bounding_boxes[currentFrame - 1]
# show annotations
if gt_bb and gt_bb.is_annotated:
pt1 = gt_bb.point1
pt2 = gt_bb.point2
if (gt_bb.is_on_border()):
# draw two rectangles around the region of interest
rightBorderPoint = (self.video_width - 1, pt2[1])
cv2.rectangle(frame, pt1, rightBorderPoint, (0, 255, 0), self.RECTANGLE_BORDER_PX)
leftBorderPoint = (0, pt1[1])
cv2.rectangle(frame, leftBorderPoint, pt2, (0, 255, 0), self.RECTANGLE_BORDER_PX)
else:
# draw a rectangle around the region of interest
cv2.rectangle(frame, pt1, pt2, (0, 255, 0), self.RECTANGLE_BORDER_PX)
if currentFrame <= len(self.result_bounding_boxes):
res_bb = self.result_bounding_boxes[currentFrame - 1]
# show annotations
if res_bb and res_bb.is_annotated:
pt1 = res_bb.point1
pt2 = res_bb.point2
if (res_bb.is_on_border()):
# draw two rectangles around the region of interest
rightBorderPoint = (self.video_width - 1, pt2[1])
cv2.rectangle(frame, pt1, rightBorderPoint, (255, 0, 0), self.RECTANGLE_BORDER_PX)
leftBorderPoint = (0, pt1[1])
cv2.rectangle(frame, leftBorderPoint, pt2, (255, 0, 0), self.RECTANGLE_BORDER_PX)
else:
# draw a rectangle around the region of interest
cv2.rectangle(frame, pt1, pt2, (255, 0, 0), self.RECTANGLE_BORDER_PX)
# display (annotated) frame
# print("Frame #" + str(currentFrame))
cv2.putText(frame, "Frame #" + str(currentFrame), (20,30), cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE, (250, 250, 0), self.FONT_WEIGHT)
cv2.putText(frame, "Groundtruth (green)", self.TEXT_ROW2_POS, cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE, (0, 250, 0), self.FONT_WEIGHT)
cv2.putText(frame, ": " + self.parser.bboxString(gt_bb), self.TEXT_ROW2_POS2, cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE, (0, 250, 0), self.FONT_WEIGHT)
cv2.putText(frame, "Tracker result (blue)", self.TEXT_ROW3_POS, cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE, (250, 0, 0), self.FONT_WEIGHT)
cv2.putText(frame, ": " + self.parser.bboxString(res_bb), self.TEXT_ROW3_POS2, cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE, (250, 0, 0), self.FONT_WEIGHT)
cv2.imshow(self.WINDOW_NAME, frame)
# Exit if ESC or Q pressed
k = cv2.waitKey(interval) & 0xff
if k == 27 or k == ord('q'):
break
self.video.release()
cv2.destroyAllWindows()
|
StarcoderdataPython
|
3324676
|
<gh_stars>0
"""
:author: <NAME>
"""
from typing import Union, Optional, Sequence, Tuple, Set
NargsValue = Union[str, int, Tuple[int, Optional[int]], Sequence[int], Set[int], range]
NARGS_STR_RANGES = {'?': (0, 1), '*': (0, None), '+': (1, None)}
SET_ERROR_FMT = 'Invalid nargs={!r} set - expected non-empty set where all values are integers >= 0'
SEQ_ERROR_FMT = 'Invalid nargs={!r} sequence - expected 2 ints where 0 <= a <= b or b is None'
class Nargs:
"""
Helper class for validating the number of values provided for a given :class:`~.parameters.Parameter`. Unifies the
handling of different ways of specifying the required number of values.
Acceptable values include ``?``, ``*``, and ``+``, and they have the same meaning that they have in argparse.
Additionally, integers, a range of integers, or a set/tuple of integers are accepted for more specific requirements.
"""
def __init__(self, nargs: NargsValue):
self._orig = nargs
self.range = None
if isinstance(nargs, int):
if nargs < 0:
raise ValueError(f'Invalid nargs={nargs!r} integer - must be >= 0')
self.min = self.max = nargs
self.allowed = (nargs,)
elif isinstance(nargs, str):
try:
self.min, self.max = self.allowed = NARGS_STR_RANGES[nargs]
except KeyError as e:
raise ValueError(f'Invalid nargs={nargs!r} string - expected one of ?, *, or +') from e
elif isinstance(nargs, range):
if not 0 <= nargs.start < nargs.stop or nargs.step < 0:
raise ValueError(f'Invalid nargs={nargs!r} range - expected positive step and 0 <= start < stop')
self.range = nargs
self.allowed = nargs
self.min = nargs.start
# As long as range.start < range.stop and range.step > 0, it will yield at least 1 value
self.max = next(reversed(nargs)) # simpler than calculating, especially for step!=1
elif isinstance(nargs, set):
if not nargs:
raise ValueError(SET_ERROR_FMT.format(nargs))
elif not all(isinstance(v, int) for v in nargs):
raise TypeError(SET_ERROR_FMT.format(nargs))
self.allowed = self._orig = frozenset(nargs) # Prevent modification after init
self.min = min(nargs)
if self.min < 0:
raise ValueError(SET_ERROR_FMT.format(nargs))
self.max = max(nargs)
elif isinstance(nargs, Sequence):
try:
self.min, self.max = self.allowed = a, b = nargs
except (ValueError, TypeError) as e:
raise e.__class__(SEQ_ERROR_FMT.format(nargs)) from e
if not (isinstance(a, int) and (b is None or isinstance(b, int))):
raise TypeError(SEQ_ERROR_FMT.format(nargs))
elif 0 > a or (b is not None and a > b):
raise ValueError(SEQ_ERROR_FMT.format(nargs))
else:
raise TypeError(f'Unexpected type={nargs.__class__.__name__} for nargs={nargs!r}')
self.variable = self.min != self.max
def __repr__(self) -> str:
return f'{self.__class__.__name__}({self._orig!r})'
def __str__(self) -> str:
rng = self.range
if rng is not None:
return f'{rng.start} ~ {rng.stop}' if rng.step == 1 else f'{rng.start} ~ {rng.stop} (step={rng.step})'
elif self.max is None:
return f'{self.min} or more'
elif self.min == self.max:
return str(self.min)
elif isinstance(self.allowed, frozenset):
return '{{{}}}'.format(','.join(map(str, sorted(self.allowed))))
else:
return f'{self.min} ~ {self.max}'
def __contains__(self, num: int) -> bool:
"""See :meth:`.satisfied`"""
return self.satisfied(num)
def __eq__(self, other: Union['Nargs', int]) -> bool:
if isinstance(other, Nargs):
if self.max is None:
return other.max is None and self.min == other.min
elif other.max is None:
return False
elif isinstance(other._orig, type(self._orig)):
return self.allowed == other.allowed
# After this point, the allowed / range attribute types cannot match because the originals did not match
elif isinstance(self.allowed, frozenset):
return self._compare_allowed_set(other)
elif isinstance(other.allowed, frozenset):
return other._compare_allowed_set(self)
rng = self.range or other.range
if rng:
return self.min == other.min and self.max == other.max and rng.step == 1
else:
return self.min == other.min and self.max == other.max
elif isinstance(other, int):
return self.min == self.max == other
else:
return NotImplemented
def _compare_allowed_set(self, other: 'Nargs') -> bool:
"""
Used internally to determine whether 2 Nargs instances are equivalent when they were initialized with different
types of arguments.
"""
allowed = self.allowed
if other.range is not None:
try_all = other.min in allowed and other.max in allowed
return try_all and all(v in allowed for v in other.range) # less mem than large set(other.range)
else:
return allowed == set(other.allowed)
def __hash__(self) -> int:
return hash(self.__class__) ^ hash(self._orig)
def satisfied(self, count: int) -> bool:
"""
Returns True if the minimum number of values have been provided to satisfy the requirements, and if the number
of values has not exceeded the maximum allowed. Returns False if the count is below the minimum or above the
maximum.
For more advanced use cases, such as range or a set of counts, the count must also match one of the specific
numbers provided for this to return True.
"""
if self.max is None:
return count >= self.min
else:
return count in self.allowed
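# A minimal usage sketch (not part of the original module) showing how the accepted
# nargs forms map onto the attributes defined above:
#
#   Nargs(2)               -> min == max == 2; satisfied(2) is True, satisfied(3) is False
#   Nargs('+')             -> min=1, max=None; satisfied(0) is False, satisfied(99) is True
#   Nargs(range(1, 6, 2))  -> allowed counts are 1, 3, 5; satisfied(4) is False
#   Nargs({2, 4})          -> satisfied(3) is False, satisfied(4) is True
#   Nargs((1, None))       -> str() gives '1 or more'; str(Nargs((1, 3))) gives '1 ~ 3'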
|
StarcoderdataPython
|
4810621
|
import abc
class Recorder:
def __init__(self):
self.record = 0
def Record(self):
if self.record == 0:
self.record = 1
def RecordStop(self):
if self.record == 1:
self.record = 0
@abc.abstractmethod
def ProcessRecord(self, value):
pass
|
StarcoderdataPython
|
83680
|
<filename>satchless/image/views.py
from django.http import HttpResponseNotFound
from django.shortcuts import get_object_or_404, redirect
from . import IMAGE_SIZES
from . import models
def thumbnail(request, image_id, size):
image = get_object_or_404(models.Image, id=image_id)
    if size not in IMAGE_SIZES:
return HttpResponseNotFound()
return redirect(models.Thumbnail.objects.get_or_create_at_size(image.id, size))
|
StarcoderdataPython
|
3267056
|
# Generated by Django 2.2.13 on 2020-06-18 17:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites', '0041_remove_action_equivalent_command'),
]
operations = [
migrations.AddField(
model_name='site',
name='admin_comments',
field=models.TextField(blank=True, help_text='Administrative comments. If availability != enabled, this will be shown to the user.'),
),
migrations.AddField(
model_name='site',
name='availability',
field=models.CharField(choices=[('enabled', 'Enabled (fully functional)'), ('not-served', 'Not served publicly'), ('disabled', 'Disabled (not served, only viewable/editable by admins)')], default='enabled', help_text='Controls availability of the site (whether it is served publicly and whether it is editable)', max_length=10),
),
]
|
StarcoderdataPython
|
191131
|
import tkinter as tk
from tkinter import ttk
from pprint import pprint
app = tk.Tk()
# ------------ Label ---------
label1Top = tk.Label(app, text = "Click the Checkbox")
label1Top.pack()
def checkbutton_check():
print('checkbox', var1.get())
label1Top.config(text=var1.get())
# ---------- CheckBox ------------
var1 = tk.BooleanVar()
chb1 = tk.Checkbutton(app, text='Python', variable=var1, onvalue=True, offvalue=False, command=checkbutton_check)
chb1.pack()
app.mainloop()
|
StarcoderdataPython
|
1783563
|
import click
import requests
import pandas
from io import StringIO
import re
import os
class CurrencyCode(click.ParamType):
name = 'symbol'
def convert(self, value, param, ctx):
if value.isalpha():
return value.upper()
self.fail('%s is not a valid symbol' % value, param, ctx)
class PortfolioEntry(click.ParamType):
name = 'portfolio'
def convert(self, value, param, ctx):
if re.match("^[0-9]+x[A-Z]+$", value):
arg_parts = value.split('x')
return dict(
symbol=arg_parts[1],
count=int(arg_parts[0])
)
self.fail('%s does not follow the specified format: <Count>x<Symbol>. e.g. 100xMSFT' % value, param, ctx)
CURRENCY_CODE_TYPE = CurrencyCode()
PORTFOLIO_ENTRY_TYPE = PortfolioEntry()
@click.command()
@click.option('--in', '-i', 'in_currency', type=CURRENCY_CODE_TYPE, default='USD')
@click.argument('portfolios', type=PORTFOLIO_ENTRY_TYPE, nargs=-1)
def cli(portfolios, in_currency):
""" Calcuate the value of stocks in chosen currency
Pass in you portfolio in this format: <Count>x<Symbol>.
For example, if you have 100 Microsoft stock, pass: 100xMSFT.
The script will calculate the value of your portfolio.
You can pass the `--in` parameter to specify a currency to use for the evaluation.
The script will use the latest exchange rate for the day.
\f
:param tuple portfolios: Argument
:param str in_currency: currency used for evaluation"""
api_key = os.environ.get('ALPHAVANTAGE_API_KEY')
if not api_key:
click.echo('AlphaVantage API Key not set. Please set the ALPHAVANTAGE_API_KEY variable', err=True)
raise click.ClickException('AlphaVantage API KEY not set!')
alphavantage = AlphaVantage(api_key)
total_in_usd = 0.0
with click.progressbar(portfolios, label='Fetching the latest stock price', length=len(portfolios)) as pb:
for portfolio in pb:
latest_price = alphavantage.get_latest_stock_value(portfolio['symbol'])
total_in_usd += latest_price * portfolio['count']
click.echo("Total value in USD: {:20,.2f}".format(total_in_usd))
if in_currency != 'USD':
exchange_rate = alphavantage.get_latest_exchange_rate(to_currency=in_currency)
click.echo("Latest exchange rate for {}: {} ".format(in_currency, exchange_rate))
total_in_currency = total_in_usd * exchange_rate
click.echo("Total value in {}: {:20,.2f}".format(in_currency, total_in_currency))
class AlphaVantage:
BASE_URL = "https://www.alphavantage.co/query"
TIME_SERIES_DAILY_FUNC = 'TIME_SERIES_DAILY'
FOREX_FUNC = 'CURRENCY_EXCHANGE_RATE'
def __init__(self, api_key):
self._api_key = api_key
def get_latest_stock_value(self, symbol):
try:
response = requests.get(
self.BASE_URL,
params=dict(
function=self.TIME_SERIES_DAILY_FUNC,
symbol=symbol,
apikey=self._api_key,
datatype='csv'
)
)
except requests.exceptions.RequestException as e:
# TODO: Error handling
return
data_table = pandas.read_csv(StringIO(response.text))
return data_table.iloc[0]['close']
def get_latest_exchange_rate(self, to_currency, from_currency='USD'):
try:
response = requests.get(
self.BASE_URL,
params=dict(
function=self.FOREX_FUNC,
from_currency=from_currency,
to_currency=to_currency,
apikey=self._api_key
)
)
except requests.exceptions.RequestException as e:
# TODO: Error handling
return
# FIXME: Don't like the hard-coded strings here, can we do something better?
return float(response.json()['Realtime Currency Exchange Rate']['5. Exchange Rate'])
if __name__ == '__main__':
cli()
|
StarcoderdataPython
|
4802580
|
#! /usr/bin/env python3
import glob
import jinja2
import re
import os
import subprocess
import sys
import textwrap
import yaml
from typing import List
TEMPLATE = """
vars.http_vhosts["{{hostname}}"] = {
http_uri = "{{ uri }}"
http_vhost = "{{ vhost | default(hostname) }}"
http_ssl = {{ ssl | default('true') }}
{% if expect %}
http_expect = "{{ expect }}"
{% endif %}
}
"""
DEBUG = os.getenv("DEBUG", None)
def get_vhosts() -> List[str]:
vhosts = set()
for filename in glob.glob("/etc/nginx/sites-enabled/*"):
with open(filename, "r") as f:
matches = re.findall("server_name\s+(.+);", f.read())
if not matches:
print("Warning: could not find any vhosts in file: {}".format(filename))
for match in matches:
for server_name in match.split(" "):
vhosts.add(server_name)
for i in read_config()["exclude_vhosts"]:
vhosts.discard(i)
for vhost in list(vhosts):
if "*." in vhost:
# assumption: in a wildcard setup, the hostname _wildcard should be reachable
vhosts.remove(vhost)
vhosts.add(vhost.replace("*.", "_wildcard."))
for vhost in read_config()["vhosts"].keys():
if vhost not in vhosts:
vhosts.add(vhost)
rv = list(vhosts)
rv.sort()
return rv
def read_config() -> dict:
# dummy configuration in case the path can't be resolved
data = {
"vhosts": [],
"exclude_vhosts": [],
}
config_path = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"config.yml"
)
)
try:
with open(config_path, "r") as f:
data = yaml.load(f.read(), Loader=yaml.SafeLoader)
except Exception as e:
print("Warning: could not open configuration file {}: "
"{}".format(config_path, repr(e)),
file=sys.stderr)
return data
def render(vhost: str) -> str:
template = jinja2.Template(TEMPLATE)
vars = {
"hostname": vhost,
"uri": "/",
}
vhosts = read_config()["vhosts"]
# if it's a wildcard host, assume 404 as default status
if "_wildcard." in vhost:
vars["expect"] = 404
if vhost in vhosts:
vars.update(vhosts[vhost])
result = template.render(**vars)
return result
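# Hedged example of what render() produces for a plain vhost (the hostname is
# hypothetical; values follow TEMPLATE and its defaults):
#
#   vars.http_vhosts["example.org"] = {
#   http_uri = "/"
#   http_vhost = "example.org"
#   http_ssl = true
#   }
#
# A "_wildcard." host additionally gets http_expect = "404".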
def main():
vhosts = get_vhosts()
f = None
try:
if DEBUG:
# overwrite f with sys.stdout
f = sys.stdout
else:
dirpath = "/etc/icinga2/conf.d/"
os.makedirs(dirpath, exist_ok=True)
f = open(os.path.join(dirpath, "nginx-vhosts.conf"), "w")
preamble = textwrap.dedent("""\
# autogenerated vhosts list
""")
f.write(preamble)
for vhost in vhosts:
data = render(vhost)
f.write(data)
f.write("\n")
except:
raise
else:
subprocess.check_output("service icinga2 reload".split())
finally:
if not DEBUG:
if f is not None:
f.close()
if __name__ == "__main__":
sys.exit(main())
|
StarcoderdataPython
|
1662622
|
import torch
from torchvision import transforms as T
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def to_same_size(A, focus_map):
    '''
    Input: A - tensor whose spatial size (H, W) is the target size
           focus_map - tensor to be resized to match A
    Output: focus_map center-cropped to the spatial size of A
    '''
A_size = list(A.size())
focus_map = focus_map.squeeze(dim = 0).squeeze(dim = 1).cpu()
focus_map = T.ToPILImage()(focus_map)
crop_obt = T.CenterCrop((A_size[2],A_size[3]))
focus_map = crop_obt(focus_map)
focus_map = T.ToTensor()(focus_map)
focus_map = focus_map.unsqueeze(dim = 0).to(device)
return focus_map
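# Minimal usage sketch (shapes are illustrative, not taken from the original code):
#
#   A = torch.randn(1, 3, 224, 224)          # reference tensor (N, C, H, W)
#   focus_map = torch.randn(1, 1, 240, 240)  # single-channel map to be cropped
#   out = to_same_size(A, focus_map)         # -> shape (1, 1, 224, 224), on `device`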
|
StarcoderdataPython
|
3260745
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This example provides the print_string service, std_srvs::SetBool
import rospy
from std_srvs.srv import SetBool, SetBoolResponse
def stringCallback(req):
    # display the request data
    if req.data:
        rospy.loginfo("Hello ROS!")
        # return the response
        return SetBoolResponse(True, "Print Successfully")
    else:
        # return the response
        return SetBoolResponse(False, "Print Failed")
def string_server():
    # initialize the ROS node
    rospy.init_node('string_server')
    # create a server named /print_string and register the callback stringCallback
    s = rospy.Service('print_string', SetBool, stringCallback)
    # loop and wait for callbacks
    print("Ready to print hello string.")
rospy.spin()
if __name__ == "__main__":
string_server()
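# Example call from another terminal once this node is running (standard ROS 1
# tooling, shown for illustration):
#
#   $ rosservice call /print_string "data: true"
#   success: True
#   message: "Print Successfully"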
|
StarcoderdataPython
|
1708860
|
<filename>utils_eval.py
import sys
print(sys.executable)
import sklearn
sklearn.__version__
import torch.nn as nn
import torch.optim as optim
import nltk
from nltk import word_tokenize
nltk.download('punkt')
import dill
print('next')
from torchtext import data
from torchtext import datasets
from torchtext.vocab import GloVe
import torch
import torchtext.vocab as vocab
import time
from torchtext.data import Field
from torchtext.datasets import IMDB
from torchtext.data import BucketIterator
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from torch.optim.lr_scheduler import ReduceLROnPlateau
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
import tensorboard
from pytorch_lightning.metrics import functional as FM
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.callbacks import EarlyStopping
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
seed_everything(42)
from torch.nn.utils.rnn import pack_padded_sequence,pad_packed_sequence
print(device)
from collections import Counter
class AWE(nn.Module):
def __init__(self, input_size, embed_size, hidden_size, num_layers):
super(AWE, self).__init__()
self.embedding = nn.Embedding(input_size, embed_size)
def forward(self, x):
embedded = self.embedding(x)
awe = torch.mean(embedded, axis=1)
# outputs, _ = self.rnn(embedded, (h0, c0))
# prediction = self.fc_out(outputs[-1, :, :])
return awe
class AverageEmbeddings(LightningModule):
def __init__(self, config, text, train_iter, dev_iter, test_iter ):
super().__init__()
self.nli_net = NLINet(config, text).to(device = device)
weight = torch.FloatTensor(3).fill_(1)
self.loss_function = nn.CrossEntropyLoss(weight=weight)
self.valid_losses = []
self.train_iter = train_iter
self.dev_iter = dev_iter
self.test_iter = test_iter
def forward(self, x):
# X is vector of shape (batch, input, )
# need to be permuted because by default X is batch first
return self.nli_net(x[0], x[1])
# def on_epoch_end(self):
# # print(trainer.optimizers[0].param_groups[0].keys())
# print('HOOK')
# dic = self.trainer.optimizers[0].param_groups[0]
# lr = dic['lr']
# print(lr)
# self.log('learning_rate', lr)
# # early stopping
# if lr < 1e-5:
# raise KeyboardInterrupt
def training_step(self, batch, batch_idx):
premise = batch.premise[0].to(device=device)
hypothesis = batch.hypothesis[0].to(device=device)
targets = batch.label.to(device=device)
        # TODO: Accuracies in all of the loops
y_hat = self.nli_net(premise, hypothesis)
loss = self.loss_function(y_hat, targets.type_as(y_hat).long())
predict = torch.argmax(y_hat, axis=1)
acc = FM.accuracy(predict, targets)
metrics_train = {'train_acc': acc, 'train_loss': loss}
self.log('train_loss', loss, on_step=True)
self.log('train_acc', acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)
self.log('train_loss_epoch', loss, on_epoch=True, prog_bar=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
premise = batch.premise[0].to(device=device)
hypothesis = batch.hypothesis[0].to(device=device)
targets = batch.label.to(device=device)
y_hat = self.nli_net(premise, hypothesis)
loss = self.loss_function(y_hat, targets.type_as(y_hat).long())
predict = torch.argmax(y_hat, axis=1)
acc = FM.accuracy(predict, targets)
metrics_val = {'val_acc': acc, 'val_loss': loss}
self.log_dict(metrics_val)
self.log('val_acc_on_EPOCH', acc, on_epoch=True)
return metrics_val
def test_step(self, batch, batch_idx):
premise = batch.premise[0].to(device=device)
hypothesis = batch.hypothesis[0].to(device=device)
targets = batch.label.to(device=device)
y_hat = self.nli_net(premise, hypothesis)
loss = self.loss_function(y_hat, targets.type_as(y_hat).long())
        predict = torch.argmax(y_hat, axis=1)
acc = FM.accuracy(predict, targets)
metrics_test = {'test_acc': acc, 'test_loss': loss}
self.log_dict(metrics_test, prog_bar=True, logger=True)
class Recurrent(LightningModule):
def __init__(self, config, text, train_iter, dev_iter, test_iter):
super().__init__()
self.nli_net = NLINet(config, text)
weight = torch.FloatTensor(3).fill_(1)
self.loss_function = nn.CrossEntropyLoss(weight=weight)
self.valid_losses = []
self.train_iter = train_iter
self.dev_iter = dev_iter
self.test_iter = test_iter
def forward(self, x):
# X is sentence 1 and 2
return self.nli_net(x[0], x[1])
# def on_epoch_end(self):
# print(trainer.optimizers[0].param_groups[0].keys())
# print('HOOK')
# dic = self.trainer.optimizers[0].param_groups[0]
# lr = dic['lr']
# print(lr)
# self.log('learning_rate', lr)
# # early stopping
# if lr < 1e-5:
# raise KeyboardInterrupt
def training_step(self, batch, batch_idx):
premise = batch.premise
hypothesis = batch.hypothesis
targets = batch.label
        # TODO: Accuracies in all of the loops
y_hat = self.nli_net(premise, hypothesis)
loss = self.loss_function(y_hat, targets.type_as(y_hat).long())
predict = torch.argmax(y_hat, axis=1)
acc = FM.accuracy(predict, targets)
metrics_train = {'train_acc': acc, 'train_loss': loss}
self.log('train_loss', loss, on_step=True)
self.log('train_acc', acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)
self.log('train_loss_epoch', loss, on_epoch=True, prog_bar=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
premise = batch.premise
hypothesis = batch.hypothesis
targets = batch.label
y_hat = self.nli_net(premise, hypothesis)
loss = self.loss_function(y_hat, targets.type_as(y_hat).long())
predict = torch.argmax(y_hat, axis=1)
acc = FM.accuracy(predict, targets)
metrics_val = {'val_acc': acc, 'val_loss': loss}
self.log_dict(metrics_val)
self.log('val_acc_on_EPOCH', acc, on_epoch=True)
return metrics_val
def test_step(self, batch, batch_idx):
premise = batch.premise
hypothesis = batch.hypothesis
targets = batch.label
y_hat = self.nli_net(premise, hypothesis)
loss = self.loss_function(y_hat, targets.type_as(y_hat).long())
predict = torch.argmax(y_hat, axis=1)
acc = FM.accuracy(predict, targets)
metrics_test = {'test_acc': acc, 'test_loss': loss}
self.log_dict(metrics_test, prog_bar=True, logger=True)
def configure_optimizers(self):
# TODO: Learning_RATE
optimizer = optim.SGD(self.parameters(), lr=0.1)
#
scheduler1 = ReduceLROnPlateau(optimizer, mode='max', factor=0.2,
verbose=True, patience=2)
return ({'optimizer': optimizer, "lr_scheduler": scheduler1, "monitor": "val_acc"})
def train_dataloader(self):
return self.train_iter
def val_dataloader(self):
return self.dev_iter
def test_dataloader(self):
return self.test_iter
class RNN_LSTM(nn.Module):
def __init__(self, input_size, embed_size, hidden_size, num_layers):
super(RNN_LSTM, self).__init__()
self.hidden_size = 2048
self.num_layers = 1
embed_size = 300
self.embedding = nn.Embedding(input_size, embed_size, padding_idx=1)
self.rnn = nn.LSTM(input_size=embed_size, hidden_size=self.hidden_size,
num_layers=self.num_layers, bidirectional=False)
def forward(self, x):
src, src_len = x[0], x[1]
src = self.embedding(src)
src = pack_padded_sequence(src, src_len, batch_first=True, enforce_sorted=False)
# Set initial hidden and cell states
h0 = torch.zeros(self.num_layers, x[0].shape[0], self.hidden_size)
c0 = torch.zeros(self.num_layers, x[0].shape[0], self.hidden_size)
output, (hn, cn) = self.rnn(src, (h0, c0))
return hn[0]
class RNN_BiLSTM(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_classes):
super(RNN_BiLSTM, self).__init__()
self.hidden_size = 2048
self.num_layers = 1
embed_size = 300
self.embedding = nn.Embedding(input_size, embed_size, padding_idx=1)
self.rnn = nn.LSTM(input_size=embed_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
bidirectional=True)
def forward(self, x):
# Set initial states
src, src_len = x[0], x[1]
src = self.embedding(src)
src = pack_padded_sequence(src, src_len, batch_first=True, enforce_sorted=False)
h0 = torch.zeros(self.num_layers * 2, x[0].shape[0], self.hidden_size)
c0 = torch.zeros(self.num_layers * 2, x[0].shape[0], self.hidden_size)
# Forward propagate LSTM
output, (hn, cn) = self.rnn(src, (h0, c0))
forward_state = hn[-2, :, :]
backward_state = hn[-1, :, :]
representation = torch.cat((forward_state, backward_state), dim=1)
return representation
class NLINet(nn.Module):
def __init__(self, config, text):
super(NLINet, self).__init__()
# classifier
self.nonlinear_fc = True
self.n_classes = 3
self.enc_lstm_dim = 300
self.dpout_fc = 0.1
self.fc_dim = 512
self.n_classes = 3
self.hidden = config.hidden_dim #default 2048
# TODO: CHANGABLE ENCODERS
if config.model_name == 'AWE':
self.encoder = AWE(37179, 300, self.hidden, 1)
self.inputdim = 4 * 300
if config.model_name == 'LSTM':
self.encoder = RNN_LSTM(37179, self.enc_lstm_dim, self.hidden, 1)
self.inputdim = 4 * 2048
if config.model_name == 'BiLSTM':
self.encoder = RNN_BiLSTM(37179, self.enc_lstm_dim, self.hidden, 1)
self.inputdim = 4 * 4096
pretrained_embeddings = torch.cat((text.vocab.vectors, torch.zeros(4, 300)), 0)
self.encoder.embedding.weight.data.copy_(pretrained_embeddings)
if self.nonlinear_fc:
self.classifier = nn.Sequential(
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.inputdim, self.fc_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.fc_dim, self.fc_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.fc_dim, self.n_classes),
)
else:
self.classifier = nn.Sequential(
nn.Linear(self.inputdim, self.fc_dim),
nn.Linear(self.fc_dim, self.fc_dim),
nn.Linear(self.fc_dim, self.n_classes)
)
def forward(self, s1, s2):
u = self.encoder(s1)
v = self.encoder(s2)
features = torch.cat((u, v, torch.abs(u - v), u * v), 1)
output = self.classifier(features)
return output
def encode(self, s1):
emb = self.encoder(s1)
return emb
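# Note on the classifier input sizes above (derived from the hard-coded hidden sizes):
# each premise/hypothesis pair is represented as cat([u, v, |u - v|, u * v]), so the
# classifier input is 4x the sentence-encoder output dimension:
#   AWE    : 4 * 300  = 1200   (mean of 300-d word embeddings)
#   LSTM   : 4 * 2048 = 8192   (last hidden state of the 2048-d LSTM)
#   BiLSTM : 4 * 4096 = 16384  (concatenated 2048-d forward and backward states)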
|
StarcoderdataPython
|
132916
|
"""
Plotting convenience functions.
"""
from math import ceil
import ipywidgets as widgets
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from model_base import get_ext_input
# define basics
prop_cycle = plt.rcParams["axes.prop_cycle"]
colors = prop_cycle.by_key()["color"]
plt.style.use("seaborn-muted")
INPUT_START = 1000 # dt, i.e. 100ms
LABEL_SIZE = 16
def setup_sliders_layout(model_specific_sliders):
"""
Set up interactive part of the plot, i.e. sliders and grid layout.
    model_specific_sliders: dict mapping model parameter names to their slider widgets
"""
assert isinstance(model_specific_sliders, dict)
num_model_sliders = len(model_specific_sliders)
# define general sliders
I_m_slider = widgets.FloatSlider(
min=-5, max=20, step=0.5, value=10.0, description="I max"
)
T_slider = widgets.IntSlider(
min=500, max=2000, step=5, value=750, description="time"
)
I_types = widgets.ToggleButtons(
options=["constant", "sq. pulse", "sine", "ramp", "Ornstein-Uhlenbeck"],
value="constant",
description="Current type",
disabled=False,
layout=widgets.Layout(height="auto", width="auto"),
)
I_period = widgets.FloatSlider(
min=10, max=1000, step=5, value=200, description="I period"
)
# define grid
grid = widgets.GridspecLayout(ceil(5 + num_model_sliders / 2), 2)
grid[0, :] = widgets.Button(
description="Model parameters",
layout=widgets.Layout(height="auto", width="auto"),
)
# assign model sliders
for idx, (_, slider) in enumerate(model_specific_sliders.items()):
grid[idx // 2 + 1, idx % 2] = slider
grid[idx // 2 + 2, :] = widgets.Button(
description="External current parameters",
layout=widgets.Layout(height="auto", width="auto"),
)
grid[idx // 2 + 3, 0] = I_period
grid[idx // 2 + 4, 0] = I_m_slider
grid[idx // 2 + 4, 1] = T_slider
grid[idx // 2 + 5, :] = I_types
sliders = {
**model_specific_sliders,
"I_max": I_m_slider,
"I_period": I_period,
"T": T_slider,
"current_type": I_types,
}
for _, slider in sliders.items():
# lower number of "waiting" updates in the pipe
slider.msg_throttle = 1
return grid, sliders
def integrate_and_plot(model_cls, **kwargs):
"""
Integrate the model given its parameters and plot.
"""
T = kwargs.pop("T")
I_max = kwargs.pop("I_max")
I_period = kwargs.pop("I_period")
current_type = kwargs.pop("current_type")
model = model_cls(parameters=kwargs, T=T)
ext_current = np.zeros((model.n_points + 1))
input_length = ext_current.shape[0] - INPUT_START
ext_current[INPUT_START:] = get_ext_input(
I_max, I_period, current_type, model.T_total, input_length
)
model.set_input(ext_current)
t, y = model.integrate()
# set up figure
fig = plt.figure(constrained_layout=True, figsize=(15, 8))
spec = gridspec.GridSpec(ncols=3, nrows=3, figure=fig)
# set up axis for timeseries of input current
ax2 = fig.add_subplot(spec[2, :2])
ax2.set_ylim([-20, 30])
ax2.set_ylabel("INPUT CURRENT [AU]", size=LABEL_SIZE)
ax2.set_xlabel("TIME [ms]", size=LABEL_SIZE)
ax2.axvline(100.0, 0, 1, linestyle="--", color="grey", linewidth=0.7)
ax2.spines["right"].set_visible(False)
ax2.spines["top"].set_visible(False)
ax2.tick_params(axis="both", which="major", labelsize=LABEL_SIZE - 2)
# set up axis for timeseries of state vector
ax1 = fig.add_subplot(spec[:2, :2], sharex=ax2)
ax1.set_ylim([-90, 30])
ax1.set_ylabel("MEMBRANE POTENTIAL [mV]", size=LABEL_SIZE)
ax1.spines["right"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.spines["bottom"].set_visible(False)
ax1.axvline(100.0, 0, 1, linestyle="--", color="grey", linewidth=0.7)
ax1.tick_params(axis="both", which="major", labelsize=LABEL_SIZE - 2)
ax12 = ax1.twinx()
ax12.set_ylim([-20, 10])
ax12.set_yticklabels([])
ax12.set_yticks([])
ax12.spines["right"].set_visible(False)
ax12.spines["top"].set_visible(False)
ax12.spines["bottom"].set_visible(False)
ax12.tick_params(axis="both", which="major", labelsize=LABEL_SIZE - 2)
# set up axis for scatter u vs v
ax3 = fig.add_subplot(spec[:2, 2], sharey=ax1)
ax3.spines["right"].set_visible(False)
ax3.spines["top"].set_visible(False)
ax3.set_xlabel("MEMBRANE RECOVERY", size=LABEL_SIZE)
scatter_colors = colors[3]
ax3.set_ylim([-90, 30])
ax3.set_xlim([-20, 10])
ax3.tick_params(axis="both", which="major", labelsize=LABEL_SIZE - 2)
# plot
ax1.plot(t, y[0, :], color=colors[0], linewidth=2.5)
ax12.plot(t, y[1:, :].T, color=colors[1])
ax2.plot(t, model.ext_current[1:], color=colors[2])
ax3.scatter(y[1, :], y[0, :], s=7, c=scatter_colors)
plt.suptitle(f"Number of spikes: {model.num_spikes}", size=LABEL_SIZE + 3)
|
StarcoderdataPython
|
1678578
|
<reponame>sajjadt/competitive-programming
from functools import lru_cache
from operator import getitem
LIMIT = 10000 + 1
pow_table = [1]
for i in range(LIMIT):
pow_table.append(2*pow_table[-1])
best_choice = [0, 0, 0, 1]
for disp in range(3, 150):
b = best_choice[-1]
for j in range(disp):
best_choice.append(b+j)
@lru_cache(maxsize=(2**14))
def f(n):
if n <= 1:
return n
k = best_choice[n]
sol = 2*f(k) + 2**(n-k) - 1
return sol
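# The memoized recurrence above matches the Frame-Stewart recurrence (the form used for
# the 4-peg Tower of Hanoi / Reve's puzzle): move the top k discs aside using all pegs
# (2*f(k)), shift the remaining n-k discs with only 3 pegs free (2**(n-k) - 1), then
# move the k discs back on top. best_choice[n] precomputes the optimal split point k.
# Small values produced by this code: f(1)=1, f(2)=3, f(3)=5, f(4)=9, f(5)=13, f(6)=17.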
while True:
try:
inp = int(input())
print(f(inp))
except(EOFError):
break
|
StarcoderdataPython
|
169266
|
<reponame>plutoyuxie/mmgeneration
import os.path as osp
from mmgen.datasets.builder import build_dataloader, build_dataset
class TestPersistentWorker(object):
@classmethod
def setup_class(cls):
imgs_root = osp.join(osp.dirname(__file__), '..', 'data/image')
train_pipeline = [
dict(type='LoadImageFromFile', io_backend='disk', key='real_img')
]
cls.config = dict(
samples_per_gpu=1,
workers_per_gpu=4,
drop_last=True,
persistent_workers=True)
cls.data_cfg = dict(
type='UnconditionalImageDataset',
imgs_root=imgs_root,
pipeline=train_pipeline,
test_mode=False)
def test_persistent_worker(self):
# test non-persistent-worker
dataset = build_dataset(self.data_cfg)
build_dataloader(dataset, **self.config)
|
StarcoderdataPython
|
3283011
|
<reponame>luisriverag/certbot
"""Common utilities for certbot_apache."""
import shutil
import sys
import unittest
import augeas
import josepy as jose
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock # type: ignore
from certbot.compat import os
from certbot.plugins import common
from certbot.tests import util as test_util
from certbot.display import util as display_util
from certbot_apache._internal import configurator
from certbot_apache._internal import entrypoint
from certbot_apache._internal import obj
class ApacheTest(unittest.TestCase):
def setUp(self, test_dir="debian_apache_2_4/multiple_vhosts",
config_root="debian_apache_2_4/multiple_vhosts/apache2",
vhost_root="debian_apache_2_4/multiple_vhosts/apache2/sites-available"):
# pylint: disable=arguments-differ
self.temp_dir, self.config_dir, self.work_dir = common.dir_setup(
test_dir=test_dir,
pkg=__name__)
self.config_path = os.path.join(self.temp_dir, config_root)
self.vhost_path = os.path.join(self.temp_dir, vhost_root)
self.rsa512jwk = jose.JWKRSA.load(test_util.load_vector(
"rsa512_key.pem"))
self.config = get_apache_configurator(self.config_path, vhost_root,
self.config_dir, self.work_dir)
# Make sure all vhosts in sites-enabled are symlinks (Python packaging
# does not preserve symlinks)
sites_enabled = os.path.join(self.config_path, "sites-enabled")
if not os.path.exists(sites_enabled):
return
for vhost_basename in os.listdir(sites_enabled):
# Keep the one non-symlink test vhost in place
if vhost_basename == "non-symlink.conf":
continue
vhost = os.path.join(sites_enabled, vhost_basename)
if not os.path.islink(vhost): # pragma: no cover
os.remove(vhost)
target = os.path.join(
os.path.pardir, "sites-available", vhost_basename)
os.symlink(target, vhost)
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.config_dir)
shutil.rmtree(self.work_dir)
class ParserTest(ApacheTest):
def setUp(self, test_dir="debian_apache_2_4/multiple_vhosts",
config_root="debian_apache_2_4/multiple_vhosts/apache2",
vhost_root="debian_apache_2_4/multiple_vhosts/apache2/sites-available"):
super().setUp(test_dir, config_root, vhost_root)
from certbot_apache._internal.parser import ApacheParser
self.aug = augeas.Augeas(
flags=augeas.Augeas.NONE | augeas.Augeas.NO_MODL_AUTOLOAD)
with mock.patch("certbot_apache._internal.parser.ApacheParser."
"update_runtime_variables"):
self.parser = ApacheParser(
self.config_path, self.vhost_path, configurator=self.config)
def get_apache_configurator(
config_path, vhost_path,
config_dir, work_dir, version=(2, 4, 7),
os_info="generic",
conf_vhost_path=None,
use_parsernode=False,
openssl_version="1.1.1a"):
"""Create an Apache Configurator with the specified options.
:param conf: Function that returns binary paths. self.conf in Configurator
"""
backups = os.path.join(work_dir, "backups")
mock_le_config = mock.MagicMock(
apache_server_root=config_path,
apache_vhost_root=None,
apache_le_vhost_ext="-le-ssl.conf",
apache_challenge_location=config_path,
apache_enmod=None,
backup_dir=backups,
config_dir=config_dir,
http01_port=80,
temp_checkpoint_dir=os.path.join(work_dir, "temp_checkpoints"),
in_progress_dir=os.path.join(backups, "IN_PROGRESS"),
work_dir=work_dir)
with mock.patch("certbot_apache._internal.configurator.util.run_script"):
with mock.patch("certbot_apache._internal.configurator.util."
"exe_exists") as mock_exe_exists:
mock_exe_exists.return_value = True
with mock.patch("certbot_apache._internal.parser.ApacheParser."
"update_runtime_variables"):
with mock.patch("certbot_apache._internal.apache_util.parse_from_subprocess") as mock_sp:
mock_sp.return_value = []
try:
config_class = entrypoint.OVERRIDE_CLASSES[os_info]
except KeyError:
config_class = configurator.ApacheConfigurator
config = config_class(config=mock_le_config, name="apache",
version=version, use_parsernode=use_parsernode,
openssl_version=openssl_version)
if not conf_vhost_path:
config_class.OS_DEFAULTS.vhost_root = vhost_path
else:
# Custom virtualhost path was requested
config.config.apache_vhost_root = conf_vhost_path
config.config.apache_ctl = config_class.OS_DEFAULTS.ctl
config.prepare()
return config
def get_vh_truth(temp_dir, config_name):
"""Return the ground truth for the specified directory."""
if config_name == "debian_apache_2_4/multiple_vhosts":
prefix = os.path.join(
temp_dir, config_name, "apache2/sites-enabled")
aug_pre = "/files" + prefix
vh_truth = [
obj.VirtualHost(
os.path.join(prefix, "encryption-example.conf"),
os.path.join(aug_pre, "encryption-example.conf/Virtualhost"),
{obj.Addr.fromstring("*:80")},
False, True, "encryption-example.demo"),
obj.VirtualHost(
os.path.join(prefix, "default-ssl.conf"),
os.path.join(aug_pre,
"default-ssl.conf/IfModule/VirtualHost"),
{obj.Addr.fromstring("_default_:443")}, True, True),
obj.VirtualHost(
os.path.join(prefix, "000-default.conf"),
os.path.join(aug_pre, "000-default.conf/VirtualHost"),
{obj.Addr.fromstring("*:80"),
obj.Addr.fromstring("[::]:80")},
False, True, "ip-172-30-0-17"),
obj.VirtualHost(
os.path.join(prefix, "certbot.conf"),
os.path.join(aug_pre, "certbot.conf/VirtualHost"),
{obj.Addr.fromstring("*:80")}, False, True,
"certbot.demo", aliases=["www.certbot.demo"]),
obj.VirtualHost(
os.path.join(prefix, "mod_macro-example.conf"),
os.path.join(aug_pre,
"mod_macro-example.conf/Macro/VirtualHost"),
{obj.Addr.fromstring("*:80")}, False, True,
modmacro=True),
obj.VirtualHost(
os.path.join(prefix, "default-ssl-port-only.conf"),
os.path.join(aug_pre, ("default-ssl-port-only.conf/"
"IfModule/VirtualHost")),
{obj.Addr.fromstring("_default_:443")}, True, True),
obj.VirtualHost(
os.path.join(prefix, "wildcard.conf"),
os.path.join(aug_pre, "wildcard.conf/VirtualHost"),
{obj.Addr.fromstring("*:80")}, False, True,
"ip-172-30-0-17", aliases=["*.blue.purple.com"]),
obj.VirtualHost(
os.path.join(prefix, "ocsp-ssl.conf"),
os.path.join(aug_pre, "ocsp-ssl.conf/IfModule/VirtualHost"),
{obj.Addr.fromstring("10.2.3.4:443")}, True, True,
"ocspvhost.com"),
obj.VirtualHost(
os.path.join(prefix, "non-symlink.conf"),
os.path.join(aug_pre, "non-symlink.conf/VirtualHost"),
{obj.Addr.fromstring("*:80")}, False, True,
"nonsym.link"),
obj.VirtualHost(
os.path.join(prefix, "default-ssl-port-only.conf"),
os.path.join(aug_pre,
"default-ssl-port-only.conf/VirtualHost"),
{obj.Addr.fromstring("*:80")}, True, True, ""),
obj.VirtualHost(
os.path.join(temp_dir, config_name,
"apache2/apache2.conf"),
"/files" + os.path.join(temp_dir, config_name,
"apache2/apache2.conf/VirtualHost"),
{obj.Addr.fromstring("*:80")}, False, True,
"vhost.in.rootconf"),
obj.VirtualHost(
os.path.join(prefix, "duplicatehttp.conf"),
os.path.join(aug_pre, "duplicatehttp.conf/VirtualHost"),
{obj.Addr.fromstring("10.2.3.4:80")}, False, True,
"duplicate.example.com"),
obj.VirtualHost(
os.path.join(prefix, "duplicatehttps.conf"),
os.path.join(aug_pre, "duplicatehttps.conf/IfModule/VirtualHost"),
{obj.Addr.fromstring("10.2.3.4:443")}, True, True,
"duplicate.example.com")]
return vh_truth
if config_name == "debian_apache_2_4/multi_vhosts":
prefix = os.path.join(
temp_dir, config_name, "apache2/sites-available")
aug_pre = "/files" + prefix
vh_truth = [
obj.VirtualHost(
os.path.join(prefix, "default.conf"),
os.path.join(aug_pre, "default.conf/VirtualHost[1]"),
{obj.Addr.fromstring("*:80")},
False, True, "ip-172-30-0-17"),
obj.VirtualHost(
os.path.join(prefix, "default.conf"),
os.path.join(aug_pre, "default.conf/VirtualHost[2]"),
{obj.Addr.fromstring("*:80")},
False, True, "banana.vomit.com"),
obj.VirtualHost(
os.path.join(prefix, "multi-vhost.conf"),
os.path.join(aug_pre, "multi-vhost.conf/VirtualHost[1]"),
{obj.Addr.fromstring("*:80")},
False, True, "1.multi.vhost.tld"),
obj.VirtualHost(
os.path.join(prefix, "multi-vhost.conf"),
os.path.join(aug_pre, "multi-vhost.conf/IfModule/VirtualHost"),
{obj.Addr.fromstring("*:80")},
False, True, "2.multi.vhost.tld"),
obj.VirtualHost(
os.path.join(prefix, "multi-vhost.conf"),
os.path.join(aug_pre, "multi-vhost.conf/VirtualHost[2]"),
{obj.Addr.fromstring("*:80")},
False, True, "3.multi.vhost.tld")]
return vh_truth
return None # pragma: no cover
|
StarcoderdataPython
|
1715318
|
<reponame>rexor12/holobot<gh_stars>1-10
from .action_base import ActionBase
from .do_nothing_action import DoNothingAction
from .reply_action import ReplyAction
|
StarcoderdataPython
|
3324670
|
#!/usr/bin/env python3
#
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import itertools
import logging
import os
import re
import sys
from cros.factory.utils import cros_board_utils
from cros.factory.utils.process_utils import CheckOutput
from cros.factory.utils.process_utils import Spawn
RE_PACKAGE_FILES = re.compile(r'.*?files/')
def GetFileToLint(path=None):
output = CheckOutput(
['git', 'status', '--untracked-files=all', '--porcelain'], cwd=path,
log=True)
# Remove first three characters, and anything up to the -> for renames.
uncommitted = [re.sub(r'^...(.+ -> )?', '', x) for x in output.splitlines()]
# The output contains full path as
# chromeos-base/{chromeos-,}factory-board/files/py/device/boards/test.py
# and we want to strip until 'files/'.
uncommitted = [re.sub(RE_PACKAGE_FILES, '', x) if path else x
for x in uncommitted if x.endswith('.py') and '#' not in x]
logging.info('Uncommitted files: %r', uncommitted)
all_files = set(uncommitted)
for i in itertools.count():
commit = 'HEAD~%d' % i
proc = Spawn(['git', 'log', '-1', commit], cwd=path, read_stdout=True)
if proc.returncode:
# No more log entries
break
if '\n Reviewed-on: ' in proc.stdout_data:
logging.info('%s has Reviewed-on; ending search', commit)
break
files = CheckOutput(['git', 'diff-tree', '--no-commit-id', '--name-only',
'-r', commit], cwd=path, log=True).splitlines()
logging.info('%s contains files %s', commit, files)
for f in files:
file_path = os.path.join(path, f) if path else f
if f.endswith('.py') and os.path.exists(file_path):
all_files.add(re.sub(RE_PACKAGE_FILES, '', f) if path else f)
all_files = set(f for f in all_files if os.path.exists(f))
return all_files
def main():
parser = argparse.ArgumentParser(
description='Lints files that are new, changed, or in a pending CL.')
parser.add_argument('--verbose', '-v', action='count')
parser.add_argument('--overlay', '-o')
args = parser.parse_args()
logging.basicConfig(level=logging.WARNING - 10 * (args.verbose or 0))
# chdir to repo root so paths are all correct
os.chdir(CheckOutput(['git', 'rev-parse', '--show-toplevel']).strip())
all_files = GetFileToLint()
if args.overlay:
overlay_path = cros_board_utils.GetChromeOSFactoryBoardPath(args.overlay)
if overlay_path:
all_files |= GetFileToLint(os.path.join(overlay_path, '../..'))
CheckOutput(['make', 'overlay-%s' % args.overlay])
all_files_str = ' '.join(sorted(all_files))
overlay_args = ['-C', 'overlay-%s' % args.overlay] if args.overlay else []
proc = Spawn(
['make'] + overlay_args + ['lint', 'LINT_FILES=%s' % all_files_str],
call=True, log=True)
sys.exit(proc.returncode)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
146854
|
import os.path as op
from pylama.check_async import check_async
from pylama.config import parse_options
from pylama.core import filter_errors, parse_modeline, run
from pylama.errors import Error, remove_duplicates
from pylama.hook import git_hook, hg_hook
from pylama.main import shell, check_path
def test_filter_errors():
assert list(filter_errors([Error(text='E1')], select=['E'], ignore=['E101']))
assert not list(filter_errors([Error(text='W1')], select=['W100'], ignore=['W']))
def test_remove_duplicates():
errors = [Error(linter='pycodestyle', text='E701'), Error(linter='pylint', text='C0321')]
errors = list(remove_duplicates(errors))
assert len(errors) == 1
def test_parser_modeline():
code = """
bla bla bla
# pylama: ignore=W12,E14:select=R:skip=0
"""
params = parse_modeline(code)
assert params == dict(ignore='W12,E14', select='R', skip='0')
def test_checkpath():
path = op.abspath('dummy.py')
options = parse_options([path])
result = check_path(options)
assert result
assert result[0].filename == 'dummy.py'
def test_linters_params():
options = parse_options(linters='mccabe', config=False)
options.linters_params['mccabe'] = dict(complexity=1)
errors = run('dummy.py', options=options)
assert len(errors) == 1
options.linters_params['mccabe'] = dict(complexity=20)
errors = run('dummy.py', options=options)
assert not errors
def test_sort():
options = parse_options()
options.sort = ['C', 'D']
errors = run('dummy.py', options=options)
assert errors[0].type == 'C'
def test_shell():
errors = shell('-o dummy dummy.py'.split(), error=False)
assert errors
errors = shell(['unknown.py'], error=False)
assert not errors
def test_git_hook():
assert not git_hook(False)
def test_hg_hook():
assert not hg_hook(None, dict())
def test_async():
options = parse_options(config=False)
errors = check_async(['dummy.py'], options=options, rootdir='.')
assert errors
|
StarcoderdataPython
|
3210589
|
import xml.etree.ElementTree as ET
import time
import select
from io import StringIO
from threading import Thread, Event, Lock
from os import read
from .coqapi import Ok, Err
from .xmltype import *
class CoqHandler:
def __init__(self, state_manager, printer):
self.printer = printer
self.state_manager = state_manager
self.currentContent = ""
self.oldProcess = None
self.currentProcess = None
self.messageLevel = None
self.val = None
self.state_id = None
self.nextFlush = True
self.goals = None
self.goals_fg = []
self.goals_bg = 0
self.goals_shelved = 0
self.goals_given_up = 0
self.goal_id = None
self.goal_hyps = []
self.goal_ccl = None
# Call when an element starts
def start(self, tag, attributes):
if tag == 'value':
self.currentProcess = 'value'
self.val = attributes['val']
self.loc_s = None if not 'loc_s' in attributes else attributes['loc_s']
self.loc_e = None if not 'loc_e' in attributes else attributes['loc_e']
if tag == 'option' and attributes['val'] == 'none' and self.currentProcess == 'value':
self.printer.addGoal(None)
elif tag == 'goals' and self.currentProcess == 'value':
self.currentProcess = 'goals_fg'
elif tag == 'list' and self.currentProcess == 'goals_fg':
self.currentProcess = 'fg'
elif tag == 'goal' and self.currentProcess == 'fg':
self.currentProcess = 'goal'
elif tag == 'pair' and self.currentProcess == 'goals_bg':
self.currentProcess = 'goals_bg_in'
elif tag == 'goal' and self.currentProcess == 'goals_bg_in':
self.goals_bg += 1
# TODO
elif tag == 'goal' and self.currentProcess == 'goals_shelved':
self.goals_shelved += 1
# TODO
elif tag == 'goal' and self.currentProcess == 'goals_given_up':
self.goals_given_up += 1
elif tag == 'string' and self.currentProcess == 'goal':
self.currentProcess = 'goal_id'
elif tag == 'list' and self.currentProcess == 'goal':
self.currentProcess = 'goal_hyps'
elif tag == 'state_id' and self.currentProcess == 'value':
self.state_id = attributes['val']
elif tag == 'feedback_content' and attributes['val'] == 'message':
self.currentProcess = 'waitmessage'
elif tag == 'feedback_content' and attributes['val'] == 'processingin':
self.currentProcess = 'waitworker'
elif self.currentProcess == 'message' and tag == 'message_level':
self.messageLevel = attributes['val']
elif tag == 'message':
            # older coq (8.6) uses a message tag at the top level, newer ones use a
# message tag inside a feedback_content one.
# Since there might be more than one message, we want to track when
# we came from a 'waitmessage' (newer coq).
self.oldProcess = self.currentProcess
self.currentProcess = 'message'
# Call when an element ends
def end(self, tag):
if tag == "value":
if self.nextFlush:
self.printer.flushInfo()
self.nextFlush = True
if self.val == 'good':
self.state_manager.pull_event(Ok(self.state_id))
else:
self.state_manager.pull_event(
Err(None, False if not hasattr(self, "loc_s") or self.loc_s is None else int(self.loc_s),
False if not hasattr(self, "loc_e") or self.loc_e is None else int(self.loc_e)))
self.printer.addInfo(self.currentContent)
self.currentContent = ''
self.nextFlush = False
self.state_id = None
self.val = None
self.currentProcess = None
elif tag == 'goals':
self.printer.debug("Goals: " + str(self.goals_fg) + "\n;; " + str(self.goals_bg) + "\n;; " + str(self.goals_shelved) + "\n;; " + str(self.goals_given_up) + "\n")
self.printer.addGoal(Goals(self.goals_fg, self.goals_bg, self.goals_shelved, self.goals_given_up))
self.goals_fg = []
self.goals_bg = 0
self.goals_shelved = 0
self.goals_given_up = 0
self.currentProcess = 'value'
elif tag == 'string' and self.currentProcess == 'goal_id':
self.goal_id = self.currentContent
self.currentProcess = 'goal'
self.currentContent = ''
elif tag == 'goal' and self.currentProcess == 'goal':
self.goals_fg.append(Goal(self.goal_id, self.goal_hyps, self.currentContent))
self.goal_hyps = []
self.currentContent = ''
self.currentProcess = 'fg'
elif tag == 'richpp' and self.currentProcess == 'goal_hyps':
self.goal_hyps.append(self.currentContent)
self.currentContent = ''
elif tag == 'list' and self.currentProcess == 'goal_hyps':
self.currentContent = ''
self.currentProcess = 'goal'
elif tag == 'list' and self.currentProcess == 'fg':
self.currentContent = ''
self.currentProcess = 'goals_bg'
elif tag == 'pair' and self.currentProcess == 'goals_bg_in':
self.currentContent = ''
self.currentProcess = 'goals_bg'
elif tag == 'feedback_content' and self.currentProcess == 'waitmessage':
self.currentProcess = None
self.oldProcess = None
self.messageLevel = None
self.currentContent = ''
elif tag == 'feedback_content' and self.currentProcess == 'waitworker':
self.state_manager.setWorker(self.currentContent)
self.currentContent = ''
elif tag == 'message' and self.currentProcess == 'message':
self.currentProcess = 'waitmessage'
self.printer.debug(self.messageLevel + ": " + str(self.currentContent) + "\n\n")
self.printer.addInfo(self.currentContent)
self.currentProcess = self.oldProcess
self.messageLevel = None
self.currentContent = ''
# Call when a character is read
def data(self, content):
if self.currentProcess == 'message' or self.currentProcess == 'value' or \
self.currentProcess == 'goal_id' or self.currentProcess == 'goal' or \
self.currentProcess == 'waitworker' or self.currentProcess == 'goal_hyps':
self.currentContent += content
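    # For reference, an (abridged) coqtop -ideslave response that this handler walks
    # through typically looks like:
    #   <value val="good"><state_id val="3"/></value>
    # for a successful command, and
    #   <feedback_content val="message">
    #     <message><message_level val="notice"/>...</message>
    #   </feedback_content>
    # for an informational message; start()/end()/data() accumulate the text between
    # these tags into currentContent and forward it to the printer/state manager.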
class CoqParser(Thread):
def __init__(self, process, state_manager, printer):
Thread.__init__(self)
self.cont = True
self.process = process
self.printer = printer
self.target = CoqHandler(state_manager, printer)
self.parser = ET.XMLParser(target=self.target)
self.parser.feed("""
<!DOCTYPE coq [
<!-- we replace non-breakable spaces with normal spaces, because it would
make copy-pasting harder -->
<!ENTITY nbsp \" \">
<!ENTITY gt \">\">
<!ENTITY lt \"<\">
<!ENTITY apos \"'\">
]>
<Root>
""")
def run(self):
self.printer.debug("Running parser...\n")
try:
f = self.process.stdout
while self.cont:
r, w, e = select.select([ f ], [], [], 0.1)
if f in r:
content = read(f.fileno(), 0x400)
self.printer.debug("<< " + str(content) + "\n")
self.parser.feed(content)
except Exception as e:
self.printer.debug("WHOOPS!\n")
self.printer.debug("WHOOPS! " + str(e) + "\n")
self.printer.debug("WHOOPS! " + str(traceback.format_exc()) + "\n")
try:
self.parser.feed("</Root>")
except:
pass
self.printer.debug("END OF PARSING\n")
def stop(self):
self.cont = False
|
StarcoderdataPython
|
3344658
|
"""Problem 51: Prime digit replacements"""
import unittest
from utils.primes import seive_of_erat
def replace_digits(p, digits):
"""If p contains more than one of the same digit, replace them will all
other possible digits."""
if 0 in digits:
other_digits = [str(d) for d in list(range(1, 10))
if str(d) != str(p)[digits[0]]]
else:
other_digits = [str(d) for d in list(range(10))
if str(d) != str(p)[digits[0]]]
generated_numbers = []
p_d = [str(d) for d in str(p)]
for od in other_digits:
for d in digits:
p_d[d] = od
generated_numbers.append(int("".join(p_d)))
return generated_numbers
def get_digits(n):
"""Get digits to replace in n"""
d_tab = {}
digits = []
for i, d in enumerate(str(n)):
if d in d_tab:
d_tab[d].append(i)
else:
d_tab[d] = [i]
for d, locs in d_tab.items():
if len(locs) > 1:
digits.append(locs)
return digits
def find_family(n, limit=100000):
"""Finds lowest prime in family of n primes, searches up to limit"""
primes_list = seive_of_erat(limit)
primes_set = set(primes_list)
for p in primes_list:
digits = get_digits(p)
for d in digits:
family = replace_digits(p, d)
if family:
size = len([m for m in family if m in primes_set])
if size + 1 == n:
return p
return False
def solution():
return find_family(8, limit=1000000)
class TestFunction(unittest.TestCase):
def test_replacer(self):
self.assertEqual(replace_digits(13, [0]), [23, 33, 43, 53, 63, 73, 83,
93])
self.assertEqual(replace_digits(56003, [2, 3]), [56113, 56223, 56333,
56443, 56553, 56663,
56773, 56883, 56993])
def test_digits(self):
for digits in get_digits(13):
self.assertIn(digits, [[0], [1]])
for digits in get_digits(56003):
self.assertIn(digits, [[0], [1], [2, 3], [4]])
def test_finder(self):
self.assertEqual(find_family(7), 56003)
if __name__ == "__main__":
print(solution())
unittest.main()
|
StarcoderdataPython
|
3251240
|
<gh_stars>1-10
from contextlib import suppress
from discord.ext import commands
class Context(commands.Context):
async def send(self, content=None, *, tts=False, embed=None, file=None, files=None, delete_after=None, nonce=None, allowed_mentions=None, reference=None, mention_author=None):
try:
response = self.bot.old_responses[self.message.id]
with suppress(Exception):
await response.clear_reactions()
return await response.edit(content=content, tts=tts, embed=embed, file=file, files=files, delete_after=delete_after, nonce=nonce, allowed_mentions=allowed_mentions, reference=reference, mention_author=mention_author)
except KeyError:
response = await super().send(content=content, tts=tts, embed=embed, file=file, files=files, delete_after=delete_after, nonce=nonce, allowed_mentions=allowed_mentions, reference=reference, mention_author=mention_author)
self.bot.old_responses[self.message.id] = response
return response
async def reply(self, content=None, *, tts=False, embed=None, file=None, files=None, delete_after=None, nonce=None, allowed_mentions=None, mention_author=None):
try:
response = self.bot.old_responses[self.message.id]
with suppress(Exception):
await response.clear_reactions()
return await response.edit(content=content, tts=tts, embed=embed, file=file, files=files, delete_after=delete_after, nonce=nonce, allowed_mentions=allowed_mentions, mention_author=mention_author)
except KeyError:
response = await super().reply(content=content, tts=tts, embed=embed, file=file, files=files, delete_after=delete_after, nonce=nonce, allowed_mentions=allowed_mentions, mention_author=mention_author)
self.bot.old_responses[self.message.id] = response
return response
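# Usage sketch (illustrative, not part of the original file): this Context only
# works if the Bot subclass keeps an `old_responses` dict and hands this class
# to get_context(); the names below are assumptions based on the attributes used above.
#
# class Bot(commands.Bot):
#     def __init__(self, *args, **kwargs):
#         super().__init__(*args, **kwargs)
#         self.old_responses = {}
#
#     async def get_context(self, message, *, cls=Context):
#         return await super().get_context(message, cls=cls)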
|
StarcoderdataPython
|
3380501
|
from django.apps import AppConfig
class IrekuaModelsConfig(AppConfig):
name = 'irekua_models'
verbose_name = 'irekua-models'
|
StarcoderdataPython
|
3296628
|
from fiber import Process, SimpleQueue
def foo(q, a, b):
q.put(a + b)
if __name__ == '__main__':
q = SimpleQueue()
p = Process(target=foo, args=(q, 42, 21))
p.start()
print(q.get())
p.join()
|
StarcoderdataPython
|
3277492
|
<filename>test/check-gui-sh.sikuli/check-gui-sh.py
wait("1530638293676.png", 10)
|
StarcoderdataPython
|
129472
|
<reponame>crestdatasystems/rubrik-polaris-sdk-for-python<gh_stars>0
import os
import pytest
from conftest import util_load_json, BASE_URL
from rubrik_polaris.sonar.scan import ERROR_MESSAGES
FILE_TYPES = ['ANY', 'HITS', 'STALE', 'OPEN_ACCESS', 'STALE_HITS', 'OPEN_ACCESS_HITS']
@pytest.mark.parametrize("scan_name, resources, analyzer_groups", [
("", [{"snappableFid": "dummy_id"}], [{"id": "dummy_id"}]),
("scan_name", [], [{"id": "dummy_id"}]),
("scan_name", [{"snappableFid": "dummy_id"}], [])
])
def test_trigger_on_demand_scan_when_invalid_values_are_provided(client, scan_name, resources, analyzer_groups):
"""
Tests trigger_on_demand_scan method of PolarisClient when invalid values are provided
"""
from rubrik_polaris.sonar.scan import trigger_on_demand_scan
with pytest.raises(ValueError) as e:
trigger_on_demand_scan(client, scan_name=scan_name, resources=resources, analyzer_groups=analyzer_groups)
assert str(e.value) == ERROR_MESSAGES['MISSING_PARAMETERS_IN_SCAN']
def test_trigger_on_demand_scan_when_valid_values_are_provided(requests_mock, client):
"""
Tests trigger_on_demand_scan method of PolarisClient when valid values are provided
"""
from rubrik_polaris.sonar.scan import trigger_on_demand_scan
expected_response = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"test_data/on_demand_scan.json"))
requests_mock.post(BASE_URL + "/graphql", json=expected_response)
scan_name = "Scan from SDK"
resources = [{"snappableFid": "dummy_id"}]
analyzer_groups = [{"id": "dummy_id", "name": "name", "groupType": "group_type", "analyzers": [{}]}]
response = trigger_on_demand_scan(
client, scan_name=scan_name, resources=resources, analyzer_groups=analyzer_groups)
assert response == expected_response
def test_get_on_demand_scan_status_when_valid_values_are_provided(requests_mock, client):
"""
Tests get_on_demand_scan_status method of PolarisClient when valid values are provided
"""
from rubrik_polaris.sonar.scan import get_on_demand_scan_status
expected_response = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"test_data/on_demand_scan_status.json"))
requests_mock.post(BASE_URL + "/graphql", json=expected_response)
response = get_on_demand_scan_status(client, crawl_id="587d147a-add9-4152-b7a0-5a667d99f395")
assert response == expected_response
@pytest.mark.parametrize("crawl_id", [""])
def test_get_on_demand_scan_status_when_invalid_values_are_provided(client, crawl_id):
"""
Tests get_on_demand_scan_status method of PolarisClient when invalid values are provided
"""
from rubrik_polaris.sonar.scan import get_on_demand_scan_status
with pytest.raises(ValueError) as e:
get_on_demand_scan_status(client, crawl_id=crawl_id)
assert str(e.value) == ERROR_MESSAGES['MISSING_PARAMETERS_IN_SCAN_STATUS']
@pytest.mark.parametrize("crawl_id, filters, err_msg", [
("", {"fileType": "HITS"}, ERROR_MESSAGES['MISSING_PARAMETERS_IN_SCAN_RESULT']),
("scan_name", {}, ERROR_MESSAGES['MISSING_PARAMETERS_IN_SCAN_RESULT']),
("scan_name", {"fileType": "HIT"}, ERROR_MESSAGES['INVALID_FILE_TYPE'].format('HIT', FILE_TYPES))
])
def test_get_on_demand_scan_result_when_invalid_values_are_provided(client, crawl_id, filters, err_msg, requests_mock):
"""
Tests get_on_demand_scan_result method of PolarisClient when invalid values are provided
"""
from rubrik_polaris.sonar.scan import get_on_demand_scan_result
expected_response = util_load_json(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_data/file_type_values.json")
)
requests_mock.post(BASE_URL + "/graphql", json=expected_response)
with pytest.raises(ValueError) as e:
get_on_demand_scan_result(client, crawl_id=crawl_id, filters=filters)
assert str(e.value) == err_msg
def test_get_on_demand_scan_result_when_valid_values_are_provided(requests_mock, client):
"""
Tests get_on_demand_scan_result method of PolarisClient when valid values are provided
"""
from rubrik_polaris.sonar.scan import get_on_demand_scan_result
query_response = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"test_data/on_demand_scan_result.json"))
enum_response = util_load_json(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_data/file_type_values.json")
)
responses = [
{'json': enum_response},
{'json': query_response}
]
requests_mock.post(BASE_URL + "/graphql", responses)
response = get_on_demand_scan_result(client, crawl_id="dummy_id", filters={"fileType": "HITS"})
assert response == query_response
|
StarcoderdataPython
|
1731833
|
from .custom_model import get_custom_model
from . import archs
|
StarcoderdataPython
|
146602
|
import os
import json
from typing import Dict
from fintools.settings import get_logger
from fintools.utils import StringWrapper, timeit
from .settings import (
INDUSTRY_SEARCH_DEFAULT_FILENAME,
INDUSTRY_SEARCH_DEFAULT_THRESHOLD
)
logger = get_logger(name=__name__)
class Main:
threshold = INDUSTRY_SEARCH_DEFAULT_THRESHOLD
@timeit(logger=logger)
def search(self, title: str, exact: bool = False, file: str = INDUSTRY_SEARCH_DEFAULT_FILENAME) -> str:
pass
|
StarcoderdataPython
|
1625422
|
<filename>envs/CARLA/carla_lib/client_example.py
#!/usr/bin/env python3
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#### This script has been deprecated !!!! #####
"""Basic CARLA client example."""
from __future__ import print_function
import argparse
import logging
import random
import time
import numpy as np
from numpy.linalg import inv
from carla.client import make_carla_client
from carla.sensor import Camera, Lidar
from carla.settings import CarlaSettings
from carla.tcp import TCPConnectionError
from carla.util import print_over_same_line
from carla.image_converter import labels_to_array
from carla.transform import Transform
WIDTH = 512
HEIGHT = 256
def simplify_seg(array):
classes = {
0: 0, # None
1: 1, # Buildings
2: 1, # Fences
3: 1, # Other
4: 1, # Pedestrians
5: 1, # Poles
6: 4, # RoadLines
7: 3, # Roads
8: 2, # Sidewalks
9: 1, # Vegetation
10: 5, # Vehicles
11: 1, # Walls
12: 1 # TrafficSigns
}
result = np.zeros_like(array, dtype=np.uint8)
for key, value in classes.items():
result[np.where(array == key)] = value
return result
def convert_image(sensor_data, width, height, simple_seg=True):
obs = np.frombuffer(sensor_data['CameraRGB'].raw_data, dtype=np.uint8).reshape((height, width, 4))[:, :, :3]
mon = np.frombuffer(sensor_data['CameraMON'].raw_data, dtype=np.uint8).reshape((height, width, 4))[:, :, :3]
seg = labels_to_array(sensor_data['CameraSegmentation'])
if simple_seg:
seg = simplify_seg(seg)
return obs, seg, mon
def get_bbox(self, measurement, seg):
global WIDTH, HEIGHT
width = WIDTH
height = HEIGHT
extrinsic = Transform(measurement.player_measurements.transform) * self.obs_to_car_transform
bbox_list = []
orientation_list = []
distance_list = []
# main_rotation = measurement.player_measurements.transform.rotation
player_location = measurement.player_measurements.transform.location
player_location = np.array([player_location.x, player_location.y, player_location.z])
# collect the 2bbox generated from the 3d-bbox of non-player agents
for agent in measurement.non_player_agents:
if agent.HasField("vehicle"):
# veh_id = agent.id
# idx = self.nonplayer_ids[veh_id]
vehicle_transform = Transform(agent.vehicle.transform)
bbox_transform = Transform(agent.vehicle.bounding_box.transform)
ext = agent.vehicle.bounding_box.extent
bbox = np.array([
[ ext.x, ext.y, ext.z],
[- ext.x, ext.y, ext.z],
[ ext.x, - ext.y, ext.z],
[- ext.x, - ext.y, ext.z],
[ ext.x, ext.y, - ext.z],
[- ext.x, ext.y, - ext.z],
[ ext.x, - ext.y, - ext.z],
[- ext.x, - ext.y, - ext.z]
])
bbox = bbox_transform.transform_points(bbox)
bbox = vehicle_transform.transform_points(bbox)
orientation = agent.vehicle.transform.orientation
vehicle_location = agent.vehicle.transform.location
cur_location = np.array([vehicle_location.x, vehicle_location.y, vehicle_location.z])
distance = np.linalg.norm(player_location - cur_location)
vertices = []
for vertex in bbox:
pos_vector = np.array([
[vertex[0,0]], # [[X,
[vertex[0,1]], # Y,
[vertex[0,2]], # Z,
[1.0] # 1.0]]
])
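# Project the world-space vertex into the image: move it into the camera
# frame with the inverse extrinsic matrix, apply the camera intrinsics,
# then normalise the homogeneous coordinates below.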
transformed_3d_pos = np.dot(inv(extrinsic.matrix), pos_vector)
pos2d = np.dot(self.intrinsic, transformed_3d_pos[:3])
pos2d = np.array([
pos2d[0] / pos2d[2], pos2d[1] / pos2d[2], pos2d[2]
])
if pos2d[2] > 0:
x_2d = width - pos2d[0]
y_2d = height - pos2d[1]
vertices.append([x_2d, y_2d])
if len(vertices) > 1:
# vehicle_rotation = agent.vehicle.transform.rotation
vertices = np.array(vertices)
bbox_list.append([np.min(vertices[:, 0]), np.min(vertices[:, 1]),
np.max(vertices[:, 0]), np.max(vertices[:, 1])])
orientation_list.append(orientation)
distance_list.append(distance)
seg_bboxes = seg_to_bbox(seg)
final_bboxes = []
final_directions = []
final_distances = []
assert(len(bbox_list) == len(orientation_list))
for i in range(len(bbox_list)):
bbox = bbox_list[i]
direction = orientation_list[i]
xmin, ymin, xmax, ymax = bbox
x1, y1, x2, y2 = width, height, 0, 0
for segbbox in seg_bboxes:
xmin0, ymin0, xmax0, ymax0 = segbbox
if xmin0 >= xmin - 5 and ymin0 >= ymin - 5 and xmax0 < xmax + 5 and ymax0 < ymax + 5:
x1 = min(x1, xmin0)
y1 = min(y1, ymin0)
x2 = max(x2, xmax0)
y2 = max(y2, ymax0)
if x2 > x1 and y2 > y1 and [int(x1), int(y1), int(x2), int(y2)] not in final_bboxes:
final_bboxes.append([int(x1), int(y1), int(x2), int(y2)])
relative_orientation = get_angle(direction.x, direction.y, self.orientation.x, self.orientation.y)
final_directions.append(relative_orientation)
final_distances.append(distance_list[i])
# for angle in final_directions:
# self.angle_logger.write("timestep {}: {}\n".format(self.timestep, angle))
# self.angle_logger.flush()
final_distances = np.array(final_distances)
visible_coll_num = min(coll_veh_num, final_distances.size)
coll_idx = np.argpartition(final_distances, visible_coll_num - 1)[:visible_coll_num]
final_colls = [1 if i in coll_idx else 0 for i in range(final_distances.size)]
return final_bboxes, final_directions, final_colls
def run_carla_client(args):
# Here we will run 3 episodes with 300 frames each.
global WIDTH, HEIGHT
number_of_episodes = 1
frames_per_episode = 300
# We assume the CARLA server is already waiting for a client to connect at
# host:port. To create a connection we can use the `make_carla_client`
# context manager, it creates a CARLA client object and starts the
# connection. It will throw an exception if something goes wrong. The
# context manager makes sure the connection is always cleaned up on exit.
with make_carla_client(args.host, args.port) as client:
print('CarlaClient connected')
for episode in range(0, number_of_episodes):
# Start a new episode.
settings = CarlaSettings()
settings.set(
SynchronousMode=True,
SendNonPlayerAgentsInfo=True,
NumberOfVehicles=0,
NumberOfPedestrians=60,
WeatherId=1,
PlayerVehicle='/Game/Blueprints/Vehicles/Mustang/Mustang.Mustang_C',
QualityLevel='Epic')
settings.randomize_seeds()
# Now we want to add a couple of cameras to the player vehicle.
# We will collect the images produced by these cameras every
# frame.
# The default camera captures RGB images of the scene.
camera0 = Camera('CameraRGB')
# Set image resolution in pixels.
camera0.set_image_size(WIDTH, HEIGHT)
# Set its position relative to the car in meters.
camera0.set_position(1, 0, 2.50)
camera0.set_rotation(0, 0, 0)
settings.add_sensor(camera0)
# Let's add another camera producing ground-truth depth.
# camera1 = Camera('CameraDepth', PostProcessing='Depth')
# camera1.set_image_size(800, 600)
# camera1.set_position(0.30, 0, 1.30)
# settings.add_sensor(camera1)
camera2 = Camera('CameraSegmentation', PostProcessing='SemanticSegmentation')
camera2.set_image_size(WIDTH, HEIGHT)
camera2.set_position(1, 0, 2.50)
settings.add_sensor(camera2)
# Now we load these settings into the server. The server replies
# with a scene description containing the available start spots for
# the player. Here we can provide a CarlaSettings object or a
# CarlaSettings.ini file as string.
scene = client.load_settings(settings)
# Choose one player start at random.
# number_of_player_starts = len(scene.player_start_spots)
# player_start = random.randint(0, max(0, number_of_player_starts - 1))
interval = lambda x, y: list(range(x, y+1))
player_starts = interval(29, 32) + interval(34, 43) + interval(45, 54) + interval(56, 57) + interval(64, 85) + interval(87, 96) + interval(98, 107) + interval(109, 118) + interval(120, 121)
player_start = np.random.choice(player_starts)
# Notify the server that we want to start the episode at the
# player_start index. This function blocks until the server is ready
# to start the episode.
print('Starting new episode at %r...' % scene.map_name)
client.start_episode(player_start)
# Iterate every frame in the episode.
for frame in range(0, frames_per_episode):
# Read the data produced by the server this frame.
measurements, sensor_data = client.read_data()
obs, seg, mon = convert_image(sensor_data, WIDTH, HEIGHT, True)
bboxes, directions, _ = get_bbox(measurements, seg)
# Print some of the measurements.
# print_measurements(measurements)
# Save the images to disk if requested.
if args.save_images_to_disk:
for name, measurement in sensor_data.items():
filename = args.out_filename_format.format(episode, name, frame)
measurement.save_to_disk(filename)
# We can access the encoded data of a given image as numpy
# array using its "data" property. For instance, to get the
# depth value (normalized) at pixel X, Y
#
# depth_array = sensor_data['CameraDepth'].data
# value_at_pixel = depth_array[Y, X]
#
# Now we have to send the instructions to control the vehicle.
# If we are in synchronous mode the server will pause the
# simulation until we send this control.
control = measurements.player_measurements.autopilot_control
control.steer += random.uniform(-0.1, 0.1)
client.send_control(control)
def print_measurements(measurements):
number_of_agents = len(measurements.non_player_agents)
player_measurements = measurements.player_measurements
message = 'Vehicle at ({pos_x:.1f}, {pos_y:.1f}), '
message += '{speed:.0f} km/h, '
message += 'Collision: {{vehicles={col_cars:.0f}, pedestrians={col_ped:.0f}, other={col_other:.0f}}}, '
message += '{other_lane:.0f}% other lane, {offroad:.0f}% off-road, '
message += '({agents_num:d} non-player agents in the scene)'
message = message.format(
pos_x=player_measurements.transform.location.x,
pos_y=player_measurements.transform.location.y,
speed=player_measurements.forward_speed * 3.6, # m/s -> km/h
col_cars=player_measurements.collision_vehicles,
col_ped=player_measurements.collision_pedestrians,
col_other=player_measurements.collision_other,
other_lane=100 * player_measurements.intersection_otherlane,
offroad=100 * player_measurements.intersection_offroad,
agents_num=number_of_agents)
print_over_same_line(message)
def main():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-v', '--verbose',
action='store_true',
dest='debug',
help='print debug information')
argparser.add_argument(
'--host',
metavar='H',
default='localhost',
help='IP of the host server (default: localhost)')
argparser.add_argument(
'-p', '--port',
metavar='P',
default=2000,
type=int,
help='TCP port to listen to (default: 2000)')
argparser.add_argument(
'-a', '--autopilot',
action='store_true',
help='enable autopilot')
argparser.add_argument(
'-l', '--lidar',
action='store_true',
help='enable Lidar')
argparser.add_argument(
'-q', '--quality-level',
choices=['Low', 'Epic'],
type=lambda s: s.title(),
default='Epic',
help='graphics quality level, a lower level makes the simulation run considerably faster.')
argparser.add_argument(
'-i', '--images-to-disk',
action='store_true',
dest='save_images_to_disk',
help='save images (and Lidar data if active) to disk')
argparser.add_argument(
'-c', '--carla-settings',
metavar='PATH',
dest='settings_filepath',
default=None,
help='Path to a "CarlaSettings.ini" file')
args = argparser.parse_args()
log_level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
logging.info('listening to server %s:%s', args.host, args.port)
args.out_filename_format = '_out/episode_{:0>4d}/{:s}/{:0>6d}'
while True:
try:
run_carla_client(args)
print('Done.')
return
except TCPConnectionError as error:
logging.error(error)
time.sleep(1)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\nCancelled by user. Bye!')
|
StarcoderdataPython
|
191914
|
<reponame>pvtokmakov/video_cluster
from src.datasets.kinetics import load_annotation_data, get_video_names_and_annotations, load_value_file
import os
import torch
import json
import argparse
from os.path import join
import numpy as np
from src.objectives.localagg import run_kmeans_multi_gpu, run_kmeans
DEFAULT_KMEANS_SEED = 1234
def get_parser():
parser = argparse.ArgumentParser(description="IDT video inference")
parser.add_argument("--k", type=int, help="Number of clusters.")
parser.add_argument("--num_c", type=int, help="Number of clusterings.")
parser.add_argument("--frames_path", help="Path to Kinetics frames.")
parser.add_argument("--annotation_path", help="Path to Kinetics annotation.")
parser.add_argument("--fv_path", help="Path to Fisher vectors.")
parser.add_argument("--clusters_path", help="Path to save cluster.")
parser.add_argument("--processed_annotation_path", help="Path to output annotation file.")
parser.add_argument('--gpu', nargs='*', help='GPU id')
return parser
def compute_clusters(data, k, gpu_devices):
pred_labels = []
data_npy = data.cpu().detach().numpy()
data_npy = np.float32(data_npy)
for k_idx, each_k in enumerate(k):
# cluster the data
if len(gpu_devices) == 1: # single gpu
I, _ = run_kmeans(data_npy, each_k, seed=k_idx + DEFAULT_KMEANS_SEED,
gpu_device=gpu_devices[0])
else: # multigpu
I, _ = run_kmeans_multi_gpu(data_npy, each_k, seed=k_idx + DEFAULT_KMEANS_SEED, gpu_device=gpu_devices)
clust_labels = np.asarray(I)
pred_labels.append(clust_labels)
pred_labels = np.stack(pred_labels, axis=0)
pred_labels = torch.from_numpy(pred_labels).long()
return pred_labels
if __name__ == "__main__":
args = get_parser().parse_args()
gpu_devices = []
if args.gpu:
ids_list = ''
for i in range(len(args.gpu)):
ids_list += args.gpu[i] + ','
gpu_devices.append(int(args.gpu[i]))
ids_list = ids_list[:-1]
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ids_list
frames_path = args.frames_path
annotation_path = args.annotation_path
fv_path = args.fv_path
k = args.k
n_clusters = args.num_c
data = load_annotation_data(annotation_path)
video_names, annotations = get_video_names_and_annotations(data, "training")
count_valid = 0
count_missing = 0
fvs = []
database = {}
labels = set([])
for i in range(len(video_names)):
if i % 1000 == 0:
print('dataset loading [{}/{}]'.format(i, len(video_names)))
vid_key = video_names[i].split("/")[-1]
vid_label = video_names[i].split("/")[-2].replace("_", " ")
video_path = os.path.join(frames_path, video_names[i])
if not os.path.exists(video_path):
continue
n_frames_file_path = os.path.join(video_path, 'n_frames')
n_frames = int(load_value_file(n_frames_file_path))
if n_frames <= 0:
continue
count_valid += 1
fv_vid_path = os.path.join(fv_path, video_names[i]) + ".dat"
if not os.path.exists(fv_vid_path):
count_missing += 1
continue
else:
fv = torch.load(fv_vid_path)
value = {}
value['subset'] = 'training'
value['annotations'] = {}
value['annotations']['label'] = vid_label
database[vid_key] = value
labels.add(vid_label)
fvs.append(fv.cpu().squeeze())
for key, value in data['database'].items():
this_subset = value['subset']
if (this_subset == 'validation' and (value['annotations']['label'] in labels)) or this_subset == 'testing':
database[key] = value
print("%d missing out of %d\n" % (count_missing, count_valid))
fvs = torch.stack(fvs)
k = [k for _ in range(n_clusters)]
cluster_labels = compute_clusters(fvs, k, gpu_devices)
os.mkdir(join(args.clusters_path, "checkpoints"))
torch.save({'cluster_labels': cluster_labels}, join(args.clusters_path, "checkpoints/checkpoint.pth.tar"))
out = {}
out['labels'] = list(labels)
out['database'] = database
with open(args.processed_annotation_path, 'w') as dst_file:
json.dump(out, dst_file)
|
StarcoderdataPython
|
3240435
|
#!/usr/bin/env python
# Copyright (c) 2007 XenSource, Inc.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Parse/regenerate the "disk provisioning" XML contained within templates
# NB this provisioning XML refers to disks which should be created when
# a VM is installed from this template. It does not apply to templates
# which have been created from real VMs -- they have their own disks.
import XenAPI
import xml.dom.minidom
class Disk:
"""Represents a disk which should be created for this VM"""
def __init__(self, device: str = None, size: str = None, sr: str = None, bootable: bool = None, element=None):
if element:
self.device = element.getAttribute("device")
self.size = element.getAttribute("size")
self.sr = element.getAttribute("sr")
self.bootable = element.getAttribute("bootable") == "true"
else:
if not any((device, size, sr, bootable)):
raise ValueError("specify either Element or all of device, size, sr, bootable")
self.device = device # 0, 1, 2, ...
self.size = size # in bytes
self.sr = sr # uuid of SR
self.bootable = bootable
def toElement(self, doc : xml.dom.minidom.Document):
disk = doc.createElement("disk")
disk.setAttribute("device", self.device)
disk.setAttribute("size", self.size)
disk.setAttribute("sr", self.sr)
b = "false"
if self.bootable: b = "true"
disk.setAttribute("bootable", b)
return disk
class ProvisionSpec:
"""Represents a provisioning specification: currently a list of required disks"""
def toElement(self, doc : xml.dom.minidom.Document):
element = doc.createElement("provision")
for disk in self.disks:
element.appendChild(disk.toElement(doc))
return element
def setSR(self, sr):
"""Set the requested SR for each disk"""
for disk in self.disks:
disk.sr = sr
def setDiskSize(self, size):
self.disks[0].size = size
def __str__(self):
doc = xml.dom.minidom.Document()
doc.appendChild(self.toElement(doc))
return doc.toprettyxml()
def __init__(self, spec=None):
self.disks = []
if spec:
doc = xml.dom.minidom.parseString(spec)
all = doc.getElementsByTagName("provision")
if len(all) != 1:
raise "Expected to find exactly one <provision> element"
disks = all[0].getElementsByTagName("disk")
for disk in disks:
self.disks.append(Disk(element=disk))
def getProvisionSpec(session, vm):
"""Read the provision spec of a template/VM"""
other_config = session.xenapi.VM.get_other_config(vm)
return ProvisionSpec(spec=other_config['disks'])
def setProvisionSpec(session, vm, ps):
"""Set the provision spec of a template/VM"""
txt = str(ps)
try:
session.xenapi.VM.remove_from_other_config(vm, "disks")
except:
pass
session.xenapi.VM.add_to_other_config(vm, "disks", txt)
if __name__ == "__main__":
print ("Unit test of provision XML spec module")
print ("--------------------------------------")
ps = ProvisionSpec()
ps.disks.append(Disk("0", "1024", "0000-0000", True))
ps.disks.append(Disk("1", "2048", "1111-1111", False))
print ("* Pretty-printing spec")
txt = str(ps)
print (txt)
print ("* Re-parsing output")
ps2 = ProvisionSpec(txt)
print ("* Pretty-printing spec")
txt2 = str(ps2)
print (txt2)
if txt != txt2:
raise "Sanity-check failed: print(parse(print(x))) <> print(x)"
print ("* OK: print(parse(print(x))) == print(x)")
|
StarcoderdataPython
|
3346475
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from typing import Dict, List, NamedTuple, Optional
from torch import Tensor
EncoderOut = NamedTuple(
"EncoderOut",
[
("encoder_out", Tensor), # T x B x C
("encoder_padding_mask", Optional[Tensor]), # B x T
("encoder_embedding", Optional[Tensor]), # B x T x C
("encoder_states", Optional[List[Tensor]]), # List[T x B x C]
("src_tokens", Optional[Tensor]), # B x T
("src_lengths", Optional[Tensor]), # B x 1
],
)
class FairseqEncoder(nn.Module):
"""Base class for encoders."""
def __init__(self, dictionary):
super().__init__()
self.dictionary = dictionary
def forward(self, src_tokens, src_lengths=None, **kwargs):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): lengths of each source sentence of shape
`(batch)`
"""
raise NotImplementedError
def forward_torchscript(self, net_input: Dict[str, Tensor]):
"""A TorchScript-compatible version of forward.
Encoders which use additional arguments may want to override
this method for TorchScript compatibility.
"""
if torch.jit.is_scripting():
return self.forward(
src_tokens=net_input["src_tokens"],
src_lengths=net_input["src_lengths"],
)
else:
return self.forward_non_torchscript(net_input)
@torch.jit.unused
def forward_non_torchscript(self, net_input: Dict[str, Tensor]):
encoder_input = {
k: v
for k, v in net_input.items()
if k != "prev_output_tokens"
}
return self.forward(**encoder_input)
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to `new_order`.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
`encoder_out` rearranged according to `new_order`
"""
raise NotImplementedError
def max_positions(self):
"""Maximum input length supported by the encoder."""
return 1e6 # an arbitrary large number
def upgrade_state_dict(self, state_dict):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
|
StarcoderdataPython
|
74027
|
from setuptools import find_packages, setup
description = 'Create webhook services for Dialogflow using Python'
setup(
name='dialogflow-fulfillment',
version='0.4.4',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages('src'),
package_dir={'': 'src'},
url='https://github.com/gcaccaos/dialogflow-fulfillment',
project_urls={
'Documentation': 'https://dialogflow-fulfillment.readthedocs.io',
},
license='Apache License 2.0',
description=description,
long_description_content_type='text/markdown',
long_description=open('README.md').read(),
include_package_data=True,
python_requires='>=3',
extras_require={
'dev': [
'tox==3.24.1',
'setuptools==57.4.0',
'wheel==0.36.2',
'twine==3.4.2',
],
'lint': [
'flake8==3.9.2',
'flake8-docstrings==1.6.0',
'flake8-isort==4.0.0',
'pre-commit==2.13.0',
],
'docs': [
'sphinx==4.1.2',
'sphinx-autobuild==2021.3.14',
'sphinx-rtd-theme==0.5.2',
'sphinxcontrib-mermaid==0.7.1'
],
'tests': [
'pytest==6.2.4',
'coverage==5.5',
]
},
keywords=[
'dialogflow',
'fulfillment',
'webhook',
'api',
'python',
],
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
StarcoderdataPython
|
127021
|
<reponame>ShujaKhalid/deep-rl
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_SUN_triangle_list'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_SUN_triangle_list',error_checker=_errors._error_checker)
GL_R1UI_C3F_V3F_SUN=_C('GL_R1UI_C3F_V3F_SUN',0x85C6)
GL_R1UI_C4F_N3F_V3F_SUN=_C('GL_R1UI_C4F_N3F_V3F_SUN',0x85C8)
GL_R1UI_C4UB_V3F_SUN=_C('GL_R1UI_C4UB_V3F_SUN',0x85C5)
GL_R1UI_N3F_V3F_SUN=_C('GL_R1UI_N3F_V3F_SUN',0x85C7)
GL_R1UI_T2F_C4F_N3F_V3F_SUN=_C('GL_R1UI_T2F_C4F_N3F_V3F_SUN',0x85CB)
GL_R1UI_T2F_N3F_V3F_SUN=_C('GL_R1UI_T2F_N3F_V3F_SUN',0x85CA)
GL_R1UI_T2F_V3F_SUN=_C('GL_R1UI_T2F_V3F_SUN',0x85C9)
GL_R1UI_V3F_SUN=_C('GL_R1UI_V3F_SUN',0x85C4)
GL_REPLACEMENT_CODE_ARRAY_POINTER_SUN=_C('GL_REPLACEMENT_CODE_ARRAY_POINTER_SUN',0x85C3)
GL_REPLACEMENT_CODE_ARRAY_STRIDE_SUN=_C('GL_REPLACEMENT_CODE_ARRAY_STRIDE_SUN',0x85C2)
GL_REPLACEMENT_CODE_ARRAY_SUN=_C('GL_REPLACEMENT_CODE_ARRAY_SUN',0x85C0)
GL_REPLACEMENT_CODE_ARRAY_TYPE_SUN=_C('GL_REPLACEMENT_CODE_ARRAY_TYPE_SUN',0x85C1)
GL_REPLACEMENT_CODE_SUN=_C('GL_REPLACEMENT_CODE_SUN',0x81D8)
GL_REPLACE_MIDDLE_SUN=_C('GL_REPLACE_MIDDLE_SUN',0x0002)
GL_REPLACE_OLDEST_SUN=_C('GL_REPLACE_OLDEST_SUN',0x0003)
GL_RESTART_SUN=_C('GL_RESTART_SUN',0x0001)
GL_TRIANGLE_LIST_SUN=_C('GL_TRIANGLE_LIST_SUN',0x81D7)
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,arrays.GLvoidpArray)
def glReplacementCodePointerSUN(type,stride,pointer):pass
@_f
@_p.types(None,_cs.GLubyte)
def glReplacementCodeubSUN(code):pass
@_f
@_p.types(None,arrays.GLubyteArray)
def glReplacementCodeubvSUN(code):pass
@_f
@_p.types(None,_cs.GLuint)
def glReplacementCodeuiSUN(code):pass
@_f
@_p.types(None,arrays.GLuintArray)
def glReplacementCodeuivSUN(code):pass
@_f
@_p.types(None,_cs.GLushort)
def glReplacementCodeusSUN(code):pass
@_f
@_p.types(None,arrays.GLushortArray)
def glReplacementCodeusvSUN(code):pass
|
StarcoderdataPython
|
36897
|
import pandas as pd
from visions import visions_string, visions_datetime
from visions.core.model import TypeRelation
from visions.core.model.relations import InferenceRelation
from visions.utils.coercion import test_utils
def to_datetime_year_week(series):
"""Convert a series of the format YYYY/UU (year, week) to datetime.
A '0' is added as day dummy value, as pandas requires a day value to parse.
Args:
series: the Series to parse
Returns:
A datetime series
Examples:
>>> series = pd.Series(['2018/47', '2018/12', '2018/03'])
>>> parsed_series = to_datetime_year_week(series)
>>> print(parsed_series.dt.week)
0 47
1 12
2 3
dtype: int64
"""
return pd.to_datetime(series + "0", format="%Y/%U%w")
def to_datetime_year_month_day(series):
"""Convert a series of the format YYYYMMDD (year, month, day) to datetime.
Args:
series: the Series to parse
Returns:
A datetime series
Examples:
>>> series = pd.Series(['20181201', '20181202', '20181203'])
>>> parsed_series = to_datetime_year_month_day(series)
>>> print(parsed_series.dt.day)
0 1
1 2
2 3
dtype: int64
"""
return pd.to_datetime(series, format="%Y%m%d")
def get_string_datetime_type_relation(func):
return InferenceRelation(
relationship=test_utils.coercion_test(func),
transformer=func,
related_type=visions_string,
type=visions_datetime,
)
def string_to_datetime_year_week():
return get_string_datetime_type_relation(to_datetime_year_week)
def string_to_datetime_year_month_day():
return get_string_datetime_type_relation(to_datetime_year_month_day)
|
StarcoderdataPython
|
3388038
|
class GenericUriParser(UriParser):
"""
A customizable parser for a hierarchical URI.
GenericUriParser(options: GenericUriParserOptions)
"""
@staticmethod
def __new__(self,options):
""" __new__(cls: type,options: GenericUriParserOptions) """
pass
|
StarcoderdataPython
|
182208
|
<filename>ae5_tools/cli/commands/pod.py
import sys
import click
from ..login import cluster_call
from ..utils import add_param, ident_filter, global_options
@click.group(short_help='info, list',
epilog='Type "ae5 user <command> --help" for help on a specific command.')
@global_options
def pod():
'''Commands related to the AE5 pods (sessions, deployments, runs).
These commands require either that the K8S deployment be live on the platform,
or the --k8s-ssh-user option be supplied with a valid username.
'''
pass
@pod.command()
@ident_filter('pod')
@global_options
def list(**kwargs):
'''List all pods.'''
cluster_call('pod_list', **kwargs)
@pod.command()
@ident_filter('pod', required=True)
@global_options
def info(**kwargs):
'''Get information about a specific pod.'''
cluster_call('pod_info', **kwargs)
|
StarcoderdataPython
|
1623308
|
<gh_stars>1-10
""" Module for definig an instance of a detected symptom """
class Symptom:
""" An object to represent an occurence of a specific symptom"""
def __init__(self, tag, action_msg, signal):
self.tag = tag
self.action_msg = action_msg
self.signal = signal
self.start_timestamp = signal.start_timestamp
self.duration = signal.duration
def convert_to_dict(self):
dict_form = self.__dict__
dict_form["start_timestamp"] = str(dict_form["start_timestamp"])
dict_form["duration"] = str(dict_form["duration"])
dict_form["signal"] = dict_form["signal"].convert_to_dict()
return dict_form
|
StarcoderdataPython
|
3244649
|
import discord
import discord.ext.commands as commands
import time as time_module
import sys
import pickle
import asyncio
sys.path.insert(0, "../")
import util
class Reminders(commands.Cog):
def __init__(self, bot, timeouts, generic_responses):
self.bot = bot
self.timeouts = timeouts
self.generic_responses = generic_responses
try:
self.reminders = pickle.load(open("user_data/reminders.pickle", "rb"))
except FileNotFoundError:
self.reminders = list()
self.reminders_changed = False
try:
self.reminders_blocklist = pickle.load(open("user_data/reminders_blocklist.pickle", "rb"))
except FileNotFoundError:
self.reminders_blocklist = dict()
self.reminders_blocklist_changed = False
bot.loop.create_task(self.reminders_autoflush())
bot.loop.create_task(self.reminders_remind())
async def reminders_autoflush(self):
while True:
await asyncio.sleep(20)
if self.reminders_changed:
pickle.dump(self.reminders, open("user_data/reminders.pickle", "wb"))
self.reminders_changed = False
if self.reminders_blocklist_changed:
pickle.dump(self.reminders_blocklist, open("user_data/reminders_blocklist.pickle", "wb"))
self.reminders_blocklist_changed = False
async def reminders_remind(self):
while True:
await asyncio.sleep(10)
current_time = time_module.time()
reminders = self.reminders.copy()
for reminder in reminders:
try:
if reminder[1] < current_time:
user = self.bot.get_user(reminder[0])
user_dm = await user.create_dm()
if len(reminder) == 3: # old reminders without a message link
await user_dm.send("привет. you wanted me to remind you of:\n```" + reminder[2] + "```(no message link available)")
else:
await user_dm.send("привет. you wanted me to remind you of:\n```" + reminder[2] + "```https://discordapp.com/channels/" + reminder[3])
self.reminders.remove(reminder)
self.reminders_changed = True
except:
pass
def add_reminder(self, user, timestamp, text, link):
self.reminders.append([user, timestamp, text, link])
self.reminders_changed = True
def get_reminders(self, user):
reminders_list = list()
for reminder in self.reminders:
if reminder[0] == user:
buffer = reminder.copy()
buffer = buffer[1:]
reminders_list.append(buffer)
reminders_list.sort()
return reminders_list
@commands.command()
async def remind(self, ctx, time, *message): # TODO: Rewrite this command as a command group, so it's less cluttered
"""The remind command can remind you or another user of something. Usage:
remind [time] [message] will set a reminder for yourself
remind [@user] [time] [message] will set a reminder for another user
remind list will send a message with a list of set reminders
remind list [@user] will send a message with a list of set reminders for the mentioned user
remind remove [id] will remove the specified reminder
remind blocklist will send a message with a list of blocked users (who can't set reminders for you)
remind blocklist add [@user] will add a user to your blocklist
remind blocklist remove [@user] will remove a user from your blocklist"""
message = " ".join(message)
if time == "list":
if len(ctx.message.mentions) > 0:
user = ctx.message.mentions[0]
else:
user = ctx.message.author
reminders_list = self.get_reminders(user.id)
if len(reminders_list) == 0:
await ctx.send("No reminders are set!")
return
current_time = int(time_module.time())
i = 0
message = "**Reminders for " + user.name + "**\n"
for reminder in reminders_list:
message += "**[" + str(i) + "]**" + " in " + util.time_calc.time_period_human_readable(reminder[0] - current_time) + " `" + reminder[1] + "`\n"
i += 1
await ctx.send(message[:-1])
elif time == "blocklist":
if len(ctx.message.mentions) > 0:
message = message.split(" ")
user = ctx.message.mentions[0]
if ctx.message.author.id not in self.reminders_blocklist:
self.reminders_blocklist[ctx.message.author.id] = list()
if message[0] == "add":
self.reminders_blocklist[ctx.message.author.id].append(user.id)
self.reminders_blocklist_changed = True
await ctx.send("Added **" + user.name + "** to your blocklist!")
elif message[0] == "remove":
try:
self.reminders_blocklist[ctx.message.author.id].remove(user.id)
self.reminders_blocklist_changed = True
await ctx.send("Removed **" + user.name + "** from your blocklist!")
except ValueError:
await ctx.send("**" + user.name + "** was not found in your blocklist!")
else:
if ctx.message.author.id not in self.reminders_blocklist or len(self.reminders_blocklist[ctx.message.author.id]) == 0:
await ctx.send("**You haven't blocked anyone!**")
return
message = "**Reminders blocklist:**\n"
for blocked in self.reminders_blocklist[ctx.message.author.id]:
user = self.bot.get_user(blocked)
if user == None:
username = "[Username not available]"
else:
username = user.name
message += "**" + username + "** (" + str(blocked) + ")\n"
await ctx.send(message)
elif time == "remove":
eyedee = int(message.split(" ")[0])
reminders = self.get_reminders(ctx.message.author.id)
reminder = reminders[eyedee]
self.reminders.remove([ctx.message.author.id, *reminder])
self.reminders_changed = True
await ctx.send("Removed reminder #" + str(eyedee))
else:
if len(ctx.message.mentions) > 0 and time.startswith("<@"):
user = ctx.message.mentions[0]
offset_args = True
else:
user = ctx.message.author
offset_args = False
if offset_args:
time = message.split(" ")[0]
message = " ".join(message.split(" ")[1:])
if user.id == self.bot.user.id:
await ctx.send("and i say no")
return
timestamp = int(util.time_calc.timestamp_in(time))
if user.id in self.reminders_blocklist and ctx.message.author.id in self.reminders_blocklist[user.id]:
await ctx.send("This user has blocked you from creating reminders for them!")
return
link = str(ctx.message.guild.id) + "/" + str(ctx.message.channel.id) + "/" + str(ctx.message.id)
self.add_reminder(user.id, timestamp, message, link)
if offset_args:
await ctx.send("я буду remind " + user.name)
else:
await ctx.send("я буду remind you")
|
StarcoderdataPython
|
1610091
|
<filename>patch_performance/__init__.py
from cb_performance import get_all_cb_sets_perf, group_poll_results, get_perf_totals
from cb_score import compute_overhead
from perf_constants import SIZE_PERF_NAME
from farnsworth.models import PatchScore, Round, PatchType
import logging
l = logging.getLogger("patch_performance.main")
def compute_patch_performance(target_cs):
"""
Compute patch performance for all patched binaries of given CS.
This will update DB with results.
:param target_cs: CS for which patch performance needs to be computed.
:return: None.
"""
l.info("Trying to compute patch performance for CS:" + str(target_cs.id))
patched_bins_perf = get_all_cb_sets_perf(target_cs)
l.info("Got Raw Perf Scores.")
l.info("Trying to Group Raw Scores.")
grouped_perf_results = group_poll_results(patched_bins_perf)
l.info("Grouped Raw Scores.")
original_cbs_perf = []
if 'original' in grouped_perf_results:
original_cbs_perf = grouped_perf_results['original']
del grouped_perf_results['original']
if len(original_cbs_perf) <= 0:
l.warning("No polls have been evaluated against original binary. "
"Ignoring this round of patch performance computation.")
return
if len(original_cbs_perf['fail']) > 0:
l.warning("Weired. There are some failed polls for original binary, ignoring failed polls.")
# consider only passed polls
original_cbs_perf = original_cbs_perf['pass']
for curr_patch_type in grouped_perf_results:
l.info("Computing Scores for Patch Type:" + str(curr_patch_type))
pass_perf_objects = grouped_perf_results[curr_patch_type]['pass']
patched_cbs_pass_poll_ids = []
if len(pass_perf_objects) > 0:
patched_cbs_pass_poll_ids = map(lambda perf_obj: perf_obj.poll.id, pass_perf_objects)
else:
l.warning("No passed polls found for Patch Type:" + str(curr_patch_type))
# skip to next patch type
continue
failed_perf_objects = grouped_perf_results[curr_patch_type]['fail']
has_fails = len(failed_perf_objects) > 0
failed_polls = []
if has_fails:
failed_polls = map(lambda perf_obj: perf_obj.poll.id, failed_perf_objects)
failed_polls_json = {'poll_ids': list(failed_polls)}
original_cbs_pass_poll_ids = map(lambda perf_obj: perf_obj.poll.id, original_cbs_perf)
common_pass_poll_ids = set(original_cbs_pass_poll_ids)
common_pass_poll_ids.intersection_update(patched_cbs_pass_poll_ids)
if not (len(common_pass_poll_ids) > 0):
l.warning("No polls have been common between original and patched cbs. Ignoring patch type:" +
str(curr_patch_type))
# skip to next patch type
continue
polls_included = {'poll_ids': list(common_pass_poll_ids)}
base_perf_objects = filter(lambda perf_obj: perf_obj.poll.id in common_pass_poll_ids, original_cbs_perf)
patched_perf_objects = filter(lambda perf_obj: perf_obj.poll.id in common_pass_poll_ids, pass_perf_objects)
base_perf_jsons = map(lambda perf_obj: perf_obj.performances['perf']['median'], base_perf_objects)
patched_perf_jsons = map(lambda perf_obj: perf_obj.performances['perf']['median'], patched_perf_objects)
base_perf_total = get_perf_totals(base_perf_jsons)
# get the size of binaries, size of the binaries will be same on all runs
base_perf_total[SIZE_PERF_NAME] = base_perf_jsons[0][SIZE_PERF_NAME]
patched_perf_total = get_perf_totals(patched_perf_jsons)
# again size of binaries will be same across all tests.
patched_perf_total[SIZE_PERF_NAME] = patched_perf_jsons[0][SIZE_PERF_NAME]
target_score = compute_overhead(base_perf_total, patched_perf_total)
l.info("Trying to create PatchScore into DB for patch type:" + str(curr_patch_type) + " for cs:" +
str(target_cs.id))
# convert patch type name to PatchType
curr_patch_type = PatchType.get(PatchType.name == curr_patch_type)
# create patch score
PatchScore.create(cs=target_cs, patch_type=curr_patch_type, num_polls=len(common_pass_poll_ids),
polls_included=polls_included, has_failed_polls=has_fails, failed_polls=failed_polls_json,
round=Round.current_round(), perf_score=target_score)
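# Usage sketch (illustrative; assumes ChallengeSet is the farnsworth peewee model
# for challenge sets, alongside the models imported above):
# from farnsworth.models import ChallengeSet
# for cs in ChallengeSet.select():
#     compute_patch_performance(cs)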
|
StarcoderdataPython
|
1722918
|
# Reads data from QuickBooks Excel output, and combines two detail profit and loss files (from two time periods) into one
# file which keeps the detail from each time period but also shows the comparision between the two periods
import numpy as np
import pandas as pd
def read_db_from_file(filename):
# Read in the data from Excel and drop unusued columns
df = pd.read_excel(filename)
del df['Unnamed: 6']
df = df.drop([0,1,2,3,4])
return df
def parse_QB_data(df):
# df1 will be our output dataframe
df1 = pd.DataFrame(columns=['Category','Date','Memo', 'Amount'])
lastdescr = 'start'
newheading = False
descr = ""
for index, row in df.iterrows():
descr = row["Business name"]
date1 = row['Unnamed: 1']
memo1 = row['Unnamed: 3']
memo2 = row['Unnamed: 4']
amount = row['Unnamed: 5']
if np.isnan(amount):
lastdescr = descr
if isinstance(descr, float):
if np.isnan(descr):
# This code is to combine the memo1 and memo2, removing NaNs (equivalent to blank cells in the original files):
concat_memo = True
if isinstance(memo1, float):
if np.isnan(memo1):
memo = str(memo2)
concat_memo = False
if isinstance(memo2, float):
if np.isnan(memo2):
memo = str(memo1)
concat_memo = False
if isinstance(memo1, float) and isinstance(memo2, float):
if np.isnan(memo1) and np.isnan(memo2):
memo = ""
concat_memo = False
if concat_memo:
memo = str(memo1) + ", " + str(memo2)
df1 = df1.append({"Category": lastdescr, "Date": date1, "Memo": memo, "Amount": amount}, ignore_index=True)
df1['Date'] = pd.to_datetime(df1['Date'])
return df1
def combine_year_data(df2019, df2020):
dfout = pd.DataFrame(columns=['Category', 'Date', 'Memo', 'Amount', 'Total','Difference','Difference_Percent'])
lastcategory = "start"
total = 0
for index, row in df2019.iterrows():
category = row["Category"]
date = row['Date']
memo = row['Memo']
amount = row['Amount']
if category != lastcategory: # We found a new category
# Write out a subtotal
dfout = dfout.append({"Memo": "A:", "Total": total}, ignore_index=True)
# Find the next year's entries for that category
total2 = 0
for index2, row2 in df2020.iterrows():
# This simplistic approach is really slow (? O(n^2)) but works fine for this use case
# which is just a small number of transactions
category2 = row2["Category"]
date2 = row2['Date']
memo2 = row2['Memo']
amount2 = row2['Amount']
if category2 == lastcategory:
dfout = dfout.append(row2)
total2 += amount2
# Write out a subtotal
dfout = dfout.append({"Memo": "B:", "Total": total2}, ignore_index=True)
if total == 0: # Don't calculate percentage if total = 0 as that will throw an error
dfout = dfout.append({"Difference": total2-total}, ignore_index=True)
else:
dfout = dfout.append({"Difference": total2-total, "Difference_Percent": (total2-total)/total}, ignore_index=True)
dfout = dfout.append({"Category": ""}, ignore_index=True)
total = 0
dfout = dfout.append({"Category": category, "Date": date, "Memo": memo, "Amount": amount}, ignore_index=True)
lastcategory = category
total += amount
return dfout
df = read_db_from_file(r'2019.xlsx')
df2019 = parse_QB_data(df)
df = read_db_from_file(r'2020.xlsx')
df2020 = parse_QB_data(df)
dfout = combine_year_data(df2019, df2020)
dfout.to_excel("out.xlsx", index=False)
|
StarcoderdataPython
|
3379425
|
<gh_stars>1-10
# Simple Class with class variable
# Class with init
# Class with StaticMethod
# Class with print method
# Advance class with inheritance
class Person:
Answer = 42 # Class variable, shared by all instances
def __init__(self, name):
self.name = name # Instance Variable
@staticmethod
def something():
return Person.Answer
abc = Person("ABC")
pqr = Person("pqr")
print(abc.name)
print(abc.Answer)
print(pqr.name)
print(pqr.Answer)
abc.Answer = 0
print(abc.Answer)
print(pqr.Answer)
Person.Answer = "How about now?"
print(pqr.Answer)
print(abc.Answer)
del abc.Answer
print(abc.Answer)
print(Person.something())
print("\n\n")
#### Advance Class
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def __repr__(self):
return repr((self.name,self.age))
def __eq__(self, other):
if isinstance(other, Person):
return self.name == other.name
def print_age_class(self):
print("Child" if self.age < 13 else "Something-else")
@classmethod
def class_method(cls):
def new_method(self):
print(f"New Method {self.name}")
setattr(cls, 'new_method', new_method)
x = Person("ABC",50)
print(x)
x.print_age_class()
# x.new_method() # This will not work.
x.class_method()
x.new_method() # OR Person.new_method()
###### Inheritance
class Child(Person):
def __init__(self, name, age):
super().__init__(name, age)
self.age_class = "Child"
def print_category(self):
print(self.name,self.age,self.age_class)
def new_method(self):
print("This will be overridden")
c = Child("Child",2)
c.print_category()
c.new_method()
c.class_method()
c.new_method()
#### Decorator
def uppercase_decorator(function):
def wrapper():
result_of_function_execution = function()
uppercased = result_of_function_execution.upper()
return uppercased
return wrapper
def some_string():
return "Hello World!"
print(some_string())
decorate = uppercase_decorator(some_string)
print(decorate())
@uppercase_decorator
def some_string_annotation():
return "Uppercase String!"
print(some_string_annotation())
# Decorator use case:
import requests
@uppercase_decorator
def get_results():
return requests.get("http://www.mocky.io/v2/5dadc1602d00008040e4bcb3").text
print(get_results())
#### Generator
def number_generator(stop,multiply):
num = 0
while(num < stop):
num += 1
yield num*multiply
print(type(number_generator(5,10)))
print(list(number_generator(5,10)))
print([f"${i}" for i in number_generator(5,10)])
#### Write a decorator which converts a list of numbers to strings with a currency symbol (a possible solution sketch follows)
# List: [1, 2, 3, 4]
# Outcome: ['$0', '$1', '$2', '$3', '$4']
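# One possible solution sketch for the exercise above (not from the original
# file): a decorator that prefixes each number returned by the wrapped
# function with a '$' sign.
def currency_decorator(function):
    def wrapper(*args, **kwargs):
        return [f"${n}" for n in function(*args, **kwargs)]
    return wrapper

@currency_decorator
def some_numbers():
    return [0, 1, 2, 3, 4]

print(some_numbers())  # ['$0', '$1', '$2', '$3', '$4']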
|
StarcoderdataPython
|
1607294
|
<filename>studentdb/api/urls.py
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from api import views
urlpatterns = [
path('add', views.AddAPIView.as_view()),
path('students', views.StudentListAPIView.as_view()),
path('student/<id>', views.StudentDetailAPIView.as_view()),
path('students/<id>', views.StudentUpdateAPIView.as_view()),
path('clear', views.ClearStudentRecordsAPIView.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
StarcoderdataPython
|
1692788
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
MAST Portal
===========
Module to query the <NAME> Archive for Space Telescopes (MAST).
"""
from __future__ import print_function, division
import warnings
import json
import time
import os
import re
#import keyring
import io
import numpy as np
from requests import HTTPError
from getpass import getpass
from base64 import b64encode
import astropy.units as u
import astropy.coordinates as coord
from astropy.table import Table, Row, vstack, MaskedColumn
from astropy.extern.six.moves.urllib.parse import quote as urlencode
from astropy.extern.six.moves.http_cookiejar import Cookie
from astropy.utils.exceptions import AstropyWarning
from astropy.logger import log
from ..query import BaseQuery
from ..utils import commons, async_to_sync
from ..utils.class_or_instance import class_or_instance
from ..exceptions import (TimeoutError, InvalidQueryError, RemoteServiceError,
LoginError, ResolverError, MaxResultsWarning,
NoResultsWarning, InputWarning, AuthenticationWarning)
from . import conf
__all__ = ['Registry', 'RegistryClass']
#
# Functions to help replace bytes with strings in astropy tables that came from VOTABLEs
#
def sval(val):
"""
Returns a string value for the given object. When the object is an instance of bytes,
utf-8 decoding is used.
Parameters
----------
val : object
The object to convert
Returns
-------
string
The input value converted (if needed) to a string
"""
if (isinstance(val, bytes)):
return str(val, 'utf-8')
else:
return str(val)
# Create a version of sval() that operates on a whole column.
svalv = np.vectorize(sval)
def sval_whole_column(single_column):
"""
Returns a new column whose values are the string versions of the values
in the input column. The new column also keeps the metadata from the input column.
Parameters
----------
single_column : astropy.table.Column
The input column to stringify
Returns
-------
astropy.table.Column
Stringified version of input column
"""
new_col = svalv(single_column)
new_col.meta = single_column.meta
return new_col
def stringify_table(t):
"""
Substitutes strings for bytes values in the given table.
Parameters
----------
t : astropy.table.Table
An astropy table assumed to have been created from a VOTABLE.
Returns
-------
astropy.table.Table
The same table as input, but with bytes-valued cells replaced by strings.
"""
# This mess will look for columns that should be strings and convert them.
if len(t) == 0:
return # Nothing to convert
scols = []
for col in t.columns:
colobj = t.columns[col]
if (colobj.dtype == 'object' and isinstance(t[colobj.name][0], bytes)):
scols.append(colobj.name)
for colname in scols:
t[colname] = sval_whole_column(t[colname])
class RegistryClass(BaseQuery):
"""
Registry query class.
"""
def __init__(self):
super(RegistryClass, self).__init__()
self._REGISTRY_TAP_SYNC_URL = conf.registry_tap_url + "/sync"
def query(self, **kwargs):
adql = self._build_adql(**kwargs)
x = """
select b.waveband,b.short_name,a.ivoid,b.res_description,c.access_url,b.reference_url from rr.capability a
natural join rr.resource b
natural join rr.interface c
where a.cap_type='SimpleImageAccess' and a.ivoid like 'ivo://%stsci%'
order by short_name
"""
if 'debug' in kwargs and kwargs['debug']==True: print ('Registry: sending query ADQL = {}\n'.format(adql))
if 'method' in kwargs:
method = kwargs['method']
else:
method = 'POST'
url = self._REGISTRY_TAP_SYNC_URL
tap_params = {
"request": "doQuery",
"lang": "ADQL",
"query": adql
}
response = self._request(method, url, data=tap_params)
if 'debug' in kwargs and kwargs['debug']==True: print('Queried: {}\n'.format(response.url))
aptable = self._astropy_table_from_votable_response(response)
return aptable
def _build_adql(self, **kwargs):
# Default values
service_type=""
keyword=""
waveband=""
source=""
order_by=""
logic_string=" and "
# Find the keywords we recognize
for key,val in kwargs.items():
if (key == 'service_type'):
service_type = val
elif (key == 'keyword'):
keyword = val
elif (key == 'waveband'):
waveband = val
elif (key == 'source'):
source = val
elif (key == 'order_by'):
order_by = val
elif (key == 'logic_string'):
logic_string = val
##
if "image" in service_type.lower():
service_type="simpleimageaccess"
elif "spectr" in service_type.lower():
service_type="simplespectralaccess"
elif "cone" in service_type.lower():
service_type="conesearch"
else:
service_type="tableaccess"
query_retcols="""
select res.waveband,res.short_name,cap.ivoid,res.res_description,
int.access_url, res.reference_url
from rr.capability cap
natural join rr.resource res
natural join rr.interface int
"""
x = """
select b.waveband,b.short_name,a.ivoid,b.res_description,c.access_url,b.reference_url from rr.capability a
natural join rr.resource b
natural join rr.interface c
"""
query_where="where "
wheres=[]
        if service_type != "":
            wheres.append("cap.cap_type='{}'".format(service_type))
        if source != "":
            wheres.append("cap.ivoid like '%{}%'".format(source))
        if waveband != "":
            wheres.append("res.waveband like '%{}%'".format(waveband))
        if keyword != "":
keyword_where = """
(res.res_description like '%{}%' or
res.res_title like '%{}%' or
cap.ivoid like '%{}%')
""".format(keyword, keyword, keyword)
wheres.append(keyword_where)
query_where=query_where+logic_string.join(wheres)
        if order_by != "":
            query_order = "order by {}".format(order_by)
        else:
            query_order = ""
        # Join with a space so the where-clause and "order by" do not run together
        query = query_retcols + query_where + " " + query_order
return query
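    # A worked example of what _build_adql() produces (whitespace condensed for
    # readability). _build_adql(service_type='image', source='stsci') yields roughly:
    #
    #   select res.waveband, res.short_name, cap.ivoid, res.res_description,
    #          int.access_url, res.reference_url
    #   from rr.capability cap
    #     natural join rr.resource res
    #     natural join rr.interface int
    #   where cap.cap_type='simpleimageaccess' and cap.ivoid like '%stsci%'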
def _astropy_table_from_votable_response(self, response):
"""
Takes a VOTABLE response from a web service and returns an astropy table.
Parameters
----------
response : requests.Response
Response whose contents are assumed to be a VOTABLE.
Returns
-------
astropy.table.Table
Astropy Table containing the data from the first TABLE in the VOTABLE.
"""
# The astropy table reader would like a file-like object, so convert
# the response content a byte stream. This assumes Python 3.x.
#
# (The reader also accepts just a string, but that seems to have two
# problems: It looks for newlines to see if the string is itself a table,
# and we need to support unicode content.)
file_like_content = io.BytesIO(response.content)
# The astropy table reader will auto-detect that the content is a VOTABLE
# and parse it appropriately.
aptable = Table.read(file_like_content)
# String values in the VOTABLE are stored in the astropy Table as bytes instead
# of strings. To makes accessing them more convenient, we will convert all those
# bytes values to strings.
stringify_table(aptable)
return aptable
Registry = RegistryClass()
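# A minimal usage sketch (illustrative; the keyword arguments are the ones recognized
# by _build_adql() above, and the TAP endpoint is taken from conf.registry_tap_url):
#
#   results = Registry.query(service_type='image', source='stsci', order_by='short_name')
#   print(results.colnames)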
|
StarcoderdataPython
|
1690092
|
import pytest
from bids.layout import BIDSLayout
from os.path import join, abspath, sep
from bids.tests import get_test_data_path
@pytest.fixture(scope='module')
def layout():
data_dir = join(get_test_data_path(), '7t_trt')
return BIDSLayout(data_dir)
def test_bold_construction(layout):
ents = dict(subject='01', run=1, task='rest', suffix='bold')
assert layout.build_path(ents) == "sub-01/func/sub-01_task-rest_run-1_bold.nii.gz"
ents['acquisition'] = 'random'
assert layout.build_path(ents) == "sub-01/func/sub-01_task-rest_acq-random_run-1_bold.nii.gz"
def test_invalid_file_construction(layout):
# no hyphens allowed!
ents = dict(subject='01', run=1, task='resting-state', suffix='bold')
with pytest.raises(ValueError):
layout.build_path(ents)
target = "sub-01/func/sub-01_task-resting-state_run-1_bold.nii.gz"
assert layout.build_path(ents, validate=False) == target
def test_failed_file_construction(layout):
ents = dict(subject='01', fakekey='foobar')
with pytest.raises(ValueError):
layout.build_path(ents, strict=True)
@pytest.mark.parametrize("strict", [True, False])
@pytest.mark.parametrize("validate", [True, False])
def test_insufficient_entities(layout, strict, validate):
"""Check https://github.com/bids-standard/pybids/pull/574#discussion_r366447600."""
with pytest.raises(ValueError):
layout.build_path({'subject': '01'}, strict=strict, validate=validate)
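# A small interactive sketch of the same API the tests above exercise (illustrative;
# the expected path follows the BIDS filename template used by build_path()):
#
#   layout = BIDSLayout(join(get_test_data_path(), '7t_trt'))
#   layout.build_path({'subject': '02', 'task': 'rest', 'run': 2, 'suffix': 'bold'})
#   # -> 'sub-02/func/sub-02_task-rest_run-2_bold.nii.gz'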
|
StarcoderdataPython
|
97935
|
"""empty message
Revision ID: <PASSWORD>
Revises: <PASSWORD>
Create Date: 2018-07-18 12:08:54.854137
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
from alembic.ddl import postgresql
revision = '<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('resource', 'approved')
    # Use server_default so the NOT NULL constraint can be satisfied for existing rows
    op.add_column('resource', sa.Column('approved', sa.String(), nullable=False, server_default="unapproved"))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('resource', 'approved')
    op.add_column('resource', sa.Column('approved', sa.Boolean(), nullable=False, server_default=sa.text('false')))
# ### end Alembic commands ###
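# A data-preserving alternative (sketch only; assumes a PostgreSQL backend and that the
# existing boolean values should be mapped onto the new string states instead of being
# dropped along with the column):
#
#   op.alter_column(
#       'resource', 'approved',
#       type_=sa.String(),
#       server_default="unapproved",
#       postgresql_using="CASE WHEN approved THEN 'approved' ELSE 'unapproved' END",
#   )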
|
StarcoderdataPython
|
3282513
|
<reponame>asl-usgs/earthquake-heliplot<filename>lib/parallelplotVelocity.py<gh_stars>1-10
#!/usr/bin/env python
# --------------------------------------------------------------
# Filename: parallelplotVelocity.py
# --------------------------------------------------------------
# Purpose: Plots velocity data (filtered/magnified stream)
# ---------------------------------------------------------------
# Methods:
# launchWorkers() - multiprocessing pool for plotting
# plotVelocity() - plots filtered/magnified streams
# ---------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt # will use title, figure, savefig methods
from obspy.core.utcdatetime import UTCDateTime
from datetime import datetime, timedelta
import matplotlib.image as img
import multiprocessing
from multiprocessing import Manager, Value
import os, sys, string, subprocess
import time, signal, glob, re
from kill import Kill
from interrupt import KeyboardInterruptError, TimeoutExpiredError
# Unpack self from parallel method args and call method plotVelocity()
def unwrap_self_plotVelocity(args, **kwargs):
return ParallelPlotVelocity.plotVelocity(*args, **kwargs)
class ParallelPlotVelocity(object):
def __init__(self):
# Initializes kill object for pool
self.killproc = Kill()
def plotVelocity(self, stream, stationName, filters):
# --------------------------------
# Plots filtered/magnified streams
# --------------------------------
try:
streamID = stream[0].get_id()
magnification = self.magnification[streamID] # magnification for station[i]
trspacing = self.vertrange/magnification * 1000.0 # trace spacing
# Get filter coefficients for every station
if streamID in filters['streamID']:
filtertype = filters['filtertype']
freqX = filters['freqX']
freqY = filters['freqY']
# set bounds x-label
if filtertype == "highpass":
bounds = str(freqX)
elif filtertype == "bandpass":
bounds = str(freqX) + "-" + str(freqY)
elif filtertype == "bandstop":
bounds = str(freqX) + "-" + str(freqY)
elif filtertype == "lowpass":
bounds = str(freqX)
# pass explicit figure instance to set correct title and attributes
# pix, resx, and resy are from station.cfg
# - pix is dpi (arbitrary, but using 80 is easiest)
# - resx and resy are in pixels - should be 800 x 600 to match expectations
# of earthquake.usgs.gov Monitoring web pages
dpl = plt.figure(dpi=self.pix, figsize=(self.resx/self.pix, self.resy/self.pix))
titlestartTime = self.datetimePlotstart.strftime("%Y/%m/%d %H:%M")
titlestartTime = titlestartTime + " UTC"
plotstart = self.datetimePlotstart
plotend = self.datetimePlotend
plotday = plotstart.day
plothour = plotstart.hour
# Need to check for streams that have a start time greater
# than the query time, then trim based on the nearest hour
streamstart = stream[0].stats.starttime.datetime
streamstart = streamstart.strftime("%Y%m%d_%H:%M:00")
streamstart = UTCDateTime(streamstart)
if (streamstart.datetime <= plotstart.datetime):
#print streamID + ": " + str(streamstart.datetime) + " < " + str(plotstart.datetime) + "\n"
# Trim stream to starttime of plot
# Round up to the nearest sample, this will take care
# of sample drift for non-Q330 signals
stream.trim(starttime=plotstart, endtime=plotend, nearest_sample=True) # selects sample nearest trim time
# Check trimmed hour and round if != plotstart hour
trimmedhour = stream[0].stats.starttime.hour
if (trimmedhour != plothour):
stream[0].stats.starttime.day = plotday
stream[0].stats.starttime.hour = plothour
stream[0].stats.starttime.minute = 0
stream[0].stats.starttime.second = 0
stream[0].stats.starttime.microsecond = 0
elif (streamstart.datetime > plotstart.datetime):
#print streamID + ": " + str(streamstart.datetime) + " > " + str(plotstart.datetime) + "\n"
# Trim stream to nearest hour when
# the plot start time is less than
# the stream start time
year = streamstart.year # stream stats (date/time)
month = streamstart.month
day = streamstart.day
hour = streamstart.hour
minute = 0 # 00 to account for shift
second = 0
currtime = datetime(year, month, day, hour, minute, second, 0)
if int(hour) != 23:
# increase trim time to next hour if h != 23
trimtime = currtime + timedelta(hours=1)
else:
# trim to next day if h = 23
hour = 0 # set time to 00:00:00
minute = 0
second = 0
trimtime = datetime(year, month, day, hour, minute, second, 0) + timedelta(days=1)
trimtime = UTCDateTime(trimtime)
startday = trimtime.day
starthour = trimtime.hour
stream.trim(starttime=trimtime, endtime=plotend, nearest_sample=True) # selects sample nearest trim time
# Check trimmed hour and round if != trimhour
trimmedhour = stream[0].stats.starttime.hour
if (trimmedhour != starthour):
stream[0].stats.starttime.day = startday
stream[0].stats.starttime.hour = starthour
stream[0].stats.starttime.minute = 0
stream[0].stats.starttime.second = 0
stream[0].stats.starttime.microsecond = 0
print("\nPlotting: " + str(stream))
            stream.plot(starttime=plotstart,
endtime=plotend,
type='dayplot', interval=60,
vertical_scaling_range=self.vertrange,
right_vertical_labels=False, number_of_ticks=7,
one_tick_per_line=True, color=['k'], fig=dpl,
show_y_UTC_label=True, title_size=-1)
# set title, x/y labels and tick marks
plt.title(streamID.replace('.',' ') + " " + "Starts: " +
str(titlestartTime), fontsize=12)
plt.xlabel('Time [m]\n(%s: %sHz Trace Spacing: %.2e mm/s)' %
(str(filtertype), str(bounds), trspacing), fontsize=10)
plt.ylabel('Time [h]', fontsize=10)
locs, labels = plt.yticks() # pull current locs/labels
hours = [0 for i in range(24)] # 24 hours
# Create list of hours (if missing data, fill in beginning hours)
if len(labels) < len(hours):
tmptime = re.split(':', labels[0].get_text())
starthour = int(tmptime[0])
hour = 0 # fill in hour
lastindex = len(hours) - len(labels)
i = lastindex
# Stream start hour can be < or > than the plot
# start hour (if > then subtract, else start from
# plot hour and add)
# **NOTE: This fixes negative indexing
if (plothour < starthour):
while (i > 0): # fill beginning hours
hour = starthour - i
hours[lastindex-i] = str(hour)+":00"
i = i - 1
i = 0
for i in range(len(labels)): # fill remaining hours
tmptime = re.split(':', labels[i].get_text())
hour = int(tmptime[0])
hours[i+lastindex] = str(hour)+":00"
else: # plothour > starthour
while (i > 0):
if (i > starthour):
hour = plothour + (lastindex-i)
hours[lastindex-i] = str(hour) +":00"
elif (i <= starthour): # start at 0
hour = starthour - i
hours[lastindex-i] = str(hour)+":00"
i = i - 1
i = 0
for i in range(len(labels)): # fill remaining hours
tmptime = re.split(':', labels[i].get_text())
hour = int(tmptime[0])
hours[i+lastindex] = str(hour)+":00"
elif len(labels) == len(hours):
for i in range(len(labels)): # extract hours from labels
tmptime = re.split(':', labels[i].get_text())
hour = int(tmptime[0])
hours[i] = str(hour)+":00"
# Create tick position list
position = [i+0.5 for i in range(24)]
position = position[::-1] # reverse list
plt.yticks(position, hours, fontsize=9) # times in position
#dpi=self.pix, size=(self.resx,self.resy))
# port to 3.6 cut off bottom legend, this fixes the problem
plt.gcf().subplots_adjust(bottom=0.15)
# GHSC version - use station as plot name
name_components = stationName.split(".")
station = name_components[1]
plt.savefig(station + "." + self.imgformat)
plt.close(dpl)
except KeyboardInterrupt:
print("KeyboardInterrupt plotVelocity(): terminate workers...")
raise KeyboardInterruptError()
return # return to plotVelocity() pool
except Exception as e:
print("UnknownException plotVelocity(): " + str(e))
return
def launchWorkers(self, streams, plotspath, stationName,
magnification, vertrange, datetimePlotstart,
datetimePlotend, resx, resy, pix, imgformat,
filters):
# ------------------------
# Pool of plotting workers
# ------------------------
print("------plotVelocity() Pool------\n")
self.magnification = magnification
self.vertrange = vertrange
self.datetimePlotstart = datetimePlotstart
self.datetimePlotend = datetimePlotend
self.resx = resx
self.resy = resy
self.pix = pix
self.imgformat = imgformat
streamlen = len(streams)
# clear output plots dir
os.chdir(plotspath)
imgfiles = glob.glob(plotspath+"*")
for f in imgfiles:
os.remove(f) # remove tmp png files from OutputPlots dir
# Initialize multiprocessing pools for plotting
PROCESSES = multiprocessing.cpu_count()
print("PROCESSES: " + str(PROCESSES))
print("streamlen: " + str(streamlen) + "\n")
pool = multiprocessing.Pool(PROCESSES)
try:
self.poolpid = os.getpid()
self.poolname = "plotVelocity()"
#print "pool PID: " + str(self.poolpid) + "\n"
pool.map(unwrap_self_plotVelocity, list(zip([self]*streamlen,
streams, stationName, filters))) # thread plots
pool.close()
pool.join()
print("\n------plotVelocity() Pool Complete------\n\n")
except KeyboardInterrupt:
print("KeyboardInterrupt parallelplotVelocity(): terminating pool...")
# find/kill all child processes
killargs = {'pid': self.poolpid, 'name': self.poolname}
self.killproc.killPool(**killargs)
except Exception as e:
print("Exception parallelplotVelocity(): terminating pool: " + str(e))
killargs = {'pid': self.poolpid, 'name': self.poolname}
self.killproc.killPool(**killargs)
else:
# cleanup (close pool of workers)
pool.close()
pool.join()
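# A minimal call sketch (illustrative; every value below is hypothetical and would
# normally come from station.cfg and the data-query stage of the heliplot pipeline):
#
#   plotter = ParallelPlotVelocity()
#   plotter.launchWorkers(streams, "/path/to/OutputPlots/", station_names,
#                         magnification, vertrange, datetimePlotstart, datetimePlotend,
#                         resx=800, resy=600, pix=80, imgformat="png", filters=filters)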
|
StarcoderdataPython
|
3293599
|
<filename>src/mygrad/math/misc/funcs.py
from mygrad.tensor_base import Tensor
from .ops import Abs, Cbrt, Maximum, Minimum, Sqrt
__all__ = ["abs", "absolute", "cbrt", "clip", "sqrt", "maximum", "minimum"]
def abs(a, constant=False):
""" ``f(a) -> abs(a)``
Parameters
----------
a : array_like
constant : bool, optional(default=False)
If ``True``, the returned tensor is a constant (it
does not back-propagate a gradient)
Returns
-------
mygrad.Tensor
Notes
-----
The derivative at a == 0 returns nan"""
return Tensor._op(Abs, a, constant=constant)
absolute = abs
def sqrt(a, constant=False):
""" ``f(a) -> sqrt(a)``
Parameters
----------
a : array_like
constant : bool, optional(default=False)
If ``True``, the returned tensor is a constant (it
does not backpropagate a gradient)
Returns
-------
mygrad.Tensor"""
return Tensor._op(Sqrt, a, constant=constant)
def cbrt(a, constant=False):
""" ``f(a) -> cbrt(a)``
Parameters
----------
a : array_like
constant : bool, optional(default=False)
If ``True``, the returned tensor is a constant (it
does not backpropagate a gradient)
Returns
-------
mygrad.Tensor"""
return Tensor._op(Cbrt, a, constant=constant)
def maximum(a, b, constant=False):
""" Element-wise maximum of array elements.
Parameters
----------
a : array_like
b : array_like
constant : bool, optional(default=False)
If ``True``, the returned tensor is a constant (it
does not backpropagate a gradient)
Returns
-------
mygrad.Tensor
Notes
-----
The gradient does not exist where a == b; we use a
value of 0 here."""
return Tensor._op(Maximum, a, b, constant=constant)
def minimum(a, b, constant=False):
""" Element-wise minimum of array elements.
Parameters
----------
a : array_like
b : array_like
constant : bool, optional(default=False)
If ``True``, the returned tensor is a constant (it
does not backpropagate a gradient)
Returns
-------
mygrad.Tensor
Notes
-----
The gradient does not exist where a == b; we use a
value of 0 here."""
return Tensor._op(Minimum, a, b, constant=constant)
def clip(a, a_min, a_max, constant=False):
""" Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
    Equivalent to ``mg.minimum(a_max, mg.maximum(a, a_min))``.
No check is performed to ensure ``a_min < a_max``.
This docstring was adapted from that of `numpy.clip`
Parameters
----------
a : array_like
Array containing elements to clip.
a_min : Optional[float, array_like]
Minimum value. If `None`, clipping is not performed on lower
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
a_max : Optional[float, array_like]
Maximum value. If `None`, clipping is not performed on upper
interval edge. Not more than one of `a_min` and `a_max` may be
`None`. If `a_min` or `a_max` are array_like, then the three
arrays will be broadcasted to match their shapes.
constant : bool, optional(default=False)
If ``True``, the returned tensor is a constant (it
does not backpropagate a gradient)
Returns
-------
Tensor
A tensor with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
Examples
--------
>>> import mygrad as mg
>>> a = mg.arange(10)
>>> mg.clip(a, 1, 8)
Tensor([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> a
Tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> mg.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
Tensor([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])"""
if a_min is None and a_max is None:
raise ValueError("`a_min` and `a_max` cannot both be set to `None`")
if a_min is not None:
a = maximum(a_min, a, constant=constant)
if a_max is not None:
a = minimum(a_max, a, constant=constant)
return a
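# A minimal usage sketch (illustrative; mirrors the docstring examples above):
#
#   >>> import mygrad as mg
#   >>> a = mg.arange(10)
#   >>> mg.clip(a, 2, 7)
#   Tensor([2, 2, 2, 3, 4, 5, 6, 7, 7, 7])
#
# Since clip() is built from maximum() and minimum(), the same result can be written
# as mg.minimum(7, mg.maximum(a, 2)).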
|
StarcoderdataPython
|
1623304
|
<reponame>ottomattas/INFOMAIGT-AGENTS
#! /usr/bin/env -S python -u
from game import Game
from random_agent import RandomAgent
from bandit_agent import BanditAgent
from neural_network_agent import NNAgent
import argparse, time, cProfile
import numpy as np
import multiprocessing as mp
from collections import Counter
from itertools import starmap
import tensorflow as tf
def main(args):
if args.input:
# Load dataset
data = read_games(args.input)
#print("Game 1 array is \n", data[0])
# Count the board states
board_state_count = 0
# For each _ element, and game element
for _, game in data:
# For each _ element as only the number of elements is relevant
for _, _, _ in game:
board_state_count += 1
#print("Board state count is ",board_state_count)
# Create array for the input layer
# (Columns: each possible move, represented in one-hot encoding
# Rows: each possible board state)
x_train = np.zeros((board_state_count,75),dtype=int)
#print("X train is \n",x_train)
# Create array for the output layer with the proper shape
# (For each board state, save the winner)
y_train = np.zeros(board_state_count,dtype=int)
#y_train = tf.keras.utils.to_categorical(np.zeros(board_state_count,dtype=int),3)
#print("Y train is \n",y_train)
# Create indexes for game and board
game_index = 0
board_index = 0
# Loop over all games and boards
for winner, game in data:
game_index += 1
for player, move, board in game:
#print("Player is ", player)
#print("Move is ", move)
#print("Board is\n", board)
#print("Winner is ", winner)
##########################
# Create the input layer #
##########################
# For each player, we want to look it from their perspective.
# Set each player's move as 0 0 1 in x_train.
# Define a list for appending the one-hot encoded players
lst = []
# If player 1 move
if player == 1:
for x in range(5):
for y in range(5):
# When position value is 1 (player 1 move)
if board[x, y] == 1:
# Append 0 0 1
lst.append(0)
lst.append(0)
lst.append(1)
# When position value is 2 (player 2 move)
elif board[x, y] == 2:
# Append 0 1 0
lst.append(0)
lst.append(1)
lst.append(0)
# When position value is 0 (no player move yet)
else:
# Append 1 0 0
lst.append(1)
lst.append(0)
lst.append(0)
# Save the one-hot encoded list in the x_train array
# at position board_index
x_train[board_index] = np.array(lst)
#print("After player 1 move, encoded board is now \n", x_train[board_index])
#print("After player 1 move, x_train is now \n", x_train)
# If player 2 move
else:
for x in range(5):
for y in range(5):
# When position value is 2 (player 2 move)
if board[x, y] == 2:
# Append 0 0 1
lst.append(0)
lst.append(0)
lst.append(1)
# When position value is 1 (player 1 move)
elif board[x, y] == 1:
# Append 0 1 0
lst.append(0)
lst.append(1)
lst.append(0)
# When position value is 0 (no player move yet)
else:
# Append 1 0 0
lst.append(1)
lst.append(0)
lst.append(0)
# Save the one-hot encoded list in the x_train array
# at position board_index
x_train[board_index] = np.array(lst)
#print("After player 2 move, encoded board is now \n", x_train[board_index])
#print("After player 2 move, x_train is now \n", x_train)
###########################
# Create the output layer #
###########################
# If draw
if winner == 0:
y_train[board_index] = 0
                # If the player to move is the winner
                elif winner == player:
                    y_train[board_index] = 1
                # If the opponent of the player to move is the winner
                else:
                    y_train[board_index] = 2
#print("y_train is", y_train)
board_index += 1
#print("This is game nr: ", game_index)
#print("This is board nr: ", board_index)
############
# Training #
############
# Create the tf.keras.Sequential model by stacking layers.
# Choose an optimizer and loss function for training.
model = tf.keras.models.Sequential([
            tf.keras.layers.InputLayer(input_shape=(75,)),  # 75-element one-hot board vector
tf.keras.layers.Dense(75, activation='relu'),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(50),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(25),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(3, activation='softmax') # win/loss/draw, so 3
])
# # # Compile the model
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# # # Adjust the model parameters to minimize the loss
model.fit(x_train, y_train, batch_size=200, epochs=5)
# Checks the models performance
#model.evaluate(x_test, y_test, verbose=2)
# Save the model
model.save("nn2_model", overwrite=False)
work = []
for i in range(args.games):
# swap order every game
if i % 2 == 0:
players = [BanditAgent(args.time,1), NNAgent(2)]
else:
players = [NNAgent(2), BanditAgent(args.time,1)]
work.append((args.size,
read_objectives(args.objectives),
players,
args.output,
args.print_board))
start = time.perf_counter()
# the tests can be run in parallel, or sequentially
# it is recommended to only use the parallel version for large-scale testing
# of your agent, as it is harder to debug your program when enabled
if args.parallel == None or args.output != None:
results = starmap(play_game, work)
else:
# you probably shouldn't set args.parallel to a value larger than the
# number of cores on your CPU, as otherwise agents running in parallel
# may compete for the time available during their turn
with mp.Pool(args.parallel) as pool:
results = pool.starmap(play_game, work)
stats = Counter(results)
end = time.perf_counter()
print(f'Total score {stats[1]}/{stats[2]}/{stats[0]}.')
print(f'Total time {end - start} seconds.')
def play_game(boardsize, objectives, players, output, print_board = None):
game = Game.new(boardsize, objectives, players, print_board == 'all')
if output:
with open(output, 'a') as outfile:
print(boardsize, file = outfile)
winner = game.play(outfile)
print(f'winner={winner.id if winner else 0}', file = outfile)
else:
winner = game.play()
if print_board == 'final':
game.print_result(winner)
return 0 if winner == None else winner.id
def read_objectives(filename):
with open(filename) as file:
lines = [line.strip() for line in file]
i = 0
shapes = []
while i < len(lines):
shape = []
# shapes are separated by blank lines
while i < len(lines) and lines[i].strip() != '':
shape_line = []
for char in lines[i].strip():
shape_line.append(char == 'x')
shape.append(shape_line)
i += 1
shapes.append(np.transpose(np.array(shape)))
i += 1
return shapes
def read_games(filename):
with open(filename) as file:
lines = list(file)
games = []
i = 0
while i < len(lines):
game = []
boardsize = int(lines[i])
i += 1
while not lines[i].startswith('winner'):
turn = int(lines[i])
i += 1
move = [int(x) for x in lines[i].split(',')]
i += 1
board = np.zeros((boardsize, boardsize), dtype = int)
for y in range(boardsize):
row = lines[i].split(',')
for x in range(boardsize):
board[(x, y)] = int(row[x])
i += 1
game.append((turn, move, board))
winner = int(lines[i].split('=')[1])
games.append((winner, game))
i += 1
return games
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--size', type = int, default = 10,
help = 'The size of the board.')
parser.add_argument('--games', type = int, default = 1,
help = 'The number of games to play.')
parser.add_argument('--time', type = int, default = 10,
help = 'The allowed time per move, in milliseconds.')
parser.add_argument('--print-board', choices = ['all', 'final'],
help = 'Show the board state, either every turn or only at the end.')
parser.add_argument('--parallel', type = int,
help = 'Run multiple games in parallel. Only use for large-scale '
'testing.')
parser.add_argument('--output',
help = 'Write training data to the given file.')
parser.add_argument('--input',
help = 'Read training data from the given file.')
parser.add_argument('objectives',
help = 'The name of a file containing the objective shapes. The file '
'should contain a rectangle with x on positions that should be '
'occupied, and dots on other positions. Separate objective shapes '
'should be separated by a blank line.')
args = parser.parse_args()
#cProfile.run('main(args)')
main(args)
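# A worked example of the per-cell one-hot encoding built in main() above, seen from the
# perspective of the player to move (player 1 here): an empty cell encodes as 1 0 0, the
# player's own stone as 0 0 1, and the opponent's stone as 0 1 0. A board row containing
# the values [0, 1, 2] therefore contributes [1, 0, 0, 0, 0, 1, 0, 1, 0] to x_train, and a
# full 5x5 board yields the 75-element vector expected by the network's input layer.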
|
StarcoderdataPython
|
1730307
|
from enum import Enum
class TankBody(Enum):
VT1 = 1
VT2 = 2
VT3 = 3
VT4 = 4
class Tank:
def __init__(self) -> None:
self.body:TankBody = TankBody.VT1
        self.hasExtraArmour: bool = False  # extra armour
        self.hasAutomaticWeaponStation: bool = False  # automatic weapon station
        self.hasAirConditioner: bool = False  # air conditioner
        self.hasIRCM: bool = False  # infrared countermeasures
        self.hasActiveDefence: bool = False  # active defence system
def cost(self)->int:
"""坦克报价"""
price = 0
if self.body is TankBody.VT1:
price += 30000
elif self.body is TankBody.VT2:
price += 50000
elif self.body is TankBody.VT3:
price += 70000
elif self.body is TankBody.VT4:
price += 100000
else:
pass
if self.hasExtraArmour:
price += 10000
if self.hasAutomaticWeaponStation:
price += 20000
if self.hasAirConditioner:
price += 3000
if self.hasIRCM:
price += 30000
if self.hasActiveDefence:
price += 35000
return price
if __name__ == "__main__":
tank1 = Tank()
tank1.body = TankBody.VT4
tank1.hasActiveDefence = True
tank1.hasAirConditioner = True
tank1.hasAutomaticWeaponStation = True
tank1.hasExtraArmour = True
tank1.hasIRCM = True
print(tank1.cost())
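# Worked cost example for the configuration above: a VT4 hull with every option fitted is
# 100000 + 10000 + 20000 + 3000 + 30000 + 35000 = 198000, which is what tank1.cost() prints.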
|
StarcoderdataPython
|
4838075
|
<reponame>ValRat/raster-vision
from copy import deepcopy
import rastervision as rv
from rastervision.command import (CommandConfig, CommandConfigBuilder,
BundleCommand)
from rastervision.protos.command_pb2 \
import CommandConfig as CommandConfigMsg
from rastervision.rv_config import RVConfig
from rastervision.data import SceneConfig
from rastervision.command.utils import (check_task_type, check_analyzers_type,
check_backend_type)
class BundleCommandConfig(CommandConfig):
def __init__(self, root_uri, task, backend, scene, analyzers):
super().__init__(rv.BUNDLE, root_uri)
self.task = task
self.backend = backend
self.scene = scene
self.analyzers = analyzers
def create_command(self, tmp_dir=None):
if not tmp_dir:
_tmp_dir = RVConfig.get_tmp_dir()
tmp_dir = _tmp_dir.name
else:
_tmp_dir = tmp_dir
retval = BundleCommand(self)
retval.set_tmp_dir(_tmp_dir)
return retval
def to_proto(self):
msg = super().to_proto()
task = self.task.to_proto()
backend = self.backend.to_proto()
scene = self.scene.to_proto()
analyzers = list(map(lambda a: a.to_proto(), self.analyzers))
b = CommandConfigMsg.BundleConfig(
task=task, backend=backend, scene=scene, analyzers=analyzers)
msg.MergeFrom(CommandConfigMsg(bundle_config=b))
return msg
@staticmethod
def builder():
return BundleCommandConfigBuilder()
class BundleCommandConfigBuilder(CommandConfigBuilder):
def __init__(self, prev=None):
super().__init__(prev)
if prev is None:
self.task = None
self.backend = None
self.scene = None
self.analyzers = None
else:
self.task = prev.task
self.backend = prev.backend
self.scene = prev.scene
self.analyzers = prev.analyzers
def validate(self):
super().validate()
if self.task is None:
raise rv.ConfigError('Task not set for BundleCommandConfig. '
'Use with_task or with_experiment')
check_task_type(self.task)
if self.backend is None:
raise rv.ConfigError('Backend not set for BundleCommandConfig. '
'Use with_backend or with_experiment')
check_backend_type(self.backend)
if self.scene is None:
raise rv.ConfigError(
'Template scene not set for BundleCommandConfig. '
'Use with_scene or with_experiment')
if not isinstance(self.scene, SceneConfig):
raise rv.ConfigError(
'Template scene must be of class SceneConfig, got {}'.format(
type(self.scene)))
if self.analyzers is None:
raise rv.ConfigError('Analyzers not set for BundleCommandConfig. '
'Use with_analyzers or with_experiment')
check_analyzers_type(self.analyzers)
def build(self):
self.validate()
return BundleCommandConfig(self.root_uri, self.task, self.backend,
self.scene, self.analyzers)
def from_proto(self, msg):
b = super().from_proto(msg)
conf = msg.bundle_config
task = rv.TaskConfig.from_proto(conf.task)
backend = rv.BackendConfig.from_proto(conf.backend)
scene = rv.SceneConfig.from_proto(conf.scene)
analyzers = list(map(rv.AnalyzerConfig.from_proto, conf.analyzers))
b = b.with_task(task)
b = b.with_backend(backend)
b = b.with_scene(scene)
b = b.with_analyzers(analyzers)
return b
def get_root_uri(self, experiment_config):
return experiment_config.bundle_uri
def with_experiment(self, experiment_config):
b = super().with_experiment(experiment_config)
b = b.with_task(experiment_config.task)
b = b.with_backend(experiment_config.backend)
b = b.with_scene(experiment_config.dataset.all_scenes()[0])
b = b.with_analyzers(experiment_config.analyzers)
return b
def with_task(self, task):
"""Sets a specific task type.
Args:
task: A TaskConfig object.
"""
b = deepcopy(self)
b.task = task
return b
def with_backend(self, backend):
b = deepcopy(self)
b.backend = backend
return b
def with_scene(self, scene):
b = deepcopy(self)
b.scene = scene
return b
def with_analyzers(self, analyzers):
b = deepcopy(self)
b.analyzers = analyzers
return b
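# A minimal builder sketch (illustrative; ``experiment_config`` is a hypothetical,
# fully-populated rv.ExperimentConfig, from which with_experiment() pulls the task,
# backend, template scene and analyzers):
#
#   cmd_cfg = (BundleCommandConfig.builder()
#              .with_experiment(experiment_config)
#              .build())
#   cmd = cmd_cfg.create_command()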
|
StarcoderdataPython
|
121191
|
# -*- coding:utf-8 -*-
from multiprocessing import Pool, Manager
import os
import time
import random
import argparse
import utils
MAX_PROCESS = 5
def executor(name, r_queue):
# print 'Run task %s (%s)...' % (name, os.getpid())
start = time.time()
time.sleep(random.random() * 3)
end = time.time()
# print 'Task %s runs %0.2f seconds.' % (name, (end - start))
r_queue.put('Task %s runs %0.2f seconds.' % (name, (end - start)))
def printer(r_name, r_count, r_queue, lock):
"""
:param r_name:
:param r_count:
:param r_queue:
:return:
"""
i = 0
while i < r_count:
line = r_queue.get(True)
if line:
lock.acquire()
print '[%d]' % (i + 1,),
utils.print_output(line)
lock.release()
i += 1
r_queue.task_done()
def main(r_parser):
"""
:param r_parser: 传入的参数
:return:
"""
r_parser.add_argument("-m", "--module", help="module", required=True)
r_parser.add_argument("-u", "--user", help="remote machine user", required=True)
r_parser.add_argument("-p", "--password", help="remote machine password")
r_parser.add_argument("-d", "--destination", help="destination hosts group", required=True)
r_parser.add_argument("-c", "--command", help="command or shell scripts", required=True)
args = r_parser.parse_args()
module = args.module
user = args.user
password = <PASSWORD>
dest = args.destination
command = args.command
print 'Parent process %s.' % os.getpid()
config = utils.read_config('./rcm.conf')
hosts_lst = utils.get_hosts(config, dest)
print hosts_lst
_exe_command(hosts_lst, module, user, password, command)
def _exe_command(r_hosts_lst, r_module, r_user, r_password, r_command):
"""
:param r_hosts_lst:
:param r_module:
:param r_user:
:param r_password:
:param r_command:
:return:
"""
    # Plain Queue/Lock objects cannot be passed to Pool workers as arguments,
    # so use a Manager to get picklable proxies.
    manager = Manager()
    queue = manager.Queue()
    lock = manager.Lock()
p = Pool(MAX_PROCESS)
# p.apply_async(printer, args=('printer', len(r_hosts_lst), queue))
p.apply_async(printer, args=('printer', 5, queue, lock))
for i in range(5):
p.apply_async(executor, args=(i, queue))
p.close()
p.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='''execute command or scripts on remote machine''')
main(parser)
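# Hypothetical invocation (illustrative; assumes this module is saved as rcm.py and that
# the destination group named with -d exists in ./rcm.conf):
#
#   python rcm.py -m shell -u deploy -p secret -d webservers -c "uptime"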
|
StarcoderdataPython
|
4835379
|
import pytest
from scrapy_autoextract.errors import QueryError, summarize_exception
def test_query_error():
exc = QueryError({"foo": "bar"}, "sample error")
assert str(exc) == "QueryError: message='sample error', query={'foo': 'bar'}"
@pytest.mark.parametrize("exception, message", [
(QueryError({}, "domain xyz is occupied, please retry in 2.2 seconds"),
"/query/domain occupied"),
(QueryError({}, "Another thing"),
"/query/Another thing"),
(ValueError("Value Error"), "/rest/ValueError"),
(TypeError("Type Error"), "/rest/TypeError"),
])
def test_summarize_exception(exception, message):
assert summarize_exception(exception) == message
|
StarcoderdataPython
|