id | text | dataset_id
stringlengths 1-8 | stringlengths 6-1.05M | stringclasses 1 value
---|---|---|
6626358
|
<filename>raiden_contracts/constants.py
from enum import Enum, IntEnum
from eth_utils import to_canonical_address
# Contract names
CONTRACT_ENDPOINT_REGISTRY = 'EndpointRegistry'
CONTRACT_HUMAN_STANDARD_TOKEN = 'HumanStandardToken'
CONTRACT_TOKEN_NETWORK_REGISTRY = 'TokenNetworkRegistry'
CONTRACT_TOKEN_NETWORK = 'TokenNetwork'
CONTRACT_SECRET_REGISTRY = 'SecretRegistry'
CONTRACT_CUSTOM_TOKEN = 'CustomToken'
CONTRACT_CUSTOM_TOKEN_NO_DECIMALS = 'CustomTokenNoDecimals'
CONTRACT_MONITORING_SERVICE = 'MonitoringService'
CONTRACT_RAIDEN_SERVICE_BUNDLE = 'RaidenServiceBundle'
# Deployed contract information
# Deployed to Ropsten revival on 2018-09-03 from
# raiden-contracts@fc1c79329a165c738fc55c3505cf801cc79872e4
ROPSTEN_TOKEN_NETWORK_REGISTRY_ADDRESS = '0xf2a175A52Bd3c815eD7500c765bA19652AB89B30'
ROPSTEN_ENDPOINT_REGISTRY_ADDRESS = '0xEEADDC1667B6EBc7784721B123a6F669B69Eb9bD'
ROPSTEN_SECRET_REGISTRY_ADDRESS = '0x16a25511A92C5ebfc6C30ad98F754e4c820c6822'
# Deployed to Ropsten revival on 2018-09-21 from
# raiden-contracts@bfb24fed3ebda2799e4d11ad1bb5a6de116bd12d
ROPSTEN_LIMITS_TOKEN_NETWORK_REGISTRY_ADDRESS = '0x6cC27CBF184B4177CD3c5D1a39a875aD07345eEb'
ROPSTEN_LIMITS_ENDPOINT_REGISTRY_ADDRESS = '0xcF47EDF0D951c862ED9825F47075c15BEAf5Db1B'
ROPSTEN_LIMITS_SECRET_REGISTRY_ADDRESS = '0x8167a262Fa3Be92F05420675c3b409c64Be3d348'
# Network configurations
START_QUERY_BLOCK_KEY = 'DefaultStartBlock'
class ChainId(Enum):
MAINNET = 1
ROPSTEN = 3
RINKEBY = 4
KOVAN = 42
SMOKETEST = 627
MAINNET = 'mainnet'
ROPSTEN = 'ropsten'
RINKEBY = 'rinkeby'
KOVAN = 'kovan'
SMOKETEST = 'smoketest'
ID_TO_NETWORKNAME = {
ChainId.MAINNET: MAINNET,
ChainId.ROPSTEN: ROPSTEN,
ChainId.RINKEBY: RINKEBY,
ChainId.KOVAN: KOVAN,
ChainId.SMOKETEST: SMOKETEST,
}
NETWORKNAME_TO_ID = {
name: id
for id, name in ID_TO_NETWORKNAME.items()
}
class NetworkType(Enum):
MAIN = 1
TEST = 2
ID_TO_NETWORK_CONFIG = {
ChainId.ROPSTEN: {
NetworkType.TEST: {
'network_type': NetworkType.TEST,
'contract_addresses': {
CONTRACT_ENDPOINT_REGISTRY: to_canonical_address(
ROPSTEN_ENDPOINT_REGISTRY_ADDRESS,
),
CONTRACT_SECRET_REGISTRY: to_canonical_address(ROPSTEN_SECRET_REGISTRY_ADDRESS),
CONTRACT_TOKEN_NETWORK_REGISTRY: to_canonical_address(
ROPSTEN_TOKEN_NETWORK_REGISTRY_ADDRESS,
),
},
# 924 blocks before token network registry deployment
START_QUERY_BLOCK_KEY: 3604000,
},
NetworkType.MAIN: {
'network_type': NetworkType.MAIN,
'contract_addresses': {
CONTRACT_ENDPOINT_REGISTRY: to_canonical_address(
ROPSTEN_LIMITS_ENDPOINT_REGISTRY_ADDRESS,
),
CONTRACT_SECRET_REGISTRY: to_canonical_address(
ROPSTEN_LIMITS_SECRET_REGISTRY_ADDRESS,
),
CONTRACT_TOKEN_NETWORK_REGISTRY: to_canonical_address(
ROPSTEN_LIMITS_TOKEN_NETWORK_REGISTRY_ADDRESS,
),
},
# 153 blocks before token network registry deployment
START_QUERY_BLOCK_KEY: 4084000,
},
},
}
# TokenNetworkRegistry
EVENT_TOKEN_NETWORK_CREATED = 'TokenNetworkCreated'
class ChannelEvent(str, Enum):
OPENED = 'ChannelOpened'
DEPOSIT = 'ChannelNewDeposit'
WITHDRAW = 'ChannelWithdraw'
BALANCE_PROOF_UPDATED = 'NonClosingBalanceProofUpdated'
CLOSED = 'ChannelClosed'
SETTLED = 'ChannelSettled'
UNLOCKED = 'ChannelUnlocked'
# SecretRegistry
EVENT_SECRET_REVEALED = 'SecretRevealed'
# EndpointRegistry
EVENT_ADDRESS_REGISTERED = 'AddressRegistered'
# Timeouts
TEST_SETTLE_TIMEOUT_MIN = 5
TEST_SETTLE_TIMEOUT_MAX = 100000
DEPLOY_SETTLE_TIMEOUT_MIN = 500 # ~ 2 hours
DEPLOY_SETTLE_TIMEOUT_MAX = 555428 # ~ 3 months
class MessageTypeId(IntEnum):
BALANCE_PROOF = 1
BALANCE_PROOF_UPDATE = 2
WITHDRAW = 3
COOPERATIVE_SETTLE = 4
class ChannelState(IntEnum):
NONEXISTENT = 0
OPENED = 1
CLOSED = 2
SETTLED = 3
REMOVED = 4
# Temporary deposit limits for the Red Eyes release in WEI
MAX_ETH_CHANNEL_PARTICIPANT = int(0.075 * 10**18)
MAX_ETH_TOKEN_NETWORK = int(250 * 10**18)
class ChannelInfoIndex(IntEnum):
SETTLE_BLOCK = 0
STATE = 1
class ParticipantInfoIndex(IntEnum):
DEPOSIT = 0
WITHDRAWN = 1
IS_CLOSER = 2
BALANCE_HASH = 3
NONCE = 4
LOCKSROOT = 5
LOCKED_AMOUNT = 6
|
StarcoderdataPython
|
12833579
|
<reponame>iasbs-isg/PMoE
"""All the models that are used in the experiments:
Mixture of Experts (MoE)(Alternative) with/out shared weights
Predictive U-Net (PU-Net)
Predictive Mixture of Experts (PMoE)
"""
from pathlib import Path
import sys
try:
sys.path.append(str(Path("../").resolve()))
except Exception:
    raise RuntimeError("Can't append the project's root directory to the path")
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from .punet import PredictiveUnet
from .blocks.basics import make_mlp
from .blocks.backbone import get_backbone, get_unet
from utils.nn import freeze
def get_model(cfg):
model_type = cfg.type
assert model_type is not None, "Network type can not be None"
if model_type in ["moe", "moe_alt"]:
return MixtureOfExperts(cfg)
elif model_type in ["moe_shared"]:
return MixtureOfExpertsShared(cfg)
elif model_type in ["punet", "punet_inter"]:
return PUNetExpert(cfg)
elif model_type in ["pmoe", "pmoe+pretrained"]:
assert (
cfg.pmoe.moe_dir != ""
), "MoE pretrained weights directory should be specified"
if model_type == "pmoe+pretrained":
assert (
cfg.pmoe.punet_dir != ""
), "PU-Net pretrained weights directory should be specified"
return PMoE(cfg)
else:
raise ValueError(
f"{model_type} is UNKNOWN, model type should be one of 'moe', 'punet', "
f"'punet_inter', 'pmoe', 'pmoe+pretrained', 'moe_alt'"
)
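# Illustrative usage sketch (not part of the original module): get_model expects an
# attribute-style config; the field names below mirror the checks above, everything
# else (values, paths) is hypothetical.
#
#   from types import SimpleNamespace
#   cfg = SimpleNamespace(type="pmoe", pmoe=SimpleNamespace(moe_dir="runs/moe.pt", punet_dir=""))
#   model = get_model(cfg)  # dispatches to PMoE(cfg)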
class BaseExpert(nn.Module):
"""Expert model"""
def __init__(self, params):
super().__init__()
self.speed_encoder = make_mlp(**params.speed_encoder)
self.command_encoder = make_mlp(**params.command_encoder)
backbone_cfg = (
{**params.backbone.rgb, "n_frames": params.backbone.n_frames}
if params.backbone.type == "rgb"
else {**params.backbone.segmentation, "n_frames": params.backbone.n_frames}
)
self.backbone = (
get_backbone(**backbone_cfg)
if params.backbone.type == "rgb"
else get_unet(**backbone_cfg)
)
self.speed_pred = make_mlp(**params.speed_prediction)
self.action_features = make_mlp(**params.action_head)
        # separate coefficients so they can easily be unfrozen later
action_layer_out_features = params.action_head.dims[-1]
self.alpha = nn.Linear(action_layer_out_features, 1)
self.action_pred = nn.Linear(action_layer_out_features, 4)
def forward(
self, images: torch.Tensor, speed: torch.Tensor, command: torch.Tensor):
"""Forward pass of expert model.
Args:
images: (torch.Tensor) with dimensions: B, T, C, H, W
speed: (torch.Tensor) with dimensions: B, 1
command: (torch.Tensor) with dimensions: B, 4
Returns:
alphas: (torch.Tensor) predicted coefficients
actions: (torch.Tensor) predicted actions [steer, pedal]
pred_speed: (torch.Tensor) predicted speed
"""
speed = self.speed_encoder(speed)
command = self.command_encoder(command)
images = images.view(
images.shape[0], -1, images.shape[-2], images.shape[-1]
) # cat along time dimension
img = self.backbone(images)
# concat features along the last dim to produce tensor (B, 3 * 512)
features = torch.cat([img, speed, command], dim=-1)
pred_speed = self.speed_pred(features)
action_features = self.action_features(features)
mean, std = self.action_pred(action_features).split(2, dim=-1)
std = F.elu(std) + 1
alpha = torch.relu(self.alpha(action_features))
return alpha, mean, std, pred_speed
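    # Shape sketch (illustrative, assuming 512-dimensional image/speed/command features as the
    # comments above suggest):
    #   images : (B, T, C, H, W) -> view(B, T*C, H, W) -> backbone -> (B, 512)
    #   speed  : (B, 1) -> speed_encoder   -> (B, 512)
    #   command: (B, 4) -> command_encoder -> (B, 512)
    #   features = concat -> (B, 1536); action head -> alpha (B, 1), mean (B, 2), std (B, 2)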
class BaseExpertAlt(BaseExpert):
"""Alternative expert model which alpha uses input of the network"""
def __init__(self, params):
super().__init__(params)
self.alpha = nn.Sequential(
*[nn.Linear(1536, 512), nn.ReLU(inplace=True), nn.Linear(512, 1)]
)
def forward(
self, images: torch.Tensor, speed: torch.Tensor, command: torch.Tensor):
speed = self.speed_encoder(speed)
command = self.command_encoder(command)
images = images.view(
images.shape[0], -1, images.shape[-2], images.shape[-1]
) # cat along time dimension
img = self.backbone(images)
# concat features along the last dim to produce tensor (B, 3 * 512)
features = torch.cat([img, speed, command], dim=-1)
pred_speed = self.speed_pred(features)
action_features = self.action_features(features)
mean, std = self.action_pred(action_features).split(2, dim=-1)
std = F.elu(std) + 1
alpha = self.alpha(features)
return alpha, mean, std, pred_speed
class MixtureOfExperts(nn.Module):
def __init__(self, params):
super().__init__()
# number of experts
self.k = params.n_experts
        # experts do not share weights
base = BaseExpert if params.type == "moe" else BaseExpertAlt
self.moe = nn.ModuleList([base(params) for _ in range(self.k)])
def forward(self, images: torch.Tensor, speed: torch.Tensor, command: torch.Tensor):
out = [moe(images, speed, command) for moe in self.moe]
alphas, mean, std, speeds = [], [], [], []
for expert in out:
alphas.append(expert[0])
mean.append(expert[1])
std.append(expert[2])
speeds.append(expert[3])
# alpha: (Batch, #Experts)
alphas = torch.cat(alphas, dim=1)
alphas = F.softmax(alphas, dim=1)
mean = torch.stack(mean, dim=1)
std = torch.stack(std, dim=1)
mixtures = D.Categorical(alphas)
components = D.Independent(D.Normal(mean, std), 1)
actions = D.MixtureSameFamily(mixtures, components)
# speed: (Batch, Expert_idx, Pred_Speed)
return actions, torch.stack(speeds, dim=1)
def sample(
self, images: torch.Tensor, speed: torch.Tensor, command: torch.Tensor
) -> torch.Tensor:
out = [moe(images, speed, command) for moe in self.moe]
alphas, mean, std = [], [], []
for expert in out:
alphas.append(expert[0])
mean.append(expert[1])
std.append(expert[2])
alphas = torch.cat(alphas, dim=1)
alphas = F.softmax(alphas, dim=1)
mean = torch.stack(mean, dim=1)
std = torch.stack(std, dim=1)
mixtures = D.Categorical(alphas)
components = D.Independent(D.Normal(mean, std), 1)
actions = D.MixtureSameFamily(mixtures, components)
return actions.sample()
class MixtureOfExpertsShared(nn.Module):
"""Mixture of Expert (MoE) model with share backbone"""
def __init__(self, params):
super().__init__()
self.speed_encoder = make_mlp(**params.speed_encoder)
self.command_encoder = make_mlp(**params.command_encoder)
backbone_cfg = (
{**params.backbone.rgb, "n_frames": params.backbone.n_frames}
if params.backbone.type == "rgb"
else {**params.backbone.segmentation, "n_frames": params.backbone.n_frames}
)
self.backbone = (
get_backbone(**backbone_cfg)
if params.backbone.type == "rgb"
else get_unet(**backbone_cfg)
)
self.speed_pred = make_mlp(**params.speed_prediction)
self.action_features = make_mlp(**params.action_head)
        # separate coefficients so they can easily be unfrozen later
action_layer_out_features = params.action_head.dims[-1]
self.n_experts = params.n_experts
self.alpha = nn.Linear(action_layer_out_features, params.n_experts)
self.action_pred = nn.Linear(action_layer_out_features, 4 * params.n_experts)
def forward(self, images: torch.Tensor, speed: torch.Tensor, command: torch.Tensor):
"""Forward pass of expert model.
Args:
images: (torch.Tensor) with dimensions: B, T, C, H, W
speed: (torch.Tensor) with dimensions: B, 1
command: (torch.Tensor) with dimensions: B, 4
Returns:
actions: (torch.Tensor) predicted actions [steer, pedal]
pred_speed: (torch.Tensor) predicted speed
"""
speed = self.speed_encoder(speed)
command = self.command_encoder(command)
images = images.view(
images.shape[0], -1, images.shape[-2], images.shape[-1]
) # cat along time dimension
img = self.backbone(images)
# concat features along the last dim to produce tensor (B, 3 * 512)
features = torch.cat([img, speed, command], dim=-1)
pred_speed = self.speed_pred(features)
action_features = self.action_features(features)
# calculate mean and std
mean, std = (
self.action_pred(action_features)
.view(speed.shape[0], self.n_experts, -1)
.split(2, dim=-1)
)
std = F.elu(std) + 1
alpha = F.softmax(self.alpha(action_features), dim=1)
# mixture coefficients
mixture = D.Categorical(alpha)
components = D.Independent(D.Normal(mean, std), 1)
actions = D.MixtureSameFamily(mixture, components)
return actions, pred_speed
def sample(
self, images: torch.Tensor, speed: torch.Tensor, command: torch.Tensor
) -> torch.Tensor:
speed = self.speed_encoder(speed)
command = self.command_encoder(command)
images = images.view(
images.shape[0], -1, images.shape[-2], images.shape[-1]
) # cat along time dimension
img = self.backbone(images)
# concat features along the last dim to produce tensor (B, 3 * 512)
features = torch.cat([img, speed, command], dim=-1)
action_features = self.action_features(features)
# calculate mean and std
mean, std = (
self.action_pred(action_features)
.view(speed.shape[0], self.n_experts, -1)
.split(2, dim=-1)
)
std = F.elu(std) + 1
alpha = F.softmax(self.alpha(action_features), dim=1)
# mixture coefficients
mixture = D.Categorical(alpha)
components = D.Independent(D.Normal(mean, std), 1)
actions = D.MixtureSameFamily(mixture, components)
return actions.sample()
class PUNetExpert(nn.Module):
"""PU-Net as action prediction"""
def __init__(self, params):
super().__init__()
self.return_inter = True if params.type == "punet_inter" else False
params.punet.inter_repr = self.return_inter
self.speed_encoder = make_mlp(**params.speed_encoder)
self.command_encoder = make_mlp(**params.command_encoder)
self.punet = PredictiveUnet(**params.punet)
punet_weights = torch.load(params.punet_path, map_location=params.device)
self.punet.load_state_dict(punet_weights["model"])
self.punet = freeze(self.punet)
# use backbone if PU-Net does not return a vector as the result
self.backbone = (
None
if self.return_inter
else get_backbone(
**{
**params.backbone.rgb,
"n_frames": params.punet.future_frames,
"n_channels": params.punet.num_classes,
}
)
)
self.speed_pred = make_mlp(**params.speed_prediction)
# return actions, use tanh to squash output in range [-1, 1]
# params.action_head.act = 'tanh'
self.action_pred = nn.Sequential(
*[
make_mlp(**params.action_head),
nn.Linear(params.action_head.dims[-1], 2),
],
)
def forward(self, images: torch.Tensor, speed: torch.Tensor, command: torch.Tensor):
speed = self.speed_encoder(speed)
command = self.command_encoder(command)
if not self.return_inter:
images = self.punet(images)
images = images.view(
images.shape[0], -1, images.shape[-2], images.shape[-1]
) # cat time x channel together
img = self.backbone(images)
else:
img = self.punet(images)
# concat features along the last dim to produce tensor (B, 3 * 512)
features = torch.cat([img, speed, command], dim=-1)
return torch.tanh(self.action_pred(features)), self.speed_pred(features)
def sample(
self, images: torch.Tensor, speed: torch.Tensor, command: torch.Tensor
) -> torch.Tensor:
action, _ = self.forward(images, speed, command)
return action
class PMoE(nn.Module):
"""Predictive mixture of experts (PMoE) implementation"""
def __init__(self, params):
super().__init__()
assert params.pmoe.moe_dir is not None, "MoE weights should be provided"
# initialize MoE model
moe_model_dir = params.pmoe.moe_dir
self.moe = MixtureOfExperts(params)
# you may want to use SWA model, therefore strict should be False
self.moe.load_state_dict(torch.load(moe_model_dir), strict=False)
self.moe = freeze(self.moe, params.exclude_freeze, params.verbose)
# initialize PU-Net model
punet_model_dir = params.pmoe.punet_dir
self.punet = PUNetExpert(params)
if punet_model_dir:
self.punet.load_state_dict(torch.load(punet_model_dir), strict=False)
self.punet = freeze(self.punet, params.exclude_freeze, params.verbose)
self.lat_weights = nn.Linear(2, 1)
self.long_weights = nn.Linear(2, 1)
def forward(
self, images: torch.Tensor, speed: torch.Tensor, command: torch.Tensor):
punet_actions, _ = self.punet(images.clone(), speed.clone(), command.clone())
dists, _ = self.moe(images, speed, command)
moe_actions = dists.sample()
lat_actions = self.lat_weights(torch.cat([moe_actions[:, 0: 1], punet_actions[:, 0: 1]], dim=-1))
long_actions = self.long_weights(torch.cat([moe_actions[:, 1:], punet_actions[:, 1:]], dim=-1))
        # -1 is just a dummy value in place of a speed prediction, for the sake of interface consistency
return torch.tanh(torch.cat([lat_actions, long_actions], dim=-1)), -1
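        # Illustrative note: moe_actions and punet_actions are both (B, 2) = [steer, pedal];
        # each 2->1 linear layer learns how to mix the two experts for one control dimension.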
def sample(
self, images: torch.Tensor, speed: torch.Tensor, command: torch.Tensor
) -> torch.Tensor:
"""For the interface to be consistent with other networks"""
actions, _ = self.forward(images, speed, command)
return actions
if __name__ == "__main__":
pass
|
StarcoderdataPython
|
364221
|
<filename>setup.py
from distutils.core import setup
setup(
name = 'loggable-decorator',
packages = ['loggable'],
version = '1.1.1',
description = 'Add a logger attribute to class decorated',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/ateliedocodigo/python-loggable-decorator',
download_url = 'https://github.com/ateliedocodigo/python-loggable-decorator/tarball/1.1.1',
keywords = ['logging'],
classifiers = [],
)
|
StarcoderdataPython
|
5170066
|
# -*- coding: utf-8 -*-
import gzip
import re
import sqlite3
import sys
from defusedxml import ElementTree
class IngestLogfile:
"""log ingestion class"""
def __init__(self, conn):
"""
:param conn: sqlite connection object
"""
self.conn = conn
self.jid_pattern = re.compile("^(?:([^\"&'/:<>@]{1,1023})@)?([^/@]{1,1023})(?:/(.{1,1023}))?$")
self.message_pattern = re.compile(r'<message.*?</message>', re.DOTALL)
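        # Example (illustrative, not part of the original code):
        #   self.jid_pattern.match("spammer@example.org/mobile").groups()
        #   -> ("spammer", "example.org", "mobile"); the node and resource groups are optional.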
def read(self, infile: list = None):
"""
        Ingest method that iterates over the list of log files; decompression and decoding are applied if necessary.
:param infile: list containing log filenames to be ingested
"""
magic_number = b"\x1f\x8b\x08"
# iterate over all infile elements
for element in infile:
try:
                # open the file in binary mode
                with open(element, "rb") as log_file:
                    content = log_file.read()
# in case of a missing file set content to an empty string
except FileNotFoundError as err:
content = ""
print(err, file=sys.stderr)
# if magic number is present decompress and decode file
if content.startswith(magic_number):
content = gzip.decompress(content).decode("utf-8")
# in any other case read file normally
else:
content = content.decode("utf-8")
# None catch
if content is not None:
log = re.findall(self.message_pattern, content)
if log is not None:
self.db_import(log)
def db_import(self, message_log: list):
"""
import xml stanzas into database
:param message_log: list of xml messages
"""
for message in message_log:
message_parsed = ElementTree.fromstring(message)
# parse 'from' tag
spam_from = message_parsed.get('from')
match = self.jid_pattern.match(spam_from)
(node, domain, resource) = match.groups()
# stamp
all_delay_tags = message_parsed.findall('.//{urn:xmpp:delay}delay')
spam_time = None
for tag in all_delay_tags:
if "@" in tag.get("from"):
continue
spam_time = tag.get('stamp')
# body
spam_body = message_parsed.find('{jabber:client}body')
if spam_body is not None:
spam_body = spam_body.text
# format sql
try:
self.conn.execute('''INSERT INTO spam VALUES(:user, :domain, :spam_time, :spam_body);''',
{"user": node, "domain": domain, "spam_time": spam_time, "spam_body": spam_body})
except sqlite3.IntegrityError:
pass
finally:
self.conn.commit()
|
StarcoderdataPython
|
6640544
|
import sqlite3
from sqlite3 import Error
def create_connection(db_file):
connection = None
try:
connection = sqlite3.connect(db_file)
return connection
except Error as e:
print(e)
if __name__=='__main__':
create_connetion("db/forum.db")
|
StarcoderdataPython
|
6536751
|
<filename>gpuPTXModel.py<gh_stars>1-10
def main():
"""Main function."""
import argparse
import sys
import torch
import numpy as np
import os
from os import listdir
from src import readFiles as rf
from src import functionsPyTorch as pytor
from src import globalStuff as gls
from src.globalStuff import printing, output_dir_train, isa_file, inst_types_file, state_spaces_file, initPytorch, arrangeDataset, possible_outputs, closeOutputLogFile
use_test = False
gls.init()
parser = argparse.ArgumentParser()
parser.add_argument('benchs_data_path', type=str)
parser.add_argument('gpu_name', type=str)
parser.add_argument('--test_data_path', type=str, default='')
parser.add_argument('--benchs_file', type=str, default = 'all')
parser.add_argument('--benchs_test_file', type=str, default = 'all')
parser.add_argument('--tdp', type=int, default=250)
parser.add_argument('--device', type=str, default='gpu')
parser.add_argument('--device_id', type=int, default=0)
parser.add_argument('--test_number', type=int, default=-1)
parser.add_argument('--num_epochs', type=int, default=20)
parser.add_argument('--fast', action='store_const', const=True, default=False)
parser.add_argument('--v', action='store_const', const=True, default=False)
parser.add_argument('--no_pow_dvfs', action='store_const', const=True, default=False)
parser.add_argument('--no_time_dvfs', action='store_const', const=True, default=False)
parser.add_argument('--no_energy_dvfs', action='store_const', const=True, default=False)
parser.add_argument('--never_stop', action='store_const', const=True, default=False)
parser.add_argument('--no_output', action='store_const', const=True, default=False)
parser.add_argument('--pc', action='store_const', const=True, default=False)
parser.add_argument('--encoder_file', type=str, default='')
parser.add_argument('--time_dvfs_file', type=str, default='')
parser.add_argument('--pow_dvfs_file', type=str, default='')
parser.add_argument('--energy_dvfs_file', type=str, default='')
args = vars(parser.parse_args())
print(args)
benchs_data_path = args['benchs_data_path']
gpu_name = args['gpu_name']
test_data_path = args['test_data_path']
benchs_file = args['benchs_file']
benchs_test_file = args['benchs_test_file']
tdp = args['tdp']
device_arg = args['device']
device_id = args['device_id']
num_epochs = args['num_epochs']
fast = args['fast']
verbose = args['v']
never_stop = args['never_stop']
no_output = args['no_output']
test_number = args['test_number']
performance_counters = args['pc']
encoder_config_file = args['encoder_file']
timedvfs_config_file = args['time_dvfs_file']
powdvfs_config_file = args['pow_dvfs_file']
energydvfs_config_file = args['energy_dvfs_file']
outputs_to_model = {'time_dvfs': args['no_time_dvfs'], 'pow_dvfs': args['no_pow_dvfs'], 'energy_dvfs': args['no_energy_dvfs']}
ISA = rf.readISA(isa_file)
gls.ISA_size = len(ISA)
state_spaces = rf.readISA(state_spaces_file)
gls.state_spaces_size = len(state_spaces)
inst_types = rf.readISA(inst_types_file)
gls.inst_types_size = len(inst_types)
orig_dir = os.path.dirname(os.path.abspath(__file__))
test_output_dir = gls.createTestOutputFolder(output_dir_train, orig_dir, test_number, device_id)
device = initPytorch(device_arg, device_id)
printing(device, no_output)
np.random.seed(40)
# printing('Model type: %s' %model_name, no_output)
printing('Data from GPU: %s' %gpu_name, no_output)
dataset_ubench, clocks = rf.readDataSet(benchs_data_path, gpu_name, tdp, performance_counters)
ubenchmarks = dataset_ubench['names']
num_ubenchmarks = len(ubenchmarks)
printing('Number of microbenchmarks: %d' %(num_ubenchmarks), no_output)
printing('Benchs file: %s' %benchs_file, no_output)
#if testing set is provided
if test_data_path != '':
use_test = True
dataset_test, _ = rf.readDataSet(test_data_path, gpu_name, tdp, performance_counters)
test_benchmarks = dataset_test['names']
num_test_benchmarks = len(test_benchmarks)
printing('Number of testing benchmarks: %d' %(num_test_benchmarks), no_output)
printing('Test Benchs file: %s' %benchs_test_file, no_output)
vocab_size = gls.ISA_size*(gls.state_spaces_size+1)*(gls.inst_types_size+1)*(gls.max_operands+1)*(gls.buffer_max_size+1)*(gls.dependencies_types+1) + 3
if verbose == True:
print('Vocab size: %d' %vocab_size)
index_limit_training = int(gls.percentage_training * num_ubenchmarks)
random_ordering = np.arange(num_ubenchmarks)
np.random.shuffle(random_ordering)
index_train = random_ordering[0:index_limit_training]
index_train.sort()
index_val = random_ordering[index_limit_training:]
index_val.sort()
data_train = arrangeDataset(dataset_ubench, index_train, performance_counters)
data_val = arrangeDataset(dataset_ubench, index_val, performance_counters)
if encoder_config_file != '':
config = rf.readISA('model_configs/encoder/%s.txt' %(encoder_config_file))
encoder_params = {'embed_size': int(config[0]), 'learning_rate': float(config[1]), 'dropout_prob': float(config[2]), 'optimizer_name': config[3], 'num_layers': int(config[4]), 'hidden_size': int(config[5]), 'batch_size': int(config[6])}
else:
encoder_params = {'embed_size': embed_size, 'learning_rate': learning_rate_encoder, 'dropout_prob': dropout_prob_encoder, 'optimizer_name': optimizer_encoder, 'num_layers': num_layers_encoder, 'hidden_size': hidden_size_encoder, 'batch_size': batch_size}
encoder_params['vocab_size'] = vocab_size
# PREPARE THE PARAMETERS BEFORE MODEL TRAINING
nn_params = {}
config_files = [timedvfs_config_file, powdvfs_config_file, energydvfs_config_file]
for model_name, model_config_file in zip(possible_outputs, config_files):
if outputs_to_model[model_name] == False:
if model_config_file != '':
config = rf.readISA('model_configs/%s/%s.txt' %(model_name, model_config_file))
nn_params[model_name] = {'learning_rate': float(config[0]), 'dropout_prob': float(config[1]), 'optimizer_name': config[2], 'num_layers': int(config[3])}
hidden_sizes_list = []
for hidden_size_aux in config[4:]:
hidden_sizes_list.append(int(hidden_size_aux))
nn_params[model_name]['hidden_sizes'] = np.asarray(hidden_sizes_list)
if len(nn_params[model_name]['hidden_sizes']) != nn_params[model_name]['num_layers']:
                    print('ERROR: Hidden sizes do not match the number of hidden layers in file: \'model_configs/%s/%s.txt\'' %(model_name, model_config_file))
sys.exit()
else:
nn_params[model_name] = {'learning_rate': learning_rate, 'dropout_prob': dropout_prob, 'optimizer_name': optimizer, 'num_layers': 2, 'hidden_sizes': [hidden_size, hidden_size_2]}
model_params = {'model_name': model_name, 'max_epochs': num_epochs, 'encoder_params': encoder_params, 'nn_params': nn_params, 'outputs': outputs_to_model, 'never_stop': never_stop, 'no_output': no_output}
# TRAIN THE MODELS
if use_test == True:
data_test = arrangeDataset(dataset_test, np.arange(len(dataset_test['names'])), performance_counters)
trainingTime, trainedModels, results_train, results_val, results_test = pytor.trainPytorchModel(device, clocks, verbose, fast, performance_counters, model_params, test_output_dir, data_train, data_val, data_test)
else:
trainingTime, trainedModels, results_train, results_val, _ = pytor.trainPytorchModel(device, clocks, verbose, fast, performance_counters, model_params, test_output_dir, data_train, data_val)
if verbose == True:
print(test_output_dir)
if fast == 0:
predicted_values = {}
measured_values = {}
errors_values = {}
#save the predictions to an output .csv file
for model_type in trainedModels['output_types']:
predicted_values[model_type] = {'Training': results_train['last_epoch_predictions'][model_type], 'Validation': results_val['last_epoch_predictions'][model_type]}
measured_values[model_type] = {'Training': data_train[model_type], 'Validation': data_val[model_type]}
errors_values[model_type] = {'Training': results_train['abs_error_per_epoch'][-1][model_type], 'Validation': results_val['abs_error_per_epoch'][-1][model_type]}
np.savetxt('%s/last_epoch_prediction_train_%s_%s_%s.csv' %(test_output_dir, model_type, model_name, benchs_file[:-4]), predicted_values[model_type]['Training'], delimiter=",")
np.savetxt('%s/last_epoch_prediction_val_%s_%s_%s.csv' %(test_output_dir, model_type, model_name, benchs_file[:-4]), predicted_values[model_type]['Validation'], delimiter=",")
#if using the testing benchmarks to validate model
if use_test == True:
for model_type in trainedModels['output_types']:
predicted_values[model_type]['Testing'] = results_test['last_epoch_predictions'][model_type]
measured_values[model_type]['Testing'] = data_test[model_type]
errors_values[model_type]['Testing'] = results_test['abs_error_per_epoch'][-1][model_type]
np.savetxt('%s/last_epoch_prediction_test_%s_%s_%s.csv' %(test_output_dir, model_type, model_name, benchs_file[:-4]), predicted_values[model_type]['Testing'], delimiter=",")
#save model to an output file
torch.save(trainedModels['encoder'].state_dict(), '%s/%s_%s' %(test_output_dir, 'encoder', benchs_file))
for model in trainedModels['output_types']:
torch.save(trainedModels[model].state_dict(), '%s/%s_%s' %(test_output_dir, model, benchs_file))
#
#print last epoch results
LEF = open('%s/last_epoch.txt' %(test_output_dir), 'w')
LEF.write("num_epochs,%d\n" %len(results_train['abs_error_per_epoch']))
if use_test == True:
datasets_names = ['Training', 'Validation', 'Testing']
else:
datasets_names = ['Training', 'Validation']
for model_type in trainedModels['output_types']:
for dataset in datasets_names:
LEF.write('%s,%s,%.4f\n' %(dataset, model_type, errors_values[model_type][dataset]))
LEF.close()
closeOutputLogFile()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
4818567
|
<gh_stars>1-10
import unittest
from instasteem.parser import InstagramPostParser
from instasteem.sync import Sync
TEST_POST = "https://www.instagram.com/p/BsfudqQgGlw/"
class IntegrationTest(unittest.TestCase):
def test_image_parsing(self):
p = InstagramPostParser(TEST_POST)
images = p.extract_images()
self.assertEqual(len(images), 2)
def test_metadata_parsing(self):
p = InstagramPostParser(TEST_POST)
metadata = p.extract_metadata()
self.assertEqual(len(metadata), 4)
self.assertEqual("Hamburg, Germany", metadata[0])
self.assertEqual(
"lorem Ipsum is simply dummy text of the printing and typesettin"
"g industry. Lorem Ipsum has been the industry's standard "
"dummy text ever since the 1500s, when an unknown printer took "
"a galley of type and scrambled it to make a type specimen book. "
"It has survived not only five centuries, but also the leap into"
" electronic typesetting, remaining essentially unchanged."
" It was popularised in the 1960s with the release of Letraset "
"sheets containing Lorem Ipsum passages, and more recently with"
" desktop publishing software like Aldus PageMaker including "
"versions of Lorem Ipsum.",
metadata[1]
)
self.assertEqual(
"instasteem on Instagram: “lorem Ipsum is simply "
"dummy text of the printing and typesetting industry."
" Lorem Ipsum has been the industry's standard dummy "
"text ever…”",
metadata[2],
)
self.assertEqual("2019-01-11T13:20:03", metadata[3])
class ParserTest(unittest.TestCase):
def test_invalid_url(self):
with self.assertRaises(ValueError) as context:
InstagramPostParser("http://invaliddomain.org")
def test_injection(self):
p = InstagramPostParser(TEST_POST)
self.assertNotEqual(None, p.content)
class SyncTest(unittest.TestCase):
def test_client_keys(self):
s = Sync(keys=["foo"])
self.assertEqual(s.client.keys, ["foo",])
def test_post(self):
s = Sync()
op = s.post("foo", "title", "body", ["tag1", "tag2"], safe_mode=True)
self.assertEqual(op.op_id, "comment")
self.assertEqual(op.op_data["parent_author"], None)
self.assertEqual(op.op_data["parent_permlink"], "tag1")
self.assertEqual(op.op_data["author"], "foo")
self.assertEqual(op.op_data["permlink"], "title")
self.assertEqual(op.op_data["title"], "title")
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
223151
|
<gh_stars>0
import hikari
import lightbulb
moderation_plugin = lightbulb.Plugin('Moderation')
moderation_plugin.add_checks(
lightbulb.has_guild_permissions(hikari.Permissions.MANAGE_GUILD)
)
|
StarcoderdataPython
|
325705
|
import matplotlib.pyplot as plt
import numpy as np
def plot_ica_points(feat_actors, ica_mixing_array, idx):
    data = np.concatenate([feat_actors[actor][idx] for actor in feat_actors], axis=1)
scatter = np.dot(data.T, ica_mixing_array[idx])
plt.scatter(scatter[:,0], scatter[:,1])
|
StarcoderdataPython
|
4916687
|
<gh_stars>0
# Test functions for Submission.py
import os
import sys
import unittest
# Add the parent directory to the lib path
lib_path = os.path.abspath(os.path.join(__file__, '..'))
sys.path.append(lib_path)
class TestRunner(unittest.TestCase):
'''
Just a dummy test
'''
def testSubmission1(self):
result = True
self.assertTrue(result)
|
StarcoderdataPython
|
3201322
|
<reponame>tlagore/ros2relay<filename>ros2relay/metrics/metrics.py
import threading
class MessageMetrics:
def __init__(self):
""" """
self.byte_sum = 0
self.message_count = 0
self.message_handle_time = 0
def increment_message_count(self, message_size, time_taken):
self.message_count += 1
self.byte_sum += message_size
self.message_handle_time += time_taken
def get_and_reset_metrics(self):
m_count = self.message_count
b_sum = self.byte_sum
t_taken = self.message_handle_time
self.message_count = 0
self.byte_sum = 0
self.message_handle_time = 0
return (m_count, b_sum, t_taken)
class MessageMetricsHandler:
def __init__(self, num_handlers, count_drops=False):
""" """
self.dropped_messages = 0
self.dropped_per_period = 0
self.dropped_lock = threading.Lock()
self.observed_messages = 0
self.observed_lock = threading.Lock()
self.metric_handlers = []
self.count_drops = count_drops
for i in range(num_handlers):
self.metric_handlers.append(MessageMetrics())
def handle_message(self, handler, message_size, time_taken):
if handler > len(self.metric_handlers) - 1 or handler < 0:
raise ValueError(f"Handler must be between 0 and {len(self.metric_handlers) - 1}")
self.metric_handlers[handler].increment_message_count(message_size, time_taken)
def publish_metrics(self):
""" """
# list of tuples (message_count, byte_sum, time_taken) for each worker
vals = [m.get_and_reset_metrics() for m in self.metric_handlers]
# summed tuple, [sum(message_count), sum(byte_sum), sum(time_taken)]
sums = [sum(x) for x in zip(*vals)]
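        # e.g. (illustrative) vals = [(2, 100, 0.5), (3, 50, 0.2)] -> sums = [5, 150, 0.7]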
handle_time = 0 if sums[0] == 0 else sums[2]/sums[0]
if self.count_drops:
message = f"m/s observed: {self.observed_messages}. m/s:{sums[0]}. d:{self.dropped_messages} d/s:{self.dropped_per_period} KB/s:{(sums[1] / 1024):.2f}. t/s:{handle_time:.2f}"
print(message.ljust(len(message)+20), end='')
print("\r", end='')
with self.dropped_lock:
self.dropped_per_period = 0
else:
message = f"m/s observed: {self.observed_messages}. m/s sent:{sums[0]}. KB/s:{(sums[1] / 1024):.2f}. t/m/s:{handle_time:.2f}"
print(message.ljust(len(message)+20), end='')
print("\r", end='')
# only lock once per publish period to ensure we reset observed
with self.observed_lock:
self.observed_messages = 0
def increment_dropped(self):
# shouldn't be calling this if we haven't enabled drop counters
if self.count_drops:
with self.dropped_lock:
self.dropped_messages += 1
self.dropped_per_period += 1
def increment_observed(self):
        # to avoid a lock in the hot path, observed_messages is not thread-safe on increment
self.observed_messages += 1
|
StarcoderdataPython
|
1972718
|
"""外星人入侵游戏"""
# 要玩游戏《外星人入侵》,只需运行这个py文件即可
import pygame
from settings import Settings
from ship import Ship
import game_functions as gf
from pygame.sprite import Group
from alien import Alien
from game_stats import GameStats
from button import Button
from scoreboard import ScoreBoard
def run_game():
    # Initialize pygame, the settings, and the screen object
    pygame.init()
    ai_settings = Settings()  # ai_settings carries the attributes and methods of the Settings class
    screen = pygame.display.set_mode((ai_settings.screen_width, ai_settings.screen_height))
    pygame.display.set_caption("Alien Invasion")
    # Create the Play button (the player must click "Play" before the game starts)
    play_button = Button(ai_settings, screen, "Play")
    # Create an instance for storing game statistics, and create a scoreboard
    stats = GameStats(ai_settings)
    sb = ScoreBoard(ai_settings, screen, stats)
    # Create a ship (instance)
    ship = Ship(ai_settings, screen)
    # Create a group for storing bullets
    bullets = Group()
    # Create a group of aliens
    aliens = Group()
    # Create the alien fleet
    gf.create_fleet(ai_settings, screen, ship, aliens)
    # Set the background color
    bg_color = (230, 230, 230)
    # Create an alien
    alien = Alien(ai_settings, screen)
    # Start the main loop of the game
    while True:
        gf.check_events(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets)  # The main loop checks for player input
        if stats.game_active:
            ship.update()  # Update the ship's position (each pass through the loop calls the ship's methods that respond to key presses and mouse events, i.e. moving left/right)
            gf.update_bullets(ai_settings, screen, stats, sb, ship, aliens,
                              bullets)  # Update bullet positions and remove vanished bullets + update alien positions and remove aliens hit by bullets
            gf.update_aliens(ai_settings, screen, stats, sb, ship, aliens, bullets)
        # Draw the ship and bullets first, then the aliens, so the aliens appear in front on the screen
        gf.update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets, play_button)  # Redraw the screen with the updated positions
run_game()
|
StarcoderdataPython
|
5015407
|
<reponame>PacktPublishing/Applied-Computational-Thinking-with-Python
with open("ch8_survey.txt") as file:
for line in file:
line = line.strip()
divide = line.split(" - ")
name = divide[0]
color = divide[1]
print(name + " voted for " + color)
|
StarcoderdataPython
|
286376
|
#!/usr/bin/python
import sys
sys.path.append('/usr/share/inkscape/extensions')
import inkex
from simplestyle import *
import simpletransform
import lc
import re
class VDistanceEffect(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
self.OptionParser.add_option('--distance', action = 'store',
type = 'float', dest = 'distance', default = '10',
help = 'distance')
self.OptionParser.add_option('--moving', action = 'store',
type = 'string', dest = 'moving', default = 't',
help = 'moving')
self.OptionParser.add_option('--top', action = 'store',
type = 'string', dest = 'top', default = 't',
help = 'top')
self.OptionParser.add_option('--bottom', action = 'store',
type = 'string', dest = 'bottom', default = 't',
help = 'bottom')
self.OptionParser.add_option('--unit', action = 'store',
type = 'string', dest = 'unit', default = 'mm',
help = 'unit')
def effect(self):
distance=self.unittouu(str(self.options.distance)+self.options.unit)
if len(self.options.ids)!=2:
print >>sys.stderr,"you must select exactly two objects"
return
id1=self.options.ids[0]
id2=self.options.ids[1]
b1=simpletransform.computeBBox([self.selected[id1],])
b2=simpletransform.computeBBox([self.selected[id2],])
if b1[2]>b2[2]:
b1,b2=(b2,b1)
id1,id2=(id2,id1)
# id1,b1 is for the top element
# id2,b2 is for the bottom element
if self.options.top=='t':
top=b1[2]
else:
top=b1[3]
if self.options.bottom=='t':
bottom=b2[2]
else:
bottom=b2[3]
if self.options.moving=='t':
moving=self.selected[id1]
delta=(bottom-top)-distance
else:
moving=self.selected[id2]
delta=-(bottom-top)+distance
#print >>sys.stderr,distance,top,bottom,delta
#print >>sys.stderr,self.selected,b1,b2,delta,distance
# translate
#print >>sys.stderr,self.selected[id2].attrib['transform']
m=re.search('translate.*\([0-9-.]+,([0-9-.]+).*\)',moving.attrib.get('transform',''))
#print >>sys.stderr,"match is:",m
if m!=None:
delta=delta+float(m.group(1))
#print >>sys.stderr,"delta is:",delta
moving.attrib['transform']='translate(0,'+str(delta)+')'
effect = VDistanceEffect()
effect.affect()
|
StarcoderdataPython
|
1671461
|
def start(animation,frame):
o=frame.make_random()[0]
animation.pass_obj=o
def move_right(animation,frame):
frame.move(animation.pass_obj,10,0)
def move_left(animation,frame):
frame.move(animation.pass_obj,-10,0)
def end(animation,frame):
frame.delete(animation.pass_obj)
animation=(
start,
move_right,
move_left,
move_left,
move_right,
end
)
|
StarcoderdataPython
|
1880220
|
# -*- coding: utf-8 -*-
"""
------------------------------------------------------------------------------
WLAN Experiment Commands
------------------------------------------------------------------------------
Authors: <NAME> (chunter [at] mangocomm.com)
<NAME> (murphpo [at] mangocomm.com)
<NAME> (welsh [at] mangocomm.com)
License: Copyright 2014, Mango Communications. All rights reserved.
Distributed under the WARP license (http://warpproject.org/license)
------------------------------------------------------------------------------
MODIFICATION HISTORY:
Ver Who Date Changes
----- ---- -------- -----------------------------------------------------
1.00a ejw 1/23/14 Initial release
------------------------------------------------------------------------------
This module provides class definitions for all WLAN Exp commands.
Functions (see below for more information):
WlanExpCmdLogGetEvents()
WlanExpCmdResetLog()
WlanExpCmdGetLogCurrIdx()
WlanExpCmdGetLogOldestIdx()
WlanExpCmdAddStatsToLog()
WlanExpCmdStreamLogEntries()
WlanExpCmdNodeTime()
WlanExpCmdNodeChannel()
Integer constants:
None
Many other constants may be defined; please do not use these values when
defining other sub-classes of WnCmd and WnBufferCmd.
"""
import warpnet.wn_cmds as wn_cmds
import warpnet.wn_message as wn_message
import warpnet.wn_transport_eth_udp as wn_transport
__all__ = ['WlanExpCmdLogGetEvents', 'WlanExpCmdResetLog',
'WlanExpCmdGetLogCurrIdx', 'WlanExpCmdGetLogOldestIdx',
'WlanExpCmdAddStatsToLog', 'WlanExpCmdStreamLogEntries',
'WlanExpCmdNodeTime', 'WlanExpCmdNodeChannel']
# WLAN Exp Command IDs (Extension of WARPNet Command IDs)
# NOTE: The C counterparts are found in wlan_exp_node.h
CMD_ASSN_GET_STATUS = 10
CMD_ASSN_SET_TABLE = 11
CMD_ASSN_RESET_STATS = 12
CMD_DISASSOCIATE = 20
CMD_TX_POWER = 30
CMD_TX_RATE = 31
CMD_CHANNEL = 32
CMD_TIME = 33
CMD_LTG_CONFIG_CBF = 40
CMD_LTG_START = 41
CMD_LTG_STOP = 42
CMD_LTG_REMOVE = 43
CMD_LOG_RESET = 50
CMD_LOG_CONFIG = 51
CMD_LOG_GET_CURR_IDX = 52
CMD_LOG_GET_OLDEST_IDX = 53
CMD_LOG_GET_EVENTS = 54
CMD_LOG_ADD_EVENT = 55
CMD_LOG_ENABLE_EVENT = 56
CMD_LOG_STREAM_ENTRIES = 57
CMD_STATS_ADD_TO_LOG = 60
CMD_STATS_GET_STATS = 61
CMD_CONFIG_DEMO = 90
# Be careful that new commands added to WlanExpNode do not collide with child commands
#-----------------------------------------------------------------------------
# Class Definitions for WLAN Exp Commands
#-----------------------------------------------------------------------------
class WlanExpCmdLogGetEvents(wn_message.WnBufferCmd):
"""Command to get the WLAN Exp log events of the node"""
def __init__(self, size, start_byte=0):
command = (wn_cmds.GRPID_NODE << 24) | CMD_LOG_GET_EVENTS
super(WlanExpCmdLogGetEvents, self).__init__(
command=command, buffer_id=0, flags=0, start_byte=start_byte, size=size)
def process_resp(self, resp):
return resp
# End Class
class WlanExpCmdResetLog(wn_message.WnCmd):
"""Command to reset the Event log"""
def __init__(self):
super(WlanExpCmdResetLog, self).__init__()
self.command = (wn_cmds.GRPID_NODE << 24) | CMD_LOG_RESET
def process_resp(self, resp):
pass
# End Class
class WlanExpCmdGetLogCurrIdx(wn_message.WnCmd):
"""Command to reset the Event log"""
def __init__(self):
super(WlanExpCmdGetLogCurrIdx, self).__init__()
self.command = (wn_cmds.GRPID_NODE << 24) | CMD_LOG_GET_CURR_IDX
def process_resp(self, resp):
args = resp.get_args()
if len(args) != 1:
print("Invalid response.")
print(resp)
return args[0]
# End Class
class WlanExpCmdGetLogOldestIdx(wn_message.WnCmd):
"""Command to reset the Event log"""
def __init__(self):
super(WlanExpCmdGetLogOldestIdx, self).__init__()
self.command = (wn_cmds.GRPID_NODE << 24) | CMD_LOG_GET_OLDEST_IDX
def process_resp(self, resp):
args = resp.get_args()
if len(args) != 1:
print("Invalid response.")
print(resp)
return args[0]
# End Class
class WlanExpCmdAddStatsToLog(wn_message.WnCmd):
"""Command to add the current statistics to the Event log"""
def __init__(self):
super(WlanExpCmdAddStatsToLog, self).__init__()
self.command = (wn_cmds.GRPID_NODE << 24) | CMD_STATS_ADD_TO_LOG
def process_resp(self, resp):
args = resp.get_args()
if len(args) != 1:
print("Invalid response.")
print(resp)
return args[0]
# End Class
class WlanExpCmdStreamLogEntries(wn_message.WnCmd):
"""Command to configure the node log streaming."""
def __init__(self, enable, host_id, ip_address, port):
super(WlanExpCmdStreamLogEntries, self).__init__()
self.command = (wn_cmds.GRPID_NODE << 24) | CMD_LOG_STREAM_ENTRIES
if (type(ip_address) is str):
addr = wn_transport.ip2int(ip_address)
elif (type(ip_address) is int):
addr = ip_address
else:
raise TypeError("IP Address must be either a str or int")
arg = (2**16 * int(host_id)) + (int(port) & 0xFFFF)
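        # e.g. (illustrative) host_id=1, port=9999 -> arg = (2**16 * 1) + 9999 = 75535
        # (the host id lives in the upper 16 bits, the port in the lower 16 bits)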
self.add_args(enable)
self.add_args(addr)
self.add_args(arg)
def process_resp(self, resp):
pass
# End Class
class WlanExpCmdNodeTime(wn_message.WnCmd):
"""Command to get / set the time on the node.
NOTE: Python time functions operate on floating point numbers in
seconds, while the WnNode operates on microseconds. In order
to be more flexible, this class can be initialized with either
type of input. However, it will only return an integer number
of microseconds.
Attributes:
time -- Time as either an integer number of microseconds or
a floating point number in seconds.
"""
time_factor = 6
def __init__(self, time):
super(WlanExpCmdNodeTime, self).__init__()
self.command = (wn_cmds.GRPID_NODE << 24) | CMD_TIME
if (type(time) is float):
time_to_send = int(round(time, self.time_factor) * (10**self.time_factor))
elif (type(time) is int):
time_to_send = time
else:
raise TypeError("Time must be either a float or int")
self.add_args((time_to_send & 0xFFFFFFFF))
self.add_args(((time_to_send >> 32) & 0xFFFFFFFF))
def process_resp(self, resp):
args = resp.get_args()
if len(args) != 2:
print("Invalid response.")
print(resp)
return ( (2**32 * args[1]) + args[0] )
# End Class
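# Illustrative example (not part of the original module): WlanExpCmdNodeTime(1.5) converts the
# float 1.5 s into 1_500_000 microseconds and sends it as a low 32-bit word (1_500_000) and a
# high word (0); process_resp() reassembles the value as (2**32 * args[1]) + args[0].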
class WlanExpCmdNodeChannel(wn_message.WnCmd):
"""Command to get / set the channel of the node.
Attributes:
channel -- 802.11 Channel for the node. Should be a value between
0 and 11. Checking is done on the node and the current
channel will always be returned by the node. A value
of 0xFFFF will only return the channel.
"""
def __init__(self, channel):
super(WlanExpCmdNodeChannel, self).__init__()
self.command = (wn_cmds.GRPID_NODE << 24) | CMD_CHANNEL
self.add_args((channel & 0xFFFF))
def process_resp(self, resp):
args = resp.get_args()
if len(args) != 1:
print("Invalid response.")
print(resp)
return args[0]
# End Class
class WlanExpCmdConfigDemo(wn_message.WnCmd):
"""Command to configure the demo on the node.
Attributes:
flags - Flags to pass to the demo. Defined Flags:
DEMO_CONFIG_FLAGS_EN = 1
wait_time - Inter-packet sleep time (usec)
"""
def __init__(self, flags, sleep_time):
super(WlanExpCmdConfigDemo, self).__init__()
self.command = (wn_cmds.GRPID_NODE << 24) | CMD_CONFIG_DEMO
self.add_args(flags)
self.add_args(sleep_time)
def process_resp(self, resp):
pass
# End Class
|
StarcoderdataPython
|
1763394
|
"""This module contains all actions and interactions for the Home Page."""
from selenium.webdriver.common.by import By
class HomePage:
# * Locators
@classmethod
def contact_us_a(cls):
return (By.ID, 'contact-link')
def __init__(self, driver):
self.driver = driver
# * Actions
def click_contact_us_link(self):
contact_us_link = self.driver.find_element(*self.contact_us_a())
contact_us_link.click()
|
StarcoderdataPython
|
1871740
|
<reponame>RafaelAmauri/Projeto-e-Analise-de-Algoritmos<gh_stars>0
class Celula:
def __init__(self, value):
self.value = value
self.visited = False
self.up = None
self.down = None
self.left = None
self.right = None
def __repr__(self):
return self.value
def set_up(self, cell):
self.up = cell
def set_down(self, cell):
self.down = cell
def set_left(self, cell):
self.left = cell
def set_right(self, cell):
self.right = cell
def get_value(self):
return self.value
|
StarcoderdataPython
|
336768
|
<reponame>Coalin/Daily-LeetCode-Exercise<gh_stars>1-10
class Solution(object):
def robot(self, command, obstacles, x, y):
"""
:type command: str
:type obstacles: List[List[int]]
:type x: int
:type y: int
:rtype: bool
"""
# zb = [0, 0]
# ind = 0
# while True:
# if command[ind] == 'U':
# zb = [zb[0], zb[1]+1]
# elif command[ind] == 'R':
# zb = [zb[0]+1, zb[1]]
# # print(zb)
# if zb == [x, y]:
# return True
# if zb in obstacles:
# return False
# if zb[0] > x or zb[1] > y:
# return False
# if ind < len(command)-1:
# ind += 1
# elif ind == len(command)-1:
# ind = 0
# return False
xi = 0
yi = 0
zb = [0, 0]
for c in command:
if c == 'R':
xi += 1
if c == 'U':
yi += 1
epoch = min(x//xi, y//yi)
x_ = x-epoch*xi
y_ = y-epoch*yi
zb = [0, 0]
is_reach = False
for i in command:
if zb == [x_, y_]:
is_reach = True
if i == 'R':
zb = [zb[0]+1, zb[1]]
if i == 'U':
zb = [zb[0], zb[1]+1]
obstacles_ = []
for item in obstacles:
if item[0]<=x and item[1]<=y:
obstacles_.append(item)
for ob in obstacles_:
cur_epo = min(ob[0]//xi, ob[1]//yi)
cur_x = ob[0]-cur_epo*xi
cur_y = ob[1]-cur_epo*yi
cur_zb = [0, 0]
for cm in command:
print(cur_zb)
if cur_zb == [cur_x, cur_y]:
return False
if cm == 'R':
cur_zb = [cur_zb[0]+1, cur_zb[1]]
if cm == 'U':
cur_zb = [cur_zb[0], cur_zb[1]+1]
return is_reach
|
StarcoderdataPython
|
8021590
|
<filename>Python/41.first-missing-positive.py
from typing import List
class Solution:
def firstMissingPositive(self, nums: List[int]) -> int:
"""
Given an unsorted integer array, find the smallest missing positive integer.
>>> Solution().firstMissingPositive([1, 2, 0])
3
>>> Solution().firstMissingPositive([3, 4, -1, 1])
2
>>> Solution().firstMissingPositive([7, 8, 9, 11, 12])
1
:param nums: a list of unsorted integer array
:return: the smallest missing positive integer
"""
if not nums:
return 1
return self.leet_code_soln(nums)
def leet_code_soln(self, nums: List[int]) -> int:
"""
T: O(n)
Idea:
        For any k positive nums (duplicates allowed), the first missing positive num must be in [1, k+1].
        e.g. k balls into k + 1 bins: there must be a bin that's empty
        Steps:
        1. Since there are 0s and negative numbers, we partition the list as we do in quick sort and get k
        2. Now nums[:k] are positive. Then the first missing number must be in [1, k+1].
        We use nums[i] to indicate whether the number i + 1 exists.
        e.g. nums[0] should be 1
        If nums[i] exists, set it to negative to indicate that.
        3. Then scan the elements in nums[:k] to find the first positive element.
        nums[i] positive => i+1 does not exist
https://leetcode.com/problems/first-missing-positive/discuss/17073
:param nums: a list of unsorted integer array
:return: the smallest missing positive integer
"""
n = len(nums)
# Step 1: partition the array
k = -1
for i in range(n):
if nums[i] > 0:
k += 1
nums[k], nums[i] = nums[i], nums[k]
k += 1
first_missing_idx = k
# Step 2: if the number exists and is in place, then set it to minus
for i in range(k):
temp = abs(nums[i])
if temp <= k:
if nums[temp - 1] > 0:
nums[temp - 1] *= -1
# Step 3: scan the list and find the one that's not in place and positive
for i in range(k):
if nums[i] > 0:
first_missing_idx = i
break
return first_missing_idx + 1
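    # Worked trace (illustrative) for nums = [3, 4, -1, 1]:
    #   Step 1 partitions the positives to the front: [3, 4, 1, -1], k = 3
    #   Step 2 negates nums[v - 1] for each value v <= k that appears: [-3, 4, -1, -1]
    #   Step 3 finds the first positive entry at index 1, so the answer is 1 + 1 = 2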
def brute_force(self, nums: List[int]) -> int:
"""
T: O(nlogn)
The brute force solution:
Sort the nums list and loop through the list
:param nums: a list of unsorted integer array
:return: the smallest missing positive integer
"""
nums.sort() # O(nlogn)
res = 1
for num in nums:
if 0 < num == res:
res += 1
return res
if __name__ == '__main__':
print(Solution().firstMissingPositive([1, 2, 0]))
|
StarcoderdataPython
|
139870
|
# Implementation of the Gaborfilter
# https://en.wikipedia.org/wiki/Gabor_filter
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
"""
:param ksize: The kernelsize of the convolutional filter (ksize x ksize)
:param sigma: standard deviation of the gaussian bell curve
:param theta: The orientation of the normal to the parallel stripes
of Gabor function.
:param lambd: Wavelength of the sinusoidal component.
:param gamma: The spatial aspect ratio and specifies the ellipticity
of the support of Gabor function.
:param psi: The phase offset of the sinusoidal function.
>>> gabor_filter_kernel(3, 8, 0, 10, 0, 0).tolist()
[[0.8027212023735046, 1.0, 0.8027212023735046], [0.8027212023735046, 1.0, \
0.8027212023735046], [0.8027212023735046, 1.0, 0.8027212023735046]]
"""
# prepare kernel
# the kernel size have to be odd
if (ksize % 2) == 0:
ksize = ksize + 1
gabor = np.zeros((ksize, ksize), dtype=np.float32)
# each value
for y in range(ksize):
for x in range(ksize):
# distance from center
px = x - ksize // 2
py = y - ksize // 2
# degree to radiant
_theta = theta / 180 * np.pi
cos_theta = np.cos(_theta)
sin_theta = np.sin(_theta)
# get kernel x
_x = cos_theta * px + sin_theta * py
# get kernel y
_y = -sin_theta * px + cos_theta * py
# fill kernel
gabor[y, x] = np.exp(
-(_x ** 2 + gamma ** 2 * _y ** 2) / (2 * sigma ** 2)
) * np.cos(2 * np.pi * _x / lambd + psi)
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
img = imread("../image_data/lena.jpg")
# turn image in gray scale value
gray = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
out = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
"""
ksize = 10
sigma = 8
lambd = 10
gamma = 0
psi = 0
"""
kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filter2D(gray, CV_8UC3, kernel_10)
out = out / out.max() * 255
out = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
|
StarcoderdataPython
|
1624540
|
<reponame>babbysross/ITER-Inspection-Tool<filename>GPIO-PWM.py
#A program that will hopefully control a motor via a Raspberry Pi Zero W
from tkinter import Frame, Scale, HORIZONTAL, Tk
import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
GPIO.setwarnings(False)
pwm = GPIO.PWM(18,100)
pwm.start(5)
class App:
#main application
    def __init__(self, master):
        frame = Frame(master)
        frame.pack()
        w = Scale(frame, from_=0, to=180,
                  orient=HORIZONTAL, command=self.update)
        w.pack()
def update(self, angle):
duty = float(angle) / 10.0 + 2.5
pwm.ChangeDutyCycle(duty)
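        # e.g. (illustrative) angle=90 -> duty = 90/10 + 2.5 = 11.5 % of the 100 Hz PWM period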
root = Tk()
root.wm_title('Doosan Motor Control')
app = App(root)
root.geometry("200x50+0+0")
root.mainloop()
|
StarcoderdataPython
|
1799106
|
<reponame>AxelGoetz/website-fingerprinting<gh_stars>10-100
UNMONITORED_LABEL = -1
MONITORED_LABEL = 1
# Sets the percentage of unmonitored and monitored data you will train on
TRAIN_PERCENTAGE_UNMONITORED = 0.10
TRAIN_PERCENTAGE_MONITORED = 0.90
K_FOLDS = 3
DATA_DIR = ''
|
StarcoderdataPython
|
42854
|
from .client import Spread, Client
from ._version import __version__, __version_info__
__all__ = ["Spread", "Client", "__version__", "__version_info__"]
|
StarcoderdataPython
|
3261026
|
<gh_stars>0
"""
Main entry point for the application.
"""
# Standard Library Packages
import logging
import sys
# Installed Packages
import darkdetect
import qdarkstyle
from PyQt5 import QtWidgets, QtGui
# BEAMS Modules
from app.gui import mainwindow
from app.gui.dialogs.dialog_misc import NotificationDialog
from app.model import services
from app.resources import resources
from app.util import qt_constants, report
class BEAMS(QtWidgets.QApplication):
"""
Main program class. Initializing will not instantiate any GUI elements until run() is called.
"""
def __init__(self):
super(BEAMS, self).__init__(sys.argv)
report.init_reporting()
self.__system_service = services.SystemService()
self.__file_service = services.FileService()
self.__system_service.load_configuration_file()
pix = QtGui.QPixmap(resources.SPLASH_IMAGE)
self.splash = QtWidgets.QSplashScreen(pix.scaledToHeight(200, qt_constants.SmoothTransformation))
self.splash.show()
self.processEvents()
if self.__system_service.get_theme_preference() == self.__system_service.Themes.LIGHT:
self.setStyleSheet(qdarkstyle.load_stylesheet(palette=qdarkstyle.LightPalette))
elif self.__system_service.get_theme_preference() == self.__system_service.Themes.DARK:
self.setStyleSheet(qdarkstyle.load_stylesheet(palette=qdarkstyle.DarkPalette))
else:
if darkdetect.isDark():
self.setStyleSheet(qdarkstyle.load_stylesheet(palette=qdarkstyle.DarkPalette))
else:
self.setStyleSheet(qdarkstyle.load_stylesheet(palette=qdarkstyle.LightPalette))
db = QtGui.QFontDatabase()
db.addApplicationFont(resources.LATO_BLACK_FONT)
db.addApplicationFont(resources.LATO_BLACK_ITALIC_FONT)
db.addApplicationFont(resources.LATO_BOLD_FONT)
db.addApplicationFont(resources.LATO_BOLD_ITALIC_FONT)
db.addApplicationFont(resources.LATO_ITALIC_FONT)
db.addApplicationFont(resources.LATO_LIGHT_FONT)
db.addApplicationFont(resources.LATO_LIGHT_ITALIC_FONT)
db.addApplicationFont(resources.LATO_REGULAR_FONT)
db.addApplicationFont(resources.LATO_THIN_FONT)
db.addApplicationFont(resources.LATO_THIN_ITALIC_FONT)
self.main_program_window = None
def run(self):
"""
Creates the main window and starts the application.
"""
self.main_program_window = mainwindow.MainWindow()
self.main_program_window.show()
self.splash.finish(self.main_program_window)
self._check_version()
sys.exit(self.exec_())
def _check_version(self):
notify = self.__system_service.get_notify_user_of_update()
current_version = self.__system_service.get_current_version()
_, latest_version = self.__system_service.get_latest_version()
report.log_info(f"Running beams@{current_version}")
if notify and latest_version != "unknown":
NotificationDialog.launch(f"New version available! Currently {current_version} is installed, {latest_version} is available.",
"Do not show again (until next release).",
lambda user_checked: self.__system_service.set_notify_user_of_update(not user_checked))
def exec_(self) -> int:
try:
i = super(BEAMS, self).exec_()
return i
except Exception as e:
report.report_exception(e)
finally:
report.close()
self.__system_service.write_configuration_file()
|
StarcoderdataPython
|
26474
|
#!/usr/bin/env python3
import re
import sys
from glob import glob
from subprocess import run
def main(args):
assert len(args) >= 1
from_image = args.pop(0)
optional = [x for x in map(str.strip, args) if x]
optional_used = set()
with open("Dockerfile", "w") as fout:
print(f"from {from_image}", file=fout)
for fname in sorted(glob("*.Dockerfile")):
if fname.startswith("optional."):
if any(x in fname for x in optional):
optional_used.add(
re.search(
r"^optional\.(\d*\.)?(\S+?)\.Dockerfile$", fname
).groups()[1]
)
else:
continue
with open(fname) as fin:
print(fin.read().strip(), file=fout)
our_tag = "orestisfl/env"
if optional_used:
our_tag += "-" + "-".join(sorted(optional_used))
our_tag += ":" + from_image.split(":", 1)[1]
with open("image", "w") as f:
print(our_tag, file=f)
return run(["docker", "build", "-t", our_tag, "."], check=True)
if __name__ == "__main__":
print(main(sys.argv[1:]), file=sys.stderr)
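# Editor's note (hedged usage sketch; the script name and image tags below are
# made up for illustration):
#   python build.py python:3.10-slim cuda
# concatenates every *.Dockerfile in the current directory onto
# "from python:3.10-slim", keeps only the optional.*.Dockerfile files whose
# name contains "cuda", writes the resulting tag
# (e.g. orestisfl/env-cuda:3.10-slim) to ./image and runs
# `docker build -t <tag> .`.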
|
StarcoderdataPython
|
5136216
|
from time import sleep
from datetime import datetime
import sys
import busio
import digitalio
import board
import adafruit_mcp3xxx.mcp3008 as MCP
from adafruit_mcp3xxx.analog_in import AnalogIn
def voltage_to_direction(voltage: float) -> str:
"""
    Converts an analog voltage to a compass direction
    Arguments:
    - voltage: Voltage float value from the MCP3008. Values are between 0 and 3.3V
    Returns:
    - Direction corresponding to the input voltage
"""
if voltage < 0.20625 or voltage > 3.09375:
return "N"
elif 0.20625 <= voltage < 0.61875:
return "NE"
elif 0.61875 <= voltage < 1.03125:
return "E"
elif 1.03125 <= voltage < 1.44375:
return "SE"
elif 1.44375 <= voltage < 1.85625:
return "S"
elif 1.85625 <= voltage < 2.26875:
return "SW"
elif 2.26875 <= voltage < 2.68125:
return "W"
else:
return "NW"
def voltage_to_degrees(voltage: float) -> int:
"""
    Converts an analog voltage to rotational degrees
    Arguments:
    - voltage: Voltage float value from the MCP3008. Values are between 0 and 3.3V
    Returns:
    - Degrees corresponding to the input voltage
"""
return int(voltage*360/3.3)
def voltage() -> AnalogIn:
    """
    Sets up the SPI bus and MCP3008 and returns an analog input channel on pin 0
    Arguments: None
    Returns:
    - The AnalogIn channel object (its .value and .voltage attributes are read by the caller)
    """
try:
# create the spi bus
spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
# create the cs (chip select)
cs = digitalio.DigitalInOut(board.D5)
# create the mcp object
mcp = MCP.MCP3008(spi, cs)
# create an analog input channel on pin 0
chan = AnalogIn(mcp, MCP.P0)
return chan
    except Exception:
        # SPI/MCP3008 setup failed; exit rather than continue without a channel
        sys.exit(1)
def main():
"""
Driver function
"""
    degree_sign = u'\N{DEGREE SIGN}'
chan = voltage()
while True:
print('Raw ADC Value: ', chan.value)
print('ADC Voltage: ' + str(chan.voltage) + 'V')
print('Direction: ' + voltage_to_direction(chan.voltage))
print('Direction: ' + str(voltage_to_degrees(chan.voltage)) + degree_sign)
print()
sleep(2)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
4851535
|
<reponame>melancholy/dd-trace-py
import mock
from ddtrace.internal.hostname import get_hostname
@mock.patch("socket.gethostname")
def test_get_hostname(socket_gethostname):
# Test that `get_hostname()` just returns `socket.gethostname`
socket_gethostname.return_value = "test-hostname"
assert get_hostname() == "test-hostname"
# Change the value returned by `socket.gethostname` to test the cache
socket_gethostname.return_value = "new-hostname"
assert get_hostname() == "test-hostname"
|
StarcoderdataPython
|
9682288
|
# Importing the required libraries
from surprise import Reader, Dataset
from surprise import SVD, accuracy, SVDpp, SlopeOne, BaselineOnly, CoClustering
import datetime
import requests, zipfile, io
from os import path
import pandas as pd
import tqdm as tqdm
from numpy import *
from sklearn.model_selection import train_test_split
import time
import pickle
# Loading the mapping data, which maps each movie id
# in the ratings to its title and genre.
# The resulting data structure is a dictionary where the
# movie id is the key and the title and genre are the values.
def load_mapping_data():
movie_data = {}
chunk_size = 500000
df_dtype = {
"movieId": int,
"title": str,
"genres": str
}
cols = list(df_dtype.keys())
for df_chunk in tqdm.tqdm(pd.read_csv('ml-latest-small/movies.csv', usecols=cols, dtype=df_dtype, chunksize=chunk_size)):
df_chunk.shape[0]
combine_data = [list(a) for a in
zip(df_chunk["movieId"].tolist(), df_chunk["title"].tolist(),
df_chunk["genres"].tolist())]
for a in combine_data:
movie_data[a[0]] = [a[1], a[2]]
del df_chunk
return movie_data
# Loading the rating data; for the full ~27M-record dataset this takes around 2 minutes.
# The resulting data structure is a dictionary where the
# user id is the key and all of that user's ratings are the values, for example for user 1:
# 1 = {
# [movieId,rating,timestamp],
# [movieId,rating,timestamp],
# [movieId,rating,timestamp],
# }
def load_data():
rating_data = {}
unique_user_id = []
chunk_size = 50000
df_dtype = {
"userId": int,
"movieId": int,
"rating": float,
"timestamp": int,
}
cols = list(df_dtype.keys())
for df_chunk in tqdm.tqdm(pd.read_csv('ml-latest-small/ratings.csv', usecols=cols, dtype=df_dtype, chunksize=chunk_size)):
user_id = df_chunk["userId"].tolist()
unique_user_id.extend(set(user_id))
movie_id = df_chunk["movieId"].tolist()
rating = df_chunk["rating"].tolist()
timestamp = df_chunk["timestamp"].tolist()
combine_data = [list(a) for a in zip(user_id, movie_id, rating, timestamp)]
for a in combine_data:
if a[0] in rating_data.keys():
rating_data[a[0]].extend([[a[0], a[1], a[2], a[3]]])
else:
rating_data[a[0]] = [[a[0], a[1], a[2], a[3]]]
del df_chunk
return rating_data, unique_user_id
# Split the data into training and testing sets.
# This is not done over the whole dataset at once; instead it is done per user:
# for each user id their ratings are split 80% training / 20% testing,
# so together the resulting training and testing sets cover the whole original dataset.
def spilt_data(rating_data, unique_user_id):
training_data = []
testing_data = []
    t0 = time.time()
    for u in unique_user_id:
        if len(rating_data[u]) == 1:
            x_test = rating_data[u]
            x_train = rating_data[u]
        else:
            x_train, x_test = train_test_split(rating_data[u], test_size=0.2)
        training_data.extend(x_train)
        testing_data.extend(x_test)
    # measure the elapsed time only after the per-user split loop has finished
    total = time.time() - t0
    print(int(total))
return training_data, testing_data
def get_movie_title(movie_id, movie_data):
if movie_id in movie_data.keys():
return movie_data[movie_id][0]
def get_movie_genre(movie_id, movie_data):
if movie_id in movie_data.keys():
return movie_data[movie_id][1]
# def get_train_test_data():
# rating_data, unique_user_id = load_data()
# training_data, testing_data = spilt_data(rating_data, unique_user_id)
# training_dataframe = pd.DataFrame.from_records(training_data)
# training_dataframe.columns = ["userId","movieId","rating","timestamp"]
# testing_dataframe = pd.DataFrame.from_records(testing_data)
# testing_dataframe.columns= ["userId","movieId","rating","timestamp"]
# return training_dataframe, testing_dataframe
def get_train_test_data(new_sample = False):
if new_sample:
rating_data, unique_user_id = load_data()
training_data, testing_data = spilt_data(rating_data, unique_user_id)
training_dataframe = pd.DataFrame.from_records(training_data)
training_dataframe.columns = ["userId","movieId","rating","timestamp"]
testing_dataframe = pd.DataFrame.from_records(testing_data)
testing_dataframe.columns=["userId","movieId","rating","timestamp"]
# df_links = pd.read_csv('ml-latest-small/links.csv')
file = open('training_dataframe.txt', 'wb')
pickle.dump(training_dataframe, file)
file.close()
file = open('testing_dataframe.txt', 'wb')
pickle.dump(testing_dataframe, file)
file.close()
else:
file = open('training_dataframe.txt', 'rb')
training_dataframe = pickle.load(file)
file.close()
file = open('testing_dataframe.txt', 'rb')
testing_dataframe = pickle.load(file)
file.close()
return training_dataframe, testing_dataframe
if __name__ == "__main__":
    # download http://files.grouplens.org/datasets/movielens/ml-latest-small.zip on first use
    # all files are extracted into the ml-latest-small folder
if not path.exists('ml-latest-small'):
print("Downloading Files for first time use: ")
download_file = requests.get('http://files.grouplens.org/datasets/movielens/ml-latest-small.zip')
        zipped_file = zipfile.ZipFile(io.BytesIO(download_file.content))  # wrap the downloaded bytes for extraction
zipped_file.extractall()
print("Data Loading and Processing, Estimated Time 2 minutes :")
rating_data, unique_user_id = load_data()
print("Training and Testing DataSets Construction, Estimated Time 40 seconds :")
training_data, testing_data = spilt_data(rating_data, unique_user_id)
print("Mapping Data Processing :")
movie_data = load_mapping_data()
print("Movie name with id = 1 :")
print(get_movie_title(1, movie_data))
print("Movie genre with id = 1 :")
print(get_movie_genre(1, movie_data))
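# Editor's note (hedged sketch, not part of the original script): the surprise
# imports at the top are never exercised above. One plausible way to train and
# evaluate an SVD model on the frames returned by get_train_test_data() is
# sketched below; the 0.5-5.0 rating scale is an assumption about the MovieLens
# data, and the function is illustrative only (it is not called anywhere).
def example_train_svd():
    training_dataframe, testing_dataframe = get_train_test_data(new_sample=False)
    reader = Reader(rating_scale=(0.5, 5.0))  # assumed MovieLens rating scale
    train_set = Dataset.load_from_df(
        training_dataframe[["userId", "movieId", "rating"]], reader
    ).build_full_trainset()
    algo = SVD()
    algo.fit(train_set)
    # surprise expects the test set as (user, item, rating) tuples
    test_set = list(
        testing_dataframe[["userId", "movieId", "rating"]].itertuples(index=False, name=None)
    )
    predictions = algo.test(test_set)
    return accuracy.rmse(predictions)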
|
StarcoderdataPython
|
105840
|
<gh_stars>10-100
#
# Copyright 2015-2020 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ..runtimetool import RuntimeTool
from .bashtoolmixin import BashToolMixIn
from .curltoolmixin import CurlToolMixIn
class phpTool(BashToolMixIn, CurlToolMixIn, RuntimeTool):
"""PHP is a popular general-purpose scripting language that is especially suited to web development.
Home: http://php.net/
By default the latest available PHP binary is used for the following OSes:
* Debian & Ubuntu - uses Sury (https://deb.sury.org/) builds 5.6 & 7.x.
* CentOS, RHEL & Oracle Linux - uses SCL 5.6 & 7.x.
    * macOS - uses Homebrew builds 5.6 & 7.x.
    You can forbid source builds by setting phpBinOnly to a non-empty string.
    However, if phpVer is set then php-build is used, which may consume a lot of time and
    resources due to the lack of trusted binary builds.
You can control installed extensions by setting space separated environment:
* phpExtRequire - required extensions to be installed or fail
* phpExtTry - nice to have extensions
The same can be done by setting project-specific .toolTune options as array:
* extRequire = []
* extTry = []
"""
__slots__ = ()
def getDeps(self):
deps = BashToolMixIn.getDeps(self) + CurlToolMixIn.getDeps(self)
# TODO: need to disable for binary only installs
deps += ['phpbuild']
return deps
def _installTool(self, env):
ospath = self._ospath
environ = self._environ
php_ver = env['phpVer']
if not env['phpSourceBuild']:
self._installBinaries(env)
return
php_dir = env['phpDir']
try:
self._os.makedirs(php_dir)
        except OSError:
            # the directory may already exist
            pass
self._buildDeps(env)
old_tmpdir = environ.get('TMPDIR', '/tmp')
environ['TMPDIR'] = ospath.join(php_dir, '..')
self._executil.callExternal(
[env['phpbuildBin'], env['phpSourceVer'], env['phpDir']])
environ['TMPDIR'] = old_tmpdir
def _installBinaries(self, env):
detect = self._detect
phputil = self._phputil
install = self._install
ver = env['phpVer']
php_pkg = phputil.basePackage(ver)
if detect.isDebian():
repo = env.get('phpSuryRepo', 'https://packages.sury.org/php')
gpg = self._callCurl(env, [repo + '/apt.gpg'], binary_output=True)
install.aptRepo(
'sury', "deb {0} $codename$ main".format(repo), gpg)
install.deb(php_pkg)
elif detect.isUbuntu():
install.aptRepo('sury', 'ppa:ondrej/php', None)
install.deb(php_pkg)
elif detect.isSCLSupported():
if phputil.isIUSVer(ver):
install.yumIUS()
install.yum(php_pkg + '-cli')
else:
install.yumSCL()
install.yum(php_pkg)
elif detect.isMacOS():
install.brew(php_pkg)
elif detect.isAlpineLinux():
install.apkCommunity()
install.apk(php_pkg)
elif detect.isSLES():
self._install.SUSEConnectVerArch('sle-module-web-scripting')
install.zypper(php_pkg)
else:
install.zypper(php_pkg)
install.yum('{0}-cli'.format(php_pkg))
install.emerge('dev-lang/php')
install.pacman(php_pkg)
env['phpJustInstalled'] = True
def _updateTool(self, env):
pass
def uninstallTool(self, env):
if env['phpVer'] == self.SYSTEM_VER or env['phpBinOnly']:
return super(phpTool, self).uninstallTool(env)
php_dir = env['phpDir']
self._pathutil.rmTree(php_dir)
self._have_tool = False
def envNames(self):
return ['phpDir', 'phpBin', 'phpVer', 'phpfpmVer', 'phpBinOnly', 'phpSuryRepo',
'phpExtRequire', 'phpExtTry', 'phpForceBuild', 'phpSourceVer']
def initEnv(self, env):
ospath = self._ospath
os = self._os
detect = self._detect
phputil = self._phputil
environ = self._environ
# ---
if 'phpfpmVer' in env:
php_ver = env.setdefault('phpVer', env['phpfpmVer'])
# ---
phpForceBuild = env.setdefault('phpForceBuild', False)
phpBinOnly = env.setdefault('phpBinOnly', not phpForceBuild)
env['phpSourceBuild'] = False
if phpBinOnly and phpForceBuild:
self._warn('"phpBinOnly" and "phpForceBuild" do not make sense'
' when set together!')
php_required_ext = env.get('phpExtRequire', '')
php_try_ext = env.get('phpExtTry', '')
environ['phpExtRequire'] = php_required_ext
environ['phpExtTry'] = php_try_ext
php_required_ext = php_required_ext.split()
php_try_ext = php_try_ext.split()
# ---
php_binaries = phputil.binaryVersions()
if php_binaries:
php_latest = php_binaries[-1]
php_ver = env.setdefault('phpVer', php_latest)
foundBinary = True
if php_ver.split('.')[0] == '5' and php_ver != '5.6':
php_ver = '5.6'
self._warn('Forcing PHP 5.6 for PHP 5.x requirement')
elif php_ver == '7' and php_latest.split('.')[0] == '7':
php_ver = php_latest
elif php_ver.split('.') > php_latest.split('.'):
foundBinary = False
self._warn(
'Binary builds are supported only for: {0}'.format(', '.join(php_binaries)))
env['phpVer'] = php_ver
else:
foundBinary = False
php_ver = env.setdefault('phpVer', self.SYSTEM_VER)
# ---
if php_ver == self.SYSTEM_VER:
super(phpTool, self).initEnv(env)
elif foundBinary and not phpForceBuild:
if detect.isDebian() or detect.isUbuntu():
bin_name = 'php' + php_ver
bin_src = ospath.join('/usr/bin', bin_name)
self._have_tool = phputil.createBinDir(env, bin_src)
elif detect.isSCLSupported():
if phputil.isIUSVer(php_ver):
# IUS allows only one version installed as default
super(phpTool, self).initEnv(env)
else:
try:
ver = php_ver.replace('.', '')
sclname = 'rh-php{0}'.format(ver)
env_to_set = self._executil.callExternal(
['scl', 'enable', sclname, 'env'], verbose=False)
self._pathutil.updateEnvFromOutput(env_to_set)
super(phpTool, self).initEnv(env)
if self._have_tool:
self._have_tool = env['phpBin'].startswith('/opt')
except self._ext.subprocess.CalledProcessError:
return
except OSError:
return
elif detect.isArchLinux():
if phputil.isArchLatest(php_ver):
bin_name = 'php'
else:
bin_name = 'php' + php_ver.replace('.', '')
bin_src = ospath.join('/usr/bin', bin_name)
self._have_tool = phputil.createBinDir(env, bin_src)
elif detect.isAlpineLinux():
if phputil.isAlpineSplit():
bin_name = 'php' + php_ver[0]
else:
bin_name = 'php'
bin_src = ospath.join('/usr/bin', bin_name)
self._have_tool = phputil.createBinDir(env, bin_src)
elif detect.isMacOS():
brew_prefix = env['brewDir']
formula = phputil.basePackage(php_ver)
php_dir = ospath.join(brew_prefix, 'opt', formula, 'bin')
if ospath.exists(php_dir):
self._pathutil.addBinPath(php_dir, True)
super(phpTool, self).initEnv(env)
elif not phpBinOnly:
def_dir = ospath.join(
env['phpbuildDir'], 'share', 'php-build', 'definitions')
if not ospath.exists(def_dir):
return
php_ver = env.setdefault('phpSourceVer', php_ver)
defs = os.listdir(def_dir)
defs = self._ext.fnmatch.filter(defs, php_ver + '*')
if not defs:
self._errorExit('PHP version "{0}" not found'.format(php_ver))
php_ver = self._versionutil.latest(defs)
env['phpSourceVer'] = php_ver
env['phpForceBuild'] = True
env['phpSourceBuild'] = True
php_dir = ospath.join(self._environ['HOME'], '.php', php_ver)
php_dir = env.setdefault('phpDir', php_dir)
php_bin_dir = ospath.join(php_dir, 'bin')
php_bin = ospath.join(php_bin_dir, 'php')
if ospath.exists(php_bin):
self._have_tool = True
self._pathutil.addBinPath(php_bin_dir, True)
env.setdefault('phpBin', php_bin)
# ---
if self._have_tool:
if env.get('phpJustInstalled', False):
self._phputil.installExtensions(env, [
'apcu',
'curl',
], True)
phputil.installExtensions(env, php_required_ext, False)
phputil.installExtensions(env, php_try_ext, True)
def loadConfig(self, config):
env = config['env']
phputil = self._phputil
php_required_ext = self._getTune(config, 'extRequire', [])
php_try_ext = self._getTune(config, 'extTry', [])
phputil.installExtensions(env, php_required_ext, False)
phputil.installExtensions(env, php_try_ext, True)
def _buildDeps(self, env):
ospath = self._ospath
os = self._os
environ = self._environ
if self._detect.isSLES():
self._errorExit(
'PHP source builds are not supported for SLES yet!')
return
self._builddep.require(env, [
'ssl',
'mysqlclient',
'postgresql',
])
# APT
# ---
self._install.deb([
'build-essential',
'bison',
'automake',
'autoconf',
'libtool',
're2c',
'libcurl4-openssl-dev',
'libtidy-dev',
'libpng-dev',
'libmcrypt-dev',
'libjpeg-dev',
'libreadline-dev',
'libbz2-dev',
'libc-client-dev',
'libdb-dev',
'libedit-dev',
'libenchant-dev',
'libevent-dev',
'libexpat1-dev',
'libfreetype6-dev',
'libgcrypt11-dev',
'libgd2-dev',
'libglib2.0-dev',
'libgmp3-dev',
'libicu-dev',
'libjpeg-dev',
'libkrb5-dev',
'libldap2-dev',
'libmagic-dev',
'libmhash-dev',
'libonig-dev',
'libpam0g-dev',
'libpcre3-dev',
'libpng-dev',
'libpspell-dev',
'libqdbm-dev',
'librecode-dev',
'libsasl2-dev',
'libsnmp-dev',
'libsqlite3-dev',
'libwrap0-dev',
'libxmltok1-dev',
'libxml2-dev',
'libvpx-dev',
'libxslt1-dev',
'unixodbc-dev',
'zlib1g-dev',
])
# Extra repo before the rest
# ---
self._install.yumEPEL()
self._install.rpm([
'binutils',
'patch',
'git',
'gcc',
'gcc-c++',
'make',
'autoconf',
'automake',
'libtool',
'bison',
're2c',
'glibc-devel',
'libxml2-devel',
'pkgconfig',
'curl-devel',
'libpng-devel',
'libjpeg-devel',
'libXpm-devel',
'freetype-devel',
'gmp-devel',
'libmcrypt-devel',
'aspell-devel',
'recode-devel',
'libicu-devel',
'oniguruma-devel',
'libtidy-devel',
'libxslt-devel',
'readline-devel',
'zlib-devel',
'pcre-devel',
])
self._install.yum('bzip2-devel')
self._install.zypper('libbz2-devel')
self._install.emergeDepsOnly(['dev-lang/php'])
self._install.pacman([
'patch',
'git',
'gcc',
'make',
'autoconf',
'automake',
'libtool',
'bison',
're2c',
'glibc',
'libxml2',
'curl',
'libpng',
'libjpeg',
'libxpm',
'freetype2',
'gmp',
'libmcrypt',
'aspell',
'recode',
'icu',
'oniguruma',
'tidy',
'libxslt',
'readline',
'zlib',
'pcre',
])
# ---
systemctl = self._pathutil.which('systemctl')
if systemctl:
self._install.deb(['libsystemd-dev'])
self._install.rpm(['systemd-devel'])
with_systemd = ' --with-fpm-systemd'
else:
with_systemd = ' --without-fpm-systemd'
multiarch = None
dpkgarch = self._pathutil.which('dpkg-architecture')
if dpkgarch:
multiarch = self._executil.callExternal(
[dpkgarch, '-qDEB_HOST_MULTIARCH']).strip()
if multiarch:
if ospath.exists(ospath.join('/usr/include', multiarch, 'curl')):
curl_dir = ospath.join(env['phpDir'], '..', 'curl')
try:
os.mkdir(curl_dir)
os.symlink(ospath.join('/usr/include', multiarch),
ospath.join(curl_dir, 'include'))
os.symlink(ospath.join('/usr/lib', multiarch),
ospath.join(curl_dir, 'lib'))
except Exception as e:
# print(e)
pass
else:
curl_dir = '/usr/include'
with_libdir = ' --with-libdir={0} --with-curl={1}'.format(
ospath.join('lib', multiarch),
curl_dir,
)
else:
with_libdir = ''
# ---
from ..details.resourcealgo import ResourceAlgo
cpu_count = ResourceAlgo().cpuLimit({})
environ['PHP_BUILD_EXTRA_MAKE_ARGUMENTS'] = '-j{0}'.format(
cpu_count)
environ['PHP_BUILD_CONFIGURE_OPTS'] = ' \
--disable-debug \
--with-regex=php \
--enable-calendar \
--enable-sysvsem \
--enable-sysvshm \
--enable-sysvmsg \
--enable-bcmath \
--disable-cgi \
--disable-phpdbg \
--enable-fpm \
--with-bz2 \
--enable-ctype \
--without-db4 \
--without-qdbm \
--without-gdbm \
--with-iconv \
--enable-exif \
--enable-ftp \
--with-gettext \
--enable-mbstring \
--with-onig=/usr \
--with-pcre-regex=/usr \
--enable-shmop \
--enable-sockets \
--enable-wddx \
--with-libxml-dir=/usr \
--with-zlib \
--with-kerberos=/usr \
--with-openssl=/usr \
--enable-soap \
--enable-zip \
--with-mhash=yes \
--with-system-tzdata \
' + with_systemd + with_libdir
def tuneDefaults(self, env):
return {
'minMemory': '8M',
'socketType': 'none',
'scalable': False,
'reloadable': False,
'multiCore': False,
}
|
StarcoderdataPython
|
9704249
|
<gh_stars>10-100
from enum import Enum
class Status(Enum):
BETA = "beta"
PRODUCTION = "production"
DEPRECATED = "deprecated"
DISCONTINUED = "discontinued"
class Impact(Enum):
PROFIT = "profit"
CUSTOMERS = "customers"
EMPLOYEES = "employees"
class SentryIssueCategory(Enum):
STALE = "stale"
DECAYING = "decaying"
SPOILED = "spoiled"
FRESH = "fresh"
class EnviromentType(Enum):
GITLAB = "gitlab"
ZOO = "zoo"
|
StarcoderdataPython
|
5197209
|
<filename>explain_of_imply.py<gh_stars>0
from formula import *
def get_truth_value(f, explain_imp: list):
"""
    :param f: the formula whose truth value is to be determined
    :param explain_imp: the truth values of p->q when (p, q) is (0,0), (0,1), (1,0), (1,1)
    :return: True if f evaluates to true under every assignment, otherwise False
"""
if len(explain_imp) != 4:
return
def confirm(_exp, assignment: dict):
form = get_form(_exp)
if form == 'imp':
first = confirm(_exp[0], assignment)
second = confirm(_exp[2], assignment)
if not first and not second:
return explain_imp[0]
elif not first and second:
return explain_imp[1]
elif first and not second:
return explain_imp[2]
else:
return explain_imp[3]
elif form == 'not':
return not confirm(_exp[1], assignment)
else:
return assignment[_exp]
variables = f.get_variables()
true_assignments = []
truth_value = True
for i in range(2 ** len(variables)):
tmp = i
value = []
for j in range(len(variables)):
value = [tmp % 2] + value
tmp >>= 1
assignment = dict(zip(variables, value))
if not confirm(f.exp, assignment):
truth_value = False
else:
true_assignments.append(assignment)
return truth_value
l1 = Formula.from_str('p->(q->p)')
l2 = Formula.from_str('(p->(q->r))->((p->q)->(p->r))')
l3 = Formula.from_str('(!p->!q)->(q->p)')
for i in range(16):
tmp = i
explain_imp = []
for j in range(4):
explain_imp = [tmp % 2] + explain_imp
tmp >>= 1
if get_truth_value(l1, explain_imp) \
and get_truth_value(l2, explain_imp) \
and get_truth_value(l3, explain_imp):
print('{{f->f is {}'.format(bool(explain_imp[0])))
print(' f->t is {}'.format(bool(explain_imp[1])))
print(' t->f is {}'.format(bool(explain_imp[2])))
print(' t->t is {}}}'.format(bool(explain_imp[3])))
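# Editor's note (hedged): the loop above enumerates all 16 candidate truth
# tables for "->" (the four bits of i give the values of f->f, f->t, t->f and
# t->t) and prints every table under which the axiom schemes l1, l2 and l3 are
# all tautologies.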
|
StarcoderdataPython
|
1745713
|
def resolve():
'''
code here
'''
from functools import lru_cache
import sys
sys.setrecursionlimit(10**6)
N, M = [int(item) for item in input().split()]
As = [int(input()) for _ in range(M)]
memo = [False for _ in range(N+1)]
for item in As:
memo[item] = True
res_list = []
prev = 0
for i in range(1,N+1):
if memo[i] == True:
res_list.append(i-1 - prev)
prev = i+1
    else:
        if not memo[N]:
            res_list.append(N - prev)
    @lru_cache(maxsize=None)
def cal(num):
if 0 <= num <= 1:
return 1
elif num == -1:
return 0
else:
return cal(num-1) + cal(num-2)
# print(res_list)
res = 1
for item in res_list:
        res *= cal(item) % (10**9 + 7)
print(res % (10**9 + 7))
if __name__ == "__main__":
resolve()
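# Editor's note (hedged reading of resolve() above, not part of the original
# submission): the broken steps listed in As split the staircase into unbroken
# segments; the number of ways to climb a segment of length L in steps of 1 or
# 2 is the Fibonacci-style value cal(L), and the per-segment counts are
# multiplied together modulo 10**9 + 7.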
|
StarcoderdataPython
|
1923776
|
"""Data Manipulation - Strings"""
def handle_permutations(existing_list, permutations_to_populate):
"""Handle permutations."""
temp_list = []
for perm in permutations_to_populate:
for item in existing_list:
temp_list.append('{}{}'.format(item, perm))
    return list(temp_list)
def make_string_permutations(permutation_matrix):
"""Make string permutations."""
temp_list = ['']
for permutation_list in permutation_matrix:
temp_list = handle_permutations(
existing_list=temp_list,
permutations_to_populate=permutation_list)
return temp_list
def single_value_from_permutable_keys(source_dict, permutable_keys,
default_value=''):
"""Single value from permutable keys."""
example_condition = True
err_msg = 'Multiple permutable keys were found. Please use one.\n\n' \
'Source dictionary: {}\n' \
'Allowable permutable keys: {}' \
.format(source_dict, permutable_keys)
valid_keys_in_source_dict = 0
for key in source_dict:
if key in permutable_keys:
valid_keys_in_source_dict += 1
if valid_keys_in_source_dict == 0:
return ''
elif valid_keys_in_source_dict > 1:
raise Exception(err_msg)
else:
return ''.join(
source_dict[key]
if key in source_dict else '' for key in permutable_keys
) if example_condition else default_value
def example_string_permutations_use_case():
"""Example."""
example_string_permutations = (
('char', 'characteristic'),
('Grp', 'Group'),
('', 1, 2),
('Label', '.label')
)
example_dict = {}
example_arg_name_permutations = \
make_string_permutations(example_string_permutations)
example_chargrp_label_arg_names = example_arg_name_permutations
example_char_grp_label = single_value_from_permutable_keys(
source_dict=example_dict,
permutable_keys=example_chargrp_label_arg_names)
return example_char_grp_label
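# Editor's note (hedged usage sketch, not part of the original module):
# make_string_permutations((('a', 'b'), ('1', '2'))) returns
# ['a1', 'b1', 'a2', 'b2'] -- one element from each inner tuple, concatenated
# in order, with the later tuples varying slowest.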
|
StarcoderdataPython
|
9604897
|
import pytest
from random import randint
from datetime import datetime
from test.test_data import generate_courier_db, generate_courier
from candy_delivery.db.models import Courier, Order
@pytest.mark.asyncio
async def test_update_courier(api_client, migrated_db_session):
courier = generate_courier_db()
courier_update = generate_courier()
courier_db = Courier(
id=courier["courier_id"],
courier_type=courier["courier_type"],
regions=courier["regions"],
working_hours=courier["working_hours"],
rating=courier["rating"],
earnings=courier["earnings"]
)
migrated_db_session.add(courier_db)
migrated_db_session.commit()
response = await api_client.patch("/couriers/" + str(courier["courier_id"]),
json = {
"courier_type": courier_update['courier_type'],
"regions": courier_update['regions'],
"working_hours": courier_update['working_hours']
}
)
assert response.status_code == 200
assert response.json() == {
"courier_id": courier["courier_id"],
"courier_type": courier_update['courier_type'],
"regions": courier_update['regions'],
"working_hours": courier_update['working_hours']
}
@pytest.mark.asyncio
async def test_update_courier_not_found(api_client):
courier_update = generate_courier()
response = await api_client.patch("/couriers/" + str(randint(0, 100)),
json = {
"courier_type": courier_update['courier_type'],
"regions": courier_update['regions'],
"working_hours": courier_update['working_hours']
}
)
assert response.status_code == 404
@pytest.mark.asyncio
async def test_update_courier_cancel_orders(api_client, migrated_db_session):
courier_db = Courier(
id=6,
courier_type="car",
regions=[20, 15, 9, 4],
working_hours=["09:00-16:00"],
)
order_db_1 = Order(
id=1,
weight=40,
region=4,
delivery_hours=["09:30-12:30"],
assign_time=datetime.utcnow(),
courier_id=6
)
order_db_2 = Order(
id=2,
weight=14,
region=20,
delivery_hours=["14:30-16:00"],
assign_time=datetime.utcnow(),
courier_id=6
)
order_db_3 = Order(
id=3,
weight=16,
region=15,
delivery_hours=["12:00-15:30"],
assign_time=datetime.utcnow(),
courier_id=6
)
order_db_4 = Order(
id=4,
weight=10,
region=9,
delivery_hours=["08:00-08:30"],
assign_time=datetime.utcnow(),
courier_id=6
)
migrated_db_session.add(courier_db)
migrated_db_session.add(order_db_1)
migrated_db_session.add(order_db_2)
migrated_db_session.add(order_db_3)
migrated_db_session.add(order_db_4)
migrated_db_session.commit()
response = await api_client.patch("/couriers/6",
json = {
"courier_type": "bike",
"regions": [20, 9]
}
)
assert response.status_code == 200
assert response.json() == {
"courier_id": 6,
"courier_type": "bike",
"regions": [20, 9],
"working_hours": ["09:00-16:00"],
}
valid_orders_ids = [order.id for order in migrated_db_session.query(Order).filter(Order.courier_id != None)]
assert valid_orders_ids == [2]
|
StarcoderdataPython
|
5085913
|
<filename>src/sst/elements/memHierarchy/tests/sdl5-1.py
# Automatically generated SST Python input
import sst
from mhlib import componentlist
DEBUG_L1 = 0
DEBUG_L2 = 0
DEBUG_L3 = 0
DEBUG_MEM = 0
DEBUG_CORE0 = 0
DEBUG_CORE1 = 0
DEBUG_CORE2 = 0
DEBUG_CORE3 = 0
DEBUG_NODE0 = 0
DEBUG_NODE1 = 0
# Define the simulation components
comp_cpu0 = sst.Component("cpu0", "memHierarchy.trivialCPU")
comp_cpu0.addParams({
"memSize" : "0x1000",
"num_loadstore" : "1000",
"commFreq" : "100",
"do_write" : "1"
})
iface0 = comp_cpu0.setSubComponent("memory", "memHierarchy.memInterface")
comp_c0_l1cache = sst.Component("c0.l1cache", "memHierarchy.Cache")
comp_c0_l1cache.addParams({
"access_latency_cycles" : "5",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MSI",
"associativity" : "4",
"cache_line_size" : "64",
"debug_level" : "10",
"L1" : "1",
"debug" : DEBUG_L1 | DEBUG_CORE0 | DEBUG_NODE0,
"cache_size" : "4 KB"
})
comp_cpu1 = sst.Component("cpu1", "memHierarchy.trivialCPU")
comp_cpu1.addParams({
"memSize" : "0x1000",
"num_loadstore" : "1000",
"commFreq" : "100",
"do_write" : "1"
})
iface1 = comp_cpu1.setSubComponent("memory", "memHierarchy.memInterface")
comp_c1_l1cache = sst.Component("c1.l1cache", "memHierarchy.Cache")
comp_c1_l1cache.addParams({
"access_latency_cycles" : "5",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MSI",
"associativity" : "4",
"cache_line_size" : "64",
"debug_level" : "10",
"L1" : "1",
"debug" : DEBUG_L1 | DEBUG_CORE0 | DEBUG_NODE0,
"cache_size" : "4 KB"
})
comp_n0_bus = sst.Component("n0.bus", "memHierarchy.Bus")
comp_n0_bus.addParams({
"bus_frequency" : "2 Ghz"
})
comp_n0_l2cache = sst.Component("n0.l2cache", "memHierarchy.Cache")
comp_n0_l2cache.addParams({
"access_latency_cycles" : "20",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MSI",
"associativity" : "8",
"cache_line_size" : "64",
"debug_level" : "10",
"debug" : DEBUG_L2 | DEBUG_NODE0,
"cache_size" : "32 KB"
})
comp_cpu2 = sst.Component("cpu2", "memHierarchy.trivialCPU")
comp_cpu2.addParams({
"memSize" : "0x1000",
"num_loadstore" : "1000",
"commFreq" : "100",
"do_write" : "1"
})
iface2 = comp_cpu2.setSubComponent("memory", "memHierarchy.memInterface")
comp_c2_l1cache = sst.Component("c2.l1cache", "memHierarchy.Cache")
comp_c2_l1cache.addParams({
"access_latency_cycles" : "5",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MSI",
"associativity" : "4",
"cache_line_size" : "64",
"debug_level" : "10",
"L1" : "1",
"debug" : DEBUG_L1 | DEBUG_CORE2 | DEBUG_NODE1,
"cache_size" : "4 KB"
})
comp_cpu3 = sst.Component("cpu3", "memHierarchy.trivialCPU")
comp_cpu3.addParams({
"memSize" : "0x1000",
"num_loadstore" : "1000",
"commFreq" : "100",
"do_write" : "1"
})
iface3 = comp_cpu3.setSubComponent("memory", "memHierarchy.memInterface")
comp_c3_l1cache = sst.Component("c3.l1cache", "memHierarchy.Cache")
comp_c3_l1cache.addParams({
"access_latency_cycles" : "5",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MSI",
"associativity" : "4",
"cache_line_size" : "64",
"debug_level" : "10",
"L1" : "1",
"debug" : DEBUG_L1 | DEBUG_CORE3 | DEBUG_NODE1,
"cache_size" : "4 KB"
})
comp_n1_bus = sst.Component("n1.bus", "memHierarchy.Bus")
comp_n1_bus.addParams({
"bus_frequency" : "2 Ghz"
})
comp_n1_l2cache = sst.Component("n1.l2cache", "memHierarchy.Cache")
comp_n1_l2cache.addParams({
"access_latency_cycles" : "20",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MSI",
"associativity" : "8",
"cache_line_size" : "64",
"debug_level" : "10",
"debug" : DEBUG_L2 | DEBUG_NODE1,
"cache_size" : "32 KB"
})
comp_n2_bus = sst.Component("n2.bus", "memHierarchy.Bus")
comp_n2_bus.addParams({
"bus_frequency" : "2 Ghz"
})
l3cache = sst.Component("l3cache", "memHierarchy.Cache")
l3cache.addParams({
"access_latency_cycles" : "100",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MSI",
"associativity" : "8",
"cache_line_size" : "64",
"debug_level" : "10",
"debug" : DEBUG_L3,
"cache_size" : "64 KB"
})
memctrl = sst.Component("memory", "memHierarchy.MemController")
memctrl.addParams({
"debug" : DEBUG_MEM,
"debug_level" : "10",
"clock" : "1GHz",
})
memory = memctrl.setSubComponent("backend", "memHierarchy.dramsim")
memory.addParams({
"mem_size" : "512MiB",
"access_time" : "1000 ns",
"system_ini" : "system.ini",
"device_ini" : "DDR3_micron_32M_8B_x4_sg125.ini",
})
# Enable statistics
sst.setStatisticLoadLevel(7)
sst.setStatisticOutput("sst.statOutputConsole")
for a in componentlist:
sst.enableAllStatisticsForComponentType(a)
# Define the simulation links
link_cpu0_l1cache_link = sst.Link("link_cpu0_l1cache_link")
link_cpu0_l1cache_link.connect( (iface0, "port", "1000ps"), (comp_c0_l1cache, "high_network_0", "1000ps") )
link_c0_l1cache_l2cache_link = sst.Link("link_c0_l1cache_l2cache_link")
link_c0_l1cache_l2cache_link.connect( (comp_c0_l1cache, "low_network_0", "10000ps"), (comp_n0_bus, "high_network_0", "10000ps") )
link_cpu1_l1cache_link = sst.Link("link_cpu1_l1cache_link")
link_cpu1_l1cache_link.connect( (iface1, "port", "1000ps"), (comp_c1_l1cache, "high_network_0", "1000ps") )
link_c1_l1cache_l2cache_link = sst.Link("link_c1_l1cache_l2cache_link")
link_c1_l1cache_l2cache_link.connect( (comp_c1_l1cache, "low_network_0", "10000ps"), (comp_n0_bus, "high_network_1", "10000ps") )
link_n0_bus_l2cache = sst.Link("link_n0_bus_l2cache")
link_n0_bus_l2cache.connect( (comp_n0_bus, "low_network_0", "10000ps"), (comp_n0_l2cache, "high_network_0", "1000ps") )
link_n0_l2cache_l3cache = sst.Link("link_n0_l2cache_l3cache")
link_n0_l2cache_l3cache.connect( (comp_n0_l2cache, "low_network_0", "10000ps"), (comp_n2_bus, "high_network_0", "10000ps") )
link_cpu2_l1cache_link = sst.Link("link_cpu2_l1cache_link")
link_cpu2_l1cache_link.connect( (iface2, "port", "1000ps"), (comp_c2_l1cache, "high_network_0", "1000ps") )
link_c2_l1cache_l2cache_link = sst.Link("link_c2_l1cache_l2cache_link")
link_c2_l1cache_l2cache_link.connect( (comp_c2_l1cache, "low_network_0", "10000ps"), (comp_n1_bus, "high_network_0", "10000ps") )
link_cpu3_l1cache_link = sst.Link("link_cpu3_l1cache_link")
link_cpu3_l1cache_link.connect( (iface3, "port", "1000ps"), (comp_c3_l1cache, "high_network_0", "1000ps") )
link_c3_l1cache_l2cache_link = sst.Link("link_c3_l1cache_l2cache_link")
link_c3_l1cache_l2cache_link.connect( (comp_c3_l1cache, "low_network_0", "10000ps"), (comp_n1_bus, "high_network_1", "10000ps") )
link_n1_bus_l2cache = sst.Link("link_n1_bus_l2cache")
link_n1_bus_l2cache.connect( (comp_n1_bus, "low_network_0", "10000ps"), (comp_n1_l2cache, "high_network_0", "1000ps") )
link_n1_l2cache_l3cache = sst.Link("link_n1_l2cache_l3cache")
link_n1_l2cache_l3cache.connect( (comp_n1_l2cache, "low_network_0", "10000ps"), (comp_n2_bus, "high_network_1", "10000ps") )
link_bus_l3cache = sst.Link("link_bus_l3cache")
link_bus_l3cache.connect( (comp_n2_bus, "low_network_0", "10000ps"), (l3cache, "high_network_0", "10000ps") )
link_mem_bus_link = sst.Link("link_mem_bus_link")
link_mem_bus_link.connect( (l3cache, "low_network_0", "10000ps"), (memctrl, "direct_link", "10000ps") )
# End of generated output.
|
StarcoderdataPython
|
1971235
|
<reponame>yuvabedev/AV-Bakery-APP
# Copyright (c) 2022, <EMAIL> and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class AVBCustomer(Document):
pass
|
StarcoderdataPython
|
115664
|
<gh_stars>1-10
from datetime import timedelta, datetime
import pendulum
from airflow import DAG
from airflow.operators.python import PythonOperator
from airflow.providers.ssh.operators.ssh import SSHOperator
from auxiliary.outils import refresh_tableau_extract
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'email': ['<EMAIL>'],
'email_on_failure': True,
'email_on_retry': False,
'start_date': datetime(2019, 3, 6, tzinfo=pendulum.timezone('America/Los_Angeles')),
'retries': 1,
'retry_delay': timedelta(minutes=2),
}
dag = DAG('update_foundation_data', default_args=default_args, catchup=False, schedule_interval='00 21 * * *')
t1_bash = 'cd C:\\Anaconda\\ETL\\foundation && python DSS_D_Data.py'
t2_bash = 'cd C:\\Anaconda\\ETL\\foundation && python LU_Physicians.py'
t4_bash = 'cd C:\\Anaconda\\ETL\\misc_etl && python CovidWaiverData.py'
t1 = SSHOperator(ssh_conn_id='tableau_server',
task_id='refresh_dss_d_data',
command=t1_bash,
dag=dag)
t3 = PythonOperator(
task_id='refresh_rvu_extract',
python_callable=refresh_tableau_extract,
op_kwargs={'datasource_id': 'c08148a1-cf27-48df-8c8f-fc29f2c77c12'},
dag=dag
)
t4 = SSHOperator(ssh_conn_id='tableau_server',
task_id='refresh_covid_waiver',
command=t4_bash,
dag=dag)
t1 >> t3
t4
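# Editor's note (hedged): the bare `t4` expression above does not affect
# scheduling; t4 is already registered with the DAG via its constructor and
# simply has no upstream or downstream dependencies, unlike t1 >> t3.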
|
StarcoderdataPython
|
6694190
|
<gh_stars>100-1000
from django.urls import path
from chats import views
app_name = 'chats'
urlpatterns = [
path('chats/', views.index, name='index'),
]
|
StarcoderdataPython
|
165934
|
# -*- coding: utf-8 -*-
"""Command line interface for Axonius API Client."""
import click
from ..context import AliasedGroup
from . import (
grp_central_core,
grp_discover,
grp_meta,
grp_nodes,
grp_roles,
grp_settings,
grp_users,
)
@click.group(cls=AliasedGroup)
def system():
"""Group: System control commands."""
system.add_command(grp_meta.meta)
system.add_command(grp_nodes.instances)
system.add_command(grp_central_core.central_core)
system.add_command(grp_roles.roles)
system.add_command(grp_settings.settings_lifecycle)
system.add_command(grp_settings.settings_gui)
system.add_command(grp_settings.settings_core)
system.add_command(grp_users.users)
system.add_command(grp_discover.discover)
|
StarcoderdataPython
|
5187112
|
<reponame>vikingden8/Algorithms-Patterns
#!/usr/bin/python3
#coding=utf-8
def search():
items = (0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610)
target = 144
index = interpolationSearch(items, target)
if index == -1:
        print('Element not found.')
else:
print('Element at index {}'.format(index))
def interpolationSearch(items, target):
lo, hi = 0, len(items) - 1
while lo <= hi and target >= items[lo] and target <= items[hi]:
position = lo + int((target - items[lo]) * (hi - lo) / (items[hi] - items[lo]))
if items[position] == target:
return position
if items[position] < target:
lo = position + 1
else:
hi = position - 1
return -1
if __name__ == '__main__':
search()
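# Editor's note (hedged, not part of the original file): interpolation search
# probes position = lo + (target - items[lo]) * (hi - lo) / (items[hi] - items[lo]),
# i.e. it assumes roughly uniformly distributed keys; on uniform data the
# expected cost is O(log log n), but on skewed data such as the Fibonacci-like
# list above it can degrade towards O(n).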
|
StarcoderdataPython
|
4971797
|
class UserExistsError(Exception):
    """The user already exists in the database"""
class UserNotExistsError(Exception):
    """The user was not found"""
class UserOrGoodsNotExistsError(Exception):
    """The item is not in the database or the user has no items"""
class TelegramUserExistsError(Exception):
    """The user already exists in the database"""
class PriceException(Exception):
    """Something is wrong with the price. Check the logs."""
class PasswordException(Exception):
    """Something is wrong with the user. Check the logs."""
class AddGoodsError(Exception):
    """Error while adding an item"""
class GoodsNotExists(Exception):
    """Apparently the URL is not valid"""
class URLExistsError(Exception):
pass
class TelegramUserNotExistsError(Exception):
pass
|
StarcoderdataPython
|
4852343
|
from orbit.estimators.pyro_estimator import PyroEstimatorSVI
def test_pyro_estimator_vi(stan_estimator_lgt_model_input):
stan_model_name, model_param_names, data_input = stan_estimator_lgt_model_input
# create estimator
vi_estimator = PyroEstimatorSVI(num_steps=50)
# extract posterior samples
posteriors, training_metrics = vi_estimator.fit(
model_name=stan_model_name,
model_param_names=model_param_names,
data_input=data_input,
)
assert set(model_param_names) == set(posteriors.keys())
|
StarcoderdataPython
|
1752791
|
<gh_stars>100-1000
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 <NAME>
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.stanza import Message
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.plugins.xep_0071 import XHTML_IM as HTMLIM
register_stanza_plugin(Message, HTMLIM)
# To comply with PEP8, method names now use underscores.
# Deprecated method names are re-mapped for backwards compatibility.
HTMLIM.setBody = HTMLIM.set_body
HTMLIM.getBody = HTMLIM.get_body
HTMLIM.delBody = HTMLIM.del_body
|
StarcoderdataPython
|
11213235
|
<filename>flashback/__init__.py
from .thread import Thread
from .post import Post
def get(base_url):
t = Thread(base_url)
t.get()
return t
|
StarcoderdataPython
|
5080066
|
<reponame>angeldev7/ico
import datetime
import pytest
from eth_utils import to_wei
from web3.contract import Contract
@pytest.fixture
def presale_freeze_ends_at() -> int:
"""How long presale funds stay frozen until refund."""
return int(datetime.datetime(2017, 1, 1).timestamp())
@pytest.fixture
def presale_milestone_pricing(chain, presale_fund_collector, uncapped_flatprice, presale_freeze_ends_at, team_multisig):
"""Pricing used in presale tests, allowing us to set special price for presale participants."""
week = 24 * 3600 * 7
start_time = uncapped_flatprice.call().startsAt()
end_time = start_time + week*4
uncapped_flatprice.transact({"from": team_multisig}).setEndsAt(end_time)
args = [
[
start_time + 0, to_wei("0.10", "ether"),
start_time + week*1, to_wei("0.10", "ether"),
start_time + week*2, to_wei("0.10", "ether"),
start_time + week*3, to_wei("0.10", "ether"),
end_time, to_wei("0", "ether"),
],
]
tx = {
"gas": 4000000,
"from": team_multisig
}
contract, hash = chain.provider.deploy_contract('MilestonePricing', deploy_args=args, deploy_transaction=tx)
contract.transact({"from": team_multisig}).setPreicoAddress(presale_fund_collector.address, to_wei("0.05", "ether"))
assert contract.call().isSane(uncapped_flatprice.address)
return contract
@pytest.fixture
def presale_fund_collector(chain, presale_freeze_ends_at, team_multisig) -> Contract:
"""In actual ICO, the price is doubled (for testing purposes)."""
args = [
team_multisig,
presale_freeze_ends_at,
to_wei(1, "ether")
]
tx = {
"from": team_multisig,
}
presale_fund_collector, hash = chain.provider.deploy_contract('PresaleFundCollector', deploy_args=args, deploy_transaction=tx)
return presale_fund_collector
@pytest.fixture
def presale_crowdsale(chain, presale_fund_collector, uncapped_flatprice, team_multisig):
"""ICO associated with the presale where funds will be moved to a presale."""
presale_fund_collector.transact({"from": team_multisig}).setCrowdsale(uncapped_flatprice.address)
return uncapped_flatprice
@pytest.fixture
def presale_crowdsale_miletstoned(chain, presale_fund_collector, uncapped_flatprice, presale_milestone_pricing, team_multisig):
"""ICO associated with the presale where funds will be moved to a presale.
We set a special milestone pricing that allows us to control the pricing for the presale participants.
"""
uncapped_flatprice.transact({"from": team_multisig}).setPricingStrategy(presale_milestone_pricing.address)
presale_fund_collector.transact({"from": team_multisig}).setCrowdsale(uncapped_flatprice.address)
presale_milestone_pricing.transact({"from" : team_multisig}).setPreicoAddress(presale_fund_collector.address, to_wei("0.08", "ether"))
return uncapped_flatprice
|
StarcoderdataPython
|
292051
|
<filename>tests/integration/test_fetch_partition_should_reset_mutation/test.py
import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance("node", main_configs=["configs/zookeeper_config.xml"], with_zookeeper=True)
@pytest.fixture(scope="module")
def start_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_part_should_reset_mutation(start_cluster):
node.query(
"CREATE TABLE test (i Int64, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test', 'node') ORDER BY i;"
)
node.query("INSERT INTO test SELECT 1, 'a'")
node.query("optimize table test final")
node.query("optimize table test final")
expected = TSV('''all_0_0_2\t1\ta''')
assert TSV(node.query('SELECT _part, * FROM test')) == expected
node.query("ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"})
node.query("ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"})
node.query("ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"})
node.query("ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"})
expected = TSV('''all_0_0_2_4\t1\txxx''')
assert TSV(node.query('SELECT _part, * FROM test')) == expected
node.query(
"CREATE TABLE restore (i Int64, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/restore', 'node') ORDER BY i;"
)
node.query("ALTER TABLE restore FETCH PARTITION tuple() FROM '/clickhouse/tables/test/'")
node.query("ALTER TABLE restore ATTACH PART 'all_0_0_2_4'")
node.query("INSERT INTO restore select 2, 'a'")
print(TSV(node.query('SELECT _part, * FROM restore')))
expected = TSV('''all_0_0_0\t1\txxx\nall_1_1_0\t2\ta''')
assert TSV(node.query('SELECT _part, * FROM restore ORDER BY i')) == expected
node.query("ALTER TABLE restore UPDATE s='yyy' WHERE 1", settings={"mutations_sync": "2"})
expected = TSV('''all_0_0_0_2\t1\tyyy\nall_1_1_0_2\t2\tyyy''')
assert TSV(node.query('SELECT _part, * FROM restore ORDER BY i')) == expected
node.query("ALTER TABLE restore DELETE WHERE 1", settings={"mutations_sync": "2"})
assert node.query("SELECT count() FROM restore").strip() == "0"
|
StarcoderdataPython
|
5173135
|
<filename>setup.py<gh_stars>0
from setuptools import setup, find_packages
DESCRIPTION = "Djangotoolbox for Django-nonrel"
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except IOError:
pass
setup(name='djangotoolbox',
version='1.6.2',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/django-nonrel/djangotoolbox',
packages=find_packages(),
license='3-clause BSD',
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
StarcoderdataPython
|
1631728
|
# coding=utf-8
# pip install textract
import textract
extension = ".pdf"
folder_ma = "texts-ma/"
folder_ba = "texts-ba/"
files_ma = [
"BLEHNER_SVEN_MA_THESIS",
"Iila_Marit_MA_Thesis",
"Kubre_Liisa_MA_Thesis",
"Kümnik_Maret_MA_Thesis",
"Laanepere_Lilian_MA_Thesis.pdf1",
"Maatee, Sylvia. MA thesis",
"Mugra_Siiri_MA_Thesis",
"Parker_Helen_MA_Thesis",
"Rahusaar_Anne_MA_Thesis",
"Sagar_Sandra_MA_Thesis",
"Thealane_Kairit_MA_Thesis",
"Tihkan_Kristi_MA_Thesis",
"Tiido_Terje_MA_Thesis",
"Wu_GaoHeng_MA_Thesis"
]
files_ba = [
"Eesalu_Mona_BA_Thesis",
"Meriste_Liisa_BA_Thesis",
"Roomäe_Annotated Transcript of Grammar Day",
"Gaibli_Aibike_BA_Thesis",
"Nook_Petra_BA_Thesis",
"Roomäe_Kärt_BA_Thesis",
"Gortalova_Jekaterina_BA_Thesis",
"Näär_Kaisa-Liina_BA_Thesis",
"Sai_Elizaveta_BA_Thesis",
"Joemaa_Evelin_BA_Thesis",
"Org_MariaRoberta_BA_Thesis",
"Sünd_Siiri_BA_Thesis",
"Kahur_Helen_BA",
"Parksepp_Lotte_BA_Thesis",
"Talviste_Eliseta_BA_Thesis",
"Karilaid_Liis_BA_Thesis",
"Peedumäe_Ander_BA_Thesis",
"Teele_Männik_BA_24May2019",
"Konno_Henry_BA_Thesis",
"Peterson_Karl_BA_Thesis",
"Tsebakov_Fjodor_BA_Thesis",
"Mander_Monika_BA_Thesis",
"Poopuu_Amanda_BA_Thesis",
"Visnapuu_Karmen_BA_Thesis",
"Melnik_Darja_BA_Thesis",
"Rips_Elisabeth_BA_Thesis",
]
def calculate(folder, files):
for file in files:
path = folder + file + extension
text = textract.process(path)
split = text.split(" ")
unique_words = {}
for word in split:
word = word.lower()
if word not in unique_words:
unique_words[word] = 1
else:
unique_words[word] += 1
ratio = int((float(len(unique_words)) / float(len(split)) * 100)) / 100.0
print("" + file + ": words: " + str(len(split)) + "; unique: " + str(len(unique_words)) + "; ratio: " + str(ratio))
calculate(folder_ma, files_ma)
calculate(folder_ba, files_ba)
|
StarcoderdataPython
|
3368578
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import random
from .builder import DATASETS
from .custom import CustomDataset
from collections import OrderedDict
from mmseg.core import eval_metrics, intersect_and_union, pre_eval_to_metrics
import mmcv
import numpy as np
from mmcv.utils import print_log
from prettytable import PrettyTable
from mmseg.core import eval_metrics
import os
from sklearn import metrics
from mmseg.utils import get_root_logger
@DATASETS.register_module()
class HSIGANDataset(CustomDataset):
"""Pascal VOC dataset.
Args:
split (str): Split txt file for Pascal VOC.
"""
CLASSES = ('noncancer', 'cancer')
PALETTE = [[0, 0, 0], [255, 255, 255]]
def __init__(self, split, gan_img_dir=None, gan_ann_dir=None, gan_split=None, gan_suffix=None,
fake_rate=0.2, **kwargs):
super(HSIGANDataset, self).__init__(
img_suffix='.hdr', seg_map_suffix='.png', split=split, **kwargs)
assert osp.exists(self.img_dir) and self.split is not None
if gan_img_dir is not None:
self.gan_img_dir = gan_img_dir
self.gan_ann_dir = gan_ann_dir
self.gan_split = gan_split
self.gan_suffix = gan_suffix
if self.data_root is not None:
if not osp.isabs(self.gan_img_dir):
self.gan_img_dir = osp.join(self.data_root, self.gan_img_dir)
if not (self.ann_dir is None or osp.isabs(self.gan_ann_dir)):
self.gan_ann_dir = osp.join(self.data_root, self.gan_ann_dir)
if not (self.gan_split is None or osp.isabs(self.gan_split)):
self.gan_split = osp.join(self.data_root, self.gan_split)
self.gan_infos = self.load_annotations(self.gan_img_dir, self.gan_suffix,
self.gan_ann_dir, self.seg_map_suffix,
self.gan_split)
self.len_ganx = len(self.gan_infos)
self.fake_rate = fake_rate/(1 + fake_rate)
self.len_gan = int(self.fake_rate * len(self.img_infos))
else:
self.len_gan = 0
self.len_real = len(self.img_infos)
def __len__(self):
return self.len_real + self.len_gan
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['seg_fields'] = []
results['img_prefix'] = self.img_dir
results['seg_prefix'] = self.ann_dir
if self.custom_classes:
results['label_map'] = self.label_map
def pre_gan_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['seg_fields'] = []
results['img_prefix'] = self.gan_img_dir
results['seg_prefix'] = self.gan_ann_dir
if self.custom_classes:
results['label_map'] = self.label_map
def get_ann_info(self, idx):
"""Get annotation by index."""
return self.img_infos[idx]['ann']
def get_gan_ann_info(self, idx):
"""Get annotation by index."""
return self.gan_infos[idx]['ann']
def prepare_train_img(self, idx):
if idx < self.len_real:
img_info = self.img_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
self.pre_pipeline(results)
else:
random_idx = random.randint(0, self.len_ganx-1)
img_info = self.gan_infos[random_idx]
ann_info = self.get_gan_ann_info(random_idx)
# img_info = self.gan_infos[idx - self.len_real]
# ann_info = self.get_gan_ann_info(idx - self.len_real)
results = dict(img_info=img_info, ann_info=ann_info)
self.pre_gan_pipeline(results)
return self.pipeline(results)
def evaluate(self,
results,
metric='mIoU',
logger=None,
gt_seg_maps=None,
**kwargs):
"""Evaluate the dataset.
Args:
results (list[tuple[torch.Tensor]] | list[str]): per image pre_eval
results or predict segmentation map for computing evaluation
metric.
metric (str | list[str]): Metrics to be evaluated. 'mIoU',
'mDice' and 'mFscore' are supported.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
gt_seg_maps (generator[ndarray]): Custom gt seg maps as input,
used in ConcatDataset
Returns:
dict[str, float]: Default metrics.
"""
if isinstance(metric, str):
metric = [metric]
allowed_metrics = ['mIoU', 'mDice', 'mFscore']
if not set(metric).issubset(set(allowed_metrics)):
raise KeyError('metric {} is not supported'.format(metric))
eval_results = {}
# test a list of files
if mmcv.is_list_of(results, np.ndarray) or mmcv.is_list_of(
results, str):
if gt_seg_maps is None:
gt_seg_maps = self.get_gt_seg_maps()
num_classes = len(self.CLASSES)
ret_metrics = eval_metrics(
results,
gt_seg_maps,
num_classes,
self.ignore_index,
metric,
label_map=self.label_map,
reduce_zero_label=self.reduce_zero_label)
# get kappa
con_mat = np.zeros((2, 2))
for result, gt in zip(results, self.get_gt_seg_maps()):
con_mat += metrics.confusion_matrix(gt.flatten(), result.flatten(), labels=[1, 0])
# test a list of pre_eval_results
else:
ret_metrics = pre_eval_to_metrics(results, metric)
# get kappa
con_mat = np.zeros((2, 2))
pre_eval_results = tuple(zip(*results))
total_area_intersect = sum(pre_eval_results[0])
total_area_label = sum(pre_eval_results[3])
con_mat[0][0] = total_area_intersect[0]
con_mat[1][1] = total_area_intersect[1]
con_mat[0][1] = total_area_label[1] - total_area_intersect[1]
con_mat[1][0] = total_area_label[0] - total_area_intersect[0]
# Because dataset.CLASSES is required for per-eval.
if self.CLASSES is None:
class_names = tuple(range(num_classes))
else:
class_names = self.CLASSES
# summary table
ret_metrics_summary = OrderedDict({
ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)
for ret_metric, ret_metric_value in ret_metrics.items()
})
# each class table
ret_metrics.pop('aAcc', None)
ret_metrics_class = OrderedDict({
ret_metric: np.round(ret_metric_value * 100, 2)
for ret_metric, ret_metric_value in ret_metrics.items()
})
ret_metrics_class.update({'Class': class_names})
ret_metrics_class.move_to_end('Class', last=False)
# for logger
class_table_data = PrettyTable()
for key, val in ret_metrics_class.items():
class_table_data.add_column(key, val)
summary_table_data = PrettyTable()
for key, val in ret_metrics_summary.items():
if key == 'aAcc':
summary_table_data.add_column(key, [val])
else:
summary_table_data.add_column('m' + key, [val])
print_log('per class results:', logger)
print_log('\n' + class_table_data.get_string(), logger=logger)
print_log('Summary:', logger)
print_log('\n' + summary_table_data.get_string(), logger=logger)
# each metric dict
for key, value in ret_metrics_summary.items():
if key == 'aAcc':
eval_results[key] = value / 100.0
else:
eval_results['m' + key] = value / 100.0
ret_metrics_class.pop('Class', None)
for key, value in ret_metrics_class.items():
eval_results.update({
key + '.' + str(name): value[idx] / 100.0
for idx, name in enumerate(class_names)
})
print_log('mIoU:{:.4f}'.format(eval_results['mIoU']), logger=logger)
print_log('mDice:{:.4f}'.format(eval_results['mDice']), logger=logger)
print_log('mAcc:{:.4f}'.format(eval_results['mAcc']), logger=logger)
print_log('aAcc:{:.4f}'.format(eval_results['aAcc']), logger=logger)
print_log('kappa:{:.4f}'.format(kappa(con_mat)), logger=logger)
print_log('accuracy:{:.4f}'.format(accuracy(con_mat)), logger=logger)
# print_log('precision:{:.4f}'.format(precision(con_mat)), logger=logger)
# print_log('sensitivity:{:.4f}'.format(sensitivity(con_mat)), logger=logger)
# print_log('specificity:{:.4f}'.format(specificity(con_mat)), logger=logger)
return eval_results
def kappa(matrix):
matrix = np.array(matrix)
n = np.sum(matrix)
sum_po = 0
sum_pe = 0
for i in range(len(matrix[0])):
sum_po += matrix[i][i]
row = np.sum(matrix[i, :])
col = np.sum(matrix[:, i])
sum_pe += row * col
po = sum_po / n
pe = sum_pe / (n * n)
# print(po, pe)
return (po - pe) / (1 - pe)
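# Editor's note (hedged): kappa() implements Cohen's kappa for a confusion
# matrix: p_o = sum of the diagonal / n is the observed agreement,
# p_e = sum over classes of (row total * column total) / n**2 is the agreement
# expected by chance, and kappa = (p_o - p_e) / (1 - p_e).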
def sensitivity(matrix):
return matrix[0][0]/(matrix[0][0]+matrix[1][0])
def specificity(matrix):
return matrix[1][1]/(matrix[1][1]+matrix[0][1])
def precision(matrix):
return matrix[0][0]/(matrix[0][0]+matrix[0][1])
def accuracy(matrix):
return (matrix[0][0]+matrix[1][1])/(matrix[0][0]+matrix[0][1]+matrix[1][0]+matrix[1][1])
|
StarcoderdataPython
|
11343336
|
<reponame>Erick-Paulino/exercicios-de-cursos
'''Write a program that reads a young person's year of birth and reports,
according to their age, whether they still have to enlist for military
service, whether this is exactly the time to enlist, or whether the
enlistment deadline has already passed. The program must also show how much
time is left until, or has passed since, the deadline.'''
from datetime import date
ano = int(input('What year were you born? '))
idade = date.today().year - ano
if idade < 18:
    print(f'you will have to enlist in the year {ano + 18}.')
elif idade == 18:
    print('this is exactly the right time to enlist.')
else:
    print(f'your enlistment was {date.today().year - (ano + 18)} year(s) ago.')
|
StarcoderdataPython
|
4981557
|
#!/usr/bin/python2.5
#
# Copyright 2014 <NAME>.
#
# Author: <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# See http://creativecommons.org/licenses/MIT/ for more information.
#
# -----------------------------------------------------------------------------
#
# Lookup table definitions.
import numpy
import pylab
import scipy.signal
def pole_pair_to_f_fq(pole_pair):
fq = 1 - pole_pair.prod()
f = -(2 - fq - (pole_pair.sum())) ** 0.5
return f.real, fq.real
def modified_chamberlin(f, fq, x, mode='bp'):
lp = 0.0
bp = 0.0
y = numpy.zeros(x.shape)
x_ = 0.0
coefficient = 1.0 if mode == 'bp' else 0.0
for i in xrange(len(y)):
lp += f * bp
bp += -fq * bp -f * lp + (x[i] + x_ * coefficient)
x_ = x[i]
if mode =='bp':
y[i] = fq * bp
elif mode == 'lp':
y[i] = f * lp
elif mode == 'hp':
y[i] = x_ - lp * f - bp * fq
return y
SAMPLE_RATE = 96000
IR_SIZE = 2048
sample_rates = [SAMPLE_RATE / 12] * 13
sample_rates += [SAMPLE_RATE / 3] * 6
sample_rates += [SAMPLE_RATE] * 1
num_bands = len(sample_rates)
interval = 2 ** (1 / 3.0)
first_frequency = 110 / interval
frequencies = first_frequency * (interval ** numpy.arange(0, num_bands))
filters = []
responses = {}
reconstruction = {}
generate_figures = __name__ == '__main__'
for index, (frequency, sr) in enumerate(zip(frequencies, sample_rates)):
if not sr in reconstruction:
reconstruction[sr] = [0.0, 0.0]
responses[sr] = []
frequency = frequency / (sr * 0.5)
if index == 0:
w = frequency
z, p, k = scipy.signal.cheby1(4, 0.5, w, 'lowpass', output='zpk')
svf_mode = 'lp'
gain = 1.0
elif index == num_bands - 1:
w = frequency
z, p, k = scipy.signal.cheby1(4, 0.25, w, 'highpass', output='zpk')
svf_mode = 'hp'
gain = 21 * frequency
else:
w = [frequency / (interval ** 0.5), frequency * (interval ** 0.5)]
z, p, k = scipy.signal.butter(2, w, 'bandpass', output='zpk')
svf_mode = 'bp'
gain = 0.25
# Filter using direct form
out = numpy.eye(IR_SIZE, 1).ravel()
b, a = scipy.signal.zpk2tf(z, p, k)
out = scipy.signal.lfilter(b, a, out)
out = scipy.signal.lfilter(b, a, out)
reconstruction[sr][0] += out
responses[sr] += [out]
# Filter using modified Chamberlin filter
out = numpy.eye(IR_SIZE, 1).ravel() * gain
coefficients = [0, 0, 0]
for i in xrange(2):
f, fq = pole_pair_to_f_fq(p[i*2:i*2 + 2])
out = modified_chamberlin(f, fq, out, svf_mode)
out = modified_chamberlin(f, fq, out, svf_mode)
coefficients += [f, fq]
delay = (numpy.arange(len(out)) * out * out).sum() / (out * out).sum()
# Completely empirical fixes to the delay to maximize the flatness of the
# total impulse response.
if index == num_bands - 1:
delay += 4
coefficients[0] = SAMPLE_RATE / sr
coefficients[1] = numpy.floor(delay)
coefficients[2] = gain
filters += [('%3.0f_%d' % (frequency * 0.5 * sr, sr), coefficients)]
reconstruction[sr][1] += out
if generate_figures:
pylab.figure(figsize=(20,8))
n = len(responses.keys())
for row, sr in enumerate(sorted(responses.keys())):
f = numpy.arange(IR_SIZE / 2 + 1) / float(IR_SIZE) * sr
for column, plots in enumerate([reconstruction[sr], responses[sr]]):
pylab.subplot(2, n, column * n + row + 1)
for r in plots:
sy = numpy.log10(numpy.abs(numpy.fft.rfft(r)) + 1e-20) * 20.0
pylab.semilogx(f, sy)
pylab.xlim(80, sr / 2)
pylab.ylim(-36, 12)
pylab.xlabel('Frequency (Hz)')
pylab.ylabel('Gain (dB)')
if len(plots) == 2:
pylab.ylim(-4, 3)
#pylab.legend(['Direct form', 'Chamberlin'])
pylab.savefig('filter_bank.pdf')
# pylab.show()
pylab.close()
|
StarcoderdataPython
|
54676
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
import optparse
import os
from mako.template import Template
from pyramid.path import AssetResolver
from shutil import copyfile
def _create_standard_yaml_config_(name='pyramid_oereb_standard.yml',
database='postgresql://postgres:password@localhost/pyramid_oereb',
print_backend='MapFishPrint',
print_url='http://oereb-print:8080/print/oereb'):
"""
Creates the specified YAML file using a template. This YAML file contains the standard
configuration to run a oereb server out of the box.
Args:
(str): The name of the new file. Default
database (str): The database connection string.Default:
'postgresql://postgres:password@localhost/pyramid_oereb'
"""
# File names
logo_oereb_name_de = 'logo_oereb_de.png'
logo_oereb_name_fr = 'logo_oereb_fr.png'
logo_oereb_name_it = 'logo_oereb_it.png'
logo_confederation_name = 'logo_confederation.png'
logo_canton_name = 'logo_canton.png'
# Create pyramid_oereb.yml from template
template = Template(
filename=AssetResolver('pyramid_oereb').resolve('standard/pyramid_oereb.yml.mako').abspath(),
input_encoding='utf-8',
output_encoding='utf-8'
)
config = template.render(
sqlalchemy_url=database,
png_root_dir='',
print_backend=print_backend,
print_url=print_url
)
pyramid_oereb_yml = open(name, 'wb+')
pyramid_oereb_yml.write(config)
pyramid_oereb_yml.close()
# Copy static files
logo_oereb_path_de = AssetResolver('pyramid_oereb').resolve(
'standard/{name}'.format(name=logo_oereb_name_de)
).abspath()
logo_oereb_path_fr = AssetResolver('pyramid_oereb').resolve(
'standard/{name}'.format(name=logo_oereb_name_fr)
).abspath()
logo_oereb_path_it = AssetResolver('pyramid_oereb').resolve(
'standard/{name}'.format(name=logo_oereb_name_it)
).abspath()
logo_confederation_path = AssetResolver('pyramid_oereb').resolve(
'standard/{name}'.format(name=logo_confederation_name)
).abspath()
logo_sample_path = AssetResolver('pyramid_oereb').resolve(
'standard/{name}'.format(name=logo_canton_name)
).abspath()
target_path = os.path.abspath('{path}{sep}{name}'.format(
path=os.getcwd(), name=logo_oereb_name_de, sep=os.sep)
)
copyfile(logo_oereb_path_de, target_path)
target_path = os.path.abspath('{path}{sep}{name}'.format(
path=os.getcwd(), name=logo_oereb_name_fr, sep=os.sep)
)
copyfile(logo_oereb_path_fr, target_path)
target_path = os.path.abspath('{path}{sep}{name}'.format(
path=os.getcwd(), name=logo_oereb_name_it, sep=os.sep)
)
copyfile(logo_oereb_path_it, target_path)
target_path = os.path.abspath('{path}{sep}{name}'.format(
path=os.getcwd(), name=logo_confederation_name, sep=os.sep)
)
copyfile(logo_confederation_path, target_path)
target_path = os.path.abspath('{path}{sep}{name}'.format(
path=os.getcwd(), name=logo_canton_name, sep=os.sep)
)
copyfile(logo_sample_path, target_path)
def create_standard_yaml():
parser = optparse.OptionParser(
usage='usage: %prog [options]',
description='Create all content for the standard database'
)
parser.add_option(
'-n', '--name',
dest='name',
metavar='YAML',
type='string',
default='pyramid_oereb_standard.yml',
help='The name for the new configuration yaml file (default is: pyramid_oereb_standard.yml).'
)
parser.add_option(
'-d', '--database',
dest='database',
metavar='DATABASE',
type='string',
default='postgresql://postgres:password@oereb-db:5432/pyramid_oereb',
help='The database connection string (default is: '
'postgresql://postgres:password@oereb-db:5432/pyramid_oereb).'
)
parser.add_option(
'-p', '--print_backend',
dest='print_backend',
metavar='PRINT_BACKEND',
type='string',
default='MapFishPrint',
help='The print backend (for PDF generation) to use (default is: MapFishPrint)'
)
parser.add_option(
'-u', '--print_url',
dest='print_url',
metavar='PRINT_URL',
type='string',
default='http://oereb-print:8080/print/oereb',
help='The URL of the print server'
)
options, args = parser.parse_args()
    _create_standard_yaml_config_(name=options.name, database=options.database,
                                  print_backend=options.print_backend,
                                  print_url=options.print_url)
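# --- Usage sketch (added for illustration; the console-script name is an assumption). ---
# In a typical deployment this module is exposed as a console script roughly equivalent to:
#
#   create_standard_yaml -n pyramid_oereb_standard.yml \
#       -d postgresql://postgres:password@oereb-db:5432/pyramid_oereb \
#       -p MapFishPrint -u http://oereb-print:8080/print/oereb
#
# or called directly from Python:
#
#   _create_standard_yaml_config_(name='pyramid_oereb_standard.yml',
#                                 database='postgresql://postgres:password@localhost/pyramid_oereb')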
|
StarcoderdataPython
|
1694867
|
<reponame>anandagopal6/azure-functions-python-worker<filename>tests/endtoend/test_eventhub_functions.py<gh_stars>100-1000
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import json
import time
from datetime import datetime
from dateutil import parser, tz
from azure_functions_worker import testutils
class TestEventHubFunctions(testutils.WebHostTestCase):
"""Test EventHub Trigger and Output Bindings (cardinality: one).
    Each testcase consists of 3 parts:
1. An eventhub_output HTTP trigger for generating EventHub event
2. An actual eventhub_trigger EventHub trigger for storing event into blob
3. A get_eventhub_triggered HTTP trigger for retrieving event info blob
"""
@classmethod
def get_script_dir(cls):
return testutils.E2E_TESTS_FOLDER / 'eventhub_functions'
@testutils.retryable_test(3, 5)
def test_eventhub_trigger(self):
# Generate a unique event body for the EventHub event
data = str(round(time.time()))
doc = {'id': data}
# Invoke eventhub_output HttpTrigger to generate an Eventhub Event.
r = self.webhost.request('POST', 'eventhub_output',
data=json.dumps(doc))
self.assertEqual(r.status_code, 200)
self.assertEqual(r.text, 'OK')
        # Once the event gets generated, allow the function host to poll from
# EventHub and wait for eventhub_trigger to execute,
# converting the event metadata into a blob.
time.sleep(5)
# Call get_eventhub_triggered to retrieve event metadata from blob.
r = self.webhost.request('GET', 'get_eventhub_triggered')
self.assertEqual(r.status_code, 200)
response = r.json()
# Check if the event body matches the initial data
self.assertEqual(response, doc)
@testutils.retryable_test(3, 5)
def test_eventhub_trigger_with_metadata(self):
# Generate a unique event body for EventHub event
# Record the start_time and end_time for checking event enqueue time
start_time = datetime.now(tz=tz.UTC)
random_number = str(round(time.time()) % 1000)
req_body = {
'body': random_number
}
# Invoke metadata_output HttpTrigger to generate an EventHub event
# from azure-eventhub SDK
r = self.webhost.request('POST', 'metadata_output',
data=json.dumps(req_body))
self.assertEqual(r.status_code, 200)
self.assertIn('OK', r.text)
end_time = datetime.now(tz=tz.UTC)
        # Once the event gets generated, allow the function host to poll from
# EventHub and wait for eventhub_trigger to execute,
# converting the event metadata into a blob.
time.sleep(5)
# Call get_metadata_triggered to retrieve event metadata from blob
r = self.webhost.request('GET', 'get_metadata_triggered')
self.assertEqual(r.status_code, 200)
# Check if the event body matches the unique random_number
event = r.json()
self.assertEqual(event['body'], random_number)
# EventhubEvent property check
# Reenable these lines after enqueued_time property is fixed
# enqueued_time = parser.isoparse(event['enqueued_time'])
# self.assertIsNotNone(enqueued_time)
self.assertIsNone(event['partition_key']) # There's only 1 partition
self.assertGreaterEqual(event['sequence_number'], 0)
self.assertIsNotNone(event['offset'])
# Check if the event contains proper metadata fields
self.assertIsNotNone(event['metadata'])
metadata = event['metadata']
sys_props = metadata['SystemProperties']
enqueued_time = parser.isoparse(metadata['EnqueuedTimeUtc'])
self.assertTrue(start_time < enqueued_time < end_time)
self.assertIsNone(sys_props['PartitionKey'])
self.assertGreaterEqual(sys_props['SequenceNumber'], 0)
self.assertIsNotNone(sys_props['Offset'])
|
StarcoderdataPython
|
184757
|
<filename>examples.py/Basics/Color/WaveGradient.py<gh_stars>1000+
"""
Wave Gradient
by <NAME>.
Adapted to python by <NAME>
Generate a gradient along a sin() wave.
"""
import math
amplitude = 30
fillGap = 2.5
def setup():
size(200, 200)
background(200, 200, 200)
noLoop()
def draw():
frequency = 0
for i in range(-75, height + 75):
# Reset angle to 0, so waves stack properly
angle = 0
# Increasing frequency causes more gaps
frequency += .006
for j in range(width + 75):
py = i + sin(radians(angle)) * amplitude
angle += frequency
c = color(abs(py - i) * 255 / amplitude,
255 - abs(py - i) * 255 / amplitude,
j * (255.0 / (width + 50)))
# Hack to fill gaps. Raise value of fillGap if you increase frequency
for filler in range(int(math.ceil(fillGap))):
set(int(j - filler), int(py) - filler, c)
set(int(j), int(py), c)
set(int(j + filler), int(py) + filler, c)
|
StarcoderdataPython
|
1728870
|
import re
from typing import Any
from aim.ql.tokens.types import *
class Token(object):
types = [
String, Integer, Float, Boolean, None_,
List,
Comparison, Logical,
Identifier, Path,
Expression,
]
def __init__(self, value: Any, ltype: str):
if ltype == 'Number':
if '.' in value:
cleaned_value, ttype = float(value), Float
else:
cleaned_value, ttype = int(value), Integer
elif ltype == 'String':
cleaned_value = str(value).strip().strip('"')
ttype = String
elif ltype == 'Boolean':
cleaned_value = True if str(value) == 'True' else False
ttype = Boolean
elif ltype == 'None':
cleaned_value, ttype = None, None_
elif ltype == 'Identifier':
ttype = Identifier
cleaned_value = str(value)
elif ltype == 'List':
cleaned_value, ttype = [], List
elif ltype == 'Path':
cleaned_value, ttype = [], Path
elif ltype == 'Expression':
cleaned_value, ttype = [], Expression
elif ltype == 'Operator':
cleaned_value = re.sub(' +', ' ', value.strip())
if cleaned_value == 'and' \
or cleaned_value == 'or' \
or cleaned_value == 'not':
ttype = Logical
else:
ttype = Comparison
else:
raise TypeError('undefined `Token` type')
self._value = cleaned_value
self._ttype = ttype
def __repr__(self):
return '{}: {}'.format(self.type, self.value)
def __str__(self):
if self.type == List:
return '[{}]'.format(','.join([str(token) for token in self.value]))
elif self.type == Path:
return '.'.join([str(token) for token in self.value])
else:
return str(self.value)
@property
def value(self) -> Any:
return self._value
@value.setter
def value(self, value: Any):
# TODO
self._value = value
@property
def type(self) -> TokenType:
return self._ttype
@type.setter
def type(self, ttype: TokenType):
# TODO
self._ttype = ttype
def get_cleaned_value(self, fields: dict = None, *add_fields):
if self.type == List:
return [token.get_cleaned_value(fields, *add_fields)
for token in self.value]
if self.type not in (Identifier, Path):
return self.value
all_fields = [fields] + list(add_fields)
search_field_match = None
found = False
for fields in all_fields:
if self.type == Identifier and self.value in fields.keys():
search_field_match = fields[self.value]
found = True
break
if self.type == Path:
fields_search = fields
path_found = True
for token in self.value:
if isinstance(fields_search, dict) \
and token.value in fields_search.keys():
fields_search = fields_search[token.value]
search_field_match = fields_search
else:
path_found = False
break
if not path_found:
search_field_match = None
else:
found = True
break
if self.type == Identifier:
if found:
return search_field_match
# Treat as string
return self.value
else:
# Treat as None
return search_field_match
class TokenList(Token):
    def __init__(self, ttype: str, tokens: list = None):
        # Avoid a mutable default argument; Token.__init__ re-initialises the value to a
        # fresh list for the 'List', 'Path' and 'Expression' types in any case.
        super(TokenList, self).__init__(tokens if tokens is not None else [], ttype)
def append(self, item: Token):
self._value.append(item)
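# --- Minimal usage sketch (added for illustration; uses the literal type names accepted
# --- by Token.__init__ above: 'Number', 'String', 'Identifier', ...). ---
if __name__ == '__main__':
    lr = Token('learning_rate', 'Identifier')
    threshold = Token('3.14', 'Number')                       # parsed as a Float token
    print(lr.get_cleaned_value({'learning_rate': 0.01}))      # -> 0.01 (resolved from fields)
    print(lr.get_cleaned_value({}))                           # -> 'learning_rate' (falls back to string)
    print(threshold.get_cleaned_value())                      # -> 3.14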
|
StarcoderdataPython
|
1827440
|
<filename>domain/processor_md.py
from domain.processor import Processor
class ProcessorMd(Processor):
def __init__(self, file) -> None:
super().__init__(file)
@property
def word_count(self):
purged_text = self.data.replace('#', '')
return len(purged_text.split())
def unpack(self):
return self.file
|
StarcoderdataPython
|
3553591
|
<filename>contentcuration/contentcuration/views/nodes.py
import json
import logging
from datetime import datetime
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import PermissionDenied
from django.db.models import F
from django.db.models import Max
from django.db.models import Sum
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseNotFound
from django.shortcuts import get_object_or_404
from le_utils.constants import content_kinds
from rest_framework.authentication import SessionAuthentication
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import api_view
from rest_framework.decorators import authentication_classes
from rest_framework.decorators import permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from contentcuration.models import Channel
from contentcuration.models import ContentNode
from contentcuration.models import License
from contentcuration.serializers import ContentNodeEditSerializer
from contentcuration.serializers import ReadOnlyContentNodeFullSerializer
from contentcuration.serializers import ReadOnlyContentNodeSerializer
from contentcuration.serializers import ReadOnlySimplifiedContentNodeSerializer
from contentcuration.serializers import TaskSerializer
from contentcuration.tasks import create_async_task
from contentcuration.tasks import getnodedetails_task
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
@api_view(['GET'])
def get_node_diff(request, channel_id):
try:
request.user.can_edit(channel_id)
except PermissionDenied:
return HttpResponseNotFound("No channel matching: {}".format(channel_id))
original = [] # Currently imported nodes
changed = [] # Nodes from original node
fields_to_check = ['title', 'description', 'license', 'license_description', 'copyright_holder', 'author', 'extra_fields', 'language', 'role_visibility']
assessment_fields_to_check = ['type', 'question', 'hints', 'answers', 'order', 'raw_data', 'source_url', 'randomize']
current_tree_id = Channel.objects.get(pk=channel_id).main_tree.tree_id
nodes = ContentNode.objects.prefetch_related('assessment_items').prefetch_related('files').prefetch_related('tags')
copied_nodes = nodes.filter(tree_id=current_tree_id).exclude(original_source_node_id=F('node_id'))
channel_ids = copied_nodes.values_list('original_channel_id', flat=True).exclude(original_channel_id=channel_id).distinct()
tree_ids = Channel.objects.filter(pk__in=channel_ids).values_list("main_tree__tree_id", flat=True)
original_node_ids = copied_nodes.values_list('original_source_node_id', flat=True).distinct()
original_nodes = nodes.filter(tree_id__in=tree_ids, node_id__in=original_node_ids)
# Use dictionary for faster lookup speed
content_id_mapping = {n.content_id: n for n in original_nodes}
for copied_node in copied_nodes:
node = content_id_mapping.get(copied_node.content_id)
if node:
# Check lengths, metadata, tags, files, and assessment items
node_changed = node.assessment_items.count() != copied_node.assessment_items.count() or \
node.files.count() != copied_node.files.count() or \
node.tags.count() != copied_node.tags.count() or \
any([f for f in fields_to_check if getattr(node, f, None) != getattr(copied_node, f, None)]) or \
node.tags.exclude(tag_name__in=copied_node.tags.values_list('tag_name', flat=True)).exists() or \
node.files.exclude(checksum__in=copied_node.files.values_list('checksum', flat=True)).exists() or \
node.assessment_items.exclude(assessment_id__in=copied_node.assessment_items.values_list('assessment_id', flat=True)).exists()
# Check individual assessment items
if not node_changed and node.kind_id == content_kinds.EXERCISE:
for ai in node.assessment_items.all():
source_ai = copied_node.assessment_items.filter(assessment_id=ai.assessment_id).first()
if source_ai:
node_changed = node_changed or any([f for f in assessment_fields_to_check if getattr(ai, f, None) != getattr(source_ai, f, None)])
if node_changed:
break
if node_changed:
original.append(copied_node)
changed.append(node)
return Response({
"original": ReadOnlySimplifiedContentNodeSerializer(original, many=True).data,
"changed": ReadOnlySimplifiedContentNodeSerializer(changed, many=True).data,
})
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
@api_view(['POST'])
def create_new_node(request):
data = request.data
license = License.objects.filter(license_name=data.get('license_name')).first() # Use filter/first in case preference hasn't been set
license_id = license.pk if license else settings.DEFAULT_LICENSE
new_node = ContentNode.objects.create(
kind_id=data.get('kind'),
title=data.get('title'),
author=data.get('author'),
aggregator=data.get('aggregator'),
provider=data.get('provider'),
copyright_holder=data.get('copyright_holder'),
license_id=license_id,
license_description=data.get('license_description'),
parent_id=settings.ORPHANAGE_ROOT_ID,
)
return Response(ContentNodeEditSerializer(new_node).data)
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
@api_view(['GET'])
def get_total_size(request, ids):
# Get the minimal set of nodes that we need to check permissions on first.
nodes = ContentNode.objects.exclude(kind_id=content_kinds.EXERCISE, published=False)\
.filter(id__in=ids.split(","))
try:
request.user.can_view_nodes(nodes)
except PermissionDenied:
return HttpResponseNotFound("No nodes found for {}".format(ids))
nodes = nodes.prefetch_related('files').get_descendants(include_self=True)\
.values('files__checksum', 'files__file_size')\
.distinct()
sizes = nodes.aggregate(resource_size=Sum('files__file_size'))
return Response({'success': True, 'size': sizes['resource_size'] or 0})
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
@api_view(['GET'])
def get_nodes_by_ids(request, ids):
nodes = ContentNode.objects.filter(pk__in=ids.split(","))
try:
request.user.can_view_nodes(nodes)
except PermissionDenied:
return HttpResponseNotFound("No nodes found for {}".format(ids))
nodes = nodes.prefetch_related('children',
'files',
'assessment_items',
'tags',
'prerequisite',
'license',
'slideshow_slides',
'is_prerequisite_of'
)\
.defer('node_id', 'original_source_node_id', 'source_node_id', 'content_id',
'original_channel_id', 'source_channel_id', 'source_id', 'source_domain', 'created', 'modified')
serializer = ReadOnlyContentNodeSerializer(nodes, many=True)
return Response(serializer.data)
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
@api_view(['GET'])
def get_node_path(request, topic_id, tree_id, node_id):
try:
topic = ContentNode.objects.prefetch_related('children').get(node_id__startswith=topic_id, tree_id=tree_id)
try:
request.user.can_view_node(topic)
except PermissionDenied:
return HttpResponseNotFound("No topic found for {}".format(topic_id))
if topic.kind_id != content_kinds.TOPIC:
node = ContentNode.objects.prefetch_related('files', 'assessment_items', 'tags').get(node_id__startswith=topic_id, tree_id=tree_id)
nodes = node.get_ancestors(ascending=True)
else:
node = node_id and ContentNode.objects.prefetch_related('files', 'assessment_items', 'tags').get(node_id__startswith=node_id, tree_id=tree_id)
nodes = topic.get_ancestors(include_self=True, ascending=True)
return Response({
'path': ReadOnlyContentNodeSerializer(nodes, many=True).data,
'node': node and ReadOnlyContentNodeFullSerializer(node).data,
'parent_node_id': topic.kind_id != content_kinds.TOPIC and node.parent and node.parent.node_id
})
except ObjectDoesNotExist:
return HttpResponseNotFound("Invalid URL: the referenced content does not exist in this channel.")
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
@api_view(['GET'])
def get_nodes_by_ids_simplified(request, ids):
nodes = ContentNode.objects.filter(pk__in=ids.split(","))
try:
request.user.can_view_nodes(nodes)
except PermissionDenied:
return HttpResponseNotFound("No nodes found for {}".format(ids))
nodes = nodes.prefetch_related('children')
serializer = ReadOnlySimplifiedContentNodeSerializer(nodes, many=True)
return Response(serializer.data)
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
@api_view(['GET'])
def get_nodes_by_ids_complete(request, ids):
nodes = ContentNode.objects.filter(pk__in=ids.split(","))
try:
request.user.can_view_nodes(nodes)
except PermissionDenied:
return HttpResponseNotFound("No nodes found for {}".format(ids))
nodes = nodes.prefetch_related('children', 'files', 'assessment_items', 'tags')
serializer = ReadOnlyContentNodeFullSerializer(nodes, many=True)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes((AllowAny,))
def get_channel_details(request, channel_id):
""" Generates data for channel contents. Used for look-inside previews
Keyword arguments:
channel_id (str): id of channel to get details from
"""
# Get nodes and channel
node = get_object_or_404(ContentNode, channel_main=channel_id)
try:
if not node.channel_main.filter(public=True).exists():
request.user.can_view_node(node)
except PermissionDenied:
return HttpResponseNotFound("No topic found for {}".format(channel_id))
data = get_node_details_cached(node)
return HttpResponse(json.dumps(data))
@api_view(['GET'])
@permission_classes((AllowAny,))
def get_node_details(request, node_id):
node = ContentNode.objects.get(pk=node_id)
channel = node.get_channel()
if channel and not channel.public:
return HttpResponseNotFound("No topic found for {}".format(node_id))
data = get_node_details_cached(node)
return HttpResponse(json.dumps(data))
def get_node_details_cached(node):
cached_data = cache.get("details_{}".format(node.node_id))
if cached_data:
descendants = node.get_descendants().prefetch_related('children', 'files', 'tags') \
.select_related('license', 'language')
channel = node.get_channel()
# If channel is a sushi chef channel, use date created for faster query
# Otherwise, find the last time anything was updated in the channel
last_update = channel.main_tree.created if channel and channel.ricecooker_version else \
descendants.filter(changed=True) \
.aggregate(latest_update=Max('modified')) \
.get('latest_update')
if last_update:
last_cache_update = datetime.strptime(json.loads(cached_data)['last_update'], settings.DATE_TIME_FORMAT)
if last_update.replace(tzinfo=None) > last_cache_update:
# update the stats async, then return the cached value
getnodedetails_task.apply_async((node.pk,))
return json.loads(cached_data)
return node.get_details()
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
@api_view(['POST'])
def delete_nodes(request):
data = request.data
try:
nodes = data["nodes"]
channel_id = data["channel_id"]
try:
request.user.can_edit(channel_id)
nodes = ContentNode.objects.filter(pk__in=nodes)
request.user.can_edit_nodes(nodes)
except PermissionDenied:
return HttpResponseNotFound("Resources not found to delete")
for node in nodes:
if node.parent and not node.parent.changed:
node.parent.changed = True
node.parent.save()
node.delete()
except KeyError:
raise ObjectDoesNotExist("Missing attribute from data: {}".format(data))
return Response({'success': True})
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
@api_view(['POST'])
def duplicate_nodes(request):
logging.debug("Entering the copy_node endpoint")
data = request.data
try:
node_ids = data["node_ids"]
target_parent = ContentNode.objects.get(pk=data["target_parent"])
channel = target_parent.get_channel()
try:
request.user.can_edit(channel and channel.pk)
except PermissionDenied:
return HttpResponseNotFound("No channel matching: {}".format(channel and channel.pk))
task_info = {
'user': request.user,
'metadata': {
'affects': {
'channels': [channel.pk],
'nodes': node_ids,
}
}
}
task_args = {
'user_id': request.user.pk,
'channel_id': channel.pk,
'target_parent': target_parent.pk,
'node_ids': node_ids,
}
task, task_info = create_async_task('duplicate-nodes', task_info, task_args)
return HttpResponse(JSONRenderer().render(TaskSerializer(task_info).data))
except KeyError:
raise ObjectDoesNotExist("Missing attribute from data: {}".format(data))
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
@api_view(['POST'])
def duplicate_node_inline(request):
logging.debug("Entering the dupllicate_node_inline endpoint")
if request.method != 'POST':
return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")
data = request.data
try:
node_id = data["node_id"]
channel_id = data["channel_id"]
target_parent = ContentNode.objects.get(pk=data["target_parent"])
channel = target_parent.get_channel()
try:
request.user.can_edit(channel and channel.pk)
except PermissionDenied:
return HttpResponseNotFound("No channel matching: {}".format(channel and channel.pk))
task_info = {
'user': request.user,
'metadata': {
'affects': {
'channels': [channel_id],
'nodes': [node_id],
}
}
}
task_args = {
'user_id': request.user.pk,
'channel_id': channel_id,
'target_parent': target_parent.pk,
'node_id': node_id,
}
task, task_info = create_async_task('duplicate-node-inline', task_info, task_args)
return Response(TaskSerializer(task_info).data)
except KeyError:
raise ObjectDoesNotExist("Missing attribute from data: {}".format(data))
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
@api_view(['POST'])
def move_nodes(request):
logging.debug("Entering the move_nodes endpoint")
data = request.data
try:
nodes = data["nodes"]
target_parent = ContentNode.objects.get(pk=data["target_parent"])
channel_id = data["channel_id"]
min_order = data.get("min_order") or 0
max_order = data.get("max_order") or min_order + len(nodes)
channel = target_parent.get_channel()
try:
request.user.can_edit(channel and channel.pk)
request.user.can_edit_nodes(ContentNode.objects.filter(id__in=list(n["id"] for n in nodes)))
except PermissionDenied:
return HttpResponseNotFound("Resources not found")
task_info = {
'user': request.user,
'metadata': {
'affects': {
'channels': [channel_id],
'nodes': nodes,
}
}
}
task_args = {
'user_id': request.user.pk,
'channel_id': channel_id,
'node_ids': nodes,
'target_parent': data["target_parent"],
'min_order': min_order,
'max_order': max_order
}
task, task_info = create_async_task('move-nodes', task_info, task_args)
return HttpResponse(JSONRenderer().render(TaskSerializer(task_info).data))
except KeyError:
raise ObjectDoesNotExist("Missing attribute from data: {}".format(data))
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
@api_view(['POST'])
def sync_nodes(request):
logging.debug("Entering the sync_nodes endpoint")
data = request.data
try:
nodes = data["nodes"]
channel_id = data['channel_id']
try:
request.user.can_edit(channel_id)
request.user.can_edit_nodes(ContentNode.objects.filter(id__in=list(n["id"] for n in nodes)))
except PermissionDenied:
return HttpResponseNotFound("Resources not found")
task_info = {
'user': request.user,
'metadata': {
'affects': {
'channels': [channel_id],
'nodes': nodes,
}
}
}
task_args = {
'user_id': request.user.pk,
'channel_id': channel_id,
'node_ids': nodes,
'sync_attributes': True,
'sync_tags': True,
'sync_files': True,
'sync_assessment_items': True,
}
task, task_info = create_async_task('sync-nodes', task_info, task_args)
return HttpResponse(JSONRenderer().render(TaskSerializer(task_info).data))
except KeyError:
raise ObjectDoesNotExist("Missing attribute from data: {}".format(data))
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
@api_view(['POST'])
def sync_channel_endpoint(request):
logging.debug("Entering the sync_nodes endpoint")
data = request.data
try:
channel_id = data['channel_id']
try:
request.user.can_edit(channel_id)
except PermissionDenied:
return HttpResponseNotFound("No channel matching: {}".format(channel_id))
task_info = {
'user': request.user,
'metadata': {
'affects': {
'channels': [channel_id],
}
}
}
task_args = {
'user_id': request.user.pk,
'channel_id': channel_id,
'sync_attributes': data.get('attributes'),
'sync_tags': data.get('tags'),
'sync_files': data.get('files'),
'sync_assessment_items': data.get('assessment_items'),
'sync_sort_order': data.get('sort'),
}
task, task_info = create_async_task('sync-channel', task_info, task_args)
return HttpResponse(JSONRenderer().render(TaskSerializer(task_info).data))
except KeyError:
raise ObjectDoesNotExist("Missing attribute from data: {}".format(data))
|
StarcoderdataPython
|
5069058
|
<filename>tests/conftest.py<gh_stars>10-100
import os
import shutil
import uuid
from random import randint
import pytest
from pytest_order.sorter import SESSION
from tests.utils import write_test
pytest_plugins = ["pytester"]
@pytest.fixture
def item_names_for(testdir):
def _item_names_for(tests_content):
items = testdir.getitems(tests_content)
hook = items[0].config.hook
hook.pytest_collection_modifyitems(session=items[0].session,
config=items[0].config, items=items)
return [item.name for item in items]
return _item_names_for
@pytest.fixture
def test_path(tmpdir):
path = tmpdir.join("{}.py".format(str(uuid.uuid4())))
yield str(path)
path.remove()
@pytest.fixture
def ignore_settings(mocker):
settings = mocker.patch("pytest_order.sorter.Settings")
settings.return_value.sparse_ordering = False
settings.return_value.order_dependencies = False
settings.return_value.scope = SESSION
settings.return_value.group_scope = SESSION
yield settings
@pytest.fixture
def order_dependencies(ignore_settings):
ignore_settings.return_value.order_dependencies = True
yield
@pytest.fixture
def get_nodeid(tmpdir_factory):
"""Fixture to get the nodeid from tests created using tmpdir_factory.
At least under Windows, the nodeid for the same tests differs depending on
the pytest version and the environment. We need the real nodeid as it is
used in pytest-dependency session-scoped markers in order to create tests
passing under different systems.
"""
fixture_path = str(tmpdir_factory.mktemp("nodeid_path"))
testname = os.path.join(fixture_path, "test_nodeid.py")
test_contents = """
import pytest
@pytest.fixture
def nodeid(request):
yield request.node.nodeid
def test_node(nodeid):
print("NODEID=!!!{}!!!".format(nodeid))
"""
write_test(testname, test_contents)
yield fixture_path
shutil.rmtree(fixture_path, ignore_errors=True)
def pytest_collection_modifyitems(config, items):
for item in items:
if item.name.startswith("test_performance"):
item.add_marker(pytest.mark.order(randint(-100, 100)))
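# --- Hypothetical usage of the item_names_for fixture above (added for illustration). ---
# With the pytest-order plugin active, collection order should follow the order markers:
#
#   def test_ordering(item_names_for):
#       tests_content = """
#       import pytest
#
#       @pytest.mark.order(2)
#       def test_b(): pass
#
#       @pytest.mark.order(1)
#       def test_a(): pass
#       """
#       assert item_names_for(tests_content) == ["test_a", "test_b"]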
|
StarcoderdataPython
|
4892283
|
<filename>python/testData/completion/qualifiedAssignment.py
def foo(a):
woo = []
a.words = {}
for x in w<caret>
|
StarcoderdataPython
|
6419960
|
import datetime
class Move:
def __init__(self):
self.roll_id = None
self.game_id = None
self.roll_number = None
self.player_id = None
self.is_winning_play = False
|
StarcoderdataPython
|
228027
|
<gh_stars>0
from collections import deque
INPUT = input()
data = deque()
balanced = True  # Becomes False on the first mismatched or unmatched closing bracket.
for parenthesis in INPUT:
    if parenthesis == "{" or parenthesis == "[" or parenthesis == "(":
        data.append(parenthesis)
    if parenthesis == "}" or parenthesis == "]" or parenthesis == ")":
        if len(data) == 0:
            balanced = False
            break
        if parenthesis == "}":
            if data.pop() != "{":
                balanced = False
                break
        elif parenthesis == "]":
            if data.pop() != "[":
                balanced = False
                break
        elif parenthesis == ")":
            if data.pop() != "(":
                balanced = False
                break
if balanced and len(data) == 0:
    print("YES")
else:
    print("NO")
|
StarcoderdataPython
|
12847708
|
<gh_stars>1000+
# Dash components, html, and dash tables
import dash_core_components as dcc
import dash_html_components as html
import dash_table
# Import Bootstrap components
import dash_bootstrap_components as dbc
# Import custom data.py
import data
# Import data from data.py file
teams_df = data.teams
# Hardcoded list that contain era names and marks
era_list = data.era_list
era_marks = data.era_marks
# Main application menu
appMenu = html.Div(
[
dbc.Row(
[
dbc.Col(
html.H4(style={"text-align": "center"}, children="Select Era:"),
xs={"size": "auto", "offset": 0},
sm={"size": "auto", "offset": 0},
md={"size": "auto", "offset": 3},
lg={"size": "auto", "offset": 0},
xl={"size": "auto", "offset": 0},
),
dbc.Col(
dcc.Dropdown(
style={
"text-align": "center",
"font-size": "18px",
"width": "210px",
},
id="era-dropdown",
options=era_list,
value=era_list[0]["value"],
clearable=False,
),
xs={"size": "auto", "offset": 0},
sm={"size": "auto", "offset": 0},
md={"size": "auto", "offset": 0},
lg={"size": "auto", "offset": 0},
xl={"size": "auto", "offset": 0},
),
dbc.Col(
html.H4(
style={"text-align": "center", "justify-self": "right"},
children="Select Team:",
),
xs={"size": "auto", "offset": 0},
sm={"size": "auto", "offset": 0},
md={"size": "auto", "offset": 3},
lg={"size": "auto", "offset": 0},
xl={"size": "auto", "offset": 1},
),
dbc.Col(
dcc.Dropdown(
style={
"text-align": "center",
"font-size": "18px",
"width": "210px",
},
id="team-dropdown",
clearable=False,
),
xs={"size": "auto", "offset": 0},
sm={"size": "auto", "offset": 0},
md={"size": "auto", "offset": 0},
lg={"size": "auto", "offset": 0},
xl={"size": "auto", "offset": 0},
),
],
form=True,
),
dbc.Row(
dbc.Col(
html.P(
style={"font-size": "16px", "opacity": "70%"},
children="""For continuity, some teams historical names where changed to match """
"""their modern counterpart. Available teams are updated based on Era selection.""",
)
)
),
],
className="menu",
)
# Menu slider used, NOT independent, MUST be used with main menu
menuSlider = html.Div(
[
dbc.Row(
dbc.Col(
dcc.RangeSlider(
id="era-slider",
min=1903,
max=teams_df["year"].max(),
marks=era_marks,
tooltip={"always_visible": False, "placement": "bottom"},
)
)
),
dbc.Row(
dbc.Col(
html.P(
style={"font-size": "16px", "opacity": "70%"},
children="Adjust slider to desired range.",
)
)
),
],
className="era-slider",
)
# Layout for Team Analysis page
teamLayout = html.Div(
[
dbc.Row(dbc.Col(html.H3(children="Team Accolades"))),
# Display Championship titles in datatable
dbc.Row(
dbc.Col(
html.Div(id="team-data"),
xs={"size": "auto", "offset": 0},
sm={"size": "auto", "offset": 0},
md={"size": 7, "offset": 0},
lg={"size": "auto", "offset": 0},
xl={"size": "auto", "offset": 0},
),
justify="center",
),
### Graphs of Historical Team statistics ###
dbc.Row(dbc.Col(html.H3(children="Team Analysis"))),
# Bar Chart of Wins and Losses
dbc.Row(
dbc.Col(
dcc.Graph(id="wl-bar", config={"displayModeBar": False}),
xs={"size": 12, "offset": 0},
sm={"size": 12, "offset": 0},
md={"size": 12, "offset": 0},
lg={"size": 12, "offset": 0},
)
),
# Line Chart of Batting Average, BABIP, and Strikeout Rate
dbc.Row(
dbc.Col(
dcc.Graph(id="batting-line", config={"displayModeBar": False}),
xs={"size": 12, "offset": 0},
sm={"size": 12, "offset": 0},
md={"size": 12, "offset": 0},
lg={"size": 12, "offset": 0},
)
),
        # Bar Chart of Errors and Double Plays
dbc.Row(
dbc.Col(
dcc.Graph(id="feild-line", config={"displayModeBar": False}),
xs={"size": 12, "offset": 0},
sm={"size": 12, "offset": 0},
md={"size": 12, "offset": 0},
lg={"size": 12, "offset": 0},
)
),
dbc.Row(dbc.Col(html.H4(children="Pitching Performance"))),
dbc.Row(
[
# Line graph of K/BB ratio with ERA bubbles
dbc.Col(
dcc.Graph(id="pitch-bubble", config={"displayModeBar": False}),
xs={"size": 12, "offset": 0},
sm={"size": 12, "offset": 0},
md={"size": 12, "offset": 0},
lg={"size": 6, "offset": 0},
),
# Pie Chart, % of Completed Games, Shutouts, and Saves of Total Games played
dbc.Col(
dcc.Graph(id="pitch-pie", config={"displayModeBar": False}),
xs={"size": 12, "offset": 0},
sm={"size": 12, "offset": 0},
md={"size": 12, "offset": 0},
lg={"size": 6, "offset": 0},
),
],
no_gutters=True,
),
],
className="app-page",
)
# Player menu used to select players after era and team are set
playerMenu = html.Div(
[
dbc.Row(dbc.Col(html.H3(children="Player Profile and Statistics"))),
dbc.Row(
dbc.Col(
html.P(
style={"font-size": "16px", "opacity": "70%"},
children="Available players are updated based on team selection.",
)
)
),
dbc.Row(
[
dbc.Row(
dbc.Col(
html.H4(
style={"text-align": "center"}, children="Select Player:"
),
xs={"size": "auto", "offset": 0},
sm={"size": "auto", "offset": 0},
md={"size": "auto", "offset": 0},
lg={"size": "auto", "offset": 0},
xl={"size": "auto", "offset": 0},
)
),
dbc.Row(
dbc.Col(
dcc.Dropdown(
style={
"margin-left": "2%",
"text-align": "center",
"font-size": "18px",
"width": "218px",
},
id="player-dropdown",
clearable=False,
),
xs={"size": "auto", "offset": 0},
sm={"size": "auto", "offset": 0},
md={"size": "auto", "offset": 0},
lg={"size": "auto", "offset": 0},
xl={"size": "auto", "offset": 0},
)
),
],
no_gutters=True,
),
html.Br(),
dbc.Row(
dbc.Col(
dash_table.DataTable(
id="playerProfile",
style_as_list_view=True,
editable=False,
style_table={
"overflowY": "scroll",
"width": "100%",
"minWidth": "100%",
},
style_header={"backgroundColor": "#f8f5f0", "fontWeight": "bold"},
style_cell={"textAlign": "center", "padding": "8px"},
),
xs={"size": "auto", "offset": 0},
sm={"size": "auto", "offset": 0},
md={"size": 8, "offset": 0},
lg={"size": "auto", "offset": 0},
xl={"size": "auto", "offset": 0},
),
justify="center",
),
html.Br(),
],
className="app-page",
)
# Batting statistics
battingLayout = html.Div(
[
# Batting datatable
dbc.Row(
dbc.Col(
dash_table.DataTable(
id="batterTable",
style_as_list_view=True,
editable=False,
style_table={
"overflowY": "scroll",
"width": "100%",
"minWidth": "100%",
},
style_header={"backgroundColor": "#f8f5f0", "fontWeight": "bold"},
style_cell={"textAlign": "center", "padding": "8px"},
),
xs={"size": 12, "offset": 0},
sm={"size": 12, "offset": 0},
md={"size": 10, "offset": 0},
lg={"size": 10, "offset": 0},
xl={"size": 10, "offset": 0},
),
justify="center",
),
dbc.Row(
dbc.Col(
html.H3(
style={"margin-top": "1%", "margin-bottom": "1%"},
children="Player Analysis",
)
)
),
dbc.Row(
dbc.Col(
html.P(
style={"font-size": "16px", "opacity": "70%"},
children="Some statistics where not tracked until the 1950s, graphs may not always reflect certain plots.",
)
)
),
dbc.Row(
[
# Line/Bar Chart of On-Base Percentage, features; H BB HBP SF
dbc.Col(
dcc.Graph(id="obp-line", config={"displayModeBar": False}),
xs={"size": 12, "offset": 0},
sm={"size": 12, "offset": 0},
md={"size": 12, "offset": 0},
lg={"size": 6, "offset": 0},
),
# Line/Bar Chart of Slugging Average, features; 2B 3B HR
dbc.Col(
dcc.Graph(id="slg-line", config={"displayModeBar": False}),
xs={"size": 12, "offset": 0},
sm={"size": 12, "offset": 0},
md={"size": 12, "offset": 0},
lg={"size": 6, "offset": 0},
),
],
no_gutters=True,
),
# Line Chart of OPS, Features; OBP SLG
dbc.Row(
dbc.Col(
dcc.Graph(id="ops-line", config={"displayModeBar": False}),
xs={"size": 12, "offset": 0},
sm={"size": 12, "offset": 0},
md={"size": 12, "offset": 0},
lg={"size": 12, "offset": 0},
)
),
],
className="app-page",
)
# Fielding Statistics
fieldingLayout = html.Div(
    [
        dbc.Row(dbc.Col(html.H3(style={"margin-bottom": "1%"}, children="Fielding"))),
        # Fielding Datatable
dbc.Row(
dbc.Col(
dash_table.DataTable(
id="fieldTable",
style_as_list_view=True,
editable=False,
style_table={
"overflowY": "scroll",
"width": "100%",
"minWidth": "100%",
},
style_header={"backgroundColor": "#f8f5f0", "fontWeight": "bold"},
style_cell={"textAlign": "center", "padding": "8px"},
),
xs={"size": 12, "offset": 0},
sm={"size": 12, "offset": 0},
md={"size": 8, "offset": 0},
lg={"size": 8, "offset": 0},
xl={"size": 8, "offset": 0},
),
justify="center",
),
html.Br(),
dbc.Row(dbc.Col(html.H3(style={"margin-bottom": "1%"}, children="Pitching"))),
dbc.Row(
dbc.Col(
html.Div(id="pitch-data"),
xs={"size": 12, "offset": 0},
sm={"size": 12, "offset": 0},
md={"size": 10, "offset": 0},
lg={"size": 10, "offset": 0},
xl={"size": 10, "offset": 0},
),
justify="center",
),
html.Br(),
dbc.Row(dbc.Col(html.H3(children="Player Analysis"))),
# Player dropdown menu
dbc.Row(
[
dbc.Row(
dbc.Col(
html.H4(
style={"text-align": "center"}, children="Select Position:"
),
xs={"size": "auto", "offset": 0},
sm={"size": "auto", "offset": 0},
md={"size": "auto", "offset": 0},
lg={"size": "auto", "offset": 0},
xl={"size": "auto", "offset": 0},
)
),
dbc.Row(
dbc.Col(
dcc.Dropdown(
style={
"margin-left": "5px",
"text-align": "center",
"font-size": "18px",
"width": "100px",
},
id="pos-dropdown",
clearable=False,
),
xs={"size": "auto", "offset": 0},
sm={"size": "auto", "offset": 0},
md={"size": "auto", "offset": 0},
lg={"size": "auto", "offset": 0},
xl={"size": "auto", "offset": 0},
)
),
],
no_gutters=True,
),
dbc.Row(dbc.Col(html.H4(children="Pitching Performance"))),
        # Pitching and fielding graphs; the pitching graphs are rendered in a subplot.
dbc.Row(
dbc.Col(
html.Div(id="pitch-graphs"),
xs={"size": 12, "offset": 0},
sm={"size": 12, "offset": 0},
md={"size": 12, "offset": 0},
lg={"size": 12, "offset": 0},
xl={"size": 12, "offset": 0},
)
),
dbc.Row(
dbc.Col(
dcc.Graph(id="field-graph", config={"displayModeBar": False}),
xs={"size": 12, "offset": 0},
sm={"size": 12, "offset": 0},
md={"size": 12, "offset": 0},
lg={"size": 12, "offset": 0},
xl={"size": 12, "offset": 0},
)
),
],
className="app-page",
)
|
StarcoderdataPython
|
8113332
|
<filename>pictures/views.py<gh_stars>0
from django.http import HttpResponse, Http404
import datetime as dt
from django.shortcuts import render
from .models import Image
from .filters import ImageFilter
# Create your views here.
def home(request):
date = dt.date.today()
slogan = 'Just keep uploading...'
pictures = Image.get_images()
return render(request, 'index.html', {"date": date, "slogan": slogan, "pictures": pictures})
def image(request, image_name):
try:
image = Image.objects.get(name=image_name)
    except Image.DoesNotExist:
raise Http404()
return render(request, 'images/image.html', {"image": image})
def search(request):
image_list = Image.objects.all()
image_filter = ImageFilter(request.GET, queryset=image_list)
return render(request, 'search.html', {"filter": image_filter})
|
StarcoderdataPython
|
4816184
|
'''
Given a string s consists of upper/lower-case alphabets and empty space characters ' ', return the length of last word in the string.
If the last word does not exist, return 0.
Note: A word is defined as a character sequence consists of non-space characters only.
Example:
Input: "<NAME>"
Output: 5
'''
class Solution(object):
def lengthOfLastWord(self, s):
p = s.split()
return len(p[-1]) if p else 0
mysolution = Solution()
s = "Hello World"
print(mysolution.lengthOfLastWord(s))
|
StarcoderdataPython
|
4935793
|
<filename>my_module.py
def write_to_file(name, data, encoding):
    # Open in binary mode so the utf-8 branch can write the encoded bytes directly.
    try:
        text_file = open(name, "wb")
    except IOError as error:
        print("[!!] Error opening file " + str(error))
        return -1
    if encoding == 'utf-8':
        try:
            text_file.write(data.encode('utf-8'))
        except IOError as error:
            print("[!!] Error writing to file " + str(error))
            return -1
    elif encoding == 'string':
        try:
            text_file.write(str(data).encode())
        except IOError as error:
            print("[!!] Error writing to file " + str(error))
            return -1
    text_file.close()
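# Usage sketch (added for illustration; the file names and payloads are hypothetical):
#   write_to_file('notes.txt', u'h\u00e9llo w\u00f6rld', 'utf-8')
#   write_to_file('numbers.txt', 12345, 'string')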
|
StarcoderdataPython
|
11314377
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Base-Filtering-Engine-Resource-Flows
GUID : 92765247-03a9-4ae3-a575-b42264616e78
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("92765247-03a9-4ae3-a575-b42264616e78"), event_id=2002, version=0)
class Microsoft_Windows_Base_Filtering_Engine_Resource_Flows_2002_0(Etw):
pattern = Struct(
"ConnectionUsedId" / Int64ul,
"Protocol" / Int8ul,
"RemotePort" / Int16ul,
"LocalPort" / Int16ul,
"StartTime" / Int64ul
)
@declare(guid=guid("92765247-03a9-4ae3-a575-b42264616e78"), event_id=2003, version=0)
class Microsoft_Windows_Base_Filtering_Engine_Resource_Flows_2003_0(Etw):
pattern = Struct(
"ConnectionUsedId" / Int64ul,
"Protocol" / Int8ul,
"RemotePort" / Int16ul,
"LocalPort" / Int16ul,
"StartTime" / Int64ul,
"CloseTime" / Int64ul
)
@declare(guid=guid("92765247-03a9-4ae3-a575-b42264616e78"), event_id=2004, version=0)
class Microsoft_Windows_Base_Filtering_Engine_Resource_Flows_2004_0(Etw):
pattern = Struct(
"ConnectionUsedId" / Int64ul,
"Protocol" / Int8ul,
"RemoteIPAddress" / Int32ul,
"LocalIPAddress" / Int32ul,
"RemotePort" / Int16ul,
"LocalPort" / Int16ul,
"StartTime" / Int64ul
)
@declare(guid=guid("92765247-03a9-4ae3-a575-b42264616e78"), event_id=2005, version=0)
class Microsoft_Windows_Base_Filtering_Engine_Resource_Flows_2005_0(Etw):
pattern = Struct(
"ConnectionUsedId" / Int64ul,
"Protocol" / Int8ul,
"RemoteIPAddress" / Int32ul,
"LocalIPAddress" / Int32ul,
"RemotePort" / Int16ul,
"LocalPort" / Int16ul,
"StartTime" / Int64ul,
"CloseTime" / Int64ul
)
|
StarcoderdataPython
|
8068278
|
# Generated by Django 2.0.4 on 2018-04-18 14:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Photo',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
(
'title',
models.CharField(
max_length=100,
verbose_name='title')),
(
'description',
models.TextField(blank=True, verbose_name='description')),
(
'image',
models.ImageField(
upload_to='photos/', verbose_name='image')),
(
'thumbnail',
models.ImageField(
upload_to='photos/thumb/', verbose_name='thumbnail')),
(
'file_length',
models.IntegerField(
default=0, verbose_name='file length')),
(
'created_at',
models.DateTimeField(
auto_now_add=True, verbose_name='created at')),
],
options={
'verbose_name': 'photo',
'verbose_name_plural': 'photos',
},
),
migrations.CreateModel(
name='PhotoAlbum',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
(
'title',
models.CharField(
max_length=100,
verbose_name='title')),
(
'description',
models.TextField(blank=True, verbose_name='description')),
(
'enabled',
models.BooleanField(
default=False, verbose_name='enabled')),
(
'title_photo',
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
to='photos.Photo',
verbose_name='title photo')),
],
options={
'verbose_name': 'photo album',
'verbose_name_plural': 'photo albums',
},
),
migrations.AddField(
model_name='photo',
name='album',
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name='photos',
to='photos.PhotoAlbum',
verbose_name='album'),
),
]
|
StarcoderdataPython
|
1801698
|
import time
from worm import WormBase
from mod import MOD
worm = WormBase()
mod = MOD()
mods = [worm]
for m in mods:
start_time = time.time()
m.load_genes()
print (" --- %s seconds --- " % (time.time() - start_time))
# mod.load_homologs()
for m in mods:
start_time = time.time()
m.load_go()
print (" --- %s seconds --- " % (time.time() - start_time))
for m in mods:
start_time = time.time()
m.load_diseases()
print (" --- %s seconds --- " % (time.time() - start_time))
mod.save_into_file()
mod.delete_mapping()
mod.put_mapping()
mod.index_genes_into_es()
mod.index_go_into_es()
mod.index_diseases_into_es()
|
StarcoderdataPython
|
73178
|
<filename>batch.py
import argparse
import os
import sys
import json
from datetime import datetime
import covizu
from covizu.utils import gisaid_utils
from covizu.utils.progress_utils import Callback
from covizu.utils.batch_utils import *
from covizu.utils.seq_utils import SC2Locator
from tempfile import NamedTemporaryFile
def parse_args():
parser = argparse.ArgumentParser(description="CoVizu analysis pipeline automation")
parser.add_argument('--url', type=str, default=os.environ.get("GISAID_URL", None),
help="URL to download provision file, defaults to environment variable.")
parser.add_argument('--user', type=str, default=os.environ.get("GISAID_USER", None),
help="GISAID username, defaults to environment variable.")
parser.add_argument('--password', type=str, default=os.environ.get("GISAID_PSWD", None),
help="GISAID password, defaults to environment variable.")
parser.add_argument("--infile", type=str, default=None,
help="input, path to xz-compressed JSON; if not specified, "
"download xz file from GISAID provision feed.")
parser.add_argument("--outdir", type=str, default='data/',
help="option, path to write output files")
parser.add_argument('--minlen', type=int, default=29000, help='option, minimum genome length (nt)')
parser.add_argument('--mindate', type=str, default='2019-12-01',
help='option, earliest possible sample collection date (ISO format, default '
'2019-12-01')
parser.add_argument('--poisson-cutoff', type=float, default=0.001,
help='option, filtering outlying genomes whose distance exceeds the upper '
'quantile of Poisson distribution (molecular clock). Default 0.001 '
'corresponds to 99.9%% cutoff.')
parser.add_argument('--batchsize', type=int, default=2000,
help='option, number of records to batch process with minimap2')
parser.add_argument('--max-variants', type=int, default=5000,
help='option, limit number of variants per lineage (default 5000)')
parser.add_argument("--ref", type=str,
default=os.path.join(covizu.__path__[0], "data/NC_045512.fa"),
help="option, path to FASTA file with reference genome")
parser.add_argument('--mmbin', type=str, default='minimap2',
help="option, path to minimap2 binary executable")
parser.add_argument('-mmt', "--mmthreads", type=int, default=16,
help="option, number of threads for minimap2.")
parser.add_argument('--misstol', type=int, default=300,
help="option, maximum tolerated number of missing bases per "
"genome (default 300).")
parser.add_argument("--vcf", type=str,
default=os.path.join(covizu.__path__[0], "data/ProblematicSites_SARS-CoV2/problematic_sites_sarsCov2.vcf"),
help="Path to VCF file of problematic sites in SARS-COV-2 genome. "
"Source: https://github.com/W-L/ProblematicSites_SARS-CoV2")
parser.add_argument('--ft2bin', default='fasttree2',
help='option, path to fasttree2 binary executable')
parser.add_argument('--lineages', type=str,
default=os.path.join(covizu.__path__[0], "data/pango-designation/lineages.csv"),
help="optional, path to CSV file containing Pango lineage designations.")
parser.add_argument('--ttbin', default='treetime',
help='option, path to treetime binary executable')
parser.add_argument('--clock', type=float, default=8e-4,
help='option, specify molecular clock rate for '
'constraining Treetime analysis (default 8e-4).')
parser.add_argument('--earliest', action='store_true',
help='option, use earliest sample per lineage for time-scaled '
'tree; otherwise defaults to most recent samples.')
parser.add_argument('--datetol', type=float, default=0.1,
help='option, exclude tips from time-scaled tree '
'with high discordance between estimated and '
'known sample collection dates (year units,'
'default: 0.1)')
parser.add_argument('--binpath', type=str, default='rapidnj',
help='option, path to RapidNJ binary executable')
parser.add_argument('--mincount', type=int, default=5000,
help='option, minimum number of variants in lineage '
'above which MPI processing will be used.')
parser.add_argument('--machine_file', type=str, default='mfile',
help='option, path to machine file for MPI.')
parser.add_argument("-n", "--nboot", type=int, default=100,
help="Number of bootstrap samples, default 100.")
parser.add_argument("--boot-cutoff", type=float, default=0.5,
help="Bootstrap cutoff for consensus tree (default 0.5). "
"Only used if --cons is specified.")
parser.add_argument("--dry-run", action="store_true",
help="Do not upload output files to webserver.")
return parser.parse_args()
def process_feed(args, callback=None):
""" Process feed data """
if callback:
callback("Processing GISAID feed data")
loader = gisaid_utils.load_gisaid(args.infile, minlen=args.minlen, mindate=args.mindate)
batcher = gisaid_utils.batch_fasta(loader, size=args.batchsize)
aligned = gisaid_utils.extract_features(batcher, ref_file=args.ref, binpath=args.mmbin,
nthread=args.mmthreads, minlen=args.minlen)
filtered = gisaid_utils.filter_problematic(aligned, vcf_file=args.vcf, cutoff=args.poisson_cutoff,
callback=callback)
return gisaid_utils.sort_by_lineage(filtered, callback=callback)
if __name__ == "__main__":
args = parse_args()
cb = Callback()
# check that user has loaded openmpi module
try:
subprocess.check_call(['mpirun', '-np', '2', 'ls'], stdout=subprocess.DEVNULL)
except FileNotFoundError:
cb.callback("mpirun not loaded - run `module load openmpi/gnu`", level='ERROR')
sys.exit()
# check that the user has included submodules
if (not os.path.exists(os.path.join(covizu.__path__[0], "data/pango-designation/lineages.csv")) or
not os.path.exists(os.path.join(covizu.__path__[0], "data/ProblematicSites_SARS-CoV2/problematic_sites_sarsCov2.vcf"))):
try:
subprocess.check_call("git submodule init; git submodule update", shell=True)
        except subprocess.CalledProcessError:
            cb.callback("Error adding the required submodules", level='ERROR')
sys.exit()
# update submodules
try:
subprocess.check_call("git submodule foreach git pull origin master", shell=True)
    except subprocess.CalledProcessError:
        cb.callback("Error updating submodules", level='ERROR')
sys.exit()
# download xz file if not specified by user
if args.infile is None:
cb.callback("No input specified, downloading data from GISAID feed...")
args.infile = gisaid_utils.download_feed(args.url, args.user, args.password)
# filter data, align genomes, extract features, sort by lineage
by_lineage = process_feed(args, cb.callback)
# reconstruct time-scaled tree relating lineages
timetree, residuals = build_timetree(by_lineage, args, cb.callback)
timestamp = datetime.now().isoformat().split('.')[0]
nwk_file = os.path.join(args.outdir, 'timetree.{}.nwk'.format(timestamp))
with open(nwk_file, 'w') as handle:
Phylo.write(timetree, file=handle, format='newick')
# clustering analysis of lineages
result = make_beadplots(by_lineage, args, cb.callback, t0=cb.t0.timestamp())
clust_file = os.path.join(args.outdir, 'clusters.{}.json'.format(timestamp))
with open(clust_file, 'w') as handle:
json.dump(result, fp=handle)
# get mutation info
locator = SC2Locator()
mutations = {}
for lineage, features in get_mutations(by_lineage).items():
annots = [locator.parse_mutation(f) for f in features]
mutations.update({lineage: [a for a in annots if a is not None]})
# write data stats
dbstat_file = os.path.join(args.outdir, 'dbstats.{}.json'.format(timestamp))
with open(dbstat_file, 'w') as handle:
nseqs = sum([len(rows) for rows in by_lineage.values()])
val = {
'lastupdate': timestamp.split('T')[0],
'noseqs': nseqs,
'lineages': {}
}
for lineage, samples in by_lineage.items():
ndiffs = [len(x['diffs']) for x in samples]
val['lineages'][lineage] = {
'nsamples': len(samples),
'lastcoldate': max(x['covv_collection_date'] for x in samples),
'residual': residuals[lineage],
'max_ndiffs': max(ndiffs),
'mean_ndiffs': sum(ndiffs)/len(ndiffs),
'mutations': mutations[lineage]
}
json.dump(val, handle)
# upload output files to webserver, requires SSH key credentials
if not args.dry_run:
server_root = 'filogeneti.ca:/var/www/html/covizu/data'
subprocess.check_call(['scp', nwk_file, '{}/timetree.nwk'.format(server_root)])
subprocess.check_call(['scp', clust_file, '{}/clusters.json'.format(server_root)])
subprocess.check_call(['scp', dbstat_file, '{}/dbstats.json'.format(server_root)])
# upload files to EpiCoV server
server_epicov = 'filogeneti.ca:/var/www/html/epicov/data'
subprocess.check_call(['scp', nwk_file, '{}/timetree.nwk'.format(server_epicov)])
subprocess.check_call(['scp', dbstat_file, '{}/dbstats.json'.format(server_epicov)])
# modify clusters JSON
        with open(clust_file, 'r') as epifile:
            epicov_data = gisaid_utils.convert_json(epifile, args.infile)
fp = NamedTemporaryFile('w', delete=False)
json.dump(epicov_data, fp=fp) # serialize to temp file
fp.close()
subprocess.check_call(['scp', fp.name, '{}/clusters.json'.format(server_epicov)])
cb.callback("All done!")
|
StarcoderdataPython
|
28337
|
from unihan_db.tables import (
UnhnLocation,
UnhnLocationkXHC1983,
UnhnReading,
kCantonese,
kCCCII,
kCheungBauer,
kCheungBauerIndex,
kCihaiT,
kDaeJaweon,
kDefinition,
kFenn,
kFennIndex,
kGSR,
kHanYu,
kHanyuPinlu,
kHanyuPinyin,
kHDZRadBreak,
kIICore,
kIICoreSource,
kUnihanCore2020,
kIRG_GSource,
kIRG_HSource,
kIRG_JSource,
kIRG_KPSource,
kIRG_KSource,
kIRG_MSource,
kIRG_TSource,
kIRG_USource,
kIRG_VSource,
kIRG_SSource,
kIRG_UKSource,
kIRGDaeJaweon,
kIRGHanyuDaZidian,
kIRGKangXi,
kMandarin,
kRSAdobe_Japan1_6,
kRSJapanese,
kRSKangXi,
kRSKanWa,
kRSKorean,
kRSUnicode,
kSBGY,
kTotalStrokes,
kXHC1983,
kTGHZ2013,
kSimplifiedVariant,
kTraditionalVariant,
kSpoofingVariant,
kZVariant,
kSemanticVariant,
kSpecializedSemanticVariant,
UnhnVariantSource,
SemanticVariantSource
)
def import_char(c, char): # NOQA: C901
if 'kDefinition' in char:
for d in char['kDefinition']:
c.kDefinition.append(kDefinition(definition=d))
if 'kCantonese' in char:
for d in char['kCantonese']:
c.kCantonese.append(kCantonese(definition=d))
if 'kCCCII' in char:
for d in char['kCCCII']:
c.kCCCII.append(kCCCII(hex=d))
if 'kMandarin' in char:
d = char['kMandarin']
c.kMandarin.append(kMandarin(hans=d['zh-Hans'], hant=d['zh-Hant']))
if 'kTotalStrokes' in char:
d = char['kTotalStrokes']
c.kTotalStrokes.append(kTotalStrokes(hans=d['zh-Hans'], hant=d['zh-Hant']))
if 'kHanyuPinyin' in char:
for d in char['kHanyuPinyin']:
k = kHanyuPinyin()
for loc in d['locations']:
k.locations.append(
UnhnLocation(
volume=loc['volume'],
page=loc['page'],
character=loc['character'],
virtual=loc['virtual'],
)
)
for reading in d['readings']:
k.readings.append(UnhnReading(reading=reading))
c.kHanyuPinyin.append(k)
if 'kHanYu' in char:
k = kHanYu()
for d in char['kHanYu']:
k.locations.append(
UnhnLocation(
volume=d['volume'],
page=d['page'],
character=d['character'],
virtual=d['virtual'],
)
)
c.kHanYu.append(k)
if 'kIRGHanyuDaZidian' in char:
for d in char['kIRGHanyuDaZidian']:
k = kIRGHanyuDaZidian()
k.locations.append(
UnhnLocation(
volume=d['volume'],
page=d['page'],
character=d['character'],
virtual=d['virtual'],
)
)
c.kIRGHanyuDaZidian.append(k)
if 'kXHC1983' in char:
for d in char['kXHC1983']:
k = kXHC1983()
for loc in d['locations']:
k.locations.append(
UnhnLocationkXHC1983(
page=loc['page'],
character=loc['character'],
entry=loc['entry'],
substituted=loc['substituted'],
)
)
k.readings.append(UnhnReading(reading=d['reading']))
c.kXHC1983.append(k)
if 'kTGHZ2013' in char:
for d in char['kTGHZ2013']:
k = kTGHZ2013()
for loc in d['locations']:
k.locations.append(
UnhnLocation(
page=loc['page'],
character=loc['character'],
)
)
k.readings.append(UnhnReading(reading=d['reading']))
c.kTGHZ2013.append(k)
if 'kCheungBauer' in char:
for d in char['kCheungBauer']:
k = kCheungBauer(
radical=d['radical'], strokes=d['strokes'], cangjie=d['cangjie']
)
for reading in d['readings']:
k.readings.append(UnhnReading(reading=reading))
c.kCheungBauer.append(k)
if 'kRSAdobe_Japan1_6' in char:
for d in char['kRSAdobe_Japan1_6']:
c.kRSAdobe_Japan1_6.append(
kRSAdobe_Japan1_6(
type=d['type'],
cid=d['cid'],
radical=d['radical'],
strokes=d['strokes'],
strokes_residue=d['strokes-residue'],
)
)
if 'kCihaiT' in char:
for d in char['kCihaiT']:
c.kCihaiT.append(
kCihaiT(page=d['page'], row=d['row'], character=d['character'])
)
if 'kIICore' in char:
for d in char['kIICore']:
k = kIICore(priority=d['priority'])
for s in d['sources']:
k.sources.append(kIICoreSource(source=s))
c.kIICore.append(k)
if 'kUnihanCore2020' in char:
for s in char['kUnihanCore2020']:
c.kUnihanCore2020.append(kUnihanCore2020(source=s))
if 'kDaeJaweon' in char:
k = kDaeJaweon()
d = char['kDaeJaweon']
k.locations.append(
UnhnLocation(page=d['page'], character=d['character'], virtual=d['virtual'])
)
c.kDaeJaweon.append(k)
if 'kIRGKangXi' in char:
k = kIRGKangXi()
for d in char['kIRGKangXi']:
k.locations.append(
UnhnLocation(
page=d['page'], character=d['character'], virtual=d['virtual']
)
)
c.kIRGKangXi.append(k)
if 'kIRGDaeJaweon' in char:
k = kIRGDaeJaweon()
for d in char['kIRGDaeJaweon']:
k.locations.append(
UnhnLocation(
page=d['page'], character=d['character'], virtual=d['virtual']
)
)
c.kIRGDaeJaweon.append(k)
if 'kFenn' in char:
for d in char['kFenn']:
c.kFenn.append(kFenn(phonetic=d['phonetic'], frequency=d['frequency']))
if 'kHanyuPinlu' in char:
for d in char['kHanyuPinlu']:
c.kHanyuPinlu.append(
kHanyuPinlu(phonetic=d['phonetic'], frequency=d['frequency'])
)
if 'kHDZRadBreak' in char:
d = char['kHDZRadBreak']
k = kHDZRadBreak(radical=d['radical'], ucn=d['ucn'])
k.locations.append(
UnhnLocation(
volume=d['location']['volume'],
page=d['location']['page'],
character=d['location']['character'],
virtual=d['location']['virtual'],
)
)
c.kHDZRadBreak.append(k)
if 'kSBGY' in char:
for d in char['kSBGY']:
k = kSBGY()
k.locations.append(UnhnLocation(page=d['page'], character=d['character']))
c.kSBGY.append(k)
rs_fields = ( # radical-stroke fields, since they're the same structure
('kRSUnicode', kRSUnicode, c.kRSUnicode),
('kRSJapanese', kRSJapanese, c.kRSJapanese),
('kRSKangXi', kRSKangXi, c.kRSKangXi),
('kRSKanWa', kRSKanWa, c.kRSKanWa),
('kRSKorean', kRSKorean, c.kRSKorean),
)
for f, model, column in rs_fields:
if f in char:
for d in char[f]:
k = model(
radical=d['radical'],
strokes=d['strokes'],
simplified=d['simplified'],
)
column.append(k)
irg_fields = ( # IRG, since they're the same structure
('kIRG_GSource', kIRG_GSource, c.kIRG_GSource),
('kIRG_HSource', kIRG_HSource, c.kIRG_HSource),
('kIRG_JSource', kIRG_JSource, c.kIRG_JSource),
('kIRG_KPSource', kIRG_KPSource, c.kIRG_KPSource),
('kIRG_KSource', kIRG_KSource, c.kIRG_KSource),
('kIRG_MSource', kIRG_MSource, c.kIRG_MSource),
('kIRG_TSource', kIRG_TSource, c.kIRG_TSource),
('kIRG_USource', kIRG_USource, c.kIRG_USource),
('kIRG_VSource', kIRG_VSource, c.kIRG_VSource),
('kIRG_SSource', kIRG_SSource, c.kIRG_SSource),
('kIRG_UKSource', kIRG_UKSource, c.kIRG_UKSource),
)
for f, model, column in irg_fields:
if f in char:
d = char[f]
k = model(source=d['source'], location=d['location'])
column.append(k)
if 'kGSR' in char:
for d in char['kGSR']:
k = kGSR(set=d['set'], letter=d['letter'], apostrophe=d['apostrophe'])
c.kGSR.append(k)
if 'kCheungBauerIndex' in char:
d = char['kCheungBauerIndex']
k = kCheungBauerIndex()
k.locations.append(
UnhnLocation(
page=d['location']['page'], character=d['location']['character']
)
)
c.kCheungBauerIndex.append(k)
if 'kFennIndex' in char:
d = char['kFennIndex']
k = kFennIndex()
k.locations.append(
UnhnLocation(
page=d['location']['page'], character=d['location']['character']
)
)
c.kFennIndex.append(k)
simple_variant_fields = (
('kSimplifiedVariant', kSimplifiedVariant, c.kSimplifiedVariant),
('kTraditionalVariant', kTraditionalVariant, c.kTraditionalVariant),
('kSpoofingVariant', kSpoofingVariant, c.kSpoofingVariant),
)
for f, model, column in simple_variant_fields:
if f in char:
for d in char[f]:
column.append(model(ucn=d))
sourced_variant_fields = (
('kZVariant', kZVariant, c.kZVariant, UnhnVariantSource),
('kSemanticVariant', kSemanticVariant, c.kSemanticVariant, SemanticVariantSource),
('kSpecializedSemanticVariant', kSpecializedSemanticVariant, c.kSpecializedSemanticVariant, SemanticVariantSource),
)
for f, model, column, source_model in sourced_variant_fields:
if f in char:
for d in char[f]:
m = model(ucn=d['ucn'])
for s in d.get('sources', []):
m.sources.append(source_model(**s))
column.append(m)
|
StarcoderdataPython
|
5120152
|
<reponame>ronan-keane/hav-sim
"""
@author: <EMAIL>
Regression check for an optplot RMSE bug (verified fixed 11/29, pushed to master).
The bug: when multiple guesses were used with order = 1 and the later guesses were worse,
the worse simulations were carried forward into subsequent calibration, which corrupted
the results of the calibrate_tnc2 function (the main calibration routine at the time).
"""
#%%
def test(opt,lists):
sim = copy.deepcopy(meas)
sim = helper.obj_helper(opt[0],OVM,OVMadj,OVMadjsys,meas,sim,platooninfo,lists,makeleadfolinfo,platoonobjfn_obj,(True,6))
for count, i in enumerate(lists):
obj = helper.SEobj_pervehicle(meas,sim,platooninfo,i)
print('optimization result is '+str(opt[0][count][-2])+', our result is '+str(obj[0]))
test(out[0],lists[0])
#%% reproduce problem
platoonlist = lists[0][:5]
outtest = calibrate_tnc2(plist,bounds,meas,platooninfo,platoonlist,makeleadfolinfo,platoonobjfn_objder,None,OVM,OVMadjsys,OVMadj,True,6,cutoff=0,cutoff2=4.5,order=1,budget = 3)
test(outtest,platoonlist)
#%%test fix
from havsim.calibration.calibration import calibrate_tnc2
platoonlist = lists[0][:5]
outtest2 = calibrate_tnc2(plist,bounds,meas,platooninfo,platoonlist,makeleadfolinfo,platoonobjfn_objder,None,OVM,OVMadjsys,OVMadj,True,6,cutoff=0,cutoff2=4.5,order=1,budget = 3)
test(outtest2,platoonlist)
|
StarcoderdataPython
|
9661613
|
from django.conf.urls.defaults import *
urlpatterns = patterns('',
(r'^login$', 'views.login'),
(r'^logout$', 'views.logout'),
(r'^addbmark(?:\/)*$', 'views.addBmark'),
(r'^nb/', include('nextbus.urls') ),
(r'^catch/(?P<bmark>[\w|-]+)(?:\/)*$', 'views.catch'),
(r'^delete/bmark/(?P<bmark>[\w|-]+)(?:\/)*(?P<confirm>[\w|-]*)$', 'views.deleteBmark'),
(r'^clear-cache$', 'views.clearCache'),
(r'^(?:.*)', 'views.home'),
)
|
StarcoderdataPython
|
1864976
|
import re
_symbol_delimiter_regex = re.compile(r'[./\-_]')
def split_nasdaq(symbol):
    sym = _symbol_delimiter_regex.sub('', symbol)
return sym[:4], sym[4:]
def split_nyse(symbol):
return re.split(_symbol_delimiter_regex, symbol, maxsplit=1)
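# Illustrative usage (hypothetical ticker symbols):
#   split_nasdaq("AAPL.W")  -> ("AAPL", "W")
#   split_nyse("BRK-B")     -> ["BRK", "B"]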
|
StarcoderdataPython
|
6425416
|
<reponame>glotaran/pyglotaran_extras
from __future__ import annotations
import subprocess
import sys
from pathlib import Path
from textwrap import dedent
from typing import TYPE_CHECKING
from tests.conftest import wrapped_get_script_dir
from pyglotaran_extras.io.setup_case_study import get_script_dir
from pyglotaran_extras.io.setup_case_study import setup_case_study
if TYPE_CHECKING:
from _pytest.monkeypatch import MonkeyPatch
def test_get_script_dir():
"""Called directly"""
expected = Path(__file__).parent
assert get_script_dir() == expected
def test_get_script_dir_in_closure():
"""Called inside other function imported from different file"""
expected = Path(__file__).parent
assert wrapped_get_script_dir() == expected
def test_get_script_dir_tmp_path(tmp_path: Path):
"""File in temp folder"""
tmp_file = tmp_path / "foo.py"
content = dedent(
"""
from pyglotaran_extras.io.setup_case_study import get_script_dir
print(get_script_dir())
"""
)
tmp_file.write_text(content)
printed_result = subprocess.run(
" ".join([sys.executable, tmp_file.resolve().as_posix()]), capture_output=True, shell=True
)
result = printed_result.stdout.decode().rstrip("\n\r")
assert printed_result.returncode == 0
assert Path(result) == tmp_path.resolve()
def test_setup_case_study(monkeypatch: MonkeyPatch, tmp_path: Path):
"""Default settings"""
mock_home = tmp_path / "home"
monkeypatch.setattr(Path, "home", lambda: mock_home)
results_folder, script_folder = setup_case_study()
assert mock_home.exists()
assert results_folder.exists()
assert results_folder == mock_home / "pyglotaran_results/io"
assert script_folder == Path(__file__).parent
def test_setup_case_study_custom(tmp_path: Path):
"""Custom settings"""
results_folder_root = tmp_path / "foo"
results_folder, script_folder = setup_case_study(
output_folder_name="foo", results_folder_root=tmp_path
)
assert results_folder_root.exists()
assert results_folder.exists()
assert results_folder == results_folder_root / "io"
assert script_folder == Path(__file__).parent
|
StarcoderdataPython
|
8158218
|
<filename>the_sward_to_offer/32.2.py
# -*- coding:utf-8 -*-
"""
Print a binary tree level by level from top to bottom; within each level, nodes are output left to right, one line per level.
input0: {8,6,10,5,7,9,11}
output0: [[8],[6,10],[5,7,9,11]]
"""
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# 返回二维列表[[1,2],[4,5]]
def Print(self, pRoot):
# write code here
if pRoot is None:
return []
print_list = []
cur_layer = [pRoot]
while len(cur_layer)>0:
cur_out = []
cur_layer_len = len(cur_layer)
for _ in range(cur_layer_len):
n = cur_layer.pop(0)
cur_out.append(n.val)
if n.left:
cur_layer.append(n.left)
if n.right:
cur_layer.append(n.right)
print_list.append(cur_out)
return print_list
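# Usage sketch (assumes the TreeNode class sketched in the comment above is available):
#   root = TreeNode(8)
#   root.left, root.right = TreeNode(6), TreeNode(10)
#   root.left.left, root.left.right = TreeNode(5), TreeNode(7)
#   root.right.left, root.right.right = TreeNode(9), TreeNode(11)
#   Solution().Print(root)  # -> [[8], [6, 10], [5, 7, 9, 11]]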
|
StarcoderdataPython
|
6533030
|
<filename>package/spack-findutils/package.py
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by <NAME>, <EMAIL>, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Findutils(AutotoolsPackage):
"""The GNU Find Utilities are the basic directory searching
utilities of the GNU operating system."""
homepage = "https://www.gnu.org/software/findutils/"
url = "http://ftpmirror.gnu.org/findutils/findutils-4.6.0.tar.gz"
version('4.6.0', '9936aa8009438ce185bea2694a997fc1')
version('4.4.2', '351cc4adb07d54877fa15f75fb77d39f')
version('4.4.1', '5883f569dc021eee765f330bb7a3782d')
version('4.4.0', '49e769ac4382fae6f104f99d54d0a112')
version('4.2.33', 'b7e35aa175778c84942b1fee4144988b')
version('4.2.32', 'aaa6beeb41a6f04963dff58f24a55b96')
version('4.2.31', 'a0e31a0f18a49709bf5a449867c8049a')
version('4.2.30', 'c35ff6502e0b3514c99089cb5d333c25')
version('4.2.29', '24e76434ca74ba3c2c6ad621eb64e1ff')
version('4.2.28', 'f5fb3349354ee3d94fceb81dab5c71fd')
version('4.2.27', 'f1e0ddf09f28f8102ff3b90f3b5bc920')
version('4.2.26', '9ac4e62937b1fdc4eb643d1d4bf117d3')
version('4.2.25', 'e92fef6714ffa9972f28a1a423066921')
version('4.2.23', 'ecaff8b060e8d69c10eb2391a8032e26')
version('4.2.20', '7c8e12165b221dd67a19c00d780437a4')
version('4.2.18', '8aac2498435f3f1882678fb9ebda5c34')
version('4.2.15', 'a881b15aa7170aea045bf35cc92d48e7')
version('4.1.20', 'e90ce7222daadeb8616b8db461e17cbc')
version('4.1', '3ea8fe58ef5386da75f6c707713aa059')
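# Example usage (with Spack on the PATH): `spack install findutils@4.6.0`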
|
StarcoderdataPython
|
8052366
|
from typing import List, Any
from talipp.indicators.Indicator import Indicator
from talipp.indicators.EMA import EMA
from talipp.indicators.AccuDist import AccuDist
from talipp.ohlcv import OHLCV
class ChaikinOsc(Indicator):
"""
Chaikin Oscillator
Output: a list of floats
"""
def __init__(self, period_fast: int, period_slow: int, input_values: List[OHLCV] = None):
super().__init__()
self.period_fast = period_fast
self.period_slow = period_slow
self.accu_dist = AccuDist()
self.add_sub_indicator(self.accu_dist)
self.ema_fast = EMA(period_fast)
self.add_managed_sequence(self.ema_fast)
self.ema_slow = EMA(period_slow)
self.add_managed_sequence(self.ema_slow)
self.initialize(input_values)
def _calculate_new_value(self) -> Any:
if not self.accu_dist.has_output_value():
return None
self.ema_fast.add_input_value(self.accu_dist[-1])
self.ema_slow.add_input_value(self.accu_dist[-1])
if not self.ema_fast.has_output_value() or not self.ema_slow.has_output_value():
return None
return self.ema_fast[-1] - self.ema_slow[-1]
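# Usage sketch (assumes talipp's OHLCV takes open/high/low/close/volume positionally
# and that indicators expose their output list-style, as used internally above):
#   values = [OHLCV(10, 12, 9, 11, 100), OHLCV(11, 13, 10, 12, 150), OHLCV(12, 14, 11, 13, 120)]
#   chaikin = ChaikinOsc(period_fast=3, period_slow=10, input_values=values)
#   chaikin[-1]   # most recent oscillator value; None until both EMAs have output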
|
StarcoderdataPython
|
4967794
|
<filename>docs/conf.py
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import datetime
import os
import sys
# Load all of the global Astropy configuration
from sphinx_astropy.conf.v1 import *
# Add some stuff to the intersphinx mapping
intersphinx_mapping['sunpy'] = ('https://docs.sunpy.org/en/stable/', None)
intersphinx_mapping['yt'] = ('https://yt-project.org/doc/', None)
intersphinx_mapping['fiasco'] = ('https://fiasco.readthedocs.io/en/latest/', None)
intersphinx_mapping['dask'] = ('https://docs.dask.org/en/latest', None)
# Get configuration information from setup.cfg
from configparser import ConfigParser
conf = ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))
# -- General configuration ----------------------------------------------------
# By default, highlight as Python 3.
highlight_language = 'python3'
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
# check_sphinx_version("1.2.1")
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
"""
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = setup_cfg['name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
datetime.datetime.now().year, setup_cfg['author'])
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
__import__(setup_cfg['name'])
package = sys.modules[setup_cfg['name']]
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__
# -- Options for HTML output ---------------------------------------------------
try:
html_theme = "sphinx_rtd_theme"
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
html_theme = 'default'
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = ''
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
# Render inheritance diagrams in SVG
graphviz_output_format = "svg"
graphviz_dot_args = [
'-Nfontsize=10',
'-Nfontname=Helvetica Neue, Helvetica, Arial, sans-serif',
'-Efontsize=10',
'-Efontname=Helvetica Neue, Helvetica, Arial, sans-serif',
'-Gfontsize=10',
'-Gfontname=Helvetica Neue, Helvetica, Arial, sans-serif'
]
"""
Write the latest changelog into the documentation.
"""
target_file = os.path.abspath("./whatsnew/latest_changelog.txt")
# NOTE: `is_development` is not defined elsewhere in this file; assume development
# builds carry a ".dev" suffix in the version string.
is_development = ".dev" in release
try:
from sunpy.util.towncrier import generate_changelog_for_docs
if is_development:
generate_changelog_for_docs("../", target_file)
except Exception as e:
print(f"Failed to add changelog to docs with error {e}.")
# Make sure the file exists or else sphinx will complain.
open(target_file, 'a').close()
|
StarcoderdataPython
|
9612894
|
<gh_stars>0
"""Module containing the user entity"""
from django.db import models
from django.contrib.auth.models import (AbstractBaseUser, BaseUserManager)
from shared.base_model import BaseModel
class User(AbstractBaseUser, BaseModel):
"""Model for a user in the system"""
class Meta:
"""Class to add more information on user model"""
ordering = ('username', )
username = models.CharField(max_length=150, unique=True)
email = models.EmailField(max_length=150, unique=True)
password = models.CharField(max_length=100)
first_name = models.CharField(max_length=100, null=True, blank=True)
last_name = models.CharField(max_length=100, null=True, blank=True)
image_url = models.URLField(
default='https://res.cloudinary.com/health-id/image/upload/'
'v1554552278/Profile_Picture_Placeholder.png'
)
is_admin = models.BooleanField(default=False)
last_login = models.DateTimeField(auto_now=True)
USERNAME_FIELD = "email"
REQUIRED_FIELDS = []
def __str__(self):
return self.email
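# Usage sketch (illustrative values; persisting requires a configured database):
#   user = User(username="jane", email="jane@example.com")
#   user.set_password("s3cret")   # password hashing inherited from AbstractBaseUser
#   user.save()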
|
StarcoderdataPython
|
4873494
|
import numpy as np, random
MIN = "[MIN"
MAX = "[MAX"
MED = "[MED"
SUM_MOD = "[SM"
END = "]"
OPERATORS = [MIN, MAX, MED, SUM_MOD]
VALUES = range(10)
VALUE_P = 0.75
MAX_ARGS = 3
MAX_DEPTH = 4
def generate_tree(depth):
if depth < MAX_DEPTH:
r = random.random()
else:
r = 1
if r > VALUE_P:
value = random.choice(VALUES)
return value
else:
num_values = random.randint(2, MAX_ARGS)
values = []
for _ in range(num_values):
values.append(generate_tree(depth + 1))
op = random.choice(OPERATORS)
t = (op, values[0])
for value in values[1:]:
t = (t, value)
t = (t, END)
return t
def to_string(t, parens=False):
if isinstance(t, str):
return t
elif isinstance(t, int):
return str(t)
else:
if parens:
return '( ' + to_string(t[0], parens) + ' ' + to_string(t[1], parens) + ' )'
else:
return to_string(t[0], parens) + ' ' + to_string(t[1], parens)# + ' '
op2token = dict(zip(list(np.arange(10).astype(str)) + OPERATORS + [END], range(2, 15+2)))
def to_tokens(t):
string = to_string(t)
tokens = list(map(lambda x: op2token[x], string.split(' ')))
return tokens
def to_value(t):
if not isinstance(t, tuple):
return t
l = to_value(t[0])
r = to_value(t[1])
if l in OPERATORS: # Create an unsaturated function.
return (l, [r])
elif r == END: # l must be an unsaturated function.
if l[0] == MIN:
return min(l[1])
elif l[0] == MAX:
return max(l[1])
# elif l[0] == FIRST:
# return l[1][0]
# elif l[0] == LAST:
# return l[1][-1]
elif l[0] == MED:
return int(np.median(l[1]))
elif l[0] == SUM_MOD:
return (np.sum(l[1]) % 10)
elif isinstance(l, tuple): # We've hit an unsaturated function and an argument.
return (l[0], l[1] + [r])
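# Worked example (hand-built tree mirroring the shape generate_tree() produces):
#   t = ((("[MIN", 3), 5), "]")   # encodes the expression "[MIN 3 5 ]"
#   to_string(t)  -> "[MIN 3 5 ]"
#   to_tokens(t)  -> [op2token["[MIN"], op2token["3"], op2token["5"], op2token["]"]]
#   to_value(t)   -> 3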
|
StarcoderdataPython
|
6318
|
<filename>fastmvsnet/train1.py
#!/usr/bin/env python
import argparse
import os.path as osp
import logging
import time
import sys
sys.path.insert(0, osp.dirname(__file__) + '/..')
import torch
import torch.nn as nn
from fastmvsnet.config import load_cfg_from_file
from fastmvsnet.utils.io import mkdir
from fastmvsnet.utils.logger import setup_logger
from fastmvsnet.utils.torch_utils import set_random_seed
from fastmvsnet.model1 import build_pointmvsnet as build_model
from fastmvsnet.solver import build_optimizer, build_scheduler
from fastmvsnet.utils.checkpoint import Checkpointer
from fastmvsnet.dataset1 import build_data_loader
from fastmvsnet.utils.tensorboard_logger import TensorboardLogger
from fastmvsnet.utils.metric_logger import MetricLogger
from fastmvsnet.utils.file_logger import file_logger
def parse_args():
parser = argparse.ArgumentParser(description="PyTorch Fast-MVSNet Training")
parser.add_argument(
"--cfg",
dest="config_file",
default="",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
return args
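# Example invocation (config path is illustrative; trailing KEY VALUE pairs are
# merged into the config through `opts`):
#   python fastmvsnet/train1.py --cfg configs/dtu.yaml TRAIN.LOG_PERIOD 10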
def train_model(model,
loss_fn,
metric_fn,
image_scales,
inter_scales,
isFlow,
data_loader,
optimizer,
curr_epoch,
tensorboard_logger,
log_period=1,
output_dir="",
):
logger = logging.getLogger("fastmvsnet.train")
meters = MetricLogger(delimiter=" ")
model.train()
end = time.time()
total_iteration = data_loader.__len__()
path_list = []
for iteration, data_batch in enumerate(data_loader):
data_time = time.time() - end
curr_ref_img_path = data_batch["ref_img_path"]
path_list.extend(curr_ref_img_path)
data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items() if isinstance(v, torch.Tensor)}
preds = model(data_batch, image_scales, inter_scales, isFlow)
optimizer.zero_grad()
loss_dict = loss_fn(preds, data_batch, isFlow)
metric_dict = metric_fn(preds, data_batch, isFlow)
losses = sum(loss_dict.values())
#print("LOSS DICT", loss_dict['coarse_loss'])
#print("LOSSES", loss_dict.values())
meters.update(loss=losses, **loss_dict, **metric_dict)
losses.backward()
optimizer.step()
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
if iteration % log_period == 0:
logger.info(
meters.delimiter.join(
[
"EPOCH: {epoch:2d}",
"iter: {iter:4d}",
"{meters}",
"lr: {lr:.2e}",
"max mem: {memory:.0f}",
]
).format(
epoch=curr_epoch,
iter=iteration,
meters=str(meters),
lr=optimizer.param_groups[0]["lr"],
memory=torch.cuda.max_memory_allocated() / (1024.0 ** 2),
)
)
tensorboard_logger.add_scalars(loss_dict, curr_epoch * total_iteration + iteration, prefix="train")
tensorboard_logger.add_scalars(metric_dict, curr_epoch * total_iteration + iteration, prefix="train")
if iteration % (100 * log_period) == 0:
file_logger(data_batch, preds, curr_epoch * total_iteration + iteration, output_dir, prefix="train")
return meters
def validate_model(model,
loss_fn,
metric_fn,
image_scales,
inter_scales,
isFlow,
data_loader,
curr_epoch,
tensorboard_logger,
log_period=1,
output_dir="",
):
logger = logging.getLogger("fastmvsnet.validate")
meters = MetricLogger(delimiter=" ")
model.train()
end = time.time()
total_iteration = data_loader.__len__()
with torch.no_grad():
for iteration, data_batch in enumerate(data_loader):
data_time = time.time() - end
curr_ref_img_path = data_batch["ref_img_path"]
data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items() if isinstance(v, torch.Tensor)}
preds = model(data_batch, image_scales, inter_scales, isFlow)
loss_dict = loss_fn(preds, data_batch, isFlow)
metric_dict = metric_fn(preds, data_batch, isFlow)
losses = sum(loss_dict.values())
meters.update(loss=losses, **loss_dict, **metric_dict)
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
if iteration % log_period == 0:
logger.info(
meters.delimiter.join(
[
"EPOCH: {epoch:2d}",
"iter: {iter:4d}",
"{meters}",
]
).format(
epoch=curr_epoch,
iter=iteration,
meters=str(meters),
)
)
tensorboard_logger.add_scalars(meters.meters, curr_epoch * total_iteration + iteration, prefix="valid")
if iteration % (100 * log_period) == 0:
file_logger(data_batch, preds, curr_epoch * total_iteration + iteration, output_dir, prefix="valid")
return meters
def train(cfg, output_dir=""):
logger = logging.getLogger("fastmvsnet.trainer")
# build model
set_random_seed(cfg.RNG_SEED)
model, loss_fn, metric_fn = build_model(cfg)
logger.info("Build model:\n{}".format(str(model)))
model = nn.DataParallel(model).cuda()
# build optimizer
optimizer = build_optimizer(cfg, model)
# build lr scheduler
scheduler = build_scheduler(cfg, optimizer)
# build checkpointer
checkpointer = Checkpointer(model,
optimizer=optimizer,
scheduler=scheduler,
save_dir=output_dir,
logger=logger)
checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT, resume=cfg.AUTO_RESUME)
ckpt_period = cfg.TRAIN.CHECKPOINT_PERIOD
# build data loader
train_data_loader = build_data_loader(cfg, mode="train")
val_period = cfg.TRAIN.VAL_PERIOD
val_data_loader = build_data_loader(cfg, mode="val") if val_period > 0 else None
# build tensorboard logger (optionally by comment)
tensorboard_logger = TensorboardLogger(output_dir)
# train
max_epoch = cfg.SCHEDULER.MAX_EPOCH
start_epoch = checkpoint_data.get("epoch", 0)
best_metric_name = "best_{}".format(cfg.TRAIN.VAL_METRIC)
best_metric = checkpoint_data.get(best_metric_name, None)
logger.info("Start training from epoch {}".format(start_epoch))
for epoch in range(start_epoch, max_epoch):
cur_epoch = epoch + 1
scheduler.step()
start_time = time.time()
train_meters = train_model(model,
loss_fn,
metric_fn,
image_scales=cfg.MODEL.TRAIN.IMG_SCALES,
inter_scales=cfg.MODEL.TRAIN.INTER_SCALES,
isFlow=(cur_epoch > cfg.SCHEDULER.INIT_EPOCH),
data_loader=train_data_loader,
optimizer=optimizer,
curr_epoch=epoch,
tensorboard_logger=tensorboard_logger,
log_period=cfg.TRAIN.LOG_PERIOD,
output_dir=output_dir,
)
epoch_time = time.time() - start_time
logger.info("Epoch[{}]-Train {} total_time: {:.2f}s".format(
cur_epoch, train_meters.summary_str, epoch_time))
# checkpoint
if cur_epoch % ckpt_period == 0 or cur_epoch == max_epoch:
checkpoint_data["epoch"] = cur_epoch
checkpoint_data[best_metric_name] = best_metric
checkpointer.save("model_{:03d}".format(cur_epoch), **checkpoint_data)
# validate
if val_period < 1:
continue
if cur_epoch % val_period == 0 or cur_epoch == max_epoch:
val_meters = validate_model(model,
loss_fn,
metric_fn,
image_scales=cfg.MODEL.VAL.IMG_SCALES,
inter_scales=cfg.MODEL.VAL.INTER_SCALES,
isFlow=(cur_epoch > cfg.SCHEDULER.INIT_EPOCH),
data_loader=val_data_loader,
curr_epoch=epoch,
tensorboard_logger=tensorboard_logger,
log_period=cfg.TEST.LOG_PERIOD,
output_dir=output_dir,
)
logger.info("Epoch[{}]-Val {}".format(cur_epoch, val_meters.summary_str))
# best validation
cur_metric = val_meters.meters[cfg.TRAIN.VAL_METRIC].global_avg
if best_metric is None or cur_metric > best_metric:
best_metric = cur_metric
checkpoint_data["epoch"] = cur_epoch
checkpoint_data[best_metric_name] = best_metric
checkpointer.save("model_best", **checkpoint_data)
logger.info("Best val-{} = {}".format(cfg.TRAIN.VAL_METRIC, best_metric))
return model
def main():
args = parse_args()
num_gpus = torch.cuda.device_count()
cfg = load_cfg_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
output_dir = cfg.OUTPUT_DIR
if output_dir:
config_path = osp.splitext(args.config_file)[0]
config_path = config_path.replace("configs", "outputs1")
output_dir = output_dir.replace('@', config_path)
mkdir(output_dir)
logger = setup_logger("fastmvsnet", output_dir, prefix="train")
logger.info("Using {} GPUs".format(num_gpus))
logger.info(args)
logger.info("Loaded configuration file {}".format(args.config_file))
logger.info("Running with config:\n{}".format(cfg))
train(cfg, output_dir)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
9655500
|
# -*- coding: utf-8 -*-
"""
convnet-est-loss
"""
import numpy as np
from os import sys, path
import argparse
caffe_dir = '/home/aaskov/caffe/'
def damage_range(x):
if float(x) < 0.0:
raise argparse.ArgumentTypeError("%r is not positive" % x)
return float(x)
def run():
parser = argparse.ArgumentParser(
description='Neural network damage experiment. ',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--proto', required=True, type=str,
help='Network prototxt.')
parser.add_argument('--model', required=True, type=str,
help='Network caffemodel.')
parser.add_argument('--meanfile', required=True, type=str,
help='Data mean file.')
parser.add_argument('--data', required=True, type=str,
help='Data.')
parser.add_argument('--layer', required=True, type=str,
help='Layer to apply damage to.')
parser.add_argument('--prefix', required=True, type=str,
help='Uniqe model name.')
parser.add_argument('--damage', default=1.0, type=damage_range,
help='Applied damage range.')
parser.add_argument('--step-num', default=10, type=int,
help='Number of steps in damage interval.')
parser.add_argument('--iterations', default=100, type=int,
help='Number of iterations to run.')
parser.add_argument('--repeat', default=0, type=int,
help='Number of repeated experiments to run.')
args = parser.parse_args()
# Object file
obj = 'experiment_damage_' + str(args.layer) + '_' + str(args.damage)
obj += '_step_' + str(args.step_num) + '_iter_' + str(args.iterations)
obj += '_prefix_' + str(args.prefix)
loss_list = list()
for std in np.linspace(0.0, args.damage, args.step_num):
_loss = list()
for t in range(args.repeat + 1):
# Fetch a network structure and apply damage
net = Network(args.proto, args.model, args.meanfile, args.data)
net.__add_damage__(args.layer, std)
# Forward to get loss
top_1, top_5, acc, loss = net.forward(maxrun=args.iterations)
_loss.append(loss)
loss_list.append(_loss)
# Store result
if path.exists(obj+'.pkl'):
read_loss = load_obj(obj)
combined = np.concatenate((read_loss, np.array(loss_list)), 1)
save_obj(combined, obj)
else:
save_obj(np.array(loss_list), obj)
if __name__ == '__main__' and __package__ is None:
# Append parrent directory to sys path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from network import Network
from input_output import save_obj, load_obj
# Setup Caffe
sys.path.insert(0, caffe_dir + 'python')
import caffe
caffe.set_mode_cpu()
# Run experiment
run()
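# Example invocation (all file, layer and prefix names below are placeholders):
#   python convnet_est_loss.py --proto deploy.prototxt --model weights.caffemodel \
#       --meanfile mean.binaryproto --data val_data --layer conv1 --prefix run1 \
#       --damage 0.5 --step-num 5 --iterations 50 --repeat 2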
|
StarcoderdataPython
|
4891951
|
<reponame>lresende/text-extensions-for-pandas
#
# Copyright (c) 2020 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# conll.py
#
# I/O functions related to CONLL entity format and its many derivatives.
from typing import *
import numpy as np
import pandas as pd
import regex
from text_extensions_for_pandas.array import (
TokenSpan,
CharSpanArray,
TokenSpanArray,
)
# Special token that CoNLL-2003 format uses to delineate the documents in
# the collection.
_CONLL_DOC_SEPARATOR = "-DOCSTART-"
# _PUNCT_REGEX = regex.compile(f"[{string.punctuation}]+")
_PUNCT_OR_RIGHT_PAREN_REGEX = regex.compile(
# Punctuation, right paren, or apostrophe followed by 1-2 lowercase letters
# But not single or double quote, which could either begin or end a quotation
'[!#%)*+,-./:;=>?@\\]^_`|}~]|\'[a-zA-Z]{1,2}')
# Tokens that behave like left parentheses for whitespace purposes,
# including dollar signs ("$100", not "$ 100")
_LEFT_PAREN_REGEX = regex.compile(r"[(<\[{$]+")
# _PUNCT_MATCH_FN = np.vectorize(lambda s: _PUNCT_REGEX.fullmatch(s) is not None)
_SPACE_BEFORE_MATCH_FN = np.vectorize(lambda s:
_PUNCT_OR_RIGHT_PAREN_REGEX.fullmatch(s)
is not None)
_SPACE_AFTER_MATCH_FN = np.vectorize(lambda s:
_LEFT_PAREN_REGEX.fullmatch(s)
is not None)
def _make_empty_meta_values(column_names: List[str], iob_columns: List[bool]) \
-> Dict[str, List[str]]:
ret = {}
for i in range(len(column_names)):
name = column_names[i]
if not iob_columns[i]:
ret[name] = []
else:
ret[f"{name}_iob"] = []
ret[f"{name}_type"] = []
return ret
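# For example, _make_empty_meta_values(["pos", "ent"], [False, True]) returns
# {"pos": [], "ent_iob": [], "ent_type": []}: plain columns map to a single list,
# while IOB columns are split into "<name>_iob" and "<name>_type" lists.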
class _SentenceData:
"""
Data structure that encapsulates one sentence's worth of data
from a parsed CoNLL-2003 file.
Not intended for use outside this file.
"""
def __init__(self, column_names: List[str], iob_columns: List[bool]):
self._column_names = column_names
self._iob_columns = iob_columns
# Surface form of token
self._tokens = [] # Type: List[str]
# Metadata columns by name
self._token_metadata = _make_empty_meta_values(column_names, iob_columns)
@property
def num_tokens(self) -> int:
return len(self._tokens)
@property
def tokens(self) -> List[str]:
return self._tokens
@property
def token_metadata(self) -> Dict[str, List[str]]:
return self._token_metadata
def add_line(self, line_num: int, line_elems: List[str]):
"""
:param line_num: Location in file, for error reporting
:param line_elems: Fields of a line, pre-split
"""
if len(line_elems) != 1 + len(self._column_names) :
raise ValueError(f"Unexpected number of elements {len(line_elems)} "
f"at line {line_num}; expected "
f"{1 + len(self._column_names)} elements.")
token = line_elems[0]
raw_tags = line_elems[1:]
self._tokens.append(token)
for i in range(len(raw_tags)):
raw_tag = raw_tags[i]
name = self._column_names[i]
if not self._iob_columns[i]:
# non-IOB data
self._token_metadata[name].append(raw_tag)
else:
# IOB-format data; split into two values
if raw_tag.startswith("I-") or raw_tag.startswith("B-"):
# Tokens that are entities are tagged with tags like
# "I-PER" or "B-MISC".
tag, entity = raw_tag.split("-")
elif raw_tag == "O":
tag = raw_tag
entity = None
elif raw_tag == "-X-":
# Special metadata value for -DOCSTART- tags in the CoNLL corpus.
tag = "O"
entity = None
else:
raise ValueError(f"Tag '{raw_tag}' of IOB-format field {i} at line "
f"{line_num} does not start with 'I-', 'O', "
f"or 'B-'.\n"
f"Fields of line are: {line_elems}")
self._token_metadata[f"{name}_iob"].append(tag)
self._token_metadata[f"{name}_type"].append(entity)
def _parse_conll_file(input_file: str,
column_names: List[str],
iob_columns: List[bool]) \
-> List[List[_SentenceData]]:
"""
Parse the CoNLL-2003 file format for training/test data to Python
objects.
The format is especially tricky, so everything here is straight
non-vectorized Python code. If you want performance, write the
contents of your CoNLL files back out into a file format that
supports performance.
:param input_file: Location of the file to read
:param column_names: Names for the metadata columns that come after the
token text. These names will be used to generate the names of the dataframe
that this function returns.
:param iob_columns: Mask indicating which of the metadata columns after the
token text should be treated as being in IOB format. If a column is in IOB format,
the returned data structure will contain *two* columns, holding IOB tags and
entity type tags, respectively. For example, an input column "ent" will turn into
output columns "ent_iob" and "ent_type".
:returns: A list of lists of _SentenceData objects. The top list has one entry per
document. The next level lists have one entry per sentence.
"""
with open(input_file, "r") as f:
lines = f.readlines()
# Build up a list of document metadata as Python objects
    docs = []  # Type: List[List[_SentenceData]]
current_sentence = _SentenceData(column_names, iob_columns)
# Information about the current document
    sentences = []  # Type: List[_SentenceData]
for i in range(len(lines)):
line = lines[i].strip()
if 0 == len(line):
# Blank line is the sentence separator
if current_sentence.num_tokens > 0:
sentences.append(current_sentence)
current_sentence = _SentenceData(column_names, iob_columns)
else:
# Not at the end of a sentence
line_elems = line.split(" ")
current_sentence.add_line(i, line_elems)
if line_elems[0] == _CONLL_DOC_SEPARATOR and i > 0:
# End of document. Wrap up this document and start a new one.
#
# Note that the special "start of document" token is considered part
# of the document. If you do not follow this convention, the
# result sets from CoNLL 2003 won't line up.
# Note also that `current_sentence` is not in `sentences` and will be
# added to the next document.
docs.append(sentences)
sentences = []
# Close out the last sentence and document, if needed
if current_sentence.num_tokens > 0:
sentences.append(current_sentence)
if len(sentences) > 0:
docs.append(sentences)
return docs
def _parse_conll_output_file(doc_dfs: List[pd.DataFrame],
input_file: str
) -> List[Dict[str, List[str]]]:
"""
Parse the CoNLL-2003 file format for output data to Python
objects. This format is similar to the format that `_parse_conll_file`
produces, but without the token and document boundary information.
:param doc_dfs: List of `pd.DataFrame`s of token information from the
corresponding training data file, one `DataFrame` per document.
Used for determining document boundaries, which are not encoded in
CoNLL-2003 output file format.
:param input_file: Location of the file to read
:returns: A list of dicts. The top list has one entry per
document. The next level contains lists under the following keys:
* `iob`: List of IOB2 tags as strings. This function does **NOT**
correct for the silly way that CoNLL-format uses "B" tags. See
`_fix_iob_tags()` for that correction.
* `entity`: List of entity tags where `iob` contains I's or B's.
`None` everywhere else.
"""
with open(input_file, "r") as f:
lines = f.readlines()
# Build up a list of document metadata as Python objects
docs = [] # Type: List[Dict[str, List[str]]]
# Position in the corpus
doc_num = 0
num_tokens_in_doc = len(doc_dfs[doc_num].index)
token_num = 0
# Information about the current document's tokens
iobs = [] # Type: List[str]
entities = [] # Type: List[str]
for i in range(len(lines)):
line = lines[i].strip()
if 0 == len(line):
# Blank line is the sentence separator.
continue
if " " in line:
raise ValueError(f"Line {i} contains unexpected space character.\n"
f"Line was: '{line}'")
raw_tag = line
if raw_tag.startswith("I") or raw_tag.startswith("B"):
# Tokens that are entities are tagged with tags like
# "I-PER" or "B-MISC".
tag, entity = raw_tag.split("-")
elif raw_tag == "O":
tag = raw_tag
entity = None
else:
raise ValueError(f"Unexpected tag {raw_tag} at line {i}.\n"
f"Line was: '{line}'")
iobs.append(tag)
entities.append(entity)
token_num += 1
if token_num == num_tokens_in_doc:
# End of current document, advance to next
docs.append({
"iob": iobs,
"entity": entities
})
iobs = []
entities = []
doc_num += 1
token_num = 0
if doc_num < len(doc_dfs):
num_tokens_in_doc = len(doc_dfs[doc_num].index)
if doc_num < len(doc_dfs):
print(f"WARNING: Corpus has {len(doc_dfs)} documents, but "
f"only found outputs for {doc_num} of them.")
# raise ValueError(f"Corpus has {len(doc_dfs)} documents, but "
# f"only found outputs for {doc_num} of them.")
return docs
def _iob_to_iob2(df: pd.DataFrame, column_names: List[str],
iob_columns: List[bool]) -> pd.DataFrame:
"""
In CoNLL-2003 format, entities are stored in IOB format, where the first
token of an entity is only tagged "B" when there are two entities of the
same type back-to-back. This format makes downstream processing difficult.
If a given position has an `I` tag, that position may or may not be the
first token of an entity. Code will need to inspect both the I/O/B tags
*and* the entity type of multiple other tokens *and* the boundaries between
sentences to disambiguate between those two cases.
This function converts these IOB tags to the easier-to-consume IOB2 format;
see
https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)
for details. Basically, every entity in IOB2 format begins with a `B` tag.
The `I` tag is only used for the second, third, etc. tokens of an entity.
:param df: A `pd.DataFrame` with one row per token of the document.
In addition to the metadata columns corresponding to `column_names`, this
dataframe must also contain sentence information in a column called `sentence`.
:param column_names: Names for the metadata columns in the original data file
that were used to generate the names of the columns of `df`.
:param iob_columns: Mask indicating which of the metadata columns after the
token text should be treated as being in IOB format.
:returns: A version of `df` with corrected IOB2 tags in the `ent_iob`
column. The original dataframe is not modified.
"""
ret = df.copy()
sentence_begins = df["sentence"].values.begin_token
for i in range(len(column_names)):
if iob_columns[i]:
name = column_names[i]
iobs = df[f"{name}_iob"].values.copy() # Modified in place
entities = df[f"{name}_type"].values
# Special-case the first one
if iobs[0] == "I":
iobs[0] = "B"
for i in range(1, len(iobs)):
tag = iobs[i]
prev_tag = iobs[i - 1]
if tag == "I":
if (
prev_tag == "O" # Previous token not an entity
or (prev_tag in ("I", "B")
and entities[i] != entities[i - 1]
) # Previous token a different type of entity
or (sentence_begins[i] != sentence_begins[i - 1]
) # Start of new sentence
):
iobs[i] = "B"
ret[f"{name}_iob"] = iobs
return ret
def _doc_to_df(doc: List[_SentenceData],
column_names: List[str],
iob_columns: List[bool],
space_before_punct: bool) -> pd.DataFrame:
"""
Convert the "Python objects" representation of a document from a
CoNLL-2003 file into a `pd.DataFrame` of token metadata.
:param doc: List of Python objects that represents the document.
:param column_names: Names for the metadata columns that come after the
token text. These names will be used to generate the names of the dataframe
that this function returns.
:param iob_columns: Mask indicating which of the metadata columns after the
token text should be treated as being in IOB format. If a column is in IOB format,
the returned dataframe will contain *two* columns, holding IOB2 tags and
entity type tags, respectively. For example, an input column "ent" will turn into
output columns "ent_iob" and "ent_type".
:param space_before_punct: If `True`, add whitespace before
punctuation characters (and after left parentheses)
when reconstructing the text of the document.
:return: DataFrame with four columns:
* `char_span`: Span of each token, with character offsets.
Backed by the concatenation of the tokens in the document into
a single string with one sentence per line.
* `token_span`: Span of each token, with token offsets.
Backed by the contents of the `char_span` column.
* `ent_iob`: IOB2-format tags of tokens, exactly as they appeared
in the original file, with no corrections applied.
* `ent_type`: Entity type names for tokens tagged "I" or "B" in
the `ent_iob` column; `None` everywhere else.
"""
# Character offsets of tokens in the reconstructed document
begins_list = [] # Type: List[np.ndarray]
ends_list = [] # Type: List[np.ndarray]
# Reconstructed text of each sentence
sentences_list = [] # Type: List[np.ndarray]
# Token offsets of sentences containing each token in the document.
sentence_begins_list = [] # Type: List[np.ndarray]
sentence_ends_list = [] # Type: List[np.ndarray]
# Token metadata column values. Key is column name, value is metadata for
# each token.
meta_lists = _make_empty_meta_values(column_names, iob_columns)
char_position = 0
token_position = 0
for sentence_num in range(len(doc)):
sentence = doc[sentence_num]
tokens = sentence.tokens
# Don't put spaces before punctuation in the reconstituted string.
no_space_before_mask = (
            np.zeros(len(tokens), dtype=bool) if space_before_punct
else _SPACE_BEFORE_MATCH_FN(tokens))
no_space_after_mask = (
            np.zeros(len(tokens), dtype=bool) if space_before_punct
else _SPACE_AFTER_MATCH_FN(tokens))
no_space_before_mask[0] = True # No space before first token
no_space_after_mask[-1] = True # No space after last token
shifted_no_space_after_mask = np.roll(no_space_after_mask, 1)
prefixes = np.where(
np.logical_or(no_space_before_mask,
shifted_no_space_after_mask),
"", " ")
string_parts = np.ravel((prefixes, tokens), order="F")
sentence_text = "".join(string_parts)
sentences_list.append(sentence_text)
lengths = np.array([len(t) for t in tokens])
prefix_lengths = np.array([len(p) for p in prefixes])
# Begin and end offsets, accounting for which tokens have spaces
# before them.
e = np.cumsum(lengths + prefix_lengths)
b = e - lengths
begins_list.append(b + char_position)
ends_list.append(e + char_position)
sentence_begin_token = token_position
sentence_end_token = token_position + len(e)
sentence_begins = np.repeat(sentence_begin_token, len(e))
sentence_ends = np.repeat(sentence_end_token, len(e))
sentence_begins_list.append(sentence_begins)
sentence_ends_list.append(sentence_ends)
for k in sentence.token_metadata.keys():
meta_lists[k].extend(sentence.token_metadata[k])
char_position += e[-1] + 1 # "+ 1" to account for newline
token_position += len(e)
begins = np.concatenate(begins_list)
ends = np.concatenate(ends_list)
doc_text = "\n".join(sentences_list)
char_spans = CharSpanArray(doc_text, begins, ends)
token_begins = np.arange(len(begins))
token_spans = TokenSpanArray(char_spans, token_begins, token_begins + 1)
sentence_spans = TokenSpanArray(char_spans,
np.concatenate(sentence_begins_list),
np.concatenate(sentence_ends_list))
ret = pd.DataFrame(
{"char_span": char_spans,
"token_span": token_spans
})
for k, v in meta_lists.items():
ret[k] = v
ret["sentence"] = sentence_spans
return ret
def _output_doc_to_df(tokens: pd.DataFrame,
outputs: Dict[str, List[str]],
column_name: str,
copy_tokens: bool) -> pd.DataFrame:
"""
Convert the "Python objects" representation of a document from a
CoNLL-2003 file into a `pd.DataFrame` of token metadata.
:param tokens: `pd.DataFrame` containing metadata about the tokens
of this document, as returned by `conll_2003_to_dataframe`
:param outputs: Dictionary containing outputs for this document,
with fields "iob" and "entity".
:param column_name: Name for the metadata value that the IOB-tagged data
in `input_file` encodes. If this name is present in `doc_dfs`, its value
will be replaced with the data from `input_file`; otherwise a new column
will be added to each dataframe.
:param copy_tokens: `True` if token information should be deep-copied.
:return: DataFrame with four columns:
* `char_span`: Span of each token, with character offsets.
Backed by the concatenation of the tokens in the document into
a single string with one sentence per line.
* `token_span`: Span of each token, with token offsets.
Backed by the contents of the `char_span` column.
* `ent_iob`: IOB2-format tags of tokens, corrected so that every
entity begins with a "B" tag.
* `ent_type`: Entity type names for tokens tagged "I" or "B" in
the `ent_iob` column; `None` everywhere else.
"""
if copy_tokens:
return pd.DataFrame(
{"char_span": tokens["char_span"].copy(),
"token_span": tokens["token_span"].copy(),
f"{column_name}_iob": np.array(outputs["iob"]),
f"{column_name}_type": np.array(outputs["entity"]),
"sentence": tokens["sentence"].copy()})
else:
return pd.DataFrame(
{"char_span": tokens["char_span"],
"token_span": tokens["token_span"],
f"{column_name}_iob": np.array(outputs["iob"]),
f"{column_name}_type": np.array(outputs["entity"]),
"sentence": tokens["sentence"]})
#####################################################
# External API functions below this line
def iob_to_spans(
token_features: pd.DataFrame,
iob_col_name: str = "ent_iob",
char_span_col_name: str = "char_span",
entity_type_col_name: str = "ent_type",
):
"""
Convert token tags in Inside–Outside–Beginning (IOB2) format to a series of
`TokenSpan`s of entities. See https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)
for more information on IOB2 format.
:param token_features: DataFrame of token features in the format returned by
`make_tokens_and_features`.
:param iob_col_name: Name of a column in `token_features` that contains the
IOB2 tags as strings, "I", "O", or "B".
:param char_span_col_name: Name of a column in `token_features` that
contains the tokens as a `CharSpanArray`.
:param entity_type_col_name: Optional name of a column in `token_features`
that contains entity type information; or `None` if no such column exists.
:return: A `pd.DataFrame` with the following columns:
* `token_span`: Span (with token offsets) of each entity
* `<value of entity_type_col_name>`: (optional) Entity type
"""
# Start out with 1-token prefixes of all entities.
begin_mask = token_features[iob_col_name] == "B"
first_tokens = token_features[begin_mask].index
if entity_type_col_name is None:
entity_types = np.zeros(len(first_tokens))
else:
entity_types = token_features[begin_mask][entity_type_col_name]
# Add an extra "O" tag to the end of the IOB column to simplify the logic
# for handling the case where the document ends with an entity.
iob_series = (
token_features[iob_col_name].append(pd.Series(["O"])).reset_index(drop=True)
)
entity_prefixes = pd.DataFrame(
{
"ent_type": entity_types,
"begin": first_tokens, # Inclusive
"end": first_tokens + 1, # Exclusive
"next_tag": iob_series.iloc[first_tokens + 1].values,
}
)
    df_list = []  # type: List[pd.DataFrame]
if len(entity_prefixes.index) == 0:
# Code below needs at least one element in the list for schema
df_list = [entity_prefixes]
# Iteratively expand the prefixes
while len(entity_prefixes.index) > 0:
complete_mask = entity_prefixes["next_tag"].isin(["O", "B"])
complete_entities = entity_prefixes[complete_mask]
incomplete_entities = entity_prefixes[~complete_mask].copy()
incomplete_entities["end"] = incomplete_entities["end"] + 1
incomplete_entities["next_tag"] = iob_series.iloc[
incomplete_entities["end"]
].values
df_list.append(complete_entities)
entity_prefixes = incomplete_entities
all_entities = pd.concat(df_list)
# Sort spans by location, not length.
all_entities.sort_values("begin", inplace=True)
# Convert [begin, end) pairs to spans
entity_spans_array = TokenSpanArray(
token_features[char_span_col_name].values,
all_entities["begin"].values,
all_entities["end"].values,
)
if entity_type_col_name is None:
return pd.DataFrame({"token_span": entity_spans_array})
else:
return pd.DataFrame(
{
"token_span": entity_spans_array,
entity_type_col_name: all_entities["ent_type"].values,
}
)
def spans_to_iob(
token_spans: Union[TokenSpanArray, List[TokenSpan], pd.Series],
span_ent_types: Union[str, Iterable, np.ndarray, pd.Series] = None
) -> pd.DataFrame:
"""
Convert a series of `TokenSpan`s of entities to token tags in
Inside–Outside–Beginning (IOB2) format. See
https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)
for more information on IOB2 format.
:param token_spans: An object that can be converted to a `TokenSpanArray` via
`TokenSpanArray.make_array()`. Should contain `TokenSpan`s aligned with the
target tokenization.
Usually you create this array by calling `TokenSpanArray.align_to_tokens()`.
:param span_ent_types: List of entity type strings corresponding to each of the
elements of `token_spans`, or `None` to indicate null entity tags.
:return: A `pd.DataFrame` with two columns:
* "ent_iob": IOB2 tags as strings "ent_iob"
* "ent_type": Entity type strings (or NaN values if `ent_types` is `None`)
"""
# Normalize inputs
token_spans = TokenSpanArray.make_array(token_spans)
if span_ent_types is None:
span_ent_types = [None] * len(token_spans)
elif isinstance(span_ent_types, str):
span_ent_types = [span_ent_types] * len(token_spans)
elif isinstance(span_ent_types, pd.Series):
span_ent_types = span_ent_types.values
# Define the IOB categorical type with "O" == 0, "B"==1, "I"==2
iob2_dtype = pd.CategoricalDtype(["O", "B", "I"], ordered=False)
# Handle an empty token span array
if len(token_spans) == 0:
return pd.DataFrame({
"ent_iob": pd.Series(dtype=iob2_dtype),
"ent_type": pd.Series(dtype="string")
})
# Initialize an IOB series with all 'O' entities
iob_data = np.zeros_like(token_spans.tokens.begin, dtype=np.int64)
iob_tags = pd.Categorical.from_codes(codes=iob_data, dtype=iob2_dtype)
# Assign the begin tags
iob_tags[token_spans.begin_token] = "B"
# Fill in the remaining inside tags
i_lengths = token_spans.end_token - (token_spans.begin_token + 1)
i_mask = i_lengths > 0
i_begins = token_spans.begin_token[i_mask] + 1
i_ends = token_spans.end_token[i_mask]
for begin, end in zip(i_begins, i_ends):
iob_tags[begin:end] = "I"
# Use a similar process to generate entity type tags
ent_types = np.full(len(token_spans.tokens), None, dtype=object)
for ent_type, begin, end in zip(span_ent_types,
token_spans.begin_token,
token_spans.end_token):
ent_types[begin:end] = ent_type
return pd.DataFrame({
"ent_iob": iob_tags,
"ent_type": pd.Series(ent_types, dtype="string")
})
def conll_2003_to_dataframes(input_file: str,
column_names: List[str],
iob_columns: List[bool],
space_before_punct: bool = False)\
-> List[pd.DataFrame]:
"""
Parse a file in CoNLL-2003 training/test format into a DataFrame.
CoNLL-2003 training/test format looks like this:
```
-DOCSTART- -X- -X- O
CRICKET NNP I-NP O
- : O O
LEICESTERSHIRE NNP I-NP I-ORG
TAKE NNP I-NP O
OVER IN I-PP O
AT NNP I-NP O
```
Note the presence of the surface forms of tokens at the beginning
of the lines.
:param input_file: Location of input file to read.
:param space_before_punct: If `True`, add whitespace before
punctuation characters when reconstructing the text of the document.
:param column_names: Names for the metadata columns that come after the
token text. These names will be used to generate the names of the dataframe
that this function returns.
:param iob_columns: Mask indicating which of the metadata columns after the
token text should be treated as being in IOB format. If a column is in IOB format,
the returned dataframe will contain *two* columns, holding **IOB2** tags and
entity type tags, respectively. For example, an input column "ent" will turn into
output columns "ent_iob" and "ent_type".
:returns: A list containing, for each document in the input file,
a separate `pd.DataFrame` of four columns:
* `char_span`: Span of each token, with character offsets.
Backed by the concatenation of the tokens in the document into
a single string with one sentence per line.
* `token_span`: Span of each token, with token offsets.
Backed by the contents of the `char_span` column.
* `ent_iob`: IOB2-format tags of tokens, corrected so that every
entity begins with a "B" tag.
* `ent_type`: Entity type names for tokens tagged "I" or "B" in
the `ent_iob` column; `None` everywhere else.
"""
parsed_docs = _parse_conll_file(input_file, column_names, iob_columns)
doc_dfs = [_doc_to_df(d, column_names, iob_columns, space_before_punct)
for d in parsed_docs]
return [_iob_to_iob2(d, column_names, iob_columns)
for d in doc_dfs]
def conll_2003_output_to_dataframes(doc_dfs: List[pd.DataFrame],
input_file: str,
column_name: str = "ent",
copy_tokens: bool = False) -> List[pd.DataFrame]:
"""
Parse a file in CoNLL-2003 output format into a DataFrame.
CoNLL-2003 output format looks like this:
```
O
O
I-LOC
O
O
I-PER
I-PER
```
Note the lack of any information about the tokens themselves. Note
also the lack of any information about document boundaries.
:param doc_dfs: List of `pd.DataFrame`s of token information, as
returned by `conll_2003_to_dataframes`. This is needed because
CoNLL-2003 output format does not include any information about
document boundaries.
:param input_file: Location of input file to read.
:param column_name: Name for the metadata value that the IOB-tagged data
in `input_file` encodes. If this name is present in `doc_dfs`, its value
will be replaced with the data from `input_file`; otherwise a new column
will be added to each dataframe.
:param copy_tokens: If True, deep-copy token series from the
elements of `doc_dfs` instead of using pointers.
:return: A list containing, for each document in the input file,
a separate `pd.DataFrame` of four columns:
* `char_span`: Span of each token, with character offsets.
Backed by the concatenation of the tokens in the document into
a single string with one sentence per line.
* `token_span`: Span of each token, with token offsets.
Backed by the contents of the `char_span` column.
* `<column_name>_iob`: IOB2-format tags of tokens, corrected so that every
entity begins with a "B" tag.
* `<column_name>_type`: Entity type names for tokens tagged "I" or "B" in
the `<column_name>_iob` column; `None` everywhere else.
"""
docs_list = _parse_conll_output_file(doc_dfs, input_file)
return [
_iob_to_iob2(_output_doc_to_df(tokens, outputs, column_name, copy_tokens),
[column_name], [True])
for tokens, outputs in zip(doc_dfs, docs_list)
]
def make_iob_tag_categories(entity_types: List[str]) \
-> Tuple[pd.CategoricalDtype, List[str], Dict[str, int]]:
"""
Enumerate all the possible token categories for combinations of
IOB tags and entity types (for example, I + "PER" ==> "I-PER").
Generate a consistent mapping from these strings to integers.
:param entity_types: Allowable entity type strings for the corpus
:returns: A triple of:
* Pandas CategoricalDtype
* mapping from integer to string label, as a list. This mapping is guaranteed
to be consistent with the mapping in the Pandas CategoricalDtype in the first
return value.
* mapping string label to integer, as a dict; the inverse of the second return
value.
"""
int_to_label = ["O"] + [f"{x}-{y}" for x in ["B", "I"] for y in entity_types]
label_to_int = {int_to_label[i]: i for i in range(len(int_to_label))}
token_class_dtype = pd.CategoricalDtype(categories=int_to_label)
return token_class_dtype, int_to_label, label_to_int
def add_token_classes(token_features: pd.DataFrame,
token_class_dtype: pd.CategoricalDtype = None,
iob_col_name: str = "ent_iob",
entity_type_col_name: str = "ent_type") -> pd.DataFrame:
"""
Add additional columns to a dataframe of IOB-tagged tokens containing composite
string and integer category labels for the tokens.
:param token_features: Dataframe of tokens with IOB tags and entity type strings
:param token_class_dtype: Optional Pandas categorical dtype indicating how to map
composite tags like `I-PER` to integer values.
You can use :func:`make_iob_tag_categories` to generate this dtype.
If this parameter is not provided, this function will use an arbitrary mapping
using the values that appear in this dataframe.
:param iob_col_name: Optional name of a column in `token_features` that contains the
IOB2 tags as strings, "I", "O", or "B".
:param entity_type_col_name: Optional name of a column in `token_features`
that contains entity type information; or `None` if no such column exists.
:returns: A copy of `token_features` with two additional columns, `token_class`
(string class label) and `token_class_id` (integer label).
If `token_features` contains columns with either of these names, those columns will
be overwritten in the returned copy of `token_features`.
"""
if token_class_dtype is None:
empty_mask = (token_features[entity_type_col_name].isna() |
(token_features[entity_type_col_name] == ""))
        # Build the dtype from the entity types that actually appear, and use it
        # below so that `token_class` and `token_class_id` share a single mapping.
        token_class_dtype, _, label_to_int = make_iob_tag_categories(
            list(token_features[~empty_mask][entity_type_col_name].unique())
        )
else:
label_to_int = {token_class_dtype.categories[i]: i
for i in range(len(token_class_dtype.categories))}
    elems = []  # type: List[str]
for index, row in token_features[[iob_col_name, entity_type_col_name]].iterrows():
if row[iob_col_name] == "O":
elems.append("O")
else:
elems.append(f"{row[iob_col_name]}-{row[entity_type_col_name]}")
ret = token_features.copy()
ret["token_class"] = pd.Categorical(elems, dtype=token_class_dtype)
ret["token_class_id"] = [label_to_int[l] for l in elems]
return ret
def decode_class_labels(class_labels: Iterable[str]):
"""
Decode the composite labels that :func:`add_token_classes` creates.
:param class_labels: Iterable of string class labels like "I-LOC"
:returns: A tuple of (IOB2 tags, entity type strings) corresponding
to the class labels.
"""
iobs = ["O" if t == "O" else t[:1] for t in class_labels]
types = [None if t == "O" else t.split("-")[1] for t in class_labels]
return iobs, types
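# --- Hedged usage sketch (added for illustration; not part of the original module).
# Shows how the external API functions above compose. The file name "eng.testa" is a
# hypothetical CoNLL-2003 file, and the column layout ["pos", "phrase", "ent"] with
# IOB flags [False, False, True] follows the usual CoNLL-2003 column order; both are
# assumptions here, not something this module guarantees.
if __name__ == "__main__":
    docs = conll_2003_to_dataframes("eng.testa",
                                    column_names=["pos", "phrase", "ent"],
                                    iob_columns=[False, False, True])
    first_doc = docs[0]
    # Collapse the IOB2 tags back into entity spans, then attach composite labels.
    entities = iob_to_spans(first_doc)
    labeled = add_token_classes(first_doc)
    print(entities.head())
    print(labeled[["token_class", "token_class_id"]].head())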
|
StarcoderdataPython
|
5111237
|
import os
import sys
import time
from vector import Vector2D
import math
import random
import pygame
import cProfile as profile
# GLOBALS
LEVEL = 0
RESDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'res')
#########
def load_sound(name):
"""Borrowed from http://www.pygame.org/docs/tut/chimp/ChimpLineByLine.html"""
class NoneSound:
def play(self): pass
if not pygame.mixer:
return NoneSound()
fullpath = os.path.join(RESDIR, name)
try:
sound = pygame.mixer.Sound(fullpath)
except pygame.error, message:
print('Cannot load sound: %s' % name)
raise SystemExit, message
return sound
def load_image(name, scale=1, colorkey=None):
"""Borrowed from http://www.pygame.org/docs/tut/chimp/ChimpLineByLine.html"""
fullpath = os.path.join(RESDIR, name)
try:
image = pygame.image.load(fullpath)
except pygame.error, message:
print('Cannot load image: %s' % name)
raise SystemExit, message
image = image.convert_alpha()
if colorkey is not None:
        if colorkey == -1:
            colorkey = image.get_at((0, 0))
        # RLEACCEL lives in the pygame namespace.
        image.set_colorkey(colorkey, pygame.RLEACCEL)
image = pygame.transform.scale(image, (int(image.get_width() * scale),
int(image.get_height() * scale)))
return image
def load_sliced_sprites(w, h, master_image):
    '''
    Specs:
    The master sheet can be any height.
    All sprite frames must have the same width.
    The master width must be len(frames) * frame_width.
    Assumes your resources directory is named ".".
    '''
images = []
#scalew = int(master_image.get_width() * scale)
#scaleh = int(master_image.get_height() * scale)
#master_image = pygame.transform.scale(master_image, (scalew, scaleh))
master_width, master_height = master_image.get_size()
for j in xrange(int(master_height/h)):
images.append([])
for i in xrange(int(master_width/w)):
images[j].append(master_image.subsurface((i*w,j*h,w,h)))
return images
class Player(object):
def __init__(self, pos=Vector2D(320, 240)):
self.pos = pos
side_length = 4
self.rect = pygame.rect.Rect(0, 0, 2 * side_length, 2 * side_length)
self.diag = math.sqrt(side_length ** 2 + (side_length / 2) ** 2)
self.points = [pos, pos, pos]
self.exhaust = [pos, pos, pos, pos, pos, pos, pos]
self.aspeed = math.pi / 45.0
self.angle = 0
self.maxspeed = 2.0
self.speed = Vector2D(0, -0.05)
self.velocity = Vector2D.zeros()
self.color = pygame.Color('white')
self.ret_color = pygame.Color(24, 24, 24)
self.health = 100
self.lives = 3
self.alive = True
self.score = 0
self.update(0.0)
def get_health(self):
return self.health / 10 * 10 # display health in multiples of 10
def get_lives(self):
return self.lives
def get_score(self):
return self.score
def get_direction(self):
return Vector2D(0, -1).rotate(self.angle)
def update(self, ms):
if not self.alive:
return
keys = pygame.key.get_pressed()
# increase current_speed, and therefore velocity
# only rotate player's velocity when accelerating
#if keys[pygame.K_UP] and self.velocity.Y > -self.maxspeed:
# self.current_speed -= self.lspeed
# self.velocity.Y = self.current_speed
# self.velocity = self.velocity.rotate(self.angle)
self.accelerating = False
if keys[pygame.K_UP]:
new = self.velocity + self.speed.rotate(self.angle)
if not new.length() > self.maxspeed:
self.accelerating = True
self.velocity = new
# turn the player with right/left keys
if keys[pygame.K_LEFT]:
self.angle -= self.aspeed
elif keys[pygame.K_RIGHT]:
self.angle += self.aspeed
# update the player's position
self.pos += self.velocity
self.rect.center = self.pos
# update the lines that form our ship, they are always rotated with
# respect to the player's angle
self.points[0] = Vector2D(0, -self.diag).rotate(self.angle) + self.pos
self.points[1] = Vector2D(self.diag, 2 * self.diag).rotate(self.angle) + self.pos
self.points[2] = Vector2D(-self.diag, 2 * self.diag).rotate(self.angle) + self.pos
self.exhaust[0] = self.points[1]
self.exhaust[1] = Vector2D(self.diag, 3 * self.diag).rotate(self.angle) + self.pos
self.exhaust[2] = Vector2D(self.diag / 2, 2.5 * self.diag).rotate(self.angle) + self.pos
self.exhaust[3] = Vector2D(0, 4 * self.diag).rotate(self.angle) + self.pos
self.exhaust[4] = Vector2D(-self.diag / 2, 2.5 * self.diag).rotate(self.angle) + self.pos
self.exhaust[5] = Vector2D(-self.diag, 3 * self.diag).rotate(self.angle) + self.pos
self.exhaust[6] = self.points[2]
def render(self, surface, ms):
if self.alive:
pygame.draw.lines(surface, self.color, True, self.points)
if self.accelerating:
pygame.draw.lines(surface, self.color, True, self.exhaust)
class Alien(object):
def __init__(self, radius, viewport):
self.radius = radius
self.color = pygame.Color('red')
self.velocity = Vector2D(
random.randint(-2, 2),
random.randint(-2, 2)
)
self.pos = Vector2D(
random.randint(viewport.left, viewport.right),
random.randint(viewport.top, viewport.bottom)
)
self.sq_offsets = [
Vector2D(1.5 * radius, .5 * radius),
Vector2D(1.5 * radius, -.5 * radius),
Vector2D(-1.5 * radius, -.5 * radius),
Vector2D(-1.5 * radius, .5 * radius)
]
self.small_offsets = [Vector2D(-1.5 * radius, 0), Vector2D(1.5 * radius, 0)]
self.points = [self.pos for i in self.sq_offsets]
self.small_circles = [self.pos for i in self.small_offsets]
def update(self, ms):
self.pos += self.velocity
for i, point in enumerate(self.sq_offsets):
self.points[i] = point + self.pos
for i, point in enumerate(self.small_offsets):
self.small_circles[i] = point + self.pos
def render(self, surface, ms):
pygame.draw.lines(surface, self.color, True, self.points)
for c in self.small_circles:
pygame.draw.circle(surface, self.color, (int(c.X), int(c.Y)), self.radius / 2, 1)
pygame.draw.circle(surface, self.color, (int(self.pos.X), int(self.pos.Y)), self.radius, 1)
class Asteroid(object):
def __init__(self, pos, size):
self.pos = pos
self.size = size
self.radius = random.randint(size - (size / 10), size + (size / 10))
xspeed = (random.random() * 2 - 1) / random.randint(1, 4)
yspeed = (random.random() * 2 - 1) / random.randint(1, 4)
self.velocity = Vector2D(xspeed, yspeed)
num_sides = random.randint(8, 16)
maxd = self.radius / 2
a = 2 * math.pi / num_sides
v = Vector2D(self.radius, 0.0)
self.points = list()
self.offsets = list()
for i in range(num_sides):
xy = random.randint(0, 1)
d = random.randint(-maxd, maxd)
if xy:
offset = v + Vector2D(d, 0)
self.offsets.append(offset)
self.points.append(offset + self.pos)
else:
offset = v + Vector2D(0, d)
self.offsets.append(offset)
self.points.append(offset + self.pos)
v = v.rotate(a)
self.rect = pygame.rect.Rect(0, 0, int(self.radius * 1.9), int(self.radius * 1.9))
self.rect.center = (self.pos.X, self.pos.Y)
self.update(0.0)
self.color = pygame.Color('white')
self.alive = True
def update(self, ms):
self.pos += self.velocity
self.rect.center = (self.pos.X, self.pos.Y)
# just update each point comprising the asteroid
for i, offset in enumerate(self.offsets):
self.points[i] = offset + self.pos
def render(self, surface, ms):
#pygame.draw.circle(surface, self.color, (int(self.pos.X), int(self.pos.Y)), self.radius, 1)
pygame.draw.lines(surface, self.color, True, self.points)
class AsteroidField(object):
def __init__(self, level, viewport, forbidden_rect):
self.viewport = viewport
# Asteroid radius-related variables
self.min_radius = (viewport.width + viewport.height) / 200
self.max_radius = (viewport.width + viewport.height) / 40
self.radius_range = self.max_radius - self.min_radius
self.small = self.min_radius
self.med = self.small + self.radius_range / 2
self.big = self.max_radius
self.big_offsets = [
Vector2D(self.med, self.med),
#Vector2D(self.med, -self.med),
#Vector2D(-self.med, self.med),
Vector2D(-self.med, -self.med)
]
self.med_offsets = [Vector2D(self.med, 0), Vector2D(-self.med, 0)]
count = level + 4
self.asteroids = [
Asteroid(
Vector2D(
random.randint(0, viewport.width),
random.randint(0, viewport.height)
), self.big
)
for i in range(count)
]
# move each asteroid if it initially collides with the player
for i, a in enumerate(self.asteroids):
if a.rect.colliderect(forbidden_rect):
a.pos.X += forbidden_rect.centerx + a.radius
a.pos.Y += forbidden_rect.centery + a.radius
a.update(0.0) # must call update to properly update its pos
def kill(self, a):
if a.size == self.big:
children = [
Asteroid(Vector2D(a.pos.X, a.pos.Y) + off, self.med)
for off in self.big_offsets
]
self.asteroids.extend(children)
elif a.size == self.med:
children = [
Asteroid(Vector2D(a.pos.X, a.pos.Y) + off, self.small)
for off in self.med_offsets
]
self.asteroids.extend(children)
# remove/delete the destroyed asteroid
self.asteroids.remove(a)
del(a)
def update(self, ms):
for a in self.asteroids:
if a.pos.X < self.viewport.left:
a.pos.X = self.viewport.right
elif a.pos.X > self.viewport.right:
a.pos.X = self.viewport.left
if a.pos.Y < self.viewport.top:
a.pos.Y = self.viewport.bottom
elif a.pos.Y > self.viewport.bottom:
a.pos.Y = self.viewport.top
a.update(ms)
def render(self, surface, ms):
for a in self.asteroids:
a.render(surface, ms)
class Camera(object):
def __init__(self, size, rect, lock):
self.rect = pygame.Rect((0,0),size)
self.bounds = rect
self.lock = lock
def update(self, ms):
keys = pygame.key.get_pressed()
if keys[pygame.K_EQUALS]:
self.rect.inflate_ip(2, 2)
elif keys[pygame.K_MINUS]:
self.rect.inflate_ip(-2, -2)
self.rect.center = self.lock()
self.rect.clamp_ip(self.bounds)
class HUD(object):
"""heads up display shows Health, Score and Level"""
def __init__(self, size, fontsize, lifefunc, healthfunc,
levelfunc, scorefunc):
self.width, self.height = size
self.font = pygame.font.Font(None, fontsize)
self.color = pygame.Color('white')
self.bgcolor = pygame.Color('black')
self.lifefunc = lifefunc
self.healthfunc = healthfunc
self.levelfunc = levelfunc
self.scorefunc = scorefunc
self.num_items = 4
self.surfaces = [None for i in range(self.num_items)]
def update(self, ms):
#for i, func in enumerate(self.funcs):
self.surfaces[0] = self.font.render("Lives: " +
str(int(self.lifefunc())), 1, self.color)
self.surfaces[1] = self.font.render("Health: " +
str(int(self.healthfunc())), 1, self.color)
self.surfaces[2] = self.font.render("Level: " +
str(int(self.levelfunc())), 1, self.color)
self.surfaces[3] = self.font.render("Score: " +
str(int(self.scorefunc())), 1, self.color)
def render(self, surface, ms):
for i, s in enumerate(self.surfaces):
surface.blit(s, (i * (self.width / (self.num_items)) + 10, 10))
class Bullet(object):
def __init__(self, center, direction):
self.radius = 1
self.color = pygame.Color('white')
self.pos = center
self.rect = pygame.rect.Rect(0, 0, 2*self.radius, 2*self.radius)
self.rect.center = (center.X, center.Y)
self.velocity = direction.normal() * 5
self.alive = True
self._age = 0
self._lifetime = 2000
def update(self, ms):
if self.alive:
self._age += ms
# kill itself if it reaches its max lifetime
if self._age > self._lifetime:
self.alive = False
self.pos += self.velocity
self.rect.center = (self.pos.X, self.pos.Y)
def render(self, surface, ms):
if self.alive:
pygame.draw.circle(surface, self.color, (int(self.pos.X), int(self.pos.Y)), self.radius)
class Explosion(object):
def __init__(self, image, center):
self._images = image
self.image = self._images[0][0]
self.rect = self.image.get_rect()
self.rect.center = center
self.alive = True
self._frameset = 0
self._frame = 0
self._delay = 99
self._frame_timer = 0
# determines whether sprite should look forward when not moving
def update(self, ms):
if self.alive:
self._frame_timer += ms
if self._frame_timer > self._delay:
self._frame += 1
if self._frame >= len(self._images[self._frameset]):
self._frame = 0
self.alive = False
self.image = self._images[self._frameset][self._frame]
self._frame_timer = 0
def render(self, surface, ms):
if self.alive:
surface.blit(self.image, self.rect)
def get_level():
"""Hack to return the current level."""
return LEVEL
def main():
global LEVEL
pygame.init()
pygame.font.init()
size = (640, 480)
s = 5
screen = pygame.display.set_mode(size)
viewport = pygame.rect.Rect((0,0),size)
clock = pygame.time.Clock()
player0 = Player(Vector2D(300, 200))
alien = None
LEVEL = 1
afield = AsteroidField(LEVEL, viewport, player0.rect)
explosion_sound = 'explosion.wav'
explode_sound = load_sound(explosion_sound)
explosion_image = 'explosion-sprite.png'
small_explode_image = load_image(explosion_image, scale=1)
small_explode_sprite = load_sliced_sprites(20, 20, small_explode_image)
med_explode_image = load_image(explosion_image, scale=2)
med_explode_sprite = load_sliced_sprites(40, 40, med_explode_image)
big_explode_image = load_image(explosion_image, scale=3)
big_explode_sprite = load_sliced_sprites(60, 60, big_explode_image)
# temp "pool" for updating/rendering particles
explosions = list()
bullets = list()
hud_items = [player0.get_lives, player0.get_health, get_level, player0.get_score]
#for player in players:
# healths.append(player.get_health)
hud = HUD(size, 32, *hud_items)
game_over_font = pygame.font.Font(None, 72)
game_over_msg = game_over_font.render("GAME OVER", 1, pygame.Color('white'))
offX = (viewport.width - game_over_msg.get_width()) / 2
offY = (viewport.height - game_over_msg.get_height()) / 2
#camera = Camera(size, tilemap0.rect, player0.get_pos)
black = pygame.Color('black')
screen.fill(black)
pygame.display.update()
game_over = False
running = True
while running:
events = pygame.event.get()
for e in events:
if e.type == pygame.QUIT:
running = False
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_ESCAPE:
running = False
if e.key == pygame.K_SPACE and player0.alive:
bullets.append(Bullet(player0.pos, player0.get_direction()))
#if random.randint(0, 100) > 95:
# alien = Alien(20, viewport)
for b in bullets:
# kill bullets that have left the viewport
#if not viewport.colliderect(b.rect):
# b.alive = False
#else:
if b.pos.X < 0:
b.pos.X = size[0]
elif b.pos.X > size[0]:
b.pos.X = 0
elif b.pos.Y < 0:
b.pos.Y = size[1]
elif b.pos.Y > size[1]:
b.pos.Y = 0
# brute force collision checking for each bullet -> each asteroid
for a in afield.asteroids:
if a.rect.colliderect(b.rect):
b.alive = False
player0.score += 1
afield.kill(a)
if a.radius < afield.small:
explosions.append(Explosion(small_explode_sprite, a.rect.center))
elif a.radius < afield.med:
explosions.append(Explosion(med_explode_sprite, a.rect.center))
else:
explosions.append(Explosion(big_explode_sprite, a.rect.center))
if len(afield.asteroids) == 0:
LEVEL += 1
afield = AsteroidField(LEVEL, viewport, player0.rect)
for a in afield.asteroids:
if player0.alive and a.rect.colliderect(player0.rect):
player0.health -= 1
# remove player life
if player0.health <= 0:
explode_sound.play()
player0.lives -= 1
if player0.lives <= 0:
explosions.append(
Explosion(
big_explode_sprite,
player0.rect.center
)
)
# game over
player0.alive = False
game_over = True
else:
player0.health = 100
# wrap player position when offscreen
if player0.pos.X < 0:
player0.pos.X = size[0]
elif player0.pos.X > size[0]:
player0.pos.X = 0
elif player0.pos.Y < 0:
player0.pos.Y = size[1]
elif player0.pos.Y > size[1]:
player0.pos.Y = 0
player0.update(clock.get_time())
afield.update(clock.get_time())
if alien is not None:
alien.update(clock.get_time())
for bull in bullets:
if not bull.alive:
bullets.remove(bull)
del(bull)
else:
bull.update(clock.get_time())
for pop in explosions:
if not pop.alive:
explosions.remove(pop)
del(pop)
else:
pop.update(clock.get_time())
hud.update(clock.get_time())
screen.fill(black)
if game_over:
screen.blit(game_over_msg, (offX, offY))
player0.render(screen, clock.get_time())
for b in bullets:
b.render(screen, clock.get_time())
for p in explosions:
p.render(screen, clock.get_time())
afield.render(screen, clock.get_time())
if alien is not None:
alien.render(screen, clock.get_time())
hud.render(screen, clock.get_time())
pygame.display.flip()
clock.tick(60)
pygame.time.delay(10)
# game over
pygame.quit()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
5132458
|
<filename>medical_prescription/user/views/logoutview.py
# Django
from django.shortcuts import redirect
from django.contrib import auth
from django.views.generic import View
class LogoutView(View):
'''
Logout of User.
'''
    # Log the user out and redirect to the home page.
def get(self, request):
auth.logout(request)
return redirect('/')
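# Hedged wiring sketch (not part of the original file): this view would typically be
# registered in the app's urls.py along the lines below; the URL name "logout" and the
# use of django.urls.path are assumptions about this project's Django setup.
#
#   from django.urls import path
#   from user.views.logoutview import LogoutView
#
#   urlpatterns = [path('logout/', LogoutView.as_view(), name='logout')]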
|
StarcoderdataPython
|
8171069
|
import RPi.GPIO as GPIO
from time import sleep
SLEEPTIME = 1
PIN = 18
GPIO.setmode(GPIO.BOARD)
GPIO.setup(PIN, GPIO.IN, pull_up_down = GPIO.PUD_UP)
def event_callback(channel):
    # RPi.GPIO passes the triggering channel number to the callback.
    print("Magnetic field is detected")
GPIO.add_event_detect(PIN, GPIO.FALLING, callback=event_callback, bouncetime=200)
try:
while True:
sleep(SLEEPTIME)
except KeyboardInterrupt:
print("\nprogram stopped")
GPIO.cleanup()
|
StarcoderdataPython
|
147665
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 18 22:59:59 2020
@author: CS
Check the read-me file for an in-depth summary of the problem
"""
import numpy as np
import sys
sys.path.append('..')
import TrussAnalysis as ta
class environment:
"""
    The environment will act as a container for the data in the problem.
The boundary node coordinates and element fixities are defined as static
values.
"""
def __init__(self, forces = np.array([1000., 1000., 0.]),trussMat = 10):
self.forces = forces
"""
        Here the nodes and the connectivities between each node are defined for
        the problem. The connectivity list assumes 1-based node numbering.
"""
self.xNodeCoords = np.array([0.,1.,0.,1.,0.,1.,1.])
self.yNodeCoords = np.array([0.,0.,1.,1.,2.,2.,3.])
self.Connectivity = [[1,2],[1,4],[2,4],[1,3], [3,4],[3,6],[3,5],[4,6], [5,6], [5,7], [6,7]]
self.nodeIds = np.arange(len(self.Connectivity)) + 1
self.trussMat = trussMat
def ftest(individual, environment):
"""
    Tests an individual and returns the result of that test.
    This function is essentially a wrapper that converts data from our
    individual and environment into a form usable by the truss analysis
    functions.
    Note that several values are returned as a result; these will be
    processed later to define the fitness value.
Parameters
----------
individual : Naturalize Indivdual
The input indivdual.
environment : Naturalize Environment
The input environment.
"""
Areas = individual.genotype[0]
"""
Make the truss defined in the problem definition. This is fixed.
    We could condense this by returning a list and unpacking it into the
    function call with *list; however, we'll write it out explicitly so this
    example stays clear.
"""
Forces = environment.forces
xNodeCoords = environment.xNodeCoords
yNodeCoords = environment.yNodeCoords
Connectivity = environment.Connectivity
trussMat = environment.trussMat
nodeIds = environment.nodeIds
result = ta.runTrussAnalysis(Areas, Forces, nodeIds, xNodeCoords,
yNodeCoords, Connectivity, trussMat)
disp, volume, Forces = result
return disp, volume, Forces
def fitness_basic(individual, environment):
"""
    Determines how good each solution is; this is the quantity that is minimized.
    In this case, the displacement in the x direction is minimized.
Parameters
----------
individual : Naturalize Indivdual
The input indivdual.
environment : Naturalize Environment
The input environment.
Returns
-------
disp : float
The output displacement of the analysis.
"""
disp, volumes, _ = individual.result
dx = disp[0]
disp = np.abs(dx)
return disp
def fitness_normalized(individual, environment):
"""
    Determines how good each solution is; this is the quantity that is minimized.
In this function, the displacement multiplied by volume is minimized.
This will make solutions with a lower volume more attractive.
Parameters
----------
individual : Naturalize Indivdual
The input indivdual.
environment : Naturalize Environment
The input environment.
Returns
-------
normDisp : float
The output displacement of the analysis.
"""
disp, volumes, _ = individual.result
dx = disp[0]
normDisp = np.abs(dx * np.sum(volumes))
return normDisp
def fitness_Volume(individual, environment):
"""
    The fitness function; this value is what is actually minimized.
In this case, the volume is minimized, assuming displacement is below some
limit. This will make solutions with a lower volume more attractive.
Parameters
----------
individual : Naturalize Indivdual
The input indivdual.
environment : Naturalize Environment
The input environment.
Returns
-------
volume : float
The output volume of the truss.
"""
disp, volumes, _ = individual.result
dx = disp[0]
"""
The limit could be placed within the environment function.
"""
lim = 0.01
if dx < lim:
volume = np.sum(volumes)
# normDisp = np.abs(dx * np.sum(volumes))
else:
volume = 100*np.sum(volumes)
return volume
def plotIndividual(data):
"""
Makes a matplotlib plot of the truss.
"""
areas = data
xNodeCoords = np.array([0.,1.,0.,1.,0.,1.,1.])
yNodeCoords = np.array([0.,0.,1.,1.,2.,2.,3.])
Connectivity = [[1,2],[1,4],[2,4],[1,3], [3,4], [3,6], [3,5], [4,6], [5,6], [5,7], [6,7]]
nodeIds = np.arange(len(Connectivity)) + 1
fig, ax = ta.plotTruss(areas, nodeIds, xNodeCoords, yNodeCoords, Connectivity)
maxArea = max(areas)
style_blue(fig, ax, areas, maxArea)
return fig, ax
def style_blue(fig, ax, areas, maxArea):
"""
Used to make the animated plots
"""
fig.set_figwidth(8)
fig.set_figheight(6)
for text in ax.texts:
text.set_fontsize(10)
ax.texts = []
for ii, line in enumerate(ax.lines):
line.set_linewidth(5*areas[ii]/maxArea)
line.set_color("steelblue")
ax.set_facecolor("skyblue")
ax.collections[0].set_color('cornsilk')
ax.collections[0].set_zorder(10)
ax.collections[0].set_linewidth(2)
# fig.savefig("mygraph.png")
# ax.axis('off')
ax.set_xlim([-1.5, 2.5])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# ax.annotate("", xy=(0.9, 3), xytext=(0.5, 3), arrowprops=dict(arrowstyle="->", color = 'red') )
return fig, ax
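# --- Hedged demo (added for illustration; not part of the original module).
# Builds the fixed environment and plots a truss in which every member gets the same
# cross-sectional area, so the geometry can be inspected before running the GA.
# The uniform area of 0.001 is an arbitrary placeholder, and this assumes the
# TrussAnalysis module imported above as `ta` is importable from the parent directory.
if __name__ == "__main__":
    env = environment()
    uniform_areas = np.ones(len(env.Connectivity)) * 0.001  # one area per member
    fig, ax = plotIndividual(uniform_areas)
    fig.savefig("uniform_truss.png")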
|
StarcoderdataPython
|
9662998
|
# Credentials generated from your Twilio account
account_sid = 'Your Twilio SID'
auth_token = '<PASSWORD>'
my_cell = 'Number you want to SMS'
my_twilio = 'Your Twilio number'
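# Hedged usage sketch (not part of the original file): these values would normally be
# consumed by the Twilio REST client, roughly as below; the message body is a placeholder.
#
#   from twilio.rest import Client
#   client = Client(account_sid, auth_token)
#   client.messages.create(to=my_cell, from_=my_twilio, body="Hello from Twilio")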
|
StarcoderdataPython
|
1918223
|
"""
Low-Level Logging
A module to allow a ton of data (e.g. all SSL unencrypted and encrypted IO) to
be logged but not actually slow the server down unless the thing is being traced
or the whole server is logging super verbose.
Use like:
import ll
import faststat
ml = ll.LLogger()
....
ml.la("format string {0} {1}", var0, var1) # always log
ml.ld("format string 2 {0}", var0) # log most often
ml.ld("format string 3 {0}", var0) # log most often
ml.ld2("format string 4 {0}", var0) # log less often
ml.ld3("format string 5 {0}", var0) # log only at very high verbosity
ml.ld4("format string 5 {0}", var0) # log only at highest verbosity (including greenlet switch)
For best efficiency, use !r in format string, rather than calling str() or repr() on
arguments.
<EMAIL> for details.
"""
import inspect
from collections import defaultdict
from datetime import datetime
import os
import sys
the_file = sys.stdout
log_msgs = defaultdict(int)
LOG_LEVELS = {'NEVER': -1,
'NONE': 0,
'DEBUG': 1,
'DEBUG2': 2,
'DEBUG3': 3,
'DEBUG4': 4
}
_log_level = LOG_LEVELS['NONE']
def print_log_summary():
"""Prints out the hash map of format strings and counts of usage."""
return ["%s: %d\n".format(k, v) for k, v in log_msgs.items()]
def get_log_level():
"""Set global low level log level"""
return _log_level
def set_log_level(level):
"""Set global low lovel log level"""
global _log_level
if level is None:
level = 0
level = max(level, LOG_LEVELS['NEVER'])
level = min(level, LOG_LEVELS['DEBUG4'])
_log_level = level
def use_the_file(name="lll.txt"):
"""Use a file instead of stdout
Relative to cwd unless starts with /"""
global the_file
if name[0] == "/":
path = name
else:
path = os.getcwd() + "/./" + name
the_file = open(path, "a")
def use_std_out():
"""Use stdout instead of a file - just for tests"""
global the_file
the_file = sys.stdout
def log_failure(bad_str):
"""Stats on failed logs"""
try:
import context
context.get_context().stats["log.failure"].add(1)
context.get_context().stats["log.failure." + bad_str].add(1)
if context.get_context().log_failure_print:
if context.get_context().stats["log.failure"].n < 10:
print "log failure - " + bad_str
except:
pass
class LLogger(object):
"""Instantiate this to get the logger object; it grabs module data"""
def __init__(self, tag="", trace_mod=False):
mod = inspect.getmodule(inspect.stack()[1][0])
if mod:
self.caller_mod = mod.__file__.split(".")[-2].upper()
else:
self.caller_mod = "UNKNOWN"
self.trace_mod = trace_mod
self.la = self.log_always
self.ld = self.log_debug
self.ld2 = self.log_debug2
self.ld3 = self.log_debug3
self.ld4 = self.log_debug4
self.tag = tag
def log_always(self, *args, **kw):
"""Log unless never"""
global log_msgs
log_msgs[self.caller_mod + "--" + args[0]] += 1
if self.trace_mod or _log_level >= 0:
import gevent # for getcurrent
try:
msg = apply(args[0].format, tuple(args[1:]))
if "CAL-PRINT" in msg:
print >> the_file, msg
else:
print >> the_file, "%s %s (%s):%s" % (datetime.now().strftime("%d/%H:%M:%S.%f"),
self.caller_mod, id(gevent.getcurrent()),
self.tag), msg
except:
log_failure(args[0])
def log_debug(self, *args, **kw):
"""Log only with -d"""
log_msgs[self.caller_mod + "--" + args[0]] += 1
if self.trace_mod or _log_level >= 1:
import gevent # for getcurrent
try:
msg = apply(args[0].format, tuple(args[1:]))
print >> the_file, "%s %s D (%s):%s" % (datetime.now().strftime("%d/%H:%M:%S.%f"),
self.caller_mod, id(gevent.getcurrent()),
self.tag), msg
except:
log_failure(args[0])
def log_debug2(self, *args, **kw):
"""Log only with -dd"""
log_msgs[self.caller_mod + "--" + args[0]] += 1
if self.trace_mod or _log_level >= 2:
import gevent # for getcurrent
try:
msg = apply(args[0].format, tuple(args[1:]))
print >> the_file, "%s %s D2 (%s):%s" % (datetime.now().strftime("%d/%H:%M:%S.%f"),
self.caller_mod, id(gevent.getcurrent()),
self.tag), msg
except:
log_failure(args[0])
def log_debug3(self, *args, **kw):
"""Log only with -ddd"""
log_msgs[self.caller_mod + "--" + args[0]] += 1
if self.trace_mod or _log_level >= 3:
import gevent # for getcurrent
try:
msg = apply(args[0].format, tuple(args[1:]))
print >> the_file, "%s %s D3 (%s):%s" % (datetime.now().strftime("%d/%H:%M:%S.%f"),
self.caller_mod, id(gevent.getcurrent()),
self.tag), msg
except:
log_failure(args[0])
def log_debug4(self, *args, **kw):
"""Log only with -dddd"""
global log_msgs
log_msgs[self.caller_mod + "--" + args[0]] += 1
if self.trace_mod or _log_level >= 4:
import gevent # for getcurrent
try:
msg = apply(args[0].format, tuple(args[1:]))
print >> the_file, "%s %s D4 (%s):%s" % (datetime.now().strftime("%d/%H:%M:%S.%f"),
self.caller_mod, id(gevent.getcurrent()),
self.tag), msg
except:
log_failure(args[0])
|
StarcoderdataPython
|
3545382
|
<reponame>Jitsusama/lets-do-dns
"""Introduce time delays during program execution."""
import time
def sleep(delay):
"""Pause program execution until delay (in seconds) has expired."""
time.sleep(delay)
|
StarcoderdataPython
|
3442891
|
# GW2 API wrapper class
import os
import json
from . import session
from functions import return_config
"""
GW2 API wrapper class. Configurations for endpoints are
maintained in the src/configs/config.json file for ease
of updating in the event an endpoint changes.
"""
class GW2Wrapper(object):
def __init__(self):
self.configs = return_config()
# Internal URL builder function
def _url_builder(self, endpoint):
return "{0}/{1}/{2}".format(
self.configs['gw2_api_config']['api_base_path'],
self.configs['gw2_api_config']['api_version'],
endpoint
)
# Internal general request function
def _api_request(self, url):
response = session.get(url)
return response.json()
def worlds(self):
return self._api_request(self._url_builder(self.configs['gw2_endpoints']['worlds']))
def wvw_matches(self):
pass
|
StarcoderdataPython
|
3461181
|
<reponame>Zyjacya-In-love/Pedestrian-Detection-on-YOLOv3<gh_stars>10-100
import os
import cv2
import numpy as np
from PIL import Image
from timeit import default_timer as timer
from keras_yolov3_detect import YOLO_detector
'''
For the Caltech test dataset ./image, use keras_yolov3_detect.py to predict the bounding box information for each image.
For each object in each image, the predicted bounding box is expressed by 4 ints: [left, top, width, height].
PS: one line stores all of the bounding box coordinates for one image; Test: 10000 lines.
'''
def solve(detector):
original_image_path = './code/data-USA/images/'
save_path = "./code/data-USA/res/YOLOv3/"
second_name = 'sec.npy'
if not os.path.exists(save_path):
os.makedirs(save_path)
take_seconds = []
cnt = 0
for root, sub_dir, files in os.walk(original_image_path):
now_save_path = save_path + '/'.join(root.split('/')[4:])
if not os.path.exists(now_save_path):
os.makedirs(now_save_path)
for file in files:
image_path = os.path.join(root, file)
img = Image.open(image_path)
start = timer()
predict_bb, predict_scores = detector.detect_image(img)
end = timer()
take_seconds.append(end - start)
            predict_txt_file = open(now_save_path + '/' + '%s.txt' % (file.split('.')[0]), 'w')  # write the prediction txt file
# print("predict_bb : ", predict_bb)
for i, bbox in enumerate(predict_bb):
x1, y1, x2, y2 = bbox
bbox = (x1, y1, x2-x1, y2-y1, predict_scores[i])
# print(",".join([str(a) for a in bbox]))
predict_txt_file.write(",".join([str(a) for a in bbox]) + '\n')
cnt += 1
take_seconds = np.array(take_seconds)
# np.save(save_path + second_name, take_seconds)
avg_sec = np.mean(take_seconds)
print("avg_sec : ", avg_sec)
if __name__ == '__main__':
detector = YOLO_detector(score=0)
solve(detector)
|
StarcoderdataPython
|
30939
|
<filename>compiler.py
from sphere_engine import CompilersClientV4
from sphere_engine.exceptions import SphereEngineException
import time
# define access parameters
accessToken = '77501c36922866a03b1822b4508a50c6'
endpoint = 'dd57039c.compilers.sphere-engine.com'
# initialization
client = CompilersClientV4(accessToken, endpoint)
# API usage
# source = 'function f() {return "hello"; } print(f());' # Javascript
# compiler = 112 # Javascript
source = 'print("hello world please work!!!!!")' # Python
compiler = 116 # Python
input = '2017'
# Set default value for response
response = None
# Sends the submission and checks for errors in sending the submission
try:
response = client.submissions.create(source, compiler, input)
# response['id'] stores the ID of the created submission
except SphereEngineException as e:
if e.code == 401:
print('Invalid access token')
elif e.code == 402:
print('Unable to create submission')
elif e.code == 400:
print('Error code: ' + str(e.error_code) + ', details available in the message: ' + str(e))
# Set default value for response data
responseData = None
print("Code submitted is: ")
print(source)
print("Submission ID is: " + str(response.get('id')))
print()
# Try getting submission ID and check if there are errors
try:
client.submissions.get(response.get('id'))
except SphereEngineException as e:
if e.code == 401:
print('Invalid access token')
elif e.code == 403:
print('Access to the submission is forbidden')
elif e.code == 404:
print('Submission does not exist')
# Uses the submission ID and checks every 5 seconds to see if the submission has been 'accepted' (finished processing)
while client.submissions.get(response.get('id')).get('result').get('status').get('name') != 'accepted' :
responseData = client.submissions.get(response.get('id'))
print(responseData) # for test purposes
print("Status is: " + responseData.get('result').get('status').get('name'))
time.sleep(5)
print("Status is: " + client.submissions.get(response.get('id')).get('result').get('status').get('name'))
print()
rawresponse = None
# Get the output of the query
try:
rawresponse = client.submissions.getStream(response.get('id'), 'output')
except SphereEngineException as e:
if e.code == 401:
print('Invalid access token')
elif e.code == 403:
print('Access to the submission is forbidden')
elif e.code == 404:
print('Non existing resource, error code: ' + str(e.error_code) + ', details available in the message: ' + str(e))
elif e.code == 400:
print('Error code: ' + str(e.error_code) + ', details available in the message: ' + str(e))
print("Output returned is: ")
print(rawresponse)
|
StarcoderdataPython
|
1721289
|
"""User Model."""
from masoniteorm.models import Model
from masoniteorm.scopes import SoftDeletesMixin
class User(Model, SoftDeletesMixin):
"""User Model."""
__timezone__ = "Asia/Hong_Kong"
__fillable__ = ["name", "email", "password"]
__auth__ = "email"
|
StarcoderdataPython
|
11202425
|
<gh_stars>0
import unittest
import pygame
from agagla import game_state_manager
from agagla.game_state_manager import GameState
from agagla import __main__ as main
import time
class GSMTestCase(unittest.TestCase):
def setUp(self):
main.init()
pygame.init()
self.gsm = game_state_manager.GameStateManager()
def test_state_changes(self):
# tests that state changes when conditions for change are met
self.assertEqual(self.gsm.get_state(), GameState.menu)
self.gsm.start_game()
self.gsm._force_tick()
self.assertEqual(self.gsm.get_state(), GameState.running)
self.gsm.lives = 0
self.gsm._force_tick()
self.assertEqual(self.gsm.get_state(), GameState.game_over)
def test_kill_player(self):
# tests that player dies and lives go down
self.gsm.start_game()
self.gsm._force_tick()
old_lives = self.gsm.lives
self.gsm.get_player_ship().set_health(0)
self.gsm._force_tick()
self.assertEqual(old_lives - 1, self.gsm.lives)
def test_score_increase(self):
# tests that score goes up when an enemy dies
self.gsm.start_game()
self.gsm._force_tick()
old_score = self.gsm.game_score
self.gsm.get_enemies()[0].set_health(0)
self.gsm._force_tick()
self.assertGreater(self.gsm.game_score, old_score)
def test_tick_timing(self):
# tests that ticking is locked to the tick rate
successful_ticks = 0
start_time = time.time()
while successful_ticks < 10:
if self.gsm._tick():
successful_ticks += 1
self.assertEqual((time.time() - start_time) // (1 / self.gsm.tick_rate), successful_ticks)
def tearDown(self):
self.gsm = None
game_state_manager.GameStateManager._instance = None
pygame.quit()
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
5037215
|
<gh_stars>0
import discord, numpy, textwrap, requests, wand
from io import BytesIO
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
from wand.image import Image as WandImage
from PIL import Image, ImageFilter, ImageDraw, ImageOps, ImageFont, ImageSequence
class image(commands.Cog, command_attrs={'cooldown': commands.Cooldown(1, 10, commands.BucketType.user)}):
"""Image manipulation commands"""
def __init__(self, bot):
self.bot = bot
self.invis = 0x2F3136
# Pillow Image Manipulation
@staticmethod
def do_mc(txt):
image = Image.open(requests.get('https://i.imgur.com/JtNJFZy.png', stream=True).raw).convert("RGBA")
draw = ImageDraw.Draw(image)
font_path = "cogs/assets/minecraft.ttf"
font = ImageFont.truetype(font_path, 17)
draw.text((60, 30), txt, (255, 255, 255), font=font)
buffer = BytesIO()
image.save(buffer, format="PNG")
buffer.seek(0)
return buffer
@staticmethod
def do_ascii(image):
image = Image.open(image)
sc = 0.1
gcf = 2
bgcolor = (13, 2, 8)
re_list = list(
r" .'`^\,:;Il!i><~+_-?][}{1)(|\/tfjrxn"
r"uvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$"
)
chars = numpy.asarray(re_list)
font = ImageFont.load_default()
letter_width = font.getsize("x")[0]
letter_height = font.getsize("x")[1]
wcf = letter_height / letter_width
img = image.convert("RGB")
width_by_letter = round(img.size[0] * sc * wcf)
height_by_letter = round(img.size[1] * sc)
s = (width_by_letter, height_by_letter)
img = img.resize(s)
img = numpy.sum(numpy.asarray(img), axis=2)
img -= img.min()
img = (1.0 - img / img.max()) ** gcf * (chars.size - 1)
lines = ("\n".join(
("".join(r) for r in chars[img.astype(int)]))).split("\n")
new_img_width = letter_width * width_by_letter
new_img_height = letter_height * height_by_letter
new_img = Image.new("RGBA", (new_img_width, new_img_height), bgcolor)
draw = ImageDraw.Draw(new_img)
y = 0
line_idx = 0
for line in lines:
line_idx += 1
draw.text((0, y), line, (0, 255, 65), font=font)
y += letter_height
buffer = BytesIO()
new_img.save(buffer, format="PNG")
buffer.seek(0)
return buffer
@staticmethod
def do_quantize(img):
with Image.open(img) as image:
siz = 300
newsize = (siz,siz)
w, h = image.size
if w > h:
the_key = w / siz
image = image.resize((siz,int(h / the_key))).convert("RGBA")
elif h > w:
the_key = h / siz
image = image.resize((int(w / the_key),siz)).convert("RGBA")
else:
image = image.resize(newsize).convert("RGBA")
images1 = []
for i in range(60):
try:
im = image.copy()
im = im.quantize(colors=i + 1, method=2)
images1.append(im)
except:
break
images2 = list(reversed(images1))
images = images1 + images2
buffer = BytesIO()
images[0].save(buffer,
format='gif',
save_all=True,
append_images=images[1:],
duration=1,
loop=0)
buffer.seek(0)
return buffer
@staticmethod
def do_wash(img):
with Image.open(img) as img:
images = []
for i in range(30):
im = img.copy()
im = im.rotate(12 * i)
images.append(im)
gif = BytesIO()
images[0].save(gif,
format='gif',
save_all=True,
append_images=images,
duration=1,
loop=0)
gif.seek(0)
gif = Image.open(gif)
wash = Image.open("cogs/assets/wash.png")
frames = []
for frame in ImageSequence.Iterator(gif):
frame = frame.copy()
frame.paste(wash) #, mask=wash
frames.append(frame)
buffer = BytesIO()
frames[0].save(buffer,
format='gif',
save_all=True,
append_images=images,
duration=1,
loop=0)
buffer.seek(0)
return buffer
@staticmethod
def do_sketch(img):
ele = numpy.pi/2.2
azi = numpy.pi/4.
dep = 10.
with Image.open(img).convert('L') as img:
a = numpy.asarray(img).astype('float')
grad = numpy.gradient(a)
grad_x, grad_y = grad
gd = numpy.cos(ele)
dx = gd*numpy.cos(azi)
dy = gd*numpy.sin(azi)
dz = numpy.sin(ele)
grad_x = grad_x*dep/100.
grad_y = grad_y*dep/100.
leng = numpy.sqrt(grad_x**2 + grad_y**2 + 1.)
uni_x = grad_x/leng
uni_y = grad_y/leng
uni_z = 1./leng
a2 = 255*(dx*uni_x + dy*uni_y + dz*uni_z)
a2 = a2.clip(0,255)
img2 = Image.fromarray(a2.astype('uint8'))
buffer = BytesIO()
img2.save(buffer, format="PNG")
buffer.seek(0)
return buffer
@staticmethod
def do_merge(img1, img2):
img1 = Image.open(img1).convert("RGBA").resize((512, 512))
img2 = Image.open(img2).convert("RGBA").resize((512, 512))
img = Image.blend(img1, img2, 0.5)
buffer = BytesIO()
img.save(buffer, format="PNG")
buffer.seek(0)
return buffer
@staticmethod
def do_invert(img):
with Image.open(img).convert("RGB") as img:
img = ImageOps.invert(img)
buffer = BytesIO()
img.save(buffer, format="PNG")
buffer.seek(0)
return buffer
@staticmethod
def do_emboss(img):
with Image.open(img) as img:
img = img.filter(ImageFilter.EMBOSS)
buffer = BytesIO()
img.save(buffer, format="PNG")
buffer.seek(0)
return buffer
@staticmethod
def do_solarize(img):
with Image.open(img).convert("RGB") as img:
img = ImageOps.solarize(img, threshold=64)
buffer = BytesIO()
img.save(buffer, format="PNG")
buffer.seek(0)
return buffer
@staticmethod
def do_pixel(img):
with Image.open(img) as img:
img = img.resize((36, 36), resample=Image.BILINEAR)
img = img.resize(img.size, Image.NEAREST)
buffer = BytesIO()
img.save(buffer, format="PNG")
buffer.seek(0)
return buffer
# Wand Image Manipulation
@staticmethod
def do_swirl(img):
with WandImage(blob=img) as img:
img.swirl(degree=-90)
buffer = BytesIO()
img.save(buffer)
buffer.seek(0)
return buffer
@staticmethod
def do_polaroid(img):
with WandImage(blob=img) as img:
img.polaroid()
buffer = BytesIO()
img.save(buffer)
buffer.seek(0)
return buffer
@staticmethod
def do_floor(img):
with WandImage(blob=img) as img:
img.virtual_pixel = "tile"
img.resize(300, 300)
x, y = img.width, img.height
arguments = (0, 0, 77, 153, x, 0, 179, 153, 0, y, 51, 255, x, y, 204, 255)
img.distort("perspective", arguments)
buffer = BytesIO()
img.save(buffer)
buffer.seek(0)
return buffer
@staticmethod
def do_cube(img):
with WandImage(blob=img) as image:
def s(x):
return int(x / 3)
image.resize(s(1000), s(860))
image.format = "png"
image.alpha_channel = 'opaque'
image1 = image
image2 = WandImage(image1)
out = WandImage(width=s(3000 - 450), height=s(860 - 100) * 3)
out.format = "png"
image1.shear(background=wand.color.Color("none"), x=-30)
image1.rotate(-30)
out.composite(image1, left=s(500 - 250), top=s(0 - 230) + s(118))
image1.close()
image2.shear(background=wand.color.Color("rgba(0,0,0,0)"), x=30)
image2.rotate(-30)
image3 = WandImage(image2)
out.composite(image2, left=s(1000 - 250) - s(72), top=s(860 - 230))
image2.close()
image3.flip()
out.composite(image3, left=s(0 - 250) + s(68), top=s(860 - 230))
image3.close()
out.crop(left=80, top=40, right=665, bottom=710)
buffer = BytesIO()
out.save(buffer)
buffer.seek(0)
return buffer
@staticmethod
def do_spread(img):
with WandImage(blob=img) as img:
img.resize(256, 256)
img.alpha_channel = False
output = WandImage(width=img.width, height=img.height)
output.format = "GIF"
output.sequence[0] = img
output.sequence.extend(img for _ in range(0, 2))
for radius in range(0, 13):
with img.clone() as frame:
frame.spread(radius=radius ** 2)
output.sequence.append(frame)
output.sequence.extend(reversed(output.sequence))
img.close()
output.optimize_layers()
output.optimize_transparency()
buffer = BytesIO()
output.save(buffer)
buffer.seek(0)
return buffer
# Commands
@commands.command()
async def emboss(self, ctx, *, member: discord.Member = None):
'''Embosses the avatar'''
if not member:
member = ctx.author
url = member.avatar_url_as(size=512, format="png")
async with ctx.typing():
img = BytesIO(await url.read())
img.seek(0)
buffer = await self.bot.loop.run_in_executor(None, self.do_emboss, img)
file=discord.File(buffer, filename="embossed.png")
e=discord.Embed(color=self.invis)
e.set_author(name="<NAME>", icon_url=member.avatar_url)
e.set_image(url="attachment://embossed.png")
await ctx.remove(file=file, embed=e)
@commands.command()
async def invert(self, ctx, *, member: discord.Member = None):
'''Invert the avatar'''
if not member:
member = ctx.author
url = member.avatar_url_as(size=512, format="png")
async with ctx.typing():
img = BytesIO(await url.read())
img.seek(0)
buffer = await self.bot.loop.run_in_executor(None, self.do_invert, img)
file=discord.File(buffer, filename="invert.png")
e=discord.Embed(color=self.invis)
e.set_author(name="Inverted Avatar", icon_url=member.avatar_url)
e.set_image(url="attachment://invert.png")
await ctx.remove(file=file, embed=e)
@commands.command()
async def solarize(self, ctx, member: discord.Member = None):
'''Solarizes the avatar'''
if not member:
member = ctx.author
url = member.avatar_url_as(size=512, format="png")
async with ctx.typing():
img = BytesIO(await url.read())
img.seek(0)
buffer = await self.bot.loop.run_in_executor(None, self.do_solarize, img)
file=discord.File(buffer, filename="solarize.png")
e=discord.Embed(color=self.invis)
e.set_author(name="Solarized Avatar", icon_url=member.avatar_url)
e.set_image(url="attachment://solarize.png")
await ctx.remove(file=file, embed=e)
@commands.command()
async def pixel(self, ctx, *, member: discord.Member = None):
'''Pixelizes the avatar'''
if not member:
member = ctx.author
url = member.avatar_url_as(size=512, format="png")
async with ctx.typing():
img = BytesIO(await url.read())
img.seek(0)
buffer = await self.bot.loop.run_in_executor(None, self.do_pixel, img)
file=discord.File(buffer, filename="pixel.png")
e=discord.Embed(color=self.invis)
e.set_author(name="Pixelated Avatar", icon_url=member.avatar_url)
e.set_image(url="attachment://pixel.png")
await ctx.remove(file=file, embed=e)
@commands.command()
async def swirl(self, ctx, *, member: discord.Member = None):
'''Swirls the avatar'''
if not member:
member = ctx.author
url = member.avatar_url_as(size=512, format="png")
async with ctx.typing():
img = BytesIO(await url.read())
img.seek(0)
buffer = await self.bot.loop.run_in_executor(None, self.do_swirl, img)
file=discord.File(buffer, filename="swirl.png")
e=discord.Embed(color=self.invis)
e.set_author(name="Swirled Avatar", icon_url=member.avatar_url)
e.set_image(url="attachment://swirl.png")
await ctx.remove(file=file, embed=e)
@commands.command()
async def polaroid(self, ctx, *, member: discord.Member = None):
'''Polaroid the avatar'''
if not member:
member = ctx.author
url = member.avatar_url_as(size=512, format="png")
async with ctx.typing():
img = BytesIO(await url.read())
img.seek(0)
buffer = await self.bot.loop.run_in_executor(None, self.do_polaroid, img)
file=discord.File(buffer, filename="polaroid.png")
e=discord.Embed(color=self.invis)
e.set_author(name="Polaroid Avatar", icon_url=member.avatar_url)
e.set_image(url="attachment://polaroid.png")
await ctx.remove(file=file, embed=e)
@commands.command()
async def floor(self, ctx, *, member: discord.Member = None):
'''Floor the avatar'''
if not member:
member = ctx.author
url = member.avatar_url_as(size=512, format="png")
async with ctx.typing():
img = BytesIO(await url.read())
img.seek(0)
buffer = await self.bot.loop.run_in_executor(None, self.do_floor, img)
file=discord.File(buffer, filename="floor.png")
e=discord.Embed(color=self.invis)
e.set_author(name="Floored Avatar", icon_url=member.avatar_url)
e.set_image(url="attachment://floor.png")
await ctx.remove(file=file, embed=e)
@commands.command()
async def cube(self, ctx, *, member: discord.Member = None):
'''Cube the avatar'''
if not member:
member = ctx.author
url = member.avatar_url_as(size=512, format="png")
async with ctx.typing():
img = BytesIO(await url.read())
img.seek(0)
buffer = await self.bot.loop.run_in_executor(None, self.do_cube, img)
file=discord.File(buffer, filename="cube.png")
e=discord.Embed(color=self.invis)
e.set_author(name="Cubed Avatar", icon_url=member.avatar_url)
e.set_image(url="attachment://cube.png")
await ctx.remove(file=file, embed=e)
    @commands.command()
    async def spread(self, ctx, *, member: discord.Member = None):
        '''Spreads the avatar'''
        if not member:
            member = ctx.author
        url = member.avatar_url_as(size=512, format="png")
        async with ctx.typing():
            img = BytesIO(await url.read())
            img.seek(0)
            buffer = await self.bot.loop.run_in_executor(None, self.do_spread, img)
            file = discord.File(buffer, filename="spread.gif")
            e = discord.Embed(color=self.invis)
            e.set_author(name="Spread Avatar", icon_url=member.avatar_url)
            e.set_image(url="attachment://spread.gif")
            await ctx.remove(file=file, embed=e)

    @commands.command()
    async def sketch(self, ctx, *, member: discord.Member = None):
        '''Sketches the avatar'''
        if not member:
            member = ctx.author
        url = member.avatar_url_as(size=512, format="png")
        async with ctx.typing():
            img = BytesIO(await url.read())
            img.seek(0)
            buffer = await self.bot.loop.run_in_executor(None, self.do_sketch, img)
            file = discord.File(buffer, filename="sketch.png")
            e = discord.Embed(color=self.invis)
            e.set_author(name="Sketched Avatar", icon_url=member.avatar_url)
            e.set_image(url="attachment://sketch.png")
            await ctx.remove(file=file, embed=e)

    @commands.command()
    async def merge(self, ctx, m1: discord.Member, m2: discord.Member = None):
        '''Merge two avatars together'''
        if not m2:
            m2 = ctx.author
        url1 = m1.avatar_url_as(size=512, format="png")
        url2 = m2.avatar_url_as(size=512, format="png")
        async with ctx.typing():
            img1 = BytesIO(await url1.read())
            img1.seek(0)
            img2 = BytesIO(await url2.read())
            img2.seek(0)
            buffer = await self.bot.loop.run_in_executor(None, self.do_merge, img1, img2)
            file = discord.File(buffer, filename="merge.png")
            e = discord.Embed(color=self.invis)
            e.set_author(name="Merged Avatars", icon_url=m1.avatar_url)
            e.set_image(url="attachment://merge.png")
            await ctx.remove(file=file, embed=e)

    @commands.command()
    async def color(self, ctx, *, member: discord.Member = None):
        '''Colors the avatar'''
        if not member:
            member = ctx.author
        url = member.avatar_url_as(size=512, format="png")
        async with ctx.typing():
            img = BytesIO(await url.read())
            img.seek(0)
            buffer = await self.bot.loop.run_in_executor(None, self.do_quantize, img)
            file = discord.File(buffer, filename="quantize.gif")
            e = discord.Embed(color=self.invis)
            e.set_author(name="Colored Avatar", icon_url=member.avatar_url)
            e.set_image(url="attachment://quantize.gif")
            await ctx.remove(file=file, embed=e)

    @commands.command()
    async def wash(self, ctx, *, member: discord.Member = None):
        '''Wash the avatar'''
        if not member:
            member = ctx.author
        url = member.avatar_url_as(size=512, format="png")
        async with ctx.typing():
            img = BytesIO(await url.read())
            img.seek(0)
            buffer = await self.bot.loop.run_in_executor(None, self.do_wash, img)
            file = discord.File(buffer, filename="wash.gif")
            e = discord.Embed(color=self.invis)
            e.set_author(name="Washed Avatar", icon_url=member.avatar_url)
            e.set_image(url="attachment://wash.gif")
            await ctx.remove(file=file, embed=e)

    @commands.command()
    async def ascii(self, ctx, *, member: discord.Member = None):
        '''Ascii the avatar'''
        if not member:
            member = ctx.author
        url = member.avatar_url_as(size=512, format="png")
        async with ctx.typing():
            img = BytesIO(await url.read())
            img.seek(0)
            buffer = await self.bot.loop.run_in_executor(None, self.do_ascii, img)
            file = discord.File(buffer, filename="ascii.png")
            e = discord.Embed(color=self.invis)
            e.set_author(name="Ascii Avatar", icon_url=member.avatar_url)
            e.set_image(url="attachment://ascii.png")
            await ctx.remove(file=file, embed=e)

    @commands.command()
    async def achievement(self, ctx, *, text: str):
        '''Make a Minecraft achievement'''
        async with ctx.typing():
            if len(text) > 20:
                text = text[:17] + "..."
            buffer = await self.bot.loop.run_in_executor(None, self.do_mc, text)
            file = discord.File(buffer, filename="achievement.png")
            e = discord.Embed(color=self.invis)
            e.set_author(name="Achievement", icon_url=ctx.author.avatar_url)
            e.set_image(url="attachment://achievement.png")
            await ctx.remove(file=file, embed=e)


def setup(bot):
    bot.add_cog(image(bot))
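# The commands above hand a BytesIO of the avatar to blocking helpers such as
# self.do_solarize via run_in_executor.  Those helpers are defined earlier in
# the cog and are not shown here; the snippet below is only a minimal sketch of
# what one of them could look like (PIL-based, BytesIO in, BytesIO out).  The
# function name and the use of ImageOps.solarize are assumptions for
# illustration, not the cog's actual implementation.
from io import BytesIO

from PIL import Image, ImageOps


def do_solarize_sketch(image_bytes: BytesIO) -> BytesIO:
    """Invert pixels above a threshold and return a PNG buffer."""
    img = Image.open(image_bytes).convert("RGB")
    solarized = ImageOps.solarize(img, threshold=128)
    buffer = BytesIO()
    solarized.save(buffer, format="PNG")
    buffer.seek(0)  # discord.File reads from the current position
    return buffer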
<reponame>nliolios24/textrank
#!/usr/bin/env python
"""
An example using Graph as a weighted network.
"""
__author__ = """<NAME> (<EMAIL>)"""
try:
    import matplotlib.pyplot as plt
except ImportError:
    raise

import networkx as nx

G = nx.Graph()

G.add_edge('a', 'b', weight=0.6)
G.add_edge('a', 'c', weight=0.2)
G.add_edge('c', 'd', weight=0.1)
G.add_edge('c', 'e', weight=0.7)
G.add_edge('c', 'f', weight=0.9)
G.add_edge('a', 'd', weight=0.3)

elarge = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] > 0.5]
esmall = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] <= 0.5]

pos = nx.spring_layout(G)  # positions for all nodes

# nodes
nx.draw_networkx_nodes(G, pos, node_size=700)

# edges
nx.draw_networkx_edges(G, pos, edgelist=elarge,
                       width=6)
nx.draw_networkx_edges(G, pos, edgelist=esmall,
                       width=6, alpha=0.5, edge_color='b', style='dashed')

# labels
nx.draw_networkx_labels(G, pos, font_size=20, font_family='sans-serif')

plt.axis('off')
plt.savefig("weighted_graph.png")  # save as png
plt.show()  # display
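# Optional variation (not part of the original example): let the edge weights
# themselves set the line widths instead of bucketing edges into elarge/esmall.
# This re-draws the same graph in a fresh figure and assumes G and pos from the
# script above are still in scope.
plt.figure()
edge_widths = [6 * d['weight'] for (_, _, d) in G.edges(data=True)]
nx.draw_networkx_nodes(G, pos, node_size=700)
nx.draw_networkx_edges(G, pos, width=edge_widths)
nx.draw_networkx_labels(G, pos, font_size=20, font_family='sans-serif')
plt.axis('off')
plt.savefig("weighted_graph_scaled.png")
plt.show()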
<reponame>romchegue/Python<gh_stars>0
print('Ni' * 8)
def mul(number, *bys):
    """Multiply ``number`` by every value in ``bys``."""
    # ARRAY literals are represented as ["ARRAY", elem1, elem2, ...].  The
    # original code only bound an unused ``block`` variable here, so the ARRAY
    # case still falls through to plain ``*`` (i.e. list repetition).
    if type(number) == list and number[0] == "ARRAY":
        pass  # TODO: decide whether ARRAY payloads should be multiplied element-wise
    result = number
    for num in bys:
        result *= num
    return result
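

# Quick usage check for mul() under its current semantics: scalars multiply
# normally, while an ARRAY literal is repeated by ``*`` rather than multiplied
# element-wise (the behaviour the TODO above flags for review).
if __name__ == "__main__":
    assert mul(3, 4, 5) == 60
    assert mul(["ARRAY", 1, 2], 2) == ["ARRAY", 1, 2, "ARRAY", 1, 2]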