id (string, 1-265 chars) | text (string, 6-5.19M chars) | dataset_id (7 classes) |
---|---|---|
83249 | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yahoo Non-Factoid Question Dataset"""
from __future__ import absolute_import, division, print_function
import json
import logging
import os
import datasets
_DESCRIPTION = """\
Yahoo Non-Factoid Question Dataset is derived from Yahoo's Webscope L6 collection using machine learning techniques such \
that the questions would contain non-factoid answers. The dataset contains 87,361 questions and their corresponding answers. \
Each question contains its best answer along with additional answers submitted by users. \
Only the best answer was reviewed in determining the quality of the question-answer pair.
"""
_URL = "https://ciir.cs.umass.edu/downloads/nfL6/nfL6.json.gz"
class YahooAnswersQa(datasets.GeneratorBasedBuilder):
"""Yahoo Non-Factoid Question Dataset"""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [datasets.BuilderConfig(name="yahoo_answers_qa", version=datasets.Version("1.0.0"))]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"question": datasets.Value("string"),
"answer": datasets.Value("string"),
"nbestanswers": datasets.features.Sequence(datasets.Value("string")),
"main_category": datasets.Value("string"),
}
),
supervised_keys=None,
homepage="https://ciir.cs.umass.edu/downloads/nfL6/index.html",
)
def _split_generators(self, dl_manager):
downloaded_file = dl_manager.download_and_extract(_URL)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_file}),
]
def _generate_examples(self, filepath):
logging.info("⏳ Generating examples from = %s", filepath)
if os.path.isdir(filepath):
filepath = os.path.join(filepath, "nfL6.json")
with open(filepath, encoding="utf-8") as f:
data = json.load(f)
for example in data:
yield example["id"], example
| StarcoderdataPython |
1608755 | <gh_stars>0
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import unittest
from paddlenlp.transformers import BigBirdTokenizer
from common_test import CpuCommonTest
import util
import unittest
class TestBigBirdTokenizer(CpuCommonTest):
def set_input(self):
self.max_seq_len = 40
self.max_pred_len = 3
def set_output(self):
self.expected_span_ids = np.array([
65, 1153, 36677, 3766, 2747, 427, 3830, 419, 530, 16474, 1677, 6464,
5441, 385, 7002, 363, 2099, 387, 5065, 441, 484, 2375, 3583, 5682,
16812, 474, 34179, 1266, 8951, 391, 34478, 871, 67, 67, 385, 29223,
2447, 388, 635, 66
])
self.expected_masked_lm_positions = np.array([2, 32, 33])
self.expected_masked_lm_ids = np.array([4558, 2757, 15415])
self.expected_masked_lm_weights = np.array([1., 1., 1.])
def set_text(self):
self.text = 'An extremely powerful film that certainly isnt '\
'appreciated enough Its impossible to describe the experience '\
'of watching it The recent UK television adaptation was shameful '\
'too ordinary and bland This original manages to imprint itself '\
'in your memory'
def setUp(self):
np.random.seed(102)
self.tokenizer = BigBirdTokenizer.from_pretrained(
'bigbird-base-uncased')
self.set_text()
self.set_input()
self.set_output()
def test_vocab_size(self):
self.check_output_equal(self.tokenizer.vocab_size, 50358)
def test_tokenize(self):
result = self.tokenizer.encode(self.text, self.max_seq_len,
self.max_pred_len)
span_ids, masked_lm_positions, masked_lm_ids, masked_lm_weights = result
self.check_output_equal(span_ids, self.expected_span_ids)
self.check_output_equal(masked_lm_positions,
self.expected_masked_lm_positions)
self.check_output_equal(masked_lm_ids, self.expected_masked_lm_ids)
self.check_output_equal(masked_lm_weights,
self.expected_masked_lm_weights)
class TestBigBirdTokenizerLongMaxPredLen(TestBigBirdTokenizer):
def set_input(self):
self.max_seq_len = 40
self.max_pred_len = 8
def set_output(self):
self.expected_span_ids = np.array([
65, 1153, 48226, 3766, 2747, 427, 67, 419, 530, 16474, 1677, 6464,
5441, 385, 7002, 363, 2099, 387, 5065, 441, 484, 67, 3583, 5682,
16812, 474, 34179, 1266, 8951, 391, 34478, 871, 34299, 67, 385,
29223, 2447, 67, 635, 66
])
self.expected_masked_lm_positions = np.array(
[2, 6, 21, 32, 33, 37, 0, 0])
self.expected_masked_lm_ids = np.array(
[4558, 3830, 2375, 2757, 15415, 388, 0, 0])
self.expected_masked_lm_weights = np.array(
[1., 1., 1., 1., 1., 1., 0., 0.])
class TestBigBirdTokenizerGetInputIdsValueError(TestBigBirdTokenizer):
def set_text(self):
self.text = dict()
@util.assert_raises(ValueError)
def test_tokenize(self):
super().test_tokenize()
class TestBigBirdTokenizerGetInputIdsTextInt(TestBigBirdTokenizer):
def set_text(self):
self.text = [
1153, 4558, 3766, 2747, 427, 3830, 419, 530, 16474, 1677, 6464,
5441, 385, 7002, 363, 2099, 387, 5065, 441, 484, 2375, 3583, 5682,
16812, 474, 34179, 1266, 8951, 391, 34478, 871, 2757, 15415, 385,
29223, 2447, 388, 635, 4189
]
class TestBigBirdTokenizerGetInputIdsTextStr(TestBigBirdTokenizer):
def set_text(self):
self.text = [
'▁An', '▁extremely', '▁powerful', '▁film', '▁that', '▁certainly',
'▁is', 'nt', '▁appreciated', '▁enough', '▁Its', '▁impossible',
'▁to', '▁describe', '▁the', '▁experience', '▁of', '▁watching',
'▁it', '▁The', '▁recent', '▁UK', '▁television', '▁adaptation',
'▁was', '▁shameful', '▁too', '▁ordinary', '▁and', '▁bland', '▁This',
'▁original', '▁manages', '▁to', '▁imprint', '▁itself', '▁in',
'▁your', '▁memory'
]
class TestBigBirdTokenizerUnusualText(CpuCommonTest):
def setUp(self):
self.tokenizer = BigBirdTokenizer.from_pretrained(
'bigbird-base-uncased')
def test_empty_text(self):
ids = self.tokenizer('')
self.check_output_equal(ids == [], True)
def test_bytes(self):
byte_text = 'An extremely powerful film that certainly isnt '\
'appreciated enough Its impossible to describe the experience '\
'of watching it The recent UK television adaptation was shameful '\
'too ordinary and bland This original manages to imprint itself '\
'in your memory'.encode()
self.expected_tokens = [
'▁An', '▁extremely', '▁powerful', '▁film', '▁that', '▁certainly',
'▁is', 'nt', '▁appreciated', '▁enough', '▁Its', '▁impossible',
'▁to', '▁describe', '▁the', '▁experience', '▁of', '▁watching',
'▁it', '▁The', '▁recent', '▁UK', '▁television', '▁adaptation',
'▁was', '▁shameful', '▁too', '▁ordinary', '▁and', '▁bland', '▁This',
'▁original', '▁manages', '▁to', '▁imprint', '▁itself', '▁in',
'▁your', '▁memory'
]
tokens = self.tokenizer(byte_text)
self.check_output_equal(tokens, self.expected_tokens)
class TestBigBirdTokenizerNotExistFile(CpuCommonTest):
@util.assert_raises(ValueError)
def test_not_exist_file(self):
self.tokenizer = BigBirdTokenizer(sentencepiece_model_file='')
class TestBigBirdTokenizerUNK(CpuCommonTest):
def setUp(self):
self.tokenizer = BigBirdTokenizer.from_pretrained(
'bigbird-base-uncased')
def test_unk(self):
self.text = 'An extremely powerful film that certainly isnt '\
'appreciated enough Its impossible to describe the experience '\
'of watching it The recent UK television adaptation was shameful '\
'too ordinary and bland This original manages to imprint itself '\
'in your memory 中'
# Chinese words don't exist in the provided vocabs
tokens = self.tokenizer(self.text)
self.check_output_equal('<unk>' in tokens, True)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
94833 |
import os, sys, json
from collections import Counter
def load_data(path):
data = {}
for line in open(path, 'r'):
jobj = json.loads(line.strip())
sentid = jobj['sentid']
assert sentid not in data
data[sentid] = []
conversation = jobj['sent'].replace('<SEP>', '', 100).split()
for pa_structure in jobj['srl']:
pas = {'V': conversation[pa_structure['pred']]}
for k, v in pa_structure['args'].items():
st, ed = v
if ed == -1:
pas[k] = '我'
elif ed == -2:
pas[k] = '你'
else:
pas[k] = ' '.join(conversation[st:ed+1])
data[sentid].append(pas)
return data
def update_counts_intersect(v1, v2, is_token_level):
if v1 == '' or v2 == '':
return 0
if is_token_level:
v1 = Counter(v1.split())
v2 = Counter(v2.split())
res = 0
for k, cnt1 in v1.items():
if k in v2:
res += min(cnt1, v2[k])
return res
else:
return v1 == v2
def update_counts_denominator(conv, is_token_level):
counts = 0
for pas in conv:
for k, v in pas.items():
if k != 'V': # don't count "pred" for each PA structure
counts += len(v.split()) if is_token_level else 1
return counts
# is_sync: whether ref-file and prd-file have the same content. This is always True except when the prd-file is after rewriting.
def update_counts(ref_conv, prd_conv, counts, is_sync, is_token_level):
counts[1] += update_counts_denominator(ref_conv, is_token_level)
counts[2] += update_counts_denominator(prd_conv, is_token_level)
if is_sync:
for ref_pas, prd_pas in zip(ref_conv, prd_conv):
for k, v1 in ref_pas.items():
if k == 'V':
continue
v2 = prd_pas.get(k,'')
counts[0] += update_counts_intersect(v1, v2, is_token_level)
else:
for ref_pas in ref_conv:
for prd_pas in prd_conv:
if prd_pas['V'] == ref_pas['V']:
for k, v1 in ref_pas.items():
if k == 'V':
continue
v2 = prd_pas.get(k,'')
counts[0] += update_counts_intersect(v1, v2, is_token_level)
break
def calc_f1(ref, prd, is_sync=True, is_token_level=False):
"""
:param ref: a list of predicate argument structures
:param prd: a list of predicted predicate argument structures
:return: a dict with precision 'P', recall 'R' and F1 'F'
"""
counts = [0, 0, 0]
update_counts(ref, prd, counts, is_sync, is_token_level)
p = 0.0 if counts[2] == 0 else counts[0]/counts[2]
r = 0.0 if counts[1] == 0 else counts[0]/counts[1]
f = 0.0 if p == 0.0 or r == 0.0 else 2*p*r/(p+r)
return {'P':p, 'R':r, 'F':f}
if __name__ == "__main__":
ref = load_data("../data/dev.txt")
prd = load_data("../data/dev.txt")
is_sync = True
is_token_level = False
ref_list = []
prd_list = []
for key, ref_data in ref.items():
prd_data = prd.get(key, [])
ref_list.extend(ref_data)
prd_list.extend(prd_data)
print(calc_f1(ref_list, prd_list, is_sync, is_token_level))
| StarcoderdataPython |
3393110 | <filename>utils/neuron/models/losses/losses.py<gh_stars>10-100
import torch
import torch.nn as nn
import neuron.ops as ops
from neuron.config import registry
__all__ = ['BalancedBCELoss', 'FocalLoss', 'GHMC_Loss', 'OHEM_BCELoss',
'LabelSmoothLoss', 'SmoothL1Loss', 'IoULoss', 'GHMR_Loss',
'TripletLoss', 'CenterLoss']
@registry.register_module
class BalancedBCELoss(nn.Module):
def __init__(self, neg_weight=1.):
super(BalancedBCELoss, self).__init__()
self.neg_weight = neg_weight
def forward(self, input, target):
return ops.balanced_bce_loss(input, target, self.neg_weight)
@registry.register_module
class FocalLoss(nn.Module):
def __init__(self, alpha=0.25, gamma=2.):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
def forward(self, input, target):
return ops.focal_loss(input, target, self.alpha, self.gamma)
@registry.register_module
class GHMC_Loss(nn.Module):
def __init__(self, bins=30, momentum=0.5):
super(GHMC_Loss, self).__init__()
self.bins = bins
self.momentum = momentum
def forward(self, input, target):
return ops.ghmc_loss(input, target, self.bins, self.momentum)
@registry.register_module
class OHEM_BCELoss(nn.Module):
def __init__(self, neg_ratio=3.):
super(OHEM_BCELoss, self).__init__()
self.neg_ratio = neg_ratio
def forward(self, input, target):
return ops.ohem_bce_loss(input, target, self.neg_ratio)
@registry.register_module
class LabelSmoothLoss(nn.Module):
def __init__(self, num_classes, eps=0.1, calc_metrics=False):
super(LabelSmoothLoss, self).__init__()
self.num_classes = num_classes
self.eps = eps
self.calc_metrics = calc_metrics
def forward(self, input, target):
loss = ops.label_smooth_loss(
input, target, self.num_classes, self.eps)
if self.calc_metrics and not self.training:
metrics = ops.topk_precision(input, target)
loss = {'loss': loss}
loss.update(metrics)
return loss
@registry.register_module
class SmoothL1Loss(nn.Module):
def __init__(self, beta=1. / 9):
super(SmoothL1Loss, self).__init__()
self.beta = beta
def forward(self, input, target):
return ops.smooth_l1_loss(input, target, self.beta)
@registry.register_module
class IoULoss(nn.Module):
def forward(self, input, target, weight=None):
return ops.iou_loss(input, target, weight)
@registry.register_module
class GHMR_Loss(nn.Module):
def __init__(self, mu=0.02, bins=10, momentum=0):
super(GHMR_Loss, self).__init__()
self.mu = mu
self.bins = bins
self.momentum = momentum
def forward(self, input, target):
return ops.ghmr_loss(input, target)
@registry.register_module
class TripletLoss(nn.Module):
def __init__(self, margin=None, normalize_feats=False, calc_metrics=False):
super(TripletLoss, self).__init__()
self.margin = margin
if margin is not None:
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
else:
self.ranking_loss = nn.SoftMarginLoss()
self.normalize_feats = normalize_feats
self.calc_metrics = calc_metrics
def forward(self, input, target):
if self.normalize_feats:
input = self._normalize(input, dim=-1)
dist_mat = ops.euclidean(input, input, sqrt=True)
dist_ap, dist_an = self._ohem(dist_mat, target)
y = dist_an.new_ones(dist_an.size())
if self.margin is not None:
loss = self.ranking_loss(dist_an, dist_ap, y)
else:
loss = self.ranking_loss(dist_an - dist_ap, y)
if self.calc_metrics and not self.training:
metrics = ops.r1_map(dist_mat, target)
loss = {'loss': loss}
loss.update(metrics)
return loss
def _normalize(self, x, dim=-1):
norm = torch.norm(x, 2, dim=dim, keepdim=True)
x = x / (norm.expand_as(x) + 1e-12)
return x
def _ohem(self, dist_mat, target, return_indices=False):
n = dist_mat.size(0)
label_mat = target.expand(n, n)
pos_mask = label_mat.eq(label_mat.t())
neg_mask = label_mat.ne(label_mat.t())
dist_ap, indices_p = torch.max(
dist_mat[pos_mask].contiguous().view(n, -1),
dim=1, keepdim=True)
dist_an, indices_n = torch.min(
dist_mat[neg_mask].contiguous().view(n, -1),
dim=1, keepdim=True)
dist_ap = dist_ap.squeeze(1)
dist_an = dist_an.squeeze(1)
if return_indices:
indices = target.new_zeros(
target.size()).copy_(torch.arange(n).long())
indices = indices.unsqueeze(0).expand(n, n)
indices_p = torch.gather(
indices[pos_mask].contiguous().view(n, -1),
1, indices_p).squeeze(1)
indices_n = torch.gather(
indices[neg_mask].contiguous().view(n, -1),
1, indices_n).squeeze(1)
return dist_ap, dist_an, indices_p, indices_n
else:
return dist_ap, dist_an
@registry.register_module
class CenterLoss(nn.Module):
def __init__(self, num_classes, num_channels):
super(CenterLoss, self).__init__()
self.num_classes = num_classes
self.num_channels = num_channels
self.centers = nn.Parameter(
torch.randn(num_classes, num_channels))
def forward(self, input, target):
assert len(input) == len(target)
self.centers = self.centers.to(input.device)
dist_mat = ops.euclidean(input, self.centers, sqrt=False)
classes = torch.arange(self.num_classes).long()
classes = classes.to(input.device)
n = len(input)
target = target.unsqueeze(1).expand(n, self.num_classes)
mask = target.eq(classes.expand(n, self.num_classes))
dist_mat = dist_mat * mask.float()
loss = dist_mat.clamp_(min=1e-12, max=1e+12).sum() / n
return loss
| StarcoderdataPython |
1688194 | <gh_stars>0
from django.apps import AppConfig
class JoladnijoConfig(AppConfig):
name = 'joladnijo'
verbose_name = '<NAME>'
def ready(self):
import joladnijo.signals # noqa: F401
| StarcoderdataPython |
141365 | <gh_stars>1-10
#!/usr/bin/env python3
import netifaces as nf
import psutil as ps
import socket
import time
# https://github.com/sindresorhus/cli-spinners/blob/HEAD/spinners.json
# spin = ['|','/','-','\\','+']
# spin = ["◴","◷","◶","◵"]
# spin = ["◐","◓","◑","◒"]
spin = ["⠋","⠙","⠹","⠸","⠼","⠴","⠦","⠧","⠇","⠏"]
wrap = len(spin)
i = 0
try:
while True:
# get interfaces
ifs = nf.interfaces()
ap = False
if 'wlan1' in ifs:
addr = nf.ifaddresses('wlan0')[nf.AF_INET][0]['addr']
if addr == '10.10.10.1':
ap = True
addrs = []
for ip in ['en0', 'eth0', 'wlan0']:
if ip in ifs:
addr = nf.ifaddresses(ip)[nf.AF_INET][0]['addr']
addrs.append((ip, addr,))
print("{} AP[{}] {}".format(
socket.gethostname().split('.')[0],
'UP' if ap else 'DOWN',
spin[i%wrap]
))
i += 1
cpu = ps.cpu_percent()
mem = ps.virtual_memory().percent
print("CPU: {:3.0f}% Mem: {:3.0f}%".format(cpu,mem))
for ip, addr in addrs:
print("{}: {}".format(ip, addr))
time.sleep(1)
except KeyboardInterrupt:
print('bye ...')
| StarcoderdataPython |
3297967 | # The MIT License
# Copyright (c) 2021- Nordic Institute for Interoperability Solutions (NIIS)
# Copyright (c) 2017-2020 Estonian Information System Authority (RIA)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import re
import unittest
from unittest.mock import Mock, MagicMock
from opmon_anonymizer.anonymizer import AnonymizationJob
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
class TestAnonymizationJob(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.maxDiff = None
def test_record_hiding_with_rules(self):
hiding_rules = [
[('feature1', re.compile('value.')), ('feature2', re.compile(r'value\d+'))],
]
records = [
{'feature1': 'value1', 'feature2': 'value2', 'feature3': 'value3'},
{'feature1': 'valueA', 'feature2': 'value51', 'feature3': 'value3'},
{'feature1': 'value1', 'feature2': 'value_two', 'feature3': 'value3'},
]
MockAnonymizationJob._should_be_hidden = AnonymizationJob._should_be_hidden
anonymization_job = MockAnonymizationJob()
anonymization_job._record_matches_conditions = AnonymizationJob._record_matches_conditions
anonymization_job._hiding_rules = hiding_rules
logger = MagicMock()
passed_records = []
for record in records:
if not anonymization_job._should_be_hidden(record, logger):
passed_records.append(record)
expected_passed_records = [records[2]]
self.assertCountEqual(expected_passed_records, passed_records)
def test_record_substitution(self):
substitution_rules = [
{
'conditions': [
('feature1', re.compile('value.')),
('feature2', re.compile(r'value\d+')),
],
'substitutes': [
{'feature': 'feature1', 'value': 'new_value1'},
{'feature': 'feature3', 'value': 'new_value3'},
],
}
]
records = [
{'feature1': 'value1', 'feature2': 'value2', 'feature3': 'old_value'},
{'feature1': 'valueB', 'feature2': 'value_two', 'feature3': 'old_value'},
]
expected_processed_records = [
{'feature1': 'new_value1', 'feature2': 'value2', 'feature3': 'new_value3'},
{'feature1': 'valueB', 'feature2': 'value_two', 'feature3': 'old_value'},
]
MockAnonymizationJob._substitute = AnonymizationJob._substitute
anonymization_job = MockAnonymizationJob()
anonymization_job._record_matches_conditions = AnonymizationJob._record_matches_conditions
anonymization_job._substitution_rules = substitution_rules
logger = MagicMock()
processed_records = [anonymization_job._substitute(record, logger) for record in records]
self.assertCountEqual(expected_processed_records, processed_records)
def test_dual_record_splitting(self):
field_translations = {
'client': {
'requestInTs': 'requestInTs',
'securityServerType': 'securityServerType',
},
'producer': {
'requestInTs': 'requestInTs',
'securityServerType': 'securityServerType',
},
'totalDuration': 'totalDuration',
'producerDurationProducerView': 'producerDurationProducerView'
}
field_value_masks = {'client': set(['producerDurationProducerView']), 'producer': set(['totalDuration'])}
dual_records = [
{
'client': {
'requestInTs': 'requestInTs_client_value',
'securityServerType': 'securityServerType_client_value'
},
'producer': {
'requestInTs': 'requestInTs_producer_value',
'securityServerType': 'securityServerType_producer_value'
},
'totalDuration': 'totalDuration_value'
}
]
expected_individual_records = [
{
'requestInTs': 'requestInTs_client_value',
'securityServerType': 'securityServerType_client_value',
'totalDuration': 'totalDuration_value',
'producerDurationProducerView': None # Masked for client
},
{
'requestInTs': 'requestInTs_producer_value',
'securityServerType': 'securityServerType_producer_value',
'totalDuration': None # Masked for producer
}
]
MockAnonymizationJob._get_agent_record = AnonymizationJob._get_agent_record
MockAnonymizationJob._get_records = AnonymizationJob._get_records
anonymization_job = MockAnonymizationJob()
anonymization_job._field_value_masks = field_value_masks
anonymization_job._field_translations = field_translations
logger = MagicMock()
individual_records = []
for dual_record in dual_records:
for individual_record in anonymization_job._get_records(dual_record, logger):
individual_records.append(individual_record)
self.assertCountEqual(expected_individual_records, individual_records)
class MockAnonymizationJob(object):
pass
| StarcoderdataPython |
1667108 | from .oauth2 import load | StarcoderdataPython |
1643731 | <filename>project_files/spiders/test/ThreadPoolTest.py
import threading
import time
from concurrent.futures import ThreadPoolExecutor
exitFlag = 0
class ThreadDemo(threading.Thread):
def __init__(self, thread_id, name, counter):
threading.Thread.__init__(self)
self.thread_id = thread_id
self.name = name
self.counter = counter
def run(self):
print("开始线程:" + self.name)
print_time(self.name, self.counter, 5)
print("退出线程:" + self.name)
def get_result(self):
return "thread id:%s, thread name:%s" % (self.thread_id, self.name)
def print_time(thread_name, delay, counter):
while counter:
if exitFlag:
thread_name.exit()
time.sleep(delay)
print("%s: %s" % (thread_name, time.ctime(time.time())))
counter -= 1
# Create a thread pool that holds at most 5 workers
with ThreadPoolExecutor(max_workers=5) as t:
# Submit the callables to the thread pool via submit()
task1 = t.submit(ThreadDemo, 1, "Thread-1", 1)
task2 = t.submit(ThreadDemo, 2, "Thread-2", 2)
task3 = t.submit(ThreadDemo, 3, "Thread-3", 3)
# Use done() to check whether each task has finished
print(f"task1 running: {task1.done()}")
print(f"task2 running: {task2.done()}")
print(f"task3 running: {task3.done()}")
time.sleep(2.5)
print(f"task1 done: {task1.done()}")
print(f"task2 done: {task2.done()}")
print(f"task3 done: {task3.done()}")
# Use result() to get each task's return value
print(f"task1 result: {task1.result()}")
print(f"task2 result: {task2.result()}")
print(f"task3 result: {task3.result()}")
| StarcoderdataPython |
4810470 | #
# K2HDKC DBaaS based on Trove
#
# Copyright 2020 Yahoo Japan Corporation
#
# K2HDKC DBaaS is a Database as a Service compatible with Trove which
# is DBaaS for OpenStack.
# Using K2HR3 as backend and incorporating it into Trove to provide
# DBaaS functionality. K2HDKC, K2HR3, CHMPX and K2HASH are components
# provided as AntPickax.
#
# For the full copyright and license information, please view
# the license file that was distributed with this source code.
#
# AUTHOR: <NAME>
# CREATE: Mon Sep 14 2020
# REVISION:
#
#
# This product includes software developed at
# The Apache Software Foundation (http://www.apache.org/).
#
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
# Copyright 2015 Tesora Inc.
# All Rights Reserved.s
#
import re
from oslo_log import log as logging
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.k2hdkc import service
from trove.guestagent.strategies.restore import base
LOG = logging.getLogger(__name__)
class K2hdkcArchive(base.RestoreRunner):
"""Implementation of restore for k2hdkc.
"""
__strategy_name__ = 'k2hdkcarchive'
def __init__(self, storage, **kwargs):
self._appstatus = service.K2hdkcAppStatus()
self._app = service.K2hdkcApp(self._appstatus)
"""
Get the filename from the swift url to restore.
"""
is_match = re.search(r'http:/(/(.*))/(.*)?\.gz\.enc$',
kwargs.get('location'))
if is_match is not None:
self._id = re.findall(r'http:/(/(.*))/(.*)?\.gz\.enc$',
kwargs.get('location'))[0][2]
else:
self._id = None
kwargs.update({'restore_location': self._app.k2hdkc_data_dir})
super().__init__(storage, **kwargs)
def pre_restore(self):
"""Prepare the data directory for restored files.
The directory itself is not included in the backup archive
(i.e. the archive is rooted inside the data directory).
This is to make sure we can always restore an old backup
even if the standard guest agent data directory changes.
"""
LOG.debug('Initializing a data directory.')
operating_system.create_directory(self.restore_location,
user=self._app.k2hdkc_owner,
group=self._app.k2hdkc_group,
force=True,
as_root=True)
def post_restore(self):
"""Updated ownership on the restored files.
"""
LOG.debug('Updating ownership of the restored files.')
# Owner of the files should be k2hdkc:k2hdkc.
operating_system.chown(self.restore_location,
'k2hdkc',
'k2hdkc',
recursive=True,
force=True,
as_root=True)
utils.execute('/usr/libexec/k2hdkc-snapshot', '--restore',
'%s' % self.restore_location, '%s' % self._id)
@property
def decrypt_cmd(self):
"""command to decrypt.
"""
# Adds openssl options to avoid warnings.
if self.is_encrypted:
return (
'openssl enc -d -aes-256-cbc -pbkdf2 -iter 100000 -salt -pass pass:%s | '
% self.decrypt_key)
return ()
@property
def base_restore_cmd(self):
"""Command to extract a backup archive into a given location.
Attempt to preserve access modifiers on the archived files.
"""
return 'sudo tar -xpPf - -C "%(restore_location)s"'
#
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: expandtab sw=4 ts=4 fdm=marker
# vim<600: expandtab sw=4 ts=4
#
| StarcoderdataPython |
1679707 | import unittest
from unittest.mock import Mock
from PySide.QtGui import QApplication
from libtuto.size import Size
from libtuto.tutorial import Tutorial
from plustutocenter.definitions.mvp import Controller
from plustutocenter.qt.view_qt import ViewQt
from plustutocenter.qt.widgets.main_window import MainWindow
class ViewQtTests(unittest.TestCase):
TUTORIAL = Tutorial("", "St", "St", "", [], Size(800, 600))
TUTORIALS = {("someapp.desktop", "version"): [TUTORIAL]}
DONE_TUTORIALS = {"someapp.desktop": ["Some tutorial"]}
QT_APP_BCK = ViewQt.QT_APPLICATION
def setUp(self):
self.controller_mock = Mock(spec=Controller)
self.controller_mock.getDoneTutorials.return_value = self.DONE_TUTORIALS
self.controller_mock.getTutorials.return_value = self.TUTORIALS
self.view_qt = ViewQt(self.controller_mock)
def tearDown(self):
ViewQt.QT_APPLICATION.reset_mock()
@classmethod
def setUpClass(cls):
ViewQt.QT_APPLICATION = Mock(spec=QApplication)
@classmethod
def tearDownClass(cls):
ViewQt.QT_APPLICATION = cls.QT_APP_BCK
def test_init(self):
self.assertIs(self.controller_mock, self.view_qt._controller)
self.assertIsNotNone(self.view_qt._main_window)
def test_update_tutorials(self):
self.view_qt._main_window = Mock(spec=MainWindow)
self.view_qt._updateTutorials(self.TUTORIALS)
self.view_qt._main_window.setTutorials.assert_called_with(
self.TUTORIALS)
def test_update_done_tutorials(self):
self.view_qt._main_window = Mock(spec=MainWindow)
self.view_qt.updateDoneTutorials()
self.view_qt._main_window.updateDoneTutorials.assert_called_with(
self.DONE_TUTORIALS
)
def test_launch(self):
self.view_qt._main_window = Mock(spec=MainWindow)
self.view_qt.launch()
self.assertTrue(self.view_qt._main_window.show.called)
self.assertTrue(ViewQt.QT_APPLICATION.exec_.called)
| StarcoderdataPython |
63696 | <filename>pyowapi/tests/test_api.py<gh_stars>1-10
from unittest import TestCase
import pyowapi
class TestAPI(TestCase):
def test_single_player(self):
player = pyowapi.get_player("Jayne#1447")
self.assertTrue(player.success)
def test_single_player_playstation(self):
player = pyowapi.get_player("R3flexTube", platform="psn")
self.assertTrue(player.success)
def test_single_player_xbox(self):
player = pyowapi.get_player("VeX I Ninja", platform="xbl")
self.assertTrue(player.success)
def test_multiple_players(self):
players = pyowapi.get_player(["Jayne#1447", "Zusor#2553"])
for player in players:
self.assertTrue(player.success)
def test_multiple_players_playstation(self):
players = pyowapi.get_player(["R3flexTube", "Savage_DadD"], platform="psn")
for player in players:
self.assertTrue(player.success)
def test_multiple_players_xbox(self):
players = pyowapi.get_player(["VeX I Ninja", "MunchingCarrot"], platform="xbl")
for player in players:
self.assertTrue(player.success)
def test_incorrect_player(self):
player = pyowapi.get_player("jayne#1447")
self.assertFalse(player.success)
player2 = pyowapi.get_player("r3flextube", platform="psn")
self.assertFalse(player2.success)
def test_correcting_player(self):
player = pyowapi.get_player("jayne#1447", correct_player=True)
self.assertTrue(player.success)
self.assertTrue(player.player_name == "Jayne#1447")
player2 = pyowapi.get_player("zusor#1234", correct_player=True)
self.assertTrue(player2.success)
self.assertFalse(player2.player_name == player2.original_player_name)
self.assertTrue(player2.player_name == "Zusor#2553")
| StarcoderdataPython |
160380 | import torch
from torch import nn
from torch.nn import functional as F
from torch import optim
from torch.autograd import Variable
import numpy as np
class ConcreteDropout(nn.Module):
def __init__(self, weight_regularizer=1e-7,
dropout_regularizer=1e-6, init_min=0.1, init_max=0.1):
super(ConcreteDropout, self).__init__()
self.weight_regularizer = weight_regularizer
self.dropout_regularizer = dropout_regularizer
init_min = np.log(init_min) - np.log(1. - init_min)
init_max = np.log(init_max) - np.log(1. - init_max)
self.p_logit = nn.Parameter(torch.empty(1).uniform_(init_min, init_max))
def forward(self, x, layer):
p = torch.sigmoid(self.p_logit)
out = layer(self._concrete_dropout(x, p))
sum_of_square = 0
for param in layer.parameters():
sum_of_square += torch.sum(torch.pow(param, 2))
weights_regularizer = self.weight_regularizer * sum_of_square / (1 - p)
dropout_regularizer = p * torch.log(p)
dropout_regularizer += (1. - p) * torch.log(1. - p)
input_dimensionality = x[0].numel() # Number of elements of first item in batch
dropout_regularizer *= self.dropout_regularizer * input_dimensionality
#regularization = weights_regularizer + dropout_regularizer
regularization = dropout_regularizer
return out, regularization
def _concrete_dropout(self, x, p):
eps = 1e-7
temp = 0.1
unif_noise = torch.rand_like(x)
drop_prob = (torch.log(p + eps)
- torch.log(1 - p + eps)
+ torch.log(unif_noise + eps)
- torch.log(1 - unif_noise + eps))
drop_prob = torch.sigmoid(drop_prob / temp)
random_tensor = 1 - drop_prob
retain_prob = 1 - p
x = torch.mul(x, random_tensor)
x /= retain_prob
return x
| StarcoderdataPython |
3222652 | <filename>code/sheet_cleaner/sheet_processor.py
import logging
import os
from datetime import datetime
from typing import List
import configparser
import pandas as pd
from geocoding import csv_geocoder
from spreadsheet import GoogleSheet
from functions import (duplicate_rows_per_column, fix_na, fix_sex,
generate_error_tables, trim_df, values2dataframe)
class SheetProcessor:
def __init__(self, sheets: List[GoogleSheet], geocoder: csv_geocoder.CSVGeocoder, config: configparser.ConfigParser):
self.for_github = []
self.sheets = sheets
self.geocoder = geocoder
self.config = config
def process(self):
"""Does all the heavy handling of spreadsheets, writing output to CSV files."""
for s in self.sheets:
logging.info("Processing sheet %s", s.name)
### Clean Private Sheet Entries. ###
# note: the private sheet gets updated on the fly and redownloaded to ensure continuity between fixes (granted it's slower).
range_ = f'{s.name}!A:AG'
data = values2dataframe(s.read_values(range_))
# Expand aggregated cases into one row each.
logging.info("Rows before expansion: %d", len(data))
if len(data) > 150000:
logging.warning("Sheet %s has more than 150K rows, it should be split soon", s.name)
data.aggregated_num_cases = pd.to_numeric(data.aggregated_num_cases, errors='coerce')
data = duplicate_rows_per_column(data, "aggregated_num_cases")
logging.info("Rows after expansion: %d", len(data))
# Generate IDs for each row sequentially following the sheet_id-inc_int pattern.
data['ID'] = s.base_id + "-" + pd.Series(range(1, len(data)+1)).astype(str)
# Remove whitespace.
data = trim_df(data)
# Fix columns that can be fixed easily.
data.sex = fix_sex(data.sex)
# fix N/A => NA
for col in data.select_dtypes("string"):
data[col] = fix_na(data[col])
# Regex fixes
fixable, non_fixable = generate_error_tables(data)
if len(fixable) > 0:
logging.info('fixing %d regexps', len(fixable))
s.fix_cells(fixable)
data = values2dataframe(s.read_values(range_))
# ~ negates, here clean = data with IDs not in non_fixable IDs.
clean = data[~data.ID.isin(non_fixable.ID)]
clean = clean.drop('row', axis=1)
clean.sort_values(by='ID')
s.data = clean
non_fixable = non_fixable.sort_values(by='ID')
# Save error_reports
# These are separated by Sheet.
logging.info('Saving error reports')
directory = self.config['FILES']['ERRORS']
file_name = f'{s.name}.error-report.csv'
error_file = os.path.join(directory, file_name)
non_fixable.to_csv(error_file, index=False, header=True, encoding="utf-8")
self.for_github.append(error_file)
# Combine data from all sheets into a single datafile
all_data = []
for s in self.sheets:
logging.info("sheet %s had %d rows", s.name, len(s.data))
all_data.append(s.data)
all_data = pd.concat(all_data, ignore_index=True)
all_data = all_data.sort_values(by='ID')
logging.info("all_data has %d rows", len(all_data))
# Fill geo columns.
geocode_matched = 0
for i, row in all_data.iterrows():
geocode = self.geocoder.geocode(row.city, row.province, row.country)
if not geocode:
continue
geocode_matched += 1
all_data.at[i, 'latitude'] = geocode.lat
all_data.at[i, 'longitude'] = geocode.lng
all_data.at[i, 'geo_resolution'] = geocode.geo_resolution
all_data.at[i, 'location'] = geocode.location
all_data.at[i, 'admin3'] = geocode.admin3
all_data.at[i, 'admin2'] = geocode.admin2
all_data.at[i, 'admin1'] = geocode.admin1
all_data.at[i, 'admin_id'] = geocode.admin_id
all_data.at[i, 'country_new'] = geocode.country_new
logging.info("Geocode matched %d/%d", geocode_matched, len(all_data))
logging.info("Top 10 geocode misses: %s", self.geocoder.misses.most_common(10))
with open("geocode_misses.csv", "w") as f:
self.geocoder.write_misses_to_csv(f)
logging.info("Wrote all geocode misses to geocode_misses.csv")
# Reorganize csv columns so that they are in the same order as when we
# used to have those geolocation within the spreadsheet.
# This is to avoid breaking latestdata.csv consumers.
all_data = all_data[["ID","age","sex","city","province","country","latitude","longitude","geo_resolution","date_onset_symptoms","date_admission_hospital","date_confirmation","symptoms","lives_in_Wuhan","travel_history_dates","travel_history_location","reported_market_exposure","additional_information","chronic_disease_binary","chronic_disease","source","sequence_available","outcome","date_death_or_discharge","notes_for_discussion","location","admin3","admin2","admin1","country_new","admin_id","data_moderator_initials","travel_history_binary"]]
# save
logging.info("Saving files to disk")
dt = datetime.now().strftime('%Y-%m-%dT%H%M%S')
file_name = self.config['FILES']['DATA'].replace('TIMESTAMP', dt)
latest_name = os.path.join(self.config['FILES']['LATEST'], 'latestdata.csv')
all_data.to_csv(file_name, index=False, encoding="utf-8")
all_data.to_csv(latest_name, index=False, encoding="utf-8")
logging.info("Wrote %s, %s", file_name, latest_name)
self.for_github.extend([file_name, latest_name])
def push_to_github(self):
"""Pushes csv files created by Process to Github."""
logging.info("Pushing to github")
# Create script for uploading to github
script = 'set -e\n'
script += 'cd {}\n'.format(self.config['GIT']['REPO'])
script += 'git pull origin master\n'
for g in self.for_github:
script += f'git add {g}\n'
script += 'git commit -m "data update"\n'
script += 'git push origin master\n'
script += f'cd {os.getcwd()}\n'
print(script)
os.system(script)
| StarcoderdataPython |
1685358 | from django.db import models, migrations
import core.models
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20150126_1611'),
]
operations = [
migrations.AlterField(
model_name='person',
name='birth_date',
field=models.DateField(blank=True, help_text='Syntym\xe4aika muodossa 24.2.1994', null=True, verbose_name='Syntym\xe4aika', validators=[core.models.birth_date_validator]),
preserve_default=True,
),
]
| StarcoderdataPython |
39848 | {
"targets": [
{
"target_name": "gpio",
"sources": ["gpio.cc", "tizen-gpio.cc"]
}
]
} | StarcoderdataPython |
3269806 | import pytest
from django.urls import reverse
@pytest.mark.django_db
def test_plan_unit_detail(
django_db_setup, admin_client, plan_unit_factory, lease_test_data
):
# Add plan unit for lease area
plan_unit_factory(
identifier="PU1",
area=1000,
lease_area=lease_test_data["lease_area"],
is_master=True,
)
url = reverse("planunitlistwithidentifiers-list")
response = admin_client.get(url, content_type="application/json")
plan_unit_id = response.data["results"][0]["id"]
url = reverse("planunit-detail", kwargs={"pk": plan_unit_id})
response = admin_client.get(url, content_type="application/json")
assert response.status_code == 200, "%s %s" % (response.status_code, response.data)
| StarcoderdataPython |
3397921 | <reponame>yushroom/FishEngine_-Experiment<filename>GenProperty.py<gh_stars>1-10
def GenCPPProperty(type, name):
assert(name.startswith('m_'))
# if name.startswith('m_'):
# name = name[2:]
pretty_name = name[2:]
# print '+++++++++++++'
# print('')
if type in ('int', 'float', 'bool', 'uint32_t', 'bool') or type.endswith('*'):
# print '==== v1'
print('{0} Get{1}() const {{ return m_{1}; }}'.format(type, pretty_name))
print('void Set{1}({0} value) {{ m_{1} = value; }}'.format(type, pretty_name))
else:
# print '=== v2'
print('const {0}& Get{1}() const {{ return m_{1}; }}'.format(type, pretty_name))
print('void Set{1}(const {0}& value) {{ m_{1} = value; }}'.format(type, pretty_name))
print('')
def GenPythonDef(klass, name):
pretty_name = name[2:]
# print('.def("Get{1}", &{0}::Get{1})'.format(klass, pretty_name))
# print('.def("Set{1}", &{0}::Set{1})'.format(klass, pretty_name))
name2 = pretty_name[0].lower() + pretty_name[1:]
print('.def_property("{mass}", &{Rigidbody}::Get{Mass}, &{Rigidbody}::Set{Mass})'.format(mass=name2, Mass=pretty_name, Rigidbody=klass))
template3 = '''
@property
def {center}(self)->{Vector3}:
return self.cpp.Get{Center}()
@{center}.setter
def {center}(self, value:{Vector3}):
self.cpp.Set{Center}(value)'''
def GenPythonProperty(type, name):
pretty_name = name[2:]
name2 = pretty_name[0].lower() + pretty_name[1:]
print(template3.format(center=name2, Vector3=type, Center=pretty_name))
klass = 'Prefab'
s = '''
PrefabModification m_Modification;
Prefab* m_ParentPrefab = nullptr;
GameObject* m_RootGameObject = nullptr;
bool m_IsPrefabParent = true;
'''
s = s.strip().split('\n')
s = [x.strip() for x in s]
pairs = []
for line in s:
line = line.strip()
if line.endswith(';'):
line = line[:-1]
if line.startswith('//'):
continue
t = line.split()
if len(t) >= 2:
pairs.append((t[0], t[1]))
print(pairs)
for type, name in pairs:
GenCPPProperty(type, name)
for _, name in pairs:
GenPythonDef(klass, name)
# for type, name in pairs:
# GenPythonProperty(type, name) | StarcoderdataPython |
4821735 | <gh_stars>1-10
from __future__ import absolute_import, print_function, division
import numpy as np
from xmeos.models import core
import pytest
import matplotlib.pyplot as plt
import matplotlib as mpl
from abc import ABCMeta, abstractmethod
import copy
#====================================================================
# Define "slow" tests
# - indicated by @slow decorator
# - slow tests are run only if using --runslow cmd line arg
#====================================================================
slow = pytest.mark.skipif(
not pytest.config.getoption("--runslow"),
reason="need --runslow option to run"
)
#====================================================================
# SEC:3 Test Admin Funcs
#====================================================================
def test_shift_poly():
TOL = 1e-6
V0 = 0.408031
coefs = np.array([127.116,-3503.98,20724.4,-60212.,86060.5,-48520.4])
shift_coefs = core.shift_poly(coefs, xscale=V0)
undo_coefs = core.unshift_poly(shift_coefs, xscale=V0)
assert np.all(np.abs(coefs-undo_coefs) < TOL), \
'Shifted and unshifted polynomial coefs must match originals within TOL everywhere.'
# shift_coefs = np.array([-105.88087217, -1.97201769, 4.4888164, -36.1310988 ,
# -358.36482008, -548.76975936])
V = V0*np.linspace(0.6,1.2,101)
dev = np.polyval(shift_coefs[::-1], V/V0-1) - np.polyval(coefs[::-1], V)
assert np.all(np.abs(dev) < TOL), \
'Shifted polynomial curve must match original to within TOL everywhere.'
#====================================================================
| StarcoderdataPython |
3295643 | degree = int(input())
time_of_the_day = input()
outfit = ""
shoes = ""
if time_of_the_day == "Morning":
if 10 <= degree <= 18:
outfit = "Sweatshirt"
shoes = "Sneakers"
elif 18 < degree <= 24:
outfit = "Shirt"
shoes = "Moccasins"
else:
outfit = "T-Shirt"
shoes = "Sandals"
if time_of_the_day == "Afternoon":
if 10 <= degree <= 18:
outfit = "Shirt"
shoes = "Moccasins"
elif 18 < degree <= 24:
outfit = "T-Shirt"
shoes = "Sandals"
else:
outfit = "Swim Suit"
shoes = "Barefoot"
if time_of_the_day == "Evening":
if 10 <= degree <= 18:
outfit = "Shirt"
shoes = "Moccasins"
elif 18 < degree <= 24:
outfit = "Shirt"
shoes = "Moccasins"
else:
outfit = "Shirt"
shoes = "Moccasins"
print(f"It's {degree} degrees, get your {outfit} and {shoes}.")
| StarcoderdataPython |
3245724 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 30 23:26:47 2020
@author: Js0805
"""
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import autocorrelation_plot
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
dataset_1= pd.read_excel('dataset.xlsx',sheet_name='Sheet1')
cols =['AT','WS','RH','BP','SR']
dataset_1.drop(cols, axis=1, inplace=True)
dataset_1 = dataset_1.sort_values('Date')
dataset_1.isnull().sum()
dataset_1 = dataset_1.groupby('Date')['Energy (Meters)'].sum().reset_index()
dataset_1 = dataset_1.set_index('Date')
dataset_1.index
print(dataset_1.head())
dataset_1.plot()
plt.show()
autocorrelation_plot(dataset_1)
model = ARIMA(dataset_1, order=(10,1,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
# plot residual errors
from pandas import DataFrame
residuals = DataFrame(model_fit.resid)
residuals.plot()
plt.show()
residuals.plot(kind='kde')
plt.show()
print(residuals.describe())
X = dataset_1.values
size = int(len(X) * 0.66)
train, test = X[0:size], X[size:len(X)]
history = [x for x in train]
predictions = list()
for t in range(len(test)):
model = ARIMA(history, order=(5,1,0))
model_fit = model.fit(disp=0)
output = model_fit.forecast()
yhat = output[0]
predictions.append(yhat)
obs = test[t]
history.append(obs)
print('predicted=%f, expected=%f' % (yhat, obs))
error = mean_squared_error(test, predictions)
print('Test MSE: %.3f' % error)
# plot
plt.plot(test)
plt.plot(predictions, color='red')
plt.show() | StarcoderdataPython |
4835675 | """
from dataclasses import dataclass
@dataclass
class HsmScript:
name: str
default_params: dict
use_large_stack: bool = True
"""
class HsmScript(object):
def __init__(self, name: str, default_params: dict, use_large_stack: bool = True):
self.name = name
self.default_params = default_params
self.use_large_stack = use_large_stack
| StarcoderdataPython |
1724622 | #!/usr/bin/env python
"""
--------------------------------------------------------------------------------
Created: <NAME> 9/24/14
This script reads in an per base bedtools gff, the source gff file, and then
calculates the length and average coverage depth of each feature. It also
concats start stop positions for creating unique names
bedtools coverage output: Output (tab delimited) after each base of each feature
in B:
1) depth
2) # bases at depth
Input per base gff (coverage output)
contig-100000014 FIG CDS 15388 17094 . + 1 ID=fig|6666666.84680.peg.14;Name=Tungsten-containing aldehyde:ferredoxin oxidoreductase (EC 1.2.7.5);Ontology_term=KEGG_ENZYME:1.2.7.5 1 60
contig-100000014 FIG CDS 15388 17094 . + 1 ID=fig|6666666.84680.peg.14;Name=Tungsten-containing aldehyde:ferredoxin oxidoreductase (EC 1.2.7.5);Ontology_term=KEGG_ENZYME:1.2.7.5 2 60
contig-100000014 FIG CDS 15388 17094 . + 1 ID=fig|6666666.84680.peg.14;Name=Tungsten-containing aldehyde:ferredoxin oxidoreductase (EC 1.2.7.5);Ontology_term=KEGG_ENZYME:1.2.7.5 3 60
contig-100000014 FIG CDS 15388 17094 . + 1 ID=fig|6666666.84680.peg.14;Name=Tungsten-containing aldehyde:ferredoxin oxidoreductase (EC 1.2.7.5);Ontology_term=KEGG_ENZYME:1.2.7.5 4 64
Input per base txt (genomcov output)
contig-100000014 1 60
contig-100000014 2 60
contig-100000014 3 60
Output coverage file format:
contig-01\tlength\taverage coverage
etc...
--------------------------------------------------------------------------------
usage: contig_coverage_from_perbase_gff.py -i perbase.gff -t -o outfile.file
"""
#-------------------------------------------------------------------------------
#Header - Linkers, Libs, Constants
from string import strip
from numpy import mean
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from itertools import islice
import csv
#-------------------------------------------------------------------------------
#function declarations
#-------------------------------------------------------------------------------
#Body
print "Running..."
if __name__ == '__main__':
parser = ArgumentParser(usage = "contig_coverage_from_perbase_gff.py -i \
perbase.gff -t -o outfile.file",
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-i", "--perbase_gff", action="store",
dest="inputfilename",
help="perbase (bedtools coverage -d) gff file")
parser.add_argument("-t", "--perbase_txt", action="store_true",
dest="tabflag",
help="set True to use perbase genome (bedtools genomecov \
-d) txt file")
parser.add_argument("-o", "--output_file", action="store",
dest="outputfilename",
help="output coverage file name")
options = parser.parse_args()
mandatories = ["inputfilename", "outputfilename"]
for m in mandatories:
if not options.__dict__[m]:
print "\nError: Missing Arguments\n"
parser.print_help()
exit(-1)
inputfilename = options.inputfilename
outputfilename = options.outputfilename
print "Calculating per base coverages..."
if options.tabflag:
with open(inputfilename, 'U') as inputfile:
parse_dict = {}
orderlist = []
for line in inputfile:
parsedline = line.strip().split('\t')
featurename = parsedline[0]
cov = parsedline[-1]
if featurename in parse_dict:
parse_dict[featurename].append(cov)
else:
parse_dict[featurename] = [cov]
orderlist.append(featurename)
with open(outputfilename, 'w') as outputfile:
writer = csv.writer(outputfile, dialect='excel-tab')
for entry in orderlist:
featurename = entry
seqlen = len(parse_dict[entry])
avgcov = mean(map(int,parse_dict[entry]))
writer.writerow([featurename, seqlen, avgcov])
else:
with open(outputfilename, 'w') as outputfile:
writer = csv.writer(outputfile, dialect='excel-tab')
with open(inputfilename, 'U') as inputfile:
for line in inputfile:
parsedline = line.strip().split('\t')
featurename = parsedline[0] + '_' + parsedline[3] + '_' + parsedline[4]
seqlen = int(parsedline[4])-int(parsedline[3])
countlist = [parsedline[-1]]
for countline in islice(inputfile,seqlen):
countlist.append(countline.strip().split('\t')[-1])
avgcov = mean(map(int,countlist))
writer.writerow([featurename, seqlen, avgcov])
print "Done!"
| StarcoderdataPython |
159027 | <reponame>likein12/comprog-cffi-pypy-set<gh_stars>0
coset_init = lib.coset_init_ll
insert = lib.cs_insert_ll
remove = lib.cs_remove_ll
get_s = lib.cs_get_size_ll
clear = lib.cs_clear_ll
get_min = lib.cs_min_ll
get_max = lib.cs_min_ll
upper_bound = lib.cs_upper_bound_ll
rupper_bound = lib.cs_rupper_bound_ll
get_k = lib.cs_get_k_ll | StarcoderdataPython |
1695172 | <gh_stars>0
import setuptools
import pubsub_zmq
def get_long_desc():
with open("README.rst", "r") as fh:
return fh.read()
setuptools.setup(
name="pubsub-zmq",
version=pubsub_zmq.__version__,
author="<NAME>",
author_email="<EMAIL>",
description="A tiny library that implements the Asynchronous Pub-Sub communication pattern using "
"ZeroMQ PUB and SUB sockets",
long_description=get_long_desc(),
long_description_content_type="text/x-rst",
url="https://github.com/d2gex/pubsub-zmq",
packages=['pubsub_zmq'],
python_requires='>=3.6',
install_requires=['pyzmq>=18.1.0'],
tests_require=['pytest>=5.0.1', 'pymulproc>=0.1.1'],
platforms='any',
zip_safe=True,
classifiers=[
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| StarcoderdataPython |
3368965 | # Generated by Django 2.1.7 on 2019-05-21 13:27
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='ERROR', max_length=255)),
],
),
migrations.CreateModel(
name='SkillCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='ERROR', max_length=100)),
],
),
]
| StarcoderdataPython |
97644 | <gh_stars>0
label_name = []
with open("label_name.txt",encoding='utf-8') as file:
for line in file.readlines():
line = line.strip()
name = (line.split('-')[-1])
if name.count('|') > 0:
name = name.split('|')[-1]
print(name)
label_name.append((name))
for item in label_name:
with open('label_name_1.txt','a') as file:
file.write(item+'\n') | StarcoderdataPython |
194036 | #!/usr/bin/python
import glob
import logging
import os.path
import sys
import configure
from cs.CsHelper import mkdir
from cs.CsPasswordService import CsPasswordServiceVMConfig
from databag.merge import QueueFile
OCCURRENCES = 1
LOG_DIR="/var/log/cosmic/router"
if not os.path.isdir(LOG_DIR):
mkdir(LOG_DIR, 0o755, False)
logging.basicConfig(filename="/var/log/cosmic/router/router.log", level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(filename)s %(funcName)s:%(lineno)d %(message)s')
# first commandline argument should be the file to process
if len(sys.argv) != 2:
logging.error("Invalid usage")
sys.exit(1)
# FIXME we should get this location from a configuration class
jsonPath = "/var/cache/cloud/%s"
jsonCmdConfigPath = jsonPath % sys.argv[1]
def finish_config():
# Converge
returncode = configure.main(sys.argv)
sys.exit(returncode)
def process(do_merge=True):
logging.info("Processing JSON file %s" % sys.argv[1])
qf = QueueFile()
qf.setFile(sys.argv[1])
qf.do_merge = do_merge
qf.load(None)
return qf
def process_file():
logging.info("process_file")
process()
# Converge
finish_config()
def process_vmpasswd():
logging.info("process_vmpassword")
qf = process(False)
logging.info("Sending password to password server")
returncode = CsPasswordServiceVMConfig(qf.getData()).process()
# TODO: use the returncode as exit code, but for now we just log the exit code
logging.info("The vmpassword processing ended with exit code %d" % returncode)
#sys.exit(returncode)
filename = min(glob.iglob(jsonCmdConfigPath + '*'), key=os.path.getctime)
if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
logging.error("You are telling me to process %s, but i can't access it" % jsonCmdConfigPath)
sys.exit(1)
if sys.argv[1].startswith("vm_password.json"):
logging.info("Processing incoming vm_passwd file => %s" % sys.argv[1])
process_vmpasswd()
else:
logging.info("Processing incoming file => %s" % sys.argv[1])
process_file()
| StarcoderdataPython |
3213010 | <reponame>saewashi/R1-peer-review-blcmill<filename>funone.py
#!/usr/bin/env python
'''
For this exercise, draw a circle wherever the user clicks the mouse
'''
import sys, pygame
import random
from datetime import datetime# #Was
assert sys.version_info >= (3,4), 'This script requires at least Python 3.4'
screen_size = (800,600)
FPS = 60
black = (0,0,0)
white = (255,255,255)
def main():
pygame.init()
screen = pygame.display.set_mode(screen_size)
font = pygame.font.SysFont("arial",64) # not sure why a font is here. Maybe explain why there's a font?
clock = pygame.time.Clock()
(x,y,radius) = (100,100,20)
pos = (100, 100) #you have to initialize pos here before setting it
screen.fill(black)
colorCycle = 1
radius = 20
while True:
clock.tick(FPS)
pos = (random.randrange(800), random.randrange(600)) #random position
if (colorCycle % 5 == 0): # depending on mouse click, execute one of these
#COLORS ARE DETERMINED BY POSITION # Maybe there is a way you can slow down circles?
color = (255, 255 * pos[0]/800, 255 * pos[1]/600) #Colorscheme 1
elif (colorCycle % 5 == 1):
color = (255 * pos[0]/800 , 255, 255 * pos[1]/600) #Colorscheme 2
elif (colorCycle % 5 == 2):
color = (255 * pos[0]/800 , 255 * pos[1]/600, 255) #Colorscheme 3
elif (colorCycle % 5 == 3):
color = (0, 0, 0) #Colorscheme 4 (black)
elif (colorCycle % 5 == 4):
color = (255 * pos[0]/800, 255 * pos[0]/800, 255 * pos[0]/800) #Colorscheme 5 (B&W Gradient)
# This is a great step because I like how you incporprate gradient colors!
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit(0)
if event.type == pygame.MOUSEBUTTONUP:
colorCycle += 1 #Change color
pygame.draw.circle(screen, color, pos, radius) #It also changes color!
pygame.display.flip()
if __name__ == '__main__':
main()
| StarcoderdataPython |
3318422 | <reponame>pcen/pulumi
# coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
import pulumi_aws
import pulumi_kubernetes
__all__ = ['ComponentArgs', 'Component']
@pulumi.input_type
class ComponentArgs:
def __init__(__self__, *,
required_metadata: pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs'],
required_metadata_array: pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]],
required_metadata_map: pulumi.Input[Mapping[str, pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]],
metadata: Optional[pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']] = None,
metadata_array: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]] = None,
metadata_map: Optional[pulumi.Input[Mapping[str, pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]] = None):
"""
The set of arguments for constructing a Component resource.
"""
pulumi.set(__self__, "required_metadata", required_metadata)
pulumi.set(__self__, "required_metadata_array", required_metadata_array)
pulumi.set(__self__, "required_metadata_map", required_metadata_map)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if metadata_array is not None:
pulumi.set(__self__, "metadata_array", metadata_array)
if metadata_map is not None:
pulumi.set(__self__, "metadata_map", metadata_map)
@property
@pulumi.getter(name="requiredMetadata")
def required_metadata(self) -> pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']:
return pulumi.get(self, "required_metadata")
@required_metadata.setter
def required_metadata(self, value: pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']):
pulumi.set(self, "required_metadata", value)
@property
@pulumi.getter(name="requiredMetadataArray")
def required_metadata_array(self) -> pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]:
return pulumi.get(self, "required_metadata_array")
@required_metadata_array.setter
def required_metadata_array(self, value: pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]):
pulumi.set(self, "required_metadata_array", value)
@property
@pulumi.getter(name="requiredMetadataMap")
def required_metadata_map(self) -> pulumi.Input[Mapping[str, pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]:
return pulumi.get(self, "required_metadata_map")
@required_metadata_map.setter
def required_metadata_map(self, value: pulumi.Input[Mapping[str, pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]):
pulumi.set(self, "required_metadata_map", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]:
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter(name="metadataArray")
def metadata_array(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]]:
return pulumi.get(self, "metadata_array")
@metadata_array.setter
def metadata_array(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]]):
pulumi.set(self, "metadata_array", value)
@property
@pulumi.getter(name="metadataMap")
def metadata_map(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]]:
return pulumi.get(self, "metadata_map")
@metadata_map.setter
def metadata_map(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]]):
pulumi.set(self, "metadata_map", value)
class Component(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
metadata: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]] = None,
metadata_array: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]]] = None,
metadata_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[pulumi.InputType['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]]] = None,
required_metadata: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]] = None,
required_metadata_array: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]]] = None,
required_metadata_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[pulumi.InputType['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]]] = None,
__props__=None):
"""
Create a Component resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ComponentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a Component resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param ComponentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ComponentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
metadata: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]] = None,
metadata_array: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]]] = None,
metadata_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[pulumi.InputType['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]]] = None,
required_metadata: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]] = None,
required_metadata_array: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]]] = None,
required_metadata_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[pulumi.InputType['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ComponentArgs.__new__(ComponentArgs)
__props__.__dict__["metadata"] = metadata
__props__.__dict__["metadata_array"] = metadata_array
__props__.__dict__["metadata_map"] = metadata_map
if required_metadata is None and not opts.urn:
raise TypeError("Missing required property 'required_metadata'")
__props__.__dict__["required_metadata"] = required_metadata
if required_metadata_array is None and not opts.urn:
raise TypeError("Missing required property 'required_metadata_array'")
__props__.__dict__["required_metadata_array"] = required_metadata_array
if required_metadata_map is None and not opts.urn:
raise TypeError("Missing required property 'required_metadata_map'")
__props__.__dict__["required_metadata_map"] = required_metadata_map
__props__.__dict__["provider"] = None
__props__.__dict__["security_group"] = None
__props__.__dict__["storage_classes"] = None
super(Component, __self__).__init__(
'example::Component',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Component':
"""
Get an existing Component resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ComponentArgs.__new__(ComponentArgs)
__props__.__dict__["provider"] = None
__props__.__dict__["security_group"] = None
__props__.__dict__["storage_classes"] = None
return Component(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def provider(self) -> pulumi.Output[Optional['pulumi_kubernetes.Provider']]:
return pulumi.get(self, "provider")
@property
@pulumi.getter(name="securityGroup")
def security_group(self) -> pulumi.Output['pulumi_aws.ec2.SecurityGroup']:
return pulumi.get(self, "security_group")
@property
@pulumi.getter(name="storageClasses")
def storage_classes(self) -> pulumi.Output[Optional[Mapping[str, 'pulumi_kubernetes.storage.v1.StorageClass']]]:
return pulumi.get(self, "storage_classes")
| StarcoderdataPython |
3307300 | <gh_stars>1000+
"""Exception classes used by Pexpect"""
import traceback
import sys
class ExceptionPexpect(Exception):
'''Base class for all exceptions raised by this module.
'''
def __init__(self, value):
super(ExceptionPexpect, self).__init__(value)
self.value = value
def __str__(self):
return str(self.value)
def get_trace(self):
'''This returns an abbreviated stack trace with lines that only concern
the caller. In other words, the stack trace inside the Pexpect module
is not included. '''
tblist = traceback.extract_tb(sys.exc_info()[2])
tblist = [item for item in tblist if ('pexpect/__init__' not in item[0])
and ('pexpect/expect' not in item[0])]
tblist = traceback.format_list(tblist)
return ''.join(tblist)
class EOF(ExceptionPexpect):
'''Raised when EOF is read from a child.
This usually means the child has exited.'''
class TIMEOUT(ExceptionPexpect):
'''Raised when a read time exceeds the timeout. '''
| StarcoderdataPython |
32679 | # encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: <NAME> (<EMAIL>)
#
from __future__ import absolute_import, division, unicode_literals
from mo_future import is_text, is_binary
import gzip
from io import BytesIO
import struct
from tempfile import TemporaryFile
import time
import zipfile
import zlib
from mo_future import PY3, long, text
from mo_logs import Log
from mo_logs.exceptions import suppress_exception
import mo_math
# LIBRARY TO DEAL WITH BIG DATA ARRAYS AS ITERATORS OVER (IR)REGULAR SIZED
# BLOCKS, OR AS ITERATORS OVER LINES
DEBUG = False
MIN_READ_SIZE = 8 * 1024
MAX_STRING_SIZE = 1 * 1024 * 1024
class FileString(text):
"""
ACTS LIKE A STRING, BUT IS A FILE
"""
def __init__(self, file):
self.file = file
def decode(self, encoding):
if encoding != "utf8":
Log.error("can not handle {{encoding}}", encoding= encoding)
self.encoding = encoding
return self
def split(self, sep):
if sep != "\n":
Log.error("Can only split by lines")
self.file.seek(0)
return LazyLines(self.file)
def __len__(self):
temp = self.file.tell()
self.file.seek(0, 2)
file_length = self.file.tell()
self.file.seek(temp)
return file_length
def __getslice__(self, i, j):
j = mo_math.min(j, len(self))
if j - 1 > 2 ** 28:
Log.error("Slice of {{num}} bytes is too big", num=j - i)
try:
self.file.seek(i)
output = self.file.read(j - i).decode(self.encoding)
return output
except Exception as e:
Log.error(
"Can not read file slice at {{index}}, with encoding {{encoding}}",
index=i,
encoding=self.encoding,
cause=e
)
def __add__(self, other):
self.file.seek(0, 2)
self.file.write(other)
def __radd__(self, other):
new_file = TemporaryFile()
new_file.write(other)
self.file.seek(0)
for l in self.file:
new_file.write(l)
new_file.seek(0)
return FileString(new_file)
def __getattr__(self, attr):
return getattr(self.file, attr)
def __del__(self):
self.file, temp = None, self.file
if temp:
temp.close()
def __iter__(self):
self.file.seek(0)
return self.file
if PY3:
def __str__(self):
if self.encoding == "utf8":
temp = self.file.tell()
self.file.seek(0, 2)
file_length = self.file.tell()
self.file.seek(0)
output = self.file.read(file_length).decode(self.encoding)
self.file.seek(temp)
return output
else:
def __unicode__(self):
if self.encoding == "utf8":
temp = self.file.tell()
self.file.seek(0, 2)
file_length = self.file.tell()
self.file.seek(0)
output = self.file.read(file_length).decode(self.encoding)
self.file.seek(temp)
return output
def safe_size(source):
"""
READ THE source UP TO SOME LIMIT, THEN COPY TO A FILE IF TOO BIG
RETURN A str() OR A FileString()
"""
if source is None:
return None
total_bytes = 0
bytes = []
b = source.read(MIN_READ_SIZE)
while b:
total_bytes += len(b)
bytes.append(b)
if total_bytes > MAX_STRING_SIZE:
try:
data = FileString(TemporaryFile())
for bb in bytes:
data.write(bb)
del bytes
del bb
b = source.read(MIN_READ_SIZE)
while b:
total_bytes += len(b)
data.write(b)
b = source.read(MIN_READ_SIZE)
data.seek(0)
Log.note("Using file of size {{length}} instead of str()", length= total_bytes)
return data
except Exception as e:
Log.error("Could not write file > {{num}} bytes", num= total_bytes, cause=e)
b = source.read(MIN_READ_SIZE)
data = b"".join(bytes)
del bytes
return data
class LazyLines(object):
"""
SIMPLE LINE ITERATOR, BUT WITH A BIT OF CACHING TO LOOK LIKE AN ARRAY
"""
def __init__(self, source, encoding="utf8"):
"""
ASSUME source IS A LINE ITERATOR OVER utf8 ENCODED BYTE STREAM
"""
self.source = source
self.encoding = encoding
self._iter = self.__iter__()
self._last = None
self._next = 0
def __getslice__(self, i, j):
if i == self._next - 1:
def output():
yield self._last
for v in self._iter:
self._next += 1
yield v
return output()
if i == self._next:
return self._iter
Log.error("Do not know how to slice this generator")
def __iter__(self):
def output():
for v in self.source:
self._last = v
yield self._last
return output()
def __getitem__(self, item):
try:
if item == self._next:
self._next += 1
return self._iter.next()
elif item == self._next - 1:
return self._last
else:
Log.error("can not index out-of-order too much")
except Exception as e:
Log.error("Problem indexing", e)
class CompressedLines(LazyLines):
"""
KEEP COMPRESSED HTTP (content-type: gzip) IN BYTES ARRAY
WHILE PULLING OUT ONE LINE AT A TIME FOR PROCESSING
"""
def __init__(self, compressed, encoding="utf8"):
"""
USED compressed BYTES TO DELIVER LINES OF TEXT
LIKE LazyLines, BUT HAS POTENTIAL TO seek()
"""
self.compressed = compressed
LazyLines.__init__(self, None, encoding=encoding)
self._iter = self.__iter__()
def __iter__(self):
return LazyLines(ibytes2ilines(compressed_bytes2ibytes(self.compressed, MIN_READ_SIZE), encoding=self.encoding)).__iter__()
def __getslice__(self, i, j):
if i == self._next:
return self._iter
if i == 0:
return self.__iter__()
if i == self._next - 1:
def output():
yield self._last
for v in self._iter:
yield v
return output()
Log.error("Do not know how to slice this generator")
def __getitem__(self, item):
try:
if item == self._next:
self._last = self._iter.next()
self._next += 1
return self._last
elif item == self._next - 1:
return self._last
else:
Log.error("can not index out-of-order too much")
except Exception as e:
Log.error("Problem indexing", e)
def __radd__(self, other):
new_file = TemporaryFile()
new_file.write(other)
self.file.seek(0)
for l in self.file:
new_file.write(l)
new_file.seek(0)
return FileString(new_file)
def compressed_bytes2ibytes(compressed, size):
"""
CONVERT AN ARRAY OF BYTES TO A BYTE-BLOCK GENERATOR
USEFUL IN THE CASE WHEN WE WANT TO LIMIT HOW MUCH WE FEED ANOTHER
GENERATOR (LIKE A DECOMPRESSOR)
"""
decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
for i in range(0, mo_math.ceiling(len(compressed), size), size):
try:
block = compressed[i: i + size]
yield decompressor.decompress(block)
except Exception as e:
Log.error("Not expected", e)
def ibytes2ilines(generator, encoding="utf8", flexible=False, closer=None):
"""
CONVERT A GENERATOR OF (ARBITRARY-SIZED) byte BLOCKS
TO A LINE (CR-DELIMITED) GENERATOR
:param generator:
:param encoding: None TO DO NO DECODING
:param closer: OPTIONAL FUNCTION TO RUN WHEN DONE ITERATING
:return:
"""
decode = get_decoder(encoding=encoding, flexible=flexible)
_buffer = generator.next()
s = 0
e = _buffer.find(b"\n")
while True:
while e == -1:
try:
next_block = generator.next()
_buffer = _buffer[s:] + next_block
s = 0
e = _buffer.find(b"\n")
except StopIteration:
_buffer = _buffer[s:]
del generator
if closer:
closer()
if _buffer:
yield decode(_buffer)
return
yield decode(_buffer[s:e])
s = e + 1
e = _buffer.find(b"\n", s)
def ibytes2icompressed(source):
yield (
b'\037\213\010\000' + # Gzip file, deflate, no filename
struct.pack('<L', long(time.time())) + # compression start time
b'\002\377' # maximum compression, no OS specified
)
crc = zlib.crc32(b"")
length = 0
compressor = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
for d in source:
crc = zlib.crc32(d, crc) & 0xffffffff
length += len(d)
chunk = compressor.compress(d)
if chunk:
yield chunk
yield compressor.flush()
yield struct.pack("<2L", crc, length & 0xffffffff)
class GzipLines(CompressedLines):
"""
SAME AS CompressedLines, BUT USING THE GzipFile FORMAT FOR COMPRESSED BYTES
"""
def __init__(self, compressed, encoding="utf8"):
CompressedLines.__init__(self, compressed, encoding=encoding)
def __iter__(self):
buff = BytesIO(self.compressed)
return LazyLines(gzip.GzipFile(fileobj=buff, mode='r'), encoding=self.encoding).__iter__()
class ZipfileLines(CompressedLines):
"""
SAME AS CompressedLines, BUT USING THE ZipFile FORMAT FOR COMPRESSED BYTES
"""
def __init__(self, compressed, encoding="utf8"):
CompressedLines.__init__(self, compressed, encoding=encoding)
def __iter__(self):
buff = BytesIO(self.compressed)
archive = zipfile.ZipFile(buff, mode='r')
names = archive.namelist()
if len(names) != 1:
Log.error("*.zip file has {{num}} files, expecting only one.", num= len(names))
stream = archive.open(names[0], "r")
return LazyLines(sbytes2ilines(stream), encoding=self.encoding).__iter__()
def icompressed2ibytes(source):
"""
:param source: GENERATOR OF COMPRESSED BYTES
:return: GENERATOR OF BYTES
"""
decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
last_bytes_count = 0 # Track the last byte count, so we do not show too many debug lines
bytes_count = 0
for bytes_ in source:
try:
data = decompressor.decompress(bytes_)
except Exception as e:
Log.error("problem", cause=e)
bytes_count += len(data)
if mo_math.floor(last_bytes_count, 1000000) != mo_math.floor(bytes_count, 1000000):
last_bytes_count = bytes_count
DEBUG and Log.note("bytes={{bytes}}", bytes=bytes_count)
yield data
def scompressed2ibytes(stream):
"""
:param stream: SOMETHING WITH read() METHOD TO GET MORE BYTES
:return: GENERATOR OF UNCOMPRESSED BYTES
"""
def more():
try:
while True:
bytes_ = stream.read(4096)
if not bytes_:
return
yield bytes_
except Exception as e:
Log.error("Problem iterating through stream", cause=e)
finally:
with suppress_exception:
stream.close()
return icompressed2ibytes(more())
def sbytes2ilines(stream, encoding="utf8", closer=None):
"""
CONVERT A STREAM (with read() method) OF (ARBITRARY-SIZED) byte BLOCKS
TO A LINE (CR-DELIMITED) GENERATOR
"""
def read():
try:
while True:
bytes_ = stream.read(4096)
if not bytes_:
return
yield bytes_
except Exception as e:
Log.error("Problem iterating through stream", cause=e)
finally:
try:
stream.close()
except Exception:
pass
if closer:
try:
closer()
except Exception:
pass
return ibytes2ilines(read(), encoding=encoding)
def get_decoder(encoding, flexible=False):
"""
RETURN FUNCTION TO PERFORM DECODE
:param encoding: STRING OF THE ENCODING
:param flexible: True IF YOU WISH TO TRY OUR BEST, AND KEEP GOING
:return: FUNCTION
"""
if encoding == None:
def no_decode(v):
return v
return no_decode
elif flexible:
def do_decode1(v):
return v.decode(encoding, 'ignore')
return do_decode1
else:
def do_decode2(v):
return v.decode(encoding)
return do_decode2
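# --- Illustrative usage sketch (added; not part of the original module) ---
# get_decoder() returns a plain decoding function, so the "flexible" variant can be
# used to tolerate malformed byte sequences. The sample byte strings below are
# assumptions chosen only to show the difference between the two modes.
def _example_get_decoder():
    strict = get_decoder("utf8")                   # raises on invalid UTF-8
    lenient = get_decoder("utf8", flexible=True)   # silently drops bad bytes
    ok = strict(b"caf\xc3\xa9")                    # -> "café"
    salvaged = lenient(b"caf\xe9")                 # -> "caf" (invalid byte ignored)
    return ok, salvaged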
| StarcoderdataPython |
78892 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
r"""
A example workflow for task dependent.
This example will create two workflows named `task_dependent` and `task_dependent_external`.
`task_dependent` is true workflow define and run task dependent, while `task_dependent_external`
define outside workflow and task from dependent.
After this script submit, we would get workflow as below:
task_dependent_external:
task_1
task_2
task_3
task_dependent:
task_dependent(this task dependent on task_dependent_external.task_1 and task_dependent_external.task_2).
"""
from pydolphinscheduler.constants import ProcessDefinitionDefault
from pydolphinscheduler.core.process_definition import ProcessDefinition
from pydolphinscheduler.tasks.dependent import And, Dependent, DependentItem, Or
from pydolphinscheduler.tasks.shell import Shell
with ProcessDefinition(
name="task_dependent_external",
tenant="tenant_exists",
) as pd:
task_1 = Shell(name="task_1", command="echo task 1")
task_2 = Shell(name="task_2", command="echo task 2")
task_3 = Shell(name="task_3", command="echo task 3")
pd.submit()
with ProcessDefinition(
name="task_dependent_example",
tenant="tenant_exists",
) as pd:
task = Dependent(
name="task_dependent",
dependence=And(
Or(
DependentItem(
project_name=ProcessDefinitionDefault.PROJECT,
process_definition_name="task_dependent_external",
dependent_task_name="task_1",
),
DependentItem(
project_name=ProcessDefinitionDefault.PROJECT,
process_definition_name="task_dependent_external",
dependent_task_name="task_2",
),
)
),
)
pd.submit()
| StarcoderdataPython |
1719571 | '''
Created on 21 Dec. 2020
@author: robert
'''
import math
from trajectory.Environment.Constants import Feet2Meter
from trajectory.Environment.Earth import EarthRadiusMeters
from trajectory.Guidance.GeographicalPointFile import GeographicalPoint
class RunWay(GeographicalPoint):
'''
The Charles De Gaulle airport has 2 configurations, depending on the wind directions.
However, in both configurations Eastward and Westward of Charles de Gaulle:
- The Run-ways 08R/26L and 09L/27R (far from the terminal) are mainly used for landings.
- The Run-ways 08L/26R and 09R/27L (near the terminal) are mainly used for take-offs.
Id, ICAO,Number, Length Meters, Length Feet, Orientation Degrees
The run-way true heading is defined as the angle
1) expressed in degrees
2) counted from the geographic NORTH,
3) clock-wise
4) with the run-way end point as the summit of the angle
Lat-long are the position of the end of the runway
1) end - if takeoff runway - is the location the aircraft starts its ground run
2) end - if landing runway - is the location where after the touch down and deceleration, the ac reaches the taxi speed
'''
className = ''
airportICAOcode = ''
Name = ''
LengthFeet = 0.0
TrueHeadingDegrees = 0.0
TakeOffLanding = ''
def __init__(self,
Name,
airportICAOcode,
LengthFeet,
TrueHeadingDegrees,
LatitudeDegrees,
LongitudeDegrees):
self.className = self.__class__.__name__
assert not(Name is None) and isinstance(Name, (str))
assert not(airportICAOcode is None) and isinstance(airportICAOcode, (str))
assert not (LengthFeet is None) and isinstance(LengthFeet, float) and (LengthFeet>0.0)
assert not (TrueHeadingDegrees is None) and isinstance(TrueHeadingDegrees, float)
assert (-360.0 <= TrueHeadingDegrees) and (TrueHeadingDegrees <= 360.0)
assert not (LatitudeDegrees is None) and (isinstance(LatitudeDegrees, float))
assert (-90.0 <= LatitudeDegrees) and (LatitudeDegrees <= 90.0)
assert not (LongitudeDegrees is None) and (isinstance(LongitudeDegrees, float))
assert (-180.0 <= LongitudeDegrees) and (LongitudeDegrees <= 180.0)
GeographicalPoint.__init__(self, LatitudeDegrees=LatitudeDegrees, LongitudeDegrees=LongitudeDegrees, AltitudeMeanSeaLevelMeters=EarthRadiusMeters)
self.airportICAOcode = airportICAOcode
self.Name = Name
self.LengthFeet = LengthFeet
self.TrueHeadingDegrees = TrueHeadingDegrees
def getName(self):
return self.Name
def getAirportICAOcode(self):
return self.airportICAOcode
def getLengthMeters(self):
return self.LengthFeet * Feet2Meter
def getTrueHeadingDegrees(self):
return self.TrueHeadingDegrees
def getLatitudeDegrees(self):
return self.LatitudeDegrees
def getLongitudeDegrees(self):
return self.LongitudeDegrees
def __str__(self):
strRunWay = self.className
strRunWay += ': runway= ' + self.Name
strRunWay += ' airport ICAO code= ' + self.airportICAOcode
strRunWay += ' length= {0:.2f} feet'.format(self.LengthFeet)
strRunWay += ' true heading= {0:.2f} degrees'.format(self.TrueHeadingDegrees)
strRunWay += ' latitude= {0:.2f} degrees'.format(self.LatitudeDegrees)
strRunWay += ' longitude= {0:.2f} degrees'.format(self.LongitudeDegrees)
return strRunWay
def getEndOfRunWay(self):
latitudeDegrees , longitudeDegrees = self.getGeoPointAtDistanceHeading(self.getLengthMeters(), self.getTrueHeadingDegrees())
return GeographicalPoint(latitudeDegrees , longitudeDegrees, EarthRadiusMeters)
def computeShortestDistanceToRunway(self, geographicalPoint):
''' https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line '''
assert not(geographicalPoint is None) and isinstance(geographicalPoint, (GeographicalPoint))
#x0, y0, z0 = geographicalPoint.convert2Cartesian()
#x0 , y0 = geographicalPoint.projectionMillerCylindrical()
x0 = geographicalPoint.new_x_coord()
y0 = geographicalPoint.new_y_coord()
''' these are cartesian coordinates from runway starting point '''
#x1, y1, z1 = self.convert2Cartesian()
#x1, y1 = self.projectionMillerCylindrical()
x1 = self.new_x_coord()
y1 = self.new_y_coord()
endOfRunway = self.getEndOfRunWay()
#x2, y2, z2 = endOfRunway.convert2Cartesian()
#x2 , y2 = endOfRunway.projectionMillerCylindrical()
x2 = endOfRunway.new_x_coord()
y2 = endOfRunway.new_y_coord()
''' Assuming that all three points are in the same Z plane '''
''' The numerator is twice the area of the triangle with its vertices at the three points, (x0, y0), P1 and P2 '''
numerator = math.fabs ( ( (x2 - x1) * (y1 - y0)) - ( (x1 - x0) * (y2 - y1) ) )
''' The denominator of this expression is the distance between P1 and P2 '''
denominator = math.sqrt( (x2 - x1)*(x2 - x1) + (y2 - y1)*(y2 - y1) )
return numerator / denominator
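# --- Illustrative sketch (added; not part of the original module) ---
# The computation above is the standard 2-D "distance from a point to a line through
# two points" formula. The standalone helper below shows the same arithmetic on plain
# planar coordinates, independent of the runway/geographic classes; the example
# numbers in the comment are assumptions.
def _point_to_line_distance(x0, y0, x1, y1, x2, y2):
    # twice the area of the triangle (x0, y0), (x1, y1), (x2, y2) ...
    numerator = math.fabs(((x2 - x1) * (y1 - y0)) - ((x1 - x0) * (y2 - y1)))
    # ... divided by the length of the base segment (x1, y1) -> (x2, y2)
    denominator = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
    return numerator / denominator
# Example: distance from (0, 1) to the line through (0, 0) and (1, 0) is 1.0.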
| StarcoderdataPython |
1793351 | <gh_stars>0
# Typed namedtuple
from typing import NamedTuple
class Employee(NamedTuple):
"""Represents an employee."""
name: str
id: int = 3
employee = Employee('Patrick', 2)
print(employee)
print(employee.__annotations__)
print(employee.__doc__)
# Another way to represent typed named tuples
Police = NamedTuple('Police', [('unit', str), ('id', int)])
police_man = Police('Emping', 24)
print(police_man)
print(police_man.__annotations__)
print(police_man.__doc__) | StarcoderdataPython |
48490 | <reponame>JTechnologies/daze-tool
#!/usr/bin/env python
import sys
import os
def split(delimiters, string, maxsplit=0):
import re
regexPattern = '|'.join(map(re.escape, delimiters))
return re.split(regexPattern, string, maxsplit)
def toHtml(input, outputPart="full"):
    """Compile Daze source text to HTML; outputPart selects 'full', 'head' or 'body'."""
head=input.split("$content")[0]
body=input.split("$content")[1]
head=split(["+!","+%","##"], head.strip("\n").replace("+%","+%~mv").replace("+!", "+!~v").replace("##","##~c"))
for i in range(len(head)):
head[i]=head[i].strip("\n")
head.pop(0)
variables={}
metaVariables={}
for i in range(len(head)):
if head[i][0]=="~" and head[i][1]=="v":
variables[head[i].split("=")[0][2::]]=head[i].split("=")[1]
elif head[i][0]=="~" and head[i][1]=="m" and head[i][2]=="v":
metaVariables[head[i].split("=")[0][3::]]=head[i].split("=")[1]
elif head[i][0]=="~" and head[i][1]=="c":
print(f"Comment at line {i+2}: '{head[i][2::]}'")
else:
print(f"!! Daze Syntax Error: Invalid Character in $variables at line {i+2}")
metahtml=""
for i in list(variables):
contents=variables[i]
for j in variables.keys():
if variables[j][0]=="'"or variables[j][0]=='"':
contents=contents.replace(f"!{j} ",variables[j][1:-1])
else:
contents=contents.replace(f"!{j} ",variables[j])
variables[i]=contents
for i in list(metaVariables):
contents=metaVariables[i]
for j in metaVariables.keys():
if metaVariables[j][0]=="'"or metaVariables[j][0]=='"':
contents=contents.replace(f"%{j} ",metaVariables[j][1:-1])
else:
contents=contents.replace(f"%{j} ",metaVariables[j])
metaVariables[i]=contents
for i in list(metaVariables):
contents=metaVariables[i]
for j in variables.keys():
if variables[j][0]=="'"or variables[j][0]=='"':
contents=contents.replace(f"!{j} ",variables[j][1:-1])
else:
contents=contents.replace(f"!{j} ",variables[j])
metaVariables[i]=contents
    for i in list(metaVariables):
        if i=="title":
            metahtml=metahtml+f'<title>{metaVariables[i][1:-1]}</title>\n'
        else:
            metahtml=metahtml+f'<meta name="{i}" content="{metaVariables[i][1:-1]}">\n'
body=body.strip("\n").split("(")
body.pop(0)
bodyhtml=""
for i in range(len(body)):
part=body[i].split(")")
element=part[0].strip(" ").split(": ")
contents=element[1]
if contents[0]=='"' and contents[-1]=='"' or contents[0]=="'" and contents[-1]=="'":
contents=contents[1:-1]
for i in variables.keys():
if variables[i][0]=="'"or variables[i][0]=='"':
contents=contents.replace(f"!{i} ",variables[i][1:-1])
else:
contents=contents.replace(f"!{i} ",variables[i])
attributes_unprocessed=part[1].strip("\n").split("+")
attributes={}
for i in range(len(attributes_unprocessed)):
attributes_unprocessed[i]=attributes_unprocessed[i].strip("\n")
attributes_unprocessed.pop(0)
for i in range(len(attributes_unprocessed)):
attributes[attributes_unprocessed[i].split("=")[0]]=attributes_unprocessed[i].split("=")[1]
strAttributes=""
for i in attributes.keys():
strAttributes=strAttributes+f' {i}="{attributes[i][1:-1]}"'
if element[0]=="img":
bodyhtml=bodyhtml+f'<img src="{contents}" {strAttributes}>\n'
elif element[0]=="linkScript":
bodyhtml=bodyhtml+f'<script src="{contents}" {strAttributes}></script>\n'
elif element[0]=="linkStyle":
bodyhtml=bodyhtml+f'<link rel="stylesheet" href="{contents}" {strAttributes}>\n'
elif element[0]=="link":
bodyhtml=bodyhtml+f'<link href="{contents}" {strAttributes}>\n'
elif element[0]=="script":
bodyhtml=bodyhtml+f'<script {strAttributes}>\n{contents}\n</script>\n'
elif element[0]=="style":
bodyhtml=bodyhtml+f'<style {strAttributes}>\n{contents}\n</style>\n'
else:
bodyhtml=bodyhtml+f'<{element[0]}{strAttributes}>{contents}</{element[0]}>\n'
if outputPart=="full":
return(f"""<!DOCTYPE html>
<html>
<!-- Site compiled from Daze -->
<head>
{metahtml}
</head>
<body>
{bodyhtml}
</body>
</html>""")
elif outputPart=="head":
return(f"""<!-- Part compiled from Daze -->
{metahtml}
<!-- End of Part -->
""")
elif outputPart=="body":
return(f"""<body>
<!-- Part compiled from Daze -->
{bodyhtml}
<!-- End of Part -->
""")
if len(sys.argv)>=2:
if sys.argv[1]=="compile":
if len(sys.argv)==4:
inFile = sys.argv[2]
outFile = sys.argv[3]
with open(inFile,'r') as i:
lines = i.read()
with open(outFile,'w') as o:
o.write(toHtml(lines))
elif len(sys.argv)>4:
print("daze: Too many arguments")
elif len(sys.argv)==3:
print("daze: No output file specified")
elif len(sys.argv)==2:
print("daze: No input file specified")
else:
print("daze: Internal Error 0x01")
elif sys.argv[1]=="help":
print("""
Daze: A declarative programing language
compile: Compiles the input file into an html file (daze compile <input> <output>)
help: Shows this help message
""")
else:
print(f"daze: invalid command {sys.argv[1]}. Try 'daze help' to see available commands.")
else:
print("daze: no command. Try 'daze help' to see available commands.")
| StarcoderdataPython |
107022 | <reponame>wy1157497582/arcpy
# -*- coding:utf-8-*-
import arcpy
import time
# import datetime
try:
cursor = arcpy.da.InsertCursor(r'E:\苍穹软件\20171030_房屋\xy.shp', "SHAPE@")
for x in range(0, 25):
cursor.insertRow([x])
del cursor
except arcpy.ExecuteError:
print arcpy.GetMessages()
| StarcoderdataPython |
4823977 | <reponame>ssbgp/data-tools
from processing.data_loader import DataLoader
from processing.data_processor import DataProcessor
from processing.errors import ProcessingError
from processing.file_container import FileContainer
from processing.file_selector import FileSelector
from tools.utils import print_error
class Application:
def __init__(self, container: FileContainer, selector: FileSelector,
loader: DataLoader, processor: DataProcessor) -> None:
self.container = container
self.selector = selector
self.loader = loader
self.processor = processor
def run(self) -> None:
try:
print("Selecting files...")
data_files = self.selector.select(self.container)
print("Loading data...")
data = self.loader.load(data_files)
print("Processing...")
self.processor.process(data)
print("Completed successfully!")
except ProcessingError as e:
print_error(str(e))
print("Failed!")
| StarcoderdataPython |
4807075 | #!/usr/bin/env python
import json
import os
import re
import subprocess
devnull = open(os.devnull)
extensions_path = '/home/matejc/.config/chromium/Default/Extensions'
def list_apps(path):
result = []
for root, dirs, files in os.walk(path, followlinks=True):
if files:
for file in files:
if file == "manifest.json":
abspath = os.path.join(root, file)
app_id = os.path.basename(os.path.dirname(root))
with open(abspath) as f:
o = json.load(f)
result += [(app_id, o['name'])]
return result
def items():
result = []
result += list_apps(extensions_path)
# result += list_paths("/your/custom/dir", directory=True)
return map(lambda item: "{0:<50} [app_id: '{1}']".format(item[1], item[0]), result)
def join(paths):
return '\n'.join(paths).encode('utf-8')
def dmenu(args=[], options=[]):
dmenu_cmd = ["rofi", "-dmenu"]
if args:
dmenu_cmd += args
p = subprocess.Popen(
dmenu_cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
if options:
stdout, _ = p.communicate('\n'.join(options).encode('utf-8'))
else:
stdout, _ = p.communicate()
return stdout.decode('utf-8').strip('\n')
def read_last(path):
result = []
if not os.path.isfile(path):
return result
with open(path, 'r') as f:
for line in f:
s = line.strip()
if s:
result += [s]
return result
def write_last(path, newentry):
lines = read_last(path)
if not newentry:
return
s = newentry.strip()
lines.insert(0, s)
with open(path, 'w') as f:
f.write(join(remove_duplicates(lines[0:4])))
def remove_duplicates(values):
result = []
seen = set()
for value in values:
if value not in seen:
result.append(value)
seen.add(value)
return result
s = join(read_last('/home/matejc/.dmenu_chrome_apps_last') + sorted(items()))
run = dmenu(['-p', 'run:', '-l', '10', '-b', '-i'], [s])
if run:
match = re.match(r'.+\s+\[app_id\: \'(.+)\'\]', run)
if match:
write_last('/home/matejc/.dmenu_chrome_apps_last', run)
subprocess.call('nodeenv electron /home/matejc/workarea/electron-chrome --app-id='+match.groups()[0], shell=True)
| StarcoderdataPython |
1702792 | from keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
np.random.seed(5)
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# x_train /= 255
# x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
X = np.concatenate((x_train, x_test), axis=0)  # PCA below operates on the combined set
plt.cla()
pca = decomposition.PCA(n_components=200)
pca.fit(X)
X = pca.transform(X)
print (X.shape)
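# --- Hedged follow-up sketch (added): how much of the pixel variance the 200 retained
# principal components explain. Uses only the fitted `pca` object from above; no new
# data or files are assumed.
explained = pca.explained_variance_ratio_.sum()
print('variance explained by 200 components: {:.3f}'.format(explained))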
| StarcoderdataPython |
3376795 | <gh_stars>0
import numpy as np
def n_kron(*inputs):
"""Return Kronecker product of a variable number of inputs.
Args:
Variable number of input matrices and vectors
Returns:
Kronecker product
"""
kp = np.array([[1.0]])
for op in inputs:
kp = np.kron(kp, op)
return kp
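# --- Illustrative usage sketch (added; not part of the original module) ---
# Kronecker products compose operators on a multi-qubit system. The 2x2 matrices below
# (identity and a Pauli-X / NOT gate) are assumptions used only for this demo.
if __name__ == "__main__":
    identity = np.eye(2)
    pauli_x = np.array([[0.0, 1.0], [1.0, 0.0]])
    op = n_kron(identity, pauli_x)   # 4x4 operator: X applied to the second qubit
    print(op.shape)                  # (4, 4)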
| StarcoderdataPython |
3249630 | import pathlib
import responses
from krypto.cli import IssueRunner
from krypto.github import prepare_body
from tests.conftest import (
sample_config,
username,
repository,
url,
todo_from_json,
raw_todo,
)
def test_runner():
runner = IssueRunner(
"./tests",
pathlib.Path.cwd(),
config=sample_config,
)
assert runner.cwd.parts[-1] == "krypto"
assert runner.config["username"] == username
assert runner.config["repository"] == repository
assert runner.todos == []
assert str(runner) == "Runner: cwd@krypto\n0 todos"
def test_runner_add_links(sample_todo, tmp_path):
d = tmp_path / "test"
d.mkdir()
origin = d / "sample_todo.py"
origin.write_text(raw_todo)
todo = sample_todo
todo.origin = origin
todo.issue_no = 1
runner = IssueRunner(
"./tests",
pathlib.Path.cwd(),
config=sample_config,
)
assert origin.read_text() == raw_todo
runner.todos = [todo]
runner.add_links()
print(origin.read_text())
assert (
origin.read_text()
== """
# TODO[Enhancement, Bug]: This is a sample title @https://github.com/antoniouaa/krypto/issues/1
# this is the body of the todo
""".strip()
)
def test_runner_requests(mock_requests, sample_todo):
runner = IssueRunner(
"./tests",
pathlib.Path.cwd(),
config={**sample_config, "dry": False},
)
todo = sample_todo
todo.issue_no = 1
runner.todos = [todo]
json = prepare_body(sample_todo, username=username, repository=repository)
mock_requests.add(
responses.GET,
f"{url}?state=all",
json=todo_from_json,
status=200,
)
mock_requests.add(
responses.PATCH,
f"{url}/1",
json=json,
status=200,
)
response = runner.run("token")
assert len(response) == 2
assert response[0] == [sample_todo.title]
assert response[1] == []
| StarcoderdataPython |
3386191 | """Global variables for Skyrim Unlocked build system"""
import os
DIR_REPO = "C:\\Users\\user\\Documents\\GitHub\\skyrim-unlocked"
"""Directory where the git repository for Skyrim Unlocked is stored."""
DIR_REPO_LE = DIR_REPO
"""Directory where all mod files for Legendary Edition are stored."""
DIR_REPO_SE = os.path.join(DIR_REPO, "SSE")
"""Directory where all mod files for Special Edition are stored."""
BSARCH = "C:\\Users\\user\\Documents\\Skyrim Tools\\BSArch\\bsarch.exe"
"""Path to BSArch.exe"""
| StarcoderdataPython |
1680061 | <filename>resupply_runner.sikuli/resupply_runner.py
from common import Common, logged
from fleet import Fleet
from sikuli import *
from config import Config
from status import Status
class ResupplyRunner(Common):
def __init__(self, fleets, from_small_resuppy=False, enable_expedition_check=False, message=None):
self.fleets = fleets
self.from_small_resuppy = from_small_resuppy
self.enable_expedition_check = enable_expedition_check
self.message = message
if self.enable_expedition_check:
self.expedition_img = Status(["on_expedition"]).get_images()[0]
@logged
def run(self):
if not self.from_small_resuppy:
supply_btn = "supply.png"
else:
supply_btn = "supply_small.png"
if self.message is not None:
self.message.set_need_check(False)
self.clickWithRandomLocationAndResetMouse(supply_btn)
for fleet in self.fleets:
self.clickWithRandomOffset(fleet.getNotSelectedImage())
if not self.__need_resupply():
continue
if self.message is not None: # need record expedition check
self.message.set_need_check(True)
self.__resupply_fleet()
self.back_home_port()
return True
def __need_resupply(self):
return not self.enable_expedition_check or not exists(self.expedition_img)
@logged
def __resupply_fleet(self):
self.clickWithRandomOffset("resupply_all.png")
sleep(3)
if __name__ == "__main__":
#runner = ResupplyRunner([Fleet(1)], from_small_resuppy=False)
runner = ResupplyRunner([Fleet(1), Fleet(2), Fleet(3), Fleet(4)], from_small_resuppy=False, enable_expedition_check=True)
runner.run() | StarcoderdataPython |
3253924 | " URL definitions "
from django.conf.urls.defaults import url, patterns
username = '(?P<username>[-\w]+)'
urlpatterns = patterns('jetpack.views',
# browsing packages
url(r'^addons/$', 'browser', {'type_id': 'a'},
name='jp_browser_addons'),
url(r'^libraries/$', 'browser', {'type_id': 'l'},
name='jp_browser_libraries'),
url(r'^addons/(?P<page_number>\d+)/$',
'browser', {'type_id': 'a'}, name='jp_browser_addons_page'),
url(r'^libraries/(?P<page_number>\d+)/$',
'browser', {'type_id': 'l'}, name='jp_browser_libraries_page'),
# by user
url(r'^addons/by/%s/$' % username,
'browser', {'type_id': 'a'}, name='jp_browser_user_addons'),
url(r'^libraries/by/%s/$' % username,
'browser', {'type_id': 'l'}, name='jp_browser_user_libraries'),
url(r'^addons/by/%s/(?P<page_number>\d+)/$' % username,
'browser', {'type_id': 'a'},
name='jp_browser_user_addons_page'),
url(r'^libraries/by/%s/(?P<page_number>\d+)/$' % username,
'browser', {'type_id': 'l'},
name='jp_browser_user_libraries_page'),
url(r'^get_latest_revision_number/(?P<package_id>\d+)/$',
'get_latest_revision_number', name='jp_get_latest_revision_number'),
url(r'^addon/new/upload_xpi/$', 'upload_xpi', name='jp_upload_xpi'),
# create new add-on/library
url(r'^addon/new/',
'create', {"type_id": "a"}, name='jp_addon_create'),
url(r'^library/new/',
'create', {"type_id": "l"}, name='jp_library_create'),
# package - display details of the PackageRevision
url(r'^package/(?P<pk>\d+)/latest/$',
'view_or_edit', {'latest': True}, name='jp_latest'),
url(r'^package/(?P<pk>\d+)/$', 'view_or_edit', name='jp_details'),
url(r'^package/(?P<pk>\d+)/version/(?P<version_name>.*)/$',
'view_or_edit', name='jp_version_details'),
url(r'^package/(?P<pk>\d+)/revision/(?P<revision_number>\d+)/$',
'view_or_edit', name='jp_revision_details'),
# get full module info
url(r'^get_module/(?P<revision_id>\d+)/(?P<filename>.*)$',
'get_module', name='jp_get_module'),
url(r'^module/(?P<pk>\d+)/$', 'download_module', name='jp_module'),
# copy a PackageRevision
url(r'^package/copy/(?P<revision_id>\d+)/$',
'copy', name='jp_package_revision_copy'),
# get Package revisions list
url(r'^revisions_list/(?P<revision_id>\d+)/$',
'get_revisions_list_html', name='jp_revisions_list_html'),
# save packagerevision
url(r'^package/save/(?P<revision_id>\d+)/$',
'save', name='jp_revision_save'),
# disable/activate/delete package
url(r'^package/disable/(?P<pk>[-\w]+)/$',
'disable', name='jp_package_disable'),
url(r'^package/activate/(?P<pk>[-\w]+)/$',
'activate', name='jp_package_activate'),
url(r'^package/delete/(?P<pk>[-\w]+)/$',
'delete', name='jp_package_delete'),
# get all, conflicting modules
url(r'^revision/(?P<pk>\d+)/get_modules_list/$',
'get_revision_modules_list', name='jp_revision_get_modules_list'),
url(r'^revision/(?P<pk>\d+)/get_conflicting_modules_list/$',
'get_revision_conflicting_modules_list',
name='jp_revision_get_conflicting_modules_list'),
# add/remove module
url(r'^package/add_module/(?P<revision_id>\d+)/$',
'add_module', name='jp_package_revision_add_module'),
url(r'^package/remove_module/(?P<revision_id>\d+)/$',
'remove_module', name='jp_package_revision_remove_module'),
# rename module
url(r'^package/rename_module/(?P<revision_id>\d+)/$',
'rename_module', name='jp_package_revision_rename_module'),
# switch SDK version
url(r'^package/switch_sdk/(?P<revision_id>\d+)/$',
'switch_sdk', name='jp_addon_switch_sdk_version'),
# add/remove attachment
url(r'^package/upload_attachment/(?P<revision_id>\d+)/$',
'upload_attachment', name='jp_package_revision_upload_attachment'),
url(r'^revision/(?P<pk>\d+)/add_attachment/',
'revision_add_attachment', name='jp_revision_add_attachment'),
url(r'^package/remove_attachment/(?P<revision_id>\d+)/$',
'remove_attachment', name='jp_package_revision_remove_attachment'),
# rename attachment
url(r'^package/rename_attachment/(?P<revision_id>\d+)/$',
'rename_attachment', name='jp_package_revision_rename_attachment'),
#add empty dir
url(r'^package/add_folder/(?P<revision_id>\d+)/$',
'add_folder', name='jp_package_revision_add_folder'),
#remove empty dir
url(r'^package/remove_folder/(?P<revision_id>\d+)/$',
'remove_folder', name='jp_package_revision_remove_folder'),
# display attachment
url(r'^attachment/(?P<uid>.*)$',
'download_attachment', name='jp_attachment'),
# autocomplete library
url(r'^autocomplete/library/$',
'library_autocomplete', name='jp_library_autocomplete'),
# assign library
url(r'^package/assign_library/(?P<revision_id>\d+)/$',
'assign_library', name='jp_package_revision_assign_library'),
# update library
url(r'^package/update_library/(?P<revision_id>\d+)/$',
'update_library', name='jp_package_revision_update_library'),
# remove library
url(r'^package/remove_dependency/(?P<revision_id>\d+)/$',
'remove_library', name='jp_package_revision_remove_library'),
# check libraries for latest versions
url(r'package/check_latest_dependencies/(?P<revision_id>\d+)/$',
'latest_dependencies', name='jp_package_check_latest_dependencies'),
# zip file
url(r'^revision/prepare_zip/(?P<revision_id>\d+)/$',
'prepare_zip', name='jp_revision_prepare_zip'),
url(r'^revision/download_zip/(?P<hashtag>[a-zA-Z0-9]+)/(?P<filename>.*)/$',
'get_zip', name='jp_revision_download_zip'),
url(r'^revision/check_zip/(?P<hashtag>[a-zA-Z0-9]+)/$',
'check_zip', name='jp_revision_check_zip'),
url(r'^package/zip/(?P<pk>\d+)/$',
'all_zip', name='jp_package_zip'),
)
| StarcoderdataPython |
72652 | <filename>app/app/calc.py
def add(x, y):
"""Adds two numbers"""
return x+y
def subtract(x, y):
"""Subtracts two numbers"""
return x-y
| StarcoderdataPython |
3223949 | # Generated by Django 3.2.10 on 2021-12-29 04:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('eye', '0002_auto_20211229_0256'),
]
operations = [
migrations.AlterModelOptions(
name='eventsession',
options={'ordering': ['timestamp']},
),
migrations.AddField(
model_name='session',
name='created_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='eye.application'),
),
migrations.AlterField(
model_name='eventsession',
name='id',
field=models.UUIDField(default=uuid.uuid1, editable=False, primary_key=True, serialize=False),
),
migrations.CreateModel(
name='ApplicationGroup',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=100, unique=True)),
('description', models.TextField(blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='application',
name='group',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='applications', to='eye.applicationgroup'),
),
]
| StarcoderdataPython |
4830185 | <filename>drwr/scripts/tf_records_generator.py
import startup
import sys
import os
import glob
import re
import random
import math
import numpy as np
from scipy.io import loadmat
from imageio import imread
from skimage.transform import resize as im_resize
from util.fs import mkdir_if_missing
from util.data import tf_record_options
from run.fuzz_pc_631 import get_z_candidate3
from run.possion import PoissonDiskSampler
import tensorflow as tf
from tensorflow import app
flags = tf.app.flags
flags.DEFINE_string('split_dir',
'',
'Directory path containing the input rendered images.')
flags.DEFINE_string('inp_dir_renders',
'',
'Directory path containing the input rendered images.')
flags.DEFINE_string('inp_dir_voxels',
'',
'Directory path containing the input voxels.')
flags.DEFINE_string('out_dir',
'',
'Directory path to write the output.')
flags.DEFINE_string('synth_set', '02691156',
'')
flags.DEFINE_boolean('store_camera', False, '')
flags.DEFINE_boolean('store_voxels', False, '')
flags.DEFINE_boolean('store_depth', False, '')
flags.DEFINE_string('split_path', '', '')
flags.DEFINE_integer('num_views', 10, 'Num of viewpoints in the input data.')
flags.DEFINE_integer('image_size', 64,
'Input images dimension (pixels) - width & height.')
flags.DEFINE_integer('vox_size', 32, 'Voxel prediction dimension.')
flags.DEFINE_boolean('tfrecords_gzip_compressed', False, 'Voxel prediction dimension.')
THRESHOLD = 0.6
FLAGS = flags.FLAGS
def read_camera(filename):
cam = loadmat(filename)
extr = cam["extrinsic"]
pos = cam["pos"]
return extr, pos
def loadDepth(dFile, minVal=0, maxVal=10):
dMap = imread(dFile)
dMap = dMap.astype(np.float32)
dMap = dMap*(maxVal-minVal)/(pow(2,16)-1) + minVal
return dMap
def _dtype_feature(ndarray):
ndarray = ndarray.flatten()
"""match appropriate tf.train.Feature class with dtype of ndarray. """
assert isinstance(ndarray, np.ndarray)
dtype_ = ndarray.dtype
if dtype_ == np.float64 or dtype_ == np.float32:
return tf.train.Feature(float_list=tf.train.FloatList(value=ndarray))
elif dtype_ == np.int64:
return tf.train.Feature(int64_list=tf.train.Int64List(value=ndarray))
else:
raise ValueError("The input should be numpy ndarray. \
Instaed got {}".format(ndarray.dtype))
def _string_feature(s):
s = s.encode('utf-8')
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[s]))
def binaryzation(mask):
mask = np.squeeze(mask)
l,h = mask.shape
for i in range(l):
for j in range(h):
if mask[i][j] >= 255 * THRESHOLD:
mask[i][j] = 255
else:
mask[i][j] = 0
return mask, 0,0
def binaryzation2(image):
image = np.squeeze(image)
w,h = image.shape
k =0.95
wd = np.where(image >= k)
bd = np.where(image < k)
wd = np.array(wd)
wd = np.transpose(wd)
bd = np.array(bd)
bd = np.transpose(bd)
return image, wd, bd
def sdf(image, white_point, w_nSample):
scale = 255 / (64 * math.sqrt(2))
image = 255 * image
w,h = image.shape
for i in range(w):
for j in range(h):
if image[i][j] == 0:
diffMat = np.tile([i,j],(w_nSample,1)) - white_point
sqDiffMat = diffMat**2
sqDistances = sqDiffMat.sum(axis = 1)
distances = sqDistances ** 0.5
min_dis = min(distances)
image[i][j] = 220 - scale * min_dis
image = image / 255.0
return image
def mask_sdf(image):
images = np.squeeze(image)
image_erzhi, white_point, black_point = binaryzation2(images)
w_nSample = white_point.shape[0]
b_nSample = black_point.shape[0]
if w_nSample == 0 or b_nSample == 0:
return image
image_res = sdf(images, white_point, w_nSample)
image_res = np.expand_dims(image_res, 2)
return image_res
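# --- Hedged usage sketch (added; not part of the original script) ---
# mask_sdf() expects a single-channel float mask in [0, 1] with a trailing channel axis
# and returns a soft, distance-coded mask of the same shape. The toy 8x8 mask below is
# an assumption used only to illustrate the expected input layout.
def _example_mask_sdf():
    toy_mask = np.zeros((8, 8, 1), dtype=np.float32)
    toy_mask[2:6, 2:6, 0] = 1.0          # a small white square on a black background
    return mask_sdf(toy_mask)            # same (8, 8, 1) shape, background distance-coded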
def create_record(synth_set, split_name, models):
im_size = FLAGS.image_size
num_views = FLAGS.num_views
num_models = len(models)
mkdir_if_missing(FLAGS.out_dir)
# address to save the TFRecords file
train_filename = "{}/{}_{}.tfrecords".format(FLAGS.out_dir, synth_set, split_name)
# open the TFRecords file
options = tf_record_options(FLAGS)
print(train_filename)
writer = tf.python_io.TFRecordWriter(train_filename, options=options)
render_dir = os.path.join(FLAGS.inp_dir_renders, synth_set)
voxel_dir = os.path.join(FLAGS.inp_dir_voxels, synth_set)
for j, model in enumerate(models):
print("{}/{}: {}".format(j, num_models, model))
if FLAGS.store_voxels:
voxels_file = os.path.join(voxel_dir, "{}.mat".format(model))
voxels = loadmat(voxels_file)["Volume"].astype(np.float32)
voxels = np.transpose(voxels, (1, 0, 2))
voxels = np.flip(voxels, axis=1)
im_dir = os.path.join(render_dir, model)
images = sorted(glob.glob("{}/render_*.png".format(im_dir)))
rgbs = np.zeros((num_views, im_size, im_size, 3), dtype=np.float32)
masks = np.zeros((num_views, im_size, im_size, 1), dtype=np.float32)
mask_sdfs = np.zeros((num_views, im_size, im_size, 1), dtype=np.float32)
cameras = np.zeros((num_views, 4, 4), dtype=np.float32)
cam_pos = np.zeros((num_views, 3), dtype=np.float32)
depths = np.zeros((num_views, im_size, im_size, 1), dtype=np.float32)
assert(len(images) >= num_views)
error_flag = 0
for k in range(num_views):
im_file = images[k]
img = imread(im_file)
rgb = img[:, :, 0:3]
mask = img[:, :, [3]]
a,b,_ = mask.shape
erzhi_mask, _ , _ = binaryzation(mask)
mask = mask / 255.0
masks_sdf = mask_sdf(mask)
if True:
mask_fg = np.repeat(mask, 3, 2)
mask_bg = 1.0 - mask_fg
rgb = rgb * mask_fg + np.ones(rgb.shape)*255.0*mask_bg
rgb = rgb / 255.0
actual_size = rgb.shape[0]
if im_size != actual_size:
rgb = im_resize(rgb, (im_size, im_size), order=3)
mask = im_resize(mask, (im_size, im_size), order=3)
rgbs[k, :, :, :] = rgb
masks[k, :, :, :] = mask
mask_sdfs[k,:,:] = masks_sdf
fn = os.path.basename(im_file)
img_idx = int(re.search(r'\d+', fn).group())
if FLAGS.store_camera:
cam_file = "{}/camera_{}.mat".format(im_dir, img_idx)
cam_extr, pos = read_camera(cam_file)
cameras[k, :, :] = cam_extr
cam_pos[k, :] = pos
if FLAGS.store_depth:
depth_file = "{}/depth_{}.png".format(im_dir, img_idx)
depth = loadDepth(depth_file)
d_max = 10.0
d_min = 0.0
depth = (depth - d_min) / d_max
depth_r = im_resize(depth, (im_size, im_size), order=0)
depth_r = depth_r * d_max + d_min
depths[k, :, :] = np.expand_dims(depth_r, -1)
if error_flag == 1:
continue
# Create a feature
feature = {"image": _dtype_feature(rgbs),
"mask": _dtype_feature(masks),
"mask_sdfs": _dtype_feature(mask_sdfs),
"name": _string_feature(model)}
if FLAGS.store_voxels:
feature["vox"] = _dtype_feature(voxels)
if FLAGS.store_camera:
feature["extrinsic"] = _dtype_feature(cameras)
feature["cam_pos"] = _dtype_feature(cam_pos)
if FLAGS.store_depth:
feature["depth"] = _dtype_feature(depths)
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
writer.close()
sys.stdout.flush()
SPLIT_DEF = [("val", 0.05), ("train", 0.95)]
def generate_splits(input_dir):
    files = [f for f in os.listdir(input_dir) if os.path.isdir(os.path.join(input_dir, f))]
models = sorted(files)
random.shuffle(models)
num_models = len(models)
models = np.array(models)
out = {}
first_idx = 0
for k, splt in enumerate(SPLIT_DEF):
fraction = splt[1]
num_in_split = int(np.floor(fraction * num_models))
end_idx = first_idx + num_in_split
if k == len(SPLIT_DEF)-1:
end_idx = num_models
models_split = models[first_idx:end_idx]
out[splt[0]] = models_split
first_idx = end_idx
return out
def load_drc_split(base_dir, synth_set):
filename = os.path.join(base_dir, "{}.file".format(synth_set))
lines = [line.rstrip('\n') for line in open(filename)]
k = 3
split = {}
while k < len(lines):
_,_,name,_,_,num = lines[k:k+6]
k += 6
num = int(num)
split_curr = []
for i in range(num):
_, _, _, _, model_name = lines[k:k+5]
k += 5
split_curr.append(model_name)
split[name] = split_curr
return split
def generate_records(synth_set):
base_dir = FLAGS.split_dir
split = load_drc_split(base_dir, synth_set)
for key, value in split.items():
if key == 'val':
continue
create_record(synth_set, key, value)
break
def read_split(filename):
f = open(filename, "r")
lines = f.readlines()
lines = [l.rstrip() for l in lines]
return lines
def main(_):
generate_records(FLAGS.synth_set)
if __name__ == '__main__':
app.run()
| StarcoderdataPython |
59044 | <filename>courier/elements/statistics.py
from dataclasses import dataclass, field
from typing import Any, Dict, List, Tuple
from courier.config import get_config
from courier.utils import flatten
from .elements import CourierIssue
CONFIG = get_config()
@dataclass
class IssueStatistics:
issue: CourierIssue
total_pages: int = field(init=False)
assigned_pages: int = field(init=False)
consolidated_pages: int = field(init=False)
expected_article_pages: int = field(init=False)
number_of_articles: int = field(init=False)
year: int = field(init=False)
missing_pages: List[Tuple[str, int, int]] = field(init=False)
num_missing_pages: int = field(init=False)
errors: List[Dict[str, Any]] = field(init=False)
num_errors: int = field(init=False)
def __post_init__(self) -> None:
self.total_pages: int = len(self.issue)
self.assigned_pages: int = len(self.issue.get_assigned_pages())
self.consolidated_pages: int = len(self.issue.get_consolidated_pages())
self.expected_article_pages: int = len(self.issue.get_article_pages())
self.number_of_articles: int = self.issue.num_articles
self.year: int = CONFIG.issue_index.loc[int(self.issue.courier_id.lstrip('0'))].year
self.missing_pages: List[Tuple[str, int, int]] = [
(x.courier_id or '', x.record_number or 0, len(x.pages) - len(x.texts)) for x in self.issue.articles
]
self.num_missing_pages: int = sum(
[len(x.pages) - len(x.texts) for x in self.issue.articles]
) # FIXME: Should return (expected - consolidated) instead? Rename?
self.errors: List[Dict[str, Any]] = flatten(
[[error.asdict for error in page.errors] for page in self.issue.pages if len(page.errors) != 0]
)
self.num_errors: int = len(self.errors)
if __name__ == '__main__':
pass
| StarcoderdataPython |
146344 | from setuptools import setup, find_packages
from visual_perception import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='visual_perception',
version = __version__,
description='A High Level Python Library for Visual Recognition ',
url="https://github.com/SSusantAchary/Visual-Perception",
author="SusantAchary",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(),
python_requires='>=3.5, <4',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Image Processing',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Software Development :: Libraries :: Python Modules'
],
keywords=[
"object detection",
"computer vision",
'yolo',
'yolov4',
'tinyyolo'],
install_requires=[
"tensorflow",
'keras',
'opencv-python',
'numpy',
'pillow',
'matplotlib',
'pandas',
'scikit-learn',
'progressbar2',
'scipy',
'h5py',
'imgaug',
'scikit-image',
'configobj',
],
project_urls={
'Bug Reports': 'https://github.com/SSusantAchary/Visual-Perception/issues',
'Source': 'https://github.com/SSusantAchary/Visual-Perception/'},
) | StarcoderdataPython |
1799163 | <reponame>mattjm/iam-messaging<filename>tools/aws_manage.py
#
# IAM AWS messaging mgement
#
# json classes
import json
import base64
import string
import time
import re
import os.path
from sys import exit
import signal
from optparse import OptionParser
import threading
import logging
from messagetools.iam_message import crypt_init
from messagetools.aws import AWS
import settings
#
# ---------------- gws_ce main --------------------------
#
# load configuration
parser = OptionParser()
parser.add_option('-o', '--operation', action='store', type='string', dest='operation',
help='cq:create_queue, ct:create_topic, sq:subscribe_queue, lq:list_queue(s) ')
parser.add_option('-t', '--topic', action='store', type='string', dest='topic', help='topic')
parser.add_option('-q', '--queue', action='store', type='string', dest='queue', help='queue')
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='?')
parser.add_option('-c', '--conf', action='store', type='string', dest='config', help='config file')
options, args = parser.parse_args()
if options.operation==None:
print('operation must be entered')
exit(1)
crypt_init(settings.IAM_CONF)
logging.info("sws queue monitor starting.")
aws = AWS(settings.AWS_CONF)
if options.operation=='lqs':
    print('list queues')
    queues = aws.get_all_queues()
    for q in queues:
        print(q.arn)
        if options.verbose:
            print(q.get_attributes)
if options.operation=='ct':
print('creating topic: ' + options.topic)
aws.create_topic(options.topic)
if options.operation=='cq':
print('creating queue: ' + options.queue)
aws.create_queue(options.queue)
if options.operation=='sq':
print('subscribing queue: ' + options.queue + ' to topic ' + options.topic)
aws.subscribe_queue(options.topic, options.queue)
| StarcoderdataPython |
3203559 | class Player():
"""docstring for Player"""
def __init__(self):
super(Player, self).__init__()
self.inventory = []
class Scene():
"""docstring for Scene"""
def __init__(self, intro, keywords, player=None, condition=None, success=None, fail=None):
super(Scene, self).__init__()
self.intro = intro
self.keywords = keywords
self.player = player
self.condition = condition
self.success = success
self.fail = fail
def run(self):
print("\n\n\n\n")
print(self.intro)
print("\n")
if self.player and self.condition:
if self.condition in self.player.inventory:
return self.success
else:
return self.fail
while True:
reponse = input(">").lower()
if reponse in self.keywords:
return reponse
class Giveaway():
"""docstring for ClassName"""
def __init__(self, item, player, callback):
super(Giveaway, self).__init__()
self.item = item
self.player = player
self.callback = callback
def run(self):
self.player.inventory.append(self.item)
return self.callback
player = Player()
scenes = {
    'start': Scene(
        intro="Hello visitors, welcome to Antswood.\n\nA forest where different species live side by side (here, an ant and a bee) and together form a complex ecosystem full of characters, actions (and reactions), plots and challenges to complete.",
        keywords=['ant','bee','forest'],
    ),
    'ant': Scene(
        intro="Hello, I am ant #27903. \n\nWe ants look after the trees and the forest. Our goal: keep a certain balance in the ecosystem.",
        keywords=['start','bee','forest'],
    ),
    'bee': Scene(
        intro="Hello, I am a bee. We take care of pollinating the flowers. Our goal: find flowers. Sometimes we find seeds in them :)",
        keywords=['start','ant','forest','seeds'],
    ),
    'seeds': Giveaway('seeds',player,'bee2'),
    'bee2': Scene(
        intro="Here, take these seeds. They will surely be more useful to you.",
        keywords=['start','ant','forest'],
    ),
    'forest': Scene(
        intro="You are walking through the forest and notice that some trees have disappeared.\nMaybe we could plant new ones.",
        keywords=['start','plant'],
    ),
    'plant': Scene(
        intro="...",
        keywords=['start'],
        player=player,
        condition='seeds',
        success='3ND',
        fail='forest'
    ),
    '3ND': Scene(
        intro="You planted the seeds. Well done!",
        keywords=['start']
    )
}
def main():
scene = "départ"
while True:
scene = scenes[scene].run()
main() | StarcoderdataPython |
1781455 | import datetime
from datetime import datetime
# imports for discord api
import discord
from discord.ext import commands
from discord.ext.commands import has_permissions
from discord.ext.commands import CommandNotFound
from discord.ext import tasks
# extras
import asyncio
import requests
import json
import pytz
from pytz import timezone
import os
# self methods
from interfaces.admin.review_json import supervision
from interfaces.admin.change_content import security
from interfaces.users.weather_by_city import user_interface
from interfaces.users.help_us import help_us
from interfaces.methods.daily import daily_control
from interfaces.methods.setups2_daily import sets
# check little database
f1 = open("hello.json","r")
weekly = json.load(f1)
f1.close()
dictofdays = {"Thursday":"Perşembe","Wednesday":"Çarşamba","Tuesday":"Salı","Monday":"Pazartesi","Sunday":"Pazar","Saturday":"Cumartesi","Friday":"Cuma"}
ADMINID = [596455467585110016,476010467404546052]
prefix = "!"
bot = commands.Bot(command_prefix=prefix)
bot.remove_command('help')
sets(bot,weekly)
user_interface(bot,dictofdays)
help_us(bot)
supervision(bot,weekly)
security(bot,weekly)
@bot.event
async def on_ready():
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
    print('------')
    await bot.change_presence(
        activity=discord.Game(name="!weather city | 🌐 " + str(len(bot.guilds)) + " servers")
    )
    daily_control(bot, weekly)
@bot.event
async def on_guild_join(guild):
global ADMINID
embed = discord.Embed(title="**NEW SERVER**", colour=discord.Colour(0x4aff00), description="\n**Members:** " + str(guild.member_count) + "\n**Banner:** [click](" + str(guild.banner_url) + ")\n**Owner:** " + str(guild.owner_id) + "\n**Server Id:** " + str(guild.id))
embed.set_footer(text="Information Service")
embed.set_author(name=guild, icon_url=guild.icon_url)
embed.set_thumbnail(url=guild.icon_url)
for i in ADMINID:
user = await bot.fetch_user(i)
await user.send(embed=embed)
await bot.change_presence(
activity=discord.Game(
name="!weather city | 🌐 " + str(len(bot.guilds)) + " servers"))
@bot.event
async def on_guild_remove(guild):
global ADMINID
embed = discord.Embed(title="**LEAVED**", colour=discord.Colour(0xd0021b), description="\n**Members:** " + str(guild.member_count) + "\n**Banner:** [click](" + str(guild.banner_url) + ")\n**Owner:** " + str(guild.owner_id) + "\n**Server Id:** " + str(guild.id))
embed.set_footer(text="Information Service")
embed.set_author(name=guild, icon_url=guild.icon_url)
embed.set_thumbnail(url=guild.icon_url)
for i in ADMINID:
user = await bot.fetch_user(i)
await user.send(embed=embed)
await bot.change_presence(
activity=discord.Game(
name="!weather city | 🌐 " + str(len(bot.guilds)) + " servers"))
bot.run("TOKEN") | StarcoderdataPython |
1739208 | # Copyright 2018-20 <NAME>.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# This script parses the IDNA table from
# https://unicode.org/Public/idna/11.0.0/IdnaMappingTable.txt,
# and converts it to a C++ table.
import sys
import jinja2
def parse_line(line):
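    # discard the trailing '#' comment, then split the remaining fields on ';'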
line = line[0:line.find('#')]
tokens = [token.strip() for token in line.split(';')] if line else []
if len(tokens) == 3:
tokens[2] = tokens[2].split(' ')[0]
return tokens
status_keys = [
'valid',
'mapped',
'disallowed',
'disallowed_STD3_valid',
'disallowed_STD3_mapped',
'ignored',
'deviation',
]
class CodePointRange(object):
def __init__(self, range, status, mapped):
if type(range) == str:
range = range.split('..') if '..' in range else [range, range]
if type(range[0]) == str:
range = [int(range[0], 16), int(range[1], 16)]
self.range = range
self.status = status
self.mapped = int(mapped, 16) if mapped else None
@property
def is_mapped(self):
return self.status in ('mapped', 'disallowed_STD3_mapped')
@property
def is_valid(self):
return self.status == 'valid'
@property
def can_be_16_bit(self):
return self.range[0] <= 0xffff and self.mapped is not None and self.mapped <= 0xffff
def main():
input, output = sys.argv[1], sys.argv[2]
with open(input, 'r') as input_file, open(output, 'w+') as output_file:
code_points = []
for line in input_file.readlines():
code_point = parse_line(line)
if code_point:
code_points.append(CodePointRange(
code_point[0], code_point[1], code_point[2] if len(code_point) > 2 else None))
# Store code point mappings as std::char16_t if they can, otherwise store them as std::char32_t
mapped_code_points_16, mapped_code_points_32 = [], []
for code_point in filter(lambda cp: cp.is_mapped, code_points):
(mapped_code_points_32, mapped_code_points_16)[code_point.can_be_16_bit].append(code_point)
# Squeeze code points to reduce table size, and remove all valid code points as they will be handled by default
code_points = [code_point for code_point in code_points if not code_point.is_valid]
template = jinja2.Template(
"""// Auto-generated.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef SKYR_V2_DOMAIN_IDNA_TABLES_HPP
#define SKYR_V2_DOMAIN_IDNA_TABLES_HPP
#include <algorithm>
#include <array>
#include <iterator>
#include <skyr/v2/domain/idna_status.hpp>
namespace skyr::inline v2::idna::details {
struct code_point_range {
char32_t first;
char32_t last;
idna_status status;
};
constexpr static auto statuses = std::array<code_point_range, {{ entries|length }}>{% raw %}{{{% endraw %}
{% for code_point in entries %} { U'\\x{{ '%04x' % code_point.range[0] }}', U'\\x{{ '%04x' % code_point.range[1] }}', idna_status::{{ code_point.status.lower() }} },
{% endfor %}{% raw %}}}{% endraw %};
struct mapped_16_code_point {
char16_t code_point;
char16_t mapped;
};
constexpr static auto mapped_16 = std::array<mapped_16_code_point, {{ mapped_entries_16|length }}>{% raw %}{{{% endraw %}
{% for code_point in mapped_entries_16 %} { U'\\x{{ '%04x' % code_point.range[0] }}', U'\\x{{ '%04x' % code_point.mapped }}' },
{% endfor %}{% raw %}}}{% endraw %};
struct mapped_32_code_point {
char32_t code_point;
char32_t mapped;
};
constexpr static auto mapped_32 = std::array<mapped_32_code_point, {{ mapped_entries_32|length }}>{% raw %}{{{% endraw %}
{% for code_point in mapped_entries_32 %} { U'\\x{{ '%04x' % code_point.range[0] }}', U'\\x{{ '%04x' % code_point.mapped }}' },
{% endfor %}{% raw %}}}{% endraw %};
} // namespace skyr::inline v2::idna::details
#endif // SKYR_V2_DOMAIN_IDNA_TABLES_HPP
""")
template.stream(
entries=code_points,
mapped_entries_16=mapped_code_points_16,
mapped_entries_32=mapped_code_points_32
).dump(output_file)
if __name__ == '__main__':
main()
| StarcoderdataPython |
3208829 | import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name="grpc4bmi",
version="0.3.2",
author="<NAME>",
author_email="<EMAIL>",
description="Run your BMI implementation in a separate process and expose it as BMI-python with GRPC",
license="Apache License, Version 2.0",
url="https://github.com/eWaterCycle/grpc4bmi",
packages=find_packages(),
include_package_data=True,
long_description=read("README.md"),
long_description_content_type='text/markdown',
entry_points={"console_scripts": [
"run-bmi-server = grpc4bmi.run_server:main"
]},
install_requires=[
"grpcio==1.27.2",
"grpcio-reflection==1.27.2",
"grpcio-status==1.27.2",
"googleapis-common-protos>=1.5.5",
"protobuf==3.12.2",
"numpy",
"docker",
"bmipy",
"semver",
],
extras_require={
'R': ['rpy2'],
},
classifiers=["Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Programming Language :: Python",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Utilities",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: Apache Software License"
],
)
| StarcoderdataPython |
3304075 | <reponame>Kevincrh/multi-model_fusion
import os
import bisect
import config
import numpy as np
import os.path as osp
import matplotlib.pyplot as plt
from tqdm import tqdm
from glob import glob
from math import isnan
from random import shuffle
from mpl_toolkits.mplot3d import Axes3D
def des(a, b):
return np.linalg.norm(a - b)
def get_info(shape_dir):
splits = shape_dir.split('/')
class_name = splits[-3]
set_name = splits[-2]
file_name = splits[-1].split('.')[0]
return class_name, set_name, file_name
def random_point_triangle(a, b, c):
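    # draw a uniformly distributed point inside triangle (a, b, c) using square-root barycentric sampling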
r1 = np.random.random()
r2 = np.random.random()
p = np.sqrt(r1) * (r2 * c + b * (1-r2)) + a * (1-np.sqrt(r1))
return p
def triangle_area(p1, p2, p3):
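    # Heron's formula: area from the semi-perimeter and the three side lengths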
a = des(p1, p2)
b = des(p1, p3)
c = des(p2, p3)
p = (a+b+c)/2.0
area = np.sqrt(p*(p-a)*(p-b)*(p-c))
if isnan(area):
# print('find nan')
area = 1e-6
return area
def uniform_sampling(points, faces, n_samples):
sampled_points = []
total_area = 0
cum_sum = []
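    # accumulate face areas so each face can be picked with probability proportional to its area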
for _idx, face in enumerate(faces):
total_area += triangle_area(points[face[0]], points[face[1]], points[face[2]])
if isnan(total_area):
print('find nan')
cum_sum.append(total_area)
for _idx in range(n_samples):
tmp = np.random.random()*total_area
face_idx = bisect.bisect_left(cum_sum, tmp)
pc = random_point_triangle(points[faces[face_idx][0]],
points[faces[face_idx][1]],
points[faces[face_idx][2]])
sampled_points.append(pc)
return np.array(sampled_points)
def resize_pc(pc, L):
"""
normalize point cloud in range L
:param pc: type list
:param L:
:return: type list
"""
pc_L_max = np.sqrt(np.sum(pc ** 2, 1)).max()
return pc/pc_L_max*L
def normal_pc(pc):
"""
    center the point cloud at the origin and scale it to fit inside the unit sphere
    :param pc: numpy array of points, shape (N, 3)
    :return: normalized numpy array of the same shape
"""
pc_mean = pc.mean(axis=0)
pc = pc - pc_mean
pc_L_max = np.max(np.sqrt(np.sum(abs(pc ** 2), axis=-1)))
pc = pc/pc_L_max
return pc
def get_pc(shape, point_each):
points = []
faces = []
with open(shape, 'r') as f:
line = f.readline().strip()
if line == 'OFF':
num_verts, num_faces, num_edge = f.readline().split()
num_verts = int(num_verts)
num_faces = int(num_faces)
else:
num_verts, num_faces, num_edge = line[3:].split()
num_verts = int(num_verts)
num_faces = int(num_faces)
for idx in range(num_verts):
line = f.readline()
point = [float(v) for v in line.split()]
points.append(point)
for idx in range(num_faces):
line = f.readline()
face = [int(t_f) for t_f in line.split()]
faces.append(face[1:])
points = np.array(points)
pc = resize_pc(points, 10)
pc = uniform_sampling(pc, faces, point_each)
pc = normal_pc(pc)
return pc
def generate(raw_off_root, vis_pc=False, num_pc_each=2018):
shape_all = glob(osp.join(raw_off_root, '*', '*', '*.off'))
shuffle(shape_all)
cnt = 0
for shape in tqdm(shape_all):
class_name, set_name, file_name = get_info(shape)
new_folder = osp.join(config.pc_net.data_root, class_name, set_name)
new_dir = osp.join(new_folder, file_name)
if osp.exists(new_dir+'.npy'):
if vis_pc and not osp.exists(new_dir+'.jpg'):
pc = np.load(new_dir+'.npy')
draw_pc(pc, show=False, save_dir=new_dir+'.jpg')
else:
pc = get_pc(shape, num_pc_each)
if not osp.exists(new_folder):
os.makedirs(new_folder)
np.save(new_dir+'.npy', pc)
if vis_pc:
if cnt%10==0:
draw_pc(pc, show=False, save_dir=new_dir+'.jpg')
cnt += 1
def draw_pc(pc, show=True, save_dir=None):
ax = plt.figure().add_subplot(111, projection='3d')
ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], marker='.')
ax.grid(False)
# ax.axis('off')
if show:
plt.show()
if save_dir is not None:
plt.savefig(save_dir)
if __name__ == '__main__':
generate('/repository/Modelnet40')
# file_name = '/home/fyf/data/pc_ModelNet40/airplane/train/airplane_0165.npy'
# pc = np.load(file_name)
# draw_pc(pc)
| StarcoderdataPython |
1718579 | import scrapy
class StackOverSpider(scrapy.Spider):
"""Scraper for google serach."""
name = "stack_spider"
def __init__(self):
"""Initialize the spider."""
super(StackOverSpider, self).__init__()
        self.start_urls = ['https://stackoverflow.com/jobs']
def parse(self, response):
"""Crawl through search and return first page information."""
SET_SELECTOR = '.-job-summary'
for post in response.css(SET_SELECTOR):
TITLE_SELECTOR = 'h2 a ::text'
SITE_SELECTOR = 'h2 a ::attr(href)'
# COMPANY_NAME = 'div '
yield{
'title': post.css(TITLE_SELECTOR).extract(),
'site': post.css(SITE_SELECTOR).extract_first()
} | StarcoderdataPython |
1632617 | import discord
from discord.ext import commands
import serial
arduino = serial.Serial('/dev/ttyACM0', 9600)
bot = commands.Bot(command_prefix=".")
@bot.event
async def on_ready():
print("bot is ready")
@bot.command()
async def temp(ctx):
arduino.write(b't')
t = arduino.readline()
print(t.decode().strip())
await ctx.send(t.decode().strip())
@bot.command()
async def hum(ctx):
arduino.write(b'h')
h = arduino.readline()
print(h.decode().strip())
await ctx.send(h.decode().strip())
@bot.command()
async def lighton(ctx):
arduino.write(b'o')
await ctx.send("light is on")
@bot.command()
async def lightoff(ctx):
arduino.write(b'f')
await ctx.send("light is off")
@bot.command()
async def carbon(ctx):
arduino.write(b'c')
c = arduino.readline()
print(c.decode().strip())
await ctx.send(c.decode().strip())
'''
@bot.command()
async def lux(ctx):
arduino.write(b'l')
l = arduino.readline()
print(l.decode().strip())
await ctx.send(l.decode().strip())
'''
bot.run("bot token")
| StarcoderdataPython |
3320636 | """
Library Features:
Name: lib_jupyter_plot_ts
Author(s): <NAME> (<EMAIL>)
Date: '20210113'
Version: '1.0.0'
"""
#######################################################################################
# Libraries
import os
import pandas as pd
from library.jupyter_generic.lib_jupyter_utils_system import make_folder
import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Method to configure time-series axes
def configure_ts_axes(dframe_data, time_format='%m-%d %H'):
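    # build tick positions and formatted labels (default '%m-%d %H') from the dataframe's datetime index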
tick_time_period = list(dframe_data.index)
tick_time_idx = dframe_data.index
tick_time_labels = [tick_label.strftime(time_format) for tick_label in dframe_data.index]
return tick_time_period, tick_time_idx, tick_time_labels
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to configure time-series attributes
def configure_ts_attrs(attrs_data,
tag_run_time='time_run', tag_restart_time='time_restart', tag_start_time='time_start',
tag_run_name='run_name', tag_run_domain='run_domain',
tag_section_name='section_name', tag_basin_name='section_domain',
tag_section_thr_alarm_discharge='section_discharge_thr_alarm',
tag_section_thr_alert_discharge='section_discharge_thr_alert',
tag_section_drainage_area='section_drained_area'):
attrs_ts = {}
for attr_key, attr_value in attrs_data.items():
if attr_key == tag_run_time:
attrs_ts[tag_run_time] = pd.Timestamp(attr_value)
if attr_key == tag_restart_time:
attrs_ts[tag_restart_time] = pd.Timestamp(attr_value)
if attr_key == tag_start_time:
attrs_ts[tag_start_time] = pd.Timestamp(attr_value)
elif attr_key == tag_run_name:
attrs_ts[tag_run_name] = attr_value
elif attr_key == tag_section_name:
attrs_ts[tag_section_name] = attr_value
elif attr_key == tag_basin_name:
attrs_ts[tag_basin_name] = attr_value
elif attr_key == tag_section_thr_alarm_discharge:
attrs_ts[tag_section_thr_alarm_discharge] = float(attr_value)
elif attr_key == tag_section_thr_alert_discharge:
attrs_ts[tag_section_thr_alert_discharge] = float(attr_value)
elif attr_key == tag_section_drainage_area:
attrs_ts[tag_section_drainage_area] = attr_value
elif attr_key == tag_run_domain:
attrs_ts[tag_run_domain] = attr_value
return attrs_ts
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to plot forcing time-series
def plot_ts_forcing(file_name,
df_rain=None, value_min_rain=0, value_max_rain=20,
df_airt=None, value_min_airt=-20, value_max_airt=35,
df_incrad=None, value_min_incrad=-50, value_max_incrad=1200,
df_rh=None, value_min_rh=0, value_max_rh=100,
df_winds=None, value_min_winds=0, value_max_winds=20,
attrs_forcing=None,
tag_time_name='time', tag_time_units='[hour]',
tag_rain_name='Rain', tag_rain_units='[mm]',
tag_airt_name='AirT', tag_airt_units='[C]',
tag_incrad_name='IncRad', tag_incrad_units='[W/m^2]',
tag_rh_name='RH', tag_rh_units='[%]',
tag_winds_name='Wind', tag_winds_units='[m/s]',
tag_sep=' ', fig_dpi=120):
# Configure ts attributes
attrs_ts = configure_ts_attrs(attrs_forcing)
# Configure ts time axes
[tick_time_period, tick_time_idx, tick_time_labels] = configure_ts_axes(df_rain)
# Axis labels
label_time = tag_sep.join([tag_time_name, tag_time_units])
label_rain = tag_sep.join([tag_rain_name, tag_rain_units])
label_airt = tag_sep.join([tag_airt_name, tag_airt_units])
label_incrad = tag_sep.join([tag_incrad_name, tag_incrad_units])
label_rh = tag_sep.join([tag_rh_name, tag_rh_units])
label_winds = tag_sep.join([tag_winds_name, tag_winds_units])
# Open figure
fig = plt.figure(figsize=(17, 11))
fig.autofmt_xdate()
# Subplot 1 [RAIN]
ax1 = plt.subplot(5, 1, 1)
ax1.set_xticks(tick_time_idx)
ax1.set_xticklabels([])
ax1.set_xlim(tick_time_period[0], tick_time_period[-1])
ax1.set_ylabel(label_rain, color='#000000')
ax1.set_ylim(value_min_rain, value_max_rain)
ax1.grid(b=True)
p11 = ax1.bar(df_rain.index, df_rain.values[:, 0], color='#33A1C9', alpha=1, width=0.025, align='edge')
p12 = ax1.axvline(attrs_ts['time_run'], color='#000000', linestyle='--', lw=2)
legend = ax1.legend([p11[0]], [tag_rain_name], frameon=False, loc=2)
ax1.add_artist(legend)
ax1.set_title('Time Series \n Domain: ' + attrs_ts['run_domain'] +
' \n TypeRun: ' + attrs_ts['run_name'] +
' == Time_Run: ' + str(attrs_ts['time_run']) + ' == Time_Restart: ' + str(attrs_ts['time_restart']) +
' == Time_Start: ' + str(attrs_ts['time_start']))
# Subplot 2 [AIR TEMPERATURE]
ax2 = plt.subplot(5, 1, 2)
ax2.set_xticks(tick_time_idx)
ax2.set_xticklabels([])
ax2.set_xlim(tick_time_period[0], tick_time_period[-1])
ax2.set_ylabel(label_airt, color='#000000')
ax2.set_ylim(value_min_airt, value_max_airt)
ax2.grid(b=True)
p21 = ax2.plot(df_airt.index, df_airt.values[:, 0], color='#FF0000', linestyle='-', lw=2)
p22 = ax2.axvline(attrs_ts['time_run'], color='#000000', linestyle='--', lw=2)
legend = ax2.legend([p21[0]], [tag_airt_name], frameon=False, loc=2)
ax2.add_artist(legend)
# Subplot 3 [INCOMING RADIATION]
ax3 = plt.subplot(5, 1, 3)
ax3.set_xticks(tick_time_idx)
ax3.set_xticklabels([])
ax3.set_xlim(tick_time_period[0], tick_time_period[-1])
ax3.set_ylabel(label_incrad, color='#000000')
ax3.set_ylim(value_min_incrad, value_max_incrad)
ax3.grid(b=True)
p31 = ax3.plot(df_incrad.index, df_incrad.values[:, 0], color='#9B26B6', linestyle='-', lw=2)
p32 = ax3.axvline(attrs_ts['time_run'], color='#000000', linestyle='--', lw=2)
legend = ax3.legend([p31[0]], [tag_incrad_name], frameon=False, loc=2)
ax3.add_artist(legend)
# Subplot 4 [RELATIVE HUMIDITY]
ax4 = plt.subplot(5, 1, 4)
ax4.set_xticks(tick_time_idx)
ax4.set_xticklabels([])
ax4.set_xlim(tick_time_period[0], tick_time_period[-1])
ax4.set_ylabel(label_rh, color='#000000', fontsize=6)
ax4.set_ylim(value_min_rh, value_max_rh)
ax4.grid(b=True)
p41 = ax4.plot(df_rh.index, df_rh.values[:, 0], color='#0093CC', linestyle='-', lw=2)
p42 = ax4.axvline(attrs_ts['time_run'], color='#000000', linestyle='--', lw=2)
legend = ax4.legend([p41[0]], [tag_rh_name], frameon=False, loc=2)
ax4.add_artist(legend)
# Subplot 5 [WIND SPEED]
ax5 = plt.subplot(5, 1, 5)
ax5.set_xticks(tick_time_idx)
ax5.set_xticklabels([])
ax5.set_xlim(tick_time_period[0], tick_time_period[-1])
ax5.set_ylabel(label_winds, color='#000000')
ax5.set_ylim(value_min_winds, value_max_winds)
ax5.grid(b=True)
p51 = ax5.plot(df_winds.index, df_winds.values[:, 0], color='#149414', linestyle='-', lw=2)
p52 = ax5.axvline(attrs_ts['time_run'], color='#000000', linestyle='--', lw=2)
legend = ax5.legend([p51[0]], [tag_winds_name], frameon=False, loc=2)
ax5.add_artist(legend)
ax5.set_xticks(tick_time_idx)
ax5.set_xticklabels(tick_time_labels, rotation=90, fontsize=8)
file_path, file_folder = os.path.split(file_name)
if not os.path.exists(file_path):
make_folder(file_path)
fig.savefig(file_name, dpi=fig_dpi)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to plot discharge time-series
def plot_ts_discharge(file_name, df_discharge_sim, attrs_discharge_sim, df_discharge_obs=None,
value_min_discharge=0, value_max_discharge=100,
df_rain=None, value_min_rain=0, value_max_rain=20,
df_soil_moisture=None, value_min_soil_moisture=0, value_max_soil_moisture=1,
tag_time_name='time', tag_time_units='[hour]',
tag_discharge_generic_name='Discharge',
tag_discharge_sim_name='Discharge Simulated',
tag_discharge_obs_name='Discharge Observed', tag_discharge_units='[m^3/s]',
tag_rain_avg_name='Rain Avg', tag_rain_accumulated_name='Rain Accumulated', tag_rain_units='[mm]',
tag_soil_moisture_name='Soil Moisture', tag_soil_moisture_units='[-]',
tag_discharge_thr_alarm='discharge thr alarm', tag_discharge_thr_alert='discharge thr alert',
tag_sep=' ', fig_dpi=120):
# Configure ts attributes
attrs_ts = configure_ts_attrs(attrs_discharge_sim)
# Configure ts time axes
[tick_time_period, tick_time_idx, tick_time_labels] = configure_ts_axes(df_discharge_sim)
# Axis labels
label_time = tag_sep.join([tag_time_name, tag_time_units])
label_discharge_generic = tag_sep.join([tag_discharge_generic_name, tag_discharge_units])
label_discharge_sim = tag_sep.join([tag_discharge_sim_name, tag_discharge_units])
label_discharge_obs = tag_sep.join([tag_discharge_obs_name, tag_discharge_units])
label_rain_avg = tag_sep.join([tag_rain_avg_name, tag_rain_units])
label_rain_accumulated = tag_sep.join([tag_rain_accumulated_name, tag_rain_units])
label_soil_moisture = tag_sep.join([tag_soil_moisture_name, tag_soil_moisture_units])
# Open figure
fig = plt.figure(figsize=(17, 11))
fig.autofmt_xdate()
# Subplot 1 [RAIN]
ax1 = plt.subplot(3, 1, 1)
ax1.set_xticks(tick_time_idx)
ax1.set_xticklabels([])
ax1.set_xlim(tick_time_period[0], tick_time_period[-1])
ax1.set_ylabel(label_rain_avg, color='#000000')
ax1.set_ylim(value_min_rain, value_max_rain)
ax1.grid(b=True)
p11 = ax1.bar(df_rain.index, df_rain.values[:, 0],
color='#33A1C9', alpha=1, width=0.025, align='edge')
p13 = ax1.axvline(attrs_ts['time_run'], color='#000000', linestyle='--', lw=2)
ax3 = ax1.twinx()
ax3.set_ylabel(label_rain_accumulated, color='#000000')
ax3.set_ylim(value_min_rain, value_max_rain)
ax3.set_xticks(tick_time_idx)
ax3.set_xticklabels([])
ax3.set_xlim(tick_time_period[0], tick_time_period[-1])
p31 = ax3.plot(df_rain.index, df_rain.cumsum().values[:, 0],
color='#33A1C9', linestyle='-', lw=1)
# legend = ax1.legend(p11, [oRain_OBS_META['var_appearance']], frameon=False, loc=2)
legend = ax1.legend((p11[0], p31[0]),
(tag_rain_avg_name, tag_rain_accumulated_name,),
frameon=False, loc=2)
ax1.add_artist(legend)
ax1.set_title('Time Series \n Section: ' + attrs_ts['section_name'] +
' == Basin: ' + attrs_ts['section_domain'] +
' == Area [Km^2]: ' + attrs_ts['section_drained_area'] + ' \n TypeRun: ' + attrs_ts['run_name'] +
' == Time_Run: ' + str(attrs_ts['time_run']) + ' == Time_Restart: ' + str(attrs_ts['time_restart']) +
' == Time_Start: ' + str(attrs_ts['time_start']))
# Subplot 2 [DISCHARGE]
ax2 = plt.subplot(3, 1, (2, 3))
p21 = ax2.plot(df_discharge_obs.index, df_discharge_obs.values[:, 0],
color='#000000', linestyle='--', lw=1, marker='o', ms=4)
p22 = ax2.plot(df_discharge_sim.index, df_discharge_sim.values[:, 0],
color='#0000FF', linestyle='-', lw=1)
ax2.set_xlabel(label_time, color='#000000')
ax2.set_xlim(tick_time_period[0], tick_time_period[-1])
ax2.set_ylabel(label_discharge_generic, color='#000000')
ax2.set_ylim(value_min_discharge, value_max_discharge)
ax2.grid(b=True)
p27 = ax2.axvline(attrs_ts['time_run'], color='#000000', linestyle='--', lw=2, label='time run')
p28 = ax2.axhline(attrs_ts['section_discharge_thr_alarm'], color='#FFA500', linestyle='--',
linewidth=2, label=tag_discharge_thr_alarm)
p29 = ax2.axhline(attrs_ts['section_discharge_thr_alert'], color='#FF0000', linestyle='--',
linewidth=2, label=tag_discharge_thr_alert)
ax2.set_xticks(tick_time_idx)
ax2.set_xticklabels(tick_time_labels, rotation=90, fontsize=8)
ax4 = ax2.twinx()
p41 = ax4.plot(df_soil_moisture.index, df_soil_moisture.values[:, 0],
color='#DA70D6', linestyle='--', lw=2)
ax4.set_ylabel(label_soil_moisture, color='#000000')
ax4.set_ylim(value_min_soil_moisture, value_max_soil_moisture)
ax4.set_xticks(tick_time_idx)
ax4.set_xticklabels(tick_time_labels, rotation=90, fontsize=8)
legend1 = ax2.legend((p21[0], p22[0], p41[0]),
(tag_discharge_sim_name, tag_discharge_obs_name, tag_soil_moisture_name),
frameon=False, ncol=2, loc=0)
legend2 = ax2.legend((p28, p29),
(tag_discharge_thr_alarm, tag_discharge_thr_alert),
frameon=False, ncol=4, loc=9, bbox_to_anchor=(0.5, -0.2))
ax2.add_artist(legend1)
ax2.add_artist(legend2)
file_path, file_folder = os.path.split(file_name)
if not os.path.exists(file_path):
make_folder(file_path)
fig.savefig(file_name, dpi=fig_dpi)
# plt.show()
# plt.close()
# -------------------------------------------------------------------------------------
| StarcoderdataPython |
3368282 | import re
def find_longest_path_length(path):
r'''Finds the longest path to a file in a representation of a filesystem.
>>> find_longest_path_length("dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext") # noqa
32
>>> find_longest_path_length("dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext")
20
'''
stack = []
result = 0
# regex captures tabs in a group and the name in the second group
for match in re.finditer(r'(\t*)([\w\.]+)', path):
indents = len(match[1]) + 1
name = match[2]
if '.' in name:
# File
# This can be made faster by saving current length of the directories on the stack
result = max(len('/'.join(stack) + f'/{name}'), result)
else:
# Directory
while len(stack) >= indents:
stack.pop()
if len(stack) < indents:
stack.append(name)
return result
| StarcoderdataPython |
146863 | <reponame>datavaluepeople/tentaclio<gh_stars>10-100
import pytest
from tentaclio.clients import base_client, decorators
class TestCheckConn:
def test_missing_connection_attribute(self):
class TestClient:
@decorators.check_conn
def func(self):
return True
def __enter__(self):
...
test_client = TestClient()
with pytest.raises(AttributeError):
test_client.func()
def test_inactive_client_connection(self):
url = "file:///path"
class TestClient(base_client.BaseClient):
allowed_schemes = ["file"]
def connect(self):
return True
@decorators.check_conn
def func(self):
return True
def _connect(self):
...
def __enter__(self):
...
test_client = TestClient(url)
with pytest.raises(ConnectionError):
test_client.func()
| StarcoderdataPython |
3392345 | <gh_stars>1-10
import os
from dotenv import load_dotenv
load_dotenv()
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.getenv('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv('DEBUG', default=False)
def comma_separated_list(value: str):
return [x.strip() for x in value.split(',') if x.strip()]
ALLOWED_HOSTS = comma_separated_list(os.getenv('ALLOWED_HOSTS'))
INTERNAL_IPS = comma_separated_list(os.getenv('INTERNAL_IPS'))
INSTALLED_APPS = [
'users.apps.UsersConfig',
'posts.apps.PostsConfig',
'about.apps.AboutConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
# 'debug_toolbar.apps.DebugToolbarConfig',
'sorl.thumbnail',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ROOT_URLCONF = 'twitter_killer.urls'
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'templates.includes.context_processors.year',
'templates.includes.context_processors.groups',
'templates.includes.context_processors.most_commented',
],
},
},
]
WSGI_APPLICATION = 'twitter_killer.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_USER_MODEL = 'users.User'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': ('django.contrib.auth.password_validation.'
'UserAttributeSimilarityValidator'),
},
{
'NAME': ('django.contrib.auth.password_validation.'
'MinimumLengthValidator'),
},
{
'NAME': ('django.contrib.auth.password_validation.'
'CommonPasswordValidator'),
},
{
'NAME': ('django.contrib.auth.password_validation.'
'NumericPasswordValidator'),
},
]
LOGIN_URL = "/auth/login/"
LOGIN_REDIRECT_URL = "/"
# LOGOUT_REDIRECT_URL = "/"
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
EMAIL_FILE_PATH = os.path.join(BASE_DIR, "sent_emails")
SHELL_PLUS = "ipython"
SHELL_PLUS_PRINT_SQL = True
| StarcoderdataPython |
1667198 | import logging
import time
from dataclasses import dataclass
from dataclasses import field
from enum import Enum
from os import system
from typing import Callable
import irsdk
from racelogger.model.recorderstate import RecorderState
from racelogger.processing.carproc import CarProcessor
from racelogger.processing.driverproc import DriverProcessor
from racelogger.processing.msgproc import MessageProcessor
from racelogger.processing.subprocessors import Subprocessors
class RaceStates(Enum):
INVALID = 0
RACING = 1
CHECKERED_ISSUED = 2
CHECKERED_DONE = 3
COOLDOWN = 4
TERMINATE = 5
@dataclass
class RaceProcessor:
"""state machine which is triggered on every iteration of the main loop"""
recorderState: RecorderState
"""holds the data from iRacing"""
subprocessors: Subprocessors
"""container for subprocessors"""
state: RaceStates = RaceStates.INVALID
"""holds the current state of the race"""
session_unique_id: int = -1
"""holds the current session unique id"""
session_num: int = -1
"""holds the current session num (uses for resets and the like during testing)"""
onNewSession: Callable[[int],None] = None
"""if present this will be called if a new session num is detected"""
onRaceFinished: Callable[[],None] = None
"""if present this will be called if the race is considered to be finished"""
def __post_init__(self):
self.on_init_ir_state = self.recorderState.ir['SessionState'] # used for "race starts" message
self.stateSwitch = {
RaceStates.INVALID: self.state_invalid,
RaceStates.RACING: self.state_racing,
RaceStates.CHECKERED_ISSUED: self.state_finishing,
RaceStates.CHECKERED_DONE: self.state_racing,
RaceStates.COOLDOWN: self.state_cooldown,
RaceStates.TERMINATE: self.state_terminate,
}
self.logger = logging.getLogger("RaceProcessor")
def state_invalid(self,ir):
if ir['SessionInfo']['Sessions'][ir['SessionNum']]['SessionType'] == 'Race':
if ir['SessionState'] == irsdk.SessionState.racing:
self.logger.info(f'=== Race state detected ===')
# self.pit_proc.race_starts(ir)
self.subprocessors.car_proc.race_starts(ir)
self.state = RaceStates.RACING
if self.on_init_ir_state != ir['SessionState']:
self.logger.info(f'real race start detected')
self.subprocessors.msg_proc.add_race_starts()
pass
def state_racing(self,ir):
if ir['SessionState'] == irsdk.SessionState.checkered:
self.logger.info(f'checkered flag issued')
self.state = RaceStates.CHECKERED_ISSUED
self.subprocessors.car_proc.checkered_flag(ir)
self.subprocessors.msg_proc.add_checkered_issued()
# need to check where the leader is now. has he already crossed s/f ?
# (problem is a about to be lapped car in front of that car - which of course should not yet be considered as a finisher)
return
# TODO: do we still need pit_process???
# self.pit_proc.process(ir)
# state.driver_proc.process(ir)
self.subprocessors.car_proc.process(ir, self.subprocessors.msg_proc)
def state_finishing(self,ir):
if ir['SessionState'] == irsdk.SessionState.cool_down:
# at this point the last car may not be processed in the standings.
# signal a cooldown. this will give us a little more time until the final standings arrive
self.subprocessors.car_proc.process(ir, self.subprocessors.msg_proc)
self.logger.info(f'cooldown signaled - get out of here')
self.state = RaceStates.COOLDOWN
self.cooldown_signaled = time.time()
return
# self.subprocessors.pit_proc.process(ir)
self.subprocessors.car_proc.process(ir, self.subprocessors.msg_proc)
def state_cooldown(self, ir):
"""
on cooldown notice we want to stay 5 more secs active to get the latest standings.
"""
if (time.time() - self.cooldown_signaled) < 5:
self.subprocessors.car_proc.process(ir, self.subprocessors.msg_proc)
return
else:
self.logger.info(f'internal cooldown phase done. Terminating now')
self.state = RaceStates.TERMINATE
return
def state_terminate(self, ir ):
# TODO: think about shutting down only when specific config attribute is set to do so ;)
# for now it is ok.
self.logger.info(f'race is finished. check if callback is present')
if (self.onRaceFinished):
self.logger.info(f'callback is present. perform callback')
self.onRaceFinished()
def handle_new_session(self,ir):
self.subprocessors.msg_proc.clear_buffer()
# self.subprocessors.pit_proc.clear_buffer()
# state.car_proc.clear_buffer()
self.session_num = ir['SessionNum']
self.session_unique_id = ir['SessionUniqueID']
self.state = RaceStates.INVALID
self.on_init_ir_state = ir['SessionState'] # used for "race starts" message
# state.last_data.speedmap = SpeedMap(state.track_length)
self.logger.info(f'new unique session detected: {self.session_unique_id} sessionNum: {self.session_num}')
if self.onNewSession:
self.onNewSession(self.session_num)
def process(self, ir):
"""processes one step on the state machine according to the data in recorderState.ir"""
# handle global changes here
if ir['SessionUniqueID'] != 0 and ir['SessionUniqueID'] != self.session_unique_id:
self.handle_new_session(ir)
# handle processing depending on current state
self.stateSwitch[self.state](ir)
| StarcoderdataPython |
3399903 | <reponame>HogniJacobsen/Kattis-solutions.
hand = input().split()
dominant = hand[1]
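# card points: values[rank] = [points when the card is of the dominant (trump) suit, points otherwise]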
values = {'A':[11,11],'K':[4,4],'Q':[3,3],'J':[20,2],'T':[10,10],'9':[14,0],'8':[0,0],'7':[0,0],}
sum = 0
for i in range(int(hand[0]) * 4):
number = input()
if number[1] == dominant:
sum += values[number[0]][0]
else:
sum += values[number[0]][1]
print(sum) | StarcoderdataPython |
3235305 | <reponame>cdoebler1/AIML2<filename>test/programytest/utils/files/test_config.py
import unittest
from programy.utils.files.filewriter import FileWriterConfiguration
class FileWriterConfigurationTests(unittest.TestCase):
def test_init_defaults(self):
config = FileWriterConfiguration("filename.txt")
self.assertEquals("filename.txt", config.filename)
self.assertEquals(None, config.file_format)
self.assertEquals("a", config.mode)
self.assertEquals("utf-8", config.encoding)
self.assertEquals(False, config.delete_on_start)
def test_init_no_defaults(self):
config = FileWriterConfiguration("filename2.txt", fileformat="txt", mode="r", encoding="ascii", delete_on_start=True)
self.assertEquals("filename2.txt", config.filename)
self.assertEquals("txt", config.file_format)
self.assertEquals("r", config.mode)
self.assertEquals("ascii", config.encoding)
self.assertEquals(True, config.delete_on_start)
| StarcoderdataPython |
167730 | <reponame>wowvwow/Phala-Network
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import requests
import json
def dingtalk(txt):
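    # post the text to a DingTalk group robot via its webhook API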
print(txt)
headers = {"Content-Type": "application/json"}
data = {"msgtype": "text", "text": {"content": txt}}
json_data = json.dumps(data)
access_token = '1111111111111111111111111111111111111111111111111'
requests.post(url=f'https://oapi.dingtalk.com/robot/send?access_token={access_token}',
data=json_data,
headers=headers)
if __name__ == '__main__':
if len(sys.argv) == 1:
        print(f'python3 {sys.argv[0]} message')
exit(0)
dingtalk(sys.argv[1])
| StarcoderdataPython |
3295570 | <reponame>timo95/knausj_talon
from talon import Module, Context
# --- App definitions ---
mod = Module()
mod.apps.fanfictionnet = """
tag: browser
browser.host: www.fanfiction.net
browser.host: m.fanfiction.net
"""
mod.apps.fictionpress = """
tag: browser
browser.host: www.fictionpress.com
browser.host: m.fictionpress.com
"""
# Context matching
ctx = Context()
ctx.matches = r"""
app: fanfictionnet
app: fictionpress
"""
# --- Define lists ---
# TODO: add more, split between categories
mod.list("fanfictionnet_fandom", desc="Fandoms to browse")
ctx.lists["user.fanfictionnet_fandom"] = {
"harry potter": "book/Harry-Potter/",
"naruto": "anime/Naruto/",
"bleach": "anime/Bleach/",
"ruby": "anime/RWBY/",
"my hero academia": "anime/My-Hero-Academia-僕のヒーローアカデミア/",
"madoka magica": "anime/Puella-Magi-Madoka-Magica-魔法少女まどか-マギカ/",
}
# TODO: add all categories
mod.list("fanfictionnet_category", desc="Categories of fandoms")
ctx.lists["user.fanfictionnet_category"] = {
"anime": "anime/",
"books": "book/",
}
# TODO: add poetry genres
mod.list("fictionpress_genre_fiction", desc="Fiction genre to browse")
ctx.lists["user.fictionpress_genre_fiction"] = {
"general": "General/",
"romance": "Romance/",
"fantasy": "Fantasy/",
"young adult": "Young-Adult/",
"adult": "Adult/",
"horror": "Horror/",
"supernatural": "Supernatural/",
"humor": "Humor/",
"sci-fi": "Sci-Fi/",
"action": "Action/",
"essay": "Essay/",
"manga": "Manga/",
"historical": "Historical/",
"mystery": "Mystery/",
"biography": "Biography/",
"thriller": "Thriller/",
"spiritual": "Spiritual/",
"mythology": "Mythology/",
"play": "Play/",
"fable": "Fable/",
"kids": "Kids/",
"western": "Western/",
}
| StarcoderdataPython |
3312471 | from .svd import SVD
from .eigh import EigenSolver
from .qr import QR
| StarcoderdataPython |
183860 | from django.apps import AppConfig
class KdlWagtailPeopleConfig(AppConfig):
name = 'kdl_wagtail.people'
label = 'kdl_wagtail_people'
| StarcoderdataPython |
4824130 | <reponame>vegetablejuiceftw/vacuum<filename>players/prefer_last_lazy.py<gh_stars>0
from agent import Point
from players.prefer_last import RandomPreferLastMoveAgent
class LazyRandomPreferLastMoveAgent(RandomPreferLastMoveAgent):
NAME = "Lazy"
AUTHOR = "<EMAIL>"
def __init__(self) -> None:
super().__init__()
self.steps = 0
def step(self, perception: Point) -> str:
self.steps += 1
if perception.dirty:
return self.ACTION.SUCK
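        # stay idle every third step instead of moving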
if self.steps % 3 == 0:
return self.ACTION.NOOP
return super().step(perception)
Robot = LazyRandomPreferLastMoveAgent
| StarcoderdataPython |
1728539 | <filename>Python3/461.hamming-distance.py<gh_stars>0
#
# @lc app=leetcode id=461 lang=python3
#
# [461] Hamming Distance
#
# @lc code=start
class Solution:
def hammingDistance(self, x: int, y: int):
if x == y:
return 0
cnt = 0
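        # compare the numbers bit by bit, counting positions where the lowest bits differ
        # (e.g. 1 = 0b001 and 4 = 0b100 differ in two positions, so the distance is 2)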
while x > 0 or y > 0:
if x & 1 != y & 1:
cnt += 1
x = x >> 1
y = y >> 1
return cnt
# @lc code=end
| StarcoderdataPython |
3268623 | import cv2
import numpy as np
img = cv2.imread(r'C:\Users\Lenovo\OneDrive\Desktop\n1',cv2.IMREAD_COLOR)
img1=img
grayscaled = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
th = cv2.adaptiveThreshold(grayscaled, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 115, 1)
#cv2.imshow('original',img)
#cv2.imshow('Adaptive threshold',th)
kernel = np.ones((5,5),np.float32)
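# morphological opening removes small white specks left by the adaptive threshold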
opening = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel)
#cv2.imshow('Erosion and Dilation',dilate)
#blur = cv2.GaussianBlur(opening,(7,7),0)
#blur= cv2.medianBlur(opening,5)
blur=cv2.bilateralFilter(opening,19,75,75)
dilate=cv2.dilate(blur,kernel,iterations=1)
cv2.namedWindow('Blurring',cv2.WINDOW_NORMAL)
cv2.resizeWindow('Blurring', 800,600)
edged = cv2.Canny(dilate, 50, 150) #canny edge detection
cv2.namedWindow('Canny',cv2.WINDOW_NORMAL)
cv2.resizeWindow('Canny', 800,600)
cv2.imshow('Canny',edged)
cv2.imshow('Blurring',dilate)
contours, hierarchy = cv2.findContours(edged,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# for contour in contours:
# x,y,w,h = cv2.boundingRect(contour)
# if w>50 and h>50:
# cv2.rectangle(img1,(x,y),(x+w,y+h),(0,255,0),2)
# try: hierarchy = hierarchy[0]
# except: hierarchy = []
# computes the bounding box for the contour, and draws it on the frame,
for contour in contours:
epsilon = 0.01*cv2.arcLength(contour,True)
approx = cv2.approxPolyDP(contour,epsilon,True)
cv2.drawContours(img1,contour,-1,(255,0,0),4)
# (x,y,w,h) = cv2.boundingRect(contour)
# if w>80 and h>80:
# cv2.rectangle(img1, (x,y), (x+w,y+h), (255, 0, 0), 2)
cv2.namedWindow('final',cv2.WINDOW_NORMAL)
cv2.resizeWindow('final', 800,600)
cv2.imshow('final',img1)
cv2.waitKey(0)
cv2.destroyAllWindows()
| StarcoderdataPython |
1744011 | from django.contrib import admin
from models_completed import CompletedTask
from models import Task
class TaskAdmin(admin.ModelAdmin):
display_filter = ['task_name']
list_display = ['task_name', 'task_params', 'run_at', 'priority', 'attempts']
admin.site.register(Task, TaskAdmin)
admin.site.register(CompletedTask) | StarcoderdataPython |
3340502 | '''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
import datetime
import itertools
import uuid
from copy import deepcopy
from enum import Enum
from pm4py.objects.petri_net.utils import petri_utils as pn_util
from pm4py.objects.petri_net.obj import PetriNet
from pm4py.objects.process_tree import obj as pt_operator
from pm4py.objects.process_tree.utils import generic as pt_util
from pm4py.objects.process_tree.utils.generic import tree_sort
from pm4py.util import exec_utils
TRANSITION_PREFIX = str(uuid.uuid4())
class Parameters(Enum):
DEBUG = "debug"
FOLD = "fold"
def generate_label_for_transition(t):
return 'tau' if t.label is None else '\'' + t.label + '\'' if not t.name.startswith(
TRANSITION_PREFIX) else t.label
def generate_new_binary_transition(t1, t2, operator, net):
t = PetriNet.Transition(TRANSITION_PREFIX + str(datetime.datetime.now()))
t.label = str(operator) + '(' + generate_label_for_transition(
t1) + ', ' + generate_label_for_transition(t2) + ')'
return t
def loop_requirement(t1, t2):
if t1 == t2:
return False
for p in pn_util.pre_set(t2):
if len(pn_util.pre_set(p)) != 1: # check that the preset of the t2 preset has one entry
return False
if t1 not in pn_util.pre_set(p): # check that t1 is the unique way to mark the preset of t2
return False
for p in pn_util.post_set(t2):
if len(pn_util.post_set(p)) != 1:
return False
if t1 not in pn_util.post_set(p):
return False
for p in pn_util.pre_set(t1):
if len(pn_util.post_set(p)) != 1:
return False
if t1 not in pn_util.post_set(p):
return False
if t2 not in pn_util.pre_set(p): # t2 has to enable t1!
return False
for p in pn_util.post_set(t1):
if len(pn_util.pre_set(p)) != 1: # check that the preset of the t2 preset has one entry
return False
if t1 not in pn_util.pre_set(p): # check that t1 is the unique way to mark the preset of t2
return False
if t2 not in pn_util.post_set(p):
return False
return True
def binary_loop_detection(net):
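    # look for a transition pair satisfying the loop pattern and collapse it into a single LOOP-labelled transition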
c1 = None
c2 = None
for t1, t2 in itertools.product(net.transitions, net.transitions):
if loop_requirement(t1, t2):
c1 = t1
c2 = t2
break
if c1 is not None and c2 is not None:
t = generate_new_binary_transition(c1, c2, pt_operator.Operator.LOOP, net)
net.transitions.add(t)
# reduce
for a in c1.in_arcs:
pn_util.add_arc_from_to(a.source, t, net)
for a in c1.out_arcs:
pn_util.add_arc_from_to(t, a.target, net)
pn_util.remove_transition(net, c1)
pn_util.remove_transition(net, c2)
return net
return None
def concurrent_requirement(t1, t2):
if t1 == t2: # check if transitions different
return False
if len(pn_util.pre_set(t1)) == 0 or len(pn_util.post_set(t1)) == 0 or len(pn_util.pre_set(t2)) == 0 or len(
pn_util.post_set(t2)) == 0: # not possible in WF-net, just checking...
return False
pre_pre = set()
post_post = set()
for p in pn_util.pre_set(t1): # check if t1 is unique post of its preset
pre_pre = set.union(pre_pre, pn_util.pre_set(p))
if len(pn_util.post_set(p)) > 1 or t1 not in pn_util.post_set(p):
return False
for p in pn_util.post_set(t1): # check if t1 is unique pre of its postset
post_post = set.union(post_post, pn_util.post_set(p))
if len(pn_util.pre_set(p)) > 1 or t1 not in pn_util.pre_set(p):
return False
for p in pn_util.pre_set(t2): # check if t2 is unique post of its preset
pre_pre = set.union(pre_pre, pn_util.pre_set(p))
if len(pn_util.post_set(p)) > 1 or t2 not in pn_util.post_set(p):
return False
for p in pn_util.post_set(t2): # check if t2 is unique pre of its postset
post_post = set.union(post_post, pn_util.post_set(p))
if len(pn_util.pre_set(p)) > 1 or t2 not in pn_util.pre_set(p):
return False
for p in set.union(pn_util.pre_set(t1), pn_util.pre_set(t2)): # check if presets synchronize
for t in pre_pre:
if t not in pn_util.pre_set(p):
return False
for p in set.union(pn_util.post_set(t1), pn_util.post_set(t2)): # check if postsets synchronize
for t in post_post:
if t not in pn_util.post_set(p):
return False
return True
def binary_concurrency_detection(net):
c1 = None
c2 = None
for t1, t2 in itertools.product(net.transitions, net.transitions):
if concurrent_requirement(t1, t2):
c1 = t1
c2 = t2
break
if c1 is not None and c2 is not None:
t = generate_new_binary_transition(c1, c2, pt_operator.Operator.PARALLEL, net)
net.transitions.add(t)
# reduce
for a in c1.in_arcs:
pn_util.add_arc_from_to(a.source, t, net)
for a in c1.out_arcs:
pn_util.add_arc_from_to(t, a.target, net)
for a in c2.in_arcs:
pn_util.add_arc_from_to(a.source, t, net)
for a in c2.out_arcs:
pn_util.add_arc_from_to(t, a.target, net)
pn_util.remove_transition(net, c1)
pn_util.remove_transition(net, c2)
return net
return None
def choice_requirement(t1, t2):
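    # t1 and t2 form an XOR (choice) block when they share identical, non-empty presets and postsets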
return t1 != t2 and pn_util.pre_set(t1) == pn_util.pre_set(t2) and pn_util.post_set(t1) == pn_util.post_set(
t2) and len(pn_util.pre_set(t1)) > 0 and len(
pn_util.post_set(t1)) > 0
def binary_choice_detection(net):
c1 = None
c2 = None
for t1, t2 in itertools.product(net.transitions, net.transitions):
if choice_requirement(t1, t2):
c1 = t1
c2 = t2
break
if c1 is not None and c2 is not None:
t = generate_new_binary_transition(c1, c2, pt_operator.Operator.XOR, net)
net.transitions.add(t)
for a in c1.in_arcs:
pn_util.add_arc_from_to(a.source, t, net)
for a in c2.out_arcs:
pn_util.add_arc_from_to(t, a.target, net)
pn_util.remove_transition(net, c1)
pn_util.remove_transition(net, c2)
return net
return None
def sequence_requirement(t1, t2):
if t1 == t2:
return False
if len(pn_util.pre_set(t2)) == 0:
return False
for p in pn_util.post_set(t1):
if len(pn_util.pre_set(p)) != 1 or len(pn_util.post_set(p)) != 1:
return False
if t1 not in pn_util.pre_set(p):
return False
if t2 not in pn_util.post_set(p):
return False
for p in pn_util.pre_set(t2):
if len(pn_util.pre_set(p)) != 1 or len(pn_util.post_set(p)) != 1:
return False
if t1 not in pn_util.pre_set(p):
return False
if t2 not in pn_util.post_set(p): # redundant check, just to be sure...
return False
return True
def binary_sequence_detection(net):
c1 = None
c2 = None
for t1, t2 in itertools.product(net.transitions, net.transitions):
if sequence_requirement(t1, t2):
c1 = t1
c2 = t2
break
if c1 is not None and c2 is not None:
t = generate_new_binary_transition(c1, c2, pt_operator.Operator.SEQUENCE, net)
net.transitions.add(t)
for a in c1.in_arcs:
pn_util.add_arc_from_to(a.source, t, net)
for a in c2.out_arcs:
pn_util.add_arc_from_to(t, a.target, net)
for p in pn_util.post_set(c1):
pn_util.remove_place(net, p)
pn_util.remove_transition(net, c1)
pn_util.remove_transition(net, c2)
return net
return None
def group_blocks_in_net(net, parameters=None):
"""
Groups the blocks in the Petri net
Parameters
--------------
net
Petri net
parameters
Parameters of the algorithm
Returns
--------------
grouped_net
Petri net (blocks are grouped according to the algorithm)
"""
if parameters is None:
parameters = {}
from pm4py.algo.analysis.workflow_net import algorithm as wf_eval
if not wf_eval.apply(net):
raise ValueError('The Petri net provided is not a WF-net')
net = deepcopy(net)
while len(net.transitions) > 1:
if binary_choice_detection(net) is not None:
continue
elif binary_sequence_detection(net) is not None:
continue
elif binary_concurrency_detection(net) is not None:
continue
elif binary_loop_detection(net) is not None:
continue
else:
break
return net
def apply(net, im, fm, parameters=None):
"""
Transforms a WF-net to a process tree
Parameters
-------------
net
Petri net
im
Initial marking
fm
Final marking
Returns
-------------
tree
Process tree
"""
if parameters is None:
parameters = {}
debug = exec_utils.get_param_value(Parameters.DEBUG, parameters, False)
fold = exec_utils.get_param_value(Parameters.FOLD, parameters, True)
grouped_net = group_blocks_in_net(net, parameters=parameters)
if len(grouped_net.transitions) == 1:
pt_str = list(grouped_net.transitions)[0].label
pt = pt_util.parse(pt_str)
ret = pt_util.fold(pt) if fold else pt
tree_sort(ret)
return ret
else:
if debug:
from pm4py.visualization.petri_net import visualizer as pn_viz
pn_viz.view(pn_viz.apply(grouped_net, parameters={"format": "svg"}))
raise ValueError('Parsing of WF-net Failed')
| StarcoderdataPython |
68037 | from setuptools import setup, find_packages
"""
Instructions for creating a release of the scispacy library.
1. Make sure your working directory is clean.
2. Make sure that you have changed the versions in "scispacy/version.py".
3. Create the distribution by running "python setup.py sdist" in the root of the repository.
4. Check you can install the new distribution in a clean environment.
5. Upload the distribution to pypi by running "twine upload <path to the distribution> -u <username> -p <password>".
This step will ask you for a username and password - the username is "scispacy" you can
get the password from LastPass.
"""
VERSION = {}
# version.py defines VERSION and VERSION_SHORT variables.
# We use exec here to read it so that we don't import scispacy
# whilst setting up the package.
with open("scispacy/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
setup(
name="scispacy",
version=VERSION["VERSION"],
url="https://allenai.github.io/SciSpaCy/",
author="Allen Institute for Artificial Intelligence",
author_email="<EMAIL>",
description="A full SpaCy pipeline and models for scientific/biomedical documents.",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
keywords=["bioinformatics nlp spacy SpaCy biomedical"],
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
license="Apache",
install_requires=[
"spacy>=3.0.0,<3.1.0",
"requests>=2.0.0,<3.0.0",
"conllu",
"numpy",
"joblib",
"nmslib>=1.7.3.6",
"scikit-learn>=0.20.3",
"pysbd",
],
tests_require=["pytest", "pytest-cov", "flake8", "black", "mypy"],
python_requires=">=3.6.0",
)
| StarcoderdataPython |
1709372 | <filename>evennia/contrib/base_systems/email_login/tests.py
"""
Test email login.
"""
from evennia.commands.default.tests import BaseEvenniaCommandTest
from . import email_login
class TestEmailLogin(BaseEvenniaCommandTest):
def test_connect(self):
self.call(
email_login.CmdUnconnectedConnect(),
"<EMAIL> test",
"The email '<EMAIL>' does not match any accounts.",
)
self.call(
email_login.CmdUnconnectedCreate(),
'"mytest" <EMAIL> test11111',
"A new account 'mytest' was created. Welcome!",
)
self.call(
email_login.CmdUnconnectedConnect(),
"<EMAIL> test11111",
"",
caller=self.account.sessions.get()[0],
)
def test_quit(self):
self.call(email_login.CmdUnconnectedQuit(), "", "", caller=self.account.sessions.get()[0])
def test_unconnectedlook(self):
self.call(email_login.CmdUnconnectedLook(), "", "==========")
def test_unconnectedhelp(self):
self.call(email_login.CmdUnconnectedHelp(), "", "You are not yet logged into the game.")
| StarcoderdataPython |
1733193 | <filename>CS2/9200_data_structures/linked_lists/linked_list_student_day5.py
'''
Fill in the code to make the removeValue function work
in the linked list class below.
You can use any previous linked list code.
An addToHead function has been provided.
'''
# Linked list example in Python
# Link class
class Link:
# Function to initialize the node object
def __init__(self, v):
self.val = v # Assign value
self.next = None # Initialize next as null
# Linked List class
class LinkedList:
# Function to initialize the Linked List
def __init__(self):
self.head = None
def addToHead(self, value):
temp = self.head
self.head = Link(value)
self.head.next = temp
def removeValue(self, value):
'''Remove the first instance of value
from the list.
Example: Suppose we want to remove 'A' and the
list is currently A->C->G->A->C->None
Then the list should become
C->G->A->C->None
If we remove 'A' again the list should become
C->G->C->None
If the list is empty, do nothing.
If the user asks to remove a value that is not in
the list, do nothing.
This function does not return anything.
'''
pass #TODO: Write code here.
def getLength(self):
length = 0
n = self.head
while n!=None:
length += 1
n = n.next
return length
def print(self):
n = self.head
string = ''
while n!=None:
string += str(n.val)+', '
n = n.next
#Print after chopping off the final comma and space
print(string[:len(string)-2])
linkl = LinkedList()
linkl.addToHead('C')
linkl.addToHead('A')
linkl.addToHead('G')
linkl.addToHead('C')
linkl.addToHead('A')
points = 0
try:
if linkl.getLength() == 5:
points += 1
except:
pass
linkl.removeValue('A')
try:
if linkl.getLength() == 4:
points += 1
print('a')
if linkl.head.val=='C':
points += 1
print('b')
if linkl.head.next.val=='G':
points += 1
print('c')
if linkl.head.next.next.val=='A':
points += 1
print('d')
except:
pass
linkl.removeValue('A')
try:
if linkl.getLength() == 3:
points += 1
print('e')
if linkl.head.val=='C':
points += 1
print('f')
if linkl.head.next.val=='G':
points += 1
print('g')
if linkl.head.next.next.val=='C':
points += 1
print('h')
if linkl.head.next.next.next==None:
points += 1
print('i')
except:
pass
try:
linkl.removeValue('A')
print('j')
points += 1
if linkl.getLength() == 3:
points += 1
print('k')
except:
pass
linkl.removeValue('G')
try:
if linkl.getLength() == 2:
points += 1
print('l')
if linkl.head.val=='C':
points += 1
print('m')
if linkl.head.next.val=='C':
points += 1
print('n')
if linkl.head.next.next==None:
points += 1
print('o')
except:
pass
linkl.removeValue('C')
linkl.removeValue('C')
try:
if linkl.getLength() == 0:
points += 1
print('p')
if linkl.head==None:
points += 1
print('q')
except:
pass
try:
linkl.removeValue('C')
points += 1
print('r')
except:
pass
print('Your code scored '+str(points)+' out of 19')
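# For reference, one possible removeValue implementation (a hedged sketch,
# not part of the original student exercise) that satisfies the docstring
# in the class above:
#
#   def removeValue(self, value):
#       prev = None
#       n = self.head
#       while n is not None:
#           if n.val == value:
#               if prev is None:
#                   self.head = n.next
#               else:
#                   prev.next = n.next
#               return
#           prev = n
#           n = n.next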
| StarcoderdataPython |
3256519 | import tvm.relay as relay
import tvm
def create_target(device):
if device == "x86":
print("from x86")
target = tvm.target.create("llvm -mcpu=core-avx2")
elif device == "x86-avx2":
print("from x86-avx2")
target = tvm.target.create("llvm -mcpu=core-avx2")
elif device == "x86-avx512":
print("from x86-avx-512")
target = tvm.target.create("llvm -mcpu=skylake-avx512")
elif device == "gpu":
target = tvm.target.cuda()
elif device == "aarch64":
target = tvm.target.create('llvm -device=arm_cpu -target=aarch64-linux-gnu -mattr=+neon')
elif device == "arm":
target = tvm.target.create('llvm -device=arm_cpu -target=armv7l-linux-gnueabihf -mattr=+neon')
return target
def create_ctx(device, did = 0):
print('ctx on device ' + device)
if 'x86' in device :
ctx = tvm.cpu(did)
elif device == "gpu":
ctx = tvm.gpu(did)
return ctx
def speed(graph, lib, params, ctx):
import numpy as np
import tvm.contrib.graph_runtime as runtime
import json
graph_dict = json.loads(graph)
input_shape = graph_dict["attrs"]["shape"][1][0]
input_name = graph_dict["nodes"][0]["name"]
data_tvm = tvm.nd.array(np.random.uniform(size = input_shape).astype("float32"))
module = runtime.create(graph, lib, ctx)
module.set_input(input_name, data_tvm)
#module.load_params(params)
module.set_input(**params)
ftimer = module.module.time_evaluator("run", ctx, number = 1, repeat = 100)
prof_res = np.array(ftimer().results) * 1000
return np.mean(prof_res)
def speed_profile(graph, lib, params, ctx):
import numpy as np
#import tvm.contrib.graph_runtime as runtime
from tvm.contrib.debugger import debug_runtime as runtime
import json
graph_dict = json.loads(graph)
input_shape = graph_dict["attrs"]["shape"][1][0]
input_name = graph_dict["nodes"][0]["name"]
data_tvm = tvm.nd.array(np.random.uniform(size = input_shape).astype("float32"))
module = runtime.create(graph, lib, ctx)
module.set_input(input_name, data_tvm)
#module.load_params(params)
module.set_input(**params)
ftimer = module.module.time_evaluator("run", ctx, number = 1, repeat = 100)
prof_res = np.array(ftimer().results) * 1000
# profile
module.run()
#
return np.mean(prof_res)
def get_onnx(path, batch=1):
import onnx
on = onnx.load(open(path, "rb"))
name = on.graph.input[0].name
input_shape = [i.dim_value for i in on.graph.input[0].type.tensor_type.shape.dim]
input_shape[0] = batch
return on, {name : input_shape}
def get_model(path):
graph = open(path + ".json").read()
lib = tvm.runtime.module.load_module(path + ".tar")
params = bytearray(open(path + ".params", "rb").read())
return graph, lib, params
def build_model_from_onnx(onnx_model, input_shape, target, log = ""):
from tvm import autotvm
import os
model, relay_params = relay.frontend.from_onnx(onnx_model, input_shape)
func = model["main"]
if os.path.isfile(log):
with autotvm.apply_history_best(log):
with relay.build_config(opt_level=4):
graph, lib, params = relay.build(func , target, params = relay_params)
else:
with relay.build_config(opt_level=4):
graph, lib, params = relay.build(func , target, params = relay_params)
return graph, lib , params
def save_model(graph, lib, params, prefix = "relay"):
deploy_name = prefix
import os
dir_name = os.path.dirname(deploy_name)
try:
os.mkdir(dir_name)
except:
pass
print("save to %s" % (deploy_name))
lib.export_library(deploy_name + '.tar' )
with open(deploy_name + ".json", "w") as fo:
fo.write(graph)
with open(deploy_name + ".params", "wb") as fo:
fo.write(relay.save_param_dict(params))
return True
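# Hedged end-to-end sketch of the helpers above (file paths, device names and
# batch size are illustrative, not taken from this repo):
#
#   onnx_model, input_shape = get_onnx("model.onnx", batch=1)
#   target = create_target("x86")
#   graph, lib, params = build_model_from_onnx(onnx_model, input_shape, target)
#   save_model(graph, lib, params, prefix="deploy/relay")
#   ctx = create_ctx("x86")
#   print("mean latency (ms):", speed(graph, lib, params, ctx))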
| StarcoderdataPython |
198784 | <gh_stars>0
# coding: utf-8
pyslim_version = '0.700'
slim_file_version = '0.7'
# other file versions that require no modification
compatible_slim_file_versions = ['0.7']
| StarcoderdataPython |
1705615 | #Return the element with maximum frequency in a list
def maxfreq(x):
y=max(set(x),key=x.count)
return y
#Return the element with minimum frequency in a list
def minfreq(x):
y=min(set(x),key=x.count)
return y
#Return a List of all "keys" in a dictionary
def dkey(x):
y=list(x.keys())
return y
#Return a List of all "values" in a dictionary
def dvalue(x):
y=list(x.values())
return y
#Return keys of a dictionary as a list whose values are greater than some input value
def vgreaterthan(d,i):
l=list()
for key, value in d.items():
if value>i:
l.append(key)
return l
#Return keys of a dictionary as a list whose values are lesser than some input value
def vlesserthan(d,i):
l=list()
for key, value in d.items():
if value<i:
l.append(key)
return l
#Return keys of a dictionary as a list whose values are equal to some input value
def vequalto(d,i):
l=list()
for key, value in d.items():
if value==i:
l.append(key)
return l
#Check if multiple keys are present in a dictionary
def mulkey(d,*args):
y=args[0]
for i in args:
y=d.keys() >= {i}
print(i,y)
#Returns a dictionary of frequencies of items in a list
def itemfreq(l):
count = {}
for i in l:
count[i] = count.get(i, 0) + 1
return count
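# Hedged usage sketch of the helpers above (sample data is illustrative):
if __name__ == "__main__":
    nums = [1, 2, 2, 3, 3, 3]
    print(maxfreq(nums))             # 3 (appears three times)
    print(minfreq(nums))             # 1 (appears once)
    scores = {"a": 10, "b": 25, "c": 40}
    print(dkey(scores))              # ['a', 'b', 'c']
    print(vgreaterthan(scores, 20))  # ['b', 'c']
    print(itemfreq(nums))            # {1: 1, 2: 2, 3: 3}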
| StarcoderdataPython |
3369917 | """
To solve this puzzle, you must press and hold keys 3, 7 and 11.
"""
import mpr121
from machine import Pin, I2C
i2c = I2C(3)
mpr = mpr121.MPR121(i2c)
# the winning combination is 3, 7 and 11
combination = (1<<3) | (1<<7) | (1<<11)
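# Worked out: 1<<3 = 8, 1<<7 = 128 and 1<<11 = 2048, so
# combination == 8 | 128 | 2048 == 2184 == 0b100010001000,
# i.e. a 12-bit mask with only bits 3, 7 and 11 set.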
# check all keys
def check(pin):
t = mpr.touched()
print(t)
if t & combination == combination:
print("You found the winning combination!")
d3 = Pin('D3', Pin.IN, Pin.PULL_UP)
d3.irq(check, Pin.IRQ_FALLING)
| StarcoderdataPython |
1649500 | #!/usr/bin/env python
import os
import socket
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet import error
from twisted.names.common import extractRecord
# from twisted.names.client import getResolver
from common import getResolver # XXX: See common.py
from twisted.names.client import dns
from twisted.names.client import Resolver
from twisted.names.client import DNSClientFactory
from twisted.python.failure import Failure
import common
from check import DomainCheck
from check import HostCheck
from err import *
class ZoneTransferRefusedResolver(Resolver):
def connectionLost(self, reason):
pass
class ZoneTransferRefusedAXFRController(object):
timeoutCall = None
def __init__(self, name, deferred):
self.name = name
self.deferred = deferred
def connectionMade(self, protocol):
message = dns.Message(protocol.pickID(), recDes=0)
message.queries = [dns.Query(self.name, dns.AXFR, dns.IN)]
protocol.writeMessage(message)
def connectionLost(self, protocol):
pass
def messageReceived(self, message, protocol):
if self.timeoutCall is not None:
self.timeoutCall.cancel()
self.timeoutCall = None
if self.deferred is not None:
self.deferred.callback(message)
self.deferred = None
class ZoneTransferRefused(DomainCheck):
"""Nameservers refuse zone transfer (AXFR)"""
def __init__(self, *args, **kwargs):
super(ZoneTransferRefused, self).__init__(*args, **kwargs)
self.resolver = getResolver()
def extractAllAddressRecords(self, name, answers, effort):
# Derivation of twisted.names.client.extractRecord/_cbRecords that
# returns all possible addresses.
addresses = []
if not effort:
return None
for x in answers:
if x.name == name:
if hasattr(socket, 'inet_ntop') and x.type in (dns.A6, dns.AAAA):
addresses.append(socket.inet_ntop(socket.AF_INET6, x.payload.address))
elif x.type == dns.A:
addresses.append(socket.inet_ntop(socket.AF_INET, x.payload.address))
elif x.type == dns.CNAME:
result = extractRecord(self.resolver, x.payload.name, answers, effort - 1)
if result:
addresses.append(result)
if not addresses:
for x in answers:
if x.type == dns.NS:
r = ZoneTransferRefusedResolver(servers=[(str(x.payload.name), dns.PORT)])
d = (r.lookupAddress(str(name))
.addCallback(lambda (ans, auth, add): extractRecord(r, name, ans + auth + add, effort - 1)))
addresses.append(d)
return list(set(addresses))
def getAllPossibleHosts(self, name):
d = (self.resolver.lookupAllRecords(name)
.addCallback(lambda (ans, auth, add): self.extractAllAddressRecords(dns.Name(name), ans + auth + add, effort=20)))
return d
def doAXFRRequest(self, name, host, port = dns.PORT, timeout = 10):
d = defer.Deferred()
controller = ZoneTransferRefusedAXFRController(name, d)
factory = DNSClientFactory(controller, timeout)
factory.noisy = False
connector = reactor.connectTCP(host, port, factory)
controller.timeoutCall = reactor.callLater(timeout or 10, self._timeoutZone, d, controller, connector, timeout or 10)
return d.addCallback(self._cbDoAXFRRequest, connector)
def _cbDoAXFRRequest(self, result, connector):
connector.disconnect()
return result
def _timeoutZone(self, d, controller, connector, seconds):
connector.disconnect()
controller.timeoutCall = None
controller.deferred = None
d.errback(error.TimeoutError("Timeout after %ds" % (seconds,)))
def tryZoneTransferTCP(self, nameserver):
d = (self.doAXFRRequest(self.target, nameserver)
.addCallback(lambda res: self.addSubresult('(TCP) NS: {0}'.format(nameserver), not res.answers, 'AXFR allowed' if res.answers else ''))
.addErrback(lambda res: self.addSubresult('(TCP) NS: {0}'.format(nameserver), CHECK_RESULT_UNCERTAIN, failure=res)))
return d
def tryZoneTransferUDP(self, nameserver):
r = ZoneTransferRefusedResolver(servers=[(nameserver, dns.PORT),])
d = (r.queryUDP([dns.Query(self.target, dns.AXFR, dns.IN),])
.addCallback(lambda res: self.addSubresult('(UDP) NS: {0}'.format(nameserver), not res.answers, 'AXFR allowed' if res.answers else ''))
.addErrback(lambda res: self.addSubresult('(UDP) NS: {0}'.format(nameserver), CHECK_RESULT_UNCERTAIN, failure=res)))
return d
def runZoneTransfers(self, nameservers):
sem = common.getSemaphore()
deferreds = []
for ns in nameservers:
deferreds.append(sem.run(self.tryZoneTransferTCP, ns))
deferreds.append(sem.run(self.tryZoneTransferUDP, ns))
dl = defer.DeferredList(deferreds)
return dl
def _getHosts(self, (ans, auth, add)):
targets = [str(x.payload.name) for x in ans]
dl = (common.semMap(self.getAllPossibleHosts, targets)
.addCallback(self._cbGetHosts))
dl.consumeErrors = True
return dl
def _cbGetHosts(self, dl):
# getHosts dl gives list of (status, result) where result is
# list of lists of IP addresses... e.g.:
# [(True, ['172.16.17.32', '8.8.8.8]),
# (True, ['172.16.58.3']),
# (True, ['172.16.31.10', '172.16.31.10']),
# (True, ['172.16.31.10'])
# ]
results = []
valid = [x[1] for x in dl if x[0]]
for addressSet in valid:
for address in addressSet:
results.append(address)
return results
def run(self):
self.setResult(CHECK_RESULT_SUB)
d = (self.resolver.lookupNameservers(self.target)
.addCallback(self._getHosts)
.addCallback(self.runZoneTransfers))
return d
class MXRecordsExist(DomainCheck):
"""MX records exist"""
def run(self):
r = getResolver()
d = (r.lookupMailExchange(self.target)
.addCallback(lambda (ans, auth, add): self.setResult(bool(ans))))
return d
class ValidRDNS(HostCheck):
"""RDNS points back to the host"""
def __init__(self, *args, **kwargs):
super(ValidRDNS, self).__init__(*args, **kwargs)
self.resolver = getResolver()
def errorHandler(self, failure):
self.setResult(CHECK_NOT_APPLICABLE, extra='No RDNS PTR')
return failure
def run(self):
if self.target not in ('127.0.0.1', '::1'):
d = (common.reverseHostLookup(self.target)
.addErrback(self.errorHandler)
.addCallback(self.resolver.getHostByName)
.addCallback(lambda newhost: self.setResult(newhost == self.target)))
else:
self.setResult(CHECK_NOT_APPLICABLE)
d = defer.Deferred()
reactor.callLater(0, d.callback, "herp derp")
return d
| StarcoderdataPython |
1634540 | from gii.core import app
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
class ListStackModel(QtCore.QAbstractListModel):
def __init__(self, stacks):
super(ListStackModel,self).__init__()
self.stacks=stacks
def rowCount(self, parent):
return len(self.stacks)
def data(self, idx, role=Qt.DisplayRole):
if not idx.isValid(): return None
row=idx.row()
level=self.stacks[row]
if role==Qt.DisplayRole:
return level['string']
if role==Qt.UserRole:
return level
return None
class ListStackView(QtGui.QListView):
def __init__(self):
super(ListStackView, self).__init__()
self.setModel(ListStackModel([]))
self.clicked.connect(self.onClicked)
self.doubleClicked.connect(self.onDClicked)
def loadStackData(self,data):
self.model().beginResetModel()
self.model().stacks=data
self.model().endResetModel()
idx=self.selectRow(0)
self.onClicked(idx)
self.onDClicked(idx)
def selectRow(self, row):
idx=self.model().index(row,0)
if idx.isValid(): self.setCurrentIndex(idx)
return idx
def onClicked(self, idx):
if not idx.isValid(): return
id=idx.row()
app.getModule('script_view').debuggerHandler.getScopeInfo(id+1)
def onDClicked( self, idx ):
if not idx.isValid(): return
level = self.model().data(idx, Qt.UserRole)
if level['file']:
highLight = ( idx.row() == 0 )
app.getModule('script_view').locateFile(
level['file'], level['line'], highLight and 'normal' or False)
| StarcoderdataPython |
3273402 | <gh_stars>0
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
head = ListNode(0)
if l1.val < l2.val:
result = l1
l1 = l1.next
elif l2.val < l1.val:
result = l2
l2 = l2.next
elif l1.val == l2.val:
result = l1
l1 = l1.next
l2 = l2.next
head.next = result
while(l1.next is not None or l2.next is not None):
if l1.val < l2.val:
result.next = l1
l1 = l1.next
elif l2.val < l1.val:
result.next = l2
l2 = l2.next
elif l1.val == l2.val:
result.next = l1
l1 = l1.next
l2 = l2.next
return head.next
l1_head = ListNode(0)
l1 = ListNode(1)
l1_head.next = l1
l1.next = ListNode(2)
l1 = l1.next
l1.next = ListNode(4)
l1 = l1_head.next
l2_head = ListNode(0)
l2 = ListNode(1)
l2_head.next = l2
l2.next = ListNode(3)
l2 = l2.next
l2.next = ListNode(4)
l2 = l2.next
a = Solution().mergeTwoLists(l1, l2) | StarcoderdataPython |
3284538 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007 <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
#from django import newforms as forms
from django import forms
attrs_dict = { 'class': 'required' }
class CommentForm(forms.Form):
comment = forms.CharField(widget=forms.widgets.Textarea(attrs={'class': 'required icomment',
'rows': 5,
'cols': 50}))
redirect = forms.CharField(max_length=255, widget=forms.widgets.HiddenInput(),
required=False)
parent_id = forms.IntegerField(widget=forms.widgets.HiddenInput(), required=False)
nesting = forms.IntegerField(widget=forms.widgets.HiddenInput(), required=False)
toplevel = forms.IntegerField(widget=forms.widgets.HiddenInput(), required=False)
class ArgueForm(forms.Form):
"""Form used for one user to challenge another to an argument"""
argument = forms.CharField(widget=forms.widgets.Textarea(attrs={'class': 'required icomment',
'rows': 5,
'cols': 50}))
title = forms.CharField(max_length=140,
label="Title",
widget=forms.TextInput(attrs={'size': '40'}))
parent_id = forms.IntegerField(widget=forms.widgets.HiddenInput(), required=False)
class DeleteForm(forms.Form):
comment_id = forms.IntegerField(widget=forms.widgets.HiddenInput())
referring_page = forms.CharField(max_length=255, widget=forms.widgets.HiddenInput(), required=False)
class RebutForm(forms.Form):
comment = forms.CharField(widget=forms.widgets.Textarea(attrs={'class': 'required icomment',
'rows': 5,
'cols': 50}))
redirect = forms.CharField(max_length=255, widget=forms.widgets.HiddenInput(),
required=False)
parent_id = forms.IntegerField(widget=forms.widgets.HiddenInput(), required=False)
nesting = forms.IntegerField(widget=forms.widgets.HiddenInput(), required=False)
toplevel = forms.IntegerField(widget=forms.widgets.HiddenInput(), required=False)
arg_id = forms.IntegerField(widget=forms.widgets.HiddenInput())
class FollowForm(forms.Form):
item = forms.CharField(widget=forms.widgets.HiddenInput())
id = forms.IntegerField(widget=forms.widgets.HiddenInput())
| StarcoderdataPython |
127522 | <gh_stars>10-100
# stdlib imports
import os
import sys
import unittest
# src imports
import_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../schematool')
sys.path.append(import_path)
from command import CommandContext, DownCommand
from db import MemoryDb
from errors import MissingRefError
# test util imports
import_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../util')
sys.path.append(import_path)
from alter_util import AlterUtil
from env_util import EnvironmentUtil
from test_util import make_argv
class DownTest(unittest.TestCase):
def setUp(self):
EnvironmentUtil.setup_fresh_test_env()
self.context = CommandContext.via({
'type': 'memory-db'})
self.downCommand = DownCommand(self.context)
def tearDown(self):
EnvironmentUtil.teardown_fresh_test_env()
def test_all_undoes_all_current_alters_when_none(self):
self.assertEqual(len(MemoryDb.data), 0)
sys.argv = make_argv(['all'])
self.downCommand.run()
self.assertEqual(len(MemoryDb.data), 0)
def test_all_undoes_all_current_alters_when_alters(self):
AlterUtil.create_alters([1])
AlterUtil.run_alters()
self.assertEqual(len(MemoryDb.data), 1)
sys.argv = make_argv(['all'])
self.downCommand.run()
self.assertEqual(len(MemoryDb.data), 0)
def test_ref_undoes_all_alters_including_ref(self):
AlterUtil.create_alters([1,2,3])
ids = AlterUtil.run_alters()
self.assertEqual(len(MemoryDb.data), 3)
sys.argv = make_argv([str(ids[1])])
self.downCommand.run()
self.assertEqual(len(MemoryDb.data), 1)
def test_ref_undoes_nothing_when_ref_doesnt_exist(self):
AlterUtil.create_alters([1, 2, 3, 4])
AlterUtil.run_alters()
self.assertEqual(len(MemoryDb.data), 4)
sys.argv = make_argv([str(10)])
try:
self.downCommand.run()
except MissingRefError:
pass
self.assertEqual(len(MemoryDb.data), 4)
def test_base_undoes_all_but_last_when_more_than_one(self):
AlterUtil.create_alters([1, 2])
AlterUtil.run_alters()
self.assertEqual(len(MemoryDb.data), 2)
sys.argv = make_argv(['base'])
self.downCommand.run()
self.assertEqual(len(MemoryDb.data), 1)
def test_base_undoes_none_when_no_alters(self):
self.assertEqual(len(MemoryDb.data), 0)
sys.argv = make_argv(['base'])
self.downCommand.run()
self.assertEqual(len(MemoryDb.data), 0)
def test_base_undoes_none_when_one_alter(self):
AlterUtil.create_alters([1])
AlterUtil.run_alters()
self.assertEqual(len(MemoryDb.data), 1)
sys.argv = make_argv(['base'])
self.downCommand.run()
self.assertEqual(len(MemoryDb.data), 1)
def test_n_option_runs_down_given_number_of_alters(self):
AlterUtil.create_alters([1, 2, 3, 4])
AlterUtil.run_alters()
self.assertEqual(len(MemoryDb.data), 4)
sys.argv = make_argv(['-n2'])
self.downCommand.run()
self.assertEqual(len(MemoryDb.data), 2)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3212230 | from collections import Counter, defaultdict
import csv
import requests
CSV_URL = 'https://raw.githubusercontent.com/pybites/SouthParkData/master/by-season/Season-{}.csv' # noqa E501
def get_season_csv_file(season):
"""Receives a season int, and downloads loads in its
corresponding CSV_URL"""
with requests.Session() as s:
download = s.get(CSV_URL.format(season))
return download.content.decode('utf-8')
def get_num_words_spoken_by_character_per_episode(content):
"""Receives loaded csv content (str) and returns a dict of
keys=characters and values=Counter object,
which is a mapping of episode=>words spoken"""
data = csv.DictReader(content.splitlines(), delimiter=',')
# count_character_words = defaultdict(lambda: Counter())
count_character_words = defaultdict(Counter)
for datum in data:
character = datum['Character']
episode = datum['Episode']
word_length = len(datum['Line'].split())
count_character_words[character][episode] += word_length
return count_character_words
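# The returned mapping is expected to look roughly like (episode totals here
# are the season-1 values asserted in the tests below):
#
#   {'Stan': Counter({'4': 615, '6': 572, '5': 514, ...}),
#    'Cartman': Counter({'1': 735, '10': 669, ...}),
#    ...}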
# tests
import pytest
from southpark import (get_season_csv_file,
get_num_words_spoken_by_character_per_episode)
@pytest.fixture(scope="module")
def words_spoken_s1():
# module scope to not call requests for every test function
content = get_season_csv_file(season=1)
return get_num_words_spoken_by_character_per_episode(content)
@pytest.fixture(scope="module")
def words_spoken_s5():
content = get_season_csv_file(season=5)
return get_num_words_spoken_by_character_per_episode(content)
def test_get_words_spoken_season1_stan(words_spoken_s1):
expected = [('4', 615), ('6', 572), ('5', 514)]
assert words_spoken_s1['Stan'].most_common()[:3] == expected
def test_get_words_spoken_season1_cartman(words_spoken_s1):
expected = [('1', 735), ('10', 669), ('13', 621)]
alt_expected = [('1', 738), ('10', 669), ('13', 621)]
assert words_spoken_s1['Cartman'].most_common()[:3] in (expected,
alt_expected)
def test_get_words_spoken_season1_cartman_least_talkative(words_spoken_s1):
expected = [('11', 285), ('6', 264), ('4', 244)]
assert words_spoken_s1['Cartman'].most_common()[-3:] == expected
def get_words_spoken_non_existing_character(words_spoken_s1):
assert words_spoken_s1['bogus'].most_common() == []
# let's look at another season and other characters
def test_get_words_spoken_season5_sheila(words_spoken_s5):
expected = [('11', 295), ('6', 212), ('7', 52)]
assert words_spoken_s5['Sheila'].most_common()[:3] == expected
def test_get_words_spoken_season5_choksondik(words_spoken_s5):
expected = [('7', 749), ('10', 131), ('1', 129)]
assert words_spoken_s5['Ms. Choksondik'].most_common()[:3] == expected | StarcoderdataPython |
4808552 | <filename>lib/bes/git/git_changelog_options.py
# -*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from bes.common.check import check
class git_changelog_options(object):
def __init__(self, **kargs):
self._check_options(kargs)
self.max_chars = kargs.get('max_chars', None)
self.revision_chars = kargs.get('revision_chars', 7)
self.message_chars = kargs.get('message_chars', None)
self.balance = kargs.get('balance', 0.5)
self.disable_date = kargs.get('disable_date', False)
self.disable_author = kargs.get('disable_author', False)
self.drop_message = kargs.get('drop_message', '[dropped]')
@staticmethod
def _check_options(kargs):
max_chars = kargs.get('max_chars', None)
revision_chars = kargs.get('revision_chars', 7)
message_chars = kargs.get('message_chars', None)
balance = kargs.get('balance', 0.5)
disable_date = kargs.get('disable_date', False)
disable_author = kargs.get('disable_author', False)
drop_message = kargs.get('drop_message', '[dropped]')
check.check_int(max_chars, allow_none=True)
check.check_int(revision_chars)
check.check_int(message_chars, allow_none=True)
check.check_float(balance)
check.check_bool(disable_date)
check.check_bool(disable_author)
check.check_string(drop_message, allow_none=True)
if max_chars and max_chars < 100:
raise ValueError("max_chars argument can't be less than 100")
if revision_chars < 1:
raise ValueError("revision_chars argument can't be less than 1")
if message_chars and message_chars < 1:
raise ValueError("message_chars argument can't be less than 1")
if balance <= 0 or balance > 1:
raise ValueError("balance argument value must be inside next range - (0, 1]")
def __str__(self):
return str(self.__dict__)
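# Hedged usage sketch:
#
#   options = git_changelog_options(max_chars=120, revision_chars=8)
#   print(options.balance)       # 0.5 (default)
#   print(options.drop_message)  # '[dropped]' (default)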
| StarcoderdataPython |
120695 | <filename>tests/user/test_model.py<gh_stars>0
# -*- coding: utf-8
# Core
import pytest
from mixer.backend.django import mixer
# Models
from custom_auth_user.models import User
@pytest.mark.django_db
class TestUserModel():
def test_user_model(self):
user = mixer.blend(User, first_name='first', last_name='last')
assert isinstance(user, User), 'Should create new user instance'
assert user.get_full_name() == 'first last', \
            'Should get full name, "{first_name} {last_name}" format'
assert user.get_short_name() == 'first', \
'Should get short name, "{first_name}" format'
| StarcoderdataPython |
62194 | from mcc_libusb import *
import datetime
import time
import numpy as np
mcc = USB1208FS()
mcc.usbOpen()
#mcc.usbDConfigPort(DIO_PORTA, DIO_DIR_OUT)
#mcc.usbDConfigPort(DIO_PORTB, DIO_DIR_IN)
#mcc.usbDOut(DIO_PORTA, 0)
#num = mcc.usbAIn(1, BP_1_00V)
#print(str(mcc.volts_FS(BP_1_00V, num)))
#channel = np.array([1, 2, 3, 7])
#gain = np.array([SE_10_00V, BP_10_00V, BP_20_00V, BP_1_25V])
#mcc.usbALoadQueue(4, channel, gain)
#mcc.usbReset()
#mcc.usbAIn_Stop()
options = AIN_EXECUTION | AIN_GAIN_QUEUE
sdata = mcc.usbAIn_Scan_SE(0, 0, 50, 1000, options)
print(sdata)
print(mcc.volts_SE(np.average(sdata)))
#mcc.usbALoadQueue(1, np.array([1]), np.array([BP_10_00V]))
#sdata1 = mcc.usbAIn_Scan(1,1,50,1000, AIN_EXECUTION)
#print(sdata1)
#print(mcc.volts_FS(BP_10_00V, np.average(sdata1)))
mcc.usbClose()
'''
while 1:
print("\nUSB 1208FS Testing")
print("----------------")
print("Hit 'b' to blink LED")
print("Hit 'c' to test counter")
print("Hit 'e' to exit")
print("Hit 'd' to test digital I/O");
print("Hit 'g' to test analog input scan (differential).")
print("Hit 'j' to test analog input scan (single ended).")
print("Hit 'i' to test analog input (differential mode)")
print("Hit 'h' to test analog input (single ended)")
print("Hit 'o' to test analog output")
print("Hit 'O' to test analog output scan")
print("Hit 'r' to reset")
print("Hit 'S' to get status")
print("Hit 's' to get serial number")
i = input(">> ")
if i == 'b': #test to see if led blinks
mcc.usbBlink()
elif i == 'e':
mcc.close()
exit(1)
elif i == 'd':
print("\nTesting Digital I/O....")
print("connect pins 21 through 28 <=> 32 through 39")
temp = int(input("Enter a byte number [0-0xff]: "))
mcc.usbDOut(DIO_PORTA, temp)
din = mcc.usbDIn(DIO_PORTB)
print("The number you entered = " + hex(din & 0xff))
elif i == 'i':
print("Testing the analog input differential...")
gain = int(input("Enter gain: "))
channel = int(input("Enter channel [0-7]: "))
value = mcc.usbAIn(channel, gain)
print("Channel: " + str(channel) + ": value = " + str(value))
elif i == 'h':
print("Testing the analog input single ended...")
#channel = input("Entner channel [0-7]: ")
for i in range(0, 100):
start = datetime.datetime.now()
for j in range(0,8):
value = mcc.usbAIn(j, SE_10_00V)
print("Channel: %d: Value = 0x%04X, %.2fV" % (j%8 ,value, mcc.volts_SE(value)))
delta = datetime.datetime.now() - start;
print("%d" % (delta.microseconds))
time.sleep(0.1)
elif i == 'o': #test the analog output
print("Testing the analog output...")
channel = int(input("Enter channel [0-1] => (pin 13-14): "))
value = int(input("Enter a value: "))
mcc.usbAOut(channel, value)
else:
continue
'''
| StarcoderdataPython |
4808748 | <filename>blog/migrations/0003_post_picture.py
# Generated by Django 3.2.11 on 2022-01-31 10:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_post_counted_views'),
]
operations = [
migrations.AddField(
model_name='post',
name='picture',
field=models.ImageField(default='statics/assets/img/author-1.jpg', max_length=255, upload_to=''),
),
]
| StarcoderdataPython |
4823826 | """
=============================================================
~/fn_portal/tests/api/crud_api/test_fn121.py
Created: 08 Sep 2020 10:40:55
DESCRIPTION:
This file contains a number of unit tests that verify that the api
endpoint for FN121 objects works as expected:
+ sample-list should be available to both logged in and anonymous users
+ sample-list should be filterable for gear, site depth, date, grid(s)
+ sample detail should contain the proper elements:
+ "slug"
+ "sam"
+ "effdt0"
+ "effdt1"
+ "effdur"
+ "efftm0"
+ "efftm1"
+ "effst"
+ "sidep"
+ "site"
+ "grid5":
+ "dd_lat"
+ "dd_lon"
+ "sitem"
+ "comment1"
+ "secchi"
+ post, put and delete endpoints should only be available to admin or
project lead users; they should not be available to anonymous users or
field crew (who cannot edit or create projects)
<NAME>
=============================================================
"""
from datetime import datetime
import pytest
from django.urls import reverse
from fn_portal.models import FN121
from rest_framework import status
from ..fixtures import api_client, grid, lake, net_sets, project, users
@pytest.fixture
def netset_data(grid):
"""A fixture that returns a dictionary corresponding to a net set object."""
netset_data = {
"sam": "16",
"ssn": "00",
"space": "11",
"mode": "AA",
"effdt0": "2019-10-03",
"effdt1": "2019-10-04",
"effdur": 25.00,
"efftm0": "10:40:00",
"efftm1": "11:40:00",
"effst": "1",
# "grtp": "GL",
# "gr": "GL50",
# "orient": "1",
"sidep": 5.7,
"site": "44",
"dd_lat": 45.8595,
"dd_lon": -80.8095,
# "grid5": {"grid": str(grid.grid), "slug": grid.slug},
"grid5": str(grid.grid),
"sitem": 23,
"comment1": "Some sample data",
"secchi": 10,
}
return netset_data
@pytest.mark.django_db
def test_fn121_listview(api_client, project, net_sets):
"""The FN121 list view should return a json resposne containing all of
the net sets in a project.
It should be available to any user.
"""
prj_cd = project.prj_cd
url = reverse("fn_portal_api:FN121_listview", kwargs={"prj_cd": prj_cd})
response = api_client.get(url)
assert response.status_code == status.HTTP_200_OK
data = [x.get("sam") for x in response.data["results"]]
assert len(data) == 3
expected = ["sam1", "sam2", "sam3"]
for x in expected:
assert x in data
@pytest.mark.django_db
def test_fn121_listview_none(api_client, project, net_sets):
"""If the list view is provided a project code that does not exist, it
should return a 404 response and not throw an error.
"""
# create a project code that does not exist:
prj_cd = project.prj_cd[:-1] + "X"
url = reverse("fn_portal_api:FN121_listview", kwargs={"prj_cd": prj_cd})
response = api_client.get(url)
assert response.status_code == status.HTTP_404_NOT_FOUND
# parameters for our list filter test. Filter, value, and the expected
# sample numbers that should be returned.
filter_list = [
("active", "True", ["sam3"]),
("sidep__gte", "25", ["sam1", "sam3"]),
("sidep__lte", "25", ["sam2", "sam3"]),
("grtp", "TP", ["sam2"]), # fail
("grtp", "TP,HP", ["sam2"]), # fail
("grtp__not", "TP", ["sam1", "sam3"]),
("gr", "GL32", ["sam1"]),
("gr", "GL10,GL32", ["sam1", "sam3"]),
("gr__not", "GL32", ["sam2", "sam3"]), # fail
# # ("grid", "714", []),
("effdur__gte", "22.5", ["sam2"]),
("effdur__lte", "22.5", ["sam1"]),
("set_date", "2019-10-21", ["sam1"]),
("set_date__gte", "2019-10-21", ["sam1", "sam3"]),
("set_date__lte", "2019-10-21", ["sam1", "sam2"]),
("lift_date", "2019-10-22", ["sam1"]),
("lift_date__gte", "2019-10-22", ["sam1"]),
("lift_date__lte", "2019-10-22", ["sam1", "sam2"]),
("set_time", "12:00", ["sam3"]),
("set_time__gte", "12:00", ["sam2", "sam3"]),
("set_time__lte", "12:00", ["sam1", "sam3"]),
("lift_time", "13:30", ["sam2"]),
("lift_time__gte", "13:30", ["sam2"]),
("lift_time__lte", "13:30", ["sam1", "sam2"]),
]
@pytest.mark.parametrize("filter,value,expected", filter_list)
@pytest.mark.django_db
def test_fn121_listview_filters(api_client, project, net_sets, filter, value, expected):
"""The list view accepts filters for net set attributes as query
parameters, these should limit the number net sets returned.
this could be a parameterized query that accepts a filter, and a
list of expected samples numbers that should be returned based on
that filter.
The list view end point has filters for a number of attribrutes:
+ active
+ sidep_gte
+ sidep_lte
+ grtp
+ gr
+ grid
+ effdur_gte
+ effdur_lte
+ set_date
+ set_date_gte
+ set_date_lte
+ lift_date
+ lift_date_gte
+ lift_date_lte
+ set_time
+ set_time_gte
+ set_time_lte
+ lift_time
+ lift_time_gte
+ lift_time_lte
"""
q = "?{}={}".format(filter, value)
url = reverse("fn_portal_api:FN121_listview", kwargs={"prj_cd": project.prj_cd})
response = api_client.get(url + q)
data = [x.get("sam") for x in response.data["results"]]
for x in expected:
assert x in data
username_list = [
(None, status.HTTP_403_FORBIDDEN),
("gcostanza", status.HTTP_403_FORBIDDEN),
("hsimpson", status.HTTP_201_CREATED),
("bgumble", status.HTTP_201_CREATED),
("mburns", status.HTTP_201_CREATED),
]
@pytest.mark.parametrize("username,expected", username_list)
@pytest.mark.django_db
def test_fn121_listview_create_permissions(
api_client, project, grid, netset_data, users, username, expected
):
"""a get request should be available for any user, a post request
should only be available to authorized users.
"""
if username:
login = api_client.login(username=username, password="<PASSWORD>")
assert login is True
netset_data["project"] = project.prj_cd
# ssn, space, and mode are slug related fields - repalce the
# labels with the slugs so the serializer can create the relationships
ssn = netset_data["ssn"]
netset_data["ssn"] = f"{project.prj_cd}-{ssn}".lower()
space = netset_data["space"]
netset_data["space"] = f"{project.prj_cd}-{space}".lower()
mode = netset_data["mode"]
netset_data["mode"] = f"{project.prj_cd}-{mode}".lower()
grid = netset_data["grid5"]
netset_data["grid5"] = "hu_" + grid
url = reverse("fn_portal_api:FN121_listview", kwargs={"prj_cd": project.prj_cd})
response = api_client.post(url, netset_data, format="json")
assert response.status_code == expected
@pytest.mark.django_db
def test_fn121_listview_create(api_client, project, grid, netset_data):
"""an authorized user should be able to create a new net set object by
submitting an appropriate data object to the fn121 list view api
endpoint.
"""
login = api_client.login(username="hsimpson", password="<PASSWORD>")
assert login is True
netset_data["project"] = project.prj_cd
# ssn, space, and mode are slug related fields - repalce the
# labels with the slugs so the serializer can create the relationships
ssn = netset_data["ssn"]
netset_data["ssn"] = f"{project.prj_cd}-{ssn}".lower()
space = netset_data["space"]
netset_data["space"] = f"{project.prj_cd}-{space}".lower()
mode = netset_data["mode"]
netset_data["mode"] = f"{project.prj_cd}-{mode}".lower()
grid = netset_data["grid5"]
netset_data["grid5"] = "hu_" + grid
url = reverse("fn_portal_api:FN121_listview", kwargs={"prj_cd": project.prj_cd})
response = api_client.post(url, netset_data, format="json")
fname = "c:/1work/scrapbook/wtf.html"
with open(fname, "wb") as f:
f.write(response.content)
assert response.status_code == status.HTTP_201_CREATED
# verify some of our attributes:
fn121 = FN121.objects.get(project=project, sam=netset_data["sam"])
assert fn121.effdt0 == datetime.strptime(netset_data["effdt0"], "%Y-%m-%d").date()
assert fn121.effdt1 == datetime.strptime(netset_data["effdt1"], "%Y-%m-%d").date()
assert fn121.effdur == netset_data["effdur"]
assert fn121.efftm0 == datetime.strptime(netset_data["efftm0"], "%H:%M:%S").time()
assert fn121.efftm1 == datetime.strptime(netset_data["efftm1"], "%H:%M:%S").time()
assert fn121.effst == netset_data["effst"]
assert fn121.sidep == netset_data["sidep"]
assert fn121.site == netset_data["site"]
assert fn121.dd_lat == netset_data["dd_lat"]
assert fn121.dd_lon == netset_data["dd_lon"]
assert fn121.sitem == netset_data["sitem"]
assert fn121.comment1 == netset_data["comment1"]
assert fn121.secchi == netset_data["secchi"]
grid = fn121.grid5
assert grid.slug == netset_data["grid5"]
usernames = [None, "gcostanza", "hsimpson", "bgumble", "mburns"]
@pytest.mark.parametrize("username", usernames)
@pytest.mark.django_db
def test_fn121_detailview_get_permissions(
api_client, project, net_sets, netset_data, username
):
"""a get request should be available to anyone."""
net_set = net_sets[0]
if username:
login = api_client.login(username=username, password="<PASSWORD>")
assert login is True
url = reverse("fn_portal_api:FN121_detailview", kwargs={"slug": net_set.slug})
response = api_client.get(url)
expected = status.HTTP_200_OK
assert response.status_code == expected
@pytest.mark.django_db
def test_fn121_detailview(api_client, net_sets):
"""Ensure that the detail view includes all of the necessary elements
and that that they correspond to the values in the net set.
"""
net_set = net_sets[0]
url = reverse("fn_portal_api:FN121_detailview", kwargs={"slug": net_set.slug})
response = api_client.get(url)
assert response.status_code == status.HTTP_200_OK
expected = {
"id": net_set.id,
"slug": net_set.slug,
"sam": net_set.sam,
"ssn": net_set.ssn.ssn,
"space": net_set.space.space,
"mode": net_set.mode.mode,
"effdt0": net_set.effdt0.strftime("%Y-%m-%d"),
"effdt1": net_set.effdt1.strftime("%Y-%m-%d"),
"effdur": net_set.effdur,
"efftm0": net_set.efftm0.strftime("%H:%M:%S"),
"efftm1": net_set.efftm1.strftime("%H:%M:%S"),
"effst": net_set.effst,
"sidep": net_set.sidep,
"site": net_set.site,
# "grid5": {"grid": str(net_set.grid5.grid), "slug": net_set.grid5.slug},
"grid5": str(net_set.grid5.grid),
"dd_lat": net_set.dd_lat,
"dd_lon": net_set.dd_lon,
"sitem": net_set.sitem,
"comment1": net_set.comment1,
"secchi": net_set.secchi,
}
for k, v in expected.items():
assert response.data[k] == v
@pytest.mark.parametrize("username", usernames)
@pytest.mark.django_db
def test_fn121_detailview_put_permissions(
api_client, project, net_sets, netset_data, username
):
"""a put (update) request should only be available to authorized users."""
net_set = net_sets[0]
if username:
login = api_client.login(username=username, password="<PASSWORD>")
assert login is True
netset_data["project"] = project.prj_cd
# ssn, space, and mode are slug related fields - repalce the
# labels with the slugs so the serializer can create the relationships
ssn = netset_data["ssn"]
netset_data["ssn"] = f"{project.prj_cd}-{ssn}".lower()
space = netset_data["space"]
netset_data["space"] = f"{project.prj_cd}-{space}".lower()
mode = netset_data["mode"]
netset_data["mode"] = f"{project.prj_cd}-{mode}".lower()
grid = netset_data["grid5"]
netset_data["grid5"] = "hu_" + grid
url = reverse("fn_portal_api:FN121_detailview", kwargs={"slug": net_set.slug})
response = api_client.put(url, netset_data, format="json")
if username is None or username == "gcostanza":
expected = status.HTTP_403_FORBIDDEN
else:
expected = status.HTTP_200_OK
assert response.status_code == expected
@pytest.mark.django_db
def test_fn121_update(api_client, project, net_sets):
"""An authorized user should be able update an object by sumbitting a
put request to the FN121DetailView endpoint.
"""
net_set = net_sets[0]
login = api_client.login(username="hsimpson", password="<PASSWORD>")
assert login is True
new_data = {
# "gr": "GL11",
# "grtp": "GL",
"effdt0": "2019-10-25",
"effdt1": "2019-10-26",
"efftm0": "09:30:00",
"efftm1": "13:30:00",
"effdur": 28.00,
}
url = reverse("fn_portal_api:FN121_detailview", kwargs={"slug": net_set.slug})
# get our original data:
response = api_client.get(url, new_data, format="json")
data = response.data.copy()
for k, v in new_data.items():
data[k] = v
# we need to supply slugs for fk fields:
prj_cd = project.prj_cd
data["project"] = prj_cd
# ssn, space, and mode are slug related fields - repalce the
# labels with the slugs so the serializer can create the relationships
ssn = data["ssn"]
data["ssn"] = f"{prj_cd}-{ssn}".lower()
space = data["space"]
data["space"] = f"{prj_cd}-{space}".lower()
mode = data["mode"]
data["mode"] = f"{prj_cd}-{mode}".lower()
grid5 = data["grid5"]
data["grid5"] = f"hu_{grid5}".lower()
# now resubmit our updated data:
response = api_client.put(url, data, format="json")
assert response.status_code == status.HTTP_200_OK
# verify that our new values are reflected on our object:
fn121 = FN121.objects.get(slug=net_set.slug)
# check that the times are being converted propery too:
assert fn121.effdt0 == datetime.strptime(new_data["effdt0"], "%Y-%m-%d").date()
assert fn121.effdt1 == datetime.strptime(new_data["effdt1"], "%Y-%m-%d").date()
assert fn121.effdur == new_data["effdur"]
assert fn121.efftm0 == datetime.strptime(new_data["efftm0"], "%H:%M:%S").time()
assert fn121.efftm1 == datetime.strptime(new_data["efftm1"], "%H:%M:%S").time()
@pytest.mark.parametrize("username", usernames)
@pytest.mark.django_db
def test_fn121_detailview_destroy_permissions(api_client, project, net_sets, username):
"""A delete request should only be available to authorized users."""
net_set = net_sets[0]
if username:
login = api_client.login(username=username, password="<PASSWORD>")
assert login is True
url = reverse("fn_portal_api:FN121_detailview", kwargs={"slug": net_set.slug})
response = api_client.delete(url)
if username is None or username == "gcostanza":
expected = status.HTTP_403_FORBIDDEN
else:
expected = status.HTTP_204_NO_CONTENT
assert response.status_code == expected
@pytest.mark.django_db
def test_fn121_destroy(api_client, project, net_sets):
"""An authorized user should be able to delete an object by sumbitting a
delete request to the FN121DetailView endpoint.
"""
net_set = net_sets[0]
fn121 = FN121.objects.filter(project=project).count()
assert fn121 == 3
login = api_client.login(username="hsimpson", password="<PASSWORD>")
assert login is True
url = reverse("fn_portal_api:FN121_detailview", kwargs={"slug": net_set.slug})
response = api_client.delete(url)
expected = status.HTTP_204_NO_CONTENT
assert response.status_code == expected
fn121 = FN121.objects.filter(project=project).count()
assert fn121 == 2
| StarcoderdataPython |
34840 | <filename>testTF.py
# Importing required libraries
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.text import Tokenizer
# List of sample sentences that we want to tokenize
sentences = ['I love my dog',
'I love my cat',
'you love my dog!',
'Do you think my dog is amazing?',
]
# adding a "out of vocabulary" word to the tokenizer
tokenizer = Tokenizer(num_words = 100,oov_token="<OOV>")
tokenizer.fit_on_texts(sentences)
word_index = tokenizer.word_index
sequences = tokenizer.texts_to_sequences(sentences)
test_data = ['i really love my dog',
'my dog loves my manatee',
]
test_seq = tokenizer.texts_to_sequences(test_data)
print(word_index)
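# With the OOV token configured, the unseen words in test_data ("really",
# "loves", "manatee") are expected to map to the OOV index 1 in test_seq
# below, while the known words keep their indices from word_index.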
print(test_seq) | StarcoderdataPython |
131970 | <reponame>arvy-p/sagemaker-run-notebook<gh_stars>10-100
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Run a notebook on demand or on a schedule using Amazon SageMaker Processing Jobs"""
import asyncio
import errno
import io
import logging
import json
import os
import re
import time
from subprocess import Popen, PIPE, STDOUT, DEVNULL
from shlex import split
import zipfile as zip
import botocore
import boto3
from .utils import default_bucket, get_execution_role
abbrev_image_pat = re.compile(
r"(?P<account>\d+).dkr.ecr.(?P<region>[^.]+).amazonaws.com/(?P<image>[^:/]+)(?P<tag>:[^:]+)?"
)
def abbreviate_image(image):
"""If the image belongs to this account, just return the base name"""
m = abbrev_image_pat.fullmatch(image)
if m:
tag = m.group("tag")
if tag == None or tag == ":latest":
tag = ""
return m.group("image") + tag
else:
return image
abbrev_role_pat = re.compile(r"arn:aws:iam::(?P<account>\d+):role/(?P<name>[^/]+)")
def abbreviate_role(role):
"""If the role belongs to this account, just return the base name"""
m = abbrev_role_pat.fullmatch(role)
if m:
return m.group("name")
else:
return role
def upload_notebook(notebook, session=None):
"""Uploads a notebook to S3 in the default SageMaker Python SDK bucket for
this user. The resulting S3 object will be named "s3://<bucket>/papermill-input/notebook-YYYY-MM-DD-hh-mm-ss.ipynb".
Args:
notebook (str):
The filename of the notebook you want to upload. (Required)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
The resulting object name in S3 in URI format.
"""
with open(notebook, "rb") as f:
return upload_fileobj(f, session)
def upload_fileobj(notebook_fileobj, session=None):
"""Uploads a file object to S3 in the default SageMaker Python SDK bucket for
this user. The resulting S3 object will be named "s3://<bucket>/papermill-input/notebook-YYYY-MM-DD-hh-mm-ss.ipynb".
Args:
notebook_fileobj (fileobj):
A file object (as returned from open) that is reading from the notebook you want to upload. (Required)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
The resulting object name in S3 in URI format.
"""
session = ensure_session(session)
snotebook = "notebook-{}.ipynb".format(
time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
)
s3 = session.client("s3")
key = "papermill_input/" + snotebook
bucket = default_bucket(session)
s3path = "s3://{}/{}".format(bucket, key)
s3.upload_fileobj(notebook_fileobj, bucket, key)
return s3path
def get_output_prefix():
"""Returns an S3 prefix in the Python SDK default bucket."""
return "s3://{}/papermill_output".format(default_bucket())
def execute_notebook(
*,
image,
input_path,
output_prefix,
notebook,
parameters,
role=None,
instance_type,
session,
):
session = ensure_session(session)
if not role:
role = get_execution_role(session)
elif "/" not in role:
account = session.client("sts").get_caller_identity()["Account"]
role = "arn:aws:iam::{}:role/{}".format(account, role)
if "/" not in image:
account = session.client("sts").get_caller_identity()["Account"]
region = session.region_name
image = "{}.dkr.ecr.{}.amazonaws.com/{}:latest".format(account, region, image)
if notebook == None:
notebook = input_path
base = os.path.basename(notebook)
nb_name, nb_ext = os.path.splitext(base)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
job_name = (
("papermill-" + re.sub(r"[^-a-zA-Z0-9]", "-", nb_name))[: 62 - len(timestamp)]
+ "-"
+ timestamp
)
input_directory = "/opt/ml/processing/input/"
local_input = input_directory + os.path.basename(input_path)
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
local_output = "/opt/ml/processing/output/"
api_args = {
"ProcessingInputs": [
{
"InputName": "notebook",
"S3Input": {
"S3Uri": input_path,
"LocalPath": input_directory,
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
},
},
],
"ProcessingOutputConfig": {
"Outputs": [
{
"OutputName": "result",
"S3Output": {
"S3Uri": output_prefix,
"LocalPath": local_output,
"S3UploadMode": "EndOfJob",
},
},
],
},
"ProcessingJobName": job_name,
"ProcessingResources": {
"ClusterConfig": {
"InstanceCount": 1,
"InstanceType": instance_type,
"VolumeSizeInGB": 40,
}
},
"StoppingCondition": {"MaxRuntimeInSeconds": 7200},
"AppSpecification": {
"ImageUri": image,
"ContainerArguments": [
"run_notebook",
],
},
"RoleArn": role,
"Environment": {},
}
api_args["Environment"]["PAPERMILL_INPUT"] = local_input
api_args["Environment"]["PAPERMILL_OUTPUT"] = local_output + result
if os.environ.get("AWS_DEFAULT_REGION") != None:
api_args["Environment"]["AWS_DEFAULT_REGION"] = os.environ["AWS_DEFAULT_REGION"]
api_args["Environment"]["PAPERMILL_PARAMS"] = json.dumps(parameters)
api_args["Environment"]["PAPERMILL_NOTEBOOK_NAME"] = notebook
client = boto3.client("sagemaker")
result = client.create_processing_job(**api_args)
job_arn = result["ProcessingJobArn"]
job = re.sub("^.*/", "", job_arn)
return job
def wait_for_complete(job_name, progress=True, sleep_time=10, session=None):
"""Wait for a notebook execution job to complete.
Args:
job_name (str):
The name of the SageMaker Processing Job executing the notebook. (Required)
progress (boolean):
If True, print a period after every poll attempt. (Default: True)
sleep_time (int):
The number of seconds between polls. (Default: 10)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
A tuple with the job status and the failure message if any.
"""
session = ensure_session(session)
client = session.client("sagemaker")
done = False
while not done:
if progress:
print(".", end="")
desc = client.describe_processing_job(ProcessingJobName=job_name)
status = desc["ProcessingJobStatus"]
if status != "InProgress":
done = True
else:
time.sleep(sleep_time)
if progress:
print()
return status, desc.get("FailureReason")
def download_notebook(job_name, output=".", session=None):
"""Download the output notebook from a previously completed job.
Args:
job_name (str): The name of the SageMaker Processing Job that executed the notebook. (Required)
output (str): The directory to copy the output file to. (Default: the current working directory)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
The filename of the downloaded notebook.
"""
session = ensure_session(session)
client = session.client("sagemaker")
desc = client.describe_processing_job(ProcessingJobName=job_name)
prefix = desc["ProcessingOutputConfig"]["Outputs"][0]["S3Output"]["S3Uri"]
notebook = os.path.basename(desc["Environment"]["PAPERMILL_OUTPUT"])
s3path = "{}/{}".format(prefix, notebook)
if not os.path.exists(output):
try:
os.makedirs(output)
except OSError as e:
if e.errno != errno.EEXIST:
raise
p1 = Popen(split("aws s3 cp --no-progress {} {}/".format(s3path, output)))
p1.wait()
return "{}/{}".format(output.rstrip("/"), notebook)
def run_notebook(
image,
notebook,
parameters={},
role=None,
instance_type="ml.m5.large",
output_prefix=None,
output=".",
session=None,
):
"""Run a notebook in SageMaker Processing producing a new output notebook.
Args:
image (str): The ECR image that defines the environment to run the job (required).
notebook (str): The local notebook to upload and run (required).
parameters (dict): The dictionary of parameters to pass to the notebook (default: {}).
role (str): The name of a role to use to run the notebook (default: calls get_execution_role()).
instance_type (str): The SageMaker instance to use for executing the job (default: ml.m5.large).
output_prefix (str): The prefix path in S3 for where to store the output notebook
(default: determined based on SageMaker Python SDK)
output (str): The directory to copy the output file to (default: the current working directory).
session (boto3.Session): The boto3 session to use. Will create a default session if not supplied (default: None).
Returns:
A tuple with the processing job name, the job status, the failure reason (or None) and the the path to
the result notebook. The output notebook name is formed by adding a timestamp to the original notebook name.
"""
session = ensure_session(session)
if output_prefix is None:
output_prefix = get_output_prefix()
s3path = upload_notebook(notebook, session)
job_name = execute_notebook(
image=image,
input_path=s3path,
output_prefix=output_prefix,
notebook=notebook,
parameters=parameters,
role=role,
instance_type=instance_type,
session=session,
)
print("Job {} started".format(job_name))
status, failure_reason = wait_for_complete(job_name)
if status == "Completed":
local = download_notebook(job_name, output=output)
else:
local = None
return (job_name, status, local, failure_reason)
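# Hedged usage sketch (image and notebook names are illustrative):
#
#   job, status, local_nb, failure = run_notebook(
#       image="notebook-runner",
#       notebook="analysis.ipynb",
#       parameters={"input": "s3://my-bucket/data.csv"},
#       instance_type="ml.m5.large",
#   )
#   print(job, status, local_nb)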
def stop_run(job_name, session=None):
"""Stop the named processing job
Args:
job_name (string): The name of the job to stop
session (boto3.Session): The boto3 session to use. Will create a default session if not supplied (default: None)."""
session = ensure_session(session)
client = session.client("sagemaker")
client.stop_processing_job(ProcessingJobName=job_name)
def describe_runs(n=0, notebook=None, rule=None, session=None):
"""Returns a generator of descriptions for all the notebook runs. See :meth:`describe_run` for details of
the description.
Args:
n (int): The number of runs to return or all runs if 0 (default: 0)
notebook (str): If not None, return only runs of this notebook (default: None)
rule (str): If not None, return only runs invoked by this rule (default: None)
session (boto3.Session): The boto3 session to use. Will create a default session if not supplied (default: None).
"""
session = ensure_session(session)
client = session.client("sagemaker")
paginator = client.get_paginator("list_processing_jobs")
page_iterator = paginator.paginate(NameContains="papermill-")
for page in page_iterator:
for item in page["ProcessingJobSummaries"]:
job_name = item["ProcessingJobName"]
if not job_name.startswith("papermill-"):
continue
d = describe_run(job_name, session)
            if notebook is not None and notebook != d["Notebook"]:
                continue
            if rule is not None and rule != d["Rule"]:
continue
yield d
if n > 0:
n = n - 1
if n == 0:
return
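def _example_report_failures():
    # Illustrative sketch, not part of the original module: scans the 20 most recent
    # notebook runs with the generator above and prints the ones that failed.
    for run in describe_runs(n=20):
        if run["Status"] == "Failed":
            print(run["Job"], run["Failure"])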
def describe_run(job_name, session=None):
"""Describe a particular notebook run.
Args:
        job_name (str): The name of the processing job that ran the notebook.
        session (boto3.Session): The boto3 session to use. Will create a default session if not supplied (default: None).
Returns:
A dictionary with keys for each element of the job description. For example::
{'Notebook': 'scala-spark-test.ipynb',
'Rule': '',
'Parameters': '{"input": "s3://notebook-testing/const.txt"}',
'Job': 'papermill-scala-spark-test-2020-10-21-20-00-11',
'Status': 'Completed',
'Failure': None,
'Created': datetime.datetime(2020, 10, 21, 13, 0, 12, 817000, tzinfo=tzlocal()),
'Start': datetime.datetime(2020, 10, 21, 13, 4, 1, 58000, tzinfo=tzlocal()),
'End': datetime.datetime(2020, 10, 21, 13, 4, 55, 710000, tzinfo=tzlocal()),
'Elapsed': datetime.timedelta(seconds=54, microseconds=652000),
'Result': 's3://sagemaker-us-west-2-1234567890/papermill_output/scala-spark-test-2020-10-21-20-00-11.ipynb',
'Input': 's3://sagemaker-us-west-2-1234567890/papermill_input/notebook-2020-10-21-20-00-08.ipynb',
'Image': 'spark-scala-notebook-runner',
'Instance': 'ml.m5.large',
'Role': 'BasicExecuteNotebookRole-us-west-2'}
"""
session = ensure_session(session)
client = session.client("sagemaker")
while True:
try:
desc = client.describe_processing_job(ProcessingJobName=job_name)
break
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "ThrottlingException":
time.sleep(1)
else:
raise e
status = desc["ProcessingJobStatus"]
if status == "Completed":
output_prefix = desc["ProcessingOutputConfig"]["Outputs"][0]["S3Output"][
"S3Uri"
]
notebook_name = os.path.basename(desc["Environment"]["PAPERMILL_OUTPUT"])
result = "{}/{}".format(output_prefix, notebook_name)
else:
result = None
if status == "Failed":
failure = desc["FailureReason"]
else:
failure = None
d = {}
d["Notebook"] = desc["Environment"].get("PAPERMILL_NOTEBOOK_NAME", "")
d["Rule"] = desc["Environment"].get("AWS_EVENTBRIDGE_RULE", "")
d["Parameters"] = desc["Environment"].get("PAPERMILL_PARAMS", "")
d["Job"] = job_name
d["Status"] = status
d["Failure"] = failure
d["Created"] = desc["CreationTime"]
d["Start"] = desc.get("ProcessingStartTime")
d["End"] = desc.get("ProcessingEndTime")
elapsed = None
if d.get("Start") is not None and d.get("End") is not None:
elapsed = d["End"] - d["Start"]
d["Elapsed"] = elapsed
d["Result"] = result
d["Input"] = desc["ProcessingInputs"][0]["S3Input"]["S3Uri"]
d["Image"] = abbreviate_image(desc["AppSpecification"]["ImageUri"])
d["Instance"] = desc["ProcessingResources"]["ClusterConfig"]["InstanceType"]
d["Role"] = abbreviate_role(desc["RoleArn"])
return d
def expand_params(params):
try:
param_map = json.loads(params)
return ", ".join([f"{p}={v}" for p, v in param_map.items()])
except Exception:
return ""
class NewJobs:
def __init__(self, client):
self.client = client
self.latest_seen_job = None
self.next_latest_seen_job = None
async def get_new(self):
next_token = None
        if self.next_latest_seen_job is not None:
self.latest_seen_job = self.next_latest_seen_job
self.next_latest_seen_job = None
while True:
args = {"NextToken": next_token} if next_token else {}
while True:
try:
await asyncio.sleep(0)
result = self.client.list_processing_jobs(MaxResults=30, **args)
break
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "ThrottlingException":
time.sleep(1)
else:
raise e
jobs = result["ProcessingJobSummaries"]
for job in jobs:
if not self.next_latest_seen_job:
self.next_latest_seen_job = job["ProcessingJobName"]
if job["ProcessingJobName"] == self.latest_seen_job:
return
yield job
next_token = result.get("NextToken")
if not next_token:
break
class NotebookRunTracker:
"""
NotebookRunTracker keeps track of many recent running jobs and optimizes the number of boto calls
you're doing to get the status by remembering previous runs and knowing that only in progress jobs can
change status (and therefore need to be polled).
"""
# We store the list backwards from how it's viewed outside so that we can just append new jobs on
# the end.
def __init__(self, max_jobs=20, session=None, log=None):
self.session = ensure_session(session)
self.client = self.session.client("sagemaker")
self.log = log or logging.getLogger(__name__)
self.max_jobs = max_jobs
self.new_jobs = NewJobs(self.client)
self.run_list = []
self.in_progress = {}
def __getitem__(self, item):
return self.run_list[::-1][item]
def __len__(self):
return len(self.run_list)
async def update_list(self):
list_count = 0
new_runs = []
async for job in self.new_jobs.get_new():
job_name = job["ProcessingJobName"]
if not job_name.startswith("papermill-"):
continue
await asyncio.sleep(0)
self.log.debug(f"Describing new job: {job_name}")
desc = describe_run(job_name, session=self.session)
new_runs.append(desc)
if desc["Status"] == "InProgress" or desc["Status"] == "Stopping":
self.in_progress[job_name] = desc
list_count += 1
if list_count >= self.max_jobs:
break
self.run_list.extend(new_runs[::-1])
if len(self.run_list) > self.max_jobs:
trimlen = len(self.run_list) - self.max_jobs
for r in self.run_list[:trimlen]:
if r["Status"] == "InProgress" or r["Status"] == "Stopping":
if r["Job"] in self.in_progress:
del self.in_progress[r["Job"]]
self.run_list = self.run_list[trimlen:]
async def update_in_progress(self):
for job, desc in list(self.in_progress.items()):
await asyncio.sleep(0)
self.log.debug(f"Describing in progress job: {job}")
new_desc = describe_run(job, session=self.session)
desc["Status"] = new_desc["Status"]
desc["Failure"] = new_desc["Failure"]
desc["Start"] = new_desc["Start"]
desc["End"] = new_desc["End"]
desc["Elapsed"] = new_desc["Elapsed"]
desc["Result"] = new_desc["Result"]
if not (
new_desc["Status"] == "InProgress" or new_desc["Status"] == "Stopping"
):
if (
job in self.in_progress
                ):  # because of the asyncio it's possible for us to race here
del self.in_progress[job]
async def update(self):
await self.update_list()
await self.update_in_progress()
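async def _example_track_runs():
    # Illustrative sketch, not part of the original module: polls the tracker a few
    # times, relying on its caching so only in-progress jobs are re-described. Uses
    # the asyncio import already present at the top of this module.
    tracker = NotebookRunTracker(max_jobs=10)
    for _ in range(3):
        await tracker.update()
        for run in tracker:  # most recent runs first
            print(run["Job"], run["Status"])
        await asyncio.sleep(30)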
def list_runs(n=0, notebook=None, rule=None, session=None):
"""Returns a pandas data frame of the runs, with the most recent at the top.
Args:
n (int): The number of runs to return or all runs if 0 (default: 0)
notebook (str): If not None, return only runs of this notebook (default: None)
rule (str): If not None, return only runs invoked by this rule (default: None)
session (boto3.Session): The boto3 session to use. Will create a default session if not supplied (default: None).
"""
import pandas as pd # pylint: disable=import-error
df = pd.DataFrame(describe_runs(n=n, notebook=notebook, rule=rule, session=session))
df["Parameters"] = df["Parameters"].map(expand_params)
return df
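def _example_list_rule_runs():
    # Illustrative sketch, not part of the original module: "Powers" is a placeholder
    # rule name. Returns a small view of the last 10 runs started by that rule.
    df = list_runs(n=10, rule="Powers")
    return df if df.empty else df[["Job", "Status", "Elapsed"]]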
def download_all(lis, output=".", session=None):
"""Download each of the output notebooks from a list previously completed jobs.
Args:
lis (list, pandas.Series, or pandas.DataFrame): A list of jobs or a pandas DataFrame with a "Job" column (as returned by :meth:`list_runs`). (Required)
output (str): The directory to copy the output files to. (Default: the current working directory)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
The list of the filenames of the downloaded notebooks.
"""
import pandas as pd # pylint: disable=import-error
if isinstance(lis, pd.DataFrame):
lis = list(lis["Job"])
elif isinstance(lis, pd.Series):
lis = list(lis)
session = ensure_session(session)
return [download_notebook(job, output, session) for job in lis]
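def _example_download_completed():
    # Illustrative sketch, not part of the original module: downloads the output of
    # every completed run of a (hypothetical) powers.ipynb notebook into ./results.
    runs = list_runs(notebook="powers.ipynb")
    completed = runs[runs["Status"] == "Completed"]
    return download_all(completed, output="results")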
def ensure_session(session=None):
"""If session is None, create a default session and return it. Otherwise return the session passed in"""
if session is None:
session = boto3.session.Session()
return session
code_file = "lambda_function.py"
lambda_function_name = "RunNotebook"
lambda_description = (
"A function to run Jupyter notebooks using SageMaker processing jobs"
)
def create_lambda(role=None, session=None):
session = ensure_session(session)
created = False
if role is None:
print(
"No role specified, will create a minimal role and policy to execute the lambda"
)
role = create_lambda_role()
created = True
# time.sleep(30) # wait for eventual consistency, we hope
if "/" not in role:
account = session.client("sts").get_caller_identity()["Account"]
role = "arn:aws:iam::{}:role/{}".format(account, role)
code_bytes = zip_bytes(code_file)
client = session.client("lambda")
print("Role={}".format(role))
retries = 0
while True:
try:
result = client.create_function(
FunctionName=lambda_function_name,
Runtime="python3.8",
Role=role,
Handler="lambda_function.lambda_handler",
Code={"ZipFile": code_bytes},
Description=lambda_description,
Timeout=30,
Publish=True,
)
return result
except botocore.exceptions.ClientError as e:
if (
created
and retries < 60
and e.response["Error"]["Code"] == "InvalidParameterValueException"
):
                time.sleep(1)
                retries += 1
else:
raise e
def create_lambda_role(name="run-notebook", session=None):
"""Create a default, minimal IAM role and policy for running the lambda function.
Args:
name (str): The name of the role and policy to create (default: "run-notebook").
session (boto3.Session): The boto3 session to use. Will create a default session if not supplied (default: None).
Returns:
str: The ARN of the resulting role.
"""
session = ensure_session(session)
iam = session.client("iam")
assume_role_policy_doc = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {"Service": "lambda.amazonaws.com"},
"Action": "sts:AssumeRole",
}
],
}
role = iam.create_role(
RoleName=name,
Description="A role for starting notebook execution from a lambda function",
AssumeRolePolicyDocument=json.dumps(assume_role_policy_doc),
)
policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["sagemaker:CreateProcessingJob", "iam:PassRole"],
"Resource": "*",
}
],
}
policy = iam.create_policy(
PolicyName=name, PolicyDocument=json.dumps(policy_document)
)
iam.attach_role_policy(PolicyArn=policy["Policy"]["Arn"], RoleName=name)
return role["Role"]["Arn"]
def zip_bytes(*files):
file_dir = os.path.dirname(os.path.abspath(__file__))
zip_io = io.BytesIO()
with zip.ZipFile(zip_io, "w") as z:
for cf in files:
with open("{}/{}".format(file_dir, cf), "rb") as f:
code_bytes = f.read()
info = zip.ZipInfo(cf)
info.external_attr = 0o777 << 16 # give full access to included file
z.writestr(info, code_bytes)
zip_io.seek(0)
return zip_io.read()
class InvokeException(Exception):
pass
def invoke(
notebook,
image="notebook-runner",
input_path=None,
output_prefix=None,
parameters={},
role=None,
instance_type="ml.m5.large",
extra_fns=[],
session=None,
):
"""Run a notebook in SageMaker Processing producing a new output notebook.
Invokes the installed Lambda function to immediately start a notebook execution in a SageMaker Processing Job.
Can upload a local notebook file to run or use one previously uploaded to S3. This function returns when
the Lambda function does without waiting for the notebook execution. To wait for the job and download the
results, see :meth:`wait_for_complete` and :meth:`download_notebook`.
To add extra arguments to the SageMaker Processing job, you can use the `extra_fns` argument. Each element of
that list is a function that takes a dict and returns a dict with new fields added. For example::
def time_limit(seconds):
def proc(extras):
extras["StoppingCondition"] = dict(MaxRuntimeInSeconds=seconds)
return extras
return proc
job = run.invoke(notebook="powers.ipynb", extra_fns=[time_limit(86400)])
Args:
        notebook (str): The notebook name. If `input_path` is None, this is a file to be uploaded before the Lambda
            is called. In all cases it is used as the name of the notebook when it's running and serves as the base
            of the output file name (with a timestamp attached) (required).
image (str): The ECR image that defines the environment to run the job (Default: "notebook-runner").
input_path (str): The S3 object containing the notebook. If this is None, the `notebook` argument is
taken as a local file to upload (default: None).
output_prefix (str): The prefix path in S3 for where to store the output notebook
(default: determined based on SageMaker Python SDK).
parameters (dict): The dictionary of parameters to pass to the notebook (default: {}).
role (str): The name of a role to use to run the notebook. This can be a name local to the account or a full ARN
(default: calls get_execution_role() or uses "BasicExecuteNotebookRole-<region>" if there's no execution role).
instance_type (str): The SageMaker instance to use for executing the job (default: ml.m5.large).
extra_fns (list of functions): The list of functions to amend the extra arguments for the processing job.
session (boto3.Session): The boto3 session to use. Will create a default session if not supplied (default: None).
Returns:
The name of the processing job created to run the notebook.
"""
session = ensure_session(session)
if "/" not in image:
account = session.client("sts").get_caller_identity()["Account"]
region = session.region_name
image = "{}.dkr.ecr.{}.amazonaws.com/{}:latest".format(account, region, image)
if not role:
try:
role = get_execution_role(session)
except ValueError:
role = "BasicExecuteNotebookRole-{}".format(session.region_name)
if "/" not in role:
account = session.client("sts").get_caller_identity()["Account"]
role = "arn:aws:iam::{}:role/{}".format(account, role)
if input_path is None:
input_path = upload_notebook(notebook)
if output_prefix is None:
output_prefix = get_output_prefix()
extra_args = {}
for f in extra_fns:
extra_args = f(extra_args)
args = {
"image": image,
"input_path": input_path,
"output_prefix": output_prefix,
"notebook": os.path.basename(notebook),
"parameters": parameters,
"role": role,
"instance_type": instance_type,
"extra_args": extra_args,
}
client = session.client("lambda")
result = client.invoke(
FunctionName=lambda_function_name,
InvocationType="RequestResponse",
LogType="None",
Payload=json.dumps(args).encode("utf-8"),
)
payload = json.loads(result["Payload"].read())
if "errorMessage" in payload:
raise InvokeException(payload["errorMessage"])
job = payload["job_name"]
return job
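def _example_invoke_and_wait():
    # Illustrative sketch, not part of the original module: the notebook name and
    # parameters are placeholders. Assumes the RunNotebook Lambda is installed.
    job = invoke(notebook="powers.ipynb", parameters={"p": 0.5})
    status, failure = wait_for_complete(job)
    if status == "Completed":
        return download_notebook(job, output="results")
    print("Run {} ended with status {}: {}".format(job, status, failure))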
RULE_PREFIX = "RunNotebook-"
def schedule(
notebook,
rule_name,
schedule=None,
event_pattern=None,
image="notebook-runner",
input_path=None,
output_prefix=None,
parameters={},
role=None,
instance_type="ml.m5.large",
extra_fns=[],
session=None,
):
"""Create a schedule for running a notebook in SageMaker Processing.
Creates a CloudWatch Event rule to invoke the installed Lambda either on the provided schedule or in response
    to the provided event.
:meth:`schedule` can upload a local notebook file to run or use one previously uploaded to S3.
To find jobs run by the schedule, see :meth:`list_runs` using the `rule` argument to filter to
a specific rule. To download the results, see :meth:`download_notebook` (or :meth:`download_all`
to download a group of notebooks based on a :meth:`list_runs` call).
To add extra arguments to the SageMaker Processing job, you can use the `extra_fns` argument. Each element of
that list is a function that takes a dict and returns a dict with new fields added. For example::
def time_limit(seconds):
def proc(extras):
extras["StoppingCondition"] = dict(MaxRuntimeInSeconds=seconds)
return extras
return proc
job = run.schedule(notebook="powers.ipynb", rule_name="Powers", schedule="rate(1 hour)", extra_fns=[time_limit(86400)])
Args:
        notebook (str): The notebook name. If `input_path` is None, this is a file to be uploaded before the Lambda
            is called. In all cases it is used as the name of the notebook when it's running and serves as the base
            of the output file name (with a timestamp attached) (required).
rule_name (str): The name of the rule for CloudWatch Events (required).
schedule (str): A schedule string which defines when the job should be run. For details,
see https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html
(default: None. Note: one of `schedule` or `event_pattern` must be specified).
event_pattern (str): A pattern for events that will trigger notebook execution. For details,
see https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/CloudWatchEventsandEventPatterns.html.
(default: None. Note: one of `schedule` or `event_pattern` must be specified).
image (str): The ECR image that defines the environment to run the job (Default: "notebook-runner").
input_path (str): The S3 object containing the notebook. If this is None, the `notebook` argument is
taken as a local file to upload (default: None).
output_prefix (str): The prefix path in S3 for where to store the output notebook
(default: determined based on SageMaker Python SDK).
parameters (dict): The dictionary of parameters to pass to the notebook (default: {}).
role (str): The name of a role to use to run the notebook. This can be a name local to the account or a full ARN
(default: calls get_execution_role() or uses "BasicExecuteNotebookRole-<region>" if there's no execution role).
instance_type (str): The SageMaker instance to use for executing the job (default: ml.m5.large).
extra_fns (list of functions): The list of functions to amend the extra arguments for the processing job.
session (boto3.Session): The boto3 session to use. Will create a default session if not supplied (default: None).
"""
kwargs = {}
    if schedule is not None:
        kwargs["ScheduleExpression"] = schedule
    if event_pattern is not None:
        kwargs["EventPattern"] = event_pattern
if len(kwargs) == 0:
raise Exception("Must specify one of schedule or event_pattern")
session = ensure_session(session)
# prepend a common prefix to the rule so it's easy to find notebook rules
prefixed_rule_name = RULE_PREFIX + rule_name
if "/" not in image:
account = session.client("sts").get_caller_identity()["Account"]
region = session.region_name
image = "{}.dkr.ecr.{}.amazonaws.com/{}:latest".format(account, region, image)
if not role:
try:
role = get_execution_role(session)
except ValueError:
role = "BasicExecuteNotebookRole-{}".format(session.region_name)
if "/" not in role:
account = session.client("sts").get_caller_identity()["Account"]
role = "arn:aws:iam::{}:role/{}".format(account, role)
if input_path is None:
input_path = upload_notebook(notebook)
if output_prefix is None:
output_prefix = get_output_prefix()
extra_args = {}
for f in extra_fns:
extra_args = f(extra_args)
args = {
"image": image,
"input_path": input_path,
"output_prefix": output_prefix,
"notebook": os.path.basename(notebook),
"parameters": parameters,
"role": role,
"instance_type": instance_type,
"extra_args": extra_args,
"rule_name": rule_name,
}
events = boto3.client("events")
result = events.put_rule(
Name=prefixed_rule_name,
Description='Rule to run the Jupyter notebook "{}"'.format(notebook),
**kwargs,
)
account = session.client("sts").get_caller_identity()["Account"]
region = session.region_name
target_arn = "arn:aws:lambda:{}:{}:function:{}".format(
region, account, lambda_function_name
)
result = events.put_targets(
Rule=prefixed_rule_name,
Targets=[{"Id": "Default", "Arn": target_arn, "Input": json.dumps(args)}],
)
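def _example_schedule_hourly():
    # Illustrative sketch, not part of the original module: the notebook and rule
    # names are placeholders. Assumes the RunNotebook Lambda and an ECR image exist.
    schedule(
        notebook="powers.ipynb",
        rule_name="Powers",
        schedule="rate(1 hour)",
        parameters={"p": 0.5},
    )
    # To remove the rule later: unschedule("Powers")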
def unschedule(rule_name, session=None):
"""Delete an existing notebook schedule rule.
Args:
rule_name (str): The name of the rule for CloudWatch Events (required).
session (boto3.Session): The boto3 session to use. Will create a default session if not supplied (default: None).
"""
prefixed_rule_name = RULE_PREFIX + rule_name
session = ensure_session(session)
events = boto3.client("events")
lambda_ = session.client("lambda")
try:
lambda_.remove_permission(
FunctionName="RunNotebook", StatementId="EB-{}".format(rule_name)
)
except botocore.exceptions.ClientError as ce:
message = ce.response.get("Error", {}).get("Message", "Unknown error")
        if "is not found" not in message:  # ignore it if the permission was already deleted
            raise
events.remove_targets(Rule=prefixed_rule_name, Ids=["Default"])
events.delete_rule(Name=prefixed_rule_name)
def describe_schedules(n=0, rule_prefix=None, session=None):
"""A generator that returns descriptions of all the notebook schedule rules
Args:
        n (int): The number of rules to return or all rules if 0 (default: 0)
rule_prefix (str): If not None, return only rules whose names begin with the prefix (default: None)
session (boto3.Session): The boto3 session to use. Will create a default session if not supplied (default: None)."""
if not rule_prefix:
rule_prefix = ""
rule_prefix = RULE_PREFIX + rule_prefix
session = ensure_session(session)
client = session.client("events")
paginator = client.get_paginator("list_rules")
page_iterator = paginator.paginate(NamePrefix=rule_prefix)
for page in page_iterator:
for item in page["Rules"]:
rule_name = item["Name"][len(RULE_PREFIX) :]
d = describe_schedule(rule_name, item, session)
yield d
if n > 0:
n = n - 1
if n == 0:
return
def describe_schedule(rule_name, rule_item=None, session=None):
"""Describe a notebook execution schedule.
Args:
rule_name (str): The name of the schedule rule to describe. (Required)
rule_item: Only used to optimize :meth:`describe_schedules`. Should be omitted in normal use. (Default: None)
Returns:
A dictionary with keys for each element of the rule. For example::
{'name': 'Powers',
'notebook': 'powers.ipynb',
'parameters': {},
'schedule': 'rate(1 hour)',
'event_pattern': None,
'image': 'notebook-runner',
'instance': 'ml.m5.large',
'role': 'BasicExecuteNotebookRole-us-west-2',
'state': 'ENABLED',
'input_path': 's3://sagemaker-us-west-2-123456789012/papermill_input/notebook-2020-11-02-19-49-24.ipynb',
'output_prefix': 's3://sagemaker-us-west-2-123456789012/papermill_output'}
"""
rule_name = RULE_PREFIX + rule_name
session = ensure_session(session)
ev = session.client("events")
if not rule_item:
rule_item = ev.describe_rule(Name=rule_name)
targets = ev.list_targets_by_rule(Rule=rule_name)
if "Targets" in targets and len(targets["Targets"]) > 0:
target = targets["Targets"][0]
inp = json.loads(target["Input"])
else:
# This is a broken rule. This could happen if we have weird IAM permissions and try to do a delete.
inp = {}
d = dict(
name=rule_name[len(RULE_PREFIX) :],
notebook=inp.get("notebook", ""),
parameters=inp.get("parameters", {}),
schedule=rule_item.get("ScheduleExpression"),
event_pattern=rule_item.get("EventPattern"),
image=abbreviate_image(inp.get("image", "")),
instance=inp.get("instance_type", ""),
role=abbreviate_role(inp.get("role", "")),
state=rule_item["State"],
input_path=inp.get("input_path", ""),
output_prefix=inp.get("output_prefix", ""),
)
return d
image_pat = re.compile(r"([0-9]+)\.[^/]+/(.*)$")
def base_image(s):
"""Determine just the repo and tag from the ECR image descriptor"""
m = image_pat.match(s)
if m:
return m.group(2)
else:
return s
role_pat = re.compile(r"arn:aws:iam::([0-9]+):role/(.*)$")
def base_role(s):
"""Determine just the role name from a role arn"""
m = role_pat.match(s)
if m:
return m.group(2)
else:
return s
def list_schedules(n=0, rule_prefix=None, session=None):
"""Return a pandas data frame of the schedule rules.
Args:
n (int): The number of rules to return or all rules if 0 (default: 0)
rule_prefix (str): If not None, return only rules whose names begin with the prefix (default: None)
session (boto3.Session): The boto3 session to use. Will create a default session if not supplied (default: None).
"""
import pandas as pd # pylint: disable=import-error
l = pd.DataFrame(describe_schedules(n=n, rule_prefix=rule_prefix, session=session))
if l is not None and l.shape[0] > 0:
l = l.drop(columns=["input_path", "output_prefix"])
l["image"] = l["image"].map(base_image)
l["role"] = l["role"].map(base_role)
for c in ["schedule", "event_pattern"]:
l[c] = l[c].map(lambda x: x if x else "")
return l
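def _example_inspect_schedules():
    # Illustrative sketch, not part of the original module: lists all notebook
    # schedule rules, then fetches the full description of the first one.
    df = list_schedules()
    if df.shape[0] == 0:
        return None
    return describe_schedule(df.iloc[0]["name"])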
| StarcoderdataPython |
149786 | <gh_stars>1-10
import selenium
from functions.Functions import Functions as Selenium
import unittest
from classes.FormLogin import EventLogin
from classes.FormTerminosCondiciones import EventTerminosCondiciones as EventTC
class TratamientoDatos(Selenium, unittest.TestCase):
def setUp(self):
Selenium.abrir_navegador(self)
Selenium.get_json_file(self,"TerminosCondicion")
self.driver.maximize_window()
def testTerminosCondiciones_Aceptar(self):
Cedula=Selenium.leer_celda(self, 'K3')
EventLogin.Loguin(self,Cedula,Cedula)
EventTC.AceptarTratamientoDatos(self)
EventTC.AceptarEnrolamiento(self)
EventTC.AceptarFirmaElectronica(self)
Selenium.esperar(self, 2)
Selenium.assert_text(self, 'AsersionTituloAprobacionDoc','VISUALIZACIÓN Y APROBACIÓN DE DOCUMENTOS')
def testTratamientoDatos_Rechazado(self):
Cedula=Selenium.leer_celda(self, 'K4')
EventLogin.Loguin(self,Cedula,Cedula)
EventTC.RechazarTratamientoDatos(self)
Selenium.esperar(self, 2)
def testEnrolamiento_Rechazado(self):
Cedula=Selenium.leer_celda(self, 'K5')
EventLogin.Loguin(self,Cedula,Cedula)
Selenium.esperar(self, 2)
EventTC.AceptarTratamientoDatos(self)
Selenium.esperar(self, 2)
EventTC.RechazarEnrolamientoFacial(self)
Selenium.esperar(self, 2)
def testFirmaElectronica_Rechazado(self):
Cedula=Selenium.leer_celda(self, 'K6')
EventLogin.Loguin(self,Cedula,Cedula)
Selenium.esperar(self, 2)
EventTC.AceptarTratamientoDatos(self)
Selenium.esperar(self, 2)
EventTC.AceptarEnrolamiento(self)
Selenium.esperar(self, 2)
EventTC.RechazarFirmaElectronica(self)
Selenium.esperar(self, 2)
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1661808 | """Robotx is a set of automation toolset.
..."""
__version__ = '0.2.2'
| StarcoderdataPython |
3220530 | ROOT_SCOPE = "root"
ENDPOINT_SCOPE = "endpoint"
REQUEST_SCOPE = "request"
class MalformedSpecError(Exception):
pass
class HTTPMethodNotAllowedError(MalformedSpecError):
"""Raised when the HTTP method in the API spec is invalid"""
def __init__(self, method, allowed_methos, *args):
message = (
f"HTTP method not supported: {method}. Supported methods: {allowed_methos}."
)
super(HTTPMethodNotAllowedError, self).__init__(message, *args)
class APIKeyMissingError(MalformedSpecError):
"""Raised when `api` key is not specified at root scope in the API spec"""
def __init__(self, *args):
message = "Missing api `key` at root scope in the API spec"
super(APIKeyMissingError, self).__init__(message, *args)
class InvalidKeyError(MalformedSpecError):
"""Raised when an invalid key is specified in the API spec"""
def __init__(self, key, scope, available_keys, *args):
message = f"Invalid key `{key}` at `{scope}` scope. Available keys are: {available_keys}"
super(InvalidKeyError, self).__init__(message, *args)
class InvalidPythonCodeError(MalformedSpecError):
"""Raised when python code defined in the API spec raises an error"""
def __init__(self, error_message, *args):
error_message = f"Invalid Python code defined in the API spec: {error_message}"
super(InvalidPythonCodeError, self).__init__(error_message, *args)
class BadConfigurationError(Exception):
"""Raised when an environment variable was not set or was badly configured"""
def __init__(self, env_var, *args):
super(BadConfigurationError, self).__init__(
f"{env_var} environment variable not set or badly configured", *args
)
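def _example_invalid_key_message():
    # Illustrative sketch, not part of the original module: the key names below are
    # made up to show the message format produced by InvalidKeyError.
    try:
        raise InvalidKeyError("naem", ENDPOINT_SCOPE, ["name", "path", "requests"])
    except InvalidKeyError as error:
        return str(error)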
| StarcoderdataPython |
3232513 | <filename>codes/dataops/batchaug.py
import random
import numpy as np
import torch
from torch.nn import functional as F
class BatchAugment:
def __init__(self, train_opt):
self.mixopts = train_opt.get(
"mixopts", ["blend", "rgb", "mixup", "cutmix",
"cutmixup", "cutout"]) # , "cutblur"]
self.mixprob = train_opt.get(
"mixprob", [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]) # , 1.0]
self.mixalpha = train_opt.get(
"mixalpha", [0.6, 1.0, 1.2, 0.7, 0.7, 0.001]) # , 0.7]
self.aux_mixprob = train_opt.get("aux_mixprob", 1.0)
self.aux_mixalpha = train_opt.get("aux_mixalpha", 1.2)
self.mix_p = train_opt.get("mix_p", None)
self.aug = None
self.mask = None
def __call__(self, img1, img2):
"""Apply the configured augmentations.
Args:
img1: the target image.
img2: the input image.
"""
img1_aug, img2_aug, self.mask, self.aug = BatchAug(
img1, img2, self.mixopts, self.mixprob, self.mixalpha,
self.aux_mixprob, self.aux_mixalpha, self.mix_p)
return img1_aug, img2_aug
def apply_mask(self, img1, img2):
"""cutout-ed pixels are discarded when calculating loss by
masking removed pixels from generated and target images.
Args:
img1: the generated image.
img2: the target image.
"""
if self.aug == "cutout":
img1, img2 = img1 * self.mask, img2 * self.mask
return img1, img2
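def _example_batch_augment():
    # Illustrative sketch, not part of the original module: tensor shapes and the
    # empty options dict are assumptions; a real training loop would pass train_opt.
    batchaug = BatchAugment({})              # fall back to the class defaults
    hr = torch.rand(4, 3, 128, 128)          # target (HR) batch
    lr = torch.rand(4, 3, 64, 64)            # input (LR) batch, x2 scale
    hr_aug, lr_aug = batchaug(hr, lr)        # pick and apply one augmentation at random
    fake = torch.rand_like(hr_aug)           # stand-in for a generator output
    fake, hr_aug = batchaug.apply_mask(fake, hr_aug)  # drop cutout-ed pixels before the loss
    return fake, hr_aug, lr_aug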
def BatchAug(img1, img2, options, probs, alphas,
aux_prob=None, aux_alpha=None, mix_p=None):
""" Mixture of Batch Augmentations (MoA)
    Randomly selects a single augmentation from the augmentation pool
and applies it to the batch.
Note: most of these augmentations require batch size > 1
References:
https://github.com/facebookresearch/mixup-cifar10/blob/master/train.py
https://github.com/kakaobrain/fast-autoaugment/blob/master/FastAutoAugment/aug_mixup.py
https://github.com/clovaai/CutMix-PyTorch/blob/master/train.py
https://github.com/hysts/pytorch_cutmix/blob/master/cutmix.py
https://github.com/clovaai/cutblur/blob/master/augments.py
"""
idx = np.random.choice(len(options), p=mix_p)
aug = options[idx]
prob = float(probs[idx])
alpha = float(alphas[idx])
mask = None
if aug == "none":
img1_aug, img2_aug = img1.clone(), img2.clone()
elif aug == "blend":
img1_aug, img2_aug = blend(
img1.clone(), img2.clone(),
prob=prob, alpha=alpha
)
elif aug == "rgb":
img1_aug, img2_aug = rgb(
img1.clone(), img2.clone(),
prob=prob
)
elif aug == "mixup":
img1_aug, img2_aug, = mixup(
img1.clone(), img2.clone(),
prob=prob, alpha=alpha,
)
elif aug == "cutout":
img1_aug, img2_aug, mask, _ = cutout(
img1.clone(), img2.clone(),
prob=prob, alpha=alpha
)
elif aug == "cutmix":
img1_aug, img2_aug = cutmix(
img1.clone(), img2.clone(),
prob=prob, alpha=alpha,
)
elif aug == "cutmixup":
img1_aug, img2_aug = cutmixup(
img1.clone(), img2.clone(),
mixup_prob=aux_prob, mixup_alpha=aux_alpha,
cutmix_prob=prob, cutmix_alpha=alpha,
)
elif aug == "cutblur":
img1_aug, img2_aug = cutblur(
img1.clone(), img2.clone(),
prob=prob, alpha=alpha
)
else:
raise ValueError("{} is not invalid.".format(aug))
return img1_aug, img2_aug, mask, aug
def blend(img1, img2, prob=1.0, alpha=0.6):
"""
    Blend image with vector v = (v1, v2, v3), where vi ∼ Unif(α, 1).
"""
if alpha <= 0 or random.random() >= prob:
return img1, img2
h1, w1 = img1.shape[2:]
h2, w2 = img2.shape[2:]
c = torch.empty((img2.size(0), 3, 1, 1),
device=img2.device).uniform_(0, 1.0)
rimg1 = c.repeat((1, 1, h1, w1))
rimg2 = c.repeat((1, 1, h2, w2))
v = np.random.uniform(alpha, 1)
img1 = v * img1 + (1-v) * rimg1
img2 = v * img2 + (1-v) * rimg2
return img1, img2
def rgb(img1, img2, prob=1.0):
"""Randomly permute RGB channels."""
if random.random() >= prob:
return img1, img2
perm = np.random.permutation(img2.shape[1])
img1 = img1[:, perm]
img2 = img2[:, perm]
return img1, img2
def mixup(img1, img2, prob=1.0, alpha=1.2):
""" Blend two randomly selected images.
Uses the default setting of Feng et al. which is:
I0 = λIi + (1−λ)Ij, where λ ∼Beta(α,α).
From: "<NAME>, <NAME>, <NAME>, and <NAME>. mixup: Beyond empirical risk minimization.
arXiv preprint arXiv:1710.09412, 2017"
Args
img1: targets (labels, images, etc)
img1: input images tensor (in batch > 1)
alpha: used to calculate the random lambda (lam) combination
ratio from beta distribution
Returns mixed inputs and mixed targets
img1: is the random mixed image target
img2: is the result of mixing a random image in the batch with
the other images, selected with the random index "index"
"""
if alpha <= 0 or random.random() >= prob:
return img1, img2
"""
if alpha > 0:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
lam = max(lam, 1. - lam)
#assert 0.0 <= lam <= 1.0, lam
"""
# batch_size = img1.size()[0]
lam = np.random.beta(alpha, alpha)
r_index = torch.randperm(img1.size(0)).to(img2.device)
img1 = lam * img1 + (1 - lam) * img1[r_index, :]
img2 = lam * img2 + (1 - lam) * img2[r_index, :]
return img1, img2
#TODO: no longer used in cutmix, but can be repurposed elsewhere
def rand_bbox(size, lam):
W = size[2]
H = size[3]
# image_h, image_w = data.shape[2:]
cut_rat = np.sqrt(1. - lam)
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)
# uniform
cx = np.random.randint(W) # cx = np.random.uniform(0, image_w)
cy = np.random.randint(H) # cy = np.random.uniform(0, image_h)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
def _cutmix(img2, prob=1.0, alpha=1.0):
if alpha <= 0 or random.random() >= prob:
return None
cut_ratio = np.random.randn() * 0.01 + alpha
h, w = img2.shape[2:]
    ch, cw = int(h * cut_ratio), int(w * cut_ratio)
fcy = np.random.randint(0, h-ch+1)
fcx = np.random.randint(0, w-cw+1)
tcy, tcx = fcy, fcx
r_index = torch.randperm(img2.size(0)).to(img2.device)
return {
"r_index": r_index, "ch": ch, "cw": cw,
"tcy": tcy, "tcx": tcx, "fcy": fcy, "fcx": fcx,
}
def cutmix(img1, img2, prob=1.0, alpha=1.0):
""" Replace randomly selected square-shape region to
sub-patch from other image in the batch. The coordinates are
calculated as:
    rx = Unif(0, W), rw = λW, where λ ∼ N(α, 0.01)
(same for ry and rh).
From: "<NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, and <NAME>. Cutmix: Regularization strategy
to train strong classifiers with localizable features.
arXiv preprint arXiv:1905.04899, 2019"
Args:
img1: targets (labels, images, etc)
img2: input images tensor (in batch > 1)
alpha: used to calculate the random lambda (lam) combination
ratio from beta distribution
Returns mixed inputs and mixed targets
img1: is the random mixed image target
img2: is the result of mixing a random image in the batch with
the other images, selected with the random index "index"
"""
c = _cutmix(img2, prob, alpha)
if c is None:
return img1, img2
scale = img1.size(2) // img2.size(2)
r_index, ch, cw = c["r_index"], c["ch"], c["cw"]
tcy, tcx, fcy, fcx = c["tcy"], c["tcx"], c["fcy"], c["fcx"]
hch, hcw = ch * scale, cw * scale
hfcy, hfcx, htcy, htcx = (
fcy * scale, fcx * scale, tcy * scale, tcx * scale)
img1[..., htcy:htcy+hch, htcx:htcx+hcw] = img1[r_index, :, hfcy:hfcy+hch, hfcx:hfcx+hcw]
img2[..., tcy:tcy+ch, tcx:tcx+cw] = img2[r_index, :, fcy:fcy+ch, fcx:fcx+cw]
return img1, img2
def cutmixup(img1, img2, mixup_prob=1.0, mixup_alpha=1.0,
cutmix_prob=1.0, cutmix_alpha=1.0): # (α1 / α2) -> 0.7 / 1.2
""" CutMix with the Mixup-ed image.
CutMix and Mixup procedure use hyper-parameter α1 and α2 respectively.
"""
c = _cutmix(img2, cutmix_prob, cutmix_alpha)
if c is None:
return img1, img2
scale = img1.size(2) // img2.size(2)
r_index, ch, cw = c["r_index"], c["ch"], c["cw"]
tcy, tcx, fcy, fcx = c["tcy"], c["tcx"], c["fcy"], c["fcx"]
hch, hcw = ch * scale, cw * scale
hfcy, hfcx, htcy, htcx = (
fcy * scale, fcx * scale, tcy * scale, tcx * scale)
v = np.random.beta(mixup_alpha, mixup_alpha)
if mixup_alpha <= 0 or random.random() >= mixup_prob:
img1_aug = img1[r_index, :]
img2_aug = img2[r_index, :]
else:
img1_aug = v * img1 + (1-v) * img1[r_index, :]
img2_aug = v * img2 + (1-v) * img2[r_index, :]
# apply mixup to inside or outside
if np.random.random() > 0.5:
img1[..., htcy:htcy+hch, htcx:htcx+hcw] = img1_aug[..., hfcy:hfcy+hch, hfcx:hfcx+hcw]
img2[..., tcy:tcy+ch, tcx:tcx+cw] = img2_aug[..., fcy:fcy+ch, fcx:fcx+cw]
else:
img1_aug[..., htcy:htcy+hch, htcx:htcx+hcw] = img1[..., hfcy:hfcy+hch, hfcx:hfcx+hcw]
img2_aug[..., tcy:tcy+ch, tcx:tcx+cw] = img2[..., fcy:fcy+ch, fcx:fcx+cw]
img2, img1 = img2_aug, img1_aug
return img1, img2
def cutblur(img1, img2, prob=1.0, alpha=1.0):
""" Perform CutMix with same image but different resolution,
producing xHR→LR (HR patch pasted into LR) and xLR→HR (LR patch
    pasted into HR). Randomly choose one of [xHR→LR, xLR→HR]
    to use as the input of the network.
Returns the modified LR image (img2) and unchanged target HR.
From: "<NAME> and <NAME> and <NAME>. Rethinking
Data Augmentation for Image Super-resolution: A Comprehensive
Analysis and a New Strategy. arXiv:2004.00448"
"""
if img1.size() != img2.size():
raise ValueError("img1 and img2 have to be the same resolution.")
if alpha <= 0 or random.random() >= prob:
return img1, img2
cut_ratio = np.random.randn() * 0.01 + alpha
h, w = img2.size(2), img2.size(3)
    ch, cw = int(h * cut_ratio), int(w * cut_ratio)
cy = np.random.randint(0, h-ch+1)
cx = np.random.randint(0, w-cw+1)
# apply CutBlur to inside or outside
if np.random.random() > 0.5:
img2[..., cy:cy+ch, cx:cx+cw] = img1[..., cy:cy+ch, cx:cx+cw]
else:
img2_aug = img1.clone()
img2_aug[..., cy:cy+ch, cx:cx+cw] = img2[..., cy:cy+ch, cx:cx+cw]
img2 = img2_aug
return img1, img2
def cutout(img1, img2, prob=1.0, alpha=0.1):
"""
Erase (zero-out) randomly sampled pixels with probability α.
Cutout-ed pixels are discarded when calculating loss by masking
removed pixels.
From: "<NAME> and <NAME>. Improved
regularization of convolutional neural networks with cutout.
arXiv preprint arXiv:1708.04552, 2017."
"""
scale = img1.size(2) // img2.size(2)
fsize = (img2.size(0), 1) + img2.size()[2:]
if alpha <= 0 or random.random() >= prob:
fimg2 = np.ones(fsize)
fimg2 = torch.tensor(fimg2, dtype=torch.float, device=img2.device)
fimg1 = F.interpolate(fimg2, scale_factor=scale, mode="nearest")
return img1, img2, fimg1, fimg2
fimg2 = np.random.choice([0.0, 1.0], size=fsize, p=[alpha, 1-alpha])
fimg2 = torch.tensor(fimg2, dtype=torch.float, device=img2.device)
fimg1 = F.interpolate(fimg2, scale_factor=scale, mode="nearest")
img2 *= fimg2
return img1, img2, fimg1, fimg2
| StarcoderdataPython |
1728791 | from flask import abort, jsonify, session
from app.util import request_helper
from app.services import reddit_service
from app.db.models.raffle import Raffle
from app.db.models.user import User
@request_helper.require_login
def get_user_submissions():
""" Return the user's Reddit submissions that are not already made
into raffles. """
user = User.find_by_jwt(session["jwt"])
if not user:
abort(401)
submissions = reddit_service.get_submissions_for_user(user.get_refresh_token())
if len(submissions) == 0:
return jsonify([])
# Remove submissions that were already made into raffles.
existing_verified_raffle_ids = set(
[r.submission_id for r in Raffle.get_verified_raffles()]
)
filtered_submissions = [
submission
for submission in submissions
if submission["id"] not in existing_verified_raffle_ids
]
return jsonify(filtered_submissions)
def get_user_raffles(username):
""" Returns all the raffles that were created by the given user. """
user = User.query.filter_by(username=username).first()
if not user:
return jsonify({"message": "User not found."}), 404
return jsonify([r.as_dict() for r in user.raffles])
RouteConfigs = [
{"rule": "/submissions", "view_func": get_user_submissions},
{"rule": "/users/<username>/raffles", "view_func": get_user_raffles},
]
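def _example_register_routes(app):
    # Illustrative sketch, not part of the original module: one way the RouteConfigs
    # list above might be registered on a Flask app (or blueprint) elsewhere.
    for config in RouteConfigs:
        app.add_url_rule(**config)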
| StarcoderdataPython |
3387057 | from torch import nn
from fairscale.utils.meta import init_meta_context, materialize_module
def test_meta():
with init_meta_context():
m = nn.Linear(in_features=1, out_features=1)
assert m.weight.device.type == "meta"
print(m)
materialize_module(m)
assert m.weight.device.type == "cpu"
print(m.weight)
print(m)
| StarcoderdataPython |