Dataset schema (one record per source file): blob_id, directory_id, path, content_id, detected_licenses, license_type, repo_name, snapshot_id, revision_id, branch_name, visit_date, revision_date, committer_date, github_id, star_events_count, fork_events_count, gha_license_id, gha_event_created_at, gha_created_at, gha_language, src_encoding, language (Python only), is_vendor, is_generated, length_bytes, extension, content, authors, author_id. Each record below gives the file path, repository, and license, followed by the file content.

--- /cf-540-a.py | repo: luctivud/Coding-Trash | license: none ---

# JAI SHREE RAM
import math; from collections import *
import sys; from functools import reduce
# sys.setrecursionlimit(10**6)
def get_ints(): return map(int, input().strip().split())
def get_list(): return list(get_ints())
def get_string(): return list(input().strip().split())
def printxsp(*args): return print(*args, end="")
def printsp(*args): return print(*args, end=" ")
UGLYMOD = int(1e9)+7; SEXYMOD = 998244353; MAXN = int(1e5)
# sys.stdin=open("input.txt","r");sys.stdout=open("output.txt","w")
# for _testcases_ in range(int(input())):
n = int(input())
s = input()
t = input()
ans = 0
for i in range(n):
first = int(s[i])
secon = int(t[i])
diff = max(first, secon) - min(first, secon)
ans += min(diff, 10-diff)
print(ans)
'''
>>> COMMENT THE STDIN!! CHANGE ONLINE JUDGE !!
THE LOGIC AND APPROACH IS MINE @luctivud ( UDIT GUPTA )
Link may be copy-pasted here if it's taken from other source.
DO NOT PLAGIARISE.
>>> COMMENT THE STDIN!! CHANGE ONLINE JUDGE !!
'''

--- /03-accessing-web-data/reading-webpages.py | repo: chaochaocodes/PY4E | license: none ---

'''
Reading Webpages like Files using urllib
'''
import urllib.request, urllib.parse, urllib.error
# 1. Read like a File
fhand = urllib.request.urlopen('http://data.pr4e.org/romeo.txt')
for line in fhand:
print(line.decode().strip())
# reads the remote text file line by line
# returns header + body, but header not returned in this for loop; accessed another way
# 2. Working with the data. Retrieve and find frequency of words
fhand = urllib.request.urlopen('http://data.pr4e.org/romeo.txt')
counts = dict()
for line in fhand:
words = line.decode().split()
# line is a byte string, decode into character string
for word in words:
        counts[word] = counts.get(word, 0) + 1
print(counts)
# array of words, count and save in dict
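
# --- Added sketch (not part of the original course file): the same word count can
# be written with collections.Counter; it assumes the same URL is reachable.
from collections import Counter
fhand = urllib.request.urlopen('http://data.pr4e.org/romeo.txt')
counter = Counter(word for line in fhand for word in line.decode().split())
print(counter.most_common(5))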

--- /models/dbsetup.py | repo: laminko/wBlog | license: MIT ---

from datetime import datetime
SYM_PAGE_BREAKER = " {LMK:PAGE-BREAK} "
SINGLE_SPACE = " "
# Tables
db.define_table('post',
Field('title', 'string'),
Field('body', 'text'),
Field('body_pagebreak',
compute=lambda r: (
r['body'] or "").split(SYM_PAGE_BREAKER)[0]),
Field('body_nobreak',
compute=lambda r: (
r['body'] or "").replace(SYM_PAGE_BREAKER,
SINGLE_SPACE)),
Field('has_pagebreak',
compute=lambda r: SYM_PAGE_BREAKER in (r['body'] or "")),
Field('is_draft', 'boolean', default=False),
Field('total_likes', 'integer', default=0,
readable=False, writable=False),
Field('created_on', 'datetime', default=request.now,
readable=False, writable=False),
Field('created_by', 'reference auth_user',
default=auth.user_id,
readable=False, writable=False),
Field('modified_on', 'datetime', update=request.now,
readable=False, writable=False),
Field('modified_by', 'reference auth_user',
update=auth.user_id,
readable=False, writable=False),
Field('tags', 'list:string'))
db.define_table('postcomment',
Field('post', 'reference post',
readable=False, writable=False),
Field('body', 'text', label=T("Comment")),
Field('is_approved', 'boolean', default=False,
readable=False, writable=False),
Field('is_deleted', 'boolean', default=False,
readable=False, writable=False),
Field('reply_to', 'reference postcomment',
readable=False, writable=False),
Field('created_on', 'datetime', default=request.now,
readable=False, writable=False),
Field('created_by', 'reference auth_user',
default=auth.user_id,
readable=False, writable=False),
Field('modified_on', 'datetime', update=request.now,
readable=False, writable=False),
Field('modified_by', 'reference auth_user',
update=auth.user_id,
readable=False, writable=False))
db.define_table('contact',
Field('name', 'string', requires=IS_NOT_EMPTY()),
Field('email', 'string', requires=[
IS_NOT_EMPTY(), IS_EMAIL()]),
Field('description', 'text', requires=IS_NOT_EMPTY()),
Field('created_on', 'datetime', default=request.now,
readable=False, writable=False))
db.define_table('bulletin',
Field('title', 'string', requires=IS_NOT_EMPTY()),
Field('message_body', 'text', requires=IS_NOT_EMPTY()),
Field('message_type', 'string',
default='info',
requires=IS_IN_SET(('success',
'info',
'warning',
'danger',
'special'))),
Field('expires_on', 'datetime', default=None),
Field('is_active', 'boolean', default=False),
Field('created_on', 'datetime', default=request.now,
readable=False, writable=False),
Field('created_by', 'reference auth_user',
default=auth.user_id,
readable=False, writable=False),
Field('modified_on', 'datetime', update=request.now,
readable=False, writable=False),
Field('modified_by', 'reference auth_user',
update=auth.user_id,
readable=False, writable=False))
db.define_table('eventinfo',
Field('title', 'string', requires=IS_NOT_EMPTY()),
Field('event_detail', 'text', requires=IS_NOT_EMPTY()),
Field('image_url', 'text'),
Field('location_text', 'text'),
Field('location_lat', 'float'),
Field('location_lng', 'float'),
Field('event_start', 'datetime'),
Field('event_end', 'datetime'),
Field('is_active', 'boolean', default=False),
Field('created_on', 'datetime', default=request.now,
readable=False, writable=False),
Field('created_by', 'reference auth_user',
default=auth.user_id,
readable=False, writable=False),
Field('modified_on', 'datetime', update=request.now,
readable=False, writable=False),
Field('modified_by', 'reference auth_user',
update=auth.user_id,
readable=False, writable=False))
db.define_table('upload',
Field('title', 'string', requires=IS_NOT_EMPTY()),
Field('the_file', 'upload'),
Field('is_public', 'boolean', default=True,
comment='Public url is like <b>/getobject/(id)</b>.'),
Field('created_on', 'datetime', default=request.now,
readable=False, writable=False),
Field('created_by', 'reference auth_user',
default=auth.user_id,
readable=False, writable=False),
Field('modified_on', 'datetime', update=request.now,
readable=False, writable=False),
Field('modified_by', 'reference auth_user',
update=auth.user_id,
readable=False, writable=False))
# check default root user exists or not.
if db(db.auth_user).count() < 1:
# if not:
# create groups once.
db.auth_group.bulk_insert([
dict(role='Root', description='System user'),
dict(role='Admin', description='Blog admin'),
dict(role='Editor', description='Blog editor'),
dict(role='Moderator', description='Blog moderator'),
dict(role='User', description='Blog reader')
])
# create default root user.
db.auth_user.insert(
**dict(
first_name='System',
last_name='User',
email='[email protected]',
password=db.auth_user.password.validate('[email protected]')[0]
)
)
# set permission for default user.
auth.add_membership(user_id=1, group_id=1)
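
# --- Added usage sketch (not in the original repository): with the tables above,
# a web2py controller could list published posts roughly like this; table and
# field names follow the definitions above.
def list_published_posts():
    # newest non-draft posts first
    posts = db(db.post.is_draft == False).select(orderby=~db.post.created_on)
    return dict(posts=posts)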

--- /ProgrammingCourses/CS61A/project/maps/data/__init__.py | repo: jxie0755/Learning_Python | license: none ---

import collections
import os
from abstractions import *
import data.jsonl
DATA_DIRECTORY = "data"
USER_DIRECTORY = "users"
def load_data(user_dataset, review_dataset, restaurant_dataset):
with open(os.path.join(DATA_DIRECTORY, user_dataset)) as f:
user_data = jsonl.load(f)
with open(os.path.join(DATA_DIRECTORY, review_dataset)) as f:
review_data = jsonl.load(f)
with open(os.path.join(DATA_DIRECTORY, restaurant_dataset)) as f:
restaurant_data = jsonl.load(f)
# Load users.
userid_to_user = {}
for user in user_data:
name = user["name"]
_user_id = user["user_id"]
user = make_user(name, []) # MISSING: reviews
userid_to_user[_user_id] = user
# Load restaurants.
busid_to_restaurant = {}
for restaurant in restaurant_data:
name = restaurant["name"]
location = float(restaurant["latitude"]), float(restaurant["longitude"])
categories = restaurant["categories"]
price = restaurant["price"]
if price is not None:
price = int(price)
num_reviews = int(restaurant["review_count"])
_business_id = restaurant["business_id"]
restaurant = make_restaurant(name, location, categories, price, []) # MISSING: reviews
busid_to_restaurant[_business_id] = restaurant
# Load reviews.
reviews = []
busid_to_reviews = collections.defaultdict(list)
userid_to_reviews = collections.defaultdict(list)
for review in review_data:
_user_id = review["user_id"]
_business_id = review["business_id"]
restaurant = restaurant_name(busid_to_restaurant[_business_id])
rating = float(review["stars"])
review = make_review(restaurant, rating)
reviews.append(review)
busid_to_reviews[_business_id].append(review)
userid_to_reviews[_user_id].append(review)
# Reviews done.
restaurants = {}
for busid, restaurant in busid_to_restaurant.items():
name = restaurant_name(restaurant)
location = list(restaurant_location(restaurant))
categories = restaurant_categories(restaurant)
price = restaurant_price(restaurant)
restaurant_reviews = busid_to_reviews[busid]
restaurant = make_restaurant(name, location, categories, price, restaurant_reviews)
restaurants[name] = restaurant
# Restaurants done.
users = []
for userid, user in userid_to_user.items():
name = user_name(user)
user_reviews = userid_to_reviews[userid]
user = make_user(name, user_reviews)
users.append(user)
# Users done.
return users, reviews, list(restaurants.values())
USERS, REVIEWS, ALL_RESTAURANTS = load_data("users.json", "reviews.json", "restaurants.json")
CATEGORIES = {c for r in ALL_RESTAURANTS for c in restaurant_categories(r)}
def load_user_file(user_file):
with open(os.path.join(USER_DIRECTORY, user_file)) as f:
return eval(f.read())
import glob
USER_FILES = [f[6:-4] for f in glob.glob("users/*.dat")]

--- /http_api/api_intro.py | repo: EvgeniyBudaev/python_learn | license: none ---

import requests
# url = 'https://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson&starttime=2014-01-01&endtime=2014-01-02'
# response = requests.get(url, headers={'Accept':'application/json'})
url = 'https://earthquake.usgs.gov/fdsnws/event/1/query?'
response = requests.get(url, headers={'Accept':'application/json'}, params={
'format': 'geojson',
'starttime': '2014-01-01',
'endtime': '2014-01-02'
})
# print(response.text)
# print(response.json())
# print(type(response.json())) # dict
data = response.json()
print(data['features'][0]['properties']['place'])

--- /aces_1.2/python/bin/create_aces_config | repo: colour-science/OpenColorIO-Configs | license: LicenseRef-scancode-unknown-license-reference, AMPAS ---

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: AMPAS
# Copyright Academy of Motion Picture Arts and Sciences
"""
Creates the *ACES* configuration.
"""
from __future__ import division
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from aces_ocio.generate_config import main
__author__ = (
'Haarm-Pieter Duiker, Thomas Mansencal, Stephen Hill, Kevin Wheatley, '
'Joseph Goldstone')
__copyright__ = (
'Copyright (C) 2014-2021 Academy of Motion Picture Arts and Sciences')
__license__ = 'Academy of Motion Picture Arts and Sciences License Terms'
__maintainer__ = 'Academy of Motion Picture Arts and Sciences'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = []
if __name__ == '__main__':
main()

--- /_algorithms_challenges/projecteuler/ProjectEuler-master(2)/ProjectEuler-master/156.py | repo: syurskyi/Algorithms_and_Data_Structure | license: none ---

import sys
class Problem():
def __init__(self):
self.found = None
def solve(self):
count = 0
for digit in range(1, 10):
solution_sum = self.s(digit)
print(digit, solution_sum)
count += solution_sum
print(count)
def s(self, digit):
self.found = []
self.binary_search(1, 10**11, digit)
return sum(self.found)
def f(self, n, digit):
count = 0
factor = 1
while n // factor != 0:
lower_number = n - (n // factor) * factor
curr_number = (n // factor) % 10
higher_number = n // (factor * 10)
if curr_number < digit:
count += higher_number * factor
elif curr_number == digit:
count += higher_number * factor + lower_number + 1
else:
count += (higher_number + 1) * factor
factor *= 10
return count
def binary_search(self, lower, upper, digit):
if lower + 1 == upper:
if self.f(lower, digit) == lower:
self.found.append(lower)
return
middle = (lower + upper) // 2
lower_value = self.f(lower, digit)
upper_value = self.f(upper, digit)
middle_value = self.f(middle, digit)
if middle_value >= lower and middle >= lower_value:
self.binary_search(lower, middle, digit)
if upper_value >= middle and upper >= middle_value:
self.binary_search(middle, upper, digit)
def f_naive(self, n, digit):
return sum([self.count_naive(i, digit) for i in range(1, n+1)])
def count_naive(self, n, digit):
count = 0
while n > 0:
n, r = divmod(n, 10)
if r == digit:
count += 1
return count
def main():
problem = Problem()
problem.solve()
if __name__ == '__main__':
sys.exit(main())

--- /chapter4/demo2.py | repo: huwanping001/Python | license: none ---

# School: Sichuan University of Science and Engineering
# College: 自信学院
# Student: Hu Wanping
# Created: 2021/9/18 9:54
# Test the bool value of objects
print(bool(False)) #False
print(bool(0)) #False
print(bool(0.0)) #False
print(bool(None)) #False
print(bool('')) #False
print(bool("")) #False
print(bool(list())) # empty list, False
print(bool([])) # empty list, False
print(bool(())) # empty tuple, False
print(bool(tuple())) # empty tuple, False
print(bool({})) # empty dict, False
print(bool(dict())) # empty dict, False
print(bool(set())) # empty set, False
print('----------------- All other objects have a bool value of True ------------------')
print(bool(18))
print(bool(True))
print(bool('xiaohu'))

--- /lorentz_equivariant_gnn/architectures/EquivariantGNN/egnn_base.py | repo: savvy379/Lorentz-Equivariant-GNN | license: none ---

import sys, os
import logging
import pytorch_lightning as pl
from pytorch_lightning import LightningModule
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.data import DataLoader
import torch
import numpy as np
from sklearn.metrics import roc_auc_score, roc_curve
from .utils import load_datasets
class EGNNBase(LightningModule):
def __init__(self, hparams):
super().__init__()
"""
Initialise the Lightning Module that can scan over different Equivariant GNN training regimes
"""
# Assign hyperparameters
self.save_hyperparameters(hparams)
def setup(self, stage):
# Handle any subset of [train, val, test] data split, assuming that ordering
self.trainset, self.valset = load_datasets(self.hparams["input_dir"], self.hparams["data_split"])
def train_dataloader(self):
if self.trainset is not None:
return DataLoader(self.trainset, batch_size=self.hparams["train_batch"], num_workers=1, shuffle=True)
else:
return None
def val_dataloader(self):
if self.valset is not None:
return DataLoader(self.valset, batch_size=self.hparams["val_batch"], num_workers=1)
else:
return None
def test_dataloader(self):
if self.testset is not None:
return DataLoader(self.testset, batch_size=1, num_workers=1)
else:
return None
def configure_optimizers(self):
optimizer = [
torch.optim.AdamW(
self.parameters(),
lr=(self.hparams["lr"]),
betas=(0.9, 0.999),
eps=1e-08,
amsgrad=True,
)
]
scheduler = [
{
"scheduler": torch.optim.lr_scheduler.StepLR(
optimizer[0],
step_size=self.hparams["patience"],
gamma=self.hparams["factor"],
),
"interval": "epoch",
"frequency": 1,
}
]
return optimizer, scheduler
def get_metrics(self, batch, output):
prediction = torch.sigmoid(output)
tp = (prediction.round() == batch.y).sum().item()
acc = tp / len(batch.y)
try:
auc = roc_auc_score(batch.y.bool().cpu().detach(), prediction.cpu().detach())
except:
auc = 0
fpr, tpr, _ = roc_curve(batch.y.bool().cpu().detach(), prediction.cpu().detach())
# Calculate which threshold gives the best signal goal
signal_goal_idx = abs(tpr - self.hparams["signal_goal"]).argmin()
eps = fpr[signal_goal_idx]
return prediction, acc, auc, eps
def training_step(self, batch, batch_idx):
output = self(batch).squeeze(-1)
loss = F.binary_cross_entropy_with_logits(output, batch.y.float())
prediction, acc, auc, inv_eps = self.get_metrics(batch, output)
self.log_dict({"train_loss": loss, "train_acc": acc}, on_step=False, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
output = self(batch).squeeze(-1)
loss = F.binary_cross_entropy_with_logits(output, batch.y.float())
prediction, acc, auc, eps = self.get_metrics(batch, output)
current_lr = self.optimizers().param_groups[0]["lr"]
self.log_dict({"val_loss": loss, "acc": acc, "auc": auc, "current_lr": current_lr}, on_step=False, on_epoch=True)
return {
"loss": loss,
"preds": prediction,
"acc": acc,
"auc": auc,
"eps": eps
}
def validation_epoch_end(self, step_outputs):
mean_eps = np.mean([output["eps"] for output in step_outputs])
if mean_eps != 0:
self.log_dict({"inv_eps": 1/mean_eps})
def optimizer_step(
self,
epoch,
batch_idx,
optimizer,
optimizer_idx,
optimizer_closure=None,
on_tpu=False,
using_native_amp=False,
using_lbfgs=False,
):
# warm up lr
if (self.hparams["warmup"] is not None) and (
self.trainer.global_step < self.hparams["warmup"]
):
lr_scale = min(
1.0, float(self.trainer.global_step + 1) / self.hparams["warmup"]
)
for pg in optimizer.param_groups:
pg["lr"] = lr_scale * self.hparams["lr"]
# update params
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
def compute_radials(edge_index, x):
"""
Calculates the Minkowski distance (squared) between coordinates (node embeddings) x_i and x_j
:param edge_index: Array containing the connection between nodes
:param x: The coordinates (node embeddings)
:return: Minkowski distances (squared) and coordinate differences x_i - x_j
"""
row, col = edge_index
coordinate_differences = x[row] - x[col]
minkowski_distance_squared = coordinate_differences ** 2
minkowski_distance_squared[:, 0] = -minkowski_distance_squared[:, 0] # Place minus sign on time coordinate as \eta = diag(-1, 1, 1, 1)
radial = torch.sum(minkowski_distance_squared, 1).unsqueeze(1)
return radial, coordinate_differences
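
# --- Added sanity-check sketch (not part of the original module), assuming
# compute_radials is the module-level helper defined above. Four-vectors are
# ordered (E, px, py, pz); the values are made up for illustration.
if __name__ == "__main__":
    x = torch.tensor([[10.0, 1.0, 2.0, 3.0],
                      [8.0, 0.5, 1.0, 2.0]])
    edge_index = torch.tensor([[0], [1]])  # single directed edge 0 -> 1
    radial, diffs = compute_radials(edge_index, x)
    print(radial.shape, diffs.shape)  # expected: torch.Size([1, 1]) torch.Size([1, 4])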

--- /azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/security_rule_py3.py | repo: ashirey-msft/azure-sdk-for-python | license: MIT ---

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class SecurityRule(SubResource):
"""Network security rule.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:param description: A description for this rule. Restricted to 140 chars.
:type description: str
:param protocol: Required. Network protocol this rule applies to. Possible
values are 'Tcp', 'Udp', and '*'. Possible values include: 'Tcp', 'Udp',
'*'
:type protocol: str or
~azure.mgmt.network.v2017_09_01.models.SecurityRuleProtocol
:param source_port_range: The source port or range. Integer or range
     between 0 and 65535. Asterisk '*' can also be used to match all ports.
:type source_port_range: str
:param destination_port_range: The destination port or range. Integer or
range between 0 and 65535. Asterix '*' can also be used to match all
ports.
:type destination_port_range: str
    :param source_address_prefix: The CIDR or source IP range. Asterisk '*' can
also be used to match all source IPs. Default tags such as
'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If
this is an ingress rule, specifies where network traffic originates from.
:type source_address_prefix: str
:param source_address_prefixes: The CIDR or source IP ranges.
:type source_address_prefixes: list[str]
:param source_application_security_groups: The application security group
specified as source.
:type source_application_security_groups:
list[~azure.mgmt.network.v2017_09_01.models.ApplicationSecurityGroup]
:param destination_address_prefix: The destination address prefix. CIDR or
destination IP range. Asterix '*' can also be used to match all source
IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and
'Internet' can also be used.
:type destination_address_prefix: str
:param destination_address_prefixes: The destination address prefixes.
CIDR or destination IP ranges.
:type destination_address_prefixes: list[str]
:param destination_application_security_groups: The application security
group specified as destination.
:type destination_application_security_groups:
list[~azure.mgmt.network.v2017_09_01.models.ApplicationSecurityGroup]
:param source_port_ranges: The source port ranges.
:type source_port_ranges: list[str]
:param destination_port_ranges: The destination port ranges.
:type destination_port_ranges: list[str]
:param access: Required. The network traffic is allowed or denied.
Possible values are: 'Allow' and 'Deny'. Possible values include: 'Allow',
'Deny'
:type access: str or
~azure.mgmt.network.v2017_09_01.models.SecurityRuleAccess
:param priority: The priority of the rule. The value can be between 100
and 4096. The priority number must be unique for each rule in the
collection. The lower the priority number, the higher the priority of the
rule.
:type priority: int
:param direction: Required. The direction of the rule. The direction
specifies if rule will be evaluated on incoming or outcoming traffic.
Possible values are: 'Inbound' and 'Outbound'. Possible values include:
'Inbound', 'Outbound'
:type direction: str or
~azure.mgmt.network.v2017_09_01.models.SecurityRuleDirection
:param provisioning_state: The provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'protocol': {'required': True},
'access': {'required': True},
'direction': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'},
'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'},
'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'},
'source_address_prefixes': {'key': 'properties.sourceAddressPrefixes', 'type': '[str]'},
'source_application_security_groups': {'key': 'properties.sourceApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'},
'destination_address_prefixes': {'key': 'properties.destinationAddressPrefixes', 'type': '[str]'},
'destination_application_security_groups': {'key': 'properties.destinationApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
'source_port_ranges': {'key': 'properties.sourcePortRanges', 'type': '[str]'},
'destination_port_ranges': {'key': 'properties.destinationPortRanges', 'type': '[str]'},
'access': {'key': 'properties.access', 'type': 'str'},
'priority': {'key': 'properties.priority', 'type': 'int'},
'direction': {'key': 'properties.direction', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, protocol, access, direction, id: str=None, description: str=None, source_port_range: str=None, destination_port_range: str=None, source_address_prefix: str=None, source_address_prefixes=None, source_application_security_groups=None, destination_address_prefix: str=None, destination_address_prefixes=None, destination_application_security_groups=None, source_port_ranges=None, destination_port_ranges=None, priority: int=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(SecurityRule, self).__init__(id=id, **kwargs)
self.description = description
self.protocol = protocol
self.source_port_range = source_port_range
self.destination_port_range = destination_port_range
self.source_address_prefix = source_address_prefix
self.source_address_prefixes = source_address_prefixes
self.source_application_security_groups = source_application_security_groups
self.destination_address_prefix = destination_address_prefix
self.destination_address_prefixes = destination_address_prefixes
self.destination_application_security_groups = destination_application_security_groups
self.source_port_ranges = source_port_ranges
self.destination_port_ranges = destination_port_ranges
self.access = access
self.priority = priority
self.direction = direction
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
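
# --- Added construction example (not part of the generated SDK file): only the
# three required keyword arguments (protocol, access, direction) must be set,
# per the _validation map above; the remaining values are illustrative.
if __name__ == "__main__":
    rule = SecurityRule(
        protocol="Tcp",
        access="Allow",
        direction="Inbound",
        destination_port_range="443",
        destination_address_prefix="10.0.0.0/24",
        priority=100,
        name="allow-https-inbound",
    )
    print(rule.name, rule.protocol, rule.access)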

--- /moodledata/vpl_data/59/usersdata/171/61531/submittedfiles/testes.py | repo: rafaelperazzo/programacao-web | license: none ---

# -*- coding: utf-8 -*-
import math
# START HERE BELOW
def media(a):
soma=0
for i in range(0,len(a),1):
soma=soma+a[i]
media=soma/(len(a))
return(media)
n=int(input('enter the number of elements in the list: '))
a=[]
for i in range(0,n,1):
    numero=float(input('enter a number to insert into the list: '))
    a.append(numero)
print('%.3f'%a[0])
print('%.3f'%a[len(a)-1])
print('%.3f'%media(a))
print(a)

--- /indy_common/generates_request.py | repo: hyperledger/indy-node | license: Apache-2.0 ---

from abc import abstractmethod
class GeneratesRequest:
@abstractmethod
def _op(self):
pass
@abstractmethod
def ledgerRequest(self):
"""
Generates a Request object to be submitted to the ledger.
:return: a Request to be submitted, or None if it shouldn't be written
"""

--- /course/forms.py | repo: skafis/career_choice | license: none ---

from django import forms
from .models import Courses
class add_coursesForm(forms.ModelForm):
class Meta:
model = Courses
fields = [
'name',
'time',
'cost'
        ]

--- /backend/wesmusicmedia_20833/settings.py | repo: crowdbotics-apps/wesmusicmedia-20833 | license: none ---

"""
Django settings for wesmusicmedia_20833 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"event",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "wesmusicmedia_20833.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "wesmusicmedia_20833.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"

--- /4.exceptions/exception101.py | repo: jnepal/OReilly-Python-Beyond-the-Basics-OOP | license: none ---

'''
Handling Exceptions
'''
import sys
mydict = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
key = input('please input a key: ')
try:
print("The value for {0} is {1}".format(key, mydict[key]))
except KeyError as err:
print('the key ' + key + ' does not exists')
print(err)
# print(sys.exc_info()[0])
'''
Raising Exceptions
'''
def divideByZero(num):
return num / 0
try:
divideByZero(5)
except ZeroDivisionError as error:
raise ZeroDivisionError("ZeroDivisionError: You cannot divide a number by zero")
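
'''
Custom Exceptions (added illustration; the script above intentionally ends by
re-raising, so treat this as a standalone fragment in the same tutorial style)
'''
class InvalidKeyError(Exception):
    """Raised when a lookup key is missing."""
    pass

def lookup(data, key):
    if key not in data:
        raise InvalidKeyError('no such key: ' + key)
    return data[key]

try:
    print(lookup({'a': 1, 'b': 2}, 'z'))
except InvalidKeyError as error:
    print(error)
finally:
    print('lookup attempted')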

--- /matplotlibMine/change/ACF_PACFPlot.py | repo: Gedanke/FigureDemo | license: none ---

# -*- coding:utf-8 -*-
import pandas
import matplotlib.pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
# Import Data
df = pandas.read_csv('../dataset/AirPassengers.csv')
# Draw Plot
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6), dpi=80)
plot_acf(df.value.tolist(), ax=ax1, lags=50)
plot_pacf(df.value.tolist(), ax=ax2, lags=20)
# Decorate
# lighten the borders
ax1.spines["top"].set_alpha(.3)
ax2.spines["top"].set_alpha(.3)
ax1.spines["bottom"].set_alpha(.3)
ax2.spines["bottom"].set_alpha(.3)
ax1.spines["right"].set_alpha(.3)
ax2.spines["right"].set_alpha(.3)
ax1.spines["left"].set_alpha(.3)
ax2.spines["left"].set_alpha(.3)
# font size of tick labels
ax1.tick_params(axis='both', labelsize=12)
ax2.tick_params(axis='both', labelsize=12)
plt.savefig("../photos/change/ACF_PACFPlot.png")
plt.show()

--- /tests/test_volatility.py | repo: g8a9/pyti | license: MIT ---

from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import volatility
class TestVolatility(unittest.TestCase):
def setUp(self):
"""Create data to use for testing."""
self.data = SampleData().get_sample_close_data()
self.volatility_period_6_expected = [np.nan, np.nan, np.nan, np.nan,
np.nan, 0.1524903845374864, 0.28384513123292787, 0.27472499826423863,
0.38252018527403447, 0.38119139344971686, 0.3932640765681284,
0.345017172104509, 0.35502207797108942, 0.26270939140810423,
0.24341424180238344, 0.12003756515189819, 0.093471666193184894,
0.069100389420744604, 0.070675428704493393, 0.062386517106180067,
0.076730224716846165, 0.099142360710378297, 0.10592610770119171,
0.095343491114294895, 0.094432880117036253, 0.11449523380936444,
0.19874308222305631, 0.26016821375802046, 0.2507081012898657,
0.24600361380487154, 0.24486737357919627, 0.20644095495335393,
0.083562464522411659, 0.089427901528106007, 0.087018108708016018,
0.059113141553367478, 0.04533882542423192, 0.043745342815681064,
0.060849166597298179, 0.070157646564281986, 0.076687212024214385,
0.076789868891622204, 0.079975193196433952, 0.062270973308414995,
0.065217619381487457, 0.080236726179575529, 0.11968338992681561,
0.11104995450689067, 0.14933752225515703, 0.15539159036348982,
0.18969228060158044, 0.18590923547841665, 0.10597103882205337,
0.10565132353205849, 0.097757896252116783, 0.10432107220772911,
0.15464388622372643, 0.24770610313421526, 0.1937347685557344,
0.18639971736694658, 0.17219385405371773, 0.18521003184180665,
0.19111515274069815, 0.67712758698244713, 0.75084329516417858,
0.2899490374301914, 0.23434490783501213, 0.23349254824431451,
0.19491130883035751, 0.17291688690443052, 0.18952455627896306,
0.14943455591620675, 0.12093538881060768, 0.11352129790844248,
0.13675111326211081, 0.19911771276113485, 0.19719310858321595,
0.20301877064572385, 0.17585792951513424, 0.15166114398944808,
0.12154473460299797, 0.1127687218024727, 0.13396457711138229,
0.11961401876780703, 0.12471283828508464, 0.11990156860184273,
0.15070446430502768, 0.37046083687443693, 0.48587580247276602,
0.48262814317551972, 0.4766783934789619, 0.44934857972966907,
0.32796411485291727, 0.24385698905210901, 0.22975650992357466,
0.29279256778033158, 0.2895923424432123, 0.34144133236091717,
0.37761426331474501, 0.37476224778013606, 0.36994155773453391,
0.78667112121907068, 0.86300585080251269, 0.23534333044989458,
0.20968259166195685, 0.22613400310199541, 0.26667264020071202,
0.19666727318947325, 0.074324483776256126, 0.055897268298958649,
0.050047074730884822, 0.053240810369060795, 0.076958905307395881,
0.25066238890997816, 0.3985022148002676, 0.45339018813190163,
0.40090074005473725, 0.11853669350027883, 0.10192315366136466,
0.084981565206439555, 0.094696345543641286, 0.10816591739333566,
0.14787686072786857, 0.094089878168633442, 0.092418384168373155,
0.087753488657869638, 0.12011498586095044]
self.volatility_period_8_expected = [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, 0.14242985319405954, 0.24169985423921733,
0.3113338136575341, 0.35823660012175351, 0.2897109786723715,
0.33920738862680405, 0.30084397674280794, 0.27874472606006989,
0.14104732116634003, 0.10350850671692319, 0.06808649301377627,
0.06174397939422651, 0.055043294296986407, 0.055977225305731342,
0.063756934514425712, 0.084965776367954382, 0.096566525441470791,
0.11148807968007421, 0.11115393420884391, 0.10616253483420113,
0.12732666627619157, 0.20137858090346494, 0.22437096030734238,
0.26314520377982997, 0.23975292286883237, 0.094119224441386942,
0.092781237738100916, 0.096445271412968908, 0.068309958550182667,
0.053436187360247279, 0.050255241224061296, 0.050347489184081405,
0.051256468547238379, 0.069732912097680774, 0.077163466932232569,
0.080016909130893973, 0.069083593021742828, 0.065739601194198222,
0.058817456815561914, 0.060853857257781578, 0.068147115460754693,
0.10291856257863505, 0.13082035431264472, 0.17108073831653245,
0.17704710115987887, 0.12132604897965137, 0.094112286486332075,
0.085525186449793872, 0.10638905070274754, 0.11330484467160756,
0.12192041336088531, 0.15087971223128982, 0.21614349344681355,
0.19857901026629468, 0.19399819303164684, 0.1818708611384795,
0.20511592974926141, 0.22512870638934221, 0.3249909324804976,
0.25715416486495046, 0.25562259799227699, 0.19332500477233347,
0.16618756076676156, 0.18501467898617538, 0.16520561630664882,
0.13640762590737562, 0.1282284121401932, 0.13201283568134109,
0.11105953157811391, 0.11589605525642854, 0.18343547199822768,
0.19311704180590059, 0.17658236946475381, 0.13926554193674917,
0.12236363220142392, 0.1235239400745423, 0.12530921417976978,
0.12816011884378287, 0.12376469343773101, 0.1363460994814035,
0.13827606997226946, 0.17106893662357836, 0.41897704683504988,
0.43046502750119209, 0.38435154822328638, 0.3510007201166348,
0.27101422613079296, 0.20413836250583231, 0.21157867968786048,
0.22742953561116996, 0.24739832604356007, 0.25462527840422455,
0.30406177112394239, 0.3814716445475102, 0.42768111395987835,
0.42847432237222566, 0.27567929241868661, 0.2289390835731577,
0.21867688679964709, 0.22972338923114549, 0.18365959087967343,
0.076786556913883058, 0.059003697401793037, 0.052832920168568283,
0.049505139847874559, 0.051157688941951211, 0.057120316051869298,
0.083940965662256742, 0.24914260070072689, 0.32979011062763736,
0.1323096052074898, 0.10480876704059268, 0.085936680527470583,
0.086629096266763336, 0.083217014518560464, 0.081182983860638047,
0.073828217218582196, 0.086704492613238301, 0.081142442111067303,
0.090650588908834859]
self.volatility_period_10_expected = [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, 0.15417809035843458,
0.2408116896603047, 0.25045642049343686, 0.30298855648830314,
0.28271934810077803, 0.30019751989944815, 0.15531803130191857,
0.11220844736986649, 0.074895516632368819, 0.064588551678489051,
0.052438052372962583, 0.049009961768482831, 0.05333860076049448,
0.061982060819676429, 0.071085846449511506, 0.094584376755873154,
0.10922283535084741, 0.12007414225686562, 0.11657088324098044,
0.11597960977183221, 0.13634800090518195, 0.21566211367290425,
0.21050418453382061, 0.1025548335663263, 0.1041811347612574,
0.10414591275448988, 0.075824744175844699, 0.059981588478072154,
0.056211687126943105, 0.05682230691715013, 0.056793800883131212,
0.056888695537798205, 0.0547620613726214, 0.068348344590359697,
0.069404424523249103, 0.071850312728412358, 0.065538294186633275,
0.05876109463482912, 0.051571185068965152, 0.058338048271236238,
0.075289596304524004, 0.11326061831246771, 0.14456281738597818,
0.13184989418600479, 0.10571466044585399, 0.092178120503772679,
0.090290148369258125, 0.09258525448595116, 0.11796784502651182,
0.11401260555428137, 0.11748713942752384, 0.15238500510042516,
0.21846560190322034, 0.20518196327986202, 0.20472687260035038,
0.20236309821966347, 0.2124558034437691, 0.20628022609509283,
0.27463453666990251, 0.20551996997796912, 0.17147408459105828,
0.17970534330383031, 0.16991339139073275, 0.15066791286212405,
0.14064333770797666, 0.13913782012536724, 0.11980998348323495,
0.1102096991747443, 0.11004628609875898, 0.11990645094663482,
0.16908191602784542, 0.15575422107109085, 0.13762855713533648,
0.13846608743399774, 0.13277770682867118, 0.12888861589990716,
0.13074380575879921, 0.13964472589084975, 0.13814264807746032,
0.14421353523639924, 0.14995556537715846, 0.19213160105122412,
0.37883088187714375, 0.32162673649585843, 0.2506596765354181,
0.21433145049850072, 0.2210267024430434, 0.19378146428300974,
0.1856025458775277, 0.20103227506988883, 0.22364031524778469,
0.25160504803461164, 0.33144950644656002, 0.42572082344622619,
0.28448686654260275, 0.24665815278320147, 0.23988027396914213,
0.2335068846511005, 0.17518123479515843, 0.079487247958078391,
0.060986278450694285, 0.052450777256972343, 0.049087834377186737,
0.050147935844908974, 0.049494022236588019, 0.052777461547207034,
0.060753791909360075, 0.088537303234590733, 0.12458655576002062,
0.10764438999368131, 0.089739789240085133, 0.084219952095353462,
0.078835298860090011, 0.072477863673140144, 0.062254121762306984,
0.062903192247182049, 0.064946985330127008, 0.080325571807449661]
def test_volatility_period_6(self):
period = 6
v = volatility.volatility(self.data, period)
np.testing.assert_array_equal(v, self.volatility_period_6_expected)
def test_volatility_period_8(self):
period = 8
v = volatility.volatility(self.data, period)
np.testing.assert_array_equal(v, self.volatility_period_8_expected)
def test_volatility_period_10(self):
period = 10
v = volatility.volatility(self.data, period)
np.testing.assert_array_equal(v, self.volatility_period_10_expected)
def test_volatility_invalid_period(self):
period = 128
with self.assertRaises(Exception) as cm:
volatility.volatility(self.data, period)
expected = "Error: data_len < period"
self.assertEqual(str(cm.exception), expected)

--- /cap2/extensions/experimental/strains/merge_snp_graph.py | repo: MetaSUB/CAP2 | license: MIT ---
from .tasks import StrainCapGroupTask
from ....pipeline.config import PipelineConfig
from .strainotyping import (
VERSION,
merge_filter_graphs_from_filepaths,
write_graph_to_filepath,
graph_node_table,
)
from .make_snp_graph import MakeSNPGraph
class MergeSNPGraph(StrainCapGroupTask):
MIN_WEIGHT = 2
module_description = """
This module
Motivation:
Negatives:
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config = PipelineConfig(self.config_filename)
@property
def snp_graphs(self):
return self.module_req_list(MakeSNPGraph)
def requires(self):
return self.snp_graphs
@classmethod
def version(cls):
return 'v0.1.0'
def tool_version(self):
return VERSION
@classmethod
def dependencies(cls):
return [MakeSNPGraph]
@classmethod
def _module_name(cls):
return 'experimental::merge_snp_graph'
def output(self):
out = {
f'merged_snp_graph__{self.genome_name}': self.get_target(f'merged_snp_graph__{self.genome_name}', 'gml.gz'),
f'merged_snp_nodes__{self.genome_name}': self.get_target(f'merged_snp_nodes__{self.genome_name}', 'csv.gz'),
}
return out
@property
def graph_path(self):
return self.output()[f'merged_snp_graph__{self.genome_name}'].path
@property
def node_path(self):
return self.output()[f'merged_snp_nodes__{self.genome_name}'].path
def _run(self):
graph_paths = [snp_graph.graph_path for snp_graph in self.snp_graphs]
merged_graph = merge_filter_graphs_from_filepaths(graph_paths, min_weight=self.MIN_WEIGHT)
write_graph_to_filepath(merged_graph, self.graph_path)
tbl = graph_node_table(merged_graph)
tbl.to_csv(self.node_path, compression='gzip')

--- /venv/lib/python3.7/site-packages/deribit_api.py | repo: Dimasik007/Deribit_funding_rate_indicator | license: MIT ---

# -*- coding: utf-8 -*-
import time, hashlib, requests, base64, sys
from collections import OrderedDict
class RestClient(object):
def __init__(self, key=None, secret=None, url=None):
self.key = key
self.secret = secret
self.session = requests.Session()
if url:
self.url = url
else:
self.url = "https://www.deribit.com"
def request(self, action, data):
response = None
if action.startswith("/api/v1/private/"):
if self.key is None or self.secret is None:
raise Exception("Key or secret empty")
signature = self.generate_signature(action, data)
response = self.session.post(self.url + action, data=data, headers={'x-deribit-sig': signature}, verify=True)
else:
response = self.session.get(self.url + action, params=data, verify=True)
if response.status_code != 200:
raise Exception("Wrong response code: {0}".format(response.status_code))
json = response.json()
if json["success"] == False:
raise Exception("Failed: " + json["message"])
if "result" in json:
return json["result"]
elif "message" in json:
return json["message"]
else:
return "Ok"
def generate_signature(self, action, data):
tstamp = int(time.time()* 1000)
signature_data = {
'_': tstamp,
'_ackey': self.key,
'_acsec': self.secret,
'_action': action
}
signature_data.update(data)
sorted_signature_data = OrderedDict(sorted(signature_data.items(), key=lambda t: t[0]))
def converter(data):
key = data[0]
value = data[1]
if isinstance(value, list):
return '='.join([str(key), ''.join(value)])
else:
return '='.join([str(key), str(value)])
items = map(converter, sorted_signature_data.items())
signature_string = '&'.join(items)
sha256 = hashlib.sha256()
sha256.update(signature_string.encode("utf-8"))
sig = self.key + "." + str(tstamp) + "."
sig += base64.b64encode(sha256.digest()).decode("utf-8")
return sig
def getorderbook(self, instrument):
return self.request("/api/v1/public/getorderbook", {'instrument': instrument})
def getinstruments(self):
return self.request("/api/v1/public/getinstruments", {})
def getcurrencies(self):
return self.request("/api/v1/public/getcurrencies", {})
def getlasttrades(self, instrument, count=None, since=None):
options = {
'instrument': instrument
}
if since:
options['since'] = since
if count:
options['count'] = count
return self.request("/api/v1/public/getlasttrades", options)
def getsummary(self, instrument):
return self.request("/api/v1/public/getsummary", {"instrument": instrument})
def index(self):
return self.request("/api/v1/public/index", {})
def stats(self):
return self.request("/api/v1/public/stats", {})
def account(self):
return self.request("/api/v1/private/account", {})
def buy(self, instrument, quantity, price, postOnly=None, label=None):
options = {
"instrument": instrument,
"quantity": quantity,
"price": price
}
if label:
options["label"] = label
if postOnly:
options["postOnly"] = postOnly
return self.request("/api/v1/private/buy", options)
def sell(self, instrument, quantity, price, postOnly=None, label=None):
options = {
"instrument": instrument,
"quantity": quantity,
"price": price
}
if label:
options["label"] = label
if postOnly:
options["postOnly"] = postOnly
return self.request("/api/v1/private/sell", options)
def cancel(self, orderId):
options = {
"orderId": orderId
}
return self.request("/api/v1/private/cancel", options)
def cancelall(self, typeDef="all"):
return self.request("/api/v1/private/cancelall", {"type": typeDef})
def edit(self, orderId, quantity, price):
options = {
"orderId": orderId,
"quantity": quantity,
"price": price
}
return self.request("/api/v1/private/edit", options)
def getopenorders(self, instrument=None, orderId=None):
options = {}
if instrument:
options["instrument"] = instrument
if orderId:
options["orderId"] = orderId
return self.request("/api/v1/private/getopenorders", options)
def positions(self):
return self.request("/api/v1/private/positions", {})
def orderhistory(self, count=None):
options = {}
if count:
options["count"] = count
return self.request("/api/v1/private/orderhistory", options)
def tradehistory(self, countNum=None, instrument="all", startTradeId=None):
options = {
"instrument": instrument
}
if countNum:
options["count"] = countNum
if startTradeId:
options["startTradeId"] = startTradeId
return self.request("/api/v1/private/tradehistory", options)
| [
"[email protected]"
] | |
dd8b87c4e3eb039651b484713ee069ae838bf750 | 28bea635167b3e0b99c3abf1236a5d6970d65d49 | /esgf2zarr/_version.py | 4d23300f6cbd39cbfe292238ba9c4635b656a65a | [
"Apache-2.0"
] | permissive | pangeo-data/esgf2xarray | 4531dbe0d1108d916cc3a00f807d9abe9e495aac | 6a5e4df0d329c2f23b403cbfbb65f0f1dfa98d52 | refs/heads/master | 2020-04-27T08:59:03.829876 | 2019-03-08T18:05:52 | 2019-03-08T18:05:52 | 174,194,614 | 4 | 1 | Apache-2.0 | 2019-03-08T18:06:26 | 2019-03-06T18:01:20 | Jupyter Notebook | UTF-8 | Python | false | false | 18,555 | py | # This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "xndframes-"
cfg.versionfile_source = "xndframes/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = (
"unable to parse git-describe output: \
'%s'"
% describe_out
)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
} | [
"[email protected]"
] | |
39694e63e136c7de35f1644beaf3721a4977f8c9 | e6f62e277c63ad417664c292989ac7b18b5d3027 | /sanguo/core/plunder.py | 6a6fd5fdf9222be63677815da601b879f2ae45c8 | [] | no_license | yueyoum/sanguo-server | 1fe8df69ca923e0166fd6f75e11d08a2b4bbde37 | 08df991a3bffea4b4f56f20ffea23bc373465332 | refs/heads/master | 2021-01-15T15:33:00.427811 | 2016-06-21T09:28:03 | 2016-06-21T09:28:03 | 13,704,516 | 1 | 3 | null | 2014-10-22T07:45:31 | 2013-10-19T16:38:19 | Python | UTF-8 | Python | false | false | 16,744 | py | # -*- coding: utf-8 -*-
__author__ = 'Wang Chao'
__date__ = '1/22/14'
import time
import random
import base64
import dill
from mongoscheme import DoesNotExist
from core.server import server
from core.character import Char
from core.battle import PlunderBattle
from core.mongoscheme import MongoPlunder, MongoAffairs, MongoPlunderBoard
from core.exception import SanguoException
from core.task import Task
from core.prison import Prison
from core.resource import Resource
from core.attachment import make_standard_drop_from_template, get_drop
from core.achievement import Achievement
from core.formation import Formation
from core.signals import plunder_finished_signal
from core.msgpipe import publish_to_char
from core.msgfactory import create_character_infomation_message
from core.times_log import TimesLogPlunder
from core.activity import ActivityEntry
from utils.api import apicall, api_server_list
from utils import pack_msg
from preset.settings import (
PRISONER_POOL,
PLUNDER_GOT_GOLD_PARAM_BASE_ADJUST,
PLUNDER_GET_PRISONER_PROB,
PLUNDER_GET_DROPS_TIMES,
PLUNDER_DROP_DECREASE_FACTOR,
PLUNDER_DROP_MIN_FACTOR,
)
from preset import errormsg
from preset.data import VIP_FUNCTION, BATTLES
from protomsg import GetPlunderLeaderboardResponse
from protomsg import Battle as MsgBattle
from protomsg import PlunderNotify
from protomsg import Plunder as MsgPlunder
class PlunderCurrentTimeOut(Exception):
pass
class PlunderRival(object):
@classmethod
def search(cls, city_id, exclude_char_id=None, return_dumps=False):
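        # Pick a random character currently hanging out in this city (excluding
        # exclude_char_id) and wrap it as a PlunderRival; with return_dumps the object
        # is dill-pickled and base64-encoded so it can be shipped to another server.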
docs = MongoAffairs._get_collection().find(
{'hang_city_id': city_id},
{'_id': 1}
)
affair_ids = [doc['_id'] for doc in docs]
rival_id = 0
while affair_ids:
rival_id = random.choice(affair_ids)
if rival_id != exclude_char_id:
break
affair_ids.remove(rival_id)
rival_id = 0
obj = cls(rival_id, city_id)
if not return_dumps:
return obj
return base64.b64encode(dill.dumps(obj))
@classmethod
def search_all_servers(cls, city_id, exclude_char_id=None):
        # Cross-server plunder
        # Flow:
        # 1. Fetch the list of all servers from the HUB
        # 2. Randomly choose one server and call its API to get the target player's data
        # 3. Fight the battle
        # 4. Call the HUB's "battle finished" API
        # 5. The HUB then looks up the server that target_char_id belongs to and calls its corresponding API
data = {
'is_test': 1 if server.test else 0
}
servers = api_server_list(data=data)
s = random.choice(servers['data'])
url = "https://{0}:{1}/api/plunder/search/".format(s['host'], s['port_https'])
data = {
'city_id': city_id,
'exclude_char_id': exclude_char_id,
}
res = apicall(data=data, cmd=url)
target = res['data']
obj = dill.loads(base64.b64decode(target))
obj.server_url = "https://{0}:{1}".format(s['host'], s['port_https'])
return obj
def __init__(self, char_id, city_id):
from core.affairs import Affairs
from core.battle.hero import BattleHero
self.city_id = city_id
if char_id:
char = Char(char_id)
self.char_id = char_id
self.name = char.mc.name
self.level = char.mc.level
self.power = char.power
self.leader = char.leader_oid
f = Formation(char_id)
self.formation = f.in_formation_hero_ids()
self.hero_original_ids = f.in_formation_hero_original_ids()
self.gold = Affairs(self.char_id).get_drop()['gold']
self.msg_char_information = create_character_infomation_message(self.char_id).SerializeToString()
battle_heros = []
for hid in self.formation:
if hid == 0:
battle_heros.append(None)
else:
battle_heros.append(BattleHero(hid))
self.battle_heros = base64.b64encode(dill.dumps(battle_heros))
else:
self.char_id = 0
self.name = ""
self.level = 0
self.power = 0
self.leader = 0
self.formation = []
self.hero_original_ids = []
self.gold = 0
self.msg_char_information = ""
self.battle_heros = base64.b64encode(dill.dumps([None] * 9))
def get_plunder_gold(self, level):
level_diff = self.level - level
if level_diff > 8:
level_diff = 8
if level_diff < -8:
level_diff = -8
result = level_diff * 0.025 + PLUNDER_GOT_GOLD_PARAM_BASE_ADJUST
return int(result * self.gold)
def make_plunder_msg(self, level):
msg = MsgPlunder()
msg.char.MergeFromString(self.msg_char_information)
msg.gold = self.get_plunder_gold(level)
return msg
def __bool__(self):
return self.char_id != 0
__nonzero__ = __bool__
class Plunder(object):
def __init__(self, char_id):
self.char_id = char_id
self.load_mongo_record()
def load_mongo_record(self):
try:
self.mongo_plunder = MongoPlunder.objects.get(id=self.char_id)
self.set_default_value()
except DoesNotExist:
self.mongo_plunder = MongoPlunder(id=self.char_id)
self.mongo_plunder.current_times = self.max_plunder_times()
self.mongo_plunder.save()
def set_default_value(self):
        # Fields that were added later need their default values initialized here, e.g. current_times
data = {
'current_times': self.max_plunder_times(),
'current_times_lock': False,
'char_id': 0,
'char_name': "",
'char_gold': 0,
'char_power': 0,
'char_leader': 0,
'char_formation': [],
'char_hero_original_ids': [],
'char_city_id': 0
}
changed = False
record = self.mongo_plunder._get_collection().find_one({'_id': self.char_id})
for k, v in data.iteritems():
if k not in record:
setattr(self.mongo_plunder, k, v)
changed = True
if changed:
self.mongo_plunder.save()
def get_plunder_target(self, city_id):
"""
@:rtype: PlunderRival
"""
target = PlunderRival.search_all_servers(city_id, exclude_char_id=self.char_id)
self.mongo_plunder.char_id = target.char_id
self.mongo_plunder.char_name = target.name
self.mongo_plunder.char_gold = target.get_plunder_gold(Char(self.char_id).mc.level)
self.mongo_plunder.char_power = target.power
self.mongo_plunder.char_leader = target.leader
self.mongo_plunder.char_formation = target.formation
self.mongo_plunder.char_hero_original_ids = target.hero_original_ids
self.mongo_plunder.char_city_id = target.city_id
self.mongo_plunder.battle_heros = target.battle_heros
self.mongo_plunder.server_url = target.server_url
self.mongo_plunder.save()
if target:
gold_needs = BATTLES[city_id].refresh_cost_gold
resource = Resource(self.char_id, "Plunder Refresh")
resource.check_and_remove(gold=-gold_needs)
return target
def max_plunder_times(self):
char = Char(self.char_id)
times = VIP_FUNCTION[char.mc.vip].plunder
ae = ActivityEntry(self.char_id, 40007)
if not ae or not ae.is_ok():
return times
if times > 10:
return times
return 10
def clean_plunder_target(self):
self.mongo_plunder.char_id = 0
self.mongo_plunder.char_name = ""
self.mongo_plunder.char_gold = 0
self.mongo_plunder.char_power = 0
self.mongo_plunder.char_leader = 0
self.mongo_plunder.char_formation = []
self.mongo_plunder.char_hero_original_ids = []
self.mongo_plunder.char_city_id = 0
self.mongo_plunder.battle_heros = ""
self.mongo_plunder.server_url = ""
self.mongo_plunder.save()
def change_current_plunder_times(self, change_value, allow_overflow=True):
max_times = self.max_plunder_times()
if change_value > 0 and not allow_overflow and self.mongo_plunder.current_times > max_times:
return
# for i in range(10):
# self.load_mongo_record()
# if not self.mongo_plunder.current_times_lock:
# self.mongo_plunder.current_times_lock = True
# self.mongo_plunder.save()
# break
# else:
# time.sleep(0.2)
# else:
# raise PlunderCurrentTimeOut()
#
# try:
# self.mongo_plunder.current_times += change_value
# if self.mongo_plunder.current_times < 0:
# self.mongo_plunder.current_times = 0
#
# if not allow_overflow and change_value > 0:
# if self.mongo_plunder.current_times > max_times:
# self.mongo_plunder.current_times = max_times
# finally:
# self.mongo_plunder.current_times_lock = False
# self.mongo_plunder.save()
# self.send_notify()
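        # The lock-based variant kept above for reference was replaced by an atomic
        # MongoDB $inc, followed by clamping the counter to 0 and, when overflow is
        # not allowed, to max_times.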
MongoPlunder._get_collection().update(
{'_id': self.char_id},
{'$inc': {'current_times': change_value}}
)
self.load_mongo_record()
if self.mongo_plunder.current_times < 0:
MongoPlunder._get_collection().update(
{'_id': self.char_id},
{'$set': {'current_times': 0}}
)
if not allow_overflow:
if self.mongo_plunder.current_times > max_times:
MongoPlunder._get_collection().update(
{'_id': self.char_id},
{'$set': {'current_times': max_times}}
)
self.send_notify()
def plunder(self):
if not self.mongo_plunder.char_id:
raise SanguoException(
errormsg.PLUNDER_NO_RIVAL,
self.char_id,
"Plunder Battle",
"no rival target"
)
if self.mongo_plunder.current_times <= 0:
raise SanguoException(
errormsg.PLUNDER_NO_TIMES,
self.char_id,
"Plunder Battle",
"no times"
)
self.change_current_plunder_times(change_value=-1)
rival_battle_heros = dill.loads(base64.b64decode(self.mongo_plunder.battle_heros))
msg = MsgBattle()
pvp = PlunderBattle(
self.char_id,
self.mongo_plunder.char_id,
msg,
self.mongo_plunder.char_name,
rival_battle_heros,
)
pvp.start()
t = Task(self.char_id)
t.trig(3)
to_char_id = self.mongo_plunder.char_id
target_server_url = self.mongo_plunder.server_url
if msg.self_win:
standard_drop = self._get_plunder_reward(
self.mongo_plunder.char_city_id,
self.mongo_plunder.char_gold,
self.mongo_plunder.char_hero_original_ids
)
self.clean_plunder_target()
achievement = Achievement(self.char_id)
achievement.trig(12, 1)
PlunderLeaderboardWeekly.incr(self.char_id)
TimesLogPlunder(self.char_id).inc()
else:
standard_drop = make_standard_drop_from_template()
self.mongo_plunder.plunder_times += 1
self.mongo_plunder.save()
self.send_notify()
plunder_finished_signal.send(
sender=None,
from_char_id=self.char_id,
from_char_name=Char(self.char_id).mc.name,
to_char_id=to_char_id,
from_win=msg.self_win,
standard_drop=standard_drop,
target_server_url=target_server_url,
)
return (msg, standard_drop)
def _get_plunder_reward(self, city_id, gold, hero_original_ids):
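        # Reward composition: the plundered gold scaled by the VIP bonus, a chance to
        # capture one of the rival's heroes as a prisoner, and item drops from the city
        # whose probability decreases as plunder_times grows.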
def _get_prisoner():
prison = 0
heros = [hid for hid in hero_original_ids if hid]
while heros:
hid = random.choice(heros)
heros.remove(hid)
if hid in PRISONER_POOL:
prison = hid
break
ac = ActivityEntry(self.char_id, 30005)
"""@type: core.activity.Activity30005"""
if not ac:
_prob = PLUNDER_GET_PRISONER_PROB
else:
_prob = ac.get_prisoner_prob()
ae = ActivityEntry(self.char_id, 50005)
if ae and ae.is_valid():
_vip = ae.get_current_value(self.char_id)
if _vip == 6:
_prob = 50
elif _vip >= 7:
_prob = 100
if random.randint(1, 100) <= _prob:
return prison
return 0
char = Char(self.char_id).mc
vip_plus = VIP_FUNCTION[char.vip].plunder_addition
standard_drop = make_standard_drop_from_template()
standard_drop['gold'] = int(gold * (1 + vip_plus / 100.0))
        # Prisoner of war
got_hero_id = _get_prisoner()
if got_hero_id:
p = Prison(self.char_id)
p.prisoner_add(got_hero_id, gold/2)
achievement = Achievement(self.char_id)
achievement.trig(13, 1)
        # Item drops
city = BATTLES[city_id]
if city.normal_drop:
drop_ids = [int(i) for i in city.normal_drop.split(',')]
drop_prob = max(
PLUNDER_GET_DROPS_TIMES - (self.mongo_plunder.plunder_times - 1) * PLUNDER_DROP_DECREASE_FACTOR,
PLUNDER_GET_DROPS_TIMES * PLUNDER_DROP_MIN_FACTOR
)
drop = get_drop(drop_ids, multi=int(drop_prob))
drop.pop('gold')
standard_drop.update(drop)
resource = Resource(self.char_id, "Plunder Reward")
resource.add(**standard_drop)
self.send_notify()
if got_hero_id:
standard_drop['heros'] = [(got_hero_id, 1)]
return standard_drop
def send_notify(self):
self.load_mongo_record()
msg = PlunderNotify()
msg.current_times = self.mongo_plunder.current_times
msg.max_times = self.max_plunder_times()
msg.success_times_weekly = PlunderLeaderboardWeekly.get_char_times(self.char_id)
publish_to_char(self.char_id, pack_msg(msg))
@staticmethod
def cron_job():
MongoPlunder._get_collection().update({}, {'$set': {'plunder_times': 0}}, multi=True)
class PlunderLeaderboardWeekly(object):
@staticmethod
def incr(char_id, times=1):
try:
board = MongoPlunderBoard.objects.get(id=char_id)
except DoesNotExist:
board = MongoPlunderBoard(id=char_id)
board.times = 0
board.times += times
board.save()
@staticmethod
def get_leaderboard(length=10):
boards = MongoPlunderBoard.objects.order_by('-times').limit(length)
return [(b.id, b.times) for b in boards]
@staticmethod
def get_char_times(char_id):
try:
board = MongoPlunderBoard.objects.get(id=char_id)
except DoesNotExist:
board = MongoPlunderBoard(id=char_id)
board.times = 0
board.save()
return board.times
@staticmethod
def clean():
MongoPlunderBoard.drop_collection()
@staticmethod
def make_get_response():
msg = GetPlunderLeaderboardResponse()
msg.ret = 0
for cid, times in PlunderLeaderboardWeekly.get_leaderboard():
leader = msg.leaders.add()
leader.char.MergeFrom(create_character_infomation_message(cid))
leader.times = times
return msg
@staticmethod
def load_from_redis():
        # Run only once: used to import the data from redis into mongodb.
        # The redis_persistence config has already been removed, so the address is hard-coded here; the previous config was 127.0.0.1:6380
import redis
from core.server import server
REDISKEY = '_plunder_leaderboard_weekly:{0}'.format(server.id)
r = redis.Redis(host='127.0.0.1', port=6380)
data = r.zrange(REDISKEY, 0, -1, withscores=True)
for char_id, times in data:
char_id = int(char_id)
times = int(times)
PlunderLeaderboardWeekly.incr(char_id, times)
| [
"[email protected]"
] | |
00e9f5fe14e266706112b3eda5db3a81edd109a1 | 4fdd98d5e82385393d4eb2f6526cddb15563c477 | /src/morphforge/core/quantities/__init__.py | 8ed61d5caaf565c48693fc05504751f56db48a69 | [
"BSD-2-Clause"
] | permissive | bmerrison/morphforge | f8541d4471ce13519986c42d4ebb3714a238e390 | 6d06845493bf01aae94a706bfde5d4eb9c733659 | refs/heads/master | 2021-01-18T07:49:47.645031 | 2012-09-26T20:54:13 | 2012-09-26T20:54:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,047 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
#from quantities import *
from morphforge.core.quantities.fromcore import factorise_units_from_list
from morphforge.core.quantities.fromcore import unit
import common_neuroscience_defs
from morphforge.core.quantities.wrappers import NpPqWrappers
from morphforge.core.quantities.common_neuroscience_defs import mS, uS, nS, pS
from morphforge.core.quantities.common_neuroscience_defs import mF, uF, nF, pF
from morphforge.core.quantities.common_neuroscience_defs import um2, cm2
from morphforge.core.quantities.common_neuroscience_defs import mm2, m2
from morphforge.core.quantities.common_neuroscience_defs import Molar, nMolar
from morphforge.core.quantities.common_neuroscience_defs import uMolar
from morphforge.core.quantities.common_neuroscience_defs import ohmcm
from morphforge.core.quantities.common_neuroscience_defs import MOhm
from morphforge.core.quantities.common_neuroscience_defs import mV
from morphforge.core.quantities.common_neuroscience_defs import pA_um2
from quantities import ms, Quantity, millivolt, milliamp, picoamp
from quantities import milli, siemens, millisecond, volt, J, second
U = unit
__all__ = [
'factorise_units_from_list',
'unit',
'NpPqWrappers',
'common_neuroscience_defs',
'mS', 'uS', 'nS', 'pS',
'mF', 'uF', 'nF', 'pF',
'um2', 'cm2', 'mm2', 'm2',
'Molar', 'uMolar', 'nMolar',
'ohmcm', 'MOhm',
'mV','pA_um2',
'ms',
'Quantity',
'millivolt','milliamp','picoamp',
'milli', 'siemens',
'millisecond',
'volt','J','second'
]
| [
"[email protected]"
] | |
95b4ba670fad9aa6e2ada7300f4aa62646de42ef | 897d82d4953ed7b609746a0f252f3f3440b650cb | /day07/exercise_personal/08_exercise.py | 36811015b50dfbbced51178172c00201a0a3c549 | [] | no_license | haiou90/aid_python_core | dd704e528a326028290a2c18f215b1fd399981bc | bd4c7a20950cf7e22e8e05bbc42cb3b3fdbe82a1 | refs/heads/master | 2022-11-26T19:13:36.721238 | 2020-08-07T15:05:17 | 2020-08-07T15:05:17 | 285,857,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | list_poker = []
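# Enumerate every ordered outcome of rolling three six-sided dice (r, c, v each from 1 to 6).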
for r in range(1,7):
for c in range(1,7):
for v in range(1,7):
list_poker.append((r,c,v))
print(list_poker) | [
"[email protected]"
] | |
8a752594fbaede8a55376c2bb862d7962842e631 | fa67314df981eb8c72790819ca29f45c37c52c69 | /Assignment-1_CS16BTECH11036/Question4/Dtree.py | a37b07fb18afb5767de65c0ba9122cf331dccafb | [] | no_license | omsitapara23/AML | 5ce142751354cee72a8007ba952c55ae8a90d193 | 7d320ef6ce342590dfbce9e70d9d9fff7561939b | refs/heads/master | 2020-04-20T01:45:41.095561 | 2019-03-12T17:13:57 | 2019-03-12T17:13:57 | 168,553,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,800 | py | import numpy as np
import csv
import json
from sklearn.neighbors import KNeighborsClassifier
from sklearn import preprocessing
from collections import Counter
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn import tree
def makeTrain(data_train):
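    # Build a binary bag-of-ingredients feature matrix: one row per recipe and one
    # column per distinct ingredient, with 1 marking that the ingredient is present.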
totAttr = set([])
count=0
labels = []
for item in data_train:
labels.append(item.get("cuisine"))
ingredits = item.get("ingredients")
for i in ingredits:
totAttr.add(i)
count += 1
featureVec = []
for i in totAttr:
featureVec.append(i)
data = np.zeros((count, len(totAttr)))
count =0
for item in data_train:
ingredits = item.get("ingredients")
for i in ingredits:
if i in featureVec:
ind = featureVec.index(i)
data[count,ind] = 1
count +=1
return data, len(totAttr), labels, featureVec
def makeTest(data_test, totAttr, featureVec):
no = 0
for item in data_test:
no += 1
ids = []
data = np.zeros((no, totAttr))
count = 0
for item in data_test:
ids.append(item.get("id"))
ingredits = item.get("ingredients")
for i in ingredits:
if i in featureVec:
ind = featureVec.index(i)
data[count,ind] = 1
count += 1
return data, ids
def preprocessing_data(data_train, data_test):
return preprocessing.scale(data_train), preprocessing.scale(data_test)
def learn(data_train, labels):
model = tree.DecisionTreeClassifier()
model.fit(data_train, labels)
return model
def test(data_test, model):
output = model.predict(data_test)
return output
def write_csv(output, ids):
text_file = open("Output.csv", "w")
text_file.write("id,cuisine\n")
counter = 0
for instance in output:
text_file.write("%d,%s\n" % (ids[counter] , instance))
counter += 1
text_file.close()
if __name__ == "__main__":
#opening the files
with open('train.json') as f:
data_train = json.load(f)
with open('test.json') as f1:
data_test = json.load(f1)
data_train, totAttr, labels, featureVec = makeTrain(data_train)
print "Train loaded"
data_test, ids = makeTest(data_test, totAttr, featureVec)
print "Test loaded"
print "Preprocessing..."
data_train, data_test = preprocessing_data(data_train, data_test)
print "Preprocessing complete"
print "Learning..."
model = learn(data_train, labels)
print "Model learned"
print "Predicting..."
output = test(data_test, model)
print "Predection complete writing to file..."
write_csv(output, ids)
print "Writing success"
| [
"[email protected]"
] | |
828e53f2e62d6cc45ed309a2d29a4778afa6d5a6 | 057bdbd048d8b99064eb06af45d9e40beff6fe80 | /examples/app.py | 5726ced00da4c2d832a28e7d5bce9fbca39c9927 | [
"MIT"
] | permissive | miguelgrinberg/APIFairy | 5a058f9763c381b765a4139366e35e579b4a1723 | ed2c9b99e8ed8b7cd61a1b95f7f295bd2a902590 | refs/heads/main | 2023-07-24T14:22:21.282560 | 2023-07-15T23:01:50 | 2023-07-15T23:01:50 | 299,060,489 | 303 | 28 | MIT | 2023-01-05T15:49:05 | 2020-09-27T15:24:33 | Python | UTF-8 | Python | false | false | 2,202 | py | """Welcome to the APIFairy Simple Example project!
## Overview
This is a short and simple example that demonstrates many of the features of
APIFairy.
"""
from typing import Annotated
from uuid import uuid4
from flask import Flask, abort
from flask_marshmallow import Marshmallow
from apifairy import APIFairy, body, response, other_responses
app = Flask(__name__)
app.config['APIFAIRY_TITLE'] = 'APIFairy Simple Example'
app.config['APIFAIRY_VERSION'] = '1.0'
ma = Marshmallow(app)
apifairy = APIFairy(app)
users = []
class UserSchema(ma.Schema):
class Meta:
description = 'This schema represents a user'
id = ma.String(dump_only=True, description="The user's id")
username = ma.String(required=True, description="The user's username")
first_name = ma.String(description="The user's first name")
last_name = ma.String(description="The user's last name")
age = ma.Integer(description="The user's age")
password = ma.String(load_only=True, description="The user's password")
@app.get('/users')
@response(UserSchema(many=True), description="The users")
def get_users():
"""Return all the users."""
return users
@app.post('/users')
@body(UserSchema)
@response(UserSchema, description="The new user")
@other_responses({400: 'Duplicate username or validation error'})
def new_user(user):
"""Create a new user."""
if any([u['username'] == user['username'] for u in users]):
abort(400)
new_id = uuid4().hex
user['id'] = new_id
users.append(user)
return user
@app.get('/users/<id>')
@response(UserSchema, description="The requested user")
@other_responses({404: 'User not found'})
def get_user(id: Annotated[str, 'The id of the user']):
"""Return a user."""
user = [u for u in users if u['id'] == id]
if not user:
abort(404)
return user[0]
@app.errorhandler(400)
def bad_request(e):
return {'code': 400, 'error': 'bad request'}
@app.errorhandler(404)
def not_found(e):
return {'code': 404, 'error': 'not found'}
@apifairy.error_handler
def validation_error(status_code, messages):
return {'code': status_code, 'error': 'validation error',
'messages': messages['json']}
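# To try this example locally (a hedged sketch; assumes this module is named "app" and a
# recent Flask CLI is installed):
#
#   flask --app app run
#
# APIFairy then serves the generated API documentation, by default under /docs.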
| [
"[email protected]"
] | |
91c04102d7309c5dc96caf9dbaefa29ae8dc3d40 | ecb113be53f2fe1768e85a1004d571c74d87ae8d | /tests/fmlaas/model/model.py | 0621cd46327918a951006e15f1e784933fe91ece | [] | no_license | Internet-SmokeAlarm/core | 39351e4d5bddf19bd59faf51bbc225c0e0521905 | 87b66a10042ec41916c490bb20cb4117f3caf1ba | refs/heads/master | 2023-02-17T18:40:12.822530 | 2020-07-05T20:28:38 | 2020-07-05T20:28:38 | 216,093,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,953 | py | import unittest
from dependencies.python.fmlaas.s3_storage import JobAggregateModelPointer
from dependencies.python.fmlaas.model import Model
class ModelTestCase(unittest.TestCase):
def test_to_json_pass(self):
model = Model("1234", str(JobAggregateModelPointer("4456", "5567", "1234")), "123552")
json_data = model.to_json()
self.assertEqual(model.get_entity_id(), json_data["entity_id"])
self.assertEqual("4456/5567/1234/aggregate_model", json_data["name"])
self.assertEqual(model.get_size(), json_data["size"])
def test_from_json_pass(self):
json_data = {
'entity_id': '1234',
'name': '4456/5567/1234/aggregate_model',
'size': "123552"}
model = Model.from_json(json_data)
self.assertEqual(model.get_entity_id(), "1234")
self.assertEqual(model.get_name(), JobAggregateModelPointer("4456", "5567", "1234"))
self.assertEqual(model.get_size(), "123552")
def test_is_valid_json_pass(self):
self.assertTrue(Model.is_valid_json(
{'entity_id': '1234', 'name': '4456/5567/1234', 'size': "123552"}))
self.assertFalse(Model.is_valid_json(
{'name': '4456/5567/1234', 'size': "123552"}))
self.assertFalse(Model.is_valid_json(
{'entity_id': '1234', 'size': "123552"}))
self.assertFalse(Model.is_valid_json(
{'entity_id': '1234', 'name': '4456/5567/1234'}))
def test_eq_pass(self):
model_1 = Model("123123", "23123/123123/1231231", "12312313")
model_2 = Model("564543", "23123/123123/1231231", "12312313")
model_3 = Model("564543", "23123/123123/1231231", "12312313")
model_4 = Model("564543", "23123/123123/1231231", "123512313")
self.assertTrue(model_1 == model_1)
self.assertFalse(model_1 == model_2)
self.assertTrue(model_2 == model_3)
self.assertFalse(model_2 == model_4)
| [
"[email protected]"
] | |
33e0f6e0f58713cd6b9e0bf434b0190abffc395a | a47e4480d1584c5a2bb4c31ac512c864d0c2c240 | /core/settings.py | 8e44faed4463d4a577291f1a56a01053b9a77cef | [
"MIT"
] | permissive | shaymk1/ke-nako-shop | 014bd960e2048d4e2b5cc77c0b2d99f2058208d4 | 5c6f3dfb6b1e89efe111c1c6daa21434c7843ddc | refs/heads/main | 2023-08-02T08:19:31.702068 | 2021-09-20T19:21:53 | 2021-09-20T19:21:53 | 406,715,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,243 | py |
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-twj=7-)r(w9l5r96^0xf30w$w-id1f3uo=8pqc_d6_o#d!6i!#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'store',
'category',
'accounts.apps.AccountsConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'category.context_processors.menu_links',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# saying we are using custom user model
AUTH_USER_MODEL = 'accounts.Account'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATICFILES_DIRS = [
BASE_DIR/'static'
]
MEDIA_ROOT = BASE_DIR/'static'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"[email protected]"
] | |
78b752d117f11c2a5a5d056b47227a18ba096e0b | 185b7529d9d439a0d554db2fc7b60a1531a5a836 | /scrappy_settings/asgi.py | 2d14dbc27187113e3031e51b3b38ab18a5531eeb | [] | no_license | cavidanhasanli/Scrappy_price | 8901baeaa40beb7102042d687d405258ae20d7fe | b5cc50010f727ba95686d89cac29f76533d860c2 | refs/heads/main | 2023-03-11T00:14:56.576016 | 2021-02-16T09:58:00 | 2021-02-16T09:58:00 | 338,854,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
ASGI config for scrappy_settings project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'scrappy_settings.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
6b7df363e07c32497d7b6a3ae77012127a2fb79a | 789f108a849be99052f13cdec68953266458e646 | /nfe_mde/nfe_schedule.py | c0e580b66329ba0eeea7f4afc2737145ed796e5a | [] | no_license | rick-romero/odoo-brazil-eletronic-documents | 6ebe1b30deaa854861aa632ee62b022b8eeb2d8a | 2a1f144612ef23b77b57b9edcf2089a2b2b3077a | refs/heads/8.0 | 2021-01-14T14:07:27.728313 | 2016-07-12T22:11:29 | 2016-07-12T22:11:29 | 59,238,349 | 0 | 0 | null | 2016-05-19T19:58:39 | 2016-05-19T19:58:39 | null | UTF-8 | Python | false | false | 7,908 | py | # coding=utf-8
###############################################################################
# #
# Copyright (C) 2015 Danimar Ribeiro www.trustcode.com.br #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import re
import base64
import logging
from lxml import objectify
from datetime import datetime
from .service.mde import distribuicao_nfe
from openerp import models, api, fields
from openerp.exceptions import Warning as UserError
from openerp.addons.nfe.sped.nfe.validator.config_check import \
validate_nfe_configuration
_logger = logging.getLogger(__name__)
class nfe_schedule(models.TransientModel):
_name = 'nfe.schedule'
state = fields.Selection(
string="Estado",
selection=[('init', 'Não iniciado'), ('done', 'Finalizado')],
default='init'
)
@staticmethod
def _mask_cnpj(cnpj):
if cnpj:
val = re.sub('[^0-9]', '', cnpj)
if len(val) == 14:
cnpj = "%s.%s.%s/%s-%s" % (val[0:2], val[2:5], val[5:8],
val[8:12], val[12:14])
return cnpj
@api.model
def schedule_download(self, raise_error=False):
companies = self.env['res.company'].search([])
for company in companies:
try:
validate_nfe_configuration(company)
nfe_result = distribuicao_nfe(company, company.last_nsu_nfe)
env_events = self.env['l10n_br_account.document_event']
if nfe_result['code'] == '137' or nfe_result['code'] == '138':
event = {
'type': '12', 'company_id': company.id,
'response': 'Consulta distribuição: sucesso',
'status': nfe_result['code'],
'message': nfe_result['message'],
'create_date': datetime.now(),
'write_date': datetime.now(),
'end_date': datetime.now(),
'state': 'done', 'origin': 'Scheduler Download'
}
obj = env_events.create(event)
self.env['ir.attachment'].create(
{
'name': u"Consulta manifesto - {0}".format(
company.cnpj_cpf),
'datas': base64.b64encode(
nfe_result['file_returned']),
'datas_fname': u"Consulta manifesto - {0}".format(
company.cnpj_cpf),
'description': u'Consulta distribuição: sucesso',
'res_model': 'l10n_br_account.document_event',
'res_id': obj.id
})
env_mde = self.env['nfe.mde']
for nfe in nfe_result['list_nfe']:
if nfe['schema'] == 'resNFe_v1.00.xsd':
root = objectify.fromstring(nfe['xml'])
cnpj_forn = self._mask_cnpj(('%014d' % root.CNPJ))
partner = self.env['res.partner'].search(
[('cnpj_cpf', '=', cnpj_forn)])
invoice_eletronic = {
'chNFe': root.chNFe,
'nSeqEvento': nfe['NSU'], 'xNome': root.xNome,
'tpNF': str(root.tpNF), 'vNF': root.vNF,
'cSitNFe': str(root.cSitNFe),
'state': 'pending',
'dataInclusao': datetime.now(),
'CNPJ': cnpj_forn,
'IE': root.IE,
'partner_id': partner.id,
'dEmi': datetime.strptime(str(root.dhEmi)[:19],
'%Y-%m-%dT%H:%M:%S'),
'company_id': company.id,
'formInclusao': u'Verificação agendada'
}
obj_nfe = env_mde.create(invoice_eletronic)
file_name = 'resumo_nfe-%s.xml' % nfe['NSU']
self.env['ir.attachment'].create(
{
'name': file_name,
'datas': base64.b64encode(nfe['xml']),
'datas_fname': file_name,
'description': u'NFe via manifesto',
'res_model': 'nfe.mde',
'res_id': obj_nfe.id
})
company.last_nsu_nfe = nfe['NSU']
else:
event = {
'type': '12',
'response': 'Consulta distribuição com problemas',
'company_id': company.id,
'file_returned': nfe_result['file_returned'],
'file_sent': nfe_result['file_sent'],
'message': nfe_result['message'],
'create_date': datetime.now(),
'write_date': datetime.now(),
'end_date': datetime.now(),
'status': nfe_result['code'],
'state': 'done', 'origin': 'Scheduler Download'
}
obj = env_events.create(event)
self.env['ir.attachment'].create(
{
'name': u"Consulta manifesto - {0}".format(
company.cnpj_cpf),
'datas': base64.b64encode(
nfe_result['file_returned']),
'datas_fname': u"Consulta manifesto - {0}".format(
company.cnpj_cpf),
'description': u'Consulta manifesto com erro',
'res_model': 'l10n_br_account.document_event',
'res_id': obj.id
})
except Exception as ex:
_logger.error("Erro ao consultar Manifesto", exc_info=True)
if raise_error:
raise UserError(
u'Atenção',
u'Não foi possivel efetuar a consulta!\n Verifique o log')
@api.one
def execute_download(self):
self.schedule_download(raise_error=True)
| [
"[email protected]"
] | |
94e544a15e0e29b8f771385dfbdcefcb09413fcd | 30467bd47c29412687a384d824655daa7400cef4 | /examples/dockerbuild.py | d7f0e1ae2176bdb6508f530e8a2f4f6e916f5b3c | [] | no_license | dpedu/shipper | 556409843c6da888338d2a791d4f06b17c709a52 | e5544416c2b0ee818285b9a13761f1c351d7676f | refs/heads/master | 2020-05-17T17:39:45.645549 | 2019-02-03T00:59:34 | 2019-02-03T00:59:34 | 183,860,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | from shipper.lib import ShipperJob, SshConnection, GiteaCheckoutTask, LambdaTask, \
DockerBuildTask, DockerTagTask, DockerPushTask
# This job accepts gitea webhooks and builds docker images. If the "imagename" parameter is passed, it will be used to
# name the image. Otherwise, a repo named "docker-image-name" builds/pushes a docker image called "image-name".
job = ShipperJob()
job.default_connection(SshConnection(None, None, key="testkey.pem"))
job.add_task(GiteaCheckoutTask("code", allow_branches=["master"]))
def getimgname(job):
if "imagename" in job.props: # prefer "imagename" url param
imagename = job.props["imagename"]
else: # fall back to repo name, stripping 'docker-' prefix if needed.
imagename = job.props["payload"]["repository"]["name"] # Expecting a repo name like "docker-nginx"
if imagename.startswith("docker-"): # strip the "docker-" repo name prefix
imagename = imagename[len("docker-"):]
job.props["docker_imagename"] = "dpedu/" + imagename # we'll build the image locally as this
job.props["docker_tag"] = "apps2reg:5000/dpedu/" + imagename # then tag and push it as this
job.add_task(LambdaTask(getimgname))
job.add_task(DockerBuildTask())
job.add_task(DockerTagTask())
job.add_task(DockerPushTask())
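# Pipeline summary: check out the repo from Gitea, derive the image name, then build, tag and push the Docker image.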
| [
"[email protected]"
] | |
3bd68f15f1ba900bd732975bf7fe77e8c8d0874c | c4cfce852c59bdd65d5ab5e77021e42cb7b02ff8 | /eng_to_kana_test/test_eng_to_kana.py | c7f683673655332c566e7794109d70e9fe281858 | [
"MIT"
] | permissive | yokolet/transcript | 5749be490a7f53e907b2143696afaa592647dc59 | 4a83cc70d868bb243846ebee8c322c63c2092141 | refs/heads/master | 2020-05-28T09:47:27.771042 | 2019-06-15T21:46:53 | 2019-06-15T21:46:59 | 188,961,209 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import unittest
from eng_to_kana.eng_to_kana import EngToKana
class TestEngToKana(unittest.TestCase):
def setUp(self):
self.list_func = EngToKana().fromWordList
self.file_func = EngToKana().fromFile
def test_1(self):
words = ['what', 'girl', 'cat', 'judge', 'majority']
expected = [['ワット', 'ホワット'], ['ガール'], ['キャット'], ['ジャッジ'], ['マジョリティー']]
self.assertEqual(expected, self.list_func(words))
def test_2(self):
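        # A word missing from the dictionary is expected to come back as the 'E_DIC' marker.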
words = ['gaga']
self.assertEqual([['E_DIC']], self.list_func(words)) | [
"[email protected]"
] | |
360c489c9cbf919e2cb62c14bf5a0f370355366e | 96f9c82d0331a853abb602aa0e214ba10d97e782 | /gcp/plugins/modules/gcp_compute_target_pool_info.py | 904029bfbefcc02c8d4b9c707949d7d57de1ea26 | [] | no_license | gundalow-collections/google | c5a269477dd44e44d50fba3e1145ba3150585ba8 | d4148513bec8926ec617c4450900236adb5e87bf | refs/heads/master | 2020-07-25T02:11:46.852809 | 2019-09-16T20:13:35 | 2019-09-16T20:13:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,814 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: google.gcp.gcp_compute_target_pool_info
description:
- Gather info for GCP TargetPool
- This module was called C(google.gcp.gcp_compute_target_pool_facts) before Ansible 2.9. The
usage has not changed.
short_description: Gather info for GCP TargetPool
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
- Each additional filter in the list will act be added as an AND condition (filter1
and filter2) .
type: list
region:
description:
- The region where the target pool resides.
required: true
type: str
extends_documentation_fragment: google.gcp.gcp
'''
EXAMPLES = '''
- name: get info on a target pool
gcp_compute_target_pool_info:
region: us-west1
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
backupPool:
description:
- This field is applicable only when the containing target pool is serving a
forwarding rule as the primary pool, and its failoverRatio field is properly
set to a value between [0, 1].
- 'backupPool and failoverRatio together define the fallback behavior of the
primary target pool: if the ratio of the healthy instances in the primary
pool is at or below failoverRatio, traffic arriving at the load-balanced IP
will be directed to the backup pool.'
- In case where failoverRatio and backupPool are not set, or all the instances
in the backup pool are unhealthy, the traffic will be directed back to the
primary pool in the "force" mode, where traffic will be spread to the healthy
instances with the best effort, or to all instances when no instance is healthy.
returned: success
type: dict
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
failoverRatio:
description:
- This field is applicable only when the containing target pool is serving a
forwarding rule as the primary pool (i.e., not as a backup pool to some other
target pool). The value of the field must be in [0, 1].
- 'If set, backupPool must also be set. They together define the fallback behavior
of the primary target pool: if the ratio of the healthy instances in the primary
pool is at or below this number, traffic arriving at the load-balanced IP
will be directed to the backup pool.'
- In case where failoverRatio is not set or all the instances in the backup
pool are unhealthy, the traffic will be directed back to the primary pool
in the "force" mode, where traffic will be spread to the healthy instances
with the best effort, or to all instances when no instance is healthy.
returned: success
type: str
healthCheck:
description:
- A reference to a HttpHealthCheck resource.
- A member instance in this pool is considered healthy if and only if the health
checks pass. If not specified it means all member instances will be considered
healthy at all times.
returned: success
type: dict
id:
description:
- The unique identifier for the resource.
returned: success
type: int
instances:
description:
- A list of virtual machine instances serving this pool.
- They must live in zones contained in the same region as this pool.
returned: success
type: list
name:
description:
- Name of the resource. Provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
sessionAffinity:
description:
- 'Session affinity option. Must be one of these values: - NONE: Connections
from the same client IP may go to any instance in the pool.'
- "- CLIENT_IP: Connections from the same client IP will go to the same instance
in the pool while that instance remains healthy."
- "- CLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol
will go to the same instance in the pool while that instance remains healthy."
returned: success
type: str
region:
description:
- The region where the target pool resides.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible_collections.google.gcp.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str')))
if module._name == 'gcp_compute_target_pool_facts':
module.deprecate("The 'gcp_compute_target_pool_facts' module has been renamed to 'gcp_compute_target_pool_info'", version='2.13')
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetPools".format(**module.params)
def fetch_list(module, link, query):
auth = GcpSession(module, 'compute')
return auth.list(link, return_if_object, array_name='items', params={'filter': query})
def query_options(filters):
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, all queries should have ()
if f[0] != '(' and f[-1] != ')':
queries.append("(%s)" % ''.join(f))
else:
queries.append(f)
return ' '.join(queries)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
4094dc115614d752fdc61bd95ecac6cfb7797367 | 7b3711d4c6d7284255ba0270d49d120f984bf7c6 | /problems/2361_minimum_cost_using_the_train_line.py | 546dcff817a1d4ba91ccab3ae95614fb4d2f1ff7 | [] | no_license | loganyu/leetcode | 2d336f30feb55379aaf8bf0273d00e11414e31df | 77c206305dd5cde0a249365ce7591a644effabfc | refs/heads/master | 2023-08-18T09:43:10.124687 | 2023-08-18T00:44:51 | 2023-08-18T00:44:51 | 177,875,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,677 | py | '''
A train line going through a city has two routes, the regular route and the express route. Both routes go through the same n + 1 stops labeled from 0 to n. Initially, you start on the regular route at stop 0.
You are given two 1-indexed integer arrays regular and express, both of length n. regular[i] describes the cost it takes to go from stop i - 1 to stop i using the regular route, and express[i] describes the cost it takes to go from stop i - 1 to stop i using the express route.
You are also given an integer expressCost which represents the cost to transfer from the regular route to the express route.
Note that:
There is no cost to transfer from the express route back to the regular route.
You pay expressCost every time you transfer from the regular route to the express route.
There is no extra cost to stay on the express route.
Return a 1-indexed array costs of length n, where costs[i] is the minimum cost to reach stop i from stop 0.
Note that a stop can be counted as reached from either route.
Example 1:
Input: regular = [1,6,9,5], express = [5,2,3,10], expressCost = 8
Output: [1,7,14,19]
Explanation: The diagram above shows how to reach stop 4 from stop 0 with minimum cost.
- Take the regular route from stop 0 to stop 1, costing 1.
- Take the express route from stop 1 to stop 2, costing 8 + 2 = 10.
- Take the express route from stop 2 to stop 3, costing 3.
- Take the regular route from stop 3 to stop 4, costing 5.
The total cost is 1 + 10 + 3 + 5 = 19.
Note that a different route could be taken to reach the other stops with minimum cost.
Example 2:
Input: regular = [11,5,13], express = [7,10,6], expressCost = 3
Output: [10,15,24]
Explanation: The diagram above shows how to reach stop 3 from stop 0 with minimum cost.
- Take the express route from stop 0 to stop 1, costing 3 + 7 = 10.
- Take the regular route from stop 1 to stop 2, costing 5.
- Take the express route from stop 2 to stop 3, costing 3 + 6 = 9.
The total cost is 10 + 5 + 9 = 24.
Note that the expressCost is paid again to transfer back to the express route.
Constraints:
n == regular.length == express.length
1 <= n <= 10^5
1 <= regular[i], express[i], expressCost <= 10^5
'''
class Solution:
def minimumCosts(self, regular: List[int], express: List[int], expressCost: int) -> List[int]:
prevReg = 0
prevExp = expressCost
ans = [None] * len(regular)
for i in range(1, len(regular) + 1):
reg = regular[i-1] + min(prevReg, prevExp)
exp = express[i-1] + min(expressCost + prevReg, prevExp)
ans[i-1] = min(reg, exp)
prevReg = reg
prevExp = exp
return ans
| [
"[email protected]"
] | |
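A small harness for the first example in the problem statement, assuming the Solution class above is loaded in an environment that already provides the List type hint (as the LeetCode runner does):
regular, express, expressCost = [1, 6, 9, 5], [5, 2, 3, 10], 8
print(Solution().minimumCosts(regular, express, expressCost))  # expected: [1, 7, 14, 19]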
cc15539f09c655e2a85fd8d417d67c0477c45e87 | a323fc11db97690c4ea50d92766d9d5db0418aac | /article/migrations/0020_auto_20200719_1016.py | ae9b1b99f0de83746e98a9195dcf7750dc6193a6 | [] | no_license | sparshjaincs/articleplus | ad909f937ebf856b6da87bd623af0776f8faafc3 | 0fa34a5384d8cfc52181be42c130aadd03ad8ef2 | refs/heads/master | 2023-08-10T23:21:44.845993 | 2021-09-30T22:29:13 | 2021-09-30T22:29:13 | 279,252,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | # Generated by Django 2.2.6 on 2020-07-19 04:46
import datetime
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('article', '0019_auto_20200719_0951'),
]
operations = [
migrations.RemoveField(
model_name='articles',
name='mute',
),
migrations.RemoveField(
model_name='articles',
name='subscribe',
),
migrations.AddField(
model_name='profile',
name='mute',
field=models.ManyToManyField(blank=True, default=None, related_name='mute_title', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='profile',
name='subscribe',
field=models.ManyToManyField(blank=True, default=None, related_name='subscribe_title', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='activity',
name='activity_time',
field=models.TimeField(default=datetime.datetime(2020, 7, 19, 10, 16, 18, 77112)),
),
migrations.AlterField(
model_name='articles',
name='time',
field=models.TimeField(default=datetime.datetime(2020, 7, 19, 10, 16, 18, 61070)),
),
]
| [
"[email protected]"
] | |
2e0fad46c16958cbd3582723916a0ac1dda5a23e | 0bd14d7590db43af015433edc95c101b325f2b45 | /simple_sso/sso_server/admin.py | e21b92dc68b986e617291c935f1c31c63bf08af1 | [
"BSD-3-Clause"
] | permissive | chrisglass/django-simple-sso | 21f390535c012af4bba9a1b78a23b298592611df | b63d37ac64450ff5a506e6b1c2e34e42109b8cd8 | refs/heads/master | 2020-12-25T03:22:00.352693 | 2011-08-10T15:40:09 | 2011-08-10T15:40:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | # -*- coding: utf-8 -*-
from django.contrib import admin
from simple_sso.sso_server.models import Client
admin.site.register(Client)
| [
"[email protected]"
] | |
0b7ff06c8aa9f6a941ff4fe8a749d7d0a028286b | 4da0c8906c9cd671e3a4bee3a6ee801a353e3d9a | /Water/watres/migrations/0012_targetusewo_checkin.py | 82f4465901163fac353bd3219380d8d3cb10db6e | [] | no_license | avpakh/GVK | 2a5a699caa8a986a3fd0dadbe2160fc9da5bf193 | ac8b8d8ad5cd5ef8485e98cd532a29cd420e0cae | refs/heads/master | 2020-06-13T10:35:36.663668 | 2017-01-06T09:01:42 | 2017-01-06T09:01:42 | 75,392,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-02 14:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('watres', '0011_auto_20160816_1523'),
]
operations = [
migrations.AddField(
model_name='targetusewo',
name='checkin',
field=models.BooleanField(default=1),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
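A quick way to see how the module's filter handling behaves is to exercise the same joining logic on its own; the sketch below re-implements query_options outside Ansible purely for illustration (the name combine_filters and the sample filter strings are made up):
def combine_filters(filters):
    # mirrors query_options above: a single filter passes through unchanged,
    # multiple filters are each parenthesised and joined with spaces (AND)
    if not filters:
        return ''
    if len(filters) == 1:
        return filters[0]
    queries = []
    for f in filters:
        if f[0] != '(' and f[-1] != ')':
            queries.append("(%s)" % f)
        else:
            queries.append(f)
    return ' '.join(queries)
print(combine_filters(['name = test_object']))                         # name = test_object
print(combine_filters(['name = test_object', 'region eq us-west1']))   # (name = test_object) (region eq us-west1)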
2445f62695bc503243d90b47fd380b81e2c25e92 | 3528abad46b15133b2108c237f926a1ab252cbd5 | /Core/ableton/v2/control_surface/elements/optional.py | 86a72e77d342aa7dee2a59777b27af577769514a | [] | no_license | scottmudge/MPK261_Ableton | 20f08234f4eab5ba44fde6e5e745752deb968df2 | c2e316b8347367bd157276f143b9f1a9bc2fe92c | refs/heads/master | 2020-03-20T10:56:32.421561 | 2018-06-14T19:12:47 | 2018-06-14T19:12:47 | 137,389,086 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,241 | py | # Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/ableton/v2/control_surface/elements/optional.py
# Compiled at: 2018-04-23 20:27:04
from __future__ import absolute_import, print_function, unicode_literals
from ...base import listens
from .combo import ToggleElement
class ChoosingElement(ToggleElement):
u"""
An Element wrapper that enables one of the nested elements based on
the value of the given flag.
"""
def __init__(self, flag=None, *a, **k):
super(ChoosingElement, self).__init__(*a, **k)
self.__on_flag_changed.subject = flag
self.__on_flag_changed(flag.value)
@listens('value')
def __on_flag_changed(self, value):
self.set_toggled(value)
class OptionalElement(ChoosingElement):
u"""
An Element wrapper that enables the nested element IFF some given
flag is set to a specific value.
"""
def __init__(self, control=None, flag=None, value=None, *a, **k):
on_control = control if value else None
off_control = None if value else control
super(OptionalElement, self).__init__(on_control=on_control, off_control=off_control, flag=flag, *a, **k)
return
| [
"[email protected]"
] | |
e1ff8838e74408dffc4bbb3722723ff62a425439 | 5afa0b8e447bb6b1565a64d201ee38adfa406e44 | /rapidsmsrw1000/apps/ubuzima/reports/utils.py | d393926a512649803396bf1457d11c5f819a56f9 | [] | no_license | daaray/rapidsmsrw1000 | 98ad2cb24a4b5cbbd8b496c64ad357c6ff687874 | 013a06a61987b18e61bdb0da8da09140b8b16d9a | refs/heads/master | 2020-12-25T00:38:29.934693 | 2013-03-26T07:15:11 | 2013-03-26T07:15:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,183 | py | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from rapidsmsrw1000.apps.ubuzima.models import *
from rapidsmsrw1000.apps.ambulances.models import *
from rapidsmsrw1000.apps.ubuzima.models import *
from rapidsmsrw1000.apps.chws.models import *
from django.utils.translation import ugettext as _
from django.utils.translation import activate, get_language
from decimal import *
from exceptions import Exception
import traceback
from datetime import *
from time import *
from django.db.models import Q
from django.conf import settings
import re
from random import randint
from rapidsms.router import send
from rapidsms.models import Connection
def forward (message, identity, text):
if message.connection:
conn = Connection(backend = message.connection.backend, identity = identity)
send( text, conn)
#print conn, text
return True
else:
return False
def read_weight(code_string, weight_is_mothers=False):
try:
field_type = FieldType.objects.get(key="child_weight" if not weight_is_mothers else "mother_weight")
value = Decimal(code_string[2:])
field = Field(type=field_type, value=value)
return field
except: return None
def read_height(code_string, height_is_mothers=False):
try:
field_type = FieldType.objects.get(key="child_height" if not height_is_mothers else "mother_height")
value = Decimal(code_string[2:])
field = Field(type=field_type, value=value)
return field
except: return None
def read_key(code_string):
try:
field_type = FieldType.objects.get(key = code_string.lower())
field = Field(type=field_type)
return field
except: return None
def parse_date(dob_string):
"""Tries to parse a string into some kind of date representation. Note that we don't use Date objects
to store things away, because we want to accept limited precision dates, ie, just the year if
necessary."""
# simple #### date.. ie, 1987 or 87
m3 = re.search("^(\d+)$", dob_string)
if m3:
value = m3.group(1)
# two digit date, guess on the first digits based on size
if len(value) == 2:
if int(value) <= date.today().year % 100:
value = "20%s" % value
else:
value = "19%s" % value
# we have a four digit date, does it look reasonable?
if len(value) == 4:
return value
# full date: DD.MM.YYYY
m3 = re.search("^(\d+)\.(\d+)\.(\d+)$", dob_string)
if m3:
dd = m3.group(1)
mm = m3.group(2)
yyyy = m3.group(3)
# print "%s = '%s' '%s' '%s'" % (dob_string, dd, mm, yyyy)
# make sure we are in the right format
if len(dd) > 2 or len(mm) > 2 or len(yyyy) > 4:
raise Exception(_("Invalid date format, must be in the form: DD.MM.YYYY"))
# invalid month
if int(mm) > 12 or int(mm) < 1:
raise Exception(_("Invalid date format, must be in the form: DD.MM.YYYY"))
# invalid day
if int(dd) > 31 or int(dd) < 1:
raise Exception(_("Invalid date format, must be in the form: DD.MM.YYYY"))
# Otherwise, parse into our format
return "%02d.%02d.%04d" % (int(dd), int(mm), int(yyyy))
return None
def read_fields(code_string, accept_date=False, weight_is_mothers=False):
"""Tries to parse all the fields according to our set of action and movement codes. We also
try to figure out if certain fields are dates and stuff them in as well. """
# split our code string by spaces
codes = code_string.split()
fields = []
invalid_codes = []
num_mov_codes = 0
# the dob we might extract from this
dob = None
# for each code
for code in codes:
try:
# first try to look up the code in the DB
field_type = FieldType.objects.get(key=code.lower())
fields.append(Field(type=field_type))
# if the action code is a movement code, increment our counter of movement codes
# messages may only have one movement code
if field_type.category.id == 4:
num_mov_codes += 1
# didn't recognize this code? then it is a scalar value, run some regexes to derive what it is
except FieldType.DoesNotExist:
m1 = re.search("(\d+\.?\d*)(k|kg|kilo|kilogram)", code, re.IGNORECASE)
m2 = re.search("(\d+\.?\d*)(c|cm|cent|centimeter)", code, re.IGNORECASE)
# this is a weight
if m1:
field_type = FieldType.objects.get(key="child_weight" if not weight_is_mothers else "mother_weight")
value = Decimal(m1.group(1))
field = Field(type=field_type, value=value)
fields.append(field)
# this is a length
elif m2:
field_type = FieldType.objects.get(key="muac")
value = Decimal(m2.group(1))
field = Field(type=field_type, value=value)
fields.append(field)
# unknown
else:
# try to parse as a dob
date = parse_dob(code)
if accept_date and date:
dob = date
else:
invalid_codes.append(code)
# take care of any error messaging
error_msg = ""
if len(invalid_codes) > 0:
error_msg += _("Unknown action code: %(invalidcode)s. ") % \
{ 'invalidcode': ", ".join(invalid_codes)}
if num_mov_codes > 1:
error_msg += unicode(_("You cannot give more than one location code"))
if error_msg:
error_msg = _("Error. %(error)s") % { 'error': error_msg }
# there's actually an error, throw it over the fence
raise Exception(error_msg)
return (fields, dob)
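# Illustration (hypothetical codes): for the SMS fragment "3.5kg 12.5cm xyz",
# read_fields would build a child_weight Field (3.5) and a muac Field (12.5),
# but "xyz" matches no FieldType key and is not a date, so the function raises
# "Error. Unknown action code: xyz." instead of returning the fields.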
def get_or_create_patient(reporter, national_id):
"""Takes care of searching our DB for the passed in patient. Equality is determined
using the national id only (IE, dob doesn't come into play). This will create a
new patient with the passed in reporter if necessary."""
# try to look up the patent by id
try:
patient = Patient.objects.get(national_id=national_id)
except Patient.DoesNotExist, e:
# not found? create the patient instead
patient = Patient.objects.create(national_id=national_id, location = reporter.health_centre)
return patient
def create_report(report_type_name, patient, reporter):
"""Convenience for creating a new Report object from a reporter, patient and type """
report_type = ReportType.objects.get(name=report_type_name)
report = Report(patient=patient, reporter=reporter, type=report_type,
location=reporter.health_centre, village=reporter.village)
return report
def run_triggers(message, report):
"""Called whenever we get a new report. We run our triggers, figuring out if there
are messages to send out to supervisors. We return the message that should be sent
to the reporter themselves, or None if there is no matching trigger for the reporter."""
try:
# find all matching triggers
triggers = TriggeredText.get_triggers_for_report(report)
# the message we'll send back to the reporter
reporter_message = None
# for each one
for trigger in triggers:
lang = get_language()
alert = TriggeredAlert(reporter=report.reporter, report=report, trigger=trigger, location = report.location, village = report.reporter.village,\
cell = report.reporter.cell, sector = report.reporter.sector, district = report.reporter.district,\
province = report.reporter.province, nation= report.reporter.nation)
alert.save()
curloc = report.location
# if the destination is the reporter himself, need to respond correctly
if trigger.destination == TriggeredText.DESTINATION_CHW:
# calculate our message based on language, we'll return it in a bit
lang = get_language()
reporter_message = trigger.message_kw
if lang == 'en':
reporter_message = trigger.message_en
elif lang == 'fr':
reporter_message = trigger.message_fr
# if we are supposed to tell the district supervisor and our current location
# is a health clinic, then walk up the tree looking for a hospital
elif trigger.destination == TriggeredText.DESTINATION_DIS or trigger.destination == TriggeredText.DESTINATION_SUP:
# find the parent
location = curloc
sups = Supervisor.objects.filter(health_centre = location).order_by("pk")
if trigger.destination == TriggeredText.DESTINATION_DIS:
location = report.reporter.referral_hospital
sups = Supervisor.objects.filter(health_centre = location).order_by("pk")
# couldn't find it? oh well, we'll alert the normal supervisor
#print [sup.connection() for sup in sups]
# for each supervisor
for sup in sups:
# load the connection for it
conn = sup.connection()
lang = sup.language
# get th appropriate message to send
text = trigger.message_kw
code_lang = trigger.triggers.all()[0].kw
if lang == 'en':
text = trigger.message_en
code_lang = trigger.triggers.all()[0].en
elif lang == 'fr':
text = trigger.message_fr
code_lang = trigger.triggers.all()[0].fr
# and send this message to them
msg_forward = text % (message.connection.identity, report.patient.national_id, report.reporter.village, code_lang)
forward(message, conn.identity, msg_forward)
elif trigger.destination == TriggeredText.DESTINATION_AMB:
try:
ambs = AmbulanceDriver.objects.filter(health_centre = curloc)
if ambs.count() < 1:
curloc = report.reporter.referral_hospital
ambs = AmbulanceDriver.objects.filter(referral_hospital = curloc)
for amb in ambs:
amb.send_notification(message, report)
forward(message, amb.phonenumber, trigger.message_kw % (message.connection.identity, report.patient.national_id, report.reporter.village, trigger.triggers.all()[0].kw))
except Exception, e:
print e
continue
# return our advice texts
if is_mother_weight_loss(report):
forward(message, message.connection.identity, "Uyu mubyeyi %s yatakaje ibiro, nukureba uko wamugira inama." % report.patient.national_id)
elif is_mother_risky(report):
forward(message, message.connection.identity, "Uyu mubyeyi %s afite uburebure budashyitse, nukureba uko mwamuba hafi kugeza abyaye." \
% report.patient.national_id)
return reporter_message
except Exception, e:
print e
return None
def cc_supervisor(message, report):
""" CC's the supervisor of the clinic for this CHW """
try:
# now look up to see if we have any reporters in this group with the same location as
# our reporter
sups = Supervisor.objects.filter(health_centre = message.reporter.health_centre).order_by("pk")
# reporter identity
reporter_ident = message.reporter.connection().identity
#reporter village
reporter_village = message.reporter.village
# we have at least one supervisor
if sups:
for sup in sups:
# load the connection for it
conn = sup.connection()
# and send this message to them
msg_forward = _("%(phone)s: %(report)s" % { 'phone': reporter_ident, 'report': report.as_verbose_string() })
forward(message, conn.identity, msg_forward)
except Exception, e:
#print e
pass
def parse_dob(dob_string):
"""Tries to parse a string into some kind of date representation. Note that we don't use Date objects
to store things away, because we want to accept limited precision dates, ie, just the year if
necessary."""
# simple #### date.. ie, 1987 or 87
m3 = re.search("^(\d+)$", dob_string)
if m3:
value = m3.group(1)
# two digit date, guess on the first digits based on size
if len(value) == 2:
if int(value) <= date.today().year % 100:
value = "20%s" % value
else:
value = "19%s" % value
# we have a four digit date, does it look reasonable?
if len(value) == 4:
return value
# full date: DD.MM.YYYY
m3 = re.search("^(\d+)\.(\d+)\.(\d+)$", dob_string)
if m3:
dd = m3.group(1)
mm = m3.group(2)
yyyy = m3.group(3)
# print "%s = '%s' '%s' '%s'" % (dob_string, dd, mm, yyyy)
# make sure we are in the right format
if len(dd) > 2 or len(mm) > 2 or len(yyyy) > 4:
raise Exception(_("Invalid date format, must be in the form: DD.MM.YYYY"))
# invalid month
if int(mm) > 12 or int(mm) < 1:
raise Exception(_("Invalid date format, must be in the form: DD.MM.YYYY"))
# invalid day
if int(dd) > 31 or int(dd) < 1:
raise Exception(_("Invalid date format, must be in the form: DD.MM.YYYY"))
# is the year in the future
if int(yyyy) > int(date.today().year):
raise Exception(_("Invalid date, cannot be in the future."))
#is the the date in future
dob="%02d.%02d.%04d" % (int(dd), int(mm), int(yyyy))
if datetime.strptime(dob,"%d.%m.%Y").date() > date.today():
raise Exception(_("Invalid date, cannot be in the future."))
# Otherwise, parse into our format
return "%02d.%02d.%04d" % (int(dd), int(mm), int(yyyy))
return None
def read_muac(code_string):
try:
field_type = FieldType.objects.get(key="muac")
value = Decimal(code_string[4:])
field = Field(type=field_type, value=value)
return field
except: return None
def read_number(code_string):
try:
field_type = FieldType.objects.get(key="child_number")
value = Decimal(code_string)
field = Field(type=field_type, value=value)
return field
except: return None
def read_gravity(code_string):
try:
field_type = FieldType.objects.get(key="gravity")
value = Decimal(code_string)
field = Field(type=field_type, value=value)
return field
except: return None
def read_parity(code_string):
try:
field_type = FieldType.objects.get(key="parity")
value = Decimal(code_string)
field = Field(type=field_type, value=value)
return field
except: return None
def read_bmi(report):
try:
weight = report.fields.get(type__key = 'mother_weight').value
height = report.fields.get(type__key = 'mother_height').value
bmi = weight*100*100/(height*height)
return bmi
except: pass
def is_mother_weight_loss(report):
try:
weight = report.fields.get(type__key = 'mother_weight').value
history = Report.objects.filter(patient = report.patient).order_by('-id')[0].fields.get(type__key = 'mother_weight').value
if weight < history: return True
else: return False
except: return False
def is_mother_risky(report):
try:
height = report.fields.get(type__key = 'mother_height').value
if height < 145: return True
else: return False
except: return False
def read_nid(message, nid):
if len(nid) != 16:
err = ErrorNote(errmsg = message.text, type = ErrorType.objects.get(name = "Invalid ID"), errby = message.reporter, identity =\
message.connection.identity, location =message.reporter.health_centre , village=message.reporter.village,\
cell = message.reporter.cell, sector = message.reporter.sector, district = message.reporter.health_centre.district,\
province = message.reporter.health_centre.province, nation = message.reporter.health_centre.nation).save()
raise Exception(_("Error. National ID must be exactly 16 digits, you sent the nid: %(nat_id)s with only %(uburefu)d digits") %
{ "nat_id": nid , "uburefu": len(nid) } )
else: return nid
def set_date_string(date_string):
"""
Trap anybody setting the date_string and try to set the date from it.
"""
try:
date = datetime.strptime(date_string, "%d.%m.%Y").date()
return date
except ValueError,e:
# no-op, just keep the date_string value
pass
def message_reporter(message):
try:
return Reporter.objects.filter(national_id = message.connection.contact.name )[0]
except :
if settings.TRAINING_ENV == True: return anonymous_reporter(message.connection.identity)
else: raise Exception(_("You need to be registered first, use the REG keyword"))
def anonymous_reporter(identity):
reporter = None
try:
names = "ANONYMOUS"
telephone = identity
try:
hc = HealthCentre.objects.get(name = "TEST")
hp = Hospital.objects.get(name = "TEST")
telephone = parse_phone_number(telephone)
nid = "%s%s" % ( telephone[3:] , str(random_with_N_digits(6)))
try: tester = Reporter.objects.get(telephone_moh = telephone, health_centre = hc, referral_hospital = hp)
except:
tester, created = Reporter.objects.get_or_create(telephone_moh = telephone, national_id = nid, health_centre = hc, referral_hospital = hp)
tester.surname = names
tester.role = Role.objects.get(code = 'asm')
tester.sex = Reporter.sex_male
tester.education_level = Reporter.education_universite
tester.date_of_birth = datetime.today()
tester.join_date = datetime.today()
tester.district = hc.district
tester.nation = hc.nation
tester.province = hc.province
tester.sector = Sector.objects.get(name = 'TEST')
tester.cell = Cell.objects.get(name = 'TEST')
tester.village = Village.objects.get(name = 'TEST')
tester.updated = datetime.now()
tester.language = Reporter.language_kinyarwanda
tester.save()
confirm, created = RegistrationConfirmation.objects.get_or_create(reporter = tester)
confirm.save()
reporter = tester
except Exception, e:
print e
pass
except Exception, e:
print e
pass
return reporter
def parse_phone_number(number):
number = number
try:
number = str(int(float(number)))
except:
try:
number = str(int(number))
except:
try:
number = str(number)
except:
return False
number = number.replace(" ", "")
try:
if type(number)!=str:
number=str(int(number))
if number[:3]=="+25" and len(number[3:])==10:
number=number
elif number[:3]=="250" and len(number[3:])==9:
number="+"+number
elif number[:3]=="078" and len(number[3:])==7:
number="+25"+number
elif number[:2]=="78" and len(number[2:])==7:
number="+250"+number
return number
except:
return False
def random_with_N_digits(n):
range_start = 10**(n-1)
range_end = (10**n)-1
return randint(range_start, range_end)
| [
"[email protected]"
] | |
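A quick standalone check of the number-normalisation branches in parse_phone_number above; the helper below copies that logic out for illustration (the sample numbers are made up, and only the Rwandan 078x formats handled by the original are covered):
def normalize_rw(number):
    # same branch order as parse_phone_number: already-international numbers
    # pass through, local 250/078/78 prefixes are promoted to +250 form
    number = str(number).replace(" ", "")
    if number[:3] == "+25" and len(number[3:]) == 10:
        return number
    if number[:3] == "250" and len(number[3:]) == 9:
        return "+" + number
    if number[:3] == "078" and len(number[3:]) == 7:
        return "+25" + number
    if number[:2] == "78" and len(number[2:]) == 7:
        return "+250" + number
    return number
assert normalize_rw("0781234567") == "+250781234567"
assert normalize_rw("250781234567") == "+250781234567"
assert normalize_rw("781234567") == "+250781234567"
assert normalize_rw("+250781234567") == "+250781234567"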
140475678049842dcc7a9513455b15a220182ac9 | fe8d49331e73fe89be9195bf748159830d2c3622 | /zerver/views/drafts.py | 47b5c6fa242f0d66e718fb84c1ffb31a7fce178b | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | lizzzp1/zulip | 13e1a4428b5ed6d9cdc06cb291b126ee127a03e8 | 4e8067aadc7d5a4b2644e383898c5c731740ffd5 | refs/heads/master | 2022-12-13T23:44:52.351757 | 2020-09-12T19:04:24 | 2020-09-12T19:04:24 | 295,025,435 | 1 | 0 | Apache-2.0 | 2020-09-12T21:00:35 | 2020-09-12T21:00:34 | null | UTF-8 | Python | false | false | 5,766 | py | import time
from typing import Any, Dict, List, Set
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.lib.actions import recipient_for_user_profiles
from zerver.lib.addressee import get_user_profiles_by_ids
from zerver.lib.exceptions import JsonableError
from zerver.lib.message import truncate_body, truncate_topic
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.lib.streams import access_stream_by_id
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.lib.validator import (
check_dict_only,
check_float,
check_int,
check_list,
check_required_string,
check_string,
check_string_in,
check_union,
)
from zerver.models import Draft, UserProfile
VALID_DRAFT_TYPES: Set[str] = {"", "private", "stream"}
# A validator to verify if the structure (syntax) of a dictionary
# meets the requirements to be a draft dictionary:
draft_dict_validator = check_dict_only(
required_keys=[
("type", check_string_in(VALID_DRAFT_TYPES)),
("to", check_list(check_int)), # The ID of the stream to send to, or a list of user IDs.
("topic", check_string), # This string can simply be empty for private type messages.
("content", check_required_string),
],
optional_keys=[
("timestamp", check_union([check_int, check_float])), # A Unix timestamp.
]
)
def further_validated_draft_dict(draft_dict: Dict[str, Any],
user_profile: UserProfile) -> Dict[str, Any]:
""" Take a draft_dict that was already validated by draft_dict_validator then
further sanitize, validate, and transform it. Ultimately return this "further
validated" draft dict. It will have a slightly different set of keys the values
for which can be used to directly create a Draft object. """
content = truncate_body(draft_dict["content"])
if "\x00" in content:
raise JsonableError(_("Content must not contain null bytes"))
timestamp = draft_dict.get("timestamp", time.time())
timestamp = round(timestamp, 6)
if timestamp < 0:
# While it's not exactly an invalid timestamp, it's not something
# we want to allow either.
raise JsonableError(_("Timestamp must not be negative."))
last_edit_time = timestamp_to_datetime(timestamp)
topic = ""
recipient = None
to = draft_dict["to"]
if draft_dict["type"] == "stream":
topic = truncate_topic(draft_dict["topic"])
if "\x00" in topic:
raise JsonableError(_("Topic must not contain null bytes"))
if len(to) != 1:
raise JsonableError(_("Must specify exactly 1 stream ID for stream messages"))
stream, recipient, sub = access_stream_by_id(user_profile, to[0])
elif draft_dict["type"] == "private" and len(to) != 0:
to_users = get_user_profiles_by_ids(set(to), user_profile.realm)
try:
recipient = recipient_for_user_profiles(to_users, False, None, user_profile)
except ValidationError as e: # nocoverage
raise JsonableError(e.messages[0])
return {
"recipient": recipient,
"topic": topic,
"content": content,
"last_edit_time": last_edit_time,
}
def fetch_drafts(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
user_drafts = Draft.objects.filter(user_profile=user_profile).order_by("last_edit_time")
draft_dicts = {str(draft.id): draft.to_dict() for draft in user_drafts}
return json_success({"count": user_drafts.count(), "drafts": draft_dicts})
@has_request_variables
def create_drafts(request: HttpRequest, user_profile: UserProfile,
draft_dicts: List[Dict[str, Any]]=REQ("drafts",
validator=check_list(draft_dict_validator)),
) -> HttpResponse:
draft_objects = []
for draft_dict in draft_dicts:
valid_draft_dict = further_validated_draft_dict(draft_dict, user_profile)
draft_objects.append(Draft(
user_profile=user_profile,
recipient=valid_draft_dict["recipient"],
topic=valid_draft_dict["topic"],
content=valid_draft_dict["content"],
last_edit_time=valid_draft_dict["last_edit_time"],
))
created_draft_objects = Draft.objects.bulk_create(draft_objects)
draft_ids = [draft_object.id for draft_object in created_draft_objects]
return json_success({"ids": draft_ids})
@has_request_variables
def edit_draft(request: HttpRequest, user_profile: UserProfile, draft_id: int,
draft_dict: Dict[str, Any]=REQ("draft", validator=draft_dict_validator),
) -> HttpResponse:
try:
draft_object = Draft.objects.get(id=draft_id, user_profile=user_profile)
except Draft.DoesNotExist:
return json_error(_("Draft does not exist"), status=404)
valid_draft_dict = further_validated_draft_dict(draft_dict, user_profile)
draft_object.content = valid_draft_dict["content"]
draft_object.topic = valid_draft_dict["topic"]
draft_object.recipient = valid_draft_dict["recipient"]
draft_object.last_edit_time = valid_draft_dict["last_edit_time"]
draft_object.save()
return json_success()
def delete_draft(request: HttpRequest, user_profile: UserProfile, draft_id: int) -> HttpResponse:
try:
draft_object = Draft.objects.get(id=draft_id, user_profile=user_profile)
except Draft.DoesNotExist:
return json_error(_("Draft does not exist"), status=404)
draft_object.delete()
return json_success()
| [
"[email protected]"
] | |
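For reference, payloads accepted by draft_dict_validator above look like the following (the IDs and text are made up; a stream draft must carry exactly one stream ID in "to", while a private draft lists recipient user IDs and may leave the topic empty):
stream_draft = {
    "type": "stream",
    "to": [9],                     # exactly one stream ID
    "topic": "release planning",
    "content": "Draft of the 4.0 announcement",
    "timestamp": 1595479019.43,    # optional Unix timestamp
}
private_draft = {
    "type": "private",
    "to": [12, 37],                # recipient user IDs
    "topic": "",
    "content": "Shall we sync tomorrow?",
}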
387f51e0f8907ab9ea32d68006e9dec8eae78b6c | 7d9bf6444ef321d3b8264f814fc52036c9373805 | /ba_data_paths/__init__.py | 19a5403cda72cfbf2efcef05a4c4be3112cc29da | [
"Apache-2.0"
] | permissive | knu2xs/ba_data_paths | ef5f34d1f054bed2beddd2eb0461c981ade7a4db | c161feec529882a2edfb2ed88b8a89cf07ec3243 | refs/heads/master | 2020-07-09T21:12:17.956351 | 2019-10-14T19:59:42 | 2019-10-14T19:59:42 | 204,085,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | from ba_data_paths.ba_data import ba_data
| [
"[email protected]"
] | |
a83a11d7de133095f348d5920113cb836562415e | 8e95e79840005f6c34dfb978e8fe6e0ec4f7f643 | /7_Image Processing in Python_/29_Edges.py | f4953bd068a7dbf38dcc7620a093d0b9b0858f0d | [] | no_license | Naysla/Machine_Learning | a0593cac41ef1561f14bec55780570b82fc37720 | e75d5cd2894ccb005228ab3da87dde9025385a08 | refs/heads/master | 2023-02-01T17:19:32.413609 | 2020-12-22T20:36:45 | 2020-12-22T20:36:45 | 323,708,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | #Edges
#In this exercise you will identify the shapes in a grapefruit image by detecting the edges, using the Canny algorithm.
#Image preloaded as grapefruit.
#The color module has already been preloaded for you.
# Import the canny edge detector
from skimage.feature import canny
# Convert image to grayscale
grapefruit = color.rgb2gray(grapefruit)
# Apply canny edge detector
canny_edges = canny(grapefruit)
# Show resulting image
show_image(canny_edges, "Edges with Canny")
#You can see the shapes and details of the grapefruits of the original image being highlighted. | [
"[email protected]"
] | |
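The same steps run outside the exercise environment too; a minimal self-contained variant using a scikit-image sample picture instead of the preloaded grapefruit (displaying with matplotlib is an assumption about the local setup):
from skimage import data, color
from skimage.feature import canny
import matplotlib.pyplot as plt
coffee_gray = color.rgb2gray(data.coffee())  # sample RGB image converted to grayscale
edges = canny(coffee_gray, sigma=2)          # larger sigma smooths more and keeps fewer edges
plt.imshow(edges, cmap="gray")
plt.title("Edges with Canny")
plt.axis("off")
plt.show()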
b1c806080769dbbd96a828a4f775b7cd730fbd53 | 8eeef7742573a8b671648d94e448d5614272c5d6 | /core2web/week2/day7/printNumber.py | 33b2619d33b2dfbf88f662253e9577e0f68a5cc6 | [] | no_license | damodardikonda/Python-Basics | 582d18bc9d003d90b1a1930c68b9b39a85778ea7 | fd239722fc6e2a7a02dae3e5798a5f1172f40378 | refs/heads/master | 2023-01-28T16:22:19.153514 | 2020-12-11T06:36:49 | 2020-12-11T06:36:49 | 270,733,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | """
Program 1: Write a program that accepts an integer from user and print it.
Input: 11
Output: 11
"""
v = int(input("Enter the number: "))
print("Output:", v)
| [
"[email protected]"
] | |
b87d3c6c3e1f49c4c0cfbc2f7d0ecab4016fc060 | fb2cc597f319380d228fc15c4008760a82203687 | /var/spack/repos/builtin/packages/py-linear-operator/package.py | 8133edf5144a33322dd069c679c2a2a0f9be91e9 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LGPL-2.1-only"
] | permissive | JayjeetAtGithub/spack | c41b5debcbe139abb2eab626210505b7f930d637 | 6c2df00443a2cd092446c7d84431ae37e64e4296 | refs/heads/develop | 2023-03-21T02:35:58.391230 | 2022-10-08T22:57:45 | 2022-10-08T22:57:45 | 205,764,532 | 0 | 0 | MIT | 2019-09-02T02:44:48 | 2019-09-02T02:44:47 | null | UTF-8 | Python | false | false | 888 | py | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyLinearOperator(PythonPackage):
"""A linear operator implementation, primarily designed for finite-dimensional
positive definite operators (i.e. kernel matrices)."""
homepage = "https://github.com/cornellius-gp/linear_operator/"
pypi = "linear_operator/linear_operator-0.1.1.tar.gz"
version("0.1.1", sha256="81adc1aea9e98f3c4f07f5608eb77b689bc61793e9beebfea82155e9237bf1be")
depends_on("[email protected]:", type=("build", "run"))
depends_on("py-setuptools", type="build")
depends_on("py-setuptools-scm", type="build")
depends_on("[email protected]:", type=("build", "run"))
depends_on("py-scipy", type=("build", "run"))
| [
"[email protected]"
] | |
6b89749fe8823ae962abbaa45373e75891ef3212 | 15e6385746ccf4b8eb6c6e302aca236021bb8781 | /LintcodePartII/li405_submatrixSum.py | 443777a2c60f7562a7b839a401d41945fa35145d | [] | no_license | akb46mayu/Data-Structures-and-Algorithms | 11c4bbddc9b4d286e1aeaa9481eb6a620cd54746 | de98494e14fff3e2a468da681c48d60b4d1445a1 | refs/heads/master | 2021-01-12T09:51:32.618362 | 2018-05-16T16:37:18 | 2018-05-16T16:37:18 | 76,279,268 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | """
Given an integer matrix, find a submatrix where the sum of numbers is zero.
Your code should return the coordinate of the left-up and right-down number.
Have you met this question in a real interview? Yes
Example
Given matrix
[
[1 ,5 ,7],
[3 ,7 ,-8],
[4 ,-8 ,9],
]
return [(1,1), (2,2)]
"""
class Solution:
# @param {int[][]} matrix an integer matrix
# @return {int[][]} the coordinate of the left-up and right-down number
def submatrixSum(self, matrix):
# Write your code here
if not matrix:
return [[0,0], [0,0]]
m, n = len(matrix), len(matrix[0])
psum = [[0]*n for _ in range(m)]
for i in range(m):
for j in range(n):
if i == 0:
psum[i][j] = matrix[i][j]
else:
psum[i][j] = psum[i-1][j] + matrix[i][j]
for lx in range(m):
for rx in range(lx, m):
dict = {0:-1}
sum0 = 0
for j in range(n):
sumcur = psum[rx][j] - psum[lx-1][j] if lx >= 1 else psum[rx][j]
sum0 += sumcur
if sum0 in dict:
return [[lx,dict[sum0]+1],[rx,j]]
dict[sum0] = j
return [[0,0], [0,0]]
| [
"[email protected]"
] | |
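A tiny harness for the docstring example above, assuming the Solution class is in scope; it confirms that the returned corners enclose a zero-sum block:
matrix = [
    [1, 5, 7],
    [3, 7, -8],
    [4, -8, 9],
]
(r1, c1), (r2, c2) = Solution().submatrixSum(matrix)
block_sum = sum(matrix[i][j] for i in range(r1, r2 + 1) for j in range(c1, c2 + 1))
print([r1, c1], [r2, c2], block_sum)   # expected: [1, 1] [2, 2] 0
assert block_sum == 0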
dbe0a5a91d774e8c317b43293a42fde45b272cee | e5b8a5d93989dd53933c5cd417afa8b2a39ad307 | /ultracart/models/oauth_token_response.py | 3c0fa3d282238a67a49514ccec2bfe343dd0916b | [
"Apache-2.0"
] | permissive | gstingy/uc_python_api | f3586bfce9c962af2e8c1bc266ff25e0f1971278 | 9a0bd3f6e63f616586681518e44fe37c6bae2bba | refs/heads/master | 2020-03-28T11:13:22.537641 | 2018-09-10T17:07:59 | 2018-09-10T17:07:59 | 148,190,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,949 | py | # coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2
OpenAPI spec version: 2.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class OauthTokenResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'access_token': 'str',
'error': 'str',
'error_description': 'str',
'error_uri': 'str',
'expires_in': 'str',
'refresh_token': 'str',
'scope': 'str',
'token_type': 'str'
}
attribute_map = {
'access_token': 'access_token',
'error': 'error',
'error_description': 'error_description',
'error_uri': 'error_uri',
'expires_in': 'expires_in',
'refresh_token': 'refresh_token',
'scope': 'scope',
'token_type': 'token_type'
}
def __init__(self, access_token=None, error=None, error_description=None, error_uri=None, expires_in=None, refresh_token=None, scope=None, token_type=None):
"""
OauthTokenResponse - a model defined in Swagger
"""
self._access_token = None
self._error = None
self._error_description = None
self._error_uri = None
self._expires_in = None
self._refresh_token = None
self._scope = None
self._token_type = None
self.discriminator = None
if access_token is not None:
self.access_token = access_token
if error is not None:
self.error = error
if error_description is not None:
self.error_description = error_description
if error_uri is not None:
self.error_uri = error_uri
if expires_in is not None:
self.expires_in = expires_in
if refresh_token is not None:
self.refresh_token = refresh_token
if scope is not None:
self.scope = scope
if token_type is not None:
self.token_type = token_type
@property
def access_token(self):
"""
Gets the access_token of this OauthTokenResponse.
Access token to use in OAuth authenticated API call
:return: The access_token of this OauthTokenResponse.
:rtype: str
"""
return self._access_token
@access_token.setter
def access_token(self, access_token):
"""
Sets the access_token of this OauthTokenResponse.
Access token to use in OAuth authenticated API call
:param access_token: The access_token of this OauthTokenResponse.
:type: str
"""
self._access_token = access_token
@property
def error(self):
"""
Gets the error of this OauthTokenResponse.
:return: The error of this OauthTokenResponse.
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""
Sets the error of this OauthTokenResponse.
:param error: The error of this OauthTokenResponse.
:type: str
"""
self._error = error
@property
def error_description(self):
"""
Gets the error_description of this OauthTokenResponse.
:return: The error_description of this OauthTokenResponse.
:rtype: str
"""
return self._error_description
@error_description.setter
def error_description(self, error_description):
"""
Sets the error_description of this OauthTokenResponse.
:param error_description: The error_description of this OauthTokenResponse.
:type: str
"""
self._error_description = error_description
@property
def error_uri(self):
"""
Gets the error_uri of this OauthTokenResponse.
:return: The error_uri of this OauthTokenResponse.
:rtype: str
"""
return self._error_uri
@error_uri.setter
def error_uri(self, error_uri):
"""
Sets the error_uri of this OauthTokenResponse.
:param error_uri: The error_uri of this OauthTokenResponse.
:type: str
"""
self._error_uri = error_uri
@property
def expires_in(self):
"""
Gets the expires_in of this OauthTokenResponse.
The number of seconds since issuance when the access token will expire and need to be refreshed using the refresh token
:return: The expires_in of this OauthTokenResponse.
:rtype: str
"""
return self._expires_in
@expires_in.setter
def expires_in(self, expires_in):
"""
Sets the expires_in of this OauthTokenResponse.
The number of seconds since issuance when the access token will expire and need to be refreshed using the refresh token
:param expires_in: The expires_in of this OauthTokenResponse.
:type: str
"""
self._expires_in = expires_in
@property
def refresh_token(self):
"""
Gets the refresh_token of this OauthTokenResponse.
The refresh token that should be used to fetch a new access token when the expiration occurs
:return: The refresh_token of this OauthTokenResponse.
:rtype: str
"""
return self._refresh_token
@refresh_token.setter
def refresh_token(self, refresh_token):
"""
Sets the refresh_token of this OauthTokenResponse.
The refresh token that should be used to fetch a new access token when the expiration occurs
:param refresh_token: The refresh_token of this OauthTokenResponse.
:type: str
"""
self._refresh_token = refresh_token
@property
def scope(self):
"""
Gets the scope of this OauthTokenResponse.
        The scope of permissions associated with the access token
:return: The scope of this OauthTokenResponse.
:rtype: str
"""
return self._scope
@scope.setter
def scope(self, scope):
"""
Sets the scope of this OauthTokenResponse.
        The scope of permissions associated with the access token
:param scope: The scope of this OauthTokenResponse.
:type: str
"""
self._scope = scope
@property
def token_type(self):
"""
Gets the token_type of this OauthTokenResponse.
Type of token
:return: The token_type of this OauthTokenResponse.
:rtype: str
"""
return self._token_type
@token_type.setter
def token_type(self, token_type):
"""
Sets the token_type of this OauthTokenResponse.
Type of token
:param token_type: The token_type of this OauthTokenResponse.
:type: str
"""
allowed_values = ["bearer"]
if token_type not in allowed_values:
raise ValueError(
"Invalid value for `token_type` ({0}), must be one of {1}"
.format(token_type, allowed_values)
)
self._token_type = token_type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, OauthTokenResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
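Typical round-trip with the generated model above (the field values are made up; the class would normally be imported from the ultracart.models package this file lives in):
resp = OauthTokenResponse(
    access_token="example-access-token",
    token_type="bearer",             # only "bearer" passes the setter's validation
    expires_in="3600",
    refresh_token="example-refresh-token",
    scope="order_read",
)
print(resp.to_dict()["token_type"])    # bearer
print(resp == OauthTokenResponse())    # False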
2ae16a9e9e78108fc155c5ad03fae33bc317ad74 | d63222abe326a3c8debd59bb8d24cb7eab3de09e | /leetcode/mock-interviews/reorganize_string/solve2.py | 457e7f2208383c86f7b71461edd8321eeb4e2c1e | [] | no_license | tariqrahiman/pyComPro | 91f47e93eb0a077d489659fcf0a75d5c1a65fc17 | 86ec13f47506a2495ab6b6bbb58d4e8b2a21538b | refs/heads/master | 2022-02-10T04:15:40.194828 | 2019-06-16T10:22:38 | 2019-06-16T10:22:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | class Solution(object):
def reorganizeString(self, S):
        count_letter = [[0, i] for i in xrange(26)]  # 26 buckets, one per lowercase letter
for char in S: count_letter[ord(char) - 97][0] += 1
count_letter.sort(reverse=True)
count_letter = [k for k in count_letter if k[0] > 0]
res = [""]
def decrease(index):
res[0] += chr(count_letter[index][1] + 97)
count_letter[index][0] -= 1
if count_letter[index][0] == 0: del count_letter[index]
print count_letter
while len(count_letter) > 1:
i = len(count_letter) - 1
while i > 0:
for _ in xrange(count_letter[i][0]):
decrease(i); decrease(i - 1)
i -= 2
print res[0]
print count_letter
if len(count_letter) == 1:
if count_letter[0][0] != 1: return " "
return chr(count_letter[0][1] + 97) + res[0]
return res[0]
| [
"[email protected]"
] | |
185c7b7f95c8487e2f85422f38c93095e8bd3438 | 3f36a8e71ea13a135467ea64367d6e3358333f74 | /movie_details.py | b88daf458d68088e861cd4d0c53e98e1ee709f51 | [
"MIT"
] | permissive | gorpo/Exemplos-Python | 4257873af5a23b79d51cc60e8ea84185b7e299c4 | 2cc11e0604d83c4f0a46645ceef0b209e467e6e6 | refs/heads/master | 2023-03-09T00:24:27.404626 | 2020-08-24T04:49:59 | 2020-08-24T04:49:59 | 264,974,378 | 4 | 4 | MIT | 2021-02-26T02:53:36 | 2020-05-18T15:02:56 | Python | UTF-8 | Python | false | false | 1,600 | py | import urllib.request
import mechanize
from bs4 import BeautifulSoup
# Create a Browser
browser = mechanize.Browser()
# Disable loading robots.txt
browser.set_handle_robots(False)
browser.addheaders = [('User-agent',
'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98;)')]
movie_title = input("Enter movie title: ")
movie_types = ('feature', 'tv_movie', 'tv_series', 'tv_episode', 'tv_special',
'tv_miniseries', 'documentary', 'video_game', 'short', 'video', 'tv_short')
# Navigate
browser.open('http://www.imdb.com/search/title')
# Choose a form
browser.select_form(nr=1)
browser['title'] = movie_title
# Check all the boxes of movie types
for m_type in movie_types:
browser.find_control(type='checkbox', nr=0).get(m_type).selected = True
# Submit
fd = browser.submit()
soup = BeautifulSoup(fd.read(), 'html5lib')
# Updated from td tag to h3 tag
for div in soup.findAll('h3', {'class': 'lister-item-header'}, limit=1):
a = div.findAll('a')[0]
hht = 'http://www.imdb.com' + a.attrs['href']
print(hht)
page = urllib.request.urlopen(hht)
soup2 = BeautifulSoup(page.read(), 'html.parser')
find = soup2.find
print("Title: " + find(itemprop='name').get_text().strip())
print("Duration: " + find(itemprop='duration').get_text().strip())
print("Director: " + find(itemprop='director').get_text().strip())
print("Genre: " + find(itemprop='genre').get_text().strip())
print("IMDB rating: " + find(itemprop='ratingValue').get_text().strip())
print("Summary: " + find(itemprop='description').get_text().strip())
| [
"[email protected]"
] | |
4681f93f39d6f4d7e12d2abc33f56032b610f0e0 | d50dec961435073f35bd89be322341862cf7ae6c | /enaml/qt/docking/q_dock_container.py | 7c95686787621238b95078e2b6c0b4259b34ad77 | [
"BSD-3-Clause"
] | permissive | johnelund/enaml | 19971d298b46c5c08f662110cb1c3b6bab976936 | 1e957da694e84d016a19c4866a1801ca04651fa5 | refs/heads/master | 2021-01-18T08:51:44.403979 | 2013-07-03T20:37:36 | 2013-07-03T20:37:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,875 | py | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Typed, Bool
from enaml.qt.QtCore import Qt, QMargins, QPoint, QRect, QEvent, Signal
from enaml.qt.QtGui import QApplication, QLayout, QIcon
from .q_dock_area import QDockArea
from .q_dock_frame import QDockFrame
from .q_dock_frame_layout import QDockFrameLayout
from .q_dock_tab_widget import QDockTabWidget
from .utils import repolish
class QDockContainerLayout(QDockFrameLayout):
""" A QDockFrameLayout subclass which works with a QDockContainer.
"""
def invalidate(self):
""" Invalidate the cached layout data.
"""
super(QDockContainerLayout, self).invalidate()
widget = self.getWidget()
if widget is not None:
self.parentWidget().setSizePolicy(widget.sizePolicy())
def _computePressPos(container, coeff):
""" Compute the press position for a title bar.
Parameters
----------
container : QDockContainer
The dock container which owns the title bar of interest.
coeff : float
A floating point value between 0.0 and 1.0 which is the
proportional x-offset of the mouse press in the title bar.
"""
margins = container.layout().contentsMargins()
button_width = 50 # general approximation
max_x = container.width() - margins.right() - button_width
test_x = int(coeff * container.width())
new_x = max(margins.left() + 5, min(test_x, max_x))
title_bar = container.dockItem().titleBarWidget()
title_height = title_bar.height() / 2
mid_title = title_bar.mapTo(container, QPoint(0, title_height))
return QPoint(new_x, mid_title.y())
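# Worked example (hypothetical numbers): for a 400px-wide container with
# 5px content margins and coeff == 0.9, test_x is int(0.9 * 400) == 360,
# but max_x is 400 - 5 - 50 == 345, so the press point is clamped to
# x == 345, keeping the cursor over the title bar text rather than on top
# of the title bar buttons while the container is dragged.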
class QDockContainer(QDockFrame):
""" A QDockFrame which holds a QDockItem instance.
A QDockContainer has a dynamic boolean property 'floating' which
can be used to apply custom stylesheet styling when the container
is a floating top level window versus docked in a dock area.
"""
#: A signal emitted when the container changes its toplevel state.
topLevelChanged = Signal(bool)
class FrameState(QDockFrame.FrameState):
""" A private class for managing container drag state.
"""
#: The original title bar press position.
press_pos = Typed(QPoint)
#: Whether or not the dock item is being dragged.
dragging = Bool(False)
#: Whether the dock item is maximized in the dock area.
item_is_maximized = Bool(False)
def __init__(self, manager, parent=None):
""" Initialize a QDockContainer.
Parameters
----------
manager : DockManager
The manager which owns the container.
parent : QWidget or None
The parent of the QDockContainer.
"""
super(QDockContainer, self).__init__(manager, parent)
layout = QDockContainerLayout()
layout.setSizeConstraint(QLayout.SetMinAndMaxSize)
self.setLayout(layout)
self.setProperty('floating', False)
self._dock_item = None
#--------------------------------------------------------------------------
# Reimplementations
#--------------------------------------------------------------------------
def titleBarGeometry(self):
""" Get the geometry rect for the title bar.
Returns
-------
result : QRect
The geometry rect for the title bar, expressed in frame
coordinates. An invalid rect is returned if title bar
should not be active.
"""
title_bar = self.dockItem().titleBarWidget()
if title_bar.isHidden():
return QRect()
pt = title_bar.mapTo(self, QPoint(0, 0))
return QRect(pt, title_bar.size())
def resizeMargins(self):
""" Get the margins to use for resizing the container.
Returns
-------
result : QMargins
The margins to use for container resizing when the container
is a top-level window.
"""
if self.isMaximized():
return QMargins()
return self.layout().contentsMargins()
def showMaximized(self):
""" Handle the show maximized request for the dock container.
"""
def update_buttons(bar, link=False):
buttons = bar.buttons()
buttons |= bar.RestoreButton
buttons &= ~bar.MaximizeButton
if link:
buttons &= ~bar.LinkButton
bar.setButtons(buttons)
if self.isWindow():
super(QDockContainer, self).showMaximized()
self.setLinked(False)
update_buttons(self.dockItem().titleBarWidget(), link=True)
else:
area = self.parentDockArea()
if area is not None:
item = self.dockItem()
update_buttons(item.titleBarWidget())
area.setMaximizedWidget(item)
self.frame_state.item_is_maximized = True
item.installEventFilter(self)
def showNormal(self):
""" Handle the show normal request for the dock container.
"""
def update_buttons(bar, link=False):
buttons = bar.buttons()
buttons |= bar.MaximizeButton
buttons &= ~bar.RestoreButton
if link:
buttons |= bar.LinkButton
bar.setButtons(buttons)
if self.isWindow():
super(QDockContainer, self).showNormal()
self.setLinked(False)
update_buttons(self.dockItem().titleBarWidget(), link=True)
elif self.frame_state.item_is_maximized:
item = self.dockItem()
update_buttons(item.titleBarWidget())
self.layout().setWidget(item)
self.frame_state.item_is_maximized = False
item.removeEventFilter(self)
#--------------------------------------------------------------------------
# Framework API
#--------------------------------------------------------------------------
def dockItem(self):
""" Get the dock item installed on the container.
Returns
-------
result : QDockItem or None
The dock item installed in the container, or None.
"""
return self._dock_item
def setDockItem(self, dock_item):
""" Set the dock item for the container.
Parameters
----------
dock_item : QDockItem
The dock item to use in the container.
"""
layout = self.layout()
old = layout.getWidget()
if old is not None:
old.maximizeButtonClicked.disconnect(self.showMaximized)
old.restoreButtonClicked.disconnect(self.showNormal)
old.closeButtonClicked.disconnect(self.close)
old.linkButtonToggled.disconnect(self.linkButtonToggled)
old.titleBarLeftDoubleClicked.disconnect(self.toggleMaximized)
if dock_item is not None:
dock_item.maximizeButtonClicked.connect(self.showMaximized)
dock_item.restoreButtonClicked.connect(self.showNormal)
dock_item.closeButtonClicked.connect(self.close)
dock_item.linkButtonToggled.connect(self.linkButtonToggled)
dock_item.titleBarLeftDoubleClicked.connect(self.toggleMaximized)
layout.setWidget(dock_item)
self._dock_item = dock_item
def title(self):
""" Get the title for the container.
This proxies the call to the underlying dock item.
"""
item = self.dockItem()
if item is not None:
return item.title()
return u''
def icon(self):
""" Get the icon for the container.
This proxies the call to the underlying dock item.
"""
item = self.dockItem()
if item is not None:
return item.icon()
return QIcon()
def closable(self):
""" Get whether or not the container is closable.
This proxies the call to the underlying dock item.
"""
item = self.dockItem()
if item is not None:
return item.closable()
return True
def isLinked(self):
""" Get whether or not the container is linked.
This proxies the call to the underlying dock item.
"""
item = self.dockItem()
if item is not None:
return item.isLinked()
return False
def setLinked(self, linked):
""" Set whether or not the container should be linked.
This proxies the call to the underlying dock item.
"""
item = self.dockItem()
if item is not None:
item.setLinked(linked)
def showTitleBar(self):
""" Show the title bar for the container.
This proxies the call to the underlying dock item.
"""
item = self.dockItem()
if item is not None:
item.titleBarWidget().show()
def hideTitleBar(self):
""" Hide the title bar for the container.
This proxies the call to the underlying dock item.
"""
item = self.dockItem()
if item is not None:
item.titleBarWidget().hide()
def showLinkButton(self):
""" Show the link button on the title bar.
"""
item = self.dockItem()
if item is not None:
bar = item.titleBarWidget()
bar.setButtons(bar.buttons() | bar.LinkButton)
def hideLinkButton(self):
""" Show the link button on the title bar.
"""
item = self.dockItem()
if item is not None:
bar = item.titleBarWidget()
bar.setButtons(bar.buttons() & ~bar.LinkButton)
def toggleMaximized(self):
""" Toggle the maximized state of the container.
"""
is_win = self.isWindow()
is_maxed = self.isMaximized()
item_maxed = self.frame_state.item_is_maximized
if is_win and is_maxed or item_maxed:
self.showNormal()
else:
self.showMaximized()
def reset(self):
""" Reset the container to the initial pre-docked state.
"""
state = self.frame_state
state.dragging = False
state.press_pos = None
self.showNormal()
self.unfloat()
self.hideLinkButton()
self.setLinked(False)
self.showTitleBar()
self.setAttribute(Qt.WA_WState_ExplicitShowHide, False)
self.setAttribute(Qt.WA_WState_Hidden, False)
def float(self):
""" Set the window state to be a toplevel floating window.
"""
self.hide()
self.setAttribute(Qt.WA_Hover, True)
flags = Qt.Tool | Qt.FramelessWindowHint
self.setParent(self.manager().dock_area(), flags)
self.layout().setContentsMargins(QMargins(5, 5, 5, 5))
self.setProperty('floating', True)
self.setLinked(False)
self.showLinkButton()
repolish(self)
self.topLevelChanged.emit(True)
def unfloat(self):
""" Set the window state to be non-floating window.
"""
self.hide()
self.setAttribute(Qt.WA_Hover, False)
self.setParent(self.manager().dock_area(), Qt.Widget)
self.layout().setContentsMargins(QMargins(0, 0, 0, 0))
self.unsetCursor()
self.setProperty('floating', False)
self.setLinked(False)
self.hideLinkButton()
repolish(self)
self.topLevelChanged.emit(False)
def parentDockArea(self):
""" Get the parent dock area of the container.
Returns
-------
result : QDockArea or None
The nearest ancestor which is an instance of QDockArea, or
None if no such ancestor exists.
"""
parent = self.parent()
while parent is not None:
if isinstance(parent, QDockArea):
return parent
parent = parent.parent()
def parentDockTabWidget(self):
""" Get the parent dock area of the container.
Returns
-------
result : QDockTabWidget or None
The nearest ancestor which is an instance of QDockTabWidget,
or None if no such ancestor exists.
"""
parent = self.parent()
while parent is not None:
if isinstance(parent, QDockTabWidget):
return parent
parent = parent.parent()
def unplug(self):
""" Unplug the container from its containing dock area.
This method is invoked by the framework when appropriate. It
should not need to be called by user code.
Returns
-------
result : bool
True if the container was unplugged, False otherwise.
"""
dock_area = self.parentDockArea()
if dock_area is None:
return False
# avoid a circular import
from .layout_handling import unplug_container
return unplug_container(dock_area, self)
def untab(self, pos):
""" Unplug the container from a tab control.
This method is invoked by the QDockTabBar when the container
should be torn out. It synthesizes the appropriate internal
state so that the item can continue to be dock dragged. This
method should not be called by user code.
Parameters
----------
pos : QPoint
The global mouse position.
Returns
-------
result : bool
True on success, False otherwise.
"""
        if not self.unplug():
            return False
state = self.frame_state
state.mouse_title = True
state.dragging = True
self.float()
self.raiseFrame()
title_bar = self.dockItem().titleBarWidget()
title_pos = QPoint(title_bar.width() / 2, title_bar.height() / 2)
margins = self.layout().contentsMargins()
offset = QPoint(margins.left(), margins.top())
state.press_pos = title_bar.mapTo(self, title_pos) + offset
self.move(pos - state.press_pos)
self.show()
self.grabMouse()
self.activateWindow()
        self.raise_()
        return True
#--------------------------------------------------------------------------
# Event Handlers
#--------------------------------------------------------------------------
def eventFilter(self, obj, event):
""" Filter the events for the dock item.
This filter will proxy out the mouse events for the dock item.
This event filter will only be activated when the dock item is
        set to maximized mode.
"""
if obj is not self._dock_item:
return False
if event.type() == QEvent.MouseButtonPress:
return self.filteredMousePressEvent(event)
elif event.type() == QEvent.MouseMove:
return self.filteredMouseMoveEvent(event)
elif event.type() == QEvent.MouseButtonRelease:
return self.filteredMouseReleaseEvent(event)
return False
def filteredMousePressEvent(self, event):
""" Handle the filtered mouse press event for the dock item.
"""
bar = self.dockItem().titleBarWidget()
if bar.isVisible() and bar.geometry().contains(event.pos()):
self.frame_state.mouse_title = True
return self.titleBarMousePressEvent(event)
return False
def filteredMouseMoveEvent(self, event):
""" Handle the filtered mouse move event for the dock item.
"""
if self.frame_state.mouse_title:
return self.titleBarMouseMoveEvent(event)
return False
def filteredMouseReleaseEvent(self, event):
""" Handle the filtered mouse release event for the dock item.
"""
if self.frame_state.mouse_title:
self.frame_state.mouse_title = False
return self.titleBarMouseReleaseEvent(event)
return False
def closeEvent(self, event):
""" Handle the close event for the dock container.
"""
self.manager().close_container(self, event)
def titleBarMousePressEvent(self, event):
""" Handle a mouse press event on the title bar.
Returns
-------
result : bool
True if the event is handled, False otherwise.
"""
if event.button() == Qt.LeftButton:
state = self.frame_state
if state.press_pos is None:
state.press_pos = event.pos()
return True
return False
def titleBarMouseMoveEvent(self, event):
""" Handle a mouse move event on the title bar.
Returns
-------
result : bool
True if the event is handled, False otherwise.
"""
state = self.frame_state
if state.press_pos is None:
return False
        # If dragging and floating, move the container's position and
        # notify the manager that the container was mouse moved. If the
        # container is maximized, it is first restored before moving.
global_pos = event.globalPos()
if state.dragging:
if self.isWindow():
target_pos = global_pos - state.press_pos
self.manager().drag_move_frame(self, target_pos, global_pos)
return True
# Ensure the drag has crossed the app drag threshold.
dist = (event.pos() - state.press_pos).manhattanLength()
if dist <= QApplication.startDragDistance():
return True
# If the container is already floating, ensure that it is shown
# normal size. The next move event will move the window.
state.dragging = True
if self.isWindow():
if self.isMaximized():
coeff = state.press_pos.x() / float(self.width())
self.showNormal()
state.press_pos = _computePressPos(self, coeff)
return True
# Restore a maximized dock item before unplugging.
if state.item_is_maximized:
bar = self.dockItem().titleBarWidget()
coeff = state.press_pos.x() / float(bar.width())
self.showNormal()
state.press_pos = _computePressPos(self, coeff)
# Unplug the container from the layout before floating so
# that layout widgets can clean themselves up when empty.
if not self.unplug():
return False
        # Make the container a toplevel frame, update its Z-order,
# and grab the mouse to continue processing drag events.
self.float()
self.raiseFrame()
margins = self.layout().contentsMargins()
state.press_pos += QPoint(0, margins.top())
self.move(global_pos - state.press_pos)
self.show()
self.grabMouse()
self.activateWindow()
self.raise_()
return True
def titleBarMouseReleaseEvent(self, event):
""" Handle a mouse release event on the title bar.
Returns
-------
result : bool
True if the event is handled, False otherwise.
"""
if event.button() == Qt.LeftButton:
state = self.frame_state
if state.press_pos is not None:
self.releaseMouse()
if self.isWindow():
self.manager().drag_release_frame(self, event.globalPos())
state.dragging = False
state.press_pos = None
return True
return False
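# --- Illustrative sketch (not part of the original enaml module) ---
# A small helper showing how the QDockContainer API above composes: unplug a
# docked container from its dock area and show it as a floating window at a
# given global position. The `container` argument is assumed to be an existing
# QDockContainer owned by a DockManager; error handling is minimal by design.
def _float_container_at(container, global_pos):
    """ Float a docked container as a top-level window at `global_pos`. """
    if container.isWindow():
        # Already floating; just move it.
        container.move(global_pos)
        return True
    if not container.unplug():
        return False
    container.float()
    container.raiseFrame()
    container.move(global_pos)
    container.show()
    return True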
| [
"[email protected]"
] | |
08bb23fdc4d27bf24fc8acba539dc31a6c16a40d | 0412893529999de784ab9cb914f385ba788a3684 | /logicmonitor_sdk/models/service_alert.py | f140a8c85cf85576b2c428c2b3678d325ef7a839 | [
"Apache-2.0"
] | permissive | JeremyTangCD/lm-sdk-python | 0326bf034c16b022b760600dc18fe7aaad42fa26 | 2a15e055e5a3f72d2f2e4fb43bdbed203c5a9983 | refs/heads/master | 2020-04-15T15:39:59.276224 | 2019-01-09T09:55:36 | 2019-01-09T09:55:36 | 164,803,314 | 0 | 0 | Apache-2.0 | 2019-01-09T09:58:55 | 2019-01-09T06:33:40 | Python | UTF-8 | Python | false | false | 14,296 | py | # coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from logicmonitor_sdk.models.widget import Widget # noqa: F401,E501
class ServiceAlert(Widget):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'last_updated_by': 'str',
'user_permission': 'str',
'dashboard_id': 'int',
'name': 'str',
'description': 'str',
'last_updated_on': 'int',
'theme': 'str',
'interval': 'int',
'id': 'int',
'type': 'str',
'timescale': 'str',
'device_id': 'int',
'device_display_name': 'str'
}
attribute_map = {
'last_updated_by': 'lastUpdatedBy',
'user_permission': 'userPermission',
'dashboard_id': 'dashboardId',
'name': 'name',
'description': 'description',
'last_updated_on': 'lastUpdatedOn',
'theme': 'theme',
'interval': 'interval',
'id': 'id',
'type': 'type',
'timescale': 'timescale',
'device_id': 'deviceId',
'device_display_name': 'deviceDisplayName'
}
def __init__(self, last_updated_by=None, user_permission=None, dashboard_id=None, name=None, description=None, last_updated_on=None, theme=None, interval=None, id=None, type=None, timescale=None, device_id=None, device_display_name=None): # noqa: E501
"""ServiceAlert - a model defined in Swagger""" # noqa: E501
self._last_updated_by = None
self._user_permission = None
self._dashboard_id = None
self._name = None
self._description = None
self._last_updated_on = None
self._theme = None
self._interval = None
self._id = None
self._type = None
self._timescale = None
self._device_id = None
self._device_display_name = None
self.discriminator = None
if last_updated_by is not None:
self.last_updated_by = last_updated_by
if user_permission is not None:
self.user_permission = user_permission
self.dashboard_id = dashboard_id
self.name = name
if description is not None:
self.description = description
if last_updated_on is not None:
self.last_updated_on = last_updated_on
if theme is not None:
self.theme = theme
if interval is not None:
self.interval = interval
if id is not None:
self.id = id
self.type = type
if timescale is not None:
self.timescale = timescale
self.device_id = device_id
if device_display_name is not None:
self.device_display_name = device_display_name
@property
def last_updated_by(self):
"""Gets the last_updated_by of this ServiceAlert. # noqa: E501
The user that last updated the widget # noqa: E501
:return: The last_updated_by of this ServiceAlert. # noqa: E501
:rtype: str
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, last_updated_by):
"""Sets the last_updated_by of this ServiceAlert.
The user that last updated the widget # noqa: E501
:param last_updated_by: The last_updated_by of this ServiceAlert. # noqa: E501
:type: str
"""
self._last_updated_by = last_updated_by
@property
def user_permission(self):
"""Gets the user_permission of this ServiceAlert. # noqa: E501
The permission level of the user who last modified the widget # noqa: E501
:return: The user_permission of this ServiceAlert. # noqa: E501
:rtype: str
"""
return self._user_permission
@user_permission.setter
def user_permission(self, user_permission):
"""Sets the user_permission of this ServiceAlert.
The permission level of the user who last modified the widget # noqa: E501
:param user_permission: The user_permission of this ServiceAlert. # noqa: E501
:type: str
"""
self._user_permission = user_permission
@property
def dashboard_id(self):
"""Gets the dashboard_id of this ServiceAlert. # noqa: E501
The id of the dashboard the widget belongs to # noqa: E501
:return: The dashboard_id of this ServiceAlert. # noqa: E501
:rtype: int
"""
return self._dashboard_id
@dashboard_id.setter
def dashboard_id(self, dashboard_id):
"""Sets the dashboard_id of this ServiceAlert.
The id of the dashboard the widget belongs to # noqa: E501
:param dashboard_id: The dashboard_id of this ServiceAlert. # noqa: E501
:type: int
"""
if dashboard_id is None:
raise ValueError("Invalid value for `dashboard_id`, must not be `None`") # noqa: E501
self._dashboard_id = dashboard_id
@property
def name(self):
"""Gets the name of this ServiceAlert. # noqa: E501
The name of the widget # noqa: E501
:return: The name of this ServiceAlert. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ServiceAlert.
The name of the widget # noqa: E501
:param name: The name of this ServiceAlert. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this ServiceAlert. # noqa: E501
The description of the widget # noqa: E501
:return: The description of this ServiceAlert. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ServiceAlert.
The description of the widget # noqa: E501
:param description: The description of this ServiceAlert. # noqa: E501
:type: str
"""
self._description = description
@property
def last_updated_on(self):
"""Gets the last_updated_on of this ServiceAlert. # noqa: E501
The time that corresponds to when the widget was last updated, in epoch format # noqa: E501
:return: The last_updated_on of this ServiceAlert. # noqa: E501
:rtype: int
"""
return self._last_updated_on
@last_updated_on.setter
def last_updated_on(self, last_updated_on):
"""Sets the last_updated_on of this ServiceAlert.
The time that corresponds to when the widget was last updated, in epoch format # noqa: E501
:param last_updated_on: The last_updated_on of this ServiceAlert. # noqa: E501
:type: int
"""
self._last_updated_on = last_updated_on
@property
def theme(self):
"""Gets the theme of this ServiceAlert. # noqa: E501
The color scheme of the widget. Options are: borderPurple | borderGray | borderBlue | solidPurple | solidGray | solidBlue | simplePurple | simpleBlue | simpleGray | newBorderGray | newBorderBlue | newBorderDarkBlue | newSolidGray | newSolidBlue | newSolidDarkBlue | newSimpleGray | newSimpleBlue |newSimpleDarkBlue # noqa: E501
:return: The theme of this ServiceAlert. # noqa: E501
:rtype: str
"""
return self._theme
@theme.setter
def theme(self, theme):
"""Sets the theme of this ServiceAlert.
The color scheme of the widget. Options are: borderPurple | borderGray | borderBlue | solidPurple | solidGray | solidBlue | simplePurple | simpleBlue | simpleGray | newBorderGray | newBorderBlue | newBorderDarkBlue | newSolidGray | newSolidBlue | newSolidDarkBlue | newSimpleGray | newSimpleBlue |newSimpleDarkBlue # noqa: E501
:param theme: The theme of this ServiceAlert. # noqa: E501
:type: str
"""
self._theme = theme
@property
def interval(self):
"""Gets the interval of this ServiceAlert. # noqa: E501
The refresh interval of the widget, in minutes # noqa: E501
:return: The interval of this ServiceAlert. # noqa: E501
:rtype: int
"""
return self._interval
@interval.setter
def interval(self, interval):
"""Sets the interval of this ServiceAlert.
The refresh interval of the widget, in minutes # noqa: E501
:param interval: The interval of this ServiceAlert. # noqa: E501
:type: int
"""
self._interval = interval
@property
def id(self):
"""Gets the id of this ServiceAlert. # noqa: E501
The Id of the widget # noqa: E501
:return: The id of this ServiceAlert. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ServiceAlert.
The Id of the widget # noqa: E501
:param id: The id of this ServiceAlert. # noqa: E501
:type: int
"""
self._id = id
@property
def type(self):
"""Gets the type of this ServiceAlert. # noqa: E501
alert | deviceNOC | html | serviceOverallStatus | sgraph | ngraph | serviceNOC | serviceSLA | bigNumber | gmap | serviceIndividualStatus | gauge | pieChart | ngraph | batchjob # noqa: E501
:return: The type of this ServiceAlert. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ServiceAlert.
alert | deviceNOC | html | serviceOverallStatus | sgraph | ngraph | serviceNOC | serviceSLA | bigNumber | gmap | serviceIndividualStatus | gauge | pieChart | ngraph | batchjob # noqa: E501
:param type: The type of this ServiceAlert. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def timescale(self):
"""Gets the timescale of this ServiceAlert. # noqa: E501
The default timescale of the widget # noqa: E501
:return: The timescale of this ServiceAlert. # noqa: E501
:rtype: str
"""
return self._timescale
@timescale.setter
def timescale(self, timescale):
"""Sets the timescale of this ServiceAlert.
The default timescale of the widget # noqa: E501
:param timescale: The timescale of this ServiceAlert. # noqa: E501
:type: str
"""
self._timescale = timescale
@property
def device_id(self):
"""Gets the device_id of this ServiceAlert. # noqa: E501
:return: The device_id of this ServiceAlert. # noqa: E501
:rtype: int
"""
return self._device_id
@device_id.setter
def device_id(self, device_id):
"""Sets the device_id of this ServiceAlert.
:param device_id: The device_id of this ServiceAlert. # noqa: E501
:type: int
"""
if device_id is None:
raise ValueError("Invalid value for `device_id`, must not be `None`") # noqa: E501
self._device_id = device_id
@property
def device_display_name(self):
"""Gets the device_display_name of this ServiceAlert. # noqa: E501
:return: The device_display_name of this ServiceAlert. # noqa: E501
:rtype: str
"""
return self._device_display_name
@device_display_name.setter
def device_display_name(self, device_display_name):
"""Sets the device_display_name of this ServiceAlert.
:param device_display_name: The device_display_name of this ServiceAlert. # noqa: E501
:type: str
"""
self._device_display_name = device_display_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ServiceAlert, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ServiceAlert):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
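# --- Illustrative usage sketch (not part of the generated module) ---
# Construct a ServiceAlert widget model and serialize it. The required fields
# (dashboard_id, name, type, device_id) are the ones whose setters above
# reject `None`; the concrete values used here are hypothetical.
def _example_service_alert():
    """Build a ServiceAlert and return its dict representation."""
    widget = ServiceAlert(
        dashboard_id=1,
        name='NOC status',
        type='serviceNOC',
        device_id=42,
        description='Example widget')
    return widget.to_dict()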
| [
"[email protected]"
] | |
8b4b8b5b6d763fd2a7db57022a79bde58116674a | 9692a20a1e7a224a72785e4495f31421639b9f3b | /frex/stores/sparql_queryable.py | ca7711df4642f8043fbcf8e36450204ce9c9d5df | [] | no_license | solashirai/FREx | 6b0cb040930761a0e269f4591d7dde36e3f636d1 | 36ad09a0cb0020661ee990c7800bafd110e2ec04 | refs/heads/master | 2023-08-14T08:49:49.270281 | 2021-09-29T14:58:23 | 2021-09-29T14:58:23 | 291,760,109 | 0 | 0 | null | 2021-09-24T22:41:19 | 2020-08-31T15:57:47 | Python | UTF-8 | Python | false | false | 526 | py | from abc import ABC, abstractmethod
from rdflib.query import Result
class SparqlQueryable(ABC):
"""
SparqlQueryable is the base class for stores that can be queried in some way using SPARQL queries.
"""
@abstractmethod
def query(self, *, sparql: str) -> Result:
"""
Query the sparql queryable and retrieve a result.
:param sparql: A string containing valid SPARQL to query.
:return: A Result containing the result from calling the SPARQL query.
"""
pass
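# --- Illustrative sketch (not part of the frex package) ---
# A minimal concrete SparqlQueryable backed by an in-memory rdflib Graph.
# The class name and constructor are hypothetical and shown only to
# demonstrate how the abstract `query` method can be satisfied.
from rdflib import Graph

class InMemoryGraphQueryable(SparqlQueryable):
    """ SparqlQueryable that runs SPARQL queries against a local rdflib Graph. """

    def __init__(self, *, graph: Graph):
        self.graph = graph

    def query(self, *, sparql: str) -> Result:
        """ Execute the SPARQL query against the wrapped graph. """
        return self.graph.query(sparql)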
| [
"[email protected]"
] | |
58bcf3d3d7a9e42fa01ca8b29a710f6e81cfde90 | b086a1caa4e3457c1faa0889d7a7291e653a0248 | /tests/test_decontaminate.py | ed588a42754b1f20fbaafdd1f11bfdc4e4ef65af | [
"MIT"
] | permissive | hover2pi/specialsoss | a29381bbfcf7cc15a82e0aba8e607b99192dc48f | 6afde9fbd83bb33afa9e606e681c330b64e64aa2 | refs/heads/master | 2023-01-12T19:22:03.636104 | 2022-11-30T18:51:16 | 2022-11-30T18:51:16 | 152,112,781 | 1 | 1 | MIT | 2022-12-26T20:46:35 | 2018-10-08T16:36:32 | Jupyter Notebook | UTF-8 | Python | false | false | 500 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `decontaminate` module."""
import unittest
from pkg_resources import resource_filename
import numpy as np
from specialsoss import decontaminate
class TestDecontaminate(unittest.TestCase):
"""Test functions in decontaminate.py"""
def setUp(self):
"""Test instance setup"""
# Get files for testing
self.frame = np.ones((256, 2048))
self.tso3d = np.ones((4, 256, 2048))
self.tso4d = np.ones((2, 2, 256, 2048))
| [
"[email protected]"
] | |
28695cb41961f9b7f6d93c97584d9999c98e5d78 | cb4c67ff2ad27834bed67f7e920a12fb1c545fcd | /tensorflow/python/keras/callbacks.py | f2feeb85a1e0db976a1c29e47257558ba40cc856 | [
"Apache-2.0"
] | permissive | ahoneybun/tensorflow | b67668cc0d9375eaab3bee4ed0791626f6eac02d | 5134e65300d1ac384eeb1f4ca72a011ad7225bc8 | refs/heads/master | 2020-03-26T00:53:51.554467 | 2018-08-13T13:36:55 | 2018-08-13T13:36:55 | 144,342,477 | 0 | 0 | Apache-2.0 | 2018-08-13T13:36:56 | 2018-08-11T00:07:43 | C++ | UTF-8 | Python | false | false | 51,653 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
from collections import Iterable
from collections import OrderedDict
import copy
import csv
import json
import math
import os
import time
import numpy as np
import six
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.training_utils import standardize_input_data
from tensorflow.python.keras.utils.data_utils import Sequence
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as tf_summary
from tensorflow.python.training import saver
from tensorflow.python.util.tf_export import tf_export
try:
import requests
except ImportError:
requests = None
def configure_callbacks(callbacks,
model,
do_validation=False,
val_inputs=None,
val_targets=None,
val_sample_weights=None,
batch_size=None,
epochs=None,
steps_per_epoch=None,
samples=None,
validation_steps=None,
verbose=1,
count_mode='steps'):
"""Configures callbacks for use in various training loops.
Arguments:
callbacks: List of Callbacks.
model: Model being trained.
do_validation: Whether or not validation loop will be run.
val_inputs: Inputs to Model for validation loop. Can be any
data format Keras accepts.
val_targets: Targets for Model for validation loop. Can be any
data format Keras accepts.
val_sample_weights: Sample weights for Model for validation loop.
Can be any data format Keras accepts.
batch_size: Number of samples per batch.
epochs: Number of epoch to train.
steps_per_epoch: Number of batches to run per training epoch.
samples: Number of training samples.
validation_steps: Number of batches to run per validation epoch.
verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
count_mode: One of 'steps' or 'samples'. Per-batch or per-sample count.
Returns:
Instance of CallbackList used to control all Callbacks.
"""
# Add additional callbacks
model.history = History()
stateful_metric_names = None
if hasattr(model, 'stateful_metric_names'):
stateful_metric_names = model.stateful_metric_names
callbacks = [BaseLogger(stateful_metrics=stateful_metric_names)
] + (callbacks or []) + [model.history]
if verbose:
callbacks.append(
ProgbarLogger(count_mode, stateful_metrics=stateful_metric_names))
callback_list = CallbackList(callbacks)
# Set callback model
callback_model = model._get_callback_model() # pylint: disable=protected-access
if do_validation and val_inputs and not context.executing_eagerly():
# Need to create the test_function before start of the first epoch
# because TensorBoard callback on_epoch_begin adds summary to the
# list of fetches of the test_function
callback_model._make_test_function() # pylint: disable=protected-access
callback_list.set_model(callback_model)
# Set callback parameters
callback_metrics = []
# When we have deferred build scenario with iterator input, we will compile
# when we standardize first batch of data.
if model._is_compiled: # pylint: disable=protected-access
callback_metrics = copy.copy(model.metrics_names)
if do_validation:
callback_metrics += ['val_' + n for n in model.metrics_names]
if validation_steps is None and isinstance(val_inputs, Sequence):
validation_steps = len(val_inputs)
callback_params = {
'batch_size': batch_size,
'epochs': epochs,
'steps': steps_per_epoch,
'samples': samples,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
'validation_steps': validation_steps
}
callback_list.set_params(callback_params)
# Pass validation data to callbacks
if not val_inputs:
val_data = []
elif _is_generator_like(val_inputs):
val_data = val_inputs
else:
val_data = val_inputs + val_targets
if val_sample_weights:
val_data += val_sample_weights
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_data += [0.]
for cbk in callbacks:
cbk.validation_data = val_data
callback_list.model.stop_training = False
return callback_list
def _is_generator_like(data):
"""Checks if data is a generator, Sequence, or Iterator."""
return (hasattr(data, 'next') or hasattr(data, '__next__') or isinstance(
data, (Sequence, iterator_ops.Iterator, iterator_ops.EagerIterator)))
class CallbackList(object):
"""Container abstracting a list of callbacks.
Arguments:
callbacks: List of `Callback` instances.
queue_length: Queue length for keeping
running statistics over callback execution time.
"""
def __init__(self, callbacks=None, queue_length=10):
callbacks = callbacks or []
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
self.params = {}
self.model = None
def append(self, callback):
self.callbacks.append(callback)
def set_params(self, params):
self.params = params
for callback in self.callbacks:
callback.set_params(params)
def set_model(self, model):
self.model = model
for callback in self.callbacks:
callback.set_model(model)
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
Arguments:
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
self._delta_t_batch = 0.
self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
self._delta_ts_batch_end = deque([], maxlen=self.queue_length)
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
Arguments:
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_batch_begin(self, batch, logs=None):
"""Called right before processing a batch.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_begin(batch, logs)
self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_begin)
if (self._delta_t_batch > 0. and
delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1):
logging.warning('Method on_batch_begin() is slow compared '
'to the batch update (%f). Check your callbacks.',
delta_t_median)
self._t_enter_batch = time.time()
def on_batch_end(self, batch, logs=None):
"""Called at the end of a batch.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
if not hasattr(self, '_t_enter_batch'):
self._t_enter_batch = time.time()
self._delta_t_batch = time.time() - self._t_enter_batch
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_end(batch, logs)
self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_end)
if (self._delta_t_batch > 0. and
(delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1)):
logging.warning('Method on_batch_end() is slow compared '
'to the batch update (%f). Check your callbacks.',
delta_t_median)
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
Arguments:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_train_end(self, logs=None):
"""Called at the end of training.
Arguments:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_end(logs)
def __iter__(self):
return iter(self.callbacks)
@tf_export('keras.callbacks.Callback')
class Callback(object):
"""Abstract base class used to build new callbacks.
Attributes:
params: dict. Training parameters
(eg. verbosity, batch size, number of epochs...).
model: instance of `keras.models.Model`.
Reference of the model being trained.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch.
Currently, the `.fit()` method of the `Sequential` model class
will include the following quantities in the `logs` that
it passes to its callbacks:
on_epoch_end: logs include `acc` and `loss`, and
optionally include `val_loss`
(if validation is enabled in `fit`), and `val_acc`
(if validation and accuracy monitoring are enabled).
on_batch_begin: logs include `size`,
the number of samples in the current batch.
on_batch_end: logs include `loss`, and optionally `acc`
(if accuracy monitoring is enabled).
"""
def __init__(self):
self.validation_data = None
self.model = None
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
def on_epoch_begin(self, epoch, logs=None):
pass
def on_epoch_end(self, epoch, logs=None):
pass
def on_batch_begin(self, batch, logs=None):
pass
def on_batch_end(self, batch, logs=None):
pass
def on_train_begin(self, logs=None):
pass
def on_train_end(self, logs=None):
pass
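# --- Illustrative sketch (not part of the module) ---
# A minimal user-defined callback built on the base class above: it records
# the loss reported after every training batch. Usage sketch (the `model`,
# `x` and `y` names are hypothetical):
#   model.fit(x, y, callbacks=[_ExampleLossHistory()])
class _ExampleLossHistory(Callback):
  """Example callback that collects per-batch training loss values."""

  def on_train_begin(self, logs=None):
    self.batch_losses = []

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    self.batch_losses.append(logs.get('loss'))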
@tf_export('keras.callbacks.BaseLogger')
class BaseLogger(Callback):
"""Callback that accumulates epoch averages of metrics.
This callback is automatically applied to every Keras model.
Arguments:
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over an epoch.
Metrics in this list will be logged as-is in `on_epoch_end`.
All others will be averaged in `on_epoch_end`.
"""
def __init__(self, stateful_metrics=None):
super(BaseLogger, self).__init__()
self.stateful_metrics = set(stateful_metrics or [])
def on_epoch_begin(self, epoch, logs=None):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
self.seen += batch_size
for k, v in logs.items():
if k in self.stateful_metrics:
self.totals[k] = v
else:
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
def on_epoch_end(self, epoch, logs=None):
if logs is not None:
for k in self.params['metrics']:
if k in self.totals:
# Make value available to next callbacks.
if k in self.stateful_metrics:
logs[k] = self.totals[k]
else:
logs[k] = self.totals[k] / self.seen
@tf_export('keras.callbacks.TerminateOnNaN')
class TerminateOnNaN(Callback):
"""Callback that terminates training when a NaN loss is encountered.
"""
def on_batch_end(self, batch, logs=None):
logs = logs or {}
loss = logs.get('loss')
if loss is not None:
if np.isnan(loss) or np.isinf(loss):
print('Batch %d: Invalid loss, terminating training' % (batch))
self.model.stop_training = True
@tf_export('keras.callbacks.ProgbarLogger')
class ProgbarLogger(Callback):
"""Callback that prints metrics to stdout.
Arguments:
count_mode: One of "steps" or "samples".
Whether the progress bar should
count samples seen or steps (batches) seen.
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over an epoch.
Metrics in this list will be logged as-is.
All others will be averaged over time (e.g. loss, etc).
Raises:
ValueError: In case of invalid `count_mode`.
"""
def __init__(self, count_mode='samples', stateful_metrics=None):
super(ProgbarLogger, self).__init__()
if count_mode == 'samples':
self.use_steps = False
elif count_mode == 'steps':
self.use_steps = True
else:
raise ValueError('Unknown `count_mode`: ' + str(count_mode))
self.stateful_metrics = set(stateful_metrics or [])
def on_train_begin(self, logs=None):
self.verbose = self.params['verbose']
self.epochs = self.params['epochs']
def on_epoch_begin(self, epoch, logs=None):
if self.verbose:
print('Epoch %d/%d' % (epoch + 1, self.epochs))
if self.use_steps:
target = self.params['steps']
else:
target = self.params['samples']
self.target = target
self.progbar = Progbar(
target=self.target,
verbose=self.verbose,
stateful_metrics=self.stateful_metrics)
self.seen = 0
def on_batch_begin(self, batch, logs=None):
if self.seen < self.target:
self.log_values = []
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
if self.use_steps:
self.seen += 1
else:
self.seen += batch_size
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
# Skip progbar update for the last batch;
# will be handled by on_epoch_end.
if self.verbose and self.seen < self.target:
self.progbar.update(self.seen, self.log_values)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
if self.verbose:
self.progbar.update(self.seen, self.log_values)
@tf_export('keras.callbacks.History')
class History(Callback):
"""Callback that records events into a `History` object.
This callback is automatically applied to
every Keras model. The `History` object
gets returned by the `fit` method of models.
"""
def on_train_begin(self, logs=None):
self.epoch = []
self.history = {}
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epoch.append(epoch)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
@tf_export('keras.callbacks.ModelCheckpoint')
class ModelCheckpoint(Callback):
"""Save the model after every epoch.
`filepath` can contain named formatting options,
  which will be filled with the value of `epoch` and
keys in `logs` (passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
then the model checkpoints will be saved with the epoch number and
the validation loss in the filename.
Arguments:
filepath: string, path to save the model file.
monitor: quantity to monitor.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`,
the latest best model according to
the quantity monitored will not be overwritten.
mode: one of {auto, min, max}.
If `save_best_only=True`, the decision
to overwrite the current save file is made
based on either the maximization or the
minimization of the monitored quantity. For `val_acc`,
this should be `max`, for `val_loss` this should
be `min`, etc. In `auto` mode, the direction is
automatically inferred from the name of the monitored quantity.
save_weights_only: if True, then only the model's weights will be
saved (`model.save_weights(filepath)`), else the full model
is saved (`model.save(filepath)`).
period: Interval (number of epochs) between checkpoints.
"""
def __init__(self,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
period=1):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.period = period
self.epochs_since_last_save = 0
if mode not in ['auto', 'min', 'max']:
logging.warning('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.', mode)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
filepath = self.filepath.format(epoch=epoch + 1, **logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
logging.warning('Can save best model only with %s available, '
'skipping.', self.monitor)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('\nEpoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s' % (epoch + 1, self.monitor, self.best,
current, filepath))
self.best = current
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
else:
if self.verbose > 0:
print('\nEpoch %05d: %s did not improve from %0.5f' %
(epoch + 1, self.monitor, self.best))
else:
if self.verbose > 0:
print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
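# --- Illustrative usage sketch (not part of the module) ---
# Save only the best model, with the epoch number and validation loss
# formatted into the filename as described in the docstring above.
# `model`, `x_train` and `y_train` are hypothetical names assumed to exist.
def _example_checkpoint_usage(model, x_train, y_train):
  """Fit `model` while checkpointing the best weights seen so far."""
  checkpoint = ModelCheckpoint(
      'weights.{epoch:02d}-{val_loss:.2f}.hdf5',
      monitor='val_loss',
      save_best_only=True,
      verbose=1)
  model.fit(x_train, y_train, validation_split=0.2, epochs=10,
            callbacks=[checkpoint])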
@tf_export('keras.callbacks.EarlyStopping')
class EarlyStopping(Callback):
"""Stop training when a monitored quantity has stopped improving.
Arguments:
monitor: quantity to be monitored.
min_delta: minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than min_delta, will count as no
improvement.
patience: number of epochs with no improvement
after which training will be stopped.
verbose: verbosity mode.
mode: one of {auto, min, max}. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
baseline: baseline value for the monitored quantity.
Training will stop if the model doesn't show improvement over the
baseline.
"""
def __init__(self,
monitor='val_loss',
min_delta=0,
patience=0,
verbose=0,
mode='auto',
baseline=None):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.baseline = baseline
self.min_delta = abs(min_delta)
self.wait = 0
self.stopped_epoch = 0
if mode not in ['auto', 'min', 'max']:
logging.warning('EarlyStopping mode %s is unknown, '
'fallback to auto mode.', mode)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
elif mode == 'max':
self.monitor_op = np.greater
else:
if 'acc' in self.monitor:
self.monitor_op = np.greater
else:
self.monitor_op = np.less
if self.monitor_op == np.greater:
self.min_delta *= 1
else:
self.min_delta *= -1
def on_train_begin(self, logs=None):
# Allow instances to be re-used
self.wait = 0
self.stopped_epoch = 0
if self.baseline is not None:
self.best = self.baseline
else:
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def on_epoch_end(self, epoch, logs=None):
current = logs.get(self.monitor)
if current is None:
logging.warning('Early stopping conditioned on metric `%s` '
'which is not available. Available metrics are: %s',
self.monitor, ','.join(list(logs.keys())))
return
if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))
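# --- Illustrative usage sketch (not part of the module) ---
# Stop training once `val_loss` has not improved by at least `min_delta`
# for `patience` consecutive epochs. `model`, `x_train` and `y_train` are
# hypothetical names assumed to exist.
def _example_early_stopping(model, x_train, y_train):
  """Fit `model` with early stopping on the validation loss."""
  early_stop = EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=3,
                             verbose=1)
  model.fit(x_train, y_train, validation_split=0.2, epochs=100,
            callbacks=[early_stop])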
@tf_export('keras.callbacks.RemoteMonitor')
class RemoteMonitor(Callback):
"""Callback used to stream events to a server.
Requires the `requests` library.
Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
HTTP POST, with a `data` argument which is a
JSON-encoded dictionary of event data.
If send_as_json is set to True, the content type of the request will be
application/json. Otherwise the serialized JSON will be sent within a form.
Arguments:
root: String; root url of the target server.
path: String; path relative to `root` to which the events will be sent.
field: String; JSON field under which the data will be stored.
The field is used only if the payload is sent within a form
(i.e. send_as_json is set to False).
headers: Dictionary; optional custom HTTP headers.
send_as_json: Boolean; whether the request should be
sent as application/json.
"""
def __init__(self,
root='http://localhost:9000',
path='/publish/epoch/end/',
field='data',
headers=None,
send_as_json=False):
super(RemoteMonitor, self).__init__()
self.root = root
self.path = path
self.field = field
self.headers = headers
self.send_as_json = send_as_json
def on_epoch_end(self, epoch, logs=None):
if requests is None:
raise ImportError('RemoteMonitor requires the `requests` library.')
logs = logs or {}
send = {}
send['epoch'] = epoch
for k, v in logs.items():
send[k] = v
try:
if self.send_as_json:
requests.post(self.root + self.path, json=send, headers=self.headers)
else:
requests.post(
self.root + self.path, {self.field: json.dumps(send)},
headers=self.headers)
except requests.exceptions.RequestException:
logging.warning('Warning: could not reach RemoteMonitor '
'root server at ' + str(self.root))
@tf_export('keras.callbacks.LearningRateScheduler')
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
Arguments:
schedule: a function that takes an epoch index as input
(integer, indexed from 0) and returns a new
learning rate as output (float).
verbose: int. 0: quiet, 1: update messages.
"""
def __init__(self, schedule, verbose=0):
super(LearningRateScheduler, self).__init__()
self.schedule = schedule
self.verbose = verbose
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
try: # new API
lr = float(K.get_value(self.model.optimizer.lr))
lr = self.schedule(epoch, lr)
except TypeError: # Support for old API for backward compatibility
lr = self.schedule(epoch)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function '
'should be float.')
K.set_value(self.model.optimizer.lr, lr)
if self.verbose > 0:
print('\nEpoch %05d: LearningRateScheduler reducing learning '
'rate to %s.' % (epoch + 1, lr))
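# --- Illustrative sketch (not part of the module) ---
# A `schedule` function using the new-style (epoch, lr) signature accepted
# above: halve the learning rate every 10 epochs. Usage sketch:
#   model.fit(..., callbacks=[LearningRateScheduler(_example_schedule, verbose=1)])
def _example_schedule(epoch, lr):
  """Return the learning rate to use for `epoch`."""
  if epoch > 0 and epoch % 10 == 0:
    return lr * 0.5
  return lr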
@tf_export('keras.callbacks.TensorBoard')
class TensorBoard(Callback):
# pylint: disable=line-too-long
"""Tensorboard basic visualizations.
This callback writes a log for TensorBoard, which allows
you to visualize dynamic graphs of your training and test
metrics, as well as activation histograms for the different
layers in your model.
TensorBoard is a visualization tool provided with TensorFlow.
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```sh
tensorboard --logdir=/full_path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Arguments:
log_dir: the path of the directory where to save the log
files to be parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation
and weight histograms for the layers of the model. If set to 0,
histograms won't be computed. Validation data (or split) must be
specified for histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard.
The log file can become quite large when
write_graph is set to True.
write_grads: whether to visualize gradient histograms in TensorBoard.
`histogram_freq` must be greater than 0.
batch_size: size of batch of inputs to feed to the network
for histograms computation.
write_images: whether to write model weights to visualize as
image in TensorBoard.
embeddings_freq: frequency (in epochs) at which selected embedding
layers will be saved. If set to 0, embeddings won't be computed.
Data to be visualized in TensorBoard's Embedding tab must be passed
as `embeddings_data`.
embeddings_layer_names: a list of names of layers to keep eye on. If
None or empty list all the embedding layer will be watched.
embeddings_metadata: a dictionary which maps layer name to a file name
in which metadata for this embedding layer is saved. See the
[details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about metadata files format. In case if the same metadata file is
used for all embedding layers, string can be passed.
embeddings_data: data to be embedded at layers specified in
`embeddings_layer_names`. Numpy array (if the model has a single
input) or list of Numpy arrays (if the model has multiple inputs).
Learn [more about embeddings](https://www.tensorflow.org/programmers_guide/embedding)
Raises:
ValueError: If histogram_freq is set and no validation data is provided.
  @compatibility(eager)
Using `Tensorboard` callback will work while eager execution is enabled,
however outputting histogram summaries of weights and gradients is not
supported, and thus `histogram_freq` will be ignored.
@end_compatibility
"""
# pylint: enable=line-too-long
def __init__(self,
log_dir='./logs',
histogram_freq=0,
batch_size=32,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None,
embeddings_data=None):
super(TensorBoard, self).__init__()
self.log_dir = log_dir
self.histogram_freq = histogram_freq
if self.histogram_freq and context.executing_eagerly():
logging.warning(
          UserWarning('Weight and gradient histograms not supported for eager '
                      'execution, setting `histogram_freq` to `0`.'))
self.histogram_freq = 0
self.merged = None
self.write_graph = write_graph
self.write_grads = write_grads
self.write_images = write_images
self.batch_size = batch_size
self._current_batch = 0
self._total_batches_seen = 0
self.embeddings_freq = embeddings_freq
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata
self.embeddings_data = embeddings_data
def _init_writer(self):
"""Sets file writer."""
if context.executing_eagerly():
self.writer = summary_ops_v2.create_file_writer(self.log_dir)
elif self.write_graph:
self.writer = tf_summary.FileWriter(self.log_dir, K.get_session().graph)
else:
self.writer = tf_summary.FileWriter(self.log_dir)
def _make_histogram_ops(self, model):
"""Defines histogram ops when histogram_freq > 0."""
# only make histogram summary op if it hasn't already been made
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
mapped_weight_name = weight.name.replace(':', '_')
tf_summary.histogram(mapped_weight_name, weight)
if self.write_images:
w_img = array_ops.squeeze(weight)
shape = K.int_shape(w_img)
if len(shape) == 2: # dense layer kernel case
if shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
elif len(shape) == 3: # convnet case
if K.image_data_format() == 'channels_last':
# switch to channels_first to display
# every kernel as a separate image
w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img,
[shape[0], shape[1], shape[2], 1])
elif len(shape) == 1: # bias case
w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
else:
# not possible to handle 3D convnets etc.
continue
shape = K.int_shape(w_img)
assert len(shape) == 4 and shape[-1] in [1, 3, 4]
tf_summary.image(mapped_weight_name, w_img)
if self.write_grads:
for weight in layer.trainable_weights:
mapped_weight_name = weight.name.replace(':', '_')
grads = model.optimizer.get_gradients(model.total_loss, weight)
def is_indexed_slices(grad):
return type(grad).__name__ == 'IndexedSlices'
grads = [
grad.values if is_indexed_slices(grad) else grad
for grad in grads
]
tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
if hasattr(layer, 'output'):
if isinstance(layer.output, list):
for i, output in enumerate(layer.output):
tf_summary.histogram('{}_out_{}'.format(layer.name, i), output)
else:
tf_summary.histogram('{}_out'.format(layer.name), layer.output)
def set_model(self, model):
"""Sets Keras model and creates summary ops."""
self.model = model
self._init_writer()
# histogram summaries only enabled in graph mode
if not context.executing_eagerly():
self._make_histogram_ops(model)
self.merged = tf_summary.merge_all()
# If both embedding_freq and embeddings_data are available, we will
# visualize embeddings.
if self.embeddings_freq and self.embeddings_data is not None:
self.embeddings_data = standardize_input_data(self.embeddings_data,
model.input_names)
# If embedding_layer_names are not provided, get all of the embedding
# layers from the model.
embeddings_layer_names = self.embeddings_layer_names
if not embeddings_layer_names:
embeddings_layer_names = [
layer.name
for layer in self.model.layers
if type(layer).__name__ == 'Embedding'
]
self.assign_embeddings = []
embeddings_vars = {}
self.batch_id = batch_id = array_ops.placeholder(dtypes.int32)
self.step = step = array_ops.placeholder(dtypes.int32)
for layer in self.model.layers:
if layer.name in embeddings_layer_names:
embedding_input = self.model.get_layer(layer.name).output
embedding_size = np.prod(embedding_input.shape[1:])
embedding_input = array_ops.reshape(embedding_input,
(step, int(embedding_size)))
shape = (self.embeddings_data[0].shape[0], int(embedding_size))
embedding = variables.Variable(
array_ops.zeros(shape), name=layer.name + '_embedding')
embeddings_vars[layer.name] = embedding
batch = state_ops.assign(embedding[batch_id:batch_id + step],
embedding_input)
self.assign_embeddings.append(batch)
self.saver = saver.Saver(list(embeddings_vars.values()))
# Create embeddings_metadata dictionary
if isinstance(self.embeddings_metadata, str):
embeddings_metadata = {
layer_name: self.embeddings_metadata
for layer_name in embeddings_vars.keys()
}
else:
# If embedding_metadata is already a dictionary
embeddings_metadata = self.embeddings_metadata
try:
from tensorboard.plugins import projector
except ImportError:
        raise ImportError('Failed to import TensorBoard. Please make sure that '
                          'TensorBoard integration is complete.')
# TODO(psv): Add integration tests to test embedding visualization
# with TensorBoard callback. We are unable to write a unit test for this
# because TensorBoard dependency assumes TensorFlow package is installed.
config = projector.ProjectorConfig()
for layer_name, tensor in embeddings_vars.items():
embedding = config.embeddings.add()
embedding.tensor_name = tensor.name
if (embeddings_metadata is not None and
layer_name in embeddings_metadata):
embedding.metadata_path = embeddings_metadata[layer_name]
projector.visualize_embeddings(self.writer, config)
def _fetch_callback(self, summary):
self.writer.add_summary(
summary,
self._epoch + self._current_val_batch / self._validation_batches)
self._current_val_batch += 1
def _write_custom_summaries(self, step, logs=None):
"""Writes metrics out as custom scalar summaries.
Arguments:
step: the global step to use for Tensorboard.
logs: dict. Keys are scalar summary names, values are
NumPy scalars.
"""
logs = logs or {}
if context.executing_eagerly():
# use v2 summary ops
with self.writer.as_default(), summary_ops_v2.always_record_summaries():
for name, value in logs.items():
summary_ops_v2.scalar(name, value.item(), step=step)
else:
# use FileWriter from v1 summary
for name, value in logs.items():
summary = tf_summary.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.writer.add_summary(summary, step)
self.writer.flush()
def on_train_begin(self, logs=None):
"""Checks if histogram summaries can be run."""
# will never be set when in eager
if self.histogram_freq:
if self.params.get('validation_steps', None) is not None:
self._validation_batches = self.params['validation_steps']
elif self.validation_data:
self._validation_batches = math.ceil(
self.validation_data[0].shape[0] / self.batch_size)
else:
raise ValueError('If printing histograms, validation data must be '
'provided.')
if self._validation_batches == 0:
raise ValueError(
'If printing histograms, validation data must have length > 0.')
def on_batch_end(self, batch, logs=None):
"""Writes scalar summaries for metrics on every training batch."""
# Don't output batch_size and batch number as Tensorboard summaries
logs = logs or {}
batch_logs = {('batch_' + k): v
for k, v in logs.items()
if k not in ['batch', 'size']}
self._write_custom_summaries(self._total_batches_seen, batch_logs)
self._total_batches_seen += 1
def on_epoch_begin(self, epoch, logs=None):
"""Add histogram op to Model test_function callbacks, reset batch count."""
# check if histogram summary should be run for this epoch
if self.histogram_freq and epoch % self.histogram_freq == 0:
self._epoch = epoch
self._current_val_batch = 0
# add the histogram summary op if it should run this epoch
if self.merged not in self.model.test_function.fetches:
self.model.test_function.fetches.append(self.merged)
self.model.test_function.fetch_callbacks[
self.merged] = self._fetch_callback
def on_epoch_end(self, epoch, logs=None):
"""Checks if summary ops should run next epoch, logs scalar summaries."""
# don't output batch_size and
# batch number as Tensorboard summaries
logs = {('epoch_' + k): v
for k, v in logs.items()
if k not in ['batch', 'size']}
self._write_custom_summaries(epoch, logs)
# pop the histogram summary op after each epoch
if self.histogram_freq:
if self.merged in self.model.test_function.fetches:
self.model.test_function.fetches.remove(self.merged)
if self.merged in self.model.test_function.fetch_callbacks:
self.model.test_function.fetch_callbacks.pop(self.merged)
if self.embeddings_data is None and self.embeddings_freq:
raise ValueError('To visualize embeddings, embeddings_data must '
'be provided.')
if self.embeddings_freq and self.embeddings_data is not None:
if epoch % self.embeddings_freq == 0:
# We need a second forward-pass here because we're passing
# the `embeddings_data` explicitly. This design allows to pass
# arbitrary data as `embeddings_data` and results from the fact
# that we need to know the size of the `tf.Variable`s which
# hold the embeddings in `set_model`. At this point, however,
# the `validation_data` is not yet set.
embeddings_data = self.embeddings_data
n_samples = embeddings_data[0].shape[0]
i = 0
while i < n_samples:
step = min(self.batch_size, n_samples - i)
batch = slice(i, i + step)
if isinstance(self.model.input, list):
feed_dict = {
model_input: embeddings_data[idx][batch]
for idx, model_input in enumerate(self.model.input)
}
else:
feed_dict = {self.model.input: embeddings_data[0][batch]}
feed_dict.update({self.batch_id: i, self.step: step})
if self.model.uses_learning_phase:
feed_dict[K.learning_phase()] = False
self.sess.run(self.assign_embeddings, feed_dict=feed_dict)
self.saver.save(self.sess,
os.path.join(self.log_dir, 'keras_embedding.ckpt'),
epoch)
i += self.batch_size
def on_train_end(self, logs=None):
self.writer.close()
@tf_export('keras.callbacks.ReduceLROnPlateau')
class ReduceLROnPlateau(Callback):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Example:
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
Arguments:
monitor: quantity to be monitored.
factor: factor by which the learning rate will
be reduced. new_lr = lr * factor
patience: number of epochs with no improvement
after which learning rate will be reduced.
verbose: int. 0: quiet, 1: update messages.
mode: one of {auto, min, max}. In `min` mode,
lr will be reduced when the quantity
monitored has stopped decreasing; in `max`
mode it will be reduced when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
min_delta: threshold for measuring the new optimum,
to only focus on significant changes.
cooldown: number of epochs to wait before resuming
normal operation after lr has been reduced.
min_lr: lower bound on the learning rate.
"""
def __init__(self,
monitor='val_loss',
factor=0.1,
patience=10,
verbose=0,
mode='auto',
min_delta=1e-4,
cooldown=0,
min_lr=0,
**kwargs):
super(ReduceLROnPlateau, self).__init__()
self.monitor = monitor
if factor >= 1.0:
raise ValueError('ReduceLROnPlateau ' 'does not support a factor >= 1.0.')
if 'epsilon' in kwargs:
min_delta = kwargs.pop('epsilon')
logging.warning('`epsilon` argument is deprecated and '
'will be removed, use `min_delta` instead.')
self.factor = factor
self.min_lr = min_lr
self.min_delta = min_delta
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0 # Cooldown counter.
self.wait = 0
self.best = 0
self.mode = mode
self.monitor_op = None
self._reset()
def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
logging.warning('Learning Rate Plateau Reducing mode %s is unknown, '
'fallback to auto mode.', self.mode)
self.mode = 'auto'
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
def on_train_begin(self, logs=None):
self._reset()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
current = logs.get(self.monitor)
if current is None:
logging.warning('Reduce LR on plateau conditioned on metric `%s` '
'which is not available. Available metrics are: %s',
self.monitor, ','.join(list(logs.keys())))
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif not self.in_cooldown():
self.wait += 1
if self.wait >= self.patience:
old_lr = float(K.get_value(self.model.optimizer.lr))
if old_lr > self.min_lr:
new_lr = old_lr * self.factor
new_lr = max(new_lr, self.min_lr)
K.set_value(self.model.optimizer.lr, new_lr)
if self.verbose > 0:
print('\nEpoch %05d: ReduceLROnPlateau reducing learning '
'rate to %s.' % (epoch + 1, new_lr))
self.cooldown_counter = self.cooldown
self.wait = 0
def in_cooldown(self):
return self.cooldown_counter > 0
@tf_export('keras.callbacks.CSVLogger')
class CSVLogger(Callback):
"""Callback that streams epoch results to a csv file.
Supports all values that can be represented as a string,
including 1D iterables such as np.ndarray.
Example:
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
Arguments:
filename: filename of the csv file, e.g. 'run/log.csv'.
separator: string used to separate elements in the csv file.
append: True: append if file exists (useful for continuing
          training). False: overwrite existing file.
"""
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = filename
self.append = append
self.writer = None
self.keys = None
self.append_header = True
self.file_flags = 'b' if six.PY2 and os.name == 'nt' else ''
super(CSVLogger, self).__init__()
def on_train_begin(self, logs=None):
if self.append:
if os.path.exists(self.filename):
with open(self.filename, 'r' + self.file_flags) as f:
self.append_header = not bool(len(f.readline()))
self.csv_file = open(self.filename, 'a' + self.file_flags)
else:
self.csv_file = open(self.filename, 'w' + self.file_flags)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, six.string_types):
return k
elif isinstance(k, Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
else:
return k
if self.keys is None:
self.keys = sorted(logs.keys())
if self.model.stop_training:
# We set NA so that csv parsers do not fail for this last epoch.
logs = dict([(k, logs[k]) if k in logs else (k, 'NA') for k in self.keys])
if not self.writer:
class CustomDialect(csv.excel):
delimiter = self.sep
self.writer = csv.DictWriter(
self.csv_file,
fieldnames=['epoch'] + self.keys,
dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
row_dict = OrderedDict({'epoch': epoch})
row_dict.update((key, handle_value(logs[key])) for key in self.keys)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
@tf_export('keras.callbacks.LambdaCallback')
class LambdaCallback(Callback):
r"""Callback for creating simple, custom callbacks on-the-fly.
This callback is constructed with anonymous functions that will be called
at the appropriate time. Note that the callbacks expects positional
arguments, as:
- `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
`epoch`, `logs`
- `on_batch_begin` and `on_batch_end` expect two positional arguments:
`batch`, `logs`
- `on_train_begin` and `on_train_end` expect one positional argument:
`logs`
Arguments:
on_epoch_begin: called at the beginning of every epoch.
on_epoch_end: called at the end of every epoch.
on_batch_begin: called at the beginning of every batch.
on_batch_end: called at the end of every batch.
on_train_begin: called at the beginning of model training.
on_train_end: called at the end of model training.
Example:
```python
# Print the batch number at the beginning of every batch.
batch_print_callback = LambdaCallback(
on_batch_begin=lambda batch,logs: print(batch))
# Stream the epoch loss to a file in JSON format. The file content
# is not well-formed JSON but rather has a JSON object per line.
import json
json_log = open('loss_log.json', mode='wt', buffering=1)
json_logging_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: json_log.write(
json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
on_train_end=lambda logs: json_log.close()
)
# Terminate some processes after having finished model training.
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
json_logging_callback,
cleanup_callback])
```
"""
def __init__(self,
on_epoch_begin=None,
on_epoch_end=None,
on_batch_begin=None,
on_batch_end=None,
on_train_begin=None,
on_train_end=None,
**kwargs):
super(LambdaCallback, self).__init__()
self.__dict__.update(kwargs)
if on_epoch_begin is not None:
self.on_epoch_begin = on_epoch_begin
else:
self.on_epoch_begin = lambda epoch, logs: None
if on_epoch_end is not None:
self.on_epoch_end = on_epoch_end
else:
self.on_epoch_end = lambda epoch, logs: None
if on_batch_begin is not None:
self.on_batch_begin = on_batch_begin
else:
self.on_batch_begin = lambda batch, logs: None
if on_batch_end is not None:
self.on_batch_end = on_batch_end
else:
self.on_batch_end = lambda batch, logs: None
if on_train_begin is not None:
self.on_train_begin = on_train_begin
else:
self.on_train_begin = lambda logs: None
if on_train_end is not None:
self.on_train_end = on_train_end
else:
self.on_train_end = lambda logs: None
| [
"[email protected]"
] | |
24b225f065ed151eb22a92e8b8d904ab8f8a5b5d | ad01faab6dd663dc5193eb8383fdc2d24c2df23d | /_flask/_flask/src/models.py | 65bcffc85e3d1501298f09252d2b8c292996163d | [] | no_license | jurgeon018/snippets | 585db91b8120076b37deaa37393b34f7c61fec66 | e0ab24a99791c3b25422a3208f02919cf98ca084 | refs/heads/master | 2023-05-14T12:31:48.139452 | 2023-01-23T03:33:41 | 2023-01-23T03:33:41 | 222,001,233 | 0 | 0 | null | 2023-05-01T22:16:48 | 2019-11-15T20:51:27 | Python | UTF-8 | Python | false | false | 2,368 | py | from flask_security import UserMixin, RoleMixin
from datetime import datetime
import re
from app import db
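# Build a URL-friendly slug: replace every character that is not a word
# character (or '+') with a hyphen.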
def slugify(s):
pattern = r'[^\w+]'
return re.sub(pattern, '-', str(s))
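# Many-to-many association table linking posts to tags.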
post_tags = db.Table(
'post_tags',
db.Column('post_id', db.Integer, db.ForeignKey('post.id')),
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id'))
)
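# Many-to-many association table linking users to Flask-Security roles.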
roles_users = db.Table(
'roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id'))
)
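# Mixin that adds a save() helper: add the instance to the session and commit.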
class SaveMixin:
def save(self, *args, **kwargs):
db.session.add(self)
db.session.commit()
class User(db.Model, SaveMixin, UserMixin):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100))
active = db.Column(db.Boolean())
roles = db.relationship('Role',
secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
class Role(db.Model, SaveMixin, RoleMixin):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=True)
description = db.Column(db.String(255))
class Post(db.Model, SaveMixin):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(140))
slug = db.Column(db.String(140), unique=True)
body = db.Column(db.Text())
created = db.Column(db.DateTime, default=datetime.now)
tags = db.relationship('Tag',
secondary=post_tags,
backref=db.backref('posts'),
lazy='dynamic')
def __init__(self, *args, **kwargs):
super(Post, self).__init__(*args, **kwargs)
self.generate_slug()
def generate_slug(self):
if self.title:
self.slug = slugify(self.title)
def __repr__(self):
return '<Post id: {}, title: {}'.format(self.id, self.title)
class Tag(db.Model, SaveMixin):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100))
slug = db.Column(db.String(100))
def __init__(self, *args, **kwargs):
super(Tag, self).__init__(*args, **kwargs)
self.slug = slugify(self.name)
def __repr__(self):
return '<Tag id: {}, name: {}>'.format(self.id, self.name)
| [
"[email protected]"
] | |
e4d331190d0951613bfb2dd2e5c596a4220fc079 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-4642.py | e44d50bd03724d6fd15dac3b8b839dc0d38c9563 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,755 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
    def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
7ce5a5a7e987b5117fe0c6627da59256dd274079 | 09f0505f3ac1dccaf301c1e363423f38768cc3cc | /r_DailyProgrammer/Hard/C261/__init__.py | 7fb10a7e3f3dd524eca4b11a7704983484b081b8 | [] | no_license | Awesome-Austin/PythonPractice | 02212292b92814016d062f0fec1c990ebde21fe7 | 9a717f91d41122be6393f9fcd1a648c5e62314b3 | refs/heads/master | 2023-06-21T11:43:59.366064 | 2021-07-29T23:33:00 | 2021-07-29T23:33:00 | 270,854,302 | 0 | 0 | null | 2020-08-11T20:47:10 | 2020-06-08T23:24:09 | Python | UTF-8 | Python | false | false | 63 | py | #! python3
from r_DailyProgrammer.Hard.C261.main import main
| [
"{ID}+{username}@users.noreply.github.com"
] | {ID}+{username}@users.noreply.github.com |
552ab2bbd2ef44a5026c219a56b2ffd8ce677ca4 | c73fc798764f40ea6fa466a573fb01223e367ce3 | /recursion/dequeue.py | 0cb394faf20083a3b1185caeaf1124bf8907044b | [] | no_license | mohitsh/python_work | b1385f62104aa6b932f5452ca5c2421526345455 | 223a802dea5cdb73f44a159856c7432983655668 | refs/heads/master | 2020-04-24T00:34:15.427060 | 2018-08-21T19:12:07 | 2018-08-21T19:12:07 | 37,491,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | '''
In this deque example, position 0 is treated as the FRONT.
Other deque examples (like the palindrome checker) treat the last element
as the front; usually the first element is considered the rear and the
last one the front.
To follow that convention, swap the roles of index 0 and the last index
in the methods below.
So no worries!
'''
class Deque:
def __init__(self):
self.items = []
def size(self):
return len(self.items)
def isEmpty(self):
return self.items == []
def addFront(self,item):
self.items.insert(0,item)
def addRear(self,item):
self.items.append(item)
def removeFront(self):
return self.items.pop(0)
def removeRear(self):
return self.items.pop()
def show(self):
print self.items
d = Deque()
print d.isEmpty()
print d.size()
d.addFront(1)
d.addFront(2)
d.addFront(3)
print d.isEmpty()
print d.size()
d.show()
d.addRear(10)
d.addRear(11)
d.addRear(12)
d.show()
print d.removeFront()
print d.removeFront()
print d.removeFront()
d.show()
print d.removeRear()
print d.removeRear()
print d.removeRear()
d.show()
| [
"[email protected]"
] | |
8939cb11b44574e3ae4666bb7ed1698550d192c4 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5756407898963968_0/Python/eding/A-small-code.py | e5ebfa779d5c7ca729204629dbea0f829a594e03 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | import codecs
import sys
N_ROWS = 4
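# Each test case is: the volunteer's answer (a row number), a 4x4 grid of cards,
# then a second answer and grid. The chosen card must appear in both selected
# rows, so their intersection decides the output: exactly one common card ->
# that card, none -> "Volunteer cheated!", several -> "Bad magician!".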
def main():
file = codecs.open(sys.argv[1], "r", "utf-8-sig")
lines = [line.strip() for line in file]
T = int(lines[0])
cards1 = []
cards2 = []
index = 1
for trial in xrange(0,T):
ans1 = int(lines[index])
cards1 = map(int, lines[index+ans1].split())
index += 5
ans2 = int(lines[index])
cards2 = map(int, lines[index+ans2].split())
index += 5
intersect = [card for card in cards1 if card in cards2]
sys.stdout.write("Case #%d: " % (trial+1))
if len(intersect) < 1:
sys.stdout.write("Volunteer cheated!\n")
elif len(intersect) == 1:
sys.stdout.write("%d\n" % intersect[0])
elif len(intersect) > 1:
sys.stdout.write("Bad magician!\n")
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
90bd6692ba1c920aebf545909f10a2d5fe660622 | c8036cb365243439b4a3593124eafdfba933a034 | /src/loss/normal_6_class.py | 445ac4273311e941f342bfc5794d5eeaf8cc2e37 | [] | no_license | koike-ya/rsna | 3a1150dc878bde6320ae4c1d965675460dd7de0d | c88c45cfa280b47f0fb48cc9df88954f83a551b4 | refs/heads/master | 2022-03-16T00:36:55.846905 | 2019-11-02T00:49:15 | 2019-11-02T00:49:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,715 | py |
dir_csv = '../../input/'
dir_train_img = '../../input/stage_1_train_pngs/'
dir_test_img = '../../input/stage_1_test_pngs/'
# Parameters
n_classes = 6
n_epochs = 5
batch_size = 32
import glob
import os
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import pydicom
import torch
import torch.optim as optim
from albumentations import Compose, ShiftScaleRotate, CenterCrop, HorizontalFlip, RandomBrightnessContrast
from albumentations.pytorch import ToTensor
from skimage.transform import resize
from torch.utils.data import Dataset
from tqdm import tqdm as tqdm
from apex import amp
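# Default CT window (level/width, in Hounsfield units) applied when loading DICOM slices.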
CT_LEVEL = 40
CT_WIDTH = 150
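# Convert raw DICOM pixel values to Hounsfield units using the slope and
# intercept from the DICOM header, clamping the air floor at -1024.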
def rescale_pixelarray(dataset):
image = dataset.pixel_array
rescaled_image = image * dataset.RescaleSlope + dataset.RescaleIntercept
rescaled_image[rescaled_image < -1024] = -1024
return rescaled_image
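# Clip a Hounsfield-unit image to the window defined by its center and width.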
def set_manual_window(hu_image, custom_center, custom_width):
min_value = custom_center - (custom_width / 2)
max_value = custom_center + (custom_width / 2)
hu_image[hu_image < min_value] = min_value
hu_image[hu_image > max_value] = max_value
return hu_image
class IntracranialDataset(Dataset):
def __init__(self, csv_file, data_dir, labels, ct_level=0, ct_width=0, transform=None):
self.data_dir = data_dir
self.data = pd.read_csv(csv_file)
self.transform = transform
self.labels = labels
self.level = ct_level
self.width = ct_width
self.nn_input_shape = (224, 224)
def __len__(self):
return len(self.data)
def resize(self, image):
image = resize(image, self.nn_input_shape)
return image
def fill_channels(self, image):
filled_image = np.stack((image,)*3, axis=-1)
return filled_image
def _get_hounsfield_window(self, dicom):
hu_image = rescale_pixelarray(dicom)
windowed_image = set_manual_window(hu_image, self.level, self.width)
return windowed_image
def _load_dicom_to_image(self, file_path):
dicom = pydicom.dcmread(file_path)
windowed_image = self._get_hounsfield_window(dicom)
image = self.fill_channels(self.resize(windowed_image))
return image
def __getitem__(self, idx):
file_path = os.path.join(self.data_dir, self.data.loc[idx, 'Image'] + '.png')
if not Path(file_path).is_file():
return self.__getitem__(idx + 1)
# img = self._load_dicom_to_image(file_path)
img = cv2.imread(file_path)
if self.transform:
augmented = self.transform(image=img)
img = augmented['image']
if self.labels:
labels = torch.tensor(
self.data.loc[idx, ['epidural', 'intraparenchymal', 'intraventricular', 'subarachnoid', 'subdural', 'any']])
return {'image': img, 'labels': labels}
else:
return {'image': img}
# # CSV
# In[7]:
# CSVs
if __name__ == '__main__':
if not Path('../../src/train.csv').is_file():
train = pd.read_csv(os.path.join(dir_csv, 'stage_1_train.csv'))
test = pd.read_csv(os.path.join(dir_csv, 'stage_1_sample_submission.csv'))
# Split train out into row per image and save a sample
train[['ID', 'Image', 'Diagnosis']] = train['ID'].str.split('_', expand=True)
train = train[['Image', 'Diagnosis', 'Label']]
train.drop_duplicates(inplace=True)
train = train.pivot(index='Image', columns='Diagnosis', values='Label').reset_index()
train['Image'] = 'ID_' + train['Image']
train.head()
# Some files didn't contain legitimate images, so we need to remove them
png = glob.glob(os.path.join(dir_train_img, '*.png'))
png = [os.path.basename(png)[:-4] for png in png]
png = np.array(png)
train = train[train['Image'].isin(png)]
train.to_csv('train.csv', index=False)
# Also prepare the test data
test[['ID','Image','Diagnosis']] = test['ID'].str.split('_', expand=True)
test['Image'] = 'ID_' + test['Image']
test = test[['Image', 'Label']]
test.drop_duplicates(inplace=True)
test.to_csv('test.csv', index=False)
# Data loaders
transform_train = Compose([CenterCrop(200, 200),
#Resize(224, 224),
HorizontalFlip(),
RandomBrightnessContrast(),
ShiftScaleRotate(),
ToTensor()
])
transform_test= Compose([CenterCrop(200, 200),
#Resize(224, 224),
ToTensor()
])
train_dataset = IntracranialDataset(
csv_file='train.csv', data_dir=dir_train_img, transform=transform_train, labels=True)
test_dataset = IntracranialDataset(
csv_file='test.csv', data_dir=dir_test_img, transform=transform_test, labels=False)
data_loader_train = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=False, num_workers=8)
data_loader_test = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=8)
device = torch.device("cuda:0")
# device = torch.device("cpu")
# model = torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x8d_wsl')
model = torch.hub.load('pytorch/vision', 'shufflenet_v2_x1_0', pretrained=True)
model.fc = torch.nn.Linear(1024, n_classes)
model.to(device)
criterion = torch.nn.BCEWithLogitsLoss()
plist = [{'params': model.parameters(), 'lr': 2e-5}]
optimizer = optim.Adam(plist, lr=2e-5)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
for epoch in range(n_epochs):
print('Epoch {}/{}'.format(epoch, n_epochs - 1))
print('-' * 10)
model.train()
tr_loss = 0
tk0 = tqdm(data_loader_train, desc="Iteration")
for step, batch in enumerate(tk0):
inputs = batch["image"]
labels = batch["labels"]
inputs = inputs.to(device, dtype=torch.float)
labels = labels.to(device, dtype=torch.float)
outputs = model(inputs)
loss = criterion(outputs, labels)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
# loss.backward()
tr_loss += loss.item()
optimizer.step()
optimizer.zero_grad()
if epoch == 1 and step > 6000:
epoch_loss = tr_loss / 6000
print('Training Loss: {:.4f}'.format(epoch_loss))
break
epoch_loss = tr_loss / len(data_loader_train)
print('Training Loss: {:.4f}'.format(epoch_loss))
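    # Inference: freeze all weights, switch to eval mode, and collect per-class
    # sigmoid probabilities for every test image.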
for param in model.parameters():
param.requires_grad = False
model.eval()
test_pred = np.zeros((len(test_dataset) * n_classes, 1))
for i, x_batch in enumerate(tqdm(data_loader_test)):
x_batch = x_batch["image"]
x_batch = x_batch.to(device, dtype=torch.float)
with torch.no_grad():
pred = model(x_batch)
test_pred[(i * batch_size * n_classes):((i + 1) * batch_size * n_classes)] = torch.sigmoid(
pred).detach().cpu().reshape((len(x_batch) * n_classes, 1))
# Submission
submission = pd.read_csv(os.path.join(dir_csv, 'stage_1_sample_submission.csv'))
submission = pd.concat([submission.drop(columns=['Label']), pd.DataFrame(test_pred)], axis=1)
submission.columns = ['ID', 'Label']
submission.to_csv(f'../../output/{Path(__file__).name}_sub.csv', index=False)
submission.head()
| [
"[email protected]"
] | |
9273d11433c99cd486d59b7efcdbdf73ababd159 | 83ed75056a4fa0a26e363ecf80fdb5390b9abe76 | /web/decisions/subscriptions/__init__.py | ec796c778fac00714e1a872800e4d04f22d29a5c | [
"BSD-3-Clause"
] | permissive | okffi/decisions | a67ef9150dfa8585b82bb95da323e5b354be4532 | e45d8c56cf244ef277ffeba6808e942564028b7f | refs/heads/master | 2021-01-21T13:34:03.416056 | 2016-05-25T09:58:51 | 2016-05-25T09:58:51 | 53,145,413 | 3 | 2 | null | 2016-05-02T11:31:34 | 2016-03-04T15:36:21 | JavaScript | UTF-8 | Python | false | false | 72 | py | default_app_config = 'decisions.subscriptions.apps.SubscriptionsConfig'
| [
"[email protected]"
] | |
cf68f6c4ab005f0fe8ee4f5b2477383a7c5b3c99 | a00ed711e3e08b50ad6e91cc07a2cddc4a1de5ea | /tests/models/test_param.py | 3529f0360cdcfbcedfe6aa3802c9aedb473ab05d | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | ishiis/airflow | 4305794e36b611d01f49e3f2401be3dc49782670 | 292440d54f4db84aaf0c5a98cf5fcf34303f2fa8 | refs/heads/master | 2022-07-30T00:51:28.806940 | 2022-07-14T12:07:11 | 2022-07-14T12:07:11 | 209,801,072 | 1 | 0 | Apache-2.0 | 2019-09-20T13:47:26 | 2019-09-20T13:47:26 | null | UTF-8 | Python | false | false | 9,932 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from contextlib import nullcontext
import pytest
from airflow.decorators import task
from airflow.exceptions import ParamValidationError
from airflow.models.param import Param, ParamsDict
from airflow.utils import timezone
from airflow.utils.types import DagRunType
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_xcom
class TestParam(unittest.TestCase):
def test_param_without_schema(self):
p = Param('test')
assert p.resolve() == 'test'
p.value = 10
assert p.resolve() == 10
def test_null_param(self):
p = Param()
with pytest.raises(ParamValidationError, match='No value passed and Param has no default value'):
p.resolve()
assert p.resolve(None) is None
p = Param(None)
assert p.resolve() is None
assert p.resolve(None) is None
p = Param(type="null")
p = Param(None, type='null')
assert p.resolve() is None
assert p.resolve(None) is None
with pytest.raises(ParamValidationError):
p.resolve('test')
def test_string_param(self):
p = Param('test', type='string')
assert p.resolve() == 'test'
p = Param('test')
assert p.resolve() == 'test'
p = Param('10.0.0.0', type='string', format='ipv4')
assert p.resolve() == '10.0.0.0'
p = Param(type='string')
with pytest.raises(ParamValidationError):
p.resolve(None)
with pytest.raises(ParamValidationError, match='No value passed and Param has no default value'):
p.resolve()
def test_int_param(self):
p = Param(5)
assert p.resolve() == 5
p = Param(type='integer', minimum=0, maximum=10)
assert p.resolve(value=5) == 5
with pytest.raises(ParamValidationError):
p.resolve(value=20)
def test_number_param(self):
p = Param(42, type='number')
assert p.resolve() == 42
p = Param(1.0, type='number')
assert p.resolve() == 1.0
with pytest.raises(ParamValidationError):
p = Param('42', type='number')
p.resolve()
def test_list_param(self):
p = Param([1, 2], type='array')
assert p.resolve() == [1, 2]
def test_dict_param(self):
p = Param({'a': 1, 'b': 2}, type='object')
assert p.resolve() == {'a': 1, 'b': 2}
def test_composite_param(self):
p = Param(type=["string", "number"])
assert p.resolve(value="abc") == "abc"
assert p.resolve(value=5.0) == 5.0
def test_param_with_description(self):
p = Param(10, description='Sample description')
assert p.description == 'Sample description'
def test_suppress_exception(self):
p = Param('abc', type='string', minLength=2, maxLength=4)
assert p.resolve() == 'abc'
p.value = 'long_string'
assert p.resolve(suppress_exception=True) is None
def test_explicit_schema(self):
p = Param('abc', schema={type: "string"})
assert p.resolve() == 'abc'
def test_custom_param(self):
class S3Param(Param):
def __init__(self, path: str):
schema = {"type": "string", "pattern": r"s3:\/\/(.+?)\/(.+)"}
super().__init__(default=path, schema=schema)
p = S3Param("s3://my_bucket/my_path")
assert p.resolve() == "s3://my_bucket/my_path"
with pytest.raises(ParamValidationError):
p = S3Param("file://not_valid/s3_path")
p.resolve()
def test_value_saved(self):
p = Param("hello", type="string")
assert p.resolve("world") == "world"
assert p.resolve() == "world"
def test_dump(self):
p = Param('hello', description='world', type='string', minLength=2)
dump = p.dump()
assert dump['__class'] == 'airflow.models.param.Param'
assert dump['value'] == 'hello'
assert dump['description'] == 'world'
assert dump['schema'] == {'type': 'string', 'minLength': 2}
class TestParamsDict:
def test_params_dict(self):
# Init with a simple dictionary
pd = ParamsDict(dict_obj={'key': 'value'})
assert isinstance(pd.get_param('key'), Param)
assert pd['key'] == 'value'
assert pd.suppress_exception is False
# Init with a dict which contains Param objects
pd2 = ParamsDict({'key': Param('value', type='string')}, suppress_exception=True)
assert isinstance(pd2.get_param('key'), Param)
assert pd2['key'] == 'value'
assert pd2.suppress_exception is True
# Init with another object of another ParamsDict
pd3 = ParamsDict(pd2)
assert isinstance(pd3.get_param('key'), Param)
assert pd3['key'] == 'value'
assert pd3.suppress_exception is False # as it's not a deepcopy of pd2
# Dump the ParamsDict
assert pd.dump() == {'key': 'value'}
assert pd2.dump() == {'key': 'value'}
assert pd3.dump() == {'key': 'value'}
# Validate the ParamsDict
plain_dict = pd.validate()
assert type(plain_dict) == dict
pd2.validate()
pd3.validate()
# Update the ParamsDict
with pytest.raises(ParamValidationError, match=r'Invalid input for param key: 1 is not'):
pd3['key'] = 1
# Should not raise an error as suppress_exception is True
pd2['key'] = 1
pd2.validate()
def test_update(self):
pd = ParamsDict({'key': Param('value', type='string')})
pd.update({'key': 'a'})
internal_value = pd.get_param('key')
assert isinstance(internal_value, Param)
with pytest.raises(ParamValidationError, match=r'Invalid input for param key: 1 is not'):
pd.update({'key': 1})
class TestDagParamRuntime:
VALUE = 42
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
@staticmethod
def clean_db():
clear_db_runs()
clear_db_dags()
clear_db_xcom()
def setup_class(self):
self.clean_db()
def teardown_method(self):
self.clean_db()
def test_dag_param_resolves(self, dag_maker):
"""Test dagparam resolves on operator execution"""
with dag_maker(dag_id="test_xcom_pass_to_op") as dag:
value = dag.param('value', default=self.VALUE)
@task
def return_num(num):
return num
xcom_arg = return_num(value)
dr = dag_maker.create_dagrun(
run_id=DagRunType.MANUAL.value,
start_date=timezone.utcnow(),
)
xcom_arg.operator.run(dr.execution_date, dr.execution_date)
ti = dr.get_task_instances()[0]
assert ti.xcom_pull() == self.VALUE
def test_dag_param_overwrite(self, dag_maker):
"""Test dag param is overwritten from dagrun config"""
with dag_maker(dag_id="test_xcom_pass_to_op") as dag:
value = dag.param('value', default=self.VALUE)
@task
def return_num(num):
return num
xcom_arg = return_num(value)
assert dag.params['value'] == self.VALUE
new_value = 2
dr = dag_maker.create_dagrun(
run_id=DagRunType.MANUAL.value,
start_date=timezone.utcnow(),
conf={'value': new_value},
)
xcom_arg.operator.run(dr.execution_date, dr.execution_date)
ti = dr.get_task_instances()[0]
assert ti.xcom_pull() == new_value
def test_dag_param_default(self, dag_maker):
"""Test dag param is retrieved from default config"""
with dag_maker(dag_id="test_xcom_pass_to_op", params={'value': 'test'}) as dag:
value = dag.param('value')
@task
def return_num(num):
return num
xcom_arg = return_num(value)
dr = dag_maker.create_dagrun(run_id=DagRunType.MANUAL.value, start_date=timezone.utcnow())
xcom_arg.operator.run(dr.execution_date, dr.execution_date)
ti = dr.get_task_instances()[0]
assert ti.xcom_pull() == 'test'
@pytest.mark.parametrize(
'default, should_warn',
[
pytest.param({0, 1, 2}, True, id='default-non-JSON-serializable'),
pytest.param(None, False, id='default-None'), # Param init should not warn
pytest.param({"b": 1}, False, id='default-JSON-serializable'), # Param init should not warn
],
)
def test_param_json_warning(self, default, should_warn):
warning_msg = 'The use of non-json-serializable params is deprecated'
cm = pytest.warns(DeprecationWarning, match=warning_msg) if should_warn else nullcontext()
with cm:
p = Param(default=default)
p.resolve() # when resolved with NOTSET, should not warn.
p.resolve(value={'a': 1}) # when resolved with JSON-serializable, should not warn.
with pytest.warns(DeprecationWarning, match=warning_msg):
p.resolve(value={1, 2, 3}) # when resolved with not JSON-serializable, should warn.
| [
"[email protected]"
] | |
a10d864424683827df934951ff4cb07416e8d969 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_12_01/aio/operations/_private_link_resources_operations.py | a7c4a66aa9351e0ab6a575929711ac78f42085cb | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 5,000 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkResourcesOperations:
"""PrivateLinkResourcesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2020_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs
) -> "_models.PrivateLinkResourcesListResult":
"""Gets a list of private link resources in the specified managed cluster.
Gets a list of private link resources in the specified managed cluster. The operation returns
properties of each private link resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResourcesListResult, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_12_01.models.PrivateLinkResourcesListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResourcesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResourcesListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources'} # type: ignore
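# Usage sketch (not part of the generated client; assumes an authenticated
# azure.mgmt.containerservice.aio.ContainerServiceClient and placeholder resource names):
#
#   result = await client.private_link_resources.list("my-resource-group", "my-managed-cluster")
#   for resource in result.value or []:
#       print(resource.name)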
| [
"[email protected]"
] | |
ab1409aaf95d2bf69bc496ba2c8a4938816631bd | 3b7b6648b72910046b6a227db30f71aeee2cba9c | /2020-12-18-neural-style-transfer/deeptools/preprocessing/RandomSingleCropPreprocessor.py | 4ddf0ecef9eedb517ec472e48447e933c6d54b45 | [] | no_license | ken2190/deep-learning-study | f2abeb1cd302e405a15bbb52188ae44ffb414e2f | f2998be89d0c931176f158ae5f48ca562786e171 | refs/heads/main | 2023-04-02T05:07:08.504212 | 2021-04-11T15:11:22 | 2021-04-11T15:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | from sklearn.feature_extraction.image import extract_patches_2d
# This preprocessor randomly crops a single fixed-size patch out of an image.
class RandomSingleCropPreprocessor:
def __init__(self, width, height):
self.width = width
self.height = height
def preprocess(self, image):
return extract_patches_2d(image, (self.height, self.width), max_patches=1)[0]
# from PIL import Image
# import numpy as np
# pp = RandomSingleCropPreprocessor(200, 200)
# im = np.array(Image.open('pyimagesearch/preprocessing/test.png'))
# Image.fromarray(pp.preprocess(im)).show()
| [
"[email protected]"
] | |
f4056f860df1771e62dd5010d3a51ea2059537d3 | 6dc761a30cf5efa045f1154aaff2acfa139b835a | /LeetCode/Python/majorityElement.py | 2c3a07d29edec31ce28f3cebf1b76d1b29269efe | [] | no_license | snail15/AlgorithmPractice | 4e58beee3ff76498a389268dd4cc207dcabf778e | 9e8885953ad50e966454c45c460e81dbb6e48be0 | refs/heads/master | 2021-08-17T06:30:02.290260 | 2021-06-08T01:15:07 | 2021-06-08T01:15:07 | 98,246,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | # Given an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.
# You may assume that the array is non-empty and the majority element always exist in the array.
# Example 1:
# Input: [3,2,3]
# Output: 3
# Example 2:
# Input: [2,2,1,1,1,2,2]
# Output: 2
from typing import List
class Solution:
def majorityElement(self, nums: List[int]) -> int:
counter = {}
target = len(nums) // 2 + 1
for num in nums:
if not counter.get(num):
counter[num] = 1
else:
counter[num] += 1
if counter[num] >= target:
return num | [
"[email protected]"
] | |
883b131aab7cc6403a4eb04a14315ce599a3fb52 | 60d6b8501d0be546437b26a6ee1f9fab97ec3897 | /platypush/message/event/zigbee/mqtt.py | e3179407f6aa9291e1c47fb4fbf836c0c6dbf740 | [
"MIT"
] | permissive | BlackLight/platypush | 68284a85b2f9eef303d26b04530f075927b5834a | 446bc2f67493d3554c5422242ff91d5b5c76d78a | refs/heads/master | 2023-08-31T21:01:53.519960 | 2023-08-29T22:05:38 | 2023-08-29T22:05:38 | 109,421,017 | 265 | 25 | MIT | 2023-09-01T23:15:49 | 2017-11-03T16:56:24 | Python | UTF-8 | Python | false | false | 5,542 | py | from typing import Dict, Any
from platypush.message.event import Event
class ZigbeeMqttEvent(Event):
pass
class ZigbeeMqttOnlineEvent(ZigbeeMqttEvent):
"""
Triggered when a zigbee2mqtt service goes online.
"""
def __init__(self, host: str, port: int, *args, **kwargs):
super().__init__(*args, host=host, port=port, **kwargs)
class ZigbeeMqttOfflineEvent(ZigbeeMqttEvent):
"""
Triggered when a zigbee2mqtt service goes offline.
"""
def __init__(self, host: str, port: int, *args, **kwargs):
super().__init__(*args, host=host, port=port, **kwargs)
class ZigbeeMqttDevicePropertySetEvent(ZigbeeMqttEvent):
"""
Triggered when the properties of a Zigbee-connected device (state, brightness, alert, etc.) change.
"""
def __init__(self, host: str, port: int, device: str, properties: Dict[str, Any], *args, **kwargs):
super().__init__(*args, host=host, port=port, device=device, properties=properties, **kwargs)
class ZigbeeMqttDevicePairingEvent(ZigbeeMqttEvent):
"""
Triggered when a device is pairing to the network.
"""
def __init__(self, host: str, port: int, device=None, *args, **kwargs):
super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttDeviceConnectedEvent(ZigbeeMqttEvent):
"""
Triggered when a device connects to the network.
"""
def __init__(self, host: str, port: int, device=None, *args, **kwargs):
super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttDeviceBannedEvent(ZigbeeMqttEvent):
"""
Triggered when a device is banned from the network.
"""
def __init__(self, host: str, port: int, device=None, *args, **kwargs):
super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttDeviceRemovedEvent(ZigbeeMqttEvent):
"""
Triggered when a device is removed from the network.
"""
def __init__(self, host: str, port: int, device=None, force=False, *args, **kwargs):
super().__init__(*args, host=host, port=port, device=device, force=force, **kwargs)
class ZigbeeMqttDeviceRemovedFailedEvent(ZigbeeMqttEvent):
"""
Triggered when the removal of a device from the network failed.
"""
def __init__(self, host: str, port: int, device=None, *args, **kwargs):
super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttDeviceWhitelistedEvent(ZigbeeMqttEvent):
"""
Triggered when a device is whitelisted on the network.
"""
def __init__(self, host: str, port: int, device=None, *args, **kwargs):
super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttDeviceRenamedEvent(ZigbeeMqttEvent):
"""
Triggered when a device is renamed on the network.
"""
def __init__(self, host: str, port: int, device=None, *args, **kwargs):
super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttDeviceBindEvent(ZigbeeMqttEvent):
"""
Triggered when a device bind occurs on the network.
"""
def __init__(self, host: str, port: int, device=None, *args, **kwargs):
super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttDeviceUnbindEvent(ZigbeeMqttEvent):
"""
Triggered when a device unbind occurs on the network.
"""
def __init__(self, host: str, port: int, device=None, *args, **kwargs):
super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttGroupAddedEvent(ZigbeeMqttEvent):
"""
Triggered when a group is added.
"""
def __init__(self, host: str, port: int, group=None, *args, **kwargs):
super().__init__(*args, host=host, port=port, group=group, **kwargs)
class ZigbeeMqttGroupAddedFailedEvent(ZigbeeMqttEvent):
"""
Triggered when a request to add a group fails.
"""
def __init__(self, host: str, port: int, group=None, *args, **kwargs):
super().__init__(*args, host=host, port=port, group=group, **kwargs)
class ZigbeeMqttGroupRemovedEvent(ZigbeeMqttEvent):
"""
Triggered when a group is removed.
"""
def __init__(self, host: str, port: int, group=None, *args, **kwargs):
super().__init__(*args, host=host, port=port, group=group, **kwargs)
class ZigbeeMqttGroupRemovedFailedEvent(ZigbeeMqttEvent):
"""
Triggered when a request to remove a group fails.
"""
def __init__(self, host: str, port: int, group=None, *args, **kwargs):
super().__init__(*args, host=host, port=port, group=group, **kwargs)
class ZigbeeMqttGroupRemoveAllEvent(ZigbeeMqttEvent):
"""
Triggered when all the devices are removed from a group.
"""
def __init__(self, host: str, port: int, group=None, *args, **kwargs):
super().__init__(*args, host=host, port=port, group=group, **kwargs)
class ZigbeeMqttGroupRemoveAllFailedEvent(ZigbeeMqttEvent):
"""
Triggered when a request to remove all the devices from a group fails.
"""
def __init__(self, host: str, port: int, group=None, *args, **kwargs):
super().__init__(*args, host=host, port=port, group=group, **kwargs)
class ZigbeeMqttErrorEvent(ZigbeeMqttEvent):
"""
Triggered when an error happens on the zigbee2mqtt service.
"""
def __init__(self, host: str, port: int, error=None, *args, **kwargs):
super().__init__(*args, host=host, port=port, error=error, **kwargs)
# vim:sw=4:ts=4:et:
| [
"[email protected]"
] | |
642dace9a98ba086429328e4a3bb682bf656ef68 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03139/s354736213.py | c172482303f5c5982f40aa05ddff89c507c32649 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | a,b,c = [int(i) for i in input().split()]
print(min(b,c),b+c-a if b + c >= a else 0) | [
"[email protected]"
] | |
a472c103c0b1f3c1f8c566e750f7ba8e53639190 | 65cc6a8877896ef69dd03d7b5eee5bed56e5371f | /example/attpc-daq/web/attpcdaq/daq/templatetags/daq_model_tags.py | 600bbc1d51d3e665f9f57b9b0ce19ce3797deda5 | [] | no_license | wuhongyi/DjangoNote | 34bdb9e82fc379e19b1df0bd7c90e504fa70a40d | 81ad949ff895feda8131d8bdf5fa1439f962ae37 | refs/heads/master | 2020-05-02T17:54:12.270297 | 2019-05-22T14:37:32 | 2019-05-22T14:37:32 | 178,112,720 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | from django import template
from ..models import DataSource
register = template.Library()
def get_datasource_attr_from_choices(attr_name, choices):
value = getattr(DataSource, attr_name, None)
# Verify that the result is a valid member of the set of choices.
# This also ensures that we're not just returning any random attribute
# of the model, but just one member of a set of constants.
if value not in (key for key, name in choices):
return None
else:
return value
@register.simple_tag
def datasource_state(name):
return get_datasource_attr_from_choices(name, DataSource.STATE_CHOICES)
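# Template usage sketch for the tag defined above ('RUNNING' and 'source' are
# illustrative names; any attribute listed in DataSource.STATE_CHOICES works the same way):
#   {% load daq_model_tags %}
#   {% datasource_state 'RUNNING' as running %}
#   {% if source.state == running %} ... {% endif %}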
@register.simple_tag
def daq_state(name):
return get_datasource_attr_from_choices(name, DataSource.DAQ_STATE_CHOICES) | [
"[email protected]"
] | |
d546c510a58b01a177c9d64ec2c323aa473720ae | c5d68f58c9523257a8b41954553f5cff2cd5f487 | /Secao_13_Lista_Ex_29e/ex_27.py | 5ab77700769e25293337bd239ed838f3bd7ed0dc | [] | no_license | SouzaCadu/guppe | 04bfcde82d4404eb9ec795006c6931ba07dc72b6 | 1f8a672230c5c27712f522e1e34516591c012453 | refs/heads/master | 2023-03-13T01:32:51.019871 | 2021-02-25T17:02:59 | 2021-02-25T17:02:59 | 320,908,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,378 | py | """
27) Write a program to manage the grades of the students of a class stored in a file. The program must have a menu
with the following options:
(a) Set the class information;
(b) Insert a student and their grades;
(c) Display students and averages;
(d) Display students who passed;
(e) Display students who failed;
(f) Save data to disk;
(g) Exit the program (end)
Write the routine that manages the menu inside main and, for each of these menu options, create a specific function
# NOTE: it is not necessary to create the function/option to save the data to disk, since it is saved automatically.
"""
from valida_cadastro import valida_nome
def informacoes_turma(arquivo):
"""Receives the file path/name and prints the class information
on the screen. If the file does not exist, it is created"""
try:
with open(arquivo, "a") as _:
pass
with open(arquivo, "r", encoding="utf-8") as leitura:
print(f"\n\n{'-' * 48}INFORMAÇÕES DA TURMA{'-' * 49}")
texto = leitura.read().strip().splitlines()
if len(texto) > 0:
[print(f"{informacao.replace(';', ' - ')}\n{'-' * 117}") for informacao in texto]
else:
print(f"\n{'-' * 117}")
except ValueError:
print("-" * 117)
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("ERRO AO LER O ARQUIVO!")
print("-" * 117)
except FileNotFoundError:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("O PROGRAMA NÃO POSSUI PERMISSÃO PARA CRIAR UM DIRETÓRIO/PASTA!")
print("-" * 117)
except OSError:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("O SO NÃO ACEITA CARACTERES ESPECIAIS EM NOMES DE ARQUIVOS!")
print("-" * 117)
def inserir_notas_alunos(arquivo):
"""Receives the file path/name and writes into it the student data
and the grades entered by the user. If the file does not exist,
it is created"""
try:
with open(arquivo, "a", encoding="utf-8") as insercao:
print(f"\n\n{'-' * 54}INSERÇÃO{'-' * 55}")
cod = abs(int(input("Insira o identificador(código) do aluno: ")))
print("-" * 117)
codigo_existe = False
with open(arquivo, "r", encoding="utf-8") as leitura:
texto = leitura.read().strip().splitlines()
texto = [informacao.split(";") for informacao in texto]
for linha in texto:
if cod == int(linha[0]):
print(cod, int(linha[0]))
codigo_existe = True
if not codigo_existe:
nome = str(input(f"Insira o nome do aluno {cod}: ")).strip().title()
print("-" * 117)
if valida_nome(nome):
nota1 = float(input(f"Insira a primeira nota do aluno {nome}: "))
print("-" * 117)
nota2 = float(input(f"Insira a segunda nota do aluno {nome}: "))
print("-" * 117)
nota3 = float(input(f"Insira a terceira nota do aluno {nome}: "))
print("-" * 117)
insercao.write(f"{cod};{nome};{nota1} - {nota2} - {nota3}\n")
else:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("NOME INVÁLIDO!")
print("-" * 117)
else:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print(f"IDENTIFICADOR(CÓDIGO) {cod} JÁ EXISTENTE!")
print("-" * 117)
except ValueError:
print("-" * 117)
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("ERRO AO RECEBER OS DADOS DO USUÁRIO OU AO LER O ARQUIVO!")
print("-" * 117)
except FileNotFoundError:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("O PROGRAMA NÃO POSSUI PERMISSÃO PARA CRIAR UM DIRETÓRIO/PASTA!")
print("-" * 117)
except OSError:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("O SO NÃO ACEITA CARACTERES ESPECIAIS EM NOMES DE ARQUIVOS!")
print("-" * 117)
except IndexError:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("O MODO QUE AS INFORMAÇÕES SE ENCONTRAM NO TEXTO É INVÁLIDO!")
print("-" * 117)
def media_aluno(linha):
"""Receives a line from the file that stores the student records
and returns that student's average grade"""
notas = [float(nota) for nota in linha[-1].split(" - ")]
media = float("{:.1f}".format(float(sum(notas) / len(notas))))
return media
def alunos_medias(arquivo):
"""Receives the file path/name and prints each student's name and
average grade on the screen. If the file does not exist,
it is created"""
try:
with open(arquivo, "a", encoding="utf-8") as _:
pass
with open(arquivo, "r", encoding="utf-8") as leitura:
texto = leitura.read().strip().splitlines()
texto = [informacoes.split(";") for informacoes in texto]
print(f"\n\n{'-' * 51}ALUNOS E MÉDIAS{'-' * 51}")
[print(f"{linha[1]} - {media_aluno(linha)}\n{'-' * 117}") for linha in texto]
except ValueError:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("ERRO AO LER O ARQUIVO!")
print("-" * 117)
except FileNotFoundError:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("O PROGRAMA NÃO POSSUI PERMISSÃO PARA CRIAR UM DIRETÓRIO/PASTA!")
print("-" * 117)
except OSError:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("O SO NÃO ACEITA CARACTERES ESPECIAIS EM NOMES DE ARQUIVOS!")
print("-" * 117)
except IndexError:
print(f"\n{'-' * 117}")
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("O MODO QUE AS INFORMAÇÕES SE ENCONTRAM NO TEXTO É INVÁLIDO!")
print("-" * 117)
def alunos_aprovados(arquivo):
"""Receives the file path/name and prints on the screen the names
of the students who passed. If the file does not exist,
it is created"""
try:
with open(arquivo, "a", encoding="utf-8") as _:
pass
with open(arquivo, "r", encoding="utf-8") as leitura:
texto = leitura.read().strip().splitlines()
texto = [informacoes.split(";") for informacoes in texto]
print(f"\n\n{'-' * 54}APROVADOS{'-' * 54}")
[print(f"{linha[1]}\n{'-' * 117}") for linha in texto if media_aluno(linha) >= 6.0]
except ValueError:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("ERRO AO LER O ARQUIVO!")
print("-" * 117)
except FileNotFoundError:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("O PROGRAMA NÃO POSSUI PERMISSÃO PARA CRIAR UM DIRETÓRIO/PASTA!")
print("-" * 117)
except OSError:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("O SO NÃO ACEITA CARACTERES ESPECIAIS EM NOMES DE ARQUIVOS!")
print("-" * 117)
except IndexError:
print(f"\n{'-' * 117}")
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("O MODO QUE AS INFORMAÇÕES SE ENCONTRAM NO TEXTO É INVÁLIDO!")
print("-" * 117)
def alunos_reprovados(arquivo):
"""Receives the file path/name and prints on the screen the names
of the students who failed. If the file does not exist,
it is created"""
try:
with open(arquivo, "a", encoding="utf-8") as _:
pass
with open(arquivo, "r", encoding="utf-8") as leitura:
texto = leitura.read().strip().splitlines()
texto = [informacoes.split(";") for informacoes in texto]
print(f"\n\n{'-' * 54}REPROVADOS{'-' * 54}")
[print(f"{linha[1]}\n{'-' * 117}") for linha in texto if media_aluno(linha) < 6.0]
except ValueError:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("ERRO AO LER O ARQUIVO!")
print("-" * 117)
except FileNotFoundError:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("O PROGRAMA NÃO POSSUI PERMISSÃO PARA CRIAR UM DIRETÓRIO/PASTA!")
print("-" * 117)
except OSError:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("O SO NÃO ACEITA CARACTERES ESPECIAIS EM NOMES DE ARQUIVOS!")
print("-" * 117)
except IndexError:
print(f"\n{'-' * 117}")
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("O MODO QUE AS INFORMAÇÕES SE ENCONTRAM NO TEXTO É INVÁLIDO!")
print("-" * 117)
if __name__ == "__main__":
nome_arquivo = "relacao_notas.txt"
try:
while True:
# Since the functions already save the data to disk automatically,
# a separate "save data to disk" option is not implemented here
print(f"\n\n{'-' * 56}MENU{'-' * 57}")
print("1 - Definir informações da turma")
print(f"{'-' * 117}")
print("2 - Inserir aluno e notas")
print(f"{'-' * 117}")
print("3 - Exibir alunos e médias")
print(f"{'-' * 117}")
print("4 - Exibir alunos aprovados")
print(f"{'-' * 117}")
print("5 - Exibir alunos reprovados")
print(f"{'-' * 117}")
print("6 - Sair do programa (fim)")
print(f"{'-' * 117}")
opcao = abs(int(input("\nInsira o número da opção que você deseja: ")))
print(f"{'-' * 117}")
if opcao == 1:
informacoes_turma(nome_arquivo)
elif opcao == 2:
inserir_notas_alunos(nome_arquivo)
elif opcao == 3:
alunos_medias(nome_arquivo)
elif opcao == 4:
alunos_aprovados(nome_arquivo)
elif opcao == 5:
alunos_reprovados(nome_arquivo)
elif opcao == 6:
print(f"\n\n{'-' * 51}FIM DO PROGRAMA{'-' * 51}")
break
else:
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("OPÇÃO INVÁLIDA!")
print("-" * 117)
except ValueError:
print("-" * 117)
print(f"\n\n{'-' * 56}ERRO{'-' * 57}")
print("OPÇÃO DEVE SER UM NÚMERO INTEIRO!")
print("-" * 117)
| [
"[email protected]"
] | |
cc16d1697225baee47a86dda51adb9016bdd330c | 3f394cd47a1aaf0ae2f8de5ab9854f52341e017a | /tests/conftest.py | 0ec2f5ef473a93e1446046c292552c5de1df0cff | [
"MIT"
] | permissive | devildeveloper/Clay | e3771d97d23ae3ba7d866d8921102d50e95a6562 | ca419ee4cfe191724ed68e3507515a5b258bb4bb | refs/heads/master | 2021-01-18T02:27:22.094481 | 2013-11-18T20:24:02 | 2013-11-18T20:24:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | # -*- coding: utf-8 -*-
"""
Directory-specific fixtures, hooks, etc. for py.test
"""
from clay import Clay
import pytest
from .helpers import TESTS
@pytest.fixture()
def c():
return Clay(TESTS)
@pytest.fixture()
def t(c):
return c.get_test_client()
| [
"[email protected]"
] | |
42dc6d18884578c84f4ca5272b7590683a423d4d | 532549735aab20e7948511b63e0fb77cc5aedacf | /chaussette/backend/_fastgevent.py | c43809bd8d374be7c03b29174b2ce058a6b65653 | [
"Apache-2.0"
] | permissive | ericem/chaussette | f71ac35990b2b7aa41610ec4be867321ce3be89f | fe62725ca1d018bb26c024f796447b6c761f00e0 | refs/heads/master | 2021-01-18T10:52:43.720192 | 2013-05-02T13:38:23 | 2013-05-02T13:38:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | import socket
from gevent.wsgi import WSGIServer
from gevent import monkey
from chaussette.util import create_socket
class Server(WSGIServer):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
def __init__(self, listener, application=None, backlog=None,
spawn='default', log='default', handler_class=None,
environ=None, **ssl_args):
monkey.noisy = False
monkey.patch_all()
host, port = listener
self.socket = create_socket(host, port, self.address_family,
self.socket_type, backlog=backlog)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_address = self.socket.getsockname()
log = None
super(Server, self).__init__(self.socket, application, None, spawn,
log, handler_class, environ, **ssl_args)
| [
"[email protected]"
] | |
f7d0ebc5b5c74035f2e5e648525b0bdabb67d31e | ee53b0262007b2f0db0fe15b2ad85f65fafa4e25 | /Leetcode/441. Arranging Coins.py | dfa616241b4d3e2f18fe71fc819dff41930a76d6 | [] | no_license | xiaohuanlin/Algorithms | bd48caacb08295fc5756acdac609be78e143a760 | 157cbaeeff74130e5105e58a6b4cdf66403a8a6f | refs/heads/master | 2023-08-09T05:18:06.221485 | 2023-08-08T11:53:15 | 2023-08-08T11:53:15 | 131,491,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | '''
You have a total of n coins that you want to form in a staircase shape, where every k-th row must have exactly k coins.
Given n, find the total number of full staircase rows that can be formed.
n is a non-negative integer and fits within the range of a 32-bit signed integer.
Example 1:
n = 5
The coins can form the following rows:
¤
¤ ¤
¤ ¤
Because the 3rd row is incomplete, we return 2.
Example 2:
n = 8
The coins can form the following rows:
¤
¤ ¤
¤ ¤ ¤
¤ ¤
Because the 4th row is incomplete, we return 3.
'''
import unittest
class Solution(object):
def arrangeCoins(self, n):
"""
:type n: int
:rtype: int
"""
# stair = 0
# while n > 0:
# stair += 1
# n -= stair
# if n < 0:
# stair -= 1
# break
# return stair
# ------------
# i = 0
# while True:
# sum_ = i*(i+1)/2
# if sum_ > n:
# return i - 1
# i += 1
# -------------
# use the root of the function to get answer
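# Derivation (standard algebra, not from the original comment): k full rows use
# k*(k+1)/2 coins, so the answer is the largest k with k*(k+1)/2 <= n. Solving
# k^2 + k - 2n = 0 for its positive root gives k = (sqrt(8n+1) - 1) / 2, and
# int() floors it to the final row count.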
return int(((8*n+1)**0.5 - 1)/2)
class TestSolution(unittest.TestCase):
def test_arrangeCoins(self):
examples = (
(0, 0),
(1, 1),
(5, 2),
(8, 3),
)
for first, second in examples:
self.assert_function(first, second)
def assert_function(self, first, second):
self.assertEqual(Solution().arrangeCoins(first), second, msg="first: {}; second: {}".format(first, second))
unittest.main() | [
"[email protected]"
] | |
40e14b319898e3aa7c0b8261d42eea8d55f52f5d | 1c2cd5951f82a5fb12142621a3b9baea14cf4a31 | /ABC012/ABC012B.py | 93560073a8f77c4f747fddf4fd00cc740a476428 | [] | no_license | ksera332/Atcoder_records | 55ef832eb3e517b99334eb00d2287cd4a1bc83cd | f8b1f13211bae79b7da6d63ba9b1bd177031aef9 | refs/heads/master | 2022-12-08T19:25:26.834948 | 2020-08-29T12:55:30 | 2020-08-29T12:55:30 | 263,906,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py |
# coding: utf-8
# In[37]:
import time
N = int(input())
print(time.strftime('%H:%M:%S', time.gmtime(N)))
| [
"[email protected]"
] | |
cfc155b48e7139b1bf1bea71e66f59e91f6f6b50 | d7c527d5d59719eed5f8b7e75b3dc069418f4f17 | /main/_pythonSnippet1_backup/61/views.py | 3e9bacefeb4c0afffa4042075dad295c84f00a02 | [] | no_license | Aivree/SnippetMatcher | 3e348cea9a61e4342e5ad59a48552002a03bf59a | c8954dfcad8d1f63e6e5e1550bc78df16bc419d1 | refs/heads/master | 2021-01-21T01:20:59.144157 | 2015-01-07T04:35:29 | 2015-01-07T04:35:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,894 | py | from django.shortcuts import render_to_response
from django.template import Template, Context, RequestContext
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.template.loader import get_template
import datetime
from django import forms
from runner.forms import DocumentForm
from runner.models import Document
def list(request):
# Handle file upload
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
newdoc = Document(docfile = request.FILES['docfile'])
newdoc.save()
# Redirect to the document list after POST
return HttpResponseRedirect(reverse('runner.views.list'))
else:
form = DocumentForm() # An empty, unbound form
# Load documents for the list page
documents = Document.objects.all()
# Render list page with the documents and the form
return render_to_response(
'list.html',
{'documents': documents, 'form': form},
context_instance=RequestContext(request)
)
def index(request):
from runner.models import Software
software_list = []
for i in Software.objects.all():
i = str(i).split("|")
software_list.append(i)
t = get_template("bootstrap3.html")
html = t.render(Context({
'bootstrap3_title': 'Run programs',
'software_list': software_list,
}))
return HttpResponse(html)
def software(request, name):
t = get_template("bootstrap3.html")
html = t.render(RequestContext(request, {
'bootstrap3_title': 'Running ' + name,
}))
return HttpResponse(html)
def current_datetime(request):
now = datetime.datetime.now()
t = get_template("bootstrap3.html")
html = t.render(Context({'current_date': now}))
return HttpResponse(html)
| [
"[email protected]"
] | |
078e7534de86ed7c579a2ba0c616d3db8756b6be | d32a1eff193052dd62ad05f638346c7132796c2e | /python/pyspark/pandas/tests/connect/test_parity_groupby_slow.py | 375dc703d956f229358f88f2ca4bde9e8f96075a | [
"CC0-1.0",
"MIT",
"Python-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"EPL-1.0",
"Classpath-exception-2.0",
"GCC-exception-3.1",
"CDDL-1.0",
"CC-BY-SA-3.0",
"LGPL-2.1-only",
"LicenseRef-scancode-unicode",
"CPL-1.0",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-only",
"CC-PDDC",
"NAIST-2003",
"LicenseRef-scancode-other-copyleft",
"EPL-2.0",
"CDDL-1.1",
"BSD-2-Clause"
] | permissive | Kyligence/spark | c266dc19c7c2e2914eea34c9922f97ba17011075 | f29502acf2fe96e23525268b0a29a6338b41bce6 | refs/heads/master | 2023-08-31T08:42:15.254881 | 2023-04-22T00:30:53 | 2023-04-22T00:30:53 | 100,349,194 | 6 | 61 | Apache-2.0 | 2023-09-14T06:29:07 | 2017-08-15T07:04:07 | Scala | UTF-8 | Python | false | false | 2,010 | py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.pandas.tests.test_groupby_slow import GroupBySlowTestsMixin
from pyspark.testing.connectutils import ReusedConnectTestCase
from pyspark.testing.pandasutils import PandasOnSparkTestUtils, TestUtils
class GroupBySlowParityTests(
GroupBySlowTestsMixin, PandasOnSparkTestUtils, TestUtils, ReusedConnectTestCase
):
@unittest.skip("Fails in Spark Connect, should enable.")
def test_diff(self):
super().test_diff()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_dropna(self):
super().test_dropna()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_rank(self):
super().test_rank()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_split_apply_combine_on_series(self):
super().test_split_apply_combine_on_series()
if __name__ == "__main__":
from pyspark.pandas.tests.connect.test_parity_groupby_slow import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| [
"[email protected]"
] | |
e9222d3599e353156217730a4903521d6e392997 | 90c6262664d013d47e9a3a9194aa7a366d1cabc4 | /tests/storage/cases/test_KT1QHRKLkwaHDV6TyY9H4ZU9ZwGuwZ1TWPfg_babylon.py | 69dc0227141ef450501ea7063314cad598bd84b6 | [
"MIT"
] | permissive | tqtezos/pytezos | 3942fdab7aa7851e9ea81350fa360180229ec082 | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | refs/heads/master | 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 | MIT | 2020-12-30T16:44:56 | 2019-12-12T17:47:53 | Python | UTF-8 | Python | false | false | 1,170 | py | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1QHRKLkwaHDV6TyY9H4ZU9ZwGuwZ1TWPfg_babylon(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/mainnet/KT1QHRKLkwaHDV6TyY9H4ZU9ZwGuwZ1TWPfg_babylon.json')
def test_storage_encoding_KT1QHRKLkwaHDV6TyY9H4ZU9ZwGuwZ1TWPfg_babylon(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1QHRKLkwaHDV6TyY9H4ZU9ZwGuwZ1TWPfg_babylon(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1QHRKLkwaHDV6TyY9H4ZU9ZwGuwZ1TWPfg_babylon(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
| [
"[email protected]"
] | |
67d0a2a954213d42ddd71266366f19adab9b7138 | 71678f708e7bb80577b560ab660af2d965f7fa88 | /test.py | 23bb3cfd232d7d25db37f4d52705132a55b38aeb | [] | no_license | natepill/CS-1.2-Tweet-Generator | 7c09b396f37b56c5be45edfa603821389848853f | 64736b69a3701c34ba5f36153af1fa4ad0fef84c | refs/heads/master | 2020-04-02T18:25:45.848092 | 2019-03-13T17:05:47 | 2019-03-13T17:05:47 | 154,699,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | from string import ascii_lowercase as al
print(al)
| [
"[email protected]"
] | |
fb3c1d8faf3f4c7f4a59af63fb46a030978ecd4e | f167dffa2f767a0419aa82bf434852069a8baeb8 | /lib/youtube_dl/extractor/einthusan.py | 4e0f8bc819c70730a476ca31cd4320cecdc25b3d | [
"MIT"
] | permissive | firsttris/plugin.video.sendtokodi | d634490b55149adfdcb62c1af1eb77568b8da3f5 | 1095c58e2bc21de4ab6fcb67a70e4f0f04febbc3 | refs/heads/master | 2023-08-18T10:10:39.544848 | 2023-08-15T17:06:44 | 2023-08-15T17:06:44 | 84,665,460 | 111 | 31 | MIT | 2022-11-11T08:05:21 | 2017-03-11T16:53:06 | Python | UTF-8 | Python | false | false | 3,720 | py | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..compat import (
compat_b64decode,
compat_str,
compat_urlparse,
)
from ..utils import (
extract_attributes,
ExtractorError,
get_elements_by_class,
urlencode_postdata,
)
class EinthusanIE(InfoExtractor):
_VALID_URL = r'https?://(?P<host>einthusan\.(?:tv|com|ca))/movie/watch/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://einthusan.tv/movie/watch/9097/',
'md5': 'ff0f7f2065031b8a2cf13a933731c035',
'info_dict': {
'id': '9097',
'ext': 'mp4',
'title': 'Ae Dil Hai Mushkil',
'description': 'md5:33ef934c82a671a94652a9b4e54d931b',
'thumbnail': r're:^https?://.*\.jpg$',
}
}, {
'url': 'https://einthusan.tv/movie/watch/51MZ/?lang=hindi',
'only_matching': True,
}, {
'url': 'https://einthusan.com/movie/watch/9097/',
'only_matching': True,
}, {
'url': 'https://einthusan.ca/movie/watch/4E9n/?lang=hindi',
'only_matching': True,
}]
# reversed from jsoncrypto.prototype.decrypt() in einthusan-PGMovieWatcher.js
def _decrypt(self, encrypted_data, video_id):
return self._parse_json(compat_b64decode((
encrypted_data[:10] + encrypted_data[-1] + encrypted_data[12:-1]
)).decode('utf-8'), video_id)
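# (The slicing above rebuilds the real base64 payload: the characters at
# index 10 and 11 of the obfuscated string are dropped and the final character
# is moved into their place before decoding.)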
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
host = mobj.group('host')
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(r'<h3>([^<]+)</h3>', webpage, 'title')
player_params = extract_attributes(self._search_regex(
r'(<section[^>]+id="UIVideoPlayer"[^>]+>)', webpage, 'player parameters'))
page_id = self._html_search_regex(
'<html[^>]+data-pageid="([^"]+)"', webpage, 'page ID')
video_data = self._download_json(
'https://%s/ajax/movie/watch/%s/' % (host, video_id), video_id,
data=urlencode_postdata({
'xEvent': 'UIVideoPlayer.PingOutcome',
'xJson': json.dumps({
'EJOutcomes': player_params['data-ejpingables'],
'NativeHLS': False
}),
'arcVersion': 3,
'appVersion': 59,
'gorilla.csrf.Token': page_id,
}))['Data']
if isinstance(video_data, compat_str) and video_data.startswith('/ratelimited/'):
raise ExtractorError(
'Download rate reached. Please try again later.', expected=True)
ej_links = self._decrypt(video_data['EJLinks'], video_id)
formats = []
m3u8_url = ej_links.get('HLSLink')
if m3u8_url:
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, ext='mp4', entry_protocol='m3u8_native'))
mp4_url = ej_links.get('MP4Link')
if mp4_url:
formats.append({
'url': mp4_url,
})
self._sort_formats(formats)
description = get_elements_by_class('synopsis', webpage)[0]
thumbnail = self._html_search_regex(
r'''<img[^>]+src=(["'])(?P<url>(?!\1).+?/moviecovers/(?!\1).+?)\1''',
webpage, 'thumbnail url', fatal=False, group='url')
if thumbnail is not None:
thumbnail = compat_urlparse.urljoin(url, thumbnail)
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'description': description,
}
| [
"[email protected]"
] | |
2d441b942de17b1981ea070088659addc116d4ac | 4f3a4c194451eae32f1ff7cf3b0db947e3892365 | /142/main.py | 7dd2d69286c4280a2dc6408e5232b45fffb6d8a6 | [] | no_license | szhongren/leetcode | 84dd848edbfd728b344927f4f3c376b89b6a81f4 | 8cda0518440488992d7e2c70cb8555ec7b34083f | refs/heads/master | 2021-12-01T01:34:54.639508 | 2021-11-30T05:54:45 | 2021-11-30T05:54:45 | 83,624,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | """
Given a linked list, return the node where the cycle begins. If there is no cycle, return null.
Note: Do not modify the linked list.
Follow up:
Can you solve it without using extra space?
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
def make_list(ls):
if len(ls) == 0:
return None
list_nodes = list(map(lambda x: ListNode(x), ls))
for i, v in enumerate(list_nodes[1:]):
list_nodes[i].next = v
return list_nodes[0]
class Solution(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head == None:
return None
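# Floyd's tortoise-and-hare, phase 2 rationale (added note): when the 2x-speed
# pointer first meets the 1x pointer, the slow pointer has walked a + b steps
# (a = head-to-cycle-start, b = cycle-start-to-meeting-point) and the fast one
# 2(a + b), so the extra a + b is a whole number of cycle lengths. Walking a
# further steps from the meeting point therefore lands on the cycle start, which
# is why cycle_start (from head) and slow (from the meeting point) converge there.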
slow = head
fast = head
cycle_start = head
while slow.next and fast.next and fast.next.next:
slow = slow.next
fast = fast.next.next
if slow == fast:
while cycle_start != slow:
cycle_start = cycle_start.next
slow = slow.next
return slow
return None
| [
"[email protected]"
] | |
1b2708b9fd69527e897aec7549fa95a9ed7fafd3 | 6d11eda98e529286c775942f63013619f37246c5 | /examples/potsdam/semantic_segmentation.py | a3ad3085dd6f3c7d8d3532839dfb3cf35057feda | [
"Apache-2.0"
] | permissive | Pandinosaurus/raster-vision-examples | 388438ddd58c2c0fd8a7eced5be02cc5518e80f8 | d6957a5de6d49fbe7d419da67979725eaab43ee7 | refs/heads/master | 2021-07-18T08:17:33.274224 | 2020-07-03T02:52:20 | 2020-07-03T02:52:20 | 184,796,275 | 1 | 0 | NOASSERTION | 2020-07-03T04:10:43 | 2019-05-03T17:38:55 | Jupyter Notebook | UTF-8 | Python | false | false | 5,610 | py | import os
from os.path import join
import rastervision as rv
from examples.utils import str_to_bool, save_image_crop
class PotsdamSemanticSegmentation(rv.ExperimentSet):
def exp_main(self, raw_uri, processed_uri, root_uri, test=False, use_tf=False):
"""Run an experiment on the ISPRS Potsdam dataset.
Uses Tensorflow Deeplab backend with Mobilenet architecture. Should get to
F1 score of ~0.86 (including clutter class) after 6 hours of training on a P3
instance.
Args:
raw_uri: (str) directory of raw data
processed_uri: (str) directory for processed data (e.g. crops made for test runs)
root_uri: (str) root directory for experiment output
test: (bool) if True, run a very small experiment as a test and generate
debug output
use_tf: (bool) if True, use Tensorflow Deeplab backend.
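Example:
A typical local run looks like the following (sketch; the URIs are
placeholders and must point at your copies of the data):
rastervision run local -e examples.potsdam.semantic_segmentation \
-a raw_uri /data/potsdam/raw -a processed_uri /data/potsdam/processed \
-a root_uri /data/potsdam/output -a test True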
"""
test = str_to_bool(test)
use_tf = str_to_bool(use_tf)
exp_id = 'potsdam-seg'
train_ids = ['2-10', '2-11', '3-10', '3-11', '4-10', '4-11', '4-12', '5-10',
'5-11', '5-12', '6-10', '6-11', '6-7', '6-9', '7-10', '7-11',
'7-12', '7-7', '7-8', '7-9']
val_ids = ['2-12', '3-12', '6-12']
# infrared, red, green
channel_order = [3, 0, 1]
debug = False
if test:
debug = True
train_ids = train_ids[0:1]
val_ids = val_ids[0:1]
exp_id += '-test'
classes = {
'Car': (1, '#ffff00'),
'Building': (2, '#0000ff'),
'Low Vegetation': (3, '#00ffff'),
'Tree': (4, '#00ff00'),
'Impervious': (5, "#ffffff"),
'Clutter': (6, "#ff0000")
}
task = rv.TaskConfig.builder(rv.SEMANTIC_SEGMENTATION) \
.with_chip_size(300) \
.with_classes(classes) \
.with_chip_options(window_method='sliding',
stride=300, debug_chip_probability=1.0) \
.build()
if use_tf:
batch_size = 8
num_steps = 100000
if test:
num_steps = 1
batch_size = 2
model_type = rv.MOBILENET_V2
backend = rv.BackendConfig.builder(rv.TF_DEEPLAB) \
.with_task(task) \
.with_model_defaults(model_type) \
.with_train_options(sync_interval=600) \
.with_num_steps(num_steps) \
.with_batch_size(batch_size) \
.with_debug(debug) \
.build()
else:
batch_size = 8
num_epochs = 10
if test:
batch_size = 2
num_epochs = 1
backend = rv.BackendConfig.builder(rv.PYTORCH_SEMANTIC_SEGMENTATION) \
.with_task(task) \
.with_train_options(
lr=1e-4,
batch_size=batch_size,
num_epochs=num_epochs,
model_arch='resnet50',
debug=debug) \
.build()
def make_scene(id):
id = id.replace('-', '_')
raster_uri = '{}/4_Ortho_RGBIR/top_potsdam_{}_RGBIR.tif'.format(
raw_uri, id)
label_uri = '{}/5_Labels_for_participants/top_potsdam_{}_label.tif'.format(
raw_uri, id)
if test:
crop_uri = join(
processed_uri, 'crops', os.path.basename(raster_uri))
save_image_crop(raster_uri, crop_uri, size=600)
raster_uri = crop_uri
# Using with_rgb_class_map because label TIFFs have classes encoded as RGB colors.
label_source = rv.LabelSourceConfig.builder(rv.SEMANTIC_SEGMENTATION) \
.with_rgb_class_map(task.class_map) \
.with_raster_source(label_uri) \
.build()
# URI will be injected by scene config.
# Using with_rgb(True) because we want prediction TIFFs to be in RGB format.
label_store = rv.LabelStoreConfig.builder(rv.SEMANTIC_SEGMENTATION_RASTER) \
.with_rgb(True) \
.build()
scene = rv.SceneConfig.builder() \
.with_task(task) \
.with_id(id) \
.with_raster_source(raster_uri,
channel_order=channel_order) \
.with_label_source(label_source) \
.with_label_store(label_store) \
.build()
return scene
train_scenes = [make_scene(id) for id in train_ids]
val_scenes = [make_scene(id) for id in val_ids]
dataset = rv.DatasetConfig.builder() \
.with_train_scenes(train_scenes) \
.with_validation_scenes(val_scenes) \
.build()
experiment = rv.ExperimentConfig.builder() \
.with_id(exp_id) \
.with_task(task) \
.with_backend(backend) \
.with_dataset(dataset) \
.with_root_uri(root_uri) \
.build()
return experiment
if __name__ == '__main__':
rv.main()
| [
"[email protected]"
] | |
f4be1784fe13e6274c766985a165f620b822bcb1 | 930309163b930559929323647b8d82238724f392 | /abc216_e.py | b2c5d66d2e922c823160cdcb8e9ca31ca835c4d4 | [] | no_license | GINK03/atcoder-solvers | 874251dffc9f23b187faa77c439b445e53f8dfe1 | b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7 | refs/heads/master | 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py |
import sys
import logging
def main():
n, k = map(int, input().split())
a = list(map(int, input().split()))
a = sorted(a, reverse=True) + [0]
def cumsum(x):
return x * (x + 1) // 2
k_remaining = k
ans = 0
for i in range(n):
if a[i] == a[i + 1]:
continue
if k_remaining >= (i + 1) * (a[i] - a[i + 1]):
ans += (cumsum(a[i]) - cumsum(a[i + 1])) * (i + 1)
k_remaining -= (i + 1) * (a[i] - a[i + 1])
else:
j = k_remaining // (i + 1)
r = k_remaining % (i + 1)
logging.debug((j, r))
ans += (cumsum(a[i]) - cumsum(a[i] - j)) * (i + 1)
ans += (a[i] - j) * r
k_remaining = 0
if k_remaining == 0:
break
print(ans)
if __name__ == "__main__":
loglevel = "DEBUG" if "--debug" in sys.argv else "WARNING"
numeric_level = getattr(logging, loglevel, None)
log_format = "%(levelname)s (%(asctime)s.%(msecs)d): %(message)s"
logging.basicConfig(level=numeric_level, format=log_format, datefmt="%I:%M:%S")
main()
| [
"[email protected]"
] | |
e7b69c6f075b17d67552da7d91dd5b80b77ed235 | 5f0eeef355fa84b165d4e0707e8874755cc03259 | /chp02_forces/Exercise_2_10_attractrepel/Attractor.py | b265f28a642f9d31b8c7540541527dd188cd2d56 | [] | no_license | kidult00/NatureOfCode-Examples-Python | 5835fbed114f3991b9986852f31d29a0a46d7e53 | 42461590deebbe305d5815ff0d207ff974335ad5 | refs/heads/master | 2021-05-11T04:47:53.999705 | 2018-03-07T15:54:12 | 2018-03-07T15:54:12 | 117,946,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,195 | py | # The Nature of Code - Python Version
# [kidult00](https://github.com/kidult00)
# A class for a draggable attractive body in our world
# Attraction = G * M1 * M2 / Distance^2
class Attractor(object):
def __init__(self):
self.mass = 10.0 # Mass, tied to size
self.g = 1.0 # Gravitational Constant
self.position = PVector(width/2, height/2)
self.dragOffset = PVector(0.0, 0.0) # holds the offset for when object is clicked on
self.dragging = False
self.rollover = False
def attract(self, m):
force = PVector.sub(self.position, m.position) # Calculate direction of force
d = force.mag() # Distance between objects
d = constrain(d, 5.0, 25.0) # Limiting the distance to eliminate "extreme" results for very close or very far objects
force.normalize() # Normalize vector (distance doesn't matter here, we just want this vector for direction)
strength = (self.g * self.mass * m.mass) / (d * d) # Calculate gravitational force magnitude
force.mult(strength) # Get force vector --> magnitude * direction
return force
# Method to display
def display(self):
ellipseMode(CENTER)
strokeWeight(0)
stroke(0)
if self.dragging : fill(50)
elif self.rollover : fill(100)
else : fill(0)
ellipse(self.position.x, self.position.y, self.mass*6, self.mass*6)
# The methods below are for mouse interaction
def clicked(self, mx, my):
d = dist(mx, my, self.position.x, self.position.y)
if d < self.mass :
self.dragging = True
self.dragOffset.x = self.position.x - mx
self.dragOffset.y = self.position.y - my
def hover(self, mx, my):
d = dist(mx, my, self.position.x, self.position.y)
if d < self.mass : self.rollover = True
else: self.rollover = False
def stopDragging(self):
self.dragging = False
def drag(self):
if self.dragging :
self.position.x = mouseX + self.dragOffset.x
self.position.y = mouseY + self.dragOffset.y | [
"[email protected]"
] | |
9b0612a4597a28b9bfac2f4dc745eb4104ab302c | 384d0be5ac54b306b945cf38c10d9b0a44c975ea | /devstack/tools/uec/meta.py | 5b845d81a69b19773c66ea4fb61a1a9065a88c47 | [] | no_license | ashokcse/openstack-bill | 05ae313637b3cfecba946d2a9b32e8c7609fc721 | 1a3d7575d4b341f64fa1764ed47e47a7504a9bcc | refs/heads/master | 2021-01-18T14:05:24.696165 | 2012-09-12T11:29:20 | 2012-09-12T11:29:20 | 5,424,267 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | import sys
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SimpleHTTPServer import SimpleHTTPRequestHandler
def main(host, port, HandlerClass = SimpleHTTPRequestHandler,
ServerClass = HTTPServer, protocol="HTTP/1.0"):
"""simple HTTP server that listens on a given address:port"""
server_address = (host, port)
HandlerClass.protocol_version = protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
if sys.argv[1:]:
address = sys.argv[1]
else:
address = '0.0.0.0'
if ':' in address:
host, port = address.split(':')
else:
host = address
port = 8080
main(host, int(port))
| [
"[email protected]"
] | |
afef5e088c4a797fddf972b908f3d05308a8a5c5 | a512b8893b0d2de827d6292e810f3a98b41e132c | /Week6/Day1/Solutions/Python/prog4.py | 8f234ad7cc815e2ff244fd79557baa2595b427a1 | [] | no_license | Audarya07/Daily-Flash-Codes | d771079fd0d470e2d3e05679f17f32fb64b4f426 | cf96ca2b1676b038e243fac67be778381492ffeb | refs/heads/master | 2022-11-06T15:37:47.180729 | 2020-06-25T16:20:55 | 2020-06-25T16:20:55 | 274,960,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | for i in range(5):
num = 5
for j in range(5):
if i>j:
print(" ",end=" ")
else:
print(num,end=" ")
num-=1
print()
| [
"[email protected]"
] | |
bef4ed0adc518bd890aba6eb08948e612e7755b4 | 9eaa2c64a777bd24a3cccd0230da5f81231ef612 | /study/1905/month01/code/Stage1/day04/exercise02.py | 4527340f5bf057badc200a68d1b1fcc8edce6772 | [
"MIT"
] | permissive | Dython-sky/AID1908 | 4528932f2ca66b844d8a3fcab5ed8bf84d20eb0c | 46cd54a7b36b5f009974f2bbb7005a4ad440ca1a | refs/heads/master | 2022-04-14T12:23:30.426270 | 2020-04-01T18:05:19 | 2020-04-01T18:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | """
A sheet of paper is 0.01 mm thick.
How many times must it be folded in half before its thickness exceeds Mount Everest (8,844.43 m)?
"""
thickness = 0.01 / 1000
count = 0
while thickness <= 8848.43:
thickness *= 2
count += 1
# print(thickness)
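# Closed-form check of the loop above: 0.01 mm doubled k times first exceeds
# 8848.43 m when 2**k > 8848.43 / 0.00001 ≈ 8.85e8, i.e. k = 30
# (2**29 ≈ 5.4e8 is still short, 2**30 ≈ 1.07e9 is past it).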
print("一张纸对折{}次能超过珠穆朗玛峰".format(count))
| [
"[email protected]"
] | |
d49ea65ea1d608754984e1885d288d255efbf3a9 | a8f615e6f2e00bcc72cd67475c5dd4a9ff0e6c14 | /imdemo/imdemo/pages/nodes/pin.py | 15058ecdfab3a662b795bd45d0d98c33f047f968 | [
"MIT"
] | permissive | KangWeon/arcade-imgui | fcf43f2399f56960b5249bd80e4e16d8639be8e2 | 24a8d423440cd9adaf3373a9c2492d04d8862062 | refs/heads/master | 2023-01-01T03:04:05.605347 | 2020-10-18T08:04:21 | 2020-10-18T08:04:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | class Pin:
def __init__(self, node, name):
self.node = node
self.name = name
self.x = 0
self.y = 0
def set_position(self, pos):
self.x, self.y = pos
def get_position(self):
return (self.x, self.y)
def draw(self):
pass
class Input(Pin):
pass
class Output(Pin):
pass | [
"[email protected]"
] | |
8da13cd142ec6b62a14d15b73cfe977ec43475ff | a97fb0584709e292a475defc8506eeb85bb24339 | /source code/code/ch203.py | 3aa2f981b9a6399e15c03b0b1aeb0e4e562fef35 | [] | no_license | AAQ6291/PYCATCH | bd297858051042613739819ed70c535901569079 | 27ec4094be785810074be8b16ef84c85048065b5 | refs/heads/master | 2020-03-26T13:54:57.051016 | 2018-08-17T09:05:19 | 2018-08-17T09:05:19 | 144,963,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | #!/usr/bin/env python
#coding=utf-8
from __future__ import print_function
## Declare variables x, y, z as a tuple, a list, and a dict respectively.
x, y, z = (), [], {}
## Although they are all empty structures, they are not equal to one another.
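## (Reason: == between different container types such as tuple, list and dict
## simply returns False, so three "empty" objects of different types never compare equal.)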
if x == y == z:
print(x, y, z, "相等")
else:
print(x, y, z, "不相等")
if x == None:
print(x, " 相等 None")
else:
print(x, " 不相等 None")
if y == None:
print(y, " 相等 None")
else:
print(y, " 不相等 None")
if z == None:
print(z, " 相等 None")
else:
print(z, " 不相等 None")
| [
"[email protected]"
] | |
a1b3558b03ae177a9ec695640ddab9481f1cfb65 | 093b9569be9d1c4e5daf92efbebc38f680917b2d | /.history/base/views_20210829090123.py | bfec5c7dacaf07d85a118c58236ec494edd47b23 | [] | no_license | Justin-Panagos/todoList | 95b1e97ff71af1b0be58e7f8937d726a687cea4d | 10539219b59fcea00f8b19a406db3d4c3f4d289e | refs/heads/master | 2023-08-04T13:27:13.309769 | 2021-08-29T14:06:43 | 2021-08-29T14:06:43 | 400,827,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py | from django.shortcuts import render
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView, FormView
from django.urls import reverse_lazy
from django.contrib.auth.views import LoginView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import login
from .models import Task
class CustoomLoginView(LoginView):
template_name = 'base/login.html'
fields = '__all__'
redirect_authenticated_user = True
def get_success_url(self):
return reverse_lazy('tasks')
class RegisterPage(FormView):
template_name = 'base/register.html'
form_class= UserCreationForm
redirect_authenticated_user = True
success_url = reverse_lazy('tasks')
    def form_valid(self, form):
        # Django calls form_valid (not form_validate) when the form passes
        # validation; save the new user and log them in before redirecting.
        user = form.save()
        if user is not None:
            login(self.request, user)
        return super(RegisterPage, self).form_valid(form)
class TaskList( LoginRequiredMixin, ListView):
model = Task
context_object_name = 'tasks'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['tasks'] = context['tasks'].filter(user=self.request.user)
context['count'] = context['tasks'].filter(complete=False).count()
return context
class TaskDetail(LoginRequiredMixin, DetailView):
model = Task
context_object_name = 'task'
template_name = 'base/task.html'
class TaskCreate(LoginRequiredMixin, CreateView):
model = Task
fields = ['title','description','complete']
success_url = reverse_lazy('tasks')
def form_valid(self, form):
form.instance.user = self.request.user
return super(TaskCreate, self).form_valid(form)
class TaskUpdate( LoginRequiredMixin, UpdateView):
model = Task
fields = ['title','description','complete']
success_url = reverse_lazy('tasks')
class TaskDelete(LoginRequiredMixin, DeleteView):
model = Task
context_object_name = 'task'
success_url = reverse_lazy('tasks') | [
"[email protected]"
] | |
311b252fcafda3be30a0ef65d230b9e80034b49b | 4b3d25e20d710442eb63ed0a655c1ae1cfe68303 | /admin/xstat.py | f3ab436a10a6b2750beeccd42aad970000f8aaa4 | [
"ISC"
] | permissive | openafs-contrib/afs-tools | ad720ae6c56a9500734eed2d84d11b7f58f01f67 | 6509810b8c66454e78514c78bb30d12281067edb | refs/heads/master | 2022-10-21T03:06:27.350718 | 2022-10-11T19:19:55 | 2022-10-14T17:56:01 | 6,767,969 | 5 | 6 | ISC | 2021-07-08T13:39:37 | 2012-11-19T21:54:11 | Perl | UTF-8 | Python | false | false | 10,758 | py | #!/usr/bin/env python
# Copyright (c) 2014-2017 Sine Nomine Associates
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THE SOFTWARE IS PROVIDED 'AS IS' AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#------------------------------------------------------------------------------
#
# Gather stats from OpenAFS file servers.
#
# This tool runs the OpenAFS rxdebug and xstat utilities to gather statistical
# information from running file servers and cache managers over the network.
#
# NOTE: This tool requires a patched version of xstat_fs_test and rxdebug which
# provide a more regular output format.
#
# Example config file:
#
# cat ~/.xstat.conf
# [logging]
# level = info
#    filename = /tmp/xstat.log
#
# [collect]
# destdir = /tmp/xstats
# sleep = 60
# once = no
#
# [cell0]
# cellname = example.com
# fileservers =
# 172.16.50.143
# 172.16.50.144
#
#
import os
import sys
import errno
import re
import time
import logging
import pprint
import subprocess
import signal
import ConfigParser
LOG_LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
LOG_CONSOLE_FORMAT = '%(levelname)s %(message)s'
LOG_FILE_FORMAT = '%(asctime)s %(levelname)s %(message)s'
# Log to stderr until the log file name is read from the config.
logging.basicConfig(level=LOG_LEVELS['info'], format=LOG_CONSOLE_FORMAT)
def debug(msg):
logging.debug(msg)
def info(msg):
logging.info(msg)
def warning(msg):
logging.warning(msg)
def error(msg):
logging.error(msg)
def fatal(msg):
sys.stderr.write("ERROR: {}\n".format(msg))
logging.critical(msg)
sys.exit(1)
def setup_logging(filename, level):
# Update the logger to log to a file instead of stderr now that we have
# the log filename and level.
logger = logging.getLogger()
if filename != '-':
debug("writing log messages to file {}".format(filename))
old_handler = logger.handlers[0]
new_handler = logging.FileHandler(filename)
new_handler.setLevel(LOG_LEVELS[level])
new_handler.setFormatter(logging.Formatter(LOG_FILE_FORMAT))
logger.addHandler(new_handler)
logger.removeHandler(old_handler)
logger.setLevel(LOG_LEVELS[level])
def read_config():
"""Read the config and set defaults.
Read the config file and set defaults for any missing values.
Create a config file with default values if not found."""
filename = os.path.expanduser('~/.xstat.conf')
c = ConfigParser.SafeConfigParser()
debug("reading configuration file {}".format(filename))
c.read(filename)
if not c.has_section('logging'):
c.add_section('logging')
if not c.has_option('logging', 'level'):
c.set('logging', 'level', 'info')
if not c.has_option('logging', 'filename'):
c.set('logging', 'filename', '-') # default to stdout
if not c.has_section('collect'):
c.add_section('collect')
if not c.has_option('collect', 'destdir'):
c.set('collect', 'destdir', '/tmp/xstats')
if not c.has_option('collect', 'sleep'):
c.set('collect', 'sleep', '60')
if not c.has_option('collect', 'once'):
c.set('collect', 'once', 'no')
if not c.has_section('cell0'):
c.add_section('cell0')
if not c.has_option('cell0', 'cellname'):
c.set('cell0', 'cellname', detect_cellname())
if not c.has_option('cell0', 'fileservers'):
cellname = c.get('cell0', 'cellname')
servers = detect_fileservers(cellname) # returns a dict
addrs = [a[0] for a in servers.values()] # use primary address
c.set('cell0', 'fileservers', "\n"+"\n".join(addrs))
if not os.path.exists(filename): # Dont clobber existing config.
with open(filename, 'w') as f:
info("Writing config file {}".format(filename))
c.write(f)
return c
def mkdirp(path):
"""Make a directory with parents if it does not already exist."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def which(command):
"""Search for a command in the PATH."""
for path in os.environ['PATH'].split(os.pathsep):
filename = os.path.join(path, command)
if os.path.isfile(filename) and os.access(filename, os.X_OK):
return filename
error("Could not find command '{}' in PATH {}".format(command, os.environ['PATH']))
return None
def detect_cellname():
"""Detect the current cellname with the fs command.
This assumes the current host is running an OpenAFS client."""
info("Searching for cellname")
cellname = None
cmd = [which('fs'), 'wscell']
debug(subprocess.list2cmdline(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
with p.stdout:
for line in iter(p.stdout.readline, ''):
match = re.match(r"This workstation belongs to cell '([^']+)'", line)
if match:
cellname = match.group(1)
info("Cellname is {}".format(cellname))
return cellname
def detect_fileservers(cellname):
"""Detect the file servers with the vos listaddrs command."""
info("Searching for file servers in cell {}".format(cellname))
uuids = {}
uuid = None
cmd = [which('vos'), 'listaddrs', '-printuuid', '-noresolve', '-noauth', '-cell', cellname]
debug(subprocess.list2cmdline(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
with p.stdout:
for line in iter(p.stdout.readline, ''):
match = re.match(r'UUID: (\S+)+', line)
if match:
uuid = match.group(1)
uuids[uuid] = []
match = re.match(r'([\d\.]+)', line)
if match:
addr = match.group(1)
uuids[uuid].append(addr)
info("Found servers: {}".format(pprint.pformat(uuids)))
return uuids
def get_usage(command):
"""Get the command usage as a string."""
pathname = which(command)
if pathname is None:
fatal("Unable to find command '{}' in PATH.".format(command))
cmd = [pathname, '-h']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    out, _ = p.communicate()  # communicate() returns (stdout, stderr); stderr is not piped here
return out
def check_commands():
"""Check the required commands are available."""
usage = get_usage('rxdebug')
if not '-raw' in usage:
fatal("rxdebug is missing the '-raw' option.")
usage = get_usage('xstat_fs_test')
for option in ['-format', '-delimiter']:
if not option in usage:
fatal("xstat_fs_test is missing the '{}' option.".format(option))
def xstat_fs(host, collection, out):
"""Retrieve xstats from a server and write them to the stream."""
cmd = [which('xstat_fs_test'), host, '-once', '-co', collection, '-format', 'dsv', '-delimiter', ' ']
cmdline = subprocess.list2cmdline(cmd)
debug(cmdline)
p = subprocess.Popen(cmd, stdout=out, stderr=subprocess.PIPE)
with p.stderr:
for line in iter(p.stderr.readline, ''):
line = line.rstrip()
warning("xstat_fs_test: {}".format(line))
code = p.wait()
if code:
error("xstat_fs_test failed ({}): {}".format(code, cmdline))
def rxstats(host, port, out):
"""Retrieve rxstats from a server and write them to the stream."""
cmd = [which('rxdebug'), host, port, '-rxstats', '-noconns', '-raw']
cmdline = subprocess.list2cmdline(cmd)
timestamp = int(time.time())
debug(cmdline)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
with p.stdout:
for line in iter(p.stdout.readline, ''):
line = line.rstrip()
match = re.match(r'(\S+)\s(\S+)', line)
if match:
name = match.group(1)
value = match.group(2)
out.write("{} {} {} {} {}\n".format(timestamp, host, port, name, value))
else:
warning("rxdebug: {}".format(line))
code = p.wait()
if code:
error("rxdebug failed ({}): {}".format(code, cmdline))
running = True
def sigint_handler(signal, frame):
global running
sys.stdout.write("\nquitting...\n")
info("Signal SIGINT caught.")
running = False
def main():
global running
config = read_config()
setup_logging(config.get('logging','filename'), config.get('logging','level'))
destdir = os.path.expanduser(config.get('collect', 'destdir'))
mkdirp(destdir)
check_commands() # Exits if the required commands are missing.
info('Starting main loop.')
signal.signal(signal.SIGINT, sigint_handler)
while running:
for section in config.sections():
if section.startswith('cell'):
cellname = config.get(section, 'cellname')
servers = config.get(section, 'fileservers').strip().split()
timestamp = time.strftime('%Y-%m-%d')
filename = os.path.join(destdir, "{}-{}.dat".format(cellname, timestamp))
for server in servers:
with open(filename, 'a') as out:
try:
rxstats(server, '7000', out)
xstat_fs(server, '2', out)
xstat_fs(server, '3', out)
except Exception as e:
error("Exception: {}".format(e))
info("Wrote stats for server {} to file {}".format(server, filename))
if running:
if config.getboolean('collect', 'once'):
info("Once option set, quitting.")
running = False
else:
sleep = int(config.get('collect', 'sleep'))
debug('sleep {}'.format(sleep))
time.sleep(sleep)
info('Exiting.')
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
b983070276e9108430c515665fa30b6bce8cb8fb | f6841d5626d87e836f6012d88c783706fa46d769 | /web_crawler.py | c736c3b9c98f9e2dabb384fc0182472094e813d0 | [] | no_license | Jack-Valentine/python-seminar-4 | 850b22cd7c552b570e25e9432abf98a25cf0b7d6 | cd6c8945f436fa5dc0d6dec14551d07e6dd3562a | refs/heads/master | 2021-01-22T07:42:35.044924 | 2017-05-25T03:46:59 | 2017-05-25T03:46:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,010 | py | from bs4 import BeautifulSoup
from gevent import monkey
import sys
import gevent
import time
import urllib.request
def crawling_product_price(product_url):
try:
with urllib.request.urlopen(product_url) as response:
html = response.read()
soup = BeautifulSoup(html, 'html.parser')
product_title = soup.find(id='productTitle').get_text().strip()
price = soup.find(id='priceblock_ourprice').get_text()
print(product_title, price)
    except Exception:
        # Retry indefinitely on network/parse errors (original behaviour);
        # persistent failures will keep recursing rather than terminate.
        crawling_product_price(product_url)
if __name__ == '__main__':
concurrency = sys.argv[1:2] == ['-c']
product_urls = [
'https://www.amazon.com/LG-Electronics-OLED65E7P-65-Inch-Smart/dp/B01MZF7YUD',
'https://www.amazon.com/LG-Electronics-75SJ8570-75-Inch-SUPER/dp/B01N5V18W6',
'https://www.amazon.com/All-New-Element-4K-Ultra-HD-Smart-TV-Fire-TV-Edition-43-Inch/dp/B06XD4SXWD',
'https://www.amazon.com/Sceptre-U518CV-UMS-Ultra-True-black/dp/B06Y26S3BC',
'https://www.amazon.com/Vizio-SMART-23-54IN-RETURNS-D24H-E1/dp/B06XQW5FJH',
'https://www.amazon.com/Hisense-55K22DG-55-Inch-1080p-120Hz/dp/B00GFHG1OQ',
'https://www.amazon.com/Samsung-Electronics-UN65MU9000-65-Inch-Ultra/dp/B06XGCT2PQ',
'https://www.amazon.com/Samsung-Electronics-UN65MU8000-65-Inch-Ultra/dp/B06X9VSZYM',
'https://www.amazon.com/Element-ELEFW3916R-720p-Certified-Refurbished/dp/B01N8PPMRG',
'https://www.amazon.com/Samsung-UN50J5000-50-Inch-1080p-Model/dp/B00WR28LLE'
]
start_time = time.time()
if concurrency:
monkey.patch_all()
threads = [gevent.spawn(crawling_product_price, product_url) for product_url in product_urls]
gevent.joinall(threads)
else:
for product_url in product_urls:
crawling_product_price(product_url)
end_time = time.time()
print('-' * 90)
print(f"Results(concurrency is {'on' if concurrency else 'off'}): {end_time-start_time}s")
| [
"[email protected]"
] | |
69e51fdfc4869a7c3cbfdeaf0cb52e5fa0558a74 | f69eccca4970bc898983b149bbadfc6a79e77916 | /befh/api_socket.py | 9252264f83791eecb5cd78803add2d6948531050 | [
"Apache-2.0"
] | permissive | chrischris292/MarketDataGdax | a3cd911edafe7a246a1d553180e1edb66a125c8c | 95dc398123f7878526df4af2402af3cbeee67057 | refs/heads/master | 2021-05-06T17:38:19.949472 | 2017-11-24T22:24:40 | 2017-11-24T22:24:40 | 111,900,487 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | #!/bin/python
class ApiSocket:
"""
API socket
"""
def __init__(self):
pass
@classmethod
def parse_l2_depth(cls, instmt, raw):
"""
Parse raw data to L2 depth
:param instmt: Instrument
:param raw: Raw data in JSON
"""
return None
@classmethod
def parse_trade(cls, instmt, raw):
"""
:param instmt: Instrument
:param raw: Raw data in JSON
:return:
"""
return None
def get_order_book(self, instmt):
"""
Get order book
:param instmt: Instrument
:return: Object L2Depth
"""
return None
def get_trades(self, instmt, trade_id):
"""
Get trades
:param instmt: Instrument
:param trade_id: Trade id
:return: List of trades
"""
return None
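# Sketch (added): how a hypothetical exchange-specific subclass might fill in
# one of these hooks. The field names read from 'raw' are illustrative
# assumptions, not any particular exchange's message schema.
class ExampleExchangeSocket(ApiSocket):
    @classmethod
    def parse_trade(cls, instmt, raw):
        """
        Build a simple trade record from a JSON-decoded message
        :param instmt: Instrument
        :param raw: Raw data in JSON (already decoded to a dict)
        """
        return {
            'instmt': instmt,
            'trade_id': raw.get('trade_id'),
            'price': raw.get('price'),
            'volume': raw.get('size'),
        }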
| [
"[email protected]"
] | |
5f8d714422c7d691696299d9f7a93d52b2168c5c | 59166105545cdd87626d15bf42e60a9ee1ef2413 | /test/test_space_shuttle_api.py | f91c3cfb0e3cfbade624d087754e5913f39e478a | [] | no_license | mosoriob/dbpedia_api_client | 8c594fc115ce75235315e890d55fbf6bd555fa85 | 8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc | refs/heads/master | 2022-11-20T01:42:33.481024 | 2020-05-12T23:22:54 | 2020-05-12T23:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 995 | py | # coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import dbpedia
from dbpedia.api.space_shuttle_api import SpaceShuttleApi # noqa: E501
from dbpedia.rest import ApiException
class TestSpaceShuttleApi(unittest.TestCase):
"""SpaceShuttleApi unit test stubs"""
def setUp(self):
self.api = dbpedia.api.space_shuttle_api.SpaceShuttleApi() # noqa: E501
def tearDown(self):
pass
def test_spaceshuttles_get(self):
"""Test case for spaceshuttles_get
List all instances of SpaceShuttle # noqa: E501
"""
pass
def test_spaceshuttles_id_get(self):
"""Test case for spaceshuttles_id_get
Get a single SpaceShuttle by its id # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
de412810d3844cf5f8ee29bbd2e1e99a79ec08a8 | 31085d66c719c5c27aec57be693bb99c902d2596 | /flux_tool/neutrino.py | f2e1a11c0da132e7731968f5bce499d1e10f630c | [] | no_license | bhokansonfasig/flux_tool | 0fa0692a3a10cb0b493a1a34ffb3339db49ac585 | a74d991d0a02d31eea00d5dd053405542d16247d | refs/heads/main | 2023-02-12T04:13:10.814367 | 2021-01-08T23:54:45 | 2021-01-08T23:54:45 | 327,973,884 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,805 | py | """Class for neutrino interaction physics calculations"""
from enum import Enum
import numpy as np
import scipy.constants
from . import units
def get_from_enum(value, enum):
"""
Find the enum value given some representation of it.
Transforms the given `value` into the corresponding value from the `enum`
by checking the type of `value` given.
Parameters
----------
value
Representation of the desired `enum` value. If already a member of
`enum`, no change. If ``str``, assumed to be a name in the `enum`.
Otherwise, assumed to be a value type of the `enum`.
enum : Enum
Python ``Enum`` to compare names values with.
Returns
-------
Enum value
Value in the `enum` represented by the given `value`.
Examples
--------
>>> from enum import Enum
>>> class Color(Enum):
... red = 1
... green = 2
... blue = 3
>>> get_from_enum(Color.red, Color)
<Color.red: 1>
>>> get_from_enum("green", Color)
<Color.green: 2>
>>> get_from_enum(3, Color)
<Color.blue: 3>
"""
if isinstance(value, enum):
return value
elif isinstance(value, str):
return enum[value]
else:
return enum(value)
class NeutrinoInteraction:
"""
Class for storing and calculating neutrino interaction parameters.
Parameters
----------
neutrino_type
Identification value of the neutrino type. Values should be from the
``NeutrinoInteraction.NeutrinoType`` enum, but integer or string values
may work if carefully chosen.
interaction_type
Identification value of the neutrino's interaction type. Values should
be from the ``NeutrinoInteraction.InteractionType`` enum, but integer
or string values may work if carefully chosen.
energy : float
Energy (GeV) of the neutrino.
Attributes
----------
neutrino : NeutrinoInteraction.NeutrinoType
Identification value of the neutrino type.
interaction : NeutrinoInteraction.InteractionType
Identification value of the neutrino's interaction type.
energy : float
Energy (GeV) of the neutrino.
"""
class NeutrinoType(Enum):
"""
Enum containing possible neutrino types.
Values based on the PDG particle numbering scheme.
http://pdg.lbl.gov/2007/reviews/montecarlorpp.pdf
Attributes
----------
nu_e, electron_neutrino
nu_e_bar, electron_antineutrino
nu_mu, muon_neutrino
nu_mu_bar, muon_antineutrino
nu_tau, tau_neutrino
nu_tau_bar, tau_antineutrino
unknown, undefined
"""
undefined = 0
unknown = 0
electron_neutrino = 12
nu_e = 12
electron_antineutrino = -12
nu_e_bar = -12
muon_neutrino = 14
nu_mu = 14
muon_antineutrino = -14
nu_mu_bar = -14
tau_neutrino = 16
nu_tau = 16
tau_antineutrino = -16
nu_tau_bar = -16
class InteractionType(Enum):
"""
Enum containing possible interaction types.
Attributes
----------
cc, charged_current
nc, neutral_current
unknown, undefined
"""
undefined = 0
unknown = 0
charged_current = 1
cc = 1
neutral_current = 2
nc = 2
def __init__(self, neutrino_type, interaction_type, energy):
self.neutrino = neutrino_type
self.interaction = interaction_type
self.energy = energy
@property
def neutrino(self):
"""
Identification value of the neutrino type.
Should always be a value from the ``NeutrinoType`` enum. Setting with
integer or string values may work if carefully chosen.
"""
return self._neutrino_id
@neutrino.setter
def neutrino(self, neutrino_id):
if neutrino_id is None:
self._neutrino_id = self.NeutrinoType.undefined
else:
self._neutrino_id = get_from_enum(neutrino_id, self.NeutrinoType)
@property
def interaction(self):
"""
Identification value of the neutrino's interaction type.
Should always be a value from the ``InteractionType`` enum. Setting
with integer or string values may work if carefully chosen.
"""
return self._interaction_id
@interaction.setter
def interaction(self, interaction_id):
if interaction_id is None:
self._interaction_id = self.InteractionType.undefined
else:
self._interaction_id = get_from_enum(interaction_id, self.InteractionType)
@property
def total_cross_section(self):
"""
The total cross section of the neutrino.
Calculation is determined by whether the neutrino is a neutrino
or antineutrino and is dependent on the energy of the neutrino.
Combines the charged-current and neutral-current cross sections.
Based on Equation 7 and Table III of the CTW 2011 paper.
"""
# Total cross section should be sum of nc and cc cross sections
# Neutrino
if self.neutrino.value>0:
c_0_cc = -1.826
c_0_nc = -1.826
c_1_cc = -17.31
c_1_nc = -17.31
c_2_cc = -6.406
c_2_nc = -6.448
c_3_cc = 1.431
c_3_nc = 1.431
c_4_cc = -17.91
c_4_nc = -18.61
# Antineutrino
elif self.neutrino.value<0:
c_0_cc = -1.033
c_0_nc = -1.033
c_1_cc = -15.95
c_1_nc = -15.95
c_2_cc = -7.247
c_2_nc = -7.296
c_3_cc = 1.569
c_3_nc = 1.569
c_4_cc = -17.72
c_4_nc = -18.30
else:
raise ValueError("Unable to calculate cross section without a"+
" particle type")
# Calculate cross section based on CTW 2011
eps = np.log10(self.energy / units.GeV)
log_term_cc = np.log(eps - c_0_cc)
power_cc = (c_1_cc + c_2_cc*log_term_cc + c_3_cc*log_term_cc**2
+ c_4_cc/log_term_cc)
log_term_nc = np.log(eps - c_0_nc)
power_nc = (c_1_nc + c_2_nc*log_term_nc + c_3_nc*log_term_nc**2
+ c_4_nc/log_term_nc)
return (10**power_cc + 10**power_nc) * units.cm**2
@property
def cross_section(self):
"""
The cross section of the neutrino.
Calculation is determined by whether the neutrino is a neutrino
or antineutrino and what type of interaction it produces, and is
dependent on the energy of the neutrino. Based on Equation 7 and
Table III of the CTW 2011 paper.
"""
# Neutrino
if self.neutrino.value>0:
if self.interaction==self.InteractionType.charged_current:
c_0 = -1.826
c_1 = -17.31
c_2 = -6.406
c_3 = 1.431
c_4 = -17.91
elif self.interaction==self.InteractionType.neutral_current:
c_0 = -1.826
c_1 = -17.31
c_2 = -6.448
c_3 = 1.431
c_4 = -18.61
else:
raise ValueError("Unable to calculate cross section without an"
+" interaction type")
# Antineutrino
elif self.neutrino.value<0:
if self.interaction==self.InteractionType.charged_current:
c_0 = -1.033
c_1 = -15.95
c_2 = -7.247
c_3 = 1.569
c_4 = -17.72
elif self.interaction==self.InteractionType.neutral_current:
c_0 = -1.033
c_1 = -15.95
c_2 = -7.296
c_3 = 1.569
c_4 = -18.30
else:
raise ValueError("Unable to calculate cross section without an"
+" interaction type")
else:
raise ValueError("Unable to calculate cross section without a"+
" neutrino type")
# Calculate cross section based on CTW 2011
eps = np.log10(self.energy / units.GeV)
log_term = np.log(eps - c_0)
power = c_1 + c_2*log_term + c_3*log_term**2 + c_4/log_term
return (10**power) * units.cm**2
@property
def total_interaction_length(self):
"""
The interaction length of the neutrino.
The interaction length is calculated in water equivalent material.
Calculation is determined by whether the neutrino is a neutrino or
antineutrino and is dependent on the energy of the neutrino. Combines
the charged-current and neutral-current interaction lengths.
"""
# Water equivalent density is 1 g/cm^3
# The approximate number of nucleons per gram is Avogadro's number,
        # so the nucleon density is approximately N_A nucleons/cm^3 in water.
        # Ultimately the interaction length can then be calculated as
# 1 / NA / cross_section
return 1 / (scipy.constants.N_A/units.cm**3) / self.total_cross_section
@property
def interaction_length(self):
"""
The interaction length of the neutrino interaction.
The interaction length is calculated in water equivalent material.
Calculation is determined by whether the neutrino is a neutrino or
antineutrino and what type of interaction it produces, and is dependent
on the energy of the neutrino.
"""
return 1 / (scipy.constants.N_A/units.cm**3) / self.cross_section
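# Minimal usage sketch (added): assumes the package layout above so that the
# relative import of `units` resolves (e.g. run as `python -m flux_tool.neutrino`).
if __name__ == '__main__':
    example = NeutrinoInteraction('nu_e', 'cc', 1e9 * units.GeV)
    print('CC cross section [cm^2]:', example.cross_section / units.cm**2)
    print('interaction length [cm w.e.]:', example.interaction_length / units.cm)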
| [
"[email protected]"
] | |
165d038ef67c6e5d9650811fb8eebb4c215a8874 | 1ad12a71c3d5d2b3810ce03e8bd138c4ffb66eb8 | /xlsxwriter/test/comparison/test_chart_axis17.py | d77b584fbddf1629eaf7474c77dea6cb5512ae61 | [
"BSD-2-Clause-Views"
] | permissive | idreamsfy/XlsxWriter | b52929229b16e2ee1eaca0cda9980a5a0aad5769 | 129044ed821de67895b4562c6b71f90eba5be6b4 | refs/heads/master | 2021-01-02T20:39:20.415882 | 2020-02-07T21:07:55 | 2020-02-07T21:07:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_axis17.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [43812736, 45705088]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_y_axis({'log_base': 10})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
] | |
ac0032fb8c3c73b7de8979c896fcd0df0b3a547f | 263dc86ea58278d6e1db448c245f692049c73199 | /employeedetails/customer/urls.py | 00350cd841b461cd5617ec8e73ffdbac809561d7 | [] | no_license | krishnanunni-pr/MyDjangoProjects | c3a81b193a659c47fd6aec01777d6f689479eb9f | 3d644d2a261243be40f5678e9a61d508a5980143 | refs/heads/master | 2023-08-05T20:10:08.509167 | 2021-09-27T09:21:21 | 2021-09-27T09:21:21 | 394,686,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | from django.urls import path
from customer import views
urlpatterns=[
path("accounts/signup",views.signup,name="signup"),
path("accounts/signin",views.signin,name="signin"),
path("accounts/signout",views.signout,name="signout"),
path("",views.home,name="home")
] | [
"[email protected]"
] | |
f63b4dd68f760c9f304342b9e16b4f91fa19bd8f | 6a95112805b64322953429270a305d01fef3faea | /dist/weewx-3.6.2/bin/weewx/drivers/ws23xx.py | 2b538e1eaf92814e477ceee58c4beccac5760015 | [
"GPL-1.0-or-later",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | tomdotorg/docker-weewx | c6d59dc492a9e53f3bc898f7b9f593717092d72c | 7085654f455d39b06acc688738fde27e1f78ad1e | refs/heads/main | 2023-06-08T17:57:44.184399 | 2023-01-30T11:21:23 | 2023-01-30T11:21:23 | 54,113,384 | 21 | 16 | Apache-2.0 | 2022-10-19T23:46:26 | 2016-03-17T11:39:29 | Dockerfile | UTF-8 | Python | false | false | 79,394 | py | #!usr/bin/env python
#
# Copyright 2013 Matthew Wall
# See the file LICENSE.txt for your full rights.
#
# Thanks to Kenneth Lavrsen for the Open2300 implementation:
# http://www.lavrsen.dk/foswiki/bin/view/Open2300/WebHome
# description of the station communication interface:
# http://www.lavrsen.dk/foswiki/bin/view/Open2300/OpenWSAPI
# memory map:
# http://www.lavrsen.dk/foswiki/bin/view/Open2300/OpenWSMemoryMap
#
# Thanks to Russell Stuart for the ws2300 python implementation:
# http://ace-host.stuart.id.au/russell/files/ws2300/
# and the map of the station memory:
# http://ace-host.stuart.id.au/russell/files/ws2300/memory_map_2300.txt
#
# This implementation copies directly from Russell Stuart's implementation,
# but only the parts required to read from and write to the weather station.
"""Classes and functions for interfacing with WS-23xx weather stations.
LaCrosse made a number of stations in the 23xx series, including:
WS-2300, WS-2308, WS-2310, WS-2315, WS-2317, WS-2357
The stations were also sold as the TFA Matrix and TechnoLine 2350.
The WWVB receiver is located in the console.
To synchronize the console and sensors, press and hold the PLUS key for 2
seconds. When console is not synchronized no data will be received.
To do a factory reset, press and hold PRESSURE and WIND for 5 seconds.
A single bucket tip is 0.0204 in (0.518 mm).
The station has 175 history records. That is just over 7 days of data with
the default history recording interval of 60 minutes.
The station supports both wireless and wired communication between the
sensors and a station console. Wired connection updates data every 8 seconds.
Wireless connection updates data in 16 to 128 second intervals, depending on
wind speed and rain activity.
The connection type can be one of 0=cable, 3=lost, 15=wireless
sensor update frequency:
32 seconds when wind speed > 22.36 mph (wireless)
128 seconds when wind speed < 22.36 mph (wireless)
10 minutes (wireless after 5 failed attempts)
8 seconds (wired)
console update frequency:
15 seconds (pressure/temperature)
20 seconds (humidity)
It is possible to increase the rate of wireless updates:
http://www.wxforum.net/index.php?topic=2196.0
Sensors are connected by unshielded phone cables. RF interference can cause
random spikes in data, with one symptom being values of 25.5 m/s or 91.8 km/h
for the wind speed. Unfortunately those values are within the sensor limits
of 0-113 mph (50.52 m/s or 181.9 km/h). To reduce the number of spikes in
data, replace with shielded cables:
http://www.lavrsen.dk/sources/weather/windmod.htm
The station records wind speed and direction, but has no notion of gust.
The station calculates windchill and dewpoint.
The station has a serial connection to the computer.
This driver does not keep the serial port open for long periods. Instead, the
driver opens the serial port, reads data, then closes the port.
This driver polls the station. Use the polling_interval parameter to specify
how often to poll for data. If not specified, the polling interval will adapt
based on connection type and status.
USB-Serial Converters
With a USB-serial converter one can connect the station to a computer with
only USB ports, but not every converter will work properly. Perhaps the two
most common converters are based on the Prolific and FTDI chipsets. Many
people report better luck with the FTDI-based converters. Some converters
that use the Prolific chipset (PL2303) will work, but not all of them.
Known to work: ATEN UC-232A
Bounds checking
wind speed: 0-113 mph
wind direction: 0-360
humidity: 0-100
temperature: ok if not -22F and humidity is valid
dewpoint: ok if not -22F and humidity is valid
barometer: 25-35 inHg
rain rate: 0-10 in/hr
Discrepancies Between Implementations
As of December 2013, there are significant differences between the open2300,
wview, and ws2300 implementations. Current version numbers are as follows:
open2300 1.11
ws2300 1.8
wview 5.20.2
History Interval
The factory default is 60 minutes. The value stored in the console is one
less than the actual value (in minutes). So for the factory default of 60,
the console stores 59. The minimum interval is 1.
ws2300.py reports the actual value from the console, e.g., 59 when the
interval is 60. open2300 reports the interval, e.g., 60 when the interval
is 60. wview ignores the interval.
Detecting Bogus Sensor Values
wview queries the station 3 times for each sensor then accepts the value only
if the three values were close to each other.
open2300 sleeps 10 seconds if a wind measurement indicates invalid or overflow.
The ws2300.py implementation includes overflow and validity flags for values
from the wind sensors. It does not retry based on invalid or overflow.
Wind Speed
There is disagreement about how to calculate wind speed and how to determine
whether the wind speed is valid.
This driver introduces a WindConversion object that uses open2300/wview
decoding so that wind speeds match that of open2300/wview. ws2300 1.8
incorrectly uses bcd2num instead of bin2num. This bug is fixed in this driver.
The memory map indicates the following:
addr smpl description
0x527 0 Wind overflow flag: 0 = normal
0x528 0 Wind minimum code: 0=min, 1=--.-, 2=OFL
0x529 0 Windspeed: binary nibble 0 [m/s * 10]
0x52A 0 Windspeed: binary nibble 1 [m/s * 10]
0x52B 0 Windspeed: binary nibble 2 [m/s * 10]
0x52C 8 Wind Direction = nibble * 22.5 degrees
0x52D 8 Wind Direction 1 measurement ago
0x52E 9 Wind Direction 2 measurement ago
0x52F 8 Wind Direction 3 measurement ago
0x530 7 Wind Direction 4 measurement ago
0x531 7 Wind Direction 5 measurement ago
0x532 0
wview 5.20.2 implementation (wview apparently copied from open2300):
read 3 bytes starting at 0x527
0x527 x[0]
0x528 x[1]
0x529 x[2]
if ((x[0] != 0x00) ||
((x[1] == 0xff) && (((x[2] & 0xf) == 0) || ((x[2] & 0xf) == 1)))) {
fail
} else {
dir = (x[2] >> 4) * 22.5
speed = ((((x[2] & 0xf) << 8) + (x[1])) / 10.0 * 2.23693629)
maxdir = dir
maxspeed = speed
}
open2300 1.10 implementation:
read 6 bytes starting at 0x527
0x527 x[0]
0x528 x[1]
0x529 x[2]
0x52a x[3]
0x52b x[4]
0x52c x[5]
if ((x[0] != 0x00) ||
((x[1] == 0xff) && (((x[2] & 0xf) == 0) || ((x[2] & 0xf) == 1)))) {
sleep 10
} else {
dir = x[2] >> 4
speed = ((((x[2] & 0xf) << 8) + (x[1])) / 10.0)
dir0 = (x[2] >> 4) * 22.5
dir1 = (x[3] & 0xf) * 22.5
dir2 = (x[3] >> 4) * 22.5
dir3 = (x[4] & 0xf) * 22.5
dir4 = (x[4] >> 4) * 22.5
dir5 = (x[5] & 0xf) * 22.5
}
ws2300.py 1.8 implementation:
read 1 nibble starting at 0x527
read 1 nibble starting at 0x528
read 4 nibble starting at 0x529
read 3 nibble starting at 0x529
read 1 nibble starting at 0x52c
read 1 nibble starting at 0x52d
read 1 nibble starting at 0x52e
read 1 nibble starting at 0x52f
read 1 nibble starting at 0x530
read 1 nibble starting at 0x531
0x527 overflow
0x528 validity
0x529 speed[0]
0x52a speed[1]
0x52b speed[2]
0x52c dir[0]
speed: ((x[2] * 100 + x[1] * 10 + x[0]) % 1000) / 10
velocity: (x[2] * 100 + x[1] * 10 + x[0]) / 10
dir = data[0] * 22.5
speed = (bcd2num(data) % 10**3 + 0) / 10**1
velocity = (bcd2num(data[:3])/10.0, bin2num(data[3:4]) * 22.5)
bcd2num([a,b,c]) -> c*100+b*10+a
"""
# TODO: use pyserial instead of LinuxSerialPort
# TODO: put the __enter__ and __exit__ scaffolding on serial port, not Station
# FIXME: unless we can get setTime to work, just ignore the console clock
# FIXME: detect bogus wind speed/direction
# i see these when the wind instrument is disconnected:
# ws 26.399999
# wsh 21
# w0 135
from __future__ import with_statement
import syslog
import time
import string
import fcntl
import os
import select
import struct
import termios
import tty
import weeutil.weeutil
import weewx.drivers
import weewx.wxformulas
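# Illustrative sketch (added): the open2300/wview wind decoding summarized in
# the module docstring, expressed as a standalone helper. x0, x1, x2 are the
# raw bytes read from 0x527-0x529; the driver itself does its decoding through
# the Measure/Conversion machinery further below, not through this function.
def _example_decode_wind(x0, x1, x2):
    """Return (speed m/s, direction degrees), or (None, None) if invalid."""
    if x0 != 0x00 or (x1 == 0xff and (x2 & 0x0f) in (0, 1)):
        return None, None  # overflow flag set or reading marked invalid
    speed = (((x2 & 0x0f) << 8) + x1) / 10.0  # m/s; wview then multiplies by 2.23693629 for mph
    direction = (x2 >> 4) * 22.5              # high nibble encodes 16 compass points
    return speed, direction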
DRIVER_NAME = 'WS23xx'
DRIVER_VERSION = '0.25'
def loader(config_dict, _):
return WS23xxDriver(config_dict=config_dict, **config_dict[DRIVER_NAME])
def configurator_loader(_):
return WS23xxConfigurator()
def confeditor_loader():
return WS23xxConfEditor()
DEFAULT_PORT = '/dev/ttyUSB0'
def logmsg(dst, msg):
syslog.syslog(dst, 'ws23xx: %s' % msg)
def logdbg(msg):
logmsg(syslog.LOG_DEBUG, msg)
def loginf(msg):
logmsg(syslog.LOG_INFO, msg)
def logcrt(msg):
logmsg(syslog.LOG_CRIT, msg)
def logerr(msg):
logmsg(syslog.LOG_ERR, msg)
class WS23xxConfigurator(weewx.drivers.AbstractConfigurator):
def add_options(self, parser):
super(WS23xxConfigurator, self).add_options(parser)
parser.add_option("--info", dest="info", action="store_true",
help="display weather station configuration")
parser.add_option("--current", dest="current", action="store_true",
help="get the current weather conditions")
parser.add_option("--history", dest="nrecords", type=int, metavar="N",
help="display N history records")
parser.add_option("--history-since", dest="recmin",
type=int, metavar="N",
help="display history records since N minutes ago")
parser.add_option("--clear-memory", dest="clear", action="store_true",
help="clear station memory")
parser.add_option("--set-time", dest="settime", action="store_true",
help="set the station clock to the current time")
parser.add_option("--set-interval", dest="interval",
type=int, metavar="N",
help="set the station archive interval to N minutes")
def do_options(self, options, parser, config_dict, prompt):
self.station = WS23xxDriver(**config_dict[DRIVER_NAME])
if options.current:
self.show_current()
elif options.nrecords is not None:
self.show_history(count=options.nrecords)
elif options.recmin is not None:
ts = int(time.time()) - options.recmin * 60
self.show_history(ts=ts)
elif options.settime:
self.set_clock(prompt)
elif options.interval is not None:
self.set_interval(options.interval, prompt)
elif options.clear:
self.clear_history(prompt)
else:
self.show_info()
self.station.closePort()
def show_info(self):
"""Query the station then display the settings."""
print 'Querying the station for the configuration...'
config = self.station.getConfig()
for key in sorted(config):
print '%s: %s' % (key, config[key])
def show_current(self):
"""Get current weather observation."""
print 'Querying the station for current weather data...'
for packet in self.station.genLoopPackets():
print packet
break
def show_history(self, ts=None, count=0):
"""Show the indicated number of records or records since timestamp"""
print "Querying the station for historical records..."
for i, r in enumerate(self.station.genStartupRecords(since_ts=ts,
count=count)):
print r
if count and i > count:
break
def set_clock(self, prompt):
"""Set station clock to current time."""
ans = None
while ans not in ['y', 'n']:
v = self.station.getTime()
vstr = weeutil.weeutil.timestamp_to_string(v)
print "Station clock is", vstr
if prompt:
ans = raw_input("Set station clock (y/n)? ")
else:
print "Setting station clock"
ans = 'y'
if ans == 'y':
self.station.setTime()
v = self.station.getTime()
vstr = weeutil.weeutil.timestamp_to_string(v)
print "Station clock is now", vstr
elif ans == 'n':
print "Set clock cancelled."
def set_interval(self, interval, prompt):
print "Changing the interval will clear the station memory."
v = self.station.getArchiveInterval()
ans = None
while ans not in ['y', 'n']:
print "Interval is", v
if prompt:
ans = raw_input("Set interval to %d minutes (y/n)? " % interval)
else:
print "Setting interval to %d minutes" % interval
ans = 'y'
if ans == 'y':
self.station.setArchiveInterval(interval)
v = self.station.getArchiveInterval()
print "Interval is now", v
elif ans == 'n':
print "Set interval cancelled."
def clear_history(self, prompt):
ans = None
while ans not in ['y', 'n']:
v = self.station.getRecordCount()
print "Records in memory:", v
if prompt:
ans = raw_input("Clear console memory (y/n)? ")
else:
print 'Clearing console memory'
ans = 'y'
if ans == 'y':
self.station.clearHistory()
v = self.station.getRecordCount()
print "Records in memory:", v
elif ans == 'n':
print "Clear memory cancelled."
class WS23xxDriver(weewx.drivers.AbstractDevice):
"""Driver for LaCrosse WS23xx stations."""
def __init__(self, **stn_dict):
"""Initialize the station object.
port: The serial port, e.g., /dev/ttyS0 or /dev/ttyUSB0
[Required. Default is /dev/ttyS0]
polling_interval: How often to poll the station, in seconds.
[Optional. Default is 8 (wired) or 30 (wireless)]
model: Which station model is this?
[Optional. Default is 'LaCrosse WS23xx']
"""
self._last_rain = None
self._last_cn = None
self._poll_wait = 60
self.model = stn_dict.get('model', 'LaCrosse WS23xx')
self.port = stn_dict.get('port', DEFAULT_PORT)
self.max_tries = int(stn_dict.get('max_tries', 5))
self.retry_wait = int(stn_dict.get('retry_wait', 30))
self.polling_interval = stn_dict.get('polling_interval', None)
if self.polling_interval is not None:
self.polling_interval = int(self.polling_interval)
self.enable_startup_records = stn_dict.get('enable_startup_records',
True)
self.enable_archive_records = stn_dict.get('enable_archive_records',
True)
self.mode = stn_dict.get('mode', 'single_open')
loginf('driver version is %s' % DRIVER_VERSION)
loginf('serial port is %s' % self.port)
loginf('polling interval is %s' % self.polling_interval)
if self.mode == 'single_open':
self.station = WS23xx(self.port)
else:
self.station = None
def closePort(self):
if self.station is not None:
self.station.close()
self.station = None
@property
def hardware_name(self):
return self.model
# weewx wants the archive interval in seconds, but the console uses minutes
@property
def archive_interval(self):
if not self.enable_startup_records and not self.enable_archive_records:
raise NotImplementedError
return self.getArchiveInterval() * 60
def genLoopPackets(self):
ntries = 0
while ntries < self.max_tries:
ntries += 1
try:
if self.station:
data = self.station.get_raw_data(SENSOR_IDS)
else:
with WS23xx(self.port) as s:
data = s.get_raw_data(SENSOR_IDS)
packet = data_to_packet(data, int(time.time() + 0.5),
last_rain=self._last_rain)
self._last_rain = packet['rainTotal']
ntries = 0
yield packet
if self.polling_interval is not None:
self._poll_wait = self.polling_interval
if data['cn'] != self._last_cn:
conn_info = get_conn_info(data['cn'])
loginf("connection changed from %s to %s" %
(get_conn_info(self._last_cn)[0], conn_info[0]))
self._last_cn = data['cn']
if self.polling_interval is None:
loginf("using %s second polling interval"
" for %s connection" %
(conn_info[1], conn_info[0]))
self._poll_wait = conn_info[1]
time.sleep(self._poll_wait)
except Ws2300.Ws2300Exception, e:
logerr("Failed attempt %d of %d to get LOOP data: %s" %
(ntries, self.max_tries, e))
logdbg("Waiting %d seconds before retry" % self.retry_wait)
time.sleep(self.retry_wait)
else:
msg = "Max retries (%d) exceeded for LOOP data" % self.max_tries
logerr(msg)
raise weewx.RetriesExceeded(msg)
def genStartupRecords(self, since_ts):
if not self.enable_startup_records:
raise NotImplementedError
if self.station:
return self.genRecords(self.station, since_ts)
else:
with WS23xx(self.port) as s:
return self.genRecords(s, since_ts)
def genArchiveRecords(self, since_ts, count=0):
if not self.enable_archive_records:
raise NotImplementedError
if self.station:
return self.genRecords(self.station, since_ts, count)
else:
with WS23xx(self.port) as s:
return self.genRecords(s, since_ts, count)
def genRecords(self, s, since_ts, count=0):
last_rain = None
for ts, data in s.gen_records(since_ts=since_ts, count=count):
record = data_to_packet(data, ts, last_rain=last_rain)
record['interval'] = data['interval']
last_rain = record['rainTotal']
yield record
# def getTime(self) :
# with WS23xx(self.port) as s:
# return s.get_time()
# def setTime(self):
# with WS23xx(self.port) as s:
# s.set_time()
def getArchiveInterval(self):
if self.station:
return self.station.get_archive_interval()
else:
with WS23xx(self.port) as s:
return s.get_archive_interval()
def setArchiveInterval(self, interval):
if self.station:
self.station.set_archive_interval(interval)
else:
with WS23xx(self.port) as s:
s.set_archive_interval(interval)
def getConfig(self):
fdata = dict()
if self.station:
data = self.station.get_raw_data(Measure.IDS.keys())
else:
with WS23xx(self.port) as s:
data = s.get_raw_data(Measure.IDS.keys())
for key in data:
fdata[Measure.IDS[key].name] = data[key]
return fdata
def getRecordCount(self):
if self.station:
return self.station.get_record_count()
else:
with WS23xx(self.port) as s:
return s.get_record_count()
def clearHistory(self):
if self.station:
self.station.clear_memory()
else:
with WS23xx(self.port) as s:
s.clear_memory()
# ids for current weather conditions and connection type
SENSOR_IDS = ['it','ih','ot','oh','pa','wind','rh','rt','dp','wc','cn']
# polling interval, in seconds, for various connection types
POLLING_INTERVAL = {0: ("cable", 8), 3: ("lost", 60), 15: ("wireless", 30)}
def get_conn_info(conn_type):
return POLLING_INTERVAL.get(conn_type, ("unknown", 60))
def data_to_packet(data, ts, last_rain=None):
"""Convert raw data to format and units required by weewx.
station weewx (metric)
temperature degree C degree C
humidity percent percent
uv index unitless unitless
pressure mbar mbar
wind speed m/s km/h
wind dir degree degree
wind gust None
wind gust dir None
rain mm cm
rain rate cm/h
"""
packet = dict()
packet['usUnits'] = weewx.METRIC
packet['dateTime'] = ts
packet['inTemp'] = data['it']
packet['inHumidity'] = data['ih']
packet['outTemp'] = data['ot']
packet['outHumidity'] = data['oh']
packet['pressure'] = data['pa']
ws, wd, wso, wsv = data['wind']
if wso == 0 and wsv == 0:
packet['windSpeed'] = ws
if packet['windSpeed'] is not None:
packet['windSpeed'] *= 3.6 # weewx wants km/h
packet['windDir'] = wd
else:
loginf('invalid wind reading: speed=%s dir=%s overflow=%s invalid=%s' %
(ws, wd, wso, wsv))
packet['windSpeed'] = None
packet['windDir'] = None
packet['windGust'] = None
packet['windGustDir'] = None
packet['rainTotal'] = data['rt']
if packet['rainTotal'] is not None:
packet['rainTotal'] /= 10 # weewx wants cm
packet['rain'] = weewx.wxformulas.calculate_rain(
packet['rainTotal'], last_rain)
# station provides some derived variables
packet['rainRate'] = data['rh']
if packet['rainRate'] is not None:
packet['rainRate'] /= 10 # weewx wants cm/hr
packet['dewpoint'] = data['dp']
packet['windchill'] = data['wc']
return packet
class WS23xx(object):
"""Wrap the Ws2300 object so we can easily open serial port, read/write,
close serial port without all of the try/except/finally scaffolding."""
def __init__(self, port):
logdbg('create LinuxSerialPort')
self.serial_port = LinuxSerialPort(port)
logdbg('create Ws2300')
self.ws = Ws2300(self.serial_port)
def __enter__(self):
logdbg('station enter')
return self
def __exit__(self, type_, value, traceback):
logdbg('station exit')
self.ws = None
self.close()
def close(self):
logdbg('close LinuxSerialPort')
self.serial_port.close()
self.serial_port = None
def set_time(self, ts):
"""Set station time to indicated unix epoch."""
logdbg('setting station clock to %s' %
weeutil.weeutil.timestamp_to_string(ts))
for m in [Measure.IDS['sd'], Measure.IDS['st']]:
data = m.conv.value2binary(ts)
cmd = m.conv.write(data, None)
self.ws.write_safe(m.address, *cmd[1:])
def get_time(self):
"""Return station time as unix epoch."""
data = self.get_raw_data(['sw'])
ts = int(data['sw'])
logdbg('station clock is %s' % weeutil.weeutil.timestamp_to_string(ts))
return ts
def set_archive_interval(self, interval):
"""Set the archive interval in minutes."""
if int(interval) < 1:
raise ValueError('archive interval must be greater than zero')
logdbg('setting hardware archive interval to %s minutes' % interval)
interval -= 1
for m,v in [(Measure.IDS['hi'],interval), # archive interval in minutes
(Measure.IDS['hc'],1), # time till next sample in minutes
(Measure.IDS['hn'],0)]: # number of valid records
data = m.conv.value2binary(v)
cmd = m.conv.write(data, None)
self.ws.write_safe(m.address, *cmd[1:])
def get_archive_interval(self):
"""Return archive interval in minutes."""
data = self.get_raw_data(['hi'])
x = 1 + int(data['hi'])
logdbg('station archive interval is %s minutes' % x)
return x
def clear_memory(self):
"""Clear station memory."""
logdbg('clearing console memory')
for m,v in [(Measure.IDS['hn'],0)]: # number of valid records
data = m.conv.value2binary(v)
cmd = m.conv.write(data, None)
self.ws.write_safe(m.address, *cmd[1:])
def get_record_count(self):
data = self.get_raw_data(['hn'])
x = int(data['hn'])
logdbg('record count is %s' % x)
return x
def gen_records(self, since_ts=None, count=None, use_computer_clock=True):
"""Get latest count records from the station from oldest to newest. If
count is 0 or None, return all records.
The station has a history interval, and it records when the last
history sample was saved. So as long as the interval does not change
between the first and last records, we are safe to infer timestamps
for each record. This assumes that if the station loses power then
the memory will be cleared.
There is no timestamp associated with each record - we have to guess.
The station tells us the time until the next record and the epoch of
the latest record, based on the station's clock. So we can use that
or use the computer clock to guess the timestamp for each record.
To ensure accurate data, the first record must be read within one
minute of the initial read and the remaining records must be read
within numrec * interval minutes.
"""
logdbg("gen_records: since_ts=%s count=%s clock=%s" %
(since_ts, count, use_computer_clock))
measures = [Measure.IDS['hi'], Measure.IDS['hw'],
Measure.IDS['hc'], Measure.IDS['hn']]
raw_data = read_measurements(self.ws, measures)
interval = 1 + int(measures[0].conv.binary2value(raw_data[0])) # minute
latest_ts = int(measures[1].conv.binary2value(raw_data[1])) # epoch
time_to_next = int(measures[2].conv.binary2value(raw_data[2])) # minute
numrec = int(measures[3].conv.binary2value(raw_data[3]))
now = int(time.time())
cstr = 'station'
if use_computer_clock:
latest_ts = now - (interval - time_to_next) * 60
cstr = 'computer'
logdbg("using %s clock with latest_ts of %s" %
(cstr, weeutil.weeutil.timestamp_to_string(latest_ts)))
if not count:
count = HistoryMeasure.MAX_HISTORY_RECORDS
if since_ts is not None:
count = int((now - since_ts) / (interval * 60))
logdbg("count is %d to satisfy timestamp of %s" %
(count, weeutil.weeutil.timestamp_to_string(since_ts)))
if count == 0:
return
if count > numrec:
count = numrec
if count > HistoryMeasure.MAX_HISTORY_RECORDS:
count = HistoryMeasure.MAX_HISTORY_RECORDS
# station is about to overwrite first record, so skip it
if time_to_next <= 1 and count == HistoryMeasure.MAX_HISTORY_RECORDS:
count -= 1
logdbg("downloading %d records from station" % count)
HistoryMeasure.set_constants(self.ws)
measures = [HistoryMeasure(n) for n in range(count-1, -1, -1)]
raw_data = read_measurements(self.ws, measures)
last_ts = latest_ts - (count-1) * interval * 60
for measure, nybbles in zip(measures, raw_data):
value = measure.conv.binary2value(nybbles)
data_dict = {
'interval': interval,
'it': value.temp_indoor,
'ih': value.humidity_indoor,
'ot': value.temp_outdoor,
'oh': value.humidity_outdoor,
'pa': value.pressure_absolute,
'rt': value.rain,
'wind': (value.wind_speed/10, value.wind_direction, 0, 0),
'rh': None, # no rain rate in history
'dp': None, # no dewpoint in history
'wc': None, # no windchill in history
}
yield last_ts, data_dict
last_ts += interval * 60
def get_raw_data(self, labels):
"""Get raw data from the station, return as dictionary."""
measures = [Measure.IDS[m] for m in labels]
raw_data = read_measurements(self.ws, measures)
data_dict = dict(zip(labels, [m.conv.binary2value(d) for m, d in zip(measures, raw_data)]))
return data_dict
# =============================================================================
# The following code was adapted from ws2300.py by Russell Stuart
# =============================================================================
VERSION = "1.8 2013-08-26"
#
# Debug options.
#
DEBUG_SERIAL = False
#
# A fatal error.
#
class FatalError(StandardError):
source = None
message = None
cause = None
def __init__(self, source, message, cause=None):
self.source = source
self.message = message
self.cause = cause
StandardError.__init__(self, message)
#
# The serial port interface. We can talk to the Ws2300 over anything
# that implements this interface.
#
class SerialPort(object):
#
# Discard all characters waiting to be read.
#
def clear(self): raise NotImplementedError()
#
# Close the serial port.
#
def close(self): raise NotImplementedError()
#
# Wait for all characters to be sent.
#
def flush(self): raise NotImplementedError()
#
    # Read a character, waiting for at most timeout seconds. Return the
# character read, or None if the timeout occurred.
#
def read_byte(self, timeout): raise NotImplementedError()
#
# Release the serial port. Closes it until it is used again, when
# it is automatically re-opened. It need not be implemented.
#
def release(self): pass
#
# Write characters to the serial port.
#
def write(self, data): raise NotImplementedError()
#
# A Linux Serial port. Implements the Serial interface on Linux.
#
class LinuxSerialPort(SerialPort):
SERIAL_CSIZE = {
"7": tty.CS7,
"8": tty.CS8, }
SERIAL_PARITIES= {
"e": tty.PARENB,
"n": 0,
"o": tty.PARENB|tty.PARODD, }
SERIAL_SPEEDS = {
"300": tty.B300,
"600": tty.B600,
"1200": tty.B1200,
"2400": tty.B2400,
"4800": tty.B4800,
"9600": tty.B9600,
"19200": tty.B19200,
"38400": tty.B38400,
"57600": tty.B57600,
"115200": tty.B115200, }
SERIAL_SETTINGS = "2400,n,8,1"
device = None # string, the device name.
orig_settings = None # class, the original ports settings.
select_list = None # list, The serial ports
serial_port = None # int, OS handle to device.
settings = None # string, the settings on the command line.
#
# Initialise ourselves.
#
def __init__(self,device,settings=SERIAL_SETTINGS):
self.device = device
self.settings = settings.split(",")
self.settings.extend([None,None,None])
self.settings[0] = self.__class__.SERIAL_SPEEDS.get(self.settings[0], None)
self.settings[1] = self.__class__.SERIAL_PARITIES.get(self.settings[1].lower(), None)
self.settings[2] = self.__class__.SERIAL_CSIZE.get(self.settings[2], None)
if len(self.settings) != 7 or None in self.settings[:3]:
raise FatalError(self.device, 'Bad serial settings "%s".' % settings)
self.settings = self.settings[:4]
#
# Open the port.
#
try:
self.serial_port = os.open(self.device, os.O_RDWR)
except EnvironmentError, e:
raise FatalError(self.device, "can't open tty device - %s." % str(e))
try:
fcntl.flock(self.serial_port, fcntl.LOCK_EX)
self.orig_settings = tty.tcgetattr(self.serial_port)
setup = self.orig_settings[:]
setup[0] = tty.INPCK
setup[1] = 0
setup[2] = tty.CREAD|tty.HUPCL|tty.CLOCAL|reduce(lambda x,y: x|y, self.settings[:3])
setup[3] = 0 # tty.ICANON
setup[4] = self.settings[0]
setup[5] = self.settings[0]
setup[6] = ['\000']*len(setup[6])
setup[6][tty.VMIN] = 1
setup[6][tty.VTIME] = 0
tty.tcflush(self.serial_port, tty.TCIOFLUSH)
#
# Restart IO if stopped using software flow control (^S/^Q). This
# doesn't work on FreeBSD.
#
try:
tty.tcflow(self.serial_port, tty.TCOON|tty.TCION)
except termios.error:
pass
tty.tcsetattr(self.serial_port, tty.TCSAFLUSH, setup)
#
# Set DTR low and RTS high and leave other control lines untouched.
#
arg = struct.pack('I', 0)
arg = fcntl.ioctl(self.serial_port, tty.TIOCMGET, arg)
portstatus = struct.unpack('I', arg)[0]
portstatus = portstatus & ~tty.TIOCM_DTR | tty.TIOCM_RTS
arg = struct.pack('I', portstatus)
fcntl.ioctl(self.serial_port, tty.TIOCMSET, arg)
self.select_list = [self.serial_port]
except Exception:
os.close(self.serial_port)
raise
def close(self):
if self.orig_settings:
tty.tcsetattr(self.serial_port, tty.TCSANOW, self.orig_settings)
os.close(self.serial_port)
def read_byte(self, timeout):
ready = select.select(self.select_list, [], [], timeout)
if not ready[0]:
return None
return os.read(self.serial_port, 1)
#
# Write a string to the port.
#
def write(self, data):
os.write(self.serial_port, data)
#
# Flush the input buffer.
#
def clear(self):
tty.tcflush(self.serial_port, tty.TCIFLUSH)
#
# Flush the output buffer.
#
def flush(self):
tty.tcdrain(self.serial_port)
#
# This class reads and writes bytes to a Ws2300. It is passed something
# that implements the Serial interface. The major routines are:
#
# Ws2300() - Create one of these objects that talks over the serial port.
# read_batch() - Reads data from the device using an scatter/gather interface.
# write_safe() - Writes data to the device.
#
class Ws2300(object):
#
# An exception for us.
#
class Ws2300Exception(weewx.WeeWxIOError):
def __init__(self, *args):
weewx.WeeWxIOError.__init__(self, *args)
#
# Constants we use.
#
MAXBLOCK = 30
MAXRETRIES = 50
MAXWINDRETRIES= 20
WRITENIB = 0x42
SETBIT = 0x12
UNSETBIT = 0x32
WRITEACK = 0x10
SETACK = 0x04
UNSETACK = 0x0C
RESET_MIN = 0x01
RESET_MAX = 0x02
MAX_RESETS = 100
#
# Instance data.
#
log_buffer = None # list, action log
log_mode = None # string, Log mode
    log_nest = None # int, Nesting of log actions
serial_port = None # string, SerialPort port to use
#
# Initialise ourselves.
#
def __init__(self, serial_port):
self.log_buffer = []
self.log_nest = 0
self.serial_port = serial_port
#
# Write data to the device.
#
def write_byte(self, data):
if self.log_mode != 'w':
if self.log_mode != 'e':
self.log(' ')
self.log_mode = 'w'
self.log("%02x" % ord(data))
self.serial_port.write(data)
#
# Read a byte from the device.
#
def read_byte(self, timeout=1.0):
if self.log_mode != 'r':
self.log_mode = 'r'
self.log(':')
result = self.serial_port.read_byte(timeout)
if not result:
self.log("--")
else:
self.log("%02x" % ord(result))
return result
#
# Remove all pending incoming characters.
#
def clear_device(self):
if self.log_mode != 'e':
self.log(' ')
self.log_mode = 'c'
self.log("C")
self.serial_port.clear()
#
# Write a reset string and wait for a reply.
#
def reset_06(self):
self.log_enter("re")
try:
for _ in range(self.__class__.MAX_RESETS):
self.clear_device()
self.write_byte('\x06')
#
# Occasionally 0, then 2 is returned. If 0 comes back,
# continue reading as this is more efficient than sending
# an out-of sync reset and letting the data reads restore
# synchronization. Occasionally, multiple 2's are returned.
# Read with a fast timeout until all data is exhausted, if
# we got a 2 back at all, we consider it a success.
#
success = False
answer = self.read_byte()
while answer != None:
if answer == '\x02':
success = True
answer = self.read_byte(0.05)
if success:
return
msg = "Reset failed, %d retries, no response" % self.__class__.MAX_RESETS
raise self.Ws2300Exception(msg)
finally:
self.log_exit()
#
# Encode the address.
#
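    # Each of the four address nybbles is sent most significant first,
    # encoded as nybble*4 + 0x82; the station acknowledges each byte
    # with digit*16 + nybble.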
def write_address(self,address):
for digit in range(4):
byte = chr((address >> (4 * (3-digit)) & 0xF) * 4 + 0x82)
self.write_byte(byte)
ack = chr(digit * 16 + (ord(byte) - 0x82) // 4)
answer = self.read_byte()
if ack != answer:
self.log("??")
return False
return True
#
# Write data, checking the reply.
#
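    # Each data nybble is encoded as nybble*4 + encode_constant and the
    # station must acknowledge every byte with nybble + the matching ack
    # constant.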
def write_data(self,nybble_address,nybbles,encode_constant=None):
self.log_enter("wd")
try:
if not self.write_address(nybble_address):
return None
if encode_constant == None:
encode_constant = self.WRITENIB
encoded_data = ''.join([
chr(nybbles[i]*4 + encode_constant)
for i in range(len(nybbles))])
ack_constant = {
self.SETBIT: self.SETACK,
self.UNSETBIT: self.UNSETACK,
self.WRITENIB: self.WRITEACK
}[encode_constant]
self.log(",")
for i in range(len(encoded_data)):
self.write_byte(encoded_data[i])
answer = self.read_byte()
if chr(nybbles[i] + ack_constant) != answer:
self.log("??")
return None
return True
finally:
self.log_exit()
#
    # Reset the device and write a command, verifying it was written correctly.
#
def write_safe(self,nybble_address,nybbles,encode_constant=None):
self.log_enter("ws")
try:
for _ in range(self.MAXRETRIES):
self.reset_06()
command_data = self.write_data(nybble_address,nybbles,encode_constant)
if command_data != None:
return command_data
raise self.Ws2300Exception("write_safe failed, retries exceeded")
finally:
self.log_exit()
#
    # A total kludge this, but it's the easiest way to force the 'computer
# time' to look like a normal ws2300 variable, which it most definitely
# isn't, of course.
#
def read_computer_time(self,nybble_address,nybble_count):
now = time.time()
tm = time.localtime(now)
tu = time.gmtime(now)
year2 = tm[0] % 100
datetime_data = (
tu[5]%10, tu[5]//10, tu[4]%10, tu[4]//10, tu[3]%10, tu[3]//10,
tm[5]%10, tm[5]//10, tm[4]%10, tm[4]//10, tm[3]%10, tm[3]//10,
tm[2]%10, tm[2]//10, tm[1]%10, tm[1]//10, year2%10, year2//10)
address = nybble_address+18
return datetime_data[address:address+nybble_count]
#
# Read 'length' nybbles at address. Returns: (nybble_at_address, ...).
# Can't read more than MAXBLOCK nybbles at a time.
#
def read_data(self,nybble_address,nybble_count):
if nybble_address < 0:
return self.read_computer_time(nybble_address,nybble_count)
self.log_enter("rd")
try:
if nybble_count < 1 or nybble_count > self.MAXBLOCK:
                raise StandardError("Too many nybbles requested")
bytes_ = (nybble_count + 1) // 2
if not self.write_address(nybble_address):
return None
#
            # Write the number of bytes we want to read.
#
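            # The request is encoded as 0xC2 + bytes*4; the station answers
            # with 0x30 + bytes before sending the data itself.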
encoded_data = chr(0xC2 + bytes_*4)
self.write_byte(encoded_data)
answer = self.read_byte()
check = chr(0x30 + bytes_)
if answer != check:
self.log("??")
return None
#
# Read the response.
#
self.log(", :")
response = ""
for _ in range(bytes_):
answer = self.read_byte()
if answer == None:
return None
response += answer
#
# Read and verify checksum
#
answer = self.read_byte()
checksum = sum([ord(b) for b in response]) % 256
if chr(checksum) != answer:
self.log("??")
return None
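            # Unpack each response byte into two nybbles, low nybble first.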
            flatten = lambda a,b: a + (ord(b) % 16, ord(b) // 16)
return reduce(flatten, response, ())[:nybble_count]
finally:
self.log_exit()
#
# Read a batch of blocks. Batches is a list of data to be read:
# [(address_of_first_nybble, length_in_nybbles), ...]
# returns:
# [(nybble_at_address, ...), ...]
#
def read_batch(self,batches):
self.log_enter("rb start")
self.log_exit()
try:
if [b for b in batches if b[0] >= 0]:
self.reset_06()
result = []
for batch in batches:
address = batch[0]
data = ()
for start_pos in range(0,batch[1],self.MAXBLOCK):
for _ in range(self.MAXRETRIES):
bytes_ = min(self.MAXBLOCK, batch[1]-start_pos)
response = self.read_data(address + start_pos, bytes_)
if response != None:
break
self.reset_06()
if response == None:
raise self.Ws2300Exception("read failed, retries exceeded")
data += response
result.append(data)
return result
finally:
self.log_enter("rb end")
self.log_exit()
#
# Reset the device, read a block of nybbles at the passed address.
#
def read_safe(self,nybble_address,nybble_count):
self.log_enter("rs")
try:
return self.read_batch([(nybble_address,nybble_count)])[0]
finally:
self.log_exit()
#
# Debug logging of serial IO.
#
def log(self, s):
if not DEBUG_SERIAL:
return
self.log_buffer[-1] = self.log_buffer[-1] + s
def log_enter(self, action):
if not DEBUG_SERIAL:
return
self.log_nest += 1
if self.log_nest == 1:
if len(self.log_buffer) > 1000:
del self.log_buffer[0]
self.log_buffer.append("%5.2f %s " % (time.time() % 100, action))
self.log_mode = 'e'
def log_exit(self):
if not DEBUG_SERIAL:
return
self.log_nest -= 1
#
# Helpers for converting between nybble tuples and numbers.
#
def bcd2num(nybbles):
digits = list(nybbles)[:]
digits.reverse()
return reduce(lambda a,b: a*10 + b, digits, 0)
def num2bcd(number, nybble_count):
result = []
for _ in range(nybble_count):
result.append(int(number % 10))
number //= 10
return tuple(result)
def bin2num(nybbles):
digits = list(nybbles)
digits.reverse()
return reduce(lambda a,b: a*16 + b, digits, 0)
def num2bin(number, nybble_count):
result = []
number = int(number)
for _ in range(nybble_count):
result.append(number % 16)
number //= 16
return tuple(result)
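# Examples (nybbles are stored least significant digit first):
#   bcd2num((3, 2, 1)) == 123      num2bcd(123, 4) == (3, 2, 1, 0)
#   bin2num((0xf, 0x1)) == 0x1f    num2bin(0x1f, 2) == (15, 1)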
#
# A "Conversion" encapsulates a unit of measurement on the Ws2300. Eg
# temperature, or wind speed.
#
class Conversion(object):
description = None # Description of the units.
nybble_count = None # Number of nybbles used on the WS2300
units = None # Units name (eg hPa).
#
# Initialise ourselves.
# units - text description of the units.
# nybble_count- Size of stored value on ws2300 in nybbles
# description - Description of the units
#
def __init__(self, units, nybble_count, description):
self.description = description
self.nybble_count = nybble_count
self.units = units
#
# Convert the nybbles read from the ws2300 to our internal value.
#
def binary2value(self, data): raise NotImplementedError()
#
# Convert our internal value to nybbles that can be written to the ws2300.
#
def value2binary(self, value): raise NotImplementedError()
#
# Print value.
#
def str(self, value): raise NotImplementedError()
#
# Convert the string produced by "str()" back to the value.
#
def parse(self, s): raise NotImplementedError()
#
# Transform data into something that can be written. Returns:
# (new_bytes, ws2300.write_safe_args, ...)
# This only becomes tricky when less than a nybble is written.
#
def write(self, data, nybble):
return (data, data)
#
    # Test if the nybbles read from the Ws2300 are sensible. Sometimes a
# communications error will make it past the weak checksums the Ws2300
# uses. This optional function implements another layer of checking -
# does the value returned make sense. Returns True if the value looks
# like garbage.
#
def garbage(self, data):
return False
#
# For values stored as binary numbers.
#
class BinConversion(Conversion):
mult = None
scale = None
units = None
def __init__(self, units, nybble_count, scale, description, mult=1, check=None):
Conversion.__init__(self, units, nybble_count, description)
self.mult = mult
self.scale = scale
self.units = units
def binary2value(self, data):
return (bin2num(data) * self.mult) / 10.0**self.scale
def value2binary(self, value):
return num2bin(int(value * 10**self.scale) // self.mult, self.nybble_count)
def str(self, value):
return "%.*f" % (self.scale, value)
def parse(self, s):
return float(s)
#
# For values stored as BCD numbers.
#
class BcdConversion(Conversion):
offset = None
scale = None
units = None
def __init__(self, units, nybble_count, scale, description, offset=0):
Conversion.__init__(self, units, nybble_count, description)
self.offset = offset
self.scale = scale
self.units = units
def binary2value(self, data):
num = bcd2num(data) % 10**self.nybble_count + self.offset
return float(num) / 10**self.scale
def value2binary(self, value):
return num2bcd(int(value * 10**self.scale) - self.offset, self.nybble_count)
def str(self, value):
return "%.*f" % (self.scale, value)
def parse(self, s):
return float(s)
#
# For pressures. Add a garbage check.
#
class PressureConversion(BcdConversion):
def __init__(self):
BcdConversion.__init__(self, "hPa", 5, 1, "pressure")
def garbage(self, data):
value = self.binary2value(data)
return value < 900 or value > 1200
#
# For values that represent a date.
#
class ConversionDate(Conversion):
format = None
def __init__(self, nybble_count, format_):
description = format_
for xlate in "%Y:yyyy,%m:mm,%d:dd,%H:hh,%M:mm,%S:ss".split(","):
description = description.replace(*xlate.split(":"))
Conversion.__init__(self, "", nybble_count, description)
self.format = format_
def str(self, value):
return time.strftime(self.format, time.localtime(value))
def parse(self, s):
return time.mktime(time.strptime(s, self.format))
class DateConversion(ConversionDate):
def __init__(self):
ConversionDate.__init__(self, 6, "%Y-%m-%d")
def binary2value(self, data):
x = bcd2num(data)
return time.mktime((
x // 10000 % 100,
x // 100 % 100,
x % 100,
0,
0,
0,
0,
0,
0))
def value2binary(self, value):
tm = time.localtime(value)
dt = tm[2] + tm[1] * 100 + (tm[0]-2000) * 10000
return num2bcd(dt, self.nybble_count)
class DatetimeConversion(ConversionDate):
def __init__(self):
ConversionDate.__init__(self, 11, "%Y-%m-%d %H:%M")
def binary2value(self, data):
x = bcd2num(data)
return time.mktime((
x // 1000000000 % 100 + 2000,
x // 10000000 % 100,
x // 100000 % 100,
x // 100 % 100,
x % 100,
0,
0,
0,
0))
def value2binary(self, value):
tm = time.localtime(value)
dow = tm[6] + 1
dt = tm[4]+(tm[3]+(dow+(tm[2]+(tm[1]+(tm[0]-2000)*100)*100)*10)*100)*100
return num2bcd(dt, self.nybble_count)
class UnixtimeConversion(ConversionDate):
def __init__(self):
ConversionDate.__init__(self, 12, "%Y-%m-%d %H:%M:%S")
def binary2value(self, data):
x = bcd2num(data)
return time.mktime((
x //10000000000 % 100 + 2000,
x // 100000000 % 100,
x // 1000000 % 100,
x // 10000 % 100,
x // 100 % 100,
x % 100,
0,
0,
0))
def value2binary(self, value):
tm = time.localtime(value)
dt = tm[5]+(tm[4]+(tm[3]+(tm[2]+(tm[1]+(tm[0]-2000)*100)*100)*100)*100)*100
return num2bcd(dt, self.nybble_count)
class TimestampConversion(ConversionDate):
def __init__(self):
ConversionDate.__init__(self, 10, "%Y-%m-%d %H:%M")
def binary2value(self, data):
x = bcd2num(data)
return time.mktime((
x // 100000000 % 100 + 2000,
x // 1000000 % 100,
x // 10000 % 100,
x // 100 % 100,
x % 100,
0,
0,
0,
0))
def value2binary(self, value):
tm = time.localtime(value)
dt = tm[4] + (tm[3] + (tm[2] + (tm[1] + (tm[0]-2000)*100)*100)*100)*100
return num2bcd(dt, self.nybble_count)
class TimeConversion(ConversionDate):
def __init__(self):
ConversionDate.__init__(self, 6, "%H:%M:%S")
def binary2value(self, data):
x = bcd2num(data)
return time.mktime((
0,
0,
0,
x // 10000 % 100,
x // 100 % 100,
x % 100,
0,
0,
0)) - time.timezone
def value2binary(self, value):
tm = time.localtime(value)
dt = tm[5] + tm[4]*100 + tm[3]*10000
return num2bcd(dt, self.nybble_count)
def parse(self, s):
return time.mktime((0,0,0) + time.strptime(s, self.format)[3:]) + time.timezone
class WindDirectionConversion(Conversion):
def __init__(self):
Conversion.__init__(self, "deg", 1, "North=0 clockwise")
def binary2value(self, data):
return data[0] * 22.5
def value2binary(self, value):
return (int((value + 11.25) / 22.5),)
def str(self, value):
return "%g" % value
def parse(self, s):
return float(s)
class WindVelocityConversion(Conversion):
def __init__(self):
Conversion.__init__(self, "ms,d", 4, "wind speed and direction")
def binary2value(self, data):
return (bin2num(data[:3])/10.0, bin2num(data[3:4]) * 22.5)
def value2binary(self, value):
return num2bin(value[0]*10, 3) + num2bin((value[1] + 11.5) / 22.5, 1)
def str(self, value):
return "%.1f,%g" % value
def parse(self, s):
return tuple([float(x) for x in s.split(",")])
# The ws2300 1.8 implementation does not calculate wind speed correctly -
# it uses bcd2num instead of bin2num. This conversion object uses bin2num
# decoding and it reads all wind data in a single transaction so that we do
# not suffer coherency problems.
class WindConversion(Conversion):
def __init__(self):
Conversion.__init__(self, "ms,d,o,v", 12, "wind speed, dir, validity")
def binary2value(self, data):
overflow = data[0]
validity = data[1]
speed = bin2num(data[2:5]) / 10.0
direction = data[5] * 22.5
return (speed, direction, overflow, validity)
def str(self, value):
return "%.1f,%g,%s,%s" % value
def parse(self, s):
return tuple([float(x) for x in s.split(",")])
#
# For non-numerical values.
#
class TextConversion(Conversion):
constants = None
def __init__(self, constants):
items = constants.items()[:]
items.sort()
fullname = ",".join([c[1]+"="+str(c[0]) for c in items]) + ",unknown-X"
Conversion.__init__(self, "", 1, fullname)
self.constants = constants
def binary2value(self, data):
return data[0]
def value2binary(self, value):
return (value,)
def str(self, value):
result = self.constants.get(value, None)
if result != None:
return result
return "unknown-%d" % value
def parse(self, s):
result = [c[0] for c in self.constants.items() if c[1] == s]
if result:
return result[0]
return None
#
# For values that are represented by one bit.
#
class ConversionBit(Conversion):
bit = None
desc = None
def __init__(self, bit, desc):
self.bit = bit
self.desc = desc
Conversion.__init__(self, "", 1, desc[0] + "=0," + desc[1] + "=1")
def binary2value(self, data):
return data[0] & (1 << self.bit) and 1 or 0
def value2binary(self, value):
return (value << self.bit,)
def str(self, value):
return self.desc[value]
def parse(self, s):
return [c[0] for c in self.desc.items() if c[1] == s][0]
class BitConversion(ConversionBit):
def __init__(self, bit, desc):
ConversionBit.__init__(self, bit, desc)
#
# Since Ws2300.write_safe() only writes nybbles and we have just one bit,
# we have to insert that bit into the data_read so it can be written as
# a nybble.
#
def write(self, data, nybble):
data = (nybble & ~(1 << self.bit) | data[0],)
return (data, data)
class AlarmSetConversion(BitConversion):
bit = None
desc = None
def __init__(self, bit):
BitConversion.__init__(self, bit, {0:"off", 1:"on"})
class AlarmActiveConversion(BitConversion):
bit = None
desc = None
def __init__(self, bit):
BitConversion.__init__(self, bit, {0:"inactive", 1:"active"})
#
# For values that are represented by one bit, and must be written as
# a single bit.
#
class SetresetConversion(ConversionBit):
bit = None
def __init__(self, bit, desc):
ConversionBit.__init__(self, bit, desc)
#
# Setreset bits use a special write mode.
#
def write(self, data, nybble):
if data[0] == 0:
operation = Ws2300.UNSETBIT
else:
operation = Ws2300.SETBIT
return ((nybble & ~(1 << self.bit) | data[0],), [self.bit], operation)
#
# Conversion for history. This kludge makes history fit into the framework
# used for all the other measures.
#
class HistoryConversion(Conversion):
class HistoryRecord(object):
temp_indoor = None
temp_outdoor = None
pressure_absolute = None
humidity_indoor = None
humidity_outdoor = None
rain = None
wind_speed = None
wind_direction = None
def __str__(self):
return "%4.1fc %2d%% %4.1fc %2d%% %6.1fhPa %6.1fmm %2dm/s %5g" % (
self.temp_indoor, self.humidity_indoor,
self.temp_outdoor, self.humidity_outdoor,
self.pressure_absolute, self.rain,
self.wind_speed, self.wind_direction)
def parse(cls, s):
rec = cls()
toks = [tok.rstrip(string.ascii_letters + "%/") for tok in s.split()]
rec.temp_indoor = float(toks[0])
rec.humidity_indoor = int(toks[1])
rec.temp_outdoor = float(toks[2])
rec.humidity_outdoor = int(toks[3])
rec.pressure_absolute = float(toks[4])
rec.rain = float(toks[5])
rec.wind_speed = int(toks[6])
rec.wind_direction = int((float(toks[7]) + 11.25) / 22.5) % 16
return rec
parse = classmethod(parse)
def __init__(self):
Conversion.__init__(self, "", 19, "history")
def binary2value(self, data):
value = self.__class__.HistoryRecord()
n = bin2num(data[0:5])
value.temp_indoor = (n % 1000) / 10.0 - 30
value.temp_outdoor = (n - (n % 1000)) / 10000.0 - 30
n = bin2num(data[5:10])
value.pressure_absolute = (n % 10000) / 10.0
if value.pressure_absolute < 500:
value.pressure_absolute += 1000
value.humidity_indoor = (n - (n % 10000)) / 10000.0
value.humidity_outdoor = bcd2num(data[10:12])
value.rain = bin2num(data[12:15]) * 0.518
value.wind_speed = bin2num(data[15:18])
value.wind_direction = bin2num(data[18:19]) * 22.5
return value
def value2binary(self, value):
result = ()
n = int((value.temp_indoor + 30) * 10.0 + (value.temp_outdoor + 30) * 10000.0 + 0.5)
result = result + num2bin(n, 5)
n = value.pressure_absolute % 1000
n = int(n * 10.0 + value.humidity_indoor * 10000.0 + 0.5)
result = result + num2bin(n, 5)
result = result + num2bcd(value.humidity_outdoor, 2)
result = result + num2bin(int((value.rain + 0.518/2) / 0.518), 3)
result = result + num2bin(value.wind_speed, 3)
result = result + num2bin(value.wind_direction, 1)
return result
#
# Print value.
#
def str(self, value):
return str(value)
#
# Convert the string produced by "str()" back to the value.
#
def parse(self, s):
return self.__class__.HistoryRecord.parse(s)
#
# Various conversions we know about.
#
conv_ala0 = AlarmActiveConversion(0)
conv_ala1 = AlarmActiveConversion(1)
conv_ala2 = AlarmActiveConversion(2)
conv_ala3 = AlarmActiveConversion(3)
conv_als0 = AlarmSetConversion(0)
conv_als1 = AlarmSetConversion(1)
conv_als2 = AlarmSetConversion(2)
conv_als3 = AlarmSetConversion(3)
conv_buzz = SetresetConversion(3, {0:'on', 1:'off'})
conv_lbck = SetresetConversion(0, {0:'off', 1:'on'})
conv_date = DateConversion()
conv_dtme = DatetimeConversion()
conv_utme = UnixtimeConversion()
conv_hist = HistoryConversion()
conv_stmp = TimestampConversion()
conv_time = TimeConversion()
conv_wdir = WindDirectionConversion()
conv_wvel = WindVelocityConversion()
conv_conn = TextConversion({0:"cable", 3:"lost", 15:"wireless"})
conv_fore = TextConversion({0:"rainy", 1:"cloudy", 2:"sunny"})
conv_spdu = TextConversion({0:"m/s", 1:"knots", 2:"beaufort", 3:"km/h", 4:"mph"})
conv_tend = TextConversion({0:"steady", 1:"rising", 2:"falling"})
conv_wovr = TextConversion({0:"no", 1:"overflow"})
conv_wvld = TextConversion({0:"ok", 1:"invalid", 2:"overflow"})
conv_lcon = BinConversion("", 1, 0, "contrast")
conv_rec2 = BinConversion("", 2, 0, "record number")
conv_humi = BcdConversion("%", 2, 0, "humidity")
conv_pres = PressureConversion()
conv_rain = BcdConversion("mm", 6, 2, "rain")
conv_temp = BcdConversion("C", 4, 2, "temperature", -3000)
conv_per2 = BinConversion("s", 2, 1, "time interval", 5)
conv_per3 = BinConversion("min", 3, 0, "time interval")
conv_wspd = BinConversion("m/s", 3, 1, "speed")
conv_wind = WindConversion()
#
# Define a measurement on the Ws2300. This encapsulates:
# - The names (abbrev and long) of the thing being measured, eg wind speed.
# - The location it can be found at in the Ws2300's memory map.
# - The Conversion used to represent the figure.
#
class Measure(object):
IDS = {} # map, Measures defined. {id: Measure, ...}
NAMES = {} # map, Measures defined. {name: Measure, ...}
address = None # int, Nybble address in the Ws2300
conv = None # object, Type of value
id = None # string, Short name
name = None # string, Long name
reset = None # string, Id of measure used to reset this one
def __init__(self, address, id_, conv, name, reset=None):
self.address = address
self.conv = conv
self.reset = reset
if id_ != None:
self.id = id_
assert not id_ in self.__class__.IDS
self.__class__.IDS[id_] = self
if name != None:
self.name = name
assert not name in self.__class__.NAMES
self.__class__.NAMES[name] = self
def __hash__(self):
return hash(self.id)
def __cmp__(self, other):
if isinstance(other, Measure):
return cmp(self.id, other.id)
return cmp(type(self), type(other))
#
# Conversion for raw Hex data. These are created as needed.
#
class HexConversion(Conversion):
def __init__(self, nybble_count):
Conversion.__init__(self, "", nybble_count, "hex data")
def binary2value(self, data):
return data
def value2binary(self, value):
return value
def str(self, value):
return ",".join(["%x" % nybble for nybble in value])
def parse(self, s):
toks = s.replace(","," ").split()
for i in range(len(toks)):
s = list(toks[i])
s.reverse()
toks[i] = ''.join(s)
list_str = list(''.join(toks))
self.nybble_count = len(list_str)
return tuple([int(nybble) for nybble in list_str])
#
# The raw nybble measure.
#
class HexMeasure(Measure):
def __init__(self, address, id_, conv, name):
self.address = address
self.name = name
self.conv = conv
#
# A History record. Again a kludge to make history fit into the framework
# developed for the other measurements. History records are identified
# by their record number. Record number 0 is the most recently written
# record, record number 1 is the next most recently written and so on.
#
class HistoryMeasure(Measure):
HISTORY_BUFFER_ADDR = 0x6c6 # int, Address of the first history record
    MAX_HISTORY_RECORDS = 0xaf # int, Max number of history records stored
LAST_POINTER = None # int, Pointer to last record
RECORD_COUNT = None # int, Number of records in use
recno = None # int, The record number this represents
conv = conv_hist
def __init__(self, recno):
self.recno = recno
def set_constants(cls, ws2300):
measures = [Measure.IDS["hp"], Measure.IDS["hn"]]
data = read_measurements(ws2300, measures)
cls.LAST_POINTER = int(measures[0].conv.binary2value(data[0]))
cls.RECORD_COUNT = int(measures[1].conv.binary2value(data[1]))
set_constants = classmethod(set_constants)
def id(self):
return "h%03d" % self.recno
id = property(id)
def name(self):
return "history record %d" % self.recno
name = property(name)
def offset(self):
if self.LAST_POINTER is None:
raise StandardError("HistoryMeasure.set_constants hasn't been called")
return (self.LAST_POINTER - self.recno) % self.MAX_HISTORY_RECORDS
offset = property(offset)
def address(self):
return self.HISTORY_BUFFER_ADDR + self.conv.nybble_count * self.offset
address = property(address)
#
# The measurements we know about. This is all of them documented in
# memory_map_2300.txt, bar the history. History is handled specially.
# And of course, the "c?"'s aren't real measures at all - it's the current
# time on this machine.
#
Measure( -18, "ct", conv_time, "this computer's time")
Measure( -12, "cw", conv_utme, "this computer's date time")
Measure( -6, "cd", conv_date, "this computer's date")
Measure(0x006, "bz", conv_buzz, "buzzer")
Measure(0x00f, "wsu", conv_spdu, "wind speed units")
Measure(0x016, "lb", conv_lbck, "lcd backlight")
Measure(0x019, "sss", conv_als2, "storm warn alarm set")
Measure(0x019, "sts", conv_als0, "station time alarm set")
Measure(0x01a, "phs", conv_als3, "pressure max alarm set")
Measure(0x01a, "pls", conv_als2, "pressure min alarm set")
Measure(0x01b, "oths", conv_als3, "out temp max alarm set")
Measure(0x01b, "otls", conv_als2, "out temp min alarm set")
Measure(0x01b, "iths", conv_als1, "in temp max alarm set")
Measure(0x01b, "itls", conv_als0, "in temp min alarm set")
Measure(0x01c, "dphs", conv_als3, "dew point max alarm set")
Measure(0x01c, "dpls", conv_als2, "dew point min alarm set")
Measure(0x01c, "wchs", conv_als1, "wind chill max alarm set")
Measure(0x01c, "wcls", conv_als0, "wind chill min alarm set")
Measure(0x01d, "ihhs", conv_als3, "in humidity max alarm set")
Measure(0x01d, "ihls", conv_als2, "in humidity min alarm set")
Measure(0x01d, "ohhs", conv_als1, "out humidity max alarm set")
Measure(0x01d, "ohls", conv_als0, "out humidity min alarm set")
Measure(0x01e, "rhhs", conv_als1, "rain 1h alarm set")
Measure(0x01e, "rdhs", conv_als0, "rain 24h alarm set")
Measure(0x01f, "wds", conv_als2, "wind direction alarm set")
Measure(0x01f, "wshs", conv_als1, "wind speed max alarm set")
Measure(0x01f, "wsls", conv_als0, "wind speed min alarm set")
Measure(0x020, "siv", conv_ala2, "icon alarm active")
Measure(0x020, "stv", conv_ala0, "station time alarm active")
Measure(0x021, "phv", conv_ala3, "pressure max alarm active")
Measure(0x021, "plv", conv_ala2, "pressure min alarm active")
Measure(0x022, "othv", conv_ala3, "out temp max alarm active")
Measure(0x022, "otlv", conv_ala2, "out temp min alarm active")
Measure(0x022, "ithv", conv_ala1, "in temp max alarm active")
Measure(0x022, "itlv", conv_ala0, "in temp min alarm active")
Measure(0x023, "dphv", conv_ala3, "dew point max alarm active")
Measure(0x023, "dplv", conv_ala2, "dew point min alarm active")
Measure(0x023, "wchv", conv_ala1, "wind chill max alarm active")
Measure(0x023, "wclv", conv_ala0, "wind chill min alarm active")
Measure(0x024, "ihhv", conv_ala3, "in humidity max alarm active")
Measure(0x024, "ihlv", conv_ala2, "in humidity min alarm active")
Measure(0x024, "ohhv", conv_ala1, "out humidity max alarm active")
Measure(0x024, "ohlv", conv_ala0, "out humidity min alarm active")
Measure(0x025, "rhhv", conv_ala1, "rain 1h alarm active")
Measure(0x025, "rdhv", conv_ala0, "rain 24h alarm active")
Measure(0x026, "wdv", conv_ala2, "wind direction alarm active")
Measure(0x026, "wshv", conv_ala1, "wind speed max alarm active")
Measure(0x026, "wslv", conv_ala0, "wind speed min alarm active")
Measure(0x027, None, conv_ala3, "pressure max alarm active alias")
Measure(0x027, None, conv_ala2, "pressure min alarm active alias")
Measure(0x028, None, conv_ala3, "out temp max alarm active alias")
Measure(0x028, None, conv_ala2, "out temp min alarm active alias")
Measure(0x028, None, conv_ala1, "in temp max alarm active alias")
Measure(0x028, None, conv_ala0, "in temp min alarm active alias")
Measure(0x029, None, conv_ala3, "dew point max alarm active alias")
Measure(0x029, None, conv_ala2, "dew point min alarm active alias")
Measure(0x029, None, conv_ala1, "wind chill max alarm active alias")
Measure(0x029, None, conv_ala0, "wind chill min alarm active alias")
Measure(0x02a, None, conv_ala3, "in humidity max alarm active alias")
Measure(0x02a, None, conv_ala2, "in humidity min alarm active alias")
Measure(0x02a, None, conv_ala1, "out humidity max alarm active alias")
Measure(0x02a, None, conv_ala0, "out humidity min alarm active alias")
Measure(0x02b, None, conv_ala1, "rain 1h alarm active alias")
Measure(0x02b, None, conv_ala0, "rain 24h alarm active alias")
Measure(0x02c, None, conv_ala2, "wind direction alarm active alias")
Measure(0x02c, None, conv_ala2, "wind speed max alarm active alias")
Measure(0x02c, None, conv_ala2, "wind speed min alarm active alias")
Measure(0x200, "st", conv_time, "station set time", reset="ct")
Measure(0x23b, "sw", conv_dtme, "station current date time")
Measure(0x24d, "sd", conv_date, "station set date", reset="cd")
Measure(0x266, "lc", conv_lcon, "lcd contrast (ro)")
Measure(0x26b, "for", conv_fore, "forecast")
Measure(0x26c, "ten", conv_tend, "tendency")
Measure(0x346, "it", conv_temp, "in temp")
Measure(0x34b, "itl", conv_temp, "in temp min", reset="it")
Measure(0x350, "ith", conv_temp, "in temp max", reset="it")
Measure(0x354, "itlw", conv_stmp, "in temp min when", reset="sw")
Measure(0x35e, "ithw", conv_stmp, "in temp max when", reset="sw")
Measure(0x369, "itla", conv_temp, "in temp min alarm")
Measure(0x36e, "itha", conv_temp, "in temp max alarm")
Measure(0x373, "ot", conv_temp, "out temp")
Measure(0x378, "otl", conv_temp, "out temp min", reset="ot")
Measure(0x37d, "oth", conv_temp, "out temp max", reset="ot")
Measure(0x381, "otlw", conv_stmp, "out temp min when", reset="sw")
Measure(0x38b, "othw", conv_stmp, "out temp max when", reset="sw")
Measure(0x396, "otla", conv_temp, "out temp min alarm")
Measure(0x39b, "otha", conv_temp, "out temp max alarm")
Measure(0x3a0, "wc", conv_temp, "wind chill")
Measure(0x3a5, "wcl", conv_temp, "wind chill min", reset="wc")
Measure(0x3aa, "wch", conv_temp, "wind chill max", reset="wc")
Measure(0x3ae, "wclw", conv_stmp, "wind chill min when", reset="sw")
Measure(0x3b8, "wchw", conv_stmp, "wind chill max when", reset="sw")
Measure(0x3c3, "wcla", conv_temp, "wind chill min alarm")
Measure(0x3c8, "wcha", conv_temp, "wind chill max alarm")
Measure(0x3ce, "dp", conv_temp, "dew point")
Measure(0x3d3, "dpl", conv_temp, "dew point min", reset="dp")
Measure(0x3d8, "dph", conv_temp, "dew point max", reset="dp")
Measure(0x3dc, "dplw", conv_stmp, "dew point min when", reset="sw")
Measure(0x3e6, "dphw", conv_stmp, "dew point max when", reset="sw")
Measure(0x3f1, "dpla", conv_temp, "dew point min alarm")
Measure(0x3f6, "dpha", conv_temp, "dew point max alarm")
Measure(0x3fb, "ih", conv_humi, "in humidity")
Measure(0x3fd, "ihl", conv_humi, "in humidity min", reset="ih")
Measure(0x3ff, "ihh", conv_humi, "in humidity max", reset="ih")
Measure(0x401, "ihlw", conv_stmp, "in humidity min when", reset="sw")
Measure(0x40b, "ihhw", conv_stmp, "in humidity max when", reset="sw")
Measure(0x415, "ihla", conv_humi, "in humidity min alarm")
Measure(0x417, "ihha", conv_humi, "in humidity max alarm")
Measure(0x419, "oh", conv_humi, "out humidity")
Measure(0x41b, "ohl", conv_humi, "out humidity min", reset="oh")
Measure(0x41d, "ohh", conv_humi, "out humidity max", reset="oh")
Measure(0x41f, "ohlw", conv_stmp, "out humidity min when", reset="sw")
Measure(0x429, "ohhw", conv_stmp, "out humidity max when", reset="sw")
Measure(0x433, "ohla", conv_humi, "out humidity min alarm")
Measure(0x435, "ohha", conv_humi, "out humidity max alarm")
Measure(0x497, "rd", conv_rain, "rain 24h")
Measure(0x49d, "rdh", conv_rain, "rain 24h max", reset="rd")
Measure(0x4a3, "rdhw", conv_stmp, "rain 24h max when", reset="sw")
Measure(0x4ae, "rdha", conv_rain, "rain 24h max alarm")
Measure(0x4b4, "rh", conv_rain, "rain 1h")
Measure(0x4ba, "rhh", conv_rain, "rain 1h max", reset="rh")
Measure(0x4c0, "rhhw", conv_stmp, "rain 1h max when", reset="sw")
Measure(0x4cb, "rhha", conv_rain, "rain 1h max alarm")
Measure(0x4d2, "rt", conv_rain, "rain total", reset=0)
Measure(0x4d8, "rtrw", conv_stmp, "rain total reset when", reset="sw")
Measure(0x4ee, "wsl", conv_wspd, "wind speed min", reset="ws")
Measure(0x4f4, "wsh", conv_wspd, "wind speed max", reset="ws")
Measure(0x4f8, "wslw", conv_stmp, "wind speed min when", reset="sw")
Measure(0x502, "wshw", conv_stmp, "wind speed max when", reset="sw")
Measure(0x527, "wso", conv_wovr, "wind speed overflow")
Measure(0x528, "wsv", conv_wvld, "wind speed validity")
Measure(0x529, "wv", conv_wvel, "wind velocity")
Measure(0x529, "ws", conv_wspd, "wind speed")
Measure(0x52c, "w0", conv_wdir, "wind direction")
Measure(0x52d, "w1", conv_wdir, "wind direction 1")
Measure(0x52e, "w2", conv_wdir, "wind direction 2")
Measure(0x52f, "w3", conv_wdir, "wind direction 3")
Measure(0x530, "w4", conv_wdir, "wind direction 4")
Measure(0x531, "w5", conv_wdir, "wind direction 5")
Measure(0x533, "wsla", conv_wspd, "wind speed min alarm")
Measure(0x538, "wsha", conv_wspd, "wind speed max alarm")
Measure(0x54d, "cn", conv_conn, "connection type")
Measure(0x54f, "cc", conv_per2, "connection time till connect")
Measure(0x5d8, "pa", conv_pres, "pressure absolute")
Measure(0x5e2, "pr", conv_pres, "pressure relative")
Measure(0x5ec, "pc", conv_pres, "pressure correction")
Measure(0x5f6, "pal", conv_pres, "pressure absolute min", reset="pa")
Measure(0x600, "prl", conv_pres, "pressure relative min", reset="pr")
Measure(0x60a, "pah", conv_pres, "pressure absolute max", reset="pa")
Measure(0x614, "prh", conv_pres, "pressure relative max", reset="pr")
Measure(0x61e, "plw", conv_stmp, "pressure min when", reset="sw")
Measure(0x628, "phw", conv_stmp, "pressure max when", reset="sw")
Measure(0x63c, "pla", conv_pres, "pressure min alarm")
Measure(0x650, "pha", conv_pres, "pressure max alarm")
Measure(0x6b2, "hi", conv_per3, "history interval")
Measure(0x6b5, "hc", conv_per3, "history time till sample")
Measure(0x6b8, "hw", conv_stmp, "history last sample when")
Measure(0x6c2, "hp", conv_rec2, "history last record pointer",reset=0)
Measure(0x6c4, "hn", conv_rec2, "history number of records", reset=0)
# get all of the wind info in a single invocation
Measure(0x527, "wind", conv_wind, "wind")
#
# Read the requests.
#
def read_measurements(ws2300, read_requests):
if not read_requests:
return []
#
# Optimise what we have to read.
#
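    # Sort the requests by address and merge any two that lie within 6
    # nybbles of each other (and on the same side of address 0, since
    # negative addresses are the computer time) into one batch, so fewer
    # reads hit the station.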
batches = [(m.address, m.conv.nybble_count) for m in read_requests]
batches.sort()
index = 1
addr = {batches[0][0]: 0}
while index < len(batches):
same_sign = (batches[index-1][0] < 0) == (batches[index][0] < 0)
same_area = batches[index-1][0] + batches[index-1][1] + 6 >= batches[index][0]
if not same_sign or not same_area:
addr[batches[index][0]] = index
index += 1
continue
addr[batches[index][0]] = index-1
batches[index-1] = batches[index-1][0], batches[index][0] + batches[index][1] - batches[index-1][0]
del batches[index]
#
# Read the data.
#
nybbles = ws2300.read_batch(batches)
#
# Return the data read in the order it was requested.
#
results = []
for measure in read_requests:
index = addr[measure.address]
offset = measure.address - batches[index][0]
results.append(nybbles[index][offset:offset+measure.conv.nybble_count])
return results
class WS23xxConfEditor(weewx.drivers.AbstractConfEditor):
@property
def default_stanza(self):
return """
[WS23xx]
# This section is for the La Crosse WS-2300 series of weather stations.
# Serial port such as /dev/ttyS0, /dev/ttyUSB0, or /dev/cuaU0
port = /dev/ttyUSB0
# The station model, e.g., 'LaCrosse WS2317' or 'TFA Primus'
model = LaCrosse WS23xx
# The driver to use:
driver = weewx.drivers.ws23xx
"""
def prompt_for_settings(self):
print "Specify the serial port on which the station is connected, for"
print "example /dev/ttyUSB0 or /dev/ttyS0."
port = self._prompt('port', '/dev/ttyUSB0')
return {'port': port}
def modify_config(self, config_dict):
print """
Setting record_generation to software."""
config_dict['StdArchive']['record_generation'] = 'software'
# define a main entry point for basic testing of the station without weewx
# engine and service overhead. invoke this as follows from the weewx root dir:
#
# PYTHONPATH=bin python bin/weewx/drivers/ws23xx.py
if __name__ == '__main__':
import optparse
usage = """%prog [options] [--debug] [--help]"""
syslog.openlog('ws23xx', syslog.LOG_PID | syslog.LOG_CONS)
syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_INFO))
port = DEFAULT_PORT
parser = optparse.OptionParser(usage=usage)
parser.add_option('--version', dest='version', action='store_true',
help='display driver version')
parser.add_option('--debug', dest='debug', action='store_true',
help='display diagnostic information while running')
parser.add_option('--port', dest='port', metavar='PORT',
help='serial port to which the station is connected')
parser.add_option('--readings', dest='readings', action='store_true',
help='display sensor readings')
parser.add_option("--records", dest="records", type=int, metavar="N",
help="display N station records, oldest to newest")
parser.add_option('--help-measures', dest='hm', action='store_true',
help='display measure names')
parser.add_option('--measure', dest='measure', type=str,
metavar="MEASURE", help='display single measure')
(options, args) = parser.parse_args()
if options.version:
print "ws23xx driver version %s" % DRIVER_VERSION
exit(1)
if options.debug is not None:
syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
if options.port:
port = options.port
with WS23xx(port) as s:
if options.readings:
data = s.get_raw_data(SENSOR_IDS)
print data
if options.records is not None:
for ts,record in s.gen_records(count=options.records):
print ts,record
if options.measure:
data = s.get_raw_data([options.measure])
print data
if options.hm:
for m in Measure.IDS:
print "%s\t%s" % (m, Measure.IDS[m].name)
| [
"[email protected]"
] | |
9b8abd96e7a9d1cf1657b05be3e7327c9595c874 | f64e31cb76909a6f7fb592ad623e0a94deec25ae | /tests/test_p0380_insert_delete_getrandom_o1.py | 6fb1b572dadda2d8a17a49d0331190489c5cd47b | [] | no_license | weak-head/leetcode | 365d635cb985e1d154985188f6728c18cab1f877 | 9a20e1835652f5e6c33ef5c238f622e81f84ca26 | refs/heads/main | 2023-05-11T14:19:58.205709 | 2023-05-05T20:57:13 | 2023-05-05T20:57:13 | 172,853,059 | 0 | 1 | null | 2022-12-09T05:22:32 | 2019-02-27T05:58:54 | Python | UTF-8 | Python | false | false | 1,572 | py | # flake8: noqa: F403, F405
import pytest
from leetcode.p0380_insert_delete_getrandom_o1 import *
solutions = [
RandomizedSet,
]
# each case is a list of (method, value, expected) steps:
# "i" = insert, "d" = remove, "r" = getRandom (expected is the set of allowed results)
test_cases = [
[
("d", 2, False),
("i", 1, True),
("i", 1, False),
("r", None, {1}),
],
[
("d", 2, False),
("i", 1, True),
("i", 1, False),
("r", None, {1}),
("i", 2, True),
("i", 3, True),
("r", None, {1, 2, 3}),
("r", None, {1, 2, 3}),
("r", None, {1, 2, 3}),
("r", None, {1, 2, 3}),
("r", None, {1, 2, 3}),
],
[
("d", 1, False),
("i", 1, True),
("r", None, {1}),
("r", None, {1}),
("r", None, {1}),
("i", 2, True),
("r", None, {1, 2}),
("r", None, {1, 2}),
("r", None, {1, 2}),
("r", None, {1, 2}),
("r", None, {1, 2}),
("d", 1, True),
("d", 1, False),
("r", None, {2}),
("r", None, {2}),
("r", None, {2}),
("d", 2, True),
("i", 3, True),
("r", None, {3}),
("r", None, {3}),
("r", None, {3}),
("r", None, {3}),
],
]
@pytest.mark.timeout(1)
@pytest.mark.parametrize(("args"), test_cases)
@pytest.mark.parametrize("solution", solutions)
def test_solution(args, solution):
rs = solution()
for m, v, e in args:
if m == "i":
assert rs.insert(v) == e
elif m == "d":
assert rs.remove(v) == e
else:
assert rs.getRandom() in e
| [
"[email protected]"
] | |
27eeeb653c05caa760b8785076bda08a096fb674 | 0eb599c3bbfa6e5b31516913b88cc9db3a1311ce | /AtCoder_unofficial/chokudai_speedrun_001_i.py | 5148ec2cff2560f0cb7e129c29a7606713c0aa9f | [] | no_license | Linus-MK/AtCoder | 5b84dc88c2d2773d0f97ed18265d303290da7879 | a587e89a9e0c2ab4d36b09176bcc95e901e14326 | refs/heads/master | 2022-11-25T05:37:12.148722 | 2022-11-17T16:04:10 | 2022-11-17T16:04:10 | 169,840,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | n = int(input())
nums = list(map(int, input().split()))
# 累積和 + α
# Prefix sums plus a little extra.
# Option 1: binary search, using the fact that the prefix sums are monotonically
# increasing (N log N).
# Option 2: sequential scan, using the fact that a sum of distinct numbers starting
# from 1 reaches n fairly quickly (worst case about N*sqrt(N), but smaller in practice).
# Let's try the latter.
# 369 ms, comfortably within the time limit.
for i in range(n):
cumsum[i+1] = cumsum[i] + nums[i]
ans = 0
for i in range(n+1):
for j in range(i+1, n+1):
if cumsum[j] - cumsum[i] == n:
ans += 1
elif cumsum[j] - cumsum[i] > n:
break
print(ans)
| [
"[email protected]"
] | |
c1596616ba13010400e6d2581bcc6100afca0493 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2021_10_01/operations/_maintenance_configurations_operations.py | 1ff70644846346ec7ed89b8078257c9d68991e1e | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 26,102 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_managed_cluster_request(
resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
"resourceName": _SERIALIZER.url(
"resource_name",
resource_name,
"str",
max_length=63,
min_length=1,
pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str, resource_name: str, config_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
"resourceName": _SERIALIZER.url(
"resource_name",
resource_name,
"str",
max_length=63,
min_length=1,
pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
),
"configName": _SERIALIZER.url("config_name", config_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
resource_group_name: str, resource_name: str, config_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
"resourceName": _SERIALIZER.url(
"resource_name",
resource_name,
"str",
max_length=63,
min_length=1,
pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
),
"configName": _SERIALIZER.url("config_name", config_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, resource_name: str, config_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
"resourceName": _SERIALIZER.url(
"resource_name",
resource_name,
"str",
max_length=63,
min_length=1,
pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
),
"configName": _SERIALIZER.url("config_name", config_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
class MaintenanceConfigurationsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2021_10_01.ContainerServiceClient`'s
:attr:`maintenance_configurations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_managed_cluster(
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> Iterable["_models.MaintenanceConfiguration"]:
"""Gets a list of maintenance configurations in the specified managed cluster.
Gets a list of maintenance configurations in the specified managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MaintenanceConfiguration or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2021_10_01.models.MaintenanceConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))
cls: ClsType[_models.MaintenanceConfigurationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_managed_cluster_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_managed_cluster.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("MaintenanceConfigurationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_managed_cluster.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations"
}
@distributed_trace
def get(
self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any
) -> _models.MaintenanceConfiguration:
"""Gets the specified maintenance configuration of a managed cluster.
Gets the specified maintenance configuration of a managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param config_name: The name of the maintenance configuration. Required.
:type config_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MaintenanceConfiguration or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_10_01.models.MaintenanceConfiguration
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))
cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
config_name=config_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}"
}
@overload
def create_or_update(
self,
resource_group_name: str,
resource_name: str,
config_name: str,
parameters: _models.MaintenanceConfiguration,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.MaintenanceConfiguration:
"""Creates or updates a maintenance configuration in the specified managed cluster.
Creates or updates a maintenance configuration in the specified managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param config_name: The name of the maintenance configuration. Required.
:type config_name: str
:param parameters: The maintenance configuration to create or update. Required.
:type parameters: ~azure.mgmt.containerservice.v2021_10_01.models.MaintenanceConfiguration
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MaintenanceConfiguration or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_10_01.models.MaintenanceConfiguration
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def create_or_update(
self,
resource_group_name: str,
resource_name: str,
config_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.MaintenanceConfiguration:
"""Creates or updates a maintenance configuration in the specified managed cluster.
Creates or updates a maintenance configuration in the specified managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param config_name: The name of the maintenance configuration. Required.
:type config_name: str
:param parameters: The maintenance configuration to create or update. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MaintenanceConfiguration or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_10_01.models.MaintenanceConfiguration
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
resource_name: str,
config_name: str,
parameters: Union[_models.MaintenanceConfiguration, IO],
**kwargs: Any
) -> _models.MaintenanceConfiguration:
"""Creates or updates a maintenance configuration in the specified managed cluster.
Creates or updates a maintenance configuration in the specified managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param config_name: The name of the maintenance configuration. Required.
:type config_name: str
:param parameters: The maintenance configuration to create or update. Is either a
MaintenanceConfiguration type or a IO type. Required.
:type parameters: ~azure.mgmt.containerservice.v2021_10_01.models.MaintenanceConfiguration or
IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MaintenanceConfiguration or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_10_01.models.MaintenanceConfiguration
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "MaintenanceConfiguration")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
config_name=config_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}"
}
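# --- Editor's note (illustrative, not part of the generated SDK) -------------
# A minimal usage sketch for the operation above. It assumes a
# ContainerServiceClient wired up elsewhere with valid credentials and a
# subscription id; the resource names and the maintenance window are
# placeholders, and the model/field names below are assumptions that should be
# checked against the installed azure-mgmt-containerservice package.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.containerservice import ContainerServiceClient
#   from azure.mgmt.containerservice.v2021_10_01.models import (
#       MaintenanceConfiguration,
#       TimeInWeek,
#   )
#
#   client = ContainerServiceClient(DefaultAzureCredential(), "<subscription-id>")
#   config = client.maintenance_configurations.create_or_update(
#       resource_group_name="my-rg",
#       resource_name="my-aks-cluster",
#       config_name="default",
#       parameters=MaintenanceConfiguration(
#           time_in_week=[TimeInWeek(day="Saturday", hour_slots=[2, 3])]
#       ),
#   )
# ------------------------------------------------------------------------------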
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any
) -> None:
"""Deletes a maintenance configuration.
Deletes a maintenance configuration.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param config_name: The name of the maintenance configuration. Required.
:type config_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
config_name=config_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}"
}
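# --- Editor's note (illustrative, not part of the generated SDK) -------------
# Matching sketch for removing the configuration created above; the same client
# assumptions apply. The call returns None on HTTP 200/204.
#
#   client.maintenance_configurations.delete(
#       resource_group_name="my-rg",
#       resource_name="my-aks-cluster",
#       config_name="default",
#   )
# ------------------------------------------------------------------------------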
| [
"[email protected]"
] | |
278beccd4959f7b5d2b6bd3011a01f60c47f08e7 | 3f8de52ba41a7abb4a8b222908f98747d13e1afa | /rlpy/stats/_stats.py | 13b84d0d9773808baa5d5c273610176409964df4 | [
"ISC"
] | permissive | evenmarbles/rlpy | 9c6b570ca3117d2171a897e06ec6deef8fdd918a | 3c3c39a316285ca725268e81aef030e5c764f797 | refs/heads/master | 2016-08-11T06:50:19.679495 | 2016-03-12T22:04:05 | 2016-03-12T22:04:05 | 53,755,316 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,387 | py | from __future__ import division, print_function, absolute_import
# noinspection PyUnresolvedReferences
from six.moves import range
import numpy as np
# noinspection PyPackageRequirements
from sklearn.utils.extmath import logsumexp
from ..auxiliary.array import nunique
__all__ = ['is_posdef', 'randpd', 'stacked_randpd', 'normalize_logspace', 'sq_distance',
'partitioned_mean', 'partitioned_cov', 'partitioned_sum', 'shrink_cov',
'canonize_labels']
def is_posdef(a):
"""Test if matrix `a` is positive definite.
The method uses Cholesky decomposition to determine if
the matrix is positive definite.
Parameters
----------
a : ndarray
A matrix.
Returns
-------
bool :
Whether the matrix is positive definite.
Examples
--------
>>> is_posdef(np.eye(3))
True
>>> is_posdef(np.array([[1., 2.], [2., 1.]]))
False
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) Kevin Murphy and Matt Dunham
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
try:
np.linalg.cholesky(np.asarray(a))
return True
except np.linalg.LinAlgError:
return False
def randpd(dim):
"""Create a random positive definite matrix of size `dim`-by-`dim`.
Parameters
----------
dim : int
The dimension of the matrix to create.
Returns
-------
ndarray :
A `dim`-by-`dim` positive definite matrix.
Examples
--------
>>> a = randpd(3)
>>> a.shape
(3, 3)
>>> is_posdef(a)
True
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) Kevin Murphy and Matt Dunham
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
x = np.random.randn(dim, dim)
# Gram matrix (matrix product, as in the Matlab original X*X') is symmetric PSD;
# the elementwise `x * x.T` of the earlier port is generally indefinite.
a = np.dot(x, x.T)
while not is_posdef(a):
a = a + np.diag(0.001 * np.ones(dim))
return a
def stacked_randpd(dim, k, p=0):
"""Create stacked positive definite matrices.
Create multiple random positive definite matrices of size
dim-by-dim and stack them.
Parameters
----------
dim : int
The dimension of each matrix.
k : int
The number of matrices.
p : int
The diagonal value of each matrix.
Returns
-------
ndarray :
Multiple stacked random positive definite matrices.
Examples
--------
>>> s = stacked_randpd(3, 5)
>>> s.shape
(5, 3, 3)
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) Kevin Murphy and Matt Dunham
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
s = np.zeros((k, dim, dim))
for i in range(k):
s[i] = randpd(dim) + np.diag(p * np.ones(dim))
return s
def normalize_logspace(a):
"""Normalizes the array `a` in the log domain.
Each row of `a` is a log discrete distribution. Returns
the array normalized in the log domain while minimizing the
possibility of numerical underflow.
Parameters
----------
a : ndarray
The array to normalize in the log domain.
Returns
-------
a : ndarray
The array normalized in the log domain.
lnorm : float
log normalization constant.
Examples
--------
>>> y, lnorm = normalize_logspace(np.log(np.array([[1., 1., 2.]])))
>>> np.allclose(np.exp(y), [[0.25, 0.25, 0.5]])
True
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) Kevin Murphy and Matt Dunham
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
l = logsumexp(a, 1)
y = a.T - l
return y.T, l
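# Editor's note: a small illustration (not from the original source) of why the
# normalisation is done in log space. Naive exponentiation underflows for very
# negative log weights, while the log-domain version stays finite.
#
#   a = np.array([[-1000.0, -1001.0, -1002.0]])   # log weights
#   np.exp(a) / np.exp(a).sum()                   # naive: 0/0 -> nan
#   y, lnorm = normalize_logspace(a)
#   np.exp(y)                                     # ~[[0.665, 0.245, 0.090]]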
def sq_distance(p, q, p_sos=None, q_sos=None):
"""Efficiently compute squared Euclidean distances between stats of vectors.
Compute the squared Euclidean distances between every d-dimensional point
in `p` to every `d`-dimensional point in q. Both `p` and `q` are n-point-by-n-dimensions.
Parameters
----------
p : array_like, shape (`n`, `dim`)
Array where `n` is the number of points and `dim` is the number of
dimensions.
q : array_like, shape (`n`, `dim`)
Array where `n` is the number of points and `dim` is the number of
dimensions.
p_sos : array_like, shape (`dim`,)
q_sos : array_like, shape (`dim`,)
Returns
-------
ndarray :
The squared Euclidean distance.
Examples
--------
>>> p = np.array([[0., 0.], [1., 1.]])
>>> q = np.array([[1., 0.]])
>>> np.allclose(sq_distance(p, q), [[1.], [1.]])
True
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) Kevin Murphy and Matt Dunham
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
p_sos = np.sum(np.power(p, 2), 1) if p_sos is None else p_sos
# noinspection PyTypeChecker
q_sos = np.sum(np.power(q, 2), 1) if q_sos is None else q_sos
# noinspection PyUnresolvedReferences
n = q_sos.shape[0]
# noinspection PyUnresolvedReferences
return (q_sos.reshape((n, 1)) + p_sos).T - 2 * np.dot(p, q.T)
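# Editor's note: an equivalence check (not from the original source). The
# vectorised expression above matches a direct per-pair computation; the optional
# p_sos / q_sos arguments only avoid recomputing the squared norms across calls.
#
#   p, q = np.random.randn(4, 3), np.random.randn(5, 3)
#   direct = np.array([[np.sum((pi - qj) ** 2) for qj in q] for pi in p])
#   np.allclose(sq_distance(p, q), direct)   # -> True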
def partitioned_mean(x, y, c=None, return_counts=False):
"""Mean of groups.
Groups the rows of `x` according to the class labels in y and
takes the mean of each group.
Parameters
----------
x : array_like, shape (`n`, `dim`)
The data to group, where `n` is the number of data points and
`dim` is the dimensionality of each data point.
y : array_like, shape (`n`,)
The class label for each data point.
return_counts : bool
Whether to return the number of elements in each group or not.
Returns
-------
mean : array_like
The mean of each group.
counts : int
The number of elements in each group.
Examples
--------
>>> x = np.array([[1., 1.], [3., 3.], [4., 6.]])
>>> y = np.array([0, 0, 1])
>>> np.allclose(partitioned_mean(x, y), [[2., 2.], [4., 6.]])
True
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) Kevin Murphy and Matt Dunham
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
c = nunique(y) if c is None else c
dim = x.shape[1]
m = np.zeros((c, dim))
for i in range(c):
ndx = y == i
m[i] = np.mean(x[ndx], 0)
if not return_counts:
ret = m
else:
ret = (m,)
# noinspection PyTupleAssignmentBalance
_, counts = np.unique(y, return_counts=True)
ret += (counts,)
return ret
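# Editor's note: illustration (not from the original source) of the
# return_counts=True path; labels are assumed to already be coded 0..c-1.
#
#   x = np.array([[1., 1.], [3., 3.], [4., 6.]])
#   y = np.array([0, 0, 1])
#   m, counts = partitioned_mean(x, y, return_counts=True)
#   m        # [[2., 2.], [4., 6.]]
#   counts   # [2, 1]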
def partitioned_cov(x, y, c=None):
"""Covariance of groups.
Partition the rows of `x` according to class labels in `y` and
take the covariance of each group.
Parameters
----------
x : array_like, shape (`n`, `dim`)
The data to group, where `n` is the number of data points and
`dim` is the dimensionality of each data point.
y : array_like, shape (`n`,)
The class label for each data point.
c : int
The number of components in `y`.
Returns
-------
cov : array_like
The covariance of each group.
Examples
--------
>>> partitioned_cov()
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) Kevin Murphy and Matt Dunham
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
.. warning::
Implementation of this function is not finished yet.
"""
c = nunique(y) if c is None else c
dim = x.shape[1]
cov = np.zeros((c, dim, dim))
for i in range(c):
# Editor's fix (hedged): the original indexed with `y == c` and returned nothing.
# rowvar=False assumes observations are rows, matching the (c, dim, dim) result.
cov[i] = np.cov(x[y == i], rowvar=False)
return cov
def partitioned_sum(x, y, c=None):
"""Sums of groups.
Groups the rows of `x` according to the class labels in `y`
and sums each group.
Parameters
----------
x : array_like, shape (`n`, `dim`)
The data to group, where `n` is the number of data points and
`dim` is the dimensionality of each data point.
y : array_like, shape (`n`,)
The class label for each data point.
c : int
The number of components in `y`.
Returns
-------
sums : array_like
The sum of each group.
Examples
--------
>>> x = np.array([[1., 1.], [3., 3.], [4., 6.]])
>>> y = np.array([0, 0, 1])
>>> np.allclose(partitioned_sum(x, y), [[4., 4.], [4., 6.]])
True
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) Kevin Murphy and Matt Dunham
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
c = nunique(y) if c is None else c
# noinspection PyTypeChecker
return np.dot(np.arange(0, c).reshape(c, 1) == y, x)
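# Editor's note (not from the original source): the dot product above is a
# one-hot trick. (np.arange(c).reshape(c, 1) == y) forms a boolean c-by-n group
# membership matrix, and multiplying it with x sums each group's rows in one shot.
#
#   (np.arange(2).reshape(2, 1) == np.array([0, 0, 1])).astype(int)
#   # -> [[1, 1, 0],
#   #     [0, 0, 1]]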
def shrink_cov(x, return_lambda=False, return_estimate=False):
"""Covariance shrinkage estimation.
Ledoit-Wolf optimal shrinkage estimator for cov(X)
:math:`C = \\lambda*t + (1 - \\lambda) * s`
using the diagonal variance 'target' t=np.diag(s) with the
unbiased sample cov `s` as the unconstrained estimate.
Parameters
----------
x : array_like, shape (`n`, `dim`)
The data, where `n` is the number of data points and
`dim` is the dimensionality of each data point.
return_lambda : bool
Whether to return lambda or not.
return_estimate : bool
Whether to return the unbiased estimate or not.
Returns
-------
C : array
The shrunk final estimate
lambda_ : float, optional
Lambda
estimate : array, optional
Unbiased estimate.
Examples
--------
>>> x = np.random.randn(50, 4)
>>> c, lambda_, s = shrink_cov(x, return_lambda=True, return_estimate=True)
>>> c.shape
(4, 4)
>>> 0.0 <= lambda_ <= 1.0
True
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) Kevin Murphy and Matt Dunham
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
optional_returns = return_lambda or return_estimate
n, p = x.shape
x_mean = np.mean(x, 0)
x = x - x_mean
# noinspection PyTypeChecker
s = np.asarray(np.dot(x.T, x) / (n - 1)) # unbiased estimate
s_bar = (n - 1) * s / n
s_var = np.zeros((p, p))
for i in range(n):
# noinspection PyTypeChecker
s_var += np.power(x[i].reshape(p, 1) * x[i] - s_bar, 2)
s_var = np.true_divide(n, (n - 1)**3) * s_var
# calculate optimal shrinkage
o_shrink = np.triu(np.ones((p, p))) - np.eye(p)
# Ledoit-Wolf formula
lambda_ = np.sum(s_var[o_shrink.astype(np.bool)]) / np.sum(np.power(s[o_shrink.astype(np.bool)], 2))
# bound-constrain lambda
lambda_ = np.max([0, np.min([1, lambda_])])
# shrunk final estimate C
c = lambda_ * np.diag(np.diag(s)) + (1 - lambda_) * s
if not optional_returns:
ret = c
else:
ret = (c,)
if return_lambda:
ret += (lambda_,)
if return_estimate:
ret += (s,)
return ret
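# Editor's note: a consistency check (not from the original source). The shrunk
# estimate interpolates between the unbiased sample covariance and its diagonal,
# so the Ledoit-Wolf identity holds by construction:
#
#   x = np.random.randn(200, 5)
#   c, lam, s = shrink_cov(x, return_lambda=True, return_estimate=True)
#   np.allclose(c, lam * np.diag(np.diag(s)) + (1 - lam) * s)   # -> True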
def canonize_labels(labels, support=None):
"""Transform labels to 1:k.
The output has the same size as `labels`, but every label is
transformed to its corresponding value in 1:k. If `labels` does not span
the support, specify the support explicitly as the 2nd argument.
Parameters
----------
labels : array_like
support : optional
Returns
-------
Transformed labels.
Examples
--------
>>> canonize_labels()
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) Kevin Murphy and Matt Dunham
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
.. warning::
This is only a stub function. Implementation is still missing
"""
raise NotImplementedError
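# Editor's note: the function above is an unimplemented stub. A minimal sketch of
# the documented behaviour (assuming hashable labels and ignoring the optional
# `support` argument) could lean on np.unique's inverse mapping:
#
#   def _canonize_labels_sketch(labels):
#       _, canonical = np.unique(np.asarray(labels), return_inverse=True)
#       return canonical + 1   # maps onto 1..k, matching the "1:k" wording above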
| [
"[email protected]"
] | |
82fb173cf47cd88083962ab2b73da46b3f4fcc51 | 817142283452fd6d351f2faaaccbeb1b012155ef | /ntc_rosetta_conf/usr_datastore.py | 201738bef1e9d1f63cecf9c45559e4d5b2c7deac | [
"Apache-2.0"
] | permissive | networktocode/ntc-rosetta-conf | ada2356f9e717a9688300842dd613a9021a78456 | 06c8028e0bbafdd97d15e14ca13faa2601345d8b | refs/heads/develop | 2021-09-26T13:02:30.495809 | 2019-08-14T15:53:25 | 2019-08-14T15:53:25 | 191,575,862 | 5 | 1 | Apache-2.0 | 2021-09-16T15:04:24 | 2019-06-12T13:24:51 | Python | UTF-8 | Python | false | false | 86 | py | from jetconf.data import JsonDatastore
class UserDatastore(JsonDatastore):
pass
| [
"[email protected]"
] | |
5225cec94bbd84fd01b937451ec2e442f10c6b36 | 64aadced1900d9791099228fa91995c2f8444633 | /python/prices.py | 1865f0e7dfe64d2745c9ef79321c2b43b4be11fc | [] | no_license | ctmakro/playground | 821a8c668b58ebd81cd48309e6f4c6cd16badea7 | 5d6e8e528f1913b6089322ef388213cec5264ae1 | refs/heads/master | 2020-12-25T01:51:12.041611 | 2020-07-14T19:17:24 | 2020-07-14T19:17:24 | 57,165,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | wg = '''<!-- TradingView Widget BEGIN -->
<div class="tradingview-widget-container">
<div class="asdf">{}</div>
<div class="tradingview-widget-container__widget"></div>
<script type="text/javascript" src="https://s3.tradingview.com/external-embedding/embed-widget-mini-symbol-overview.js" async>
{{
"symbol": "{}",
"width": "280",
"height": "280",
"locale": "en",
"dateRange": "{}",
"colorTheme": "light",
"trendLineColor": "#37a6ef",
"underLineColor": "#e3f2fd",
"isTransparent": false,
"autosize": false,
"largeChartUrl": ""
}}
</script>
</div>
<!-- TradingView Widget END -->'''
print('''
<style>
.tradingview-widget-container{
display:inline-block;
margin:5px;
}
.asdf { text-align:center;}
</style>''')
items = '1/FX_IDC:CNYUSD,FOREXCOM:XAUUSD/31.1034807/FX_IDC:CNYUSD,INDEX:HSI,GOOGL,AAPL'.split(',')
names = 'USD/CNY,CNY/g,HSI,Google,Apple'.split(',')
dataranges = '12m,1m'.split(',')
for n,i in zip(names,items):
for d in dataranges:
print(wg.format(n+(' ({})'.format(d)), i, d))
| [
"[email protected]"
] |
] | |