id (stringlengths 2–8) | text (stringlengths 16–264k) | dataset_id (stringclasses 1 value)
---|---|---|
6650211
|
<filename>tricircle-6.0.0/tricircle/network/security_groups.py
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from neutron.db import securitygroups_db
import tricircle.common.client as t_client
import tricircle.common.constants as t_constants
from tricircle.common import context
import tricircle.common.context as t_context
import tricircle.common.exceptions as t_exceptions
from tricircle.common import xrpcapi
from tricircle.db import core
from tricircle.db import models
import tricircle.network.exceptions as n_exceptions
from tricircle.network import utils as nt_utils
LOG = log.getLogger(__name__)
class TricircleSecurityGroupMixin(securitygroups_db.SecurityGroupDbMixin):
def __init__(self):
super(TricircleSecurityGroupMixin, self).__init__()
self.xjob_handler = xrpcapi.XJobAPI()
self.clients = {}
@staticmethod
def _compare_rule(rule1, rule2):
for key in ('direction', 'remote_ip_prefix', 'protocol', 'ethertype',
'port_range_max', 'port_range_min'):
if rule1[key] != rule2[key] and str(rule1[key]) != str(rule2[key]):
return False
return True
def _get_client(self, region_name):
if region_name not in self.clients:
self.clients[region_name] = t_client.Client(region_name)
return self.clients[region_name]
def create_security_group_rule(self, q_context, security_group_rule):
rule = security_group_rule['security_group_rule']
if rule['remote_group_id']:
raise n_exceptions.RemoteGroupNotSupported()
sg_id = rule['security_group_id']
sg = self.get_security_group(q_context, sg_id)
if not sg:
raise n_exceptions.SecurityGroupNotFound(sg_id=sg_id)
new_rule = super(TricircleSecurityGroupMixin,
self).create_security_group_rule(q_context,
security_group_rule)
t_context = context.get_context_from_neutron_context(q_context)
try:
self.xjob_handler.configure_security_group_rules(
t_context, rule['project_id'])
except Exception:
raise n_exceptions.BottomPodOperationFailure(
resource='security group rule', region_name='')
return new_rule
def delete_security_group_rule(self, q_context, _id):
rule = self.get_security_group_rule(q_context, _id)
if not rule:
raise n_exceptions.SecurityGroupRuleNotFound(rule_id=_id)
if rule['remote_group_id']:
raise n_exceptions.RemoteGroupNotSupported()
sg_id = rule['security_group_id']
sg = self.get_security_group(q_context, sg_id)
if not sg:
raise n_exceptions.SecurityGroupNotFound(sg_id=sg_id)
super(TricircleSecurityGroupMixin,
self).delete_security_group_rule(q_context, _id)
t_context = context.get_context_from_neutron_context(q_context)
try:
self.xjob_handler.configure_security_group_rules(
t_context, rule['project_id'])
except Exception:
raise n_exceptions.BottomPodOperationFailure(
resource='security group rule', region_name='')
def get_security_group(self, context, sg_id, fields=None, tenant_id=None):
dict_param = {'resource_id': sg_id, 'resource_type': t_constants.RT_SG}
security_group_list = None
try:
security_group_list = nt_utils.check_resource_not_in_deleting(
context, dict_param)
except t_exceptions.ResourceNotFound:
raise
if security_group_list:
return security_group_list
else:
return super(TricircleSecurityGroupMixin, self).\
get_security_group(context, sg_id)
def delete_security_group(self, context, sg_id):
LOG.debug("lyman--enter delete security group")
t_ctx = t_context.get_context_from_neutron_context(context)
# check that the security group exists
super(TricircleSecurityGroupMixin, self).\
get_security_group(context, sg_id)
# check that the security group is not already being deleted
dict_para = {'resource_id': sg_id, 'resource_type': t_constants.RT_SG}
nt_utils.check_resource_not_in_deleting(context, dict_para)
try:
with t_ctx.session.begin():
core.create_resource(
t_ctx, models.DeletingResources, dict_para)
for pod, bottom_security_group_id in (
self.helper.get_real_shadow_resource_iterator(
t_ctx, t_constants.RT_SG, sg_id)):
self._get_client(pod['region_name']). \
delete_security_groups(t_ctx, bottom_security_group_id)
with t_ctx.session.begin():
core.delete_resources(
t_ctx, models.ResourceRouting,
filters=[{'key': 'top_id', 'comparator': 'eq',
'value': sg_id},
{'key': 'pod_id', 'comparator': 'eq',
'value': pod['pod_id']}])
with t_ctx.session.begin():
super(TricircleSecurityGroupMixin, self). \
delete_security_group(context, sg_id)
except Exception:
raise
finally:
with t_ctx.session.begin():
core.delete_resources(
t_ctx, models.DeletingResources,
filters=[{
'key': 'resource_id', 'comparator': 'eq',
'value': sg_id}])
|
StarcoderdataPython
|
3531489
|
<filename>add_embedding.py
# -*- coding: UTF-8 -*-
from transformers import pipeline
import pandas as pd
from opencc import OpenCC
import pickle
df = pd.read_csv('./data/city_data.csv')
cc = OpenCC('t2s') # Traditional Chinese -> Simplified Chinese https://yanwei-liu.medium.com/python%E8%87%AA%E7%84%B6%E8%AA%9E%E8%A8%80%E8%99%95%E7%90%86-%E5%9B%9B-%E7%B9%81%E7%B0%A1%E8%BD%89%E6%8F%9B%E5%88%A9%E5%99%A8opencc-74021cbc6de3
df['embedding'] = ''
# transformers & chinese legal large electra https://github.com/ymcui/Chinese-ELECTRA#%E5%BF%AB%E9%80%9F%E5%8A%A0%E8%BD%BD
classifier = pipeline('feature-extraction', model='hfl/chinese-legal-electra-large-discriminator',
tokenizer='hfl/chinese-legal-electra-large-discriminator')
for idx, row in df.iterrows():
try:
sentence_embedding = classifier(cc.convert(df['JFULL'][idx])[0:500])
sentence_embedding_sum = [sum(x) for x in zip(*sentence_embedding[0])]
df.at[idx, 'embedding'] = sentence_embedding_sum  # .at avoids chained-assignment pitfalls
except Exception:
# skip rows whose conversion or embedding fails
pass
with open('./data/embedding.pickle', 'wb') as handle:
pickle.dump(df, handle, protocol=pickle.HIGHEST_PROTOCOL)
print()
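# Hedged read-back sketch (mirrors the pickle dump above; not part of the original script):
#
#   with open('./data/embedding.pickle', 'rb') as handle:
#       df = pickle.load(handle)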
|
StarcoderdataPython
|
120249
|
<filename>pyconafrica/ghana19/team.py
"""
This file contains organizing team description related code.
"""
from colorama import init, Style, Fore, Back
init()
TEAM = [
{
'name': '<NAME>',
'role': 'Chair of the organising committee.',
'bio': \
"""Marlene is a Director of the Python Software Foundation,
and a co-founder of ZimboPy, a Zimbabwean non-profit that
empowers women to pursue careers in technology.
She lives in Harare and has an active role in assisting the growth of Python
communities locally and across Africa."""
},
{
'name': '<NAME>',
'role': 'Chair of the Python Software Community in Ghana.',
'bio': \
"""Best known for helping make Ghana a space-faring nation, Aaron has been contributing
his widow's mite to the tech community for over seven years.
He's a member of the Python Software Foundation Grants committee and
helps promote Python-related activities around the globe."""
},
{
'name': '<NAME>',
'role': 'Aisha is a Cloud Systems Engineer at TechData.',
'bio': \
"""She's a former board member of the Python Nigeria Community, a Python Software Foundation fellow,
Django Software Foundation member and winner of the 2016 Malcolm Tredinnick Memorial award.
Aisha is passionate about mentoring African women through PyLadies and DjangoGirls.
She's on Twitter as @AishaXBello."""
},
{
'name': '<NAME>',
'role': 'Treasurer.',
'bio': \
"""Michael is a professional accountant with keen interest in Data Science and Financial Inclusion.
He is a co-founder and Executive Board member of the Python Software Community in Ghana,
and works with students and professionals as a career mentor, educator and a community builder."""
},
{
'name': '<NAME>',
'role': '',
'bio': \
"""Abigail is the current Lead of PyLadies Ghana, a mentorship group with a focus on helping
more women become active participants and leaders in the Python open-source community.
She has been involved in organising and coaching at several Django Girls events in Ghana and
hopes to empower more women in the field of technology through these initiatives."""
},
{
'name': '<NAME>',
'role': 'Talks committee lead.',
'bio': \
"""Noah is a software developer, a member of ICT4D.at and UI designer. He's a co-founder and
executive board member of the Python Software Community in Ghana,
and a member of the Django Software Foundation. Noah has been involved in the organisation
of several events including Django Girls & PyCon Ghana. He's on Twitter: @plasmadray """
},
{
'name': '<NAME>',
'role': 'Mannie is a computer scientist, graphic designer and software developer.',
'bio': \
"""He's also a community builder, having kick-started the initiatives and user groups under
the Python Software Community in Ghana. He volunteers as a mentor for people hoping
to get into software development and organises events and workshops around the country.
He's on Twitter: @mawy_7."""
},
{
'name': '<NAME>',
'role': 'Daniele works at Divio and currently lives in the Netherlands.',
'bio': \
"""He is a core developer of the Django Project and has been an active volunteer organiser
in the Python community for several years. In recent years he has been involved in
the organisation of several editions of DjangoCon Europe, PyCon UK and PyCon Namibia."""
}
]
def get_team():
"""
This function returns the team of organizers
"""
return TEAM
def print_team():
"""
This function prints the team of organizers
"""
print("The organizers team of PyCON Africa 2019")
print('----------------------------------------\n')
for member in TEAM:
print(Back.YELLOW + Fore.BLACK + member['name'] + Back.RESET + Fore.RESET)
if member['role']:
print(' '+ Back.WHITE + Fore.BLUE + ' {}'.format(member['role']) + Back.RESET + Fore.RESET)
print(' Bio: {}\n'.format(member['bio']))
print("\n\nLet's clap for them \U0001F44F \U0001F44F \U0001F44F")
if __name__ == "__main__":
print_team()
|
StarcoderdataPython
|
9662102
|
<reponame>MyYaYa/deeplab-tensorflow
import tensorflow as tf
import numpy as np
class MyData(object):
def __init__(self, record, image_mean, shuffle=False, buffer_size=1000, batch_size=10, repeat=False, repeat_times=None):
self.record = record
self.image_mean = image_mean
self.shuffle = shuffle
self.buffer_size = buffer_size
self.batch_size = batch_size
self.repeat = repeat
self.repeat_times = repeat_times
def preprocess_function(self, example_proto):
features = tf.parse_single_example(example_proto, features={
'height' : tf.FixedLenFeature([], tf.int64),
'width' : tf.FixedLenFeature([], tf.int64),
'image_raw' : tf.FixedLenFeature([], tf.string),
'label_raw' : tf.FixedLenFeature([], tf.string),
})
image = tf.decode_raw(features['image_raw'], tf.uint8)
label = tf.decode_raw(features['label_raw'], tf.uint8)
height = tf.cast(features['height'], tf.int32)
width = tf.cast(features['width'], tf.int32)
image_shape = tf.stack([height, width, 3])
label_shape = tf.stack([height, width, 1])
image = tf.reshape(image, image_shape)
label = tf.reshape(label, label_shape)
crop_size = tf.constant(321, tf.int32)
def subtract(a, b):
return tf.subtract(a, b)
def zero():
return tf.constant(0, tf.int32)
padding_h = tf.cond(pred=tf.greater_equal(subtract(crop_size, height), tf.constant(0, tf.int32)),
true_fn=lambda: subtract(crop_size, height),
false_fn=lambda: zero())
padding_w = tf.cond(pred=tf.greater_equal(subtract(crop_size, width), tf.constant(0, tf.int32)),
true_fn=lambda: subtract(crop_size, width),
false_fn=lambda: zero())
image = tf.pad(image, paddings=[[0, padding_h], [0, padding_w], [0, 0]])
label = tf.pad(label, paddings=[[0, padding_h], [0, padding_w], [0, 0]])
image = tf.reshape(image, tf.stack([height + padding_h, width + padding_w, 3]))
label = tf.reshape(label, tf.stack([height + padding_h, width + padding_w, 1]))
img_seg = tf.concat([image, label], 2)
croped_img_seg = tf.random_crop(img_seg, [crop_size, crop_size, 4])
mirrored_img_seg = tf.image.random_flip_left_right(croped_img_seg)
image = tf.slice(mirrored_img_seg, [0, 0, 0], [321, 321, 3])
label = tf.slice(mirrored_img_seg, [0, 0, 3], [321, 321, 1])
# subtract each channel's mean value
image = tf.cast(image, dtype=tf.float32)
mean = tf.constant(self.image_mean, dtype=tf.float32)
mean = tf.reshape(mean, [1, 1, -1])
image = tf.subtract(image, mean)
# image = tf.div(image, 255)
label = tf.cast(label, dtype=tf.int32)
return image, label
def build_dataset(self):
dataset = tf.contrib.data.TFRecordDataset(self.record)
dataset = dataset.map(self.preprocess_function)
# shuffle
if self.shuffle:
dataset = dataset.shuffle(buffer_size=self.buffer_size)
if self.repeat:
dataset = dataset.repeat()
if self.repeat_times is not None:
dataset = dataset.repeat(self.repeat_times)
dataset = dataset.batch(self.batch_size)
#iterator = dataset.make_initializable_iterator()
return dataset
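# A minimal usage sketch (not part of the original file), assuming a TF 1.x
# environment with tf.contrib; the TFRecord path and per-channel means below
# are placeholders, not values from the source:
#
#   data = MyData('./train.tfrecords', image_mean=[104.0, 117.0, 123.0],
#                 shuffle=True, batch_size=10, repeat=True)
#   dataset = data.build_dataset()
#   iterator = dataset.make_one_shot_iterator()
#   images, labels = iterator.get_next()  # tensors to feed the training graph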
|
StarcoderdataPython
|
3578286
|
<gh_stars>0
from django.db import models
from django.conf import settings
from products.models import Product
from django.db.models.signals import m2m_changed, pre_save
User = settings.AUTH_USER_MODEL
class CartManager(models.Manager):
def new_or_get(self, request):
cart_id = request.session.get('cart_id', None)
qs = self.get_queryset().filter(id = cart_id)
if qs.count() == 1:
new_obj = False
cart_obj = qs.first()
if request.user.is_authenticated and cart_obj.user is None:
cart_obj.user = request.user
cart_obj.save()
else:
cart_obj = self.new()
new_obj = True
request.session['cart_id'] = cart_obj.id
return cart_obj, new_obj
def new(self, user=None):
user_obj = None
if user is not None:
if user.is_authenticated:  # property in modern Django, consistent with new_or_get above
user_obj = user
return self.model.objects.create(user=user_obj)
class Cart(models.Model):
user = models.ForeignKey(User, models.SET_NULL, null=True, blank=True)
products = models.ManyToManyField(Product, blank=True)
subtotal = models.DecimalField(default=0.0, decimal_places=2, max_digits=50)
total = models.DecimalField(default=0.0, decimal_places=2, max_digits=50)
last_updated = models.DateTimeField(auto_now=True)
timestamp = models.DateTimeField(auto_now_add=True)
objects = CartManager()
def __str__(self):
return str(self.id)
def m2m_changed_cart_reciever(sender, instance, action, *args, **kwargs):
products = instance.products.all()
total = 0
for product in products:
total += product.price
if instance.subtotal != total:
instance.subtotal = total
instance.save()
m2m_changed.connect(m2m_changed_cart_reciever, sender = Cart.products.through)
#pre_save.connect(pre_saved_cart_reciever, sender = Cart)
def pre_saved_cart_reciever(sender, instance, *args, **kwargs):
instance.total = instance.subtotal
pre_save.connect(pre_saved_cart_reciever, sender = Cart)
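# Hedged usage sketch (typical view-side wiring; the view name is an assumption,
# not part of this module):
#
#   def cart_detail_view(request):
#       cart_obj, new_obj = Cart.objects.new_or_get(request)
#       ...  # render a template with cart_obj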
|
StarcoderdataPython
|
8114258
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 13 16:00:10 2019
@author: nico
"""
import os
import numpy as np
from scipy import signal as sig
import matplotlib.pyplot as plt
import control
os.system ("clear") # limpia la terminal de python
plt.close("all") #cierra todos los graficos
num = np.array([1,0, 0, 0, -1]) #numerador de H[b2, b1, b0]
den = np.array([1, 0, 0, 0, 0])
z, p, k = sig.tf2zpk(num,den)
ww, hh = sig.freqz(num, den)
print("Z =", z, "\n", "P =", p, "\n", "K =", k, "\n")
ww, hh = sig.freqz(num, den)
ww = ww / np.pi
eps = np.finfo(float).eps
plt.figure("Filtro FIR")
ax1 = plt.subplot(2, 1, 1)
ax1.set_title('Módulo')
ax1.plot(ww, 20 * np.log10(abs(hh+eps)))
ax1.set_xlabel('Frequencia normalizada')
ax1.set_ylabel('Modulo [dB]')
plt.grid()
ax2 = plt.subplot(2, 1, 2)
ax2.set_title('Fase')
ax2.plot(ww, np.angle(hh))
ax2.set_xlabel('Frequencia normalizada')
ax2.set_ylabel('[Rad]')
plt.grid()
plt.show()
plt.tight_layout()
tf = control.TransferFunction(num,den, 1)
print (tf)
control.pzmap(tf, Plot=True, title='Pole Zero Map', grid=True)
|
StarcoderdataPython
|
330458
|
#!/usr/bin/env python3
"""example.py
Example of using the AmesPAHdbPythonSuite to display the ('stick')
absorption spectrum of coronene (UID=18).
"""
import pkg_resources
from amespahdbpythonsuite.xmlparser import XMLparser
import matplotlib.pyplot as plt
if __name__ == '__main__':
path = 'resources/pahdb-theoretical_cutdown.xml'
xml = pkg_resources.resource_filename('amespahdbpythonsuite', path)
parser = XMLparser(xml)
parser.verify_schema()
library = parser.to_pahdb_dict()
plt.bar([d['frequency'] for d in library['species'][18]['transitions']],
[d['intensity'] for d in library['species'][18]['transitions']],
20, color='red', edgecolor="none")
plt.title('stick absorption spectrum of coronene (UID=18)')
plt.xlabel('frequency [cm$^{-1}$]')
plt.ylabel('integrated cross-section [km mol$^{-1}$]')
plt.show()
|
StarcoderdataPython
|
3487407
|
from pydantic import BaseModel, validator
from .validators import gt0
class BaseRule(BaseModel):
bal: int = 0
_gt0_bal = validator("bal", allow_reuse=True)(gt0)
class NullRuleFields(BaseModel):
bal: int = 0
|
StarcoderdataPython
|
356581
|
<reponame>wangyum/anaconda
import numpy as np
from .util import collect, dshape
from .internal_utils import remove
from .coretypes import (DataShape, Fixed, Var, Ellipsis, Record, Tuple, Unit,
date_, datetime_, TypeVar, to_numpy_dtype, Map,
Option, Categorical)
from .typesets import floating, boolean
# https://github.com/blaze/datashape/blob/master/docs/source/types.rst
__all__ = ['isdimension', 'ishomogeneous', 'istabular', 'isfixed', 'isscalar',
'isrecord', 'iscollection', 'isnumeric', 'isboolean', 'isdatelike',
'isreal']
dimension_types = Fixed, Var, Ellipsis, int
def isscalar(ds):
""" Is this dshape a single dtype?
>>> isscalar('int')
True
>>> isscalar('?int')
True
>>> isscalar('{name: string, amount: int}')
False
"""
if isinstance(ds, str):
ds = dshape(ds)
if isinstance(ds, DataShape) and len(ds) == 1:
ds = ds[0]
return isinstance(getattr(ds, 'ty', ds), (Unit, Categorical))
def isrecord(ds):
""" Is this dshape a record type?
>>> isrecord('{name: string, amount: int}')
True
>>> isrecord('int')
False
>>> isrecord('?{name: string, amount: int}')
True
"""
if isinstance(ds, str):
ds = dshape(ds)
if isinstance(ds, DataShape) and len(ds) == 1:
ds = ds[0]
return isinstance(getattr(ds, 'ty', ds), Record)
def isdimension(ds):
""" Is a component a dimension?
>>> from datashape import int32
>>> isdimension(Fixed(10))
True
>>> isdimension(Var())
True
>>> isdimension(int32)
False
"""
return isinstance(ds, dimension_types)
def ishomogeneous(ds):
""" Does datashape contain only one dtype?
>>> from datashape import int32
>>> ishomogeneous(int32)
True
>>> ishomogeneous('var * 3 * string')
True
>>> ishomogeneous('var * {name: string, amount: int}')
False
"""
ds = dshape(ds)
return len(set(remove(isdimension, collect(isscalar, ds)))) == 1
def _dimensions(ds):
""" Number of dimensions of datashape
Interprets records as dimensional
>>> from datashape import int32
>>> _dimensions(int32)
0
>>> _dimensions(10 * int32)
1
>>> _dimensions('var * 10 * int')
2
>>> _dimensions('var * {name: string, amount: int}')
2
>>> _dimensions('var * {name: map[int32, {a: int32}]}')
2
"""
ds = dshape(ds)
if isinstance(ds, DataShape) and len(ds) == 1:
ds = ds[0]
if isinstance(ds, Option):
return _dimensions(ds.ty)
if isinstance(ds, Map):
return max(map(_dimensions, ds.key))
if isinstance(ds, Record):
return 1 + max(map(_dimensions, ds.types))
if isinstance(ds, Tuple):
return 1 + max(map(_dimensions, ds.dshapes))
if isinstance(ds, DataShape) and isdimension(ds[0]):
return 1 + _dimensions(ds.subshape[0])
if isscalar(ds):
return 0
raise TypeError('Cannot count dimensions of dshape %s which is a %r' %
(ds, type(ds).__name__))
def isfixed(ds):
""" Contains no variable dimensions
>>> isfixed('10 * int')
True
>>> isfixed('var * int')
False
>>> isfixed('10 * {name: string, amount: int}')
True
>>> isfixed('10 * {name: string, amounts: var * int}')
False
"""
ds = dshape(ds)
if isinstance(ds[0], TypeVar):
return None # don't know
if isinstance(ds[0], Var):
return False
if isinstance(ds[0], Record):
return all(map(isfixed, ds[0].types))
if len(ds) > 1:
return isfixed(ds.subarray(1))
return True
def istabular(ds):
""" Can be represented by a two dimensional with fixed columns
>>> istabular('var * 3 * int')
True
>>> istabular('var * {name: string, amount: int}')
True
>>> istabular('var * 10 * 3 * int')
False
>>> istabular('10 * var * int')
False
>>> istabular('var * (int64, string, ?float64)')
False
"""
ds = dshape(ds)
return (
_dimensions(ds) == 2 and
isfixed(ds.subarray(1)) and
not isinstance(ds.subarray(1).measure, Tuple)
)
def iscollection(ds):
""" Is a collection of items, has dimension
>>> iscollection('5 * int32')
True
>>> iscollection('int32')
False
"""
if isinstance(ds, str):
ds = dshape(ds)
return isdimension(ds[0])
def isnumeric(ds):
""" Has a numeric measure
>>> isnumeric('int32')
True
>>> isnumeric('3 * ?real')
True
>>> isnumeric('string')
False
>>> isnumeric('var * {amount: ?int32}')
False
"""
ds = launder(ds)
try:
npdtype = to_numpy_dtype(ds)
except TypeError:
return False
else:
return isinstance(ds, Unit) and np.issubdtype(npdtype, np.number)
def launder(ds):
if isinstance(ds, str):
ds = dshape(ds)
if isinstance(ds, DataShape):
ds = ds.measure
return getattr(ds, 'ty', ds)
def isreal(ds):
""" Has a numeric measure
>>> isreal('float32')
True
>>> isreal('3 * ?real')
True
>>> isreal('string')
False
"""
ds = launder(ds)
return isinstance(ds, Unit) and ds in floating
def isboolean(ds):
""" Has a boolean measure
>>> isboolean('bool')
True
>>> isboolean('3 * ?bool')
True
>>> isboolean('int')
False
"""
return launder(ds) in boolean
def isdatelike(ds):
""" Has a date or datetime measure
>>> isdatelike('int32')
False
>>> isdatelike('3 * datetime')
True
>>> isdatelike('?datetime')
True
"""
ds = launder(ds)
return ds == date_ or ds == datetime_
|
StarcoderdataPython
|
5178196
|
<gh_stars>1-10
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
from collections import defaultdict
n = int(input())
a = []
for _ in range(n):
line = input()
try:
a.append(int(line))
except ValueError:
a = list(map(int, line.strip().split()))
break
counting = defaultdict(int)
for i in range(n):
if a[i]:
for j in range(n):
for k in range(n):
val = a[i] * (a[j] + a[k]) # RHS(s(t+u))
counting[val] += 1
ans = 0
for i in range(n):
for j in range(n):
for k in range(n):
val = a[i] * a[j] + a[k] # LHS(p*q+r)
if val in counting: # p*q+r=s(t+u)
ans += counting[val]
print(ans)
|
StarcoderdataPython
|
1664685
|
<filename>deepmd/xyz2raw.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import json
import argparse
from collections import Counter
from ase.io import read, write
from tqdm import tqdm
import dpdata
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-df', '--datafile',
default='data.xyz', help='time series files'
)
parser.add_argument(
'-ej', '--enjson',
default='e0.json', help='json file with single atom energies'
)
args = parser.parse_args()
#
substract_baseline = False
if os.path.exists(args.enjson):
substract_baseline = True
with open(args.enjson, 'r') as fopen:
e0_dict = json.load(fopen)
# sanity check, dpdata only needs species, pos, Z, force, virial
# ase-extxyz is inconsistent with quip-xyz, especially the force
frames = read(args.datafile, ':')
print('number of frames ', len(frames))
atomic_properties = ['numbers', 'positions', 'forces']
calc_props = ['energy', 'forces']
for atoms in tqdm(frames):
# remove extra properties in atoms
cur_properties = list(atoms.arrays.keys())
for prop in cur_properties:
if prop not in atomic_properties:
#atoms.arrays.pop(prop)
del atoms.arrays[prop]
# atoms info
# del atoms.info['feature_vector']
# TODO: check if calculator exists
atoms.calc = None # ase copys xyz info to SinglePointCalculator?
stored_forces = atoms.arrays.get('forces', None)
if stored_forces is not None:
atoms.arrays['force'] = stored_forces.copy()
del atoms.arrays['forces']
# calc
#cur_calc_props = list(atoms.calc.results.keys())
#for prop in cur_calc_props:
# if prop not in calc_props:
# del atoms.calc.results[prop]
# move forces to force
# check e0
if substract_baseline:
chemical_symbols = atoms.get_chemical_symbols()
sym_dict = Counter(chemical_symbols)
tot_e0 = 0.0
for elem, num in sym_dict.items():
tot_e0 += num*e0_dict[elem]
atoms.info['energy'] -= tot_e0
#print(tot_e0)
#print(sym_dict)
write('dp_raw.xyz', frames)
#
xyz_multi_systems = dpdata.MultiSystems.from_file(
file_name='./dp_raw.xyz',
fmt='quip/gap/xyz'
)
print(xyz_multi_systems)
xyz_multi_systems.to_deepmd_raw('./raw_data/')
pass
|
StarcoderdataPython
|
6479295
|
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import requests
def test_swagger():
model_endpoint = 'http://localhost:5000/swagger.json'
r = requests.get(url=model_endpoint)
assert r.status_code == 200
assert r.headers['Content-Type'] == 'application/json'
json = r.json()
assert 'swagger' in json
assert json.get('info') and json.get('info').get('title') == 'MAX Review Text Generator'
def test_metadata():
model_endpoint = 'http://localhost:5000/model/metadata'
r = requests.get(url=model_endpoint)
assert r.status_code == 200
metadata = r.json()
assert metadata['id'] == 'generative-language-model-keras'
assert metadata['name'] == 'Generative Language Model Keras'
assert metadata['description'] == 'Generative Language Model in Keras trained on Yelp reviews'
assert metadata['license'] == 'Apache2'
assert metadata['type'] == 'Language Modeling'
assert 'max-review-text-generator' in metadata['source']
def test_predict():
seed_text = "went there for dinner on a friday night and i have to say i'm impressed by the quality of the food "
chars = 20
model_endpoint = 'http://localhost:5000/model/predict'
json_data = {"seed_text": seed_text, "chars": chars}
r = requests.post(url=model_endpoint, json=json_data)
assert r.status_code == 200
response = r.json()
assert response['status'] == 'ok'
assert response['prediction']['seed_text'] == seed_text
assert len(response['prediction']['generated_text']) == chars
assert response['prediction']['full_text'] == seed_text + response['prediction']['generated_text']
if __name__ == '__main__':
pytest.main([__file__])
|
StarcoderdataPython
|
4884506
|
import sys
import os
import logging
import MySQLdb
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from dotenv import load_dotenv
load_dotenv()
rds_host = os.getenv('MYSQL_IP')
db_name = os.getenv('DB_NAME')
user_name = os.getenv('MYSQL_ID')
password = os.getenv('<PASSWORD>')
port = os.getenv('MYSQL_PORT')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class Command(BaseCommand):
help = 'Creates the initial database'
def handle(self, *args, **options):
print('Starting db creation')
try:
db = MySQLdb.connect(host=rds_host, user=user_name,
password=password, db="mysql", connect_timeout=5)
c = db.cursor()
print("connected to db server")
c.execute(f"""CREATE DATABASE {db_name};""")
c.execute(
f"""GRANT ALL PRIVILEGES ON db_name.* TO '{user_name}' IDENTIFIED BY '{password}';""")
c.close()
print("closed db connection")
except Exception as e:
logger.error(
f"ERROR: Unexpected error: Could not connect to MySql instance. \n{e}")
sys.exit()
|
StarcoderdataPython
|
1991714
|
from RNAtools.graph import CTGraph
import os
filepath = os.path.dirname(__file__)
def test_ct_graph():
"""
Tests that CT graph construction is done correctly.
"""
ct = CTGraph(f'{filepath}/../data/foo.ct')
assert len(ct.graph.nodes) == 21
assert len(ct.graph.edges) == 28
print(ct)
|
StarcoderdataPython
|
11280280
|
<reponame>matthiask/django-multilingual-search
# coding: utf-8
from __future__ import absolute_import, unicode_literals
from django.db import models
from haystack import signals
from .models import Document
class DocumentOnlySignalProcessor(signals.BaseSignalProcessor):
def setup(self):
# Listen only to the ``Document`` model.
models.signals.post_save.connect(self.handle_save, sender=Document)
models.signals.post_delete.connect(self.handle_delete, sender=Document)
def teardown(self):
# Disconnect only for the ``Document`` model.
models.signals.post_save.disconnect(self.handle_save, sender=Document)
models.signals.post_delete.disconnect(self.handle_delete, sender=Document)
|
StarcoderdataPython
|
11363815
|
from flask_restplus import Api
from .auth import api as ns3
from .book import api as ns2
from .user import api as ns1
api = Api(
title="",
version="1.0",
description="API description",
)
api.add_namespace(ns1)
api.add_namespace(ns2)
api.add_namespace(ns3)
|
StarcoderdataPython
|
6470052
|
<gh_stars>1-10
#!/usr/bin/env python
#---------------------------------------------------------------------------
# Instantiates a ROS node that sets velocities for a standalone Teledyne
# WHN DVL model in the gazebo scene. The velocity is set relative to the
# DVL base link to drive in an octagon pattern (forward, forward left, left,
# backwards left, backwards, backwards right, right, forward right) at a
# fixed linear speed (1 meter/second). Descents and climbs at 0.25 meters
# per second are included in some legs resulting in a slow descent over
# time. Each leg is allowed to run for 10 seconds. The motion is intended
# to test or demonstrate the DVL plugin water and bottom tracking, in
# particular, the inclusion of ocean current in the water tracking solution.
#---------------------------------------------------------------------------
import rospy
import gazebo_msgs.msg as gm
import time
import math
TOPIC_NAME = 'gazebo/set_model_state'
if __name__ == '__main__':
diagonal = math.sqrt(0.5) # diagonal components for 1 m/s speed
rospy.init_node('apply_velocity')
rate = rospy.Rate(0.1)
publisher = rospy.Publisher(TOPIC_NAME, gm.ModelState, queue_size=1)
command = gm.ModelState()
command.model_name = rospy.get_param('model_name')
command.reference_frame = rospy.get_param('base_link_name')
time.sleep(10) # Give things time to start up
while not rospy.is_shutdown():
for (command.twist.linear.x, \
command.twist.linear.y, \
command.twist.linear.z) in \
((1.0, 0.0, 0.25), (diagonal, diagonal, 0.0), \
(0.0, 1.0, -0.25), (-diagonal, diagonal, 0.0), \
(-1.0, 0.0, 0.25), (-diagonal, -diagonal, 0.0), \
(0.0, -1.0, 0.0), (diagonal, -diagonal, 0.0)):
publisher.publish(command)
rate.sleep()
|
StarcoderdataPython
|
3240031
|
<gh_stars>0
class RunData(object):
"""Parent class for runs. """
def __init__(self, control_file, log_file=None, energy_file=None):
self._control_file = control_file
self._log_file = log_file
self._energy_file = energy_file
self._parameters = {}
self._tags = []
self._file_list = []
with open(control_file) as file:
for line in file:
line = line.rstrip()
line = line.strip()
self._file_list.append(line)
if self._file_list[2] == '&cntrl':
self._type = 'AMBER'
elif self._file_list[0] == 'TITLE':
self._type = 'GROMOS'
if ';' in self._file_list[1]:
self._type = 'GROMACS'
def get_type(self):
return self._type
def find_parameters(self):
"""Must be implemented by the subclasses"""
pass
def calc_runtime(self):
timestep = self._parameters['timestep']
self._parameters['runtime'] = (timestep * self._parameters['num_timestep'])/1000
def get_parameters(self):
return self._parameters
def get_file(self):
return self._file_list
def add_tag(self, name, tag_id, type):
"""Adds a new tag to self._tags.
:param:
name(str): The name of the tag
tag_id(str): The id of the tag in the file
type(class): The type of the data. eg. str, int, float."""
tag = (name, tag_id, type)
self._tags.append(tag)
def standard_units(self):
"""Converts the units in self._parameters to standardised units.
Pressure --> Bar
Temperature --> Kelvin
"""
for tag in self._tags:
key = tag[0]
value = self._parameters[key]
try:
standard = tag[3]
value = value * standard
self._parameters[key] = value
except IndexError:
continue
|
StarcoderdataPython
|
4879546
|
import pytest
import src.single_hmm_searcher
class singleHmmTestCase(object):
'''Tests for single_hmm_searcher.py'''
# Eventually will add tests
|
StarcoderdataPython
|
4896230
|
from models import db
class Volunteer(db.Model):
__tablename__ = "volunteers"
volunteer_id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String, nullable=False)
last_name = db.Column(db.String, nullable=True)
email = db.Column(db.String, nullable=False)
phone = db.Column(db.String)
|
StarcoderdataPython
|
12836862
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AbstractFold(nn.Module):
def __init__(self, predict, partfunc):
super(AbstractFold, self).__init__()
self.predict = predict
self.partfunc = partfunc
def clear_count(self, param):
param_count = {}
for n, p in param.items():
if n.startswith("score_"):
param_count["count_"+n[6:]] = torch.zeros_like(p)
param.update(param_count)
return param
def calculate_differentiable_score(self, v, param, count):
s = 0
for n, p in param.items():
if n.startswith("score_"):
s += torch.sum(p * count["count_"+n[6:]].to(p.device))
s += v - s.item()
return s
def forward(self, seq, return_param=False, param=None, return_partfunc=False,
max_internal_length=30, max_helix_length=30, constraint=None, reference=None,
loss_pos_paired=0.0, loss_neg_paired=0.0, loss_pos_unpaired=0.0, loss_neg_unpaired=0.0):
param = self.make_param(seq) if param is None else param # reuse param or not
ss = []
preds = []
pairs = []
pfs = []
bpps = []
for i in range(len(seq)):
param_on_cpu = { k: v.to("cpu") for k, v in param[i].items() }
param_on_cpu = self.clear_count(param_on_cpu)
with torch.no_grad():
v, pred, pair = self.predict(seq[i], param_on_cpu,
max_internal_length=max_internal_length if max_internal_length is not None else len(seq[i]),
max_helix_length=max_helix_length,
constraint=constraint[i].tolist() if constraint is not None else None,
reference=reference[i].tolist() if reference is not None else None,
loss_pos_paired=loss_pos_paired, loss_neg_paired=loss_neg_paired,
loss_pos_unpaired=loss_pos_unpaired, loss_neg_unpaired=loss_neg_unpaired)
if return_partfunc:
pf, bpp = self.partfunc(seq[i], param_on_cpu,
max_internal_length=max_internal_length if max_internal_length is not None else len(seq[i]),
max_helix_length=max_helix_length,
constraint=constraint[i].tolist() if constraint is not None else None,
reference=reference[i].tolist() if reference is not None else None,
loss_pos_paired=loss_pos_paired, loss_neg_paired=loss_neg_paired,
loss_pos_unpaired=loss_pos_unpaired, loss_neg_unpaired=loss_neg_unpaired)
pfs.append(pf)
bpps.append(bpp)
if torch.is_grad_enabled():
v = self.calculate_differentiable_score(v, param[i], param_on_cpu)
ss.append(v)
preds.append(pred)
pairs.append(pair)
device = next(iter(param[0].values())).device
ss = torch.stack(ss) if torch.is_grad_enabled() else torch.tensor(ss, device=device)
if return_param:
return ss, preds, pairs, param
elif return_partfunc:
return ss, preds, pairs, pfs, bpps
else:
return ss, preds, pairs
|
StarcoderdataPython
|
11215335
|
<filename>Lote de cancelamento/Arquivo de Cancelamento.py
import pandas as pd
import xml.etree.ElementTree as ET
import time
dados_cancelamento = pd.read_excel()
# Load the input spreadsheet
ListaCancelamento = dados_cancelamento.values.tolist()
# Reference values used to size the repetition loop
Codigos_List = dados_cancelamento["req"].tolist()
Numero_List = dados_cancelamento["Número COAF"].tolist()
Autenticacao_List = dados_cancelamento["Autenticação"].tolist()
Motivo_List = dados_cancelamento["Motivo do cancelamento"].tolist()
# Convert the columns into lists
DataReporte = ('SISCOAFCancelamento')+(str(time.strftime('%d%m%Y')))
# Current date used to build the communication's ID attribute
def create_xml():
Lote = ET.Element("LOTECANCELAMENTO")
ocx = ET.SubElement(Lote, "OCORRENCIAS", ID=(DataReporte).replace('""',''))
# Create the root element; child elements are created inside the loop
for linha in range(len(ListaCancelamento)):
oco = ET.SubElement(ocx, "OCORRENCIA")
NUMEROORIGEM = ET.SubElement(oco, "NUMEROORIGEM")
NUMEROORIGEM.text = str(Codigos_List[linha])
NUMEROCOAF = ET.SubElement(oco, "NUMEROCOAF")
NUMEROCOAF.text = str(Numero_List[linha])
AUTENTICACAO = ET.SubElement(oco, "AUTENTICACAO")
AUTENTICACAO.text = str(Autenticacao_List[linha])
MOTIVO = ET.SubElement(oco,"MOTIVO")
MOTIVO.text = str(Motivo_List[linha])
# Loop inserting the elements that make up the XML, one per spreadsheet row
Comunicacao = ET.ElementTree(Lote)
Comunicacao.write("Arquivo de Cancelamento.xml", encoding='iso-8859-1', xml_declaration=True)
# Write the XML file
create_xml()
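# Resulting document structure (derived from the element names above):
#
#   <LOTECANCELAMENTO>
#     <OCORRENCIAS ID="SISCOAFCancelamentoDDMMYYYY">
#       <OCORRENCIA>
#         <NUMEROORIGEM>...</NUMEROORIGEM>
#         <NUMEROCOAF>...</NUMEROCOAF>
#         <AUTENTICACAO>...</AUTENTICACAO>
#         <MOTIVO>...</MOTIVO>
#       </OCORRENCIA>
#       ...
#     </OCORRENCIAS>
#   </LOTECANCELAMENTO>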
|
StarcoderdataPython
|
4854759
|
#!/usr/bin/env python2
"""Create segmentation datasets from select SMPL fits."""
import os
import os.path as path
import sys
import logging
import numpy as np
import scipy
import click
import tqdm
from clustertools.log import LOGFORMAT
from clustertools.visualization import apply_colormap
from up_tools.model import (robust_person_size, six_region_groups,
regions_to_classes, get_crop)
from up_tools.render_segmented_views import render_body_impl
sys.path.insert(0, path.join(path.dirname(__file__), '..', '..'))
from config import SEG_DATA_FP, UP3D_FP
LOGGER = logging.getLogger(__name__)
DSET_ROOT_FP = SEG_DATA_FP
if not path.exists(DSET_ROOT_FP):
os.mkdir(DSET_ROOT_FP)
def uncrop(annot, fullimsize, cropinfo):
if annot.ndim == 2:
res = np.zeros((fullimsize[0], fullimsize[1]), dtype='uint8')
else:
res = np.ones((fullimsize[0], fullimsize[1], 3), dtype='uint8') * 255
res[cropinfo[2]:cropinfo[3],
cropinfo[4]:cropinfo[5]] = scipy.misc.imresize(
annot,
(cropinfo[3] - cropinfo[2],
cropinfo[5] - cropinfo[4]),
interp='nearest')
return res
def add_dataset(dset_fp, dset_rel_fp, up3d_fp, # pylint: disable=too-many-locals, too-many-arguments, too-many-statements, too-many-branches
train_list_f, val_list_f, test_list_f,
train_spec, val_spec, test_spec,
target_person_size, partspec, crop, running_idx,
only_missing=False):
"""Add a dataset to the collection."""
test_ids = [int(id_[1:6]) for id_ in test_spec]
train_ids = [int(id_[1:6]) for id_ in train_spec]
val_ids = [int(id_[1:6]) for id_ in val_spec]
LOGGER.info("Split: %d train, %d val, %d test.",
len(train_ids), len(val_ids), len(test_ids))
LOGGER.info("Writing dataset...")
for im_idx in tqdm.tqdm(train_ids + val_ids + test_ids):
image = scipy.misc.imread(path.join(up3d_fp, '%05d_image.png' % (im_idx)))
with open(path.join(up3d_fp, '%05d_fit_crop_info.txt' % (im_idx)), 'r') as inf:
cropinfo = [int(val) for val in inf.readline().strip().split()]
assert image.ndim == 3
out_exists = (path.exists(path.join(dset_fp, '%05d_image.png' % (running_idx))) and
path.exists(path.join(dset_fp, '%05d_ann.png' % (running_idx))) and
path.exists(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx))) and
path.exists(path.join(dset_fp, '%05d_render.png' % (running_idx))) and
path.exists(path.join(dset_fp, '%05d_render_light.png' % (running_idx))))
if not (only_missing and out_exists):
rendering = uncrop(render_body_impl(path.join(up3d_fp, '%05d_body.pkl' % (im_idx)),
resolution=(cropinfo[1],
cropinfo[0]),
quiet=True,
use_light=False)[0],
image.shape[:2],
cropinfo)
rendering_l = uncrop(render_body_impl(path.join(up3d_fp, '%05d_body.pkl' % (im_idx)),
resolution=(cropinfo[1],
cropinfo[0]),
quiet=True,
use_light=True)[0],
image.shape[:2],
cropinfo)
joints = np.load(path.join(up3d_fp, '%05d_joints.npy' % (im_idx)))
joints = np.vstack((joints, np.all(joints > 0, axis=0)[None, :]))
person_size = robust_person_size(joints)
norm_factor = float(target_person_size) / person_size
if not (only_missing and out_exists):
image = scipy.misc.imresize(image, norm_factor, interp='bilinear')
rendering = scipy.misc.imresize(rendering, norm_factor, interp='nearest')
rendering_l = scipy.misc.imresize(rendering_l, norm_factor, interp='bilinear')
if image.shape[0] > crop or image.shape[1] > crop:
LOGGER.debug("Image (original %d, here %d) too large (%s)! Cropping...",
im_idx, running_idx, str(image.shape[:2]))
person_center = np.mean(joints[:2, joints[2, :] == 1], axis=1) * norm_factor
crop_y, crop_x = get_crop(image, person_center, crop)
image = image[crop_y[0]:crop_y[1],
crop_x[0]:crop_x[1], :]
rendering = rendering[crop_y[0]:crop_y[1],
crop_x[0]:crop_x[1], :]
rendering_l = rendering_l[crop_y[0]:crop_y[1],
crop_x[0]:crop_x[1], :]
assert image.shape[0] == crop or image.shape[1] == crop, (
"Error cropping image (original %d, here %d)!" % (im_idx,
running_idx))
assert image.shape[0] <= crop and image.shape[1] <= crop and image.shape[2] == 3, (
"Wrong image shape (original %d, here %d)!" % (im_idx, running_idx))
class_groups = six_region_groups if partspec == '6' else None
annotation = regions_to_classes(rendering, class_groups, warn_id=str(im_idx))
if partspec == '1':
annotation = (annotation > 0).astype('uint8')
assert np.max(annotation) <= int(partspec), (
"Wrong annotation value (original %d, here %d): %s!" % (
im_idx, running_idx, str(np.unique(annotation))))
if running_idx == 0:
assert np.max(annotation) == int(partspec), (
"Probably an error in the number of parts!")
scipy.misc.imsave(path.join(dset_fp, '%05d_image.png' % (running_idx)), image)
scipy.misc.imsave(path.join(dset_fp, '%05d_ann.png' % (running_idx)), annotation)
scipy.misc.imsave(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx)),
apply_colormap(annotation, vmax=int(partspec)))
scipy.misc.imsave(path.join(dset_fp, '%05d_render.png' % (running_idx)), rendering)
scipy.misc.imsave(path.join(dset_fp, '%05d_render_light.png' % (running_idx)), rendering_l) # pylint: disable=line-too-long
if im_idx in train_ids:
list_f = train_list_f
elif im_idx in val_ids:
list_f = val_list_f
elif im_idx in test_ids:
list_f = test_list_f
list_f.write("/%s/%05d_image.png /%s/%05d_ann.png %f\n" % (
dset_rel_fp, running_idx, dset_rel_fp, running_idx, norm_factor))
list_f.flush()
running_idx += 1
return running_idx
@click.command()
@click.argument("suffix", type=click.STRING)
@click.argument("partspec", type=click.Choice(['1', '6', '31']))
@click.argument("target_person_size", type=click.INT)
@click.option("--crop", type=click.INT, default=513,
help="Crop size for the images.")
@click.option("--only_missing", type=click.BOOL, default=False, is_flag=True,
help="Only rewrite missing images.")
@click.option("--up3d_fp", type=click.Path(file_okay=False, readable=True),
default=UP3D_FP,
help="Path to the UP3D folder that you want to use.")
def cli(suffix, partspec, target_person_size, crop=513, only_missing=False, up3d_fp=UP3D_FP): # pylint: disable=too-many-locals, too-many-arguments
"""Create segmentation datasets from select SMPL fits."""
np.random.seed(1)
LOGGER.info("Creating segmentation dataset for %s classes with target "
"person size %f and suffix `%s`.",
partspec, target_person_size, suffix)
assert ' ' not in suffix
dset_fromroot = path.join(partspec, str(target_person_size), suffix)
dset_fp = path.join(DSET_ROOT_FP, dset_fromroot)
if path.exists(dset_fp):
if not only_missing:
if not click.confirm("Dataset folder exists: `%s`! Continue?" % (dset_fp)):
return
else:
os.makedirs(dset_fp)
LOGGER.info("Creating list files...")
list_fp = path.join(path.dirname(__file__), '..', 'training', 'list')
if not path.exists(list_fp):
os.makedirs(list_fp)
train_list_f = open(path.join(list_fp, 'train_%s_%d_%s.txt' % (
partspec, target_person_size, suffix)), 'w')
val_list_f = open(path.join(list_fp, 'val_%s_%d_%s.txt' % (
partspec, target_person_size, suffix)), 'w')
test_list_f = open(path.join(list_fp, 'test_%s_%d_%s.txt' % (
partspec, target_person_size, suffix)), 'w')
with open(path.join(up3d_fp, 'train.txt'), 'r') as f:
train_spec = [line.strip() for line in f.readlines()]
with open(path.join(up3d_fp, 'val.txt'), 'r') as f:
val_spec = [line.strip() for line in f.readlines()]
with open(path.join(up3d_fp, 'test.txt'), 'r') as f:
test_spec = [line.strip() for line in f.readlines()]
LOGGER.info("Processing...")
add_dataset(
dset_fp,
dset_fromroot,
up3d_fp,
train_list_f, val_list_f, test_list_f,
train_spec, val_spec, test_spec,
target_person_size, partspec,
crop, 0,
only_missing=only_missing)
train_list_f.close()
val_list_f.close()
test_list_f.close()
LOGGER.info("Creating trainval file...")
trainval_list_f = open(path.join(list_fp, 'trainval_%s_%d_%s.txt' % (
partspec, target_person_size, suffix)), 'w')
train_list_f = open(path.join(list_fp, 'train_%s_%d_%s.txt' % (
partspec, target_person_size, suffix)), 'r')
val_list_f = open(path.join(list_fp, 'val_%s_%d_%s.txt' % (
partspec, target_person_size, suffix)), 'r')
for line in train_list_f:
trainval_list_f.write(line)
for line in val_list_f:
trainval_list_f.write(line)
trainval_list_f.close()
train_list_f.close()
val_list_f.close()
LOGGER.info("Done.")
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, format=LOGFORMAT)
logging.getLogger("opendr.lighting").setLevel(logging.WARN)
cli() # pylint: disable=no-value-for-parameter
|
StarcoderdataPython
|
4817600
|
# coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from deepsecurity.api_client import ApiClient
class ContextsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_context(self, context, api_version, **kwargs): # noqa: E501
"""Create a Context # noqa: E501
Create a new context. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_context(context, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Context context: The settings of the new context. (required)
:param str api_version: The version of the api being called. (required)
:return: Context
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_context_with_http_info(context, api_version, **kwargs) # noqa: E501
else:
(data) = self.create_context_with_http_info(context, api_version, **kwargs) # noqa: E501
return data
def create_context_with_http_info(self, context, api_version, **kwargs): # noqa: E501
"""Create a Context # noqa: E501
Create a new context. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_context_with_http_info(context, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Context context: The settings of the new context. (required)
:param str api_version: The version of the api being called. (required)
:return: Context
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context', 'api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_context" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context' is set
if ('context' not in params or
params['context'] is None):
raise ValueError("Missing the required parameter `context` when calling `create_context`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `create_context`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'context' in params:
body_params = params['context']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/contexts', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Context', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_context(self, context_id, api_version, **kwargs): # noqa: E501
"""Delete a Context # noqa: E501
Delete a context by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_context(context_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int context_id: The ID number of the context to delete. (required)
:param str api_version: The version of the api being called. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_context_with_http_info(context_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.delete_context_with_http_info(context_id, api_version, **kwargs) # noqa: E501
return data
def delete_context_with_http_info(self, context_id, api_version, **kwargs): # noqa: E501
"""Delete a Context # noqa: E501
Delete a context by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_context_with_http_info(context_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int context_id: The ID number of the context to delete. (required)
:param str api_version: The version of the api being called. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context_id', 'api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_context" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context_id' is set
if ('context_id' not in params or
params['context_id'] is None):
raise ValueError("Missing the required parameter `context_id` when calling `delete_context`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `delete_context`") # noqa: E501
if 'context_id' in params and not re.search('\\d+', str(params['context_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `context_id` when calling `delete_context`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'context_id' in params:
path_params['contextID'] = params['context_id'] # noqa: E501
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/contexts/{contextID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def describe_context(self, context_id, api_version, **kwargs): # noqa: E501
"""Describe a Context # noqa: E501
Describe a context by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.describe_context(context_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int context_id: The ID number of the context to describe. (required)
:param str api_version: The version of the api being called. (required)
:return: Context
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.describe_context_with_http_info(context_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.describe_context_with_http_info(context_id, api_version, **kwargs) # noqa: E501
return data
def describe_context_with_http_info(self, context_id, api_version, **kwargs): # noqa: E501
"""Describe a Context # noqa: E501
Describe a context by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.describe_context_with_http_info(context_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int context_id: The ID number of the context to describe. (required)
:param str api_version: The version of the api being called. (required)
:return: Context
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context_id', 'api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method describe_context" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context_id' is set
if ('context_id' not in params or
params['context_id'] is None):
raise ValueError("Missing the required parameter `context_id` when calling `describe_context`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `describe_context`") # noqa: E501
if 'context_id' in params and not re.search('\\d+', str(params['context_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `context_id` when calling `describe_context`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'context_id' in params:
path_params['contextID'] = params['context_id'] # noqa: E501
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/contexts/{contextID}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Context', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_contexts(self, api_version, **kwargs): # noqa: E501
"""List Contexts # noqa: E501
Lists all contexts. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_contexts(api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_version: The version of the api being called. (required)
:return: Contexts
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_contexts_with_http_info(api_version, **kwargs) # noqa: E501
else:
(data) = self.list_contexts_with_http_info(api_version, **kwargs) # noqa: E501
return data
def list_contexts_with_http_info(self, api_version, **kwargs): # noqa: E501
"""List Contexts # noqa: E501
Lists all contexts. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_contexts_with_http_info(api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_version: The version of the api being called. (required)
:return: Contexts
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_contexts" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `list_contexts`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/contexts', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Contexts', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def modify_context(self, context_id, context, api_version, **kwargs): # noqa: E501
"""Modify a Context # noqa: E501
Modify a context by ID. Any unset elements will be left unchanged. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_context(context_id, context, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int context_id: The ID number of the context to modify. (required)
:param Context context: The settings of the context to modify. (required)
:param str api_version: The version of the api being called. (required)
:return: Context
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.modify_context_with_http_info(context_id, context, api_version, **kwargs) # noqa: E501
else:
(data) = self.modify_context_with_http_info(context_id, context, api_version, **kwargs) # noqa: E501
return data
def modify_context_with_http_info(self, context_id, context, api_version, **kwargs): # noqa: E501
"""Modify a Context # noqa: E501
Modify a context by ID. Any unset elements will be left unchanged. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_context_with_http_info(context_id, context, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int context_id: The ID number of the context to modify. (required)
:param Context context: The settings of the context to modify. (required)
:param str api_version: The version of the api being called. (required)
:return: Context
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context_id', 'context', 'api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method modify_context" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context_id' is set
if ('context_id' not in params or
params['context_id'] is None):
raise ValueError("Missing the required parameter `context_id` when calling `modify_context`") # noqa: E501
# verify the required parameter 'context' is set
if ('context' not in params or
params['context'] is None):
raise ValueError("Missing the required parameter `context` when calling `modify_context`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `modify_context`") # noqa: E501
if 'context_id' in params and not re.search('\\d+', str(params['context_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `context_id` when calling `modify_context`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'context_id' in params:
path_params['contextID'] = params['context_id'] # noqa: E501
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'context' in params:
body_params = params['context']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/contexts/{contextID}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Context', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_contexts(self, api_version, **kwargs): # noqa: E501
"""Search Contexts # noqa: E501
Search for contexts using optional filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_contexts(api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_version: The version of the api being called. (required)
:param SearchFilter search_filter: A collection of options used to filter the search results.
:return: Contexts
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_contexts_with_http_info(api_version, **kwargs) # noqa: E501
else:
(data) = self.search_contexts_with_http_info(api_version, **kwargs) # noqa: E501
return data
def search_contexts_with_http_info(self, api_version, **kwargs): # noqa: E501
"""Search Contexts # noqa: E501
Search for contexts using optional filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_contexts_with_http_info(api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_version: The version of the api being called. (required)
:param SearchFilter search_filter: A collection of options used to filter the search results.
:return: Contexts
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_version', 'search_filter'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_contexts" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `search_contexts`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'search_filter' in params:
body_params = params['search_filter']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/contexts/search', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Contexts', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
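# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated client). It takes an already
# constructed API instance -- in swagger-codegen output this is typically
# built as SomeContextsApi(ApiClient(Configuration())) -- so no package or
# class name has to be assumed here.
def _example_context_calls(contexts_api):
    # Synchronous call: returns the deserialized Context model.
    ctx = contexts_api.describe_context(context_id=1, api_version='1.0')
    # Asynchronous call: returns a thread; block on .get() for the result.
    thread = contexts_api.list_contexts(api_version='1.0', async_req=True)
    contexts = thread.get()
    return ctx, contexts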
|
StarcoderdataPython
|
1867410
|
#!/usr/bin/env python
""" Web app specific utilities.
In particular, it handles tasks related to deployment and minimization which are not relevant
to other Overwatch packages.
.. codeauthor:: <NAME> <<EMAIL>>, Yale University
"""
import os
import subprocess
import logging
logger = logging.getLogger(__name__)
# Webassets
import webassets.filter
# Configuration
from ..base import config
(serverParameters, filesRead) = config.readConfig(config.configurationType.webApp)
class PolymerBundler(webassets.filter.ExternalTool):
""" Filter to bundle Polymer html imports into a single file for deployment.
Best practices dictate that the Polymer html imports should be combined into a single file
to reduce the number of individual http requests. Polymer provides a tool to do so, called
``polymer-bundler``. By taking advantage of ``webassets``, we can automatically combine and
minimize these files when starting a web app deployment.
To successfully define the filter, the following details must be addressed:
- polymer-bundler must only be executed with relative paths, so we cannot use
``ExternalTool.subprocess``, since that gives absolute paths.
- To ensure that the polymer components also work when not bundled, the filter must be
executed in a directory above the static dir.
    These issues cause quite a few complications! See the ``input(...)`` function for how
    they are handled.
When ``webassets`` is run in debug mode, this filter will not be run! Instead, the standard
(un-minified) version will be included. For information on forcing this filter to be run,
see the :doc:`web app README </webAppReadme>`.
"""
# Define the name of the bundle so it can be referenced.
name = "PolymerBundler"
def input(self, _in, out, **kwargs):
""" Plugin function for adding an external filter to ``webassets``.
As of August 2018, the ``kwargs`` options available include:
.. code-block:: python
kwargs = {'output': 'gen/polymerBundle.html',
'output_path': '/pathToOverwatch/overwatch/webApp/static/gen/polymerBundle.html',
'source_path': '/pathToOverwatch/overwatch/webApp/static/polymerComponents.html',
'source': 'polymerComponents.html'}
Note:
``polymer-bundler`` parses arguments a bit strangely - values such as paths still need
to be in a separate argument. Thus, the arguments looks more split than would usually
be expected.
Args:
_in (StringIO): Input for the filter. Not used here.
out (StringIO): Output for the filter. The output for ``polymer-bundler`` is written here.
This will eventually be written out to a file.
**kwargs (dict): Additional options required to run the filter properly. See the function
description for the available options.
Returns:
None
"""
# Printed because otherwise we won't be able to see the output.
logger.debug("polymer-bundler filter arguments. _in: {}, out: {}, kwargs: {}".format(_in, out, kwargs))
# Cannot just use the naive current path since this could be executed from anywhere. Instead,
# look for the static folder - it must be included somewhere.
output_path = "{output_path}".format(**kwargs)
executionPath = output_path[:output_path.find(serverParameters["staticFolder"])]
# Stream the result to stdout since writing the file seems to cause trouble with
# the "out" string, which will overwrite the desired output
args = [
"polymer-bundler",
"--inline-scripts",
"--strip-comments",
#"--out-html",
#os.path.join(serverParameters["staticFolder"], "{output}.tmp".format(**kwargs)),
            # NOTE: It appears that ``--in-html`` is not a valid option anymore. The input file should just be the last argument.
os.path.join(serverParameters["staticFolder"], "{source}".format(**kwargs))
]
logger.debug("Executing polymer filter with execution path \"{executionPath}\" and args {args}".format(executionPath = executionPath, args = args))
output = subprocess.check_output(args, cwd = executionPath)
if len(output) > 0:
logger.debug("Received non-zero output string! This means the polymer-bundler filter likely worked!")
# Write the output to the out string, which will then eventually automatically be written to file
# Without explicit decoding here, it will fail
out.write(output.decode('utf-8'))
# Register filter so it can be run in the web app
webassets.filter.register_filter(PolymerBundler)
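# ---------------------------------------------------------------------------
# Hedged usage sketch: how the filter registered above can be attached to a
# ``webassets`` bundle. The url and bundle name below are placeholders;
# Overwatch wires up its own asset environment elsewhere, so this only
# illustrates the Environment/Bundle mechanism, not the project's actual
# configuration.
def _examplePolymerBundle():
    from webassets import Bundle, Environment
    env = Environment(directory=serverParameters["staticFolder"], url="/static")
    bundle = Bundle("polymerComponents.html",
                    filters="PolymerBundler",
                    output="gen/polymerBundle.html")
    env.register("polymerBundle", bundle)
    # Resolving the urls builds the bundle (and runs the filter) when
    # webassets is not in debug mode.
    return env["polymerBundle"].urls()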
|
StarcoderdataPython
|
9740296
|
<reponame>chachabooboo/king-phisher
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tests/server/database/validation.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from king_phisher import testing
from king_phisher.server.database import models as db_models
from king_phisher.server.database import validation as db_validation
class DatabaseValidateCredentialTests(testing.KingPhisherTestCase):
campaign = db_models.Campaign(credential_regex_username=r'a\S+')
def test_credential_collection_members(self):
for field in db_validation.CredentialCollection._fields:
self.assertHasAttribute(db_models.Credential, field)
def test_empty_configuration_returns_none(self):
self.assertIsNone(db_validation.validate_credential(
db_validation.CredentialCollection(username='alice', password='<PASSWORD>', mfa_token='<PASSWORD>'),
db_models.Campaign()
))
self.assertIsNone(db_validation.validate_credential(
db_validation.CredentialCollection(username=None, password=None, mfa_token=None),
db_models.Campaign()
))
def test_extra_fields_are_ignored(self):
self.assertTrue(db_validation.validate_credential(
db_validation.CredentialCollection(username='alice', password='<PASSWORD>', mfa_token=None),
self.campaign
))
self.assertTrue(db_validation.validate_credential(
db_validation.CredentialCollection(username='alice', password=<PASSWORD>, mfa_token='<PASSWORD>'),
self.campaign
))
self.assertTrue(db_validation.validate_credential(
db_validation.CredentialCollection(username='alice', password='<PASSWORD>', mfa_token='<PASSWORD>'),
self.campaign
))
def test_validation_methods(self):
cred = db_validation.CredentialCollection(username='alice', password=None, mfa_token=None)
self.assertEqual(
db_validation.validate_credential_fields(cred, self.campaign),
db_validation.CredentialCollection(username=True, password=None, mfa_token=None)
)
self.assertTrue(db_validation.validate_credential(cred, self.campaign))
cred = db_validation.CredentialCollection(username='calie', password=None, mfa_token=None)
self.assertEqual(
db_validation.validate_credential_fields(cred, self.campaign),
db_validation.CredentialCollection(username=False, password=None, mfa_token=None)
)
self.assertFalse(db_validation.validate_credential(cred, self.campaign))
cred = db_validation.CredentialCollection(username='alice', password=None, mfa_token=None)
campaign = db_models.Campaign(credential_regex_username=r'a\S+', credential_regex_password=r'a\S+')
self.assertEqual(
db_validation.validate_credential_fields(cred, campaign),
db_validation.CredentialCollection(username=True, password=False, mfa_token=None)
)
self.assertFalse(db_validation.validate_credential(cred, campaign))
def test_empty_fields_fail(self):
self.assertEqual(db_validation.validate_credential_fields(
db_validation.CredentialCollection(username='', password=None, mfa_token=None),
self.campaign
), db_validation.CredentialCollection(username=False, password=<PASSWORD>, mfa_token=None))
def test_none_fields_fail(self):
self.assertEqual(db_validation.validate_credential_fields(
db_validation.CredentialCollection(username=None, password=<PASSWORD>, mfa_token=None),
self.campaign
), db_validation.CredentialCollection(username=False, password=None, mfa_token=None))
def test_bad_regexs_are_skipped(self):
self.assertEqual(db_validation.validate_credential_fields(
db_validation.CredentialCollection(username='alice', password=<PASSWORD>, mfa_token=None),
db_models.Campaign(credential_regex_username=r'\S+[')
), db_validation.CredentialCollection(username=None, password=<PASSWORD>, mfa_token=None))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
12806369
|
<gh_stars>10-100
from os.path import join, realpath
import sys; sys.path.insert(0, realpath(join(__file__, "../../../../")))
import unittest
from decimal import Decimal
from hummingbot.strategy.amm_arb import utils
from hummingbot.connector.connector_base import ConnectorBase
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
trading_pair = "HBOT-USDT"
base = trading_pair.split("-")[0]
quote = trading_pair.split("-")[1]
class MockConnector1(ConnectorBase):
def get_quote_price(self, trading_pair: str, is_buy: bool, amount: Decimal) -> Decimal:
if is_buy:
return Decimal("105")
else:
return Decimal("104")
def get_order_price(self, trading_pair: str, is_buy: bool, amount: Decimal) -> Decimal:
return self.get_quote_price(trading_pair, is_buy, amount)
class MockConnector2(ConnectorBase):
def get_quote_price(self, trading_pair: str, is_buy: bool, amount: Decimal) -> Decimal:
if is_buy:
return Decimal("103")
else:
return Decimal("100")
def get_order_price(self, trading_pair: str, is_buy: bool, amount: Decimal) -> Decimal:
return self.get_quote_price(trading_pair, is_buy, amount)
class AmmArbUtilsUnitTest(unittest.TestCase):
def test_create_arb_proposals(self):
market_info1 = MarketTradingPairTuple(MockConnector1(), trading_pair, base, quote)
market_info2 = MarketTradingPairTuple(MockConnector2(), trading_pair, base, quote)
arb_proposals = utils.create_arb_proposals(market_info1, market_info2, Decimal("1"))
        # there are 2 proposal combinations possible - (buy_1, sell_2) and (buy_2, sell_1)
self.assertEqual(2, len(arb_proposals))
        # Each proposal has a buy side and a sell side
self.assertNotEqual(arb_proposals[0].first_side.is_buy, arb_proposals[0].second_side.is_buy)
self.assertNotEqual(arb_proposals[1].first_side.is_buy, arb_proposals[1].second_side.is_buy)
buy_1_sell_2_profit_pct = (Decimal("100") - Decimal("105")) / Decimal("105")
self.assertEqual(buy_1_sell_2_profit_pct, arb_proposals[0].profit_pct())
buy_2_sell_1_profit_pct = (Decimal("104") - Decimal("103")) / Decimal("103")
self.assertEqual(buy_2_sell_1_profit_pct, arb_proposals[1].profit_pct())
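# ---------------------------------------------------------------------------
# Hedged illustration: the expected values asserted above follow a plain
# (sell_price - buy_price) / buy_price convention. The helper below just
# restates that arithmetic for readability; it is not part of hummingbot's
# amm_arb utils.
def _expected_profit_pct(buy_price: Decimal, sell_price: Decimal) -> Decimal:
    return (sell_price - buy_price) / buy_price

# Buy on market 1 at 105, sell on market 2 at 100 -> negative profit.
assert _expected_profit_pct(Decimal("105"), Decimal("100")) == (Decimal("100") - Decimal("105")) / Decimal("105")
# Buy on market 2 at 103, sell on market 1 at 104 -> positive profit.
assert _expected_profit_pct(Decimal("103"), Decimal("104")) == (Decimal("104") - Decimal("103")) / Decimal("103")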
|
StarcoderdataPython
|
6574953
|
# Adapted from https://github.com/metrofun/E3D-LSTM
from functools import reduce
import copy
import operator
import torch
import torch.nn as nn
import torch.nn.functional as F
from .rnn_cell import E3DLSTMCell, ConvDeconv3d
from .utils import window
from tqdm import tqdm
import numpy as np
class E3DLSTM_Module(nn.Module):
def __init__(self, input_shape, hidden_size, num_layers, kernel_size, tau):
super().__init__()
self._tau = tau
self._cells = []
input_shape = list(input_shape)
for i in range(num_layers):
cell = E3DLSTMCell(input_shape, hidden_size, kernel_size)
# NOTE hidden state becomes input to the next cell
input_shape[0] = hidden_size
self._cells.append(cell)
# Hook to register submodule
setattr(self, "cell{}".format(i), cell)
def forward(self, input):
# NOTE (seq_len, batch, input_shape)
batch_size = input.size(1)
c_history_states = []
h_states = []
outputs = []
for step, x in enumerate(input):
for cell_idx, cell in enumerate(self._cells):
if step == 0:
c_history, m, h = self._cells[cell_idx].init_hidden(
batch_size, self._tau, input.device
)
c_history_states.append(c_history)
h_states.append(h)
# NOTE c_history and h are coming from the previous time stamp, but we iterate over cells
c_history, m, h = cell(
x, c_history_states[cell_idx], m, h_states[cell_idx]
)
c_history_states[cell_idx] = c_history
h_states[cell_idx] = h
# NOTE hidden state of previous LSTM is passed as input to the next one
x = h
outputs.append(h)
# NOTE Concat along the channels
return torch.cat(outputs, dim=1)
class E3DLSTM_Model(nn.Module):
def __init__(self, params):
super().__init__()
self.encoder = E3DLSTM_Module(params.input_shape, params.hidden_size, params.lstm_layers, params.kernel, params.tau)
# self.decoder = nn.Conv3d(params.hidden_size * params.time_steps, params.output_shape[0], params.kernel, padding=(0, 2, 2))
self.decoder = nn.Conv3d(64, 1, kernel_size=(1, 5, 5), stride=(1, 1, 1), padding=(0, 2, 2))
def forward(self, x):
return self.decoder(self.encoder(x))
from .basic_algo import Basic_algo
class E3DLSTM(Basic_algo):
def __init__(self, params):
config = params.__dict__
config.update({
'input_shape': (3, 4, 128, 128),
'output_shape': (3, 4, 128, 128),
'hidden_size': 64,
'lstm_layers': 4,
'kernel': (2, 5, 5),
'tau': 2,
'temporal_frames': 4,
'temporal_stride': 1,
'input_time_window': 4,
'output_time_horizon': 1,
'time_steps': 1,
'lr': 0.001,
'device': torch.device('cuda:0')
})
model = E3DLSTM_Model(params).to(params.device)
Basic_algo.__init__(self, model)
self.device = params.device
self.params = params
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=params.lr)
self.criterion = torch.nn.MSELoss()
def _iter_batch(self, x, y):
pred = self.model(x)
loss = self.criterion(pred, y)
return pred, loss
def train(self, train_loader, epoch):
'''
Train the model with train_loader.
Input params:
train_loader: dataloader of train.
Output params:
mse_loss: mean square loss between predictions and ground truth.
'''
self.model.train()
train_pbar = tqdm(train_loader)
mse_loss = []
for i, (batch_x, batch_y, _) in enumerate(train_pbar):
batch_x, batch_y = batch_x.to(self.device), batch_y.to(self.device)
batch_x = batch_x.permute(0, 2, 1, 3, 4)
batch_y = batch_y.permute(0, 2, 1, 3, 4)
# train model
self.optimizer.zero_grad()
frames_seq = []
for indices in window(range(self.params.input_time_window), \
self.params.temporal_frames, self.params.temporal_stride):
frames_seq.append(batch_x[:, :, indices[0] : indices[-1] + 1])
batch_x = torch.stack(frames_seq, dim=0)
pred, loss = self._iter_batch(batch_x, batch_y)
loss.backward()
self.optimizer.step()
            # train model
train_pbar.set_description('train loss: {:.4f}'.format(loss.item()))
mse_loss.append(loss.item())
mse_loss = np.average(mse_loss)
return mse_loss
def evaluate(self, val_loader):
'''
Evaluate the model with val_loader.
Input params:
val_loader: dataloader of validation.
Output params:
            (mse, mae, ssim): mse, mae, ssim between predictions and ground truth.
'''
self.model.eval()
val_pbar = tqdm(val_loader)
mse_loss, preds, trues = [], [], []
for i, (batch_x, batch_y, _) in enumerate(val_pbar):
batch_x, batch_y = batch_x.to(self.device), batch_y.to(self.device)
# eval model
batch_x = batch_x.permute(0, 2, 1, 3, 4)
batch_y = batch_y.permute(0, 2, 1, 3, 4)
frames_seq = []
for indices in window(range(self.params.input_time_window), \
self.params.temporal_frames, self.params.temporal_stride):
frames_seq.append(batch_x[:, :, indices[0] : indices[-1] + 1])
batch_x = torch.stack(frames_seq, dim=0)
pred_y, loss = self._iter_batch(batch_x, batch_y)
# eval model
true, pred_y = batch_y.detach().cpu(), pred_y.detach().cpu()
val_pbar.set_description('vali loss: {:.4f}'.format(loss.item()))
mse_loss.append(loss.item())
preds.append(pred_y.numpy())
trues.append(true.numpy())
mse_loss = np.average(mse_loss)
preds = np.concatenate(preds,axis=0)
trues = np.concatenate(trues,axis=0)
import sys; sys.path.append('/usr/data/gzy/Weather_Forecast')
from API.metrics import metric
mae, mse, rmse, mape, mspe,ssim,psnr = metric(preds, trues,val_loader.dataset.mean,val_loader.dataset.std,return_ssim_psnr=True)
return mse, mae, ssim
def validate(self, val_loader):
self.model.eval()
number = 0
val_pbar = tqdm(val_loader)
mse_loss, preds, trues = [], [], []
for i, (batch_x, batch_y, _) in enumerate(val_pbar):
number += batch_x.shape[0]
batch_x, batch_y = batch_x.to(self.device), batch_y.to(self.device)
# eval model
batch_x = batch_x.permute(0, 2, 1, 3, 4)
batch_y = batch_y.permute(0, 2, 1, 3, 4)
frames_seq = []
for indices in window(range(self.params.input_time_window), \
self.params.temporal_frames, self.params.temporal_stride):
frames_seq.append(batch_x[:, :, indices[0] : indices[-1] + 1])
batch_x = torch.stack(frames_seq, dim=0)
pred_y, loss = self._iter_batch(batch_x, batch_y)
# eval model
true, pred_y = batch_y.detach().cpu(), pred_y.detach().cpu()
val_pbar.set_description('vali loss: {:.4f}'.format(loss.item()))
mse_loss.append(loss.item())
preds.append(pred_y.numpy())
trues.append(true.numpy())
if number >= 1000:
break
mse_loss = np.average(mse_loss)
preds = np.concatenate(preds,axis=0)
trues = np.concatenate(trues,axis=0)
import sys; sys.path.append('/usr/data/gzy/Weather_Forecast')
from API.metrics import metric
mae, mse, rmse, mape, mspe,ssim,psnr = metric(preds, trues,val_loader.dataset.mean,val_loader.dataset.std,return_ssim_psnr=True)
return mse, mae
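# ---------------------------------------------------------------------------
# Hedged sketch: the `window` helper imported from .utils is not shown in this
# file. Based on how it is called above -- window(seq, size, stride) -- and how
# its output is indexed (indices[0] / indices[-1]), a minimal compatible
# implementation could look like this; the real helper may differ.
def _window_sketch(seq, size, stride=1):
    seq = list(seq)
    for start in range(0, len(seq) - size + 1, stride):
        yield seq[start:start + size]

# With the defaults configured above (input_time_window=4, temporal_frames=4,
# temporal_stride=1) this yields a single window: [[0, 1, 2, 3]].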
|
StarcoderdataPython
|
1837407
|
<gh_stars>1-10
import os
from flask import Flask
from rss_reader.config import Config
from rss_reader.models import db, RssSource
sources = [{
'url': 'http://feed.cnblogs.com/blog/u/118754/rss',
'img': '1.jpg',
'name': 'Vamei',
'tag': 'Python',
'desc': '编程,数学,设计'
}, {
'url': 'http://www.dongwm.com/atom.xml',
'img': '2.jpg',
'name': '小明明之美',
'tag': 'Python',
'desc': '一个Python手艺',
}, {
'url': 'https://coolshell.cn/feed',
'img': '3.jpg',
'name': '酷壳',
'tag':'Article',
'desc': 'Coding Your Ambition',
}, {
'url': 'http://www.alibuybuy.com/feed',
'img': '4.jpg',
'name': '互联网的那点事',
'tag':'Article',
'desc': '聚焦互联网前沿资讯!',
}, {
'url': 'http://www.geekpark.net/rss',
'img': '5.jpg',
'name': '极客公园',
'tag': 'Digital',
'desc': '极客公园',
}, {
'url': 'http://36kr.com/feed',
'img': '6.jpg',
'name': '36氪',
'tag': 'Innovation',
'desc': '36氪 - 让创业更简单。'
}, {
'url': 'http://www.apprcn.com/feed',
'img': '7.jpg',
'name': '反斗软件',
'tag':'Article',
'desc': '关注个人软件和绿色软件。',
}, {
'url': 'https://feed.iplaysoft.com/',
'img': '8.jpg',
'name': '异次元软件世界',
'tag':'Downloads',
'desc': '软件改变生活!',
}, {
'url': 'https://www.cnbeta.com/backend.php',
'img': '9.jpg',
'name': 'cnBeta',
'tag':'News',
'desc': '简明IT新闻,网友媒体与言论平台!',
}, {
'url': 'http://www.diy-robots.com/?feed=rss2',
'img': '10.jpg',
'name': '做做AI,造造人',
'tag':'Blog',
'desc': '动手改变世界。',
}, {
'url': 'http://blog.zhaojie.me/rss',
'img': '11.jpg',
'name': '老赵点滴',
'tag':'Blog',
'desc': '追求编程之美。',
}, {
'url': 'http://feed.mifengtd.cn/',
'img': '12.jpg',
'name': '褪墨',
'tag':'Blog',
'desc': '我们的目标是:把事情做到更好!',
}, {
'url': 'http://news.feng.com/rss.xml',
'img': '13.jpg',
'name': '威锋网',
'tag':'iPhone',
'desc': 'iPhone讨论社区!',
}, {
'url': 'http://www.uisdc.com/feed',
'img': '14.jpg',
'name': '优设-UISDC',
'tag':'Article',
'desc': '设计师交流学习平台。',
}, {
'url': 'http://www.ifanr.com/feed',
'img': '15.jpg',
'name': '爱范儿',
'tag':'Consumption',
'desc': '让未来触手可及!',
},{
'url': 'http://cinephilia.net/feed',
'img': '16.jpg',
'name': 'cinephilia迷影',
'tag': 'Moves',
'desc': '认真说好每一个故事',
}, {
'url': 'http://www.phonekr.com/feed/',
'img': '17.jpg',
'name': '锋客网',
'tag': 'Phone',
'desc': 'techXtreme 科技锋芒',
}, {
'url': 'http://pansci.asia/feed',
'img': '18.jpg',
'name': 'PanSci 泛科學',
'tag':'Article',
'desc': '全台最大科學知識社群。',
},]
app = Flask(__name__)
if __name__ == '__main__':
db_path = os.path.join(Config.BASE_DIR, 'models/rss_reader.db')
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + db_path
db.init_app(app)
db.create_all(app=app)
with app.app_context():
for source in sources:
data = RssSource(source_url=source['url'],
source_img=source['img'],
source_name=source['name'],
source_tag=source['tag'],
source_desc=source['desc'],
)
db.session.add(data)
db.session.commit()
|
StarcoderdataPython
|
3485847
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This is used in fstat_pdf_gen.py to generate the plots that are to be in the PDF.
import json, time, urllib, os
import unicodedata
#Plotting libs
import plotly
import plotly.graph_objs as go
from adrian_code.colour_science import SCILIFE_COLOURS, FACILITY_USER_AFFILIATION_COLOUR_OFFICIAL
from adrian_code.issn_files import ISSN_IMPACT_2017, ISSN_IMPACT_2016, ISSN_IMPACT_2015, ISSN_TO_ISSNL, ISSNL_TO_ISSN, issn_to_impact
from adrian_code.publications_api import Publications_api
def fix_spl_char(value):
if value == None:
value = ''
elif not isinstance(value, unicode):
value = unicode(str(value), 'utf-8')
return str(unicodedata.normalize('NFKD', value).encode('ascii', 'ignore'))
def publication_plot(label_list, year, pformat="png"):
url = "https://publications.scilifelab.se/labels.json"
response = urllib.urlopen(url)
labels = json.loads(response.read())
labels_check_dict = dict()
label_list_no_spl_char = [fix_spl_char(l) for l in label_list]
for label in labels["labels"]:
# labels_check_dict[label["value"]] = label["links"]["self"]["href"]
labels_check_dict[fix_spl_char(label["value"])] = label["links"]["self"]["href"]
all_publications = list()
pub_read = list()
for label in label_list:
label = fix_spl_char(label)
if label not in labels_check_dict.keys():
#import pdb; pdb.set_trace()
exit("ERROR: Wrong label, does not exist in database {}".format(label))
url = labels_check_dict[label]
response = urllib.urlopen(url)
publications = json.loads(response.read())
for pub in publications["publications"]:
if pub not in all_publications:
all_publications.append(pub)
years = {
year:{"Service":0, "Collaborative":0, "Technology development":0, "None":0},
year-1:{"Service":0, "Collaborative":0, "Technology development":0, "None":0},
year-2:{"Service":0, "Collaborative":0, "Technology development":0, "None":0}
}
publication_issns = list()
publication_impacts = {year: [], year-1: [], year-2: []}
# if 'Bioinformatics Support and Infrastructure' in label_list:
# import pdb; pdb.set_trace()
for pub in all_publications:
pub_year = int(pub["published"].split("-")[0])
if pub_year in years.keys():
catflag = False
jifflag = False
for key in pub["labels"].keys():
# if key in label_list and not catflag:
if fix_spl_char(key) in label_list_no_spl_char and not catflag:
# Need to use the right label for the category
# This WILL break for several labels, categories will be counted several times
try:
years[pub_year][pub["labels"][key]] += 1
catflag = True
except KeyError as e:
years[pub_year]["None"] += 1
catflag = True
if pub["journal"]["issn"]:
issn = pub["journal"]["issn"]
publication_issns.append(issn)
impact = issn_to_impact(issn)
# if impact is None:
# print "NO IMPACT FACTOR FOUND FOR:", issn, pub["journal"]
# At the end, add the impact to the list
publication_impacts[pub_year].append(impact)
jifflag = True
else:
# NO ISSN
publication_impacts[pub_year].append(None)
jifflag = True
# print "NO ISSN FOUND FOR:", pub["journal"]
if catflag ^ jifflag:
print "\n\nERROR THIS SHOULD NEVER HAPPEN. PUBLICATION ADDED TO CATEGORY PLOT BUT NOT JIF PLOT\n\n", pub
# This should never happen, ie having only one of the flags
# I added this to make sure all publications are always visible in BOTH graphs
jif_data = {year-2: [0,0,0,0,0], year-1: [0,0,0,0,0], year: [0,0,0,0,0]}
    # Use a separate loop variable so the function's year argument is not clobbered.
    for yr in publication_impacts.keys():
        for impact in publication_impacts[yr]:
            if impact is not None:
                real_impact = float(impact)/1000
                #print real_impact
                if real_impact>25.0:
                    jif_data[yr][3] += 1
                    continue
                if real_impact>9.0:
                    jif_data[yr][2] += 1
                    continue
                if real_impact>6.0:
                    jif_data[yr][1] += 1
                    continue
                jif_data[yr][0] += 1
            else:
                jif_data[yr][4] += 1
trace_service = go.Bar(
x=[year-2, year-1, year],
y=[years[year-2]["Service"], years[year-1]["Service"], years[year]["Service"]],
width=0.75,
name="Service ",
marker=dict(
color=SCILIFE_COLOURS[2],
line=dict(color='#000000', width=1)
)
)
trace_collaborative = go.Bar(
x=[year-2, year-1, year],
y=[years[year-2]["Collaborative"], years[year-1]["Collaborative"], years[year]["Collaborative"]],
width=0.75,
name="Collaborative ",
marker=dict(
color=SCILIFE_COLOURS[8],
line=dict(color='#000000', width=1)
)
)
trace_tech_dev = go.Bar(
x=[year-2, year-1, year],
y=[years[year-2]["Technology development"], years[year-1]["Technology development"], years[year]["Technology development"]],
width=0.75,
name="Technology<br>development ",
marker=dict(
color=SCILIFE_COLOURS[4],
line=dict(color='#000000', width=1)
)
)
trace_none = go.Bar(
x=[year-2, year-1, year],
y=[years[year-2]["None"], years[year-1]["None"], years[year]["None"]],
width=0.75,
name="No category ",
marker=dict(
color=SCILIFE_COLOURS[5],
line=dict(color='#000000', width=1)
)
)
if (years[year-2]["None"] or years[year-1]["None"] or years[year]["None"]):
data = [trace_none, trace_service, trace_collaborative, trace_tech_dev]
else:
data = [trace_service, trace_collaborative, trace_tech_dev]
highest_y_value = max(
years[year-2]["None"]+years[year-2]["Technology development"]+years[year-2]["Collaborative"]+years[year-2]["Service"],
years[year-1]["None"]+years[year-1]["Technology development"]+years[year-1]["Collaborative"]+years[year-1]["Service"],
years[year]["None"]+years[year]["Technology development"]+years[year]["Collaborative"]+years[year]["Service"]
)
yaxis_tick = 1
if highest_y_value>10:
yaxis_tick = 2
if highest_y_value>20:
yaxis_tick = 5
if highest_y_value>50:
yaxis_tick = 10
if highest_y_value>100:
yaxis_tick = 20
if highest_y_value>150:
yaxis_tick = 40
if highest_y_value>200:
yaxis_tick = 50
if highest_y_value>1000:
yaxis_tick = 100
layout = go.Layout(
barmode='stack',
plot_bgcolor='rgba(0,0,0,0)',
margin=go.layout.Margin(
l=60,
r=50,
b=50,
t=30,
pad=4
),
# title=dict(
# text="<b>Publications by Category</b>",
# font=dict(family="Arial", size=32, color="#95C11E"),
# x=0.20,
# y=0.03
# ),
xaxis=dict(
showticklabels=True,
dtick=1,
zeroline=True,
tickfont=dict(family='Arial', size=28)
),
yaxis=dict(
# domain=[0.12, 1],
showticklabels=True,
dtick=yaxis_tick,
tickfont=dict(family='Arial', size=28),
range=[0, int(highest_y_value*1.15)] # Set the ylim slightly higher than the max value for a prettier graph
),
legend=dict(
traceorder='reversed',
font=dict(family='Arial', size=22),
bordercolor="#5a5c60",
borderwidth=0.6
)
)
fig = go.Figure(data=data, layout=layout)
plotly.io.write_image(fig, 'facility_onepagers_figures/pub_plot/{}_category.{}'.format(label_list[0].lower().replace(" ", "_").replace(",", ""), pformat))
# height=1150, width=1500, scale=3)
total_pubs_lastlast_year = years[year-2]["None"]+years[year-2]["Technology development"]+years[year-2]["Collaborative"]+years[year-2]["Service"]
total_pubs_last_year = years[year-1]["None"]+years[year-1]["Technology development"]+years[year-1]["Collaborative"]+years[year-1]["Service"]
total_pubs_current_year = years[year]["None"]+years[year]["Technology development"]+years[year]["Collaborative"]+years[year]["Service"]
jif_unknown = go.Bar(
x=[year-2, year-1, year],
y=[jif_data[year-2][4], jif_data[year-1][4], jif_data[year][4]],
name="JIF unknown ",
width=0.75,
marker=dict(
color=SCILIFE_COLOURS[5],
line=dict(color='#000000', width=1)
)
)
jif_low = go.Bar(
x=[year-2, year-1, year],
y=[jif_data[year-2][0], jif_data[year-1][0], jif_data[year][0]],
name="JIF < 6 ",
width=0.75,
marker=dict(
color=SCILIFE_COLOURS[0],
line=dict(color='#000000', width=1)
)
)
jif_mediocre = go.Bar(
x=[year-2, year-1, year],
y=[jif_data[year-2][1], jif_data[year-1][1], jif_data[year][1]],
name="JIF = 6 - 9 ",
width=0.75,
marker=dict(
color=SCILIFE_COLOURS[7],
line=dict(color='#000000', width=1)
)
)
jif_good = go.Bar(
x=[year-2, year-1, year],
y=[jif_data[year-2][2], jif_data[year-1][2], jif_data[year][2]],
name="JIF = 9 - 25 ",
width=0.75,
marker=dict(
color=SCILIFE_COLOURS[9],
line=dict(color='#000000', width=1)
)
)
jif_high = go.Bar(
x=[year-2, year-1, year],
y=[jif_data[year-2][3], jif_data[year-1][3], jif_data[year][3]],
name="JIF > 25 ",
width=0.75,
marker=dict(
color=SCILIFE_COLOURS[1],
line=dict(color='#000000', width=1)
)
)
if (jif_data[year-2][4] or jif_data[year-1][4] or jif_data[year][4]):
jif_fig_data = [jif_unknown, jif_low, jif_mediocre, jif_good, jif_high]
else:
jif_fig_data = [jif_low, jif_mediocre, jif_good, jif_high]
jif_layout = go.Layout(
barmode="stack",
plot_bgcolor='rgba(0,0,0,0)',
margin=go.layout.Margin(
l=60,
r=50,
b=50,
t=30,
pad=4
),
# title=dict(
# text="<b>Publications by JIF</b>",
# font=dict(family="Arial", size=32, color="#95C11E"),
# x=0.25,
# y=0.03
# ),
xaxis=dict(
showticklabels=True,
dtick=1,
zeroline=True,
tickfont=dict(family='Arial', size=28)
),
yaxis=dict(
# domain=[0.12, 1],
showticklabels=True,
dtick=yaxis_tick,
tickfont=dict(family='Arial', size=28),
range=[0, int(highest_y_value*1.15)] # Set the ylim slightly higher than the max value for a prettier graph
),
legend=dict(
traceorder='reversed',
font=dict(family='Arial', size=22),
bordercolor="#5a5c60",
borderwidth=0.6
)
)
jif_fig = go.Figure(data=jif_fig_data, layout=jif_layout)
plotly.io.write_image(jif_fig, 'facility_onepagers_figures/pub_plot/{}_jif.{}'.format(label_list[0].lower().replace(" ", "_").replace(",", ""), pformat))
# height=1150, width=1500, scale=3)
return (
'{}/facility_onepagers_figures/pub_plot/{}_category.{}'.format(os.getcwd(), label_list[0].lower().replace(" ", "_").replace(",", ""), pformat),
'{}/facility_onepagers_figures/pub_plot/{}_jif.{}'.format(os.getcwd(), label_list[0].lower().replace(" ", "_").replace(",", ""), pformat),
(total_pubs_current_year, total_pubs_last_year, total_pubs_lastlast_year)
)
def user_plot(user_affiliation_data, fac, year="2019", prefix=None, pformat="png"):
aff_map_abbr = {
"Chalmers University of Technology": "Chalmers",
"KTH Royal Institute of Technology": "KTH",
"Swedish University of Agricultural Sciences": "SLU",
"Karolinska Institutet": "KI",
"Linköping University": "LiU",
"Lund University": "LU",
"Naturhistoriska Riksmuséet": "NRM",
"Naturhistoriska Riksmuseet": "NRM",
"Stockholm University": "SU",
"Umeå University": "UmU",
"University of Gothenburg": "GU",
"Uppsala University": "UU",
"Örebro University": "OU",
"International University": "Int Univ",
"Other Swedish University" : "Other Swe{}Univ".format("<br>" if fac=='Chemical Proteomics' else " "),
"Other Swedish organization" : "Other Swe Org",
"Other international organization" : "Other Int Org",
"Industry": "Industry",
"Healthcare": "Healthcare" + ("<br>" if fac=='Clinical Genomics Gothenburg' else ""),
"National University Ireland Galway": "National University<br>Ireland Galway"
}
if not os.path.isdir("facility_onepagers_figures/"):
os.mkdir("facility_onepagers_figures/")
fn = fix_spl_char(fac).lower().replace(" ", "_").replace(",", "") + (prefix if prefix else '')
user_fig_name = '{}/facility_onepagers_figures/user_plot/{}_user.{}'.format(os.getcwd(), fn, pformat)
values = []
labels = []
for institution in user_affiliation_data.keys():
if user_affiliation_data[institution]:
values.append(user_affiliation_data[institution])
labels.append(institution)
if sum(values) < 2:
pi_plural = "PI"
else:
pi_plural = "PIs"
# Set font sizes for exceptional case
font_size, title_size = (25, 37)
if (fac=="Compute and Storage"):
font_size, title_size = (23, 31)
elif (fac=="National Genomics Infrastructure" and year=="2017"):
font_size, title_size = (23, 32)
elif (fac=="National Genomics Infrastructure" and year=="2018"):
font_size, title_size = (22, 32)
elif (fac=="National Genomics Infrastructure" and year=="2019"):
font_size, title_size = (22, 32)
elif (fac=="AIDA Data Hub" and year=="2019"):
font_size, title_size = (22, 35)
elif (fac=="BioImage Informatics" and year=="2017"):
font_size, title_size = (29, 38)
elif (fac=="Support, Infrastructure and Training" and year=="2017"):
font_size, title_size = (24, 36)
elif (fac=="Support, Infrastructure and Training" and year=="2018"):
font_size, title_size = (22, 36)
elif (fac=="Support, Infrastructure and Training" and year=="2019"):
font_size, title_size = (22, 36)
elif (fac=="Advanced Light Microscopy" and year=="2017"):
font_size, title_size = (28, 38)
elif (fac=="Intravital Microscopy Facility" and year=="2018"):
font_size, title_size = (28, 42)
elif (fix_spl_char(fac)=="Biochemical Imaging Centre Umea" and year=="2017"):
font_size, title_size = (30, 45)
elif (fix_spl_char(fac)=="Biochemical Imaging Centre Umea" and year=="2018"):
font_size, title_size = (22, 37)
elif (fix_spl_char(fac)=="Biochemical Imaging Centre Umea" and year=="2019"):
font_size, title_size = (22, 37)
elif (fac=="Cryo-EM" and year=="2017"):
font_size, title_size = (30, 42)
elif (fac=="National Resource for Mass Spectrometry Imaging" and year=="2018"):
font_size, title_size = (32, 44)
elif (fac=="Gothenburg Imaging Mass Spectrometry" and year=="2017"):
font_size, title_size = (24, 36)
elif (fac=="Chemical Biology Consortium Sweden" and year=="2017"):
font_size, title_size = (26, 39)
elif (fac=="Chemical Proteomics" and year=="2017"):
font_size, title_size = (29, 42)
elif (fac=="Genome Engineering Zebrafish" and year=="2017"):
font_size, title_size = (26, 38)
elif (fac=="High Throughput Genome Engineering" and year=="2018"):
font_size, title_size = (26, 38)
elif (fac=="Clinical Genomics Gothenburg" and year=="2017"):
font_size, title_size = (26, 38)
elif (fac=="Clinical Genomics Lund" and year=="2018"):
font_size, title_size = (26, 38)
elif (fac=="Clinical Genomics Uppsala" and year=="2017"):
font_size, title_size = (26, 38)
elif (fac=="Ancient DNA" and year=="2018"):
font_size, title_size = (27, 38)
elif (fac=="Glycoproteomics" and year=="2017"):
font_size, title_size = (24, 33)
elif (fac=="Glycoproteomics" and year=="2018"):
font_size, title_size = (23, 33)
elif (fac=="Glycoproteomics" and year=="2019"):
font_size, title_size = (24, 33)
elif (fac=="Mass Cytometry" and year=="2019"):
font_size, title_size = (23, 36)
elif (fac=="Proximity Proteomics" and year=="2019"):
font_size, title_size = (23, 36)
elif (fac=="Targeted and Structural Proteomics" and year=="2019"):
font_size, title_size = (24, 36)
elif (fac=="Swedish Metabolomics Centre" and year=="2019"):
font_size, title_size = (24, 36)
elif (fac=="Swedish NMR Centre"):
font_size, title_size = (25, 32)
fig = go.Figure(layout=dict(
margin=go.layout.Margin(
l=50,
r=50,
b=30,
t=30,
pad=4
),
annotations=[dict(
font=dict(family="Arial", size=title_size),
showarrow=False,
# text='<b><span style="color:#95C11E">Users {}<br>({})</span></b>'.format(year, sum(values)),
text='{} {}'.format(sum(values), pi_plural),
x=0.49,
y=0.49)
]
)
)
fig.add_pie(labels=labels,
values=values,
text=["{} ({}%)".format(aff_map_abbr.get(labels[i].encode('utf-8'), labels[i].encode('utf-8')), round(float(values[i])/float(sum(values))*float(100), 1)) for i in range(len(labels))],
marker=dict(colors=[FACILITY_USER_AFFILIATION_COLOUR_OFFICIAL.get(labels[i], "#000000") for i in range(len(labels))]),
hole=0.6,
automargin=True,
sort=True,
direction="clockwise",
textinfo="text",
textposition="outside",
textfont=dict(family="Arial", size=font_size, color="#000000"),
showlegend=False)
plotly.io.write_image(fig, user_fig_name)#, height=1300, width=1200, scale=3)
return user_fig_name
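# ---------------------------------------------------------------------------
# Hedged usage sketch: both plot functions above are normally driven from
# fstat_pdf_gen.py; called directly they look roughly like this. The label and
# affiliation counts are placeholders, not real facility data, and the output
# directories must already exist.
def _example_plots():
    category_png, jif_png, totals = publication_plot(
        ["Some Facility Label"], 2019, pformat="png")
    user_png = user_plot(
        {"Uppsala University": 12, "Lund University": 5, "Industry": 2},
        "Some Facility Label", year="2019", pformat="png")
    return category_png, jif_png, totals, user_png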
|
StarcoderdataPython
|
11375248
|
<gh_stars>1-10
# from flask import Flask, jsonify, request
# from settings import SETTINGS
# from db import db, Activity, Claim, Tag
# from datetime import datetime
# from flask_cors import CORS
# from dateutil import parser
# import pytz
# # import iso8601
# # basedir = os.path.abspath(os.path.dirname(__file__))
# app = Flask(__name__)
# CORS(app)
import gevent.monkey
gevent.monkey.patch_all()
from settings import Settings
from flask import Flask
import logging
from flask_sockets import Sockets
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object(Settings)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
sockets = Sockets(app)
db = SQLAlchemy(app)
from src.hub import Hub
hub = Hub()
@sockets.route('/api/')
def open_socket(ws):
socket = hub.add(ws)
while not socket.ws.closed:
message = socket.ws.receive()
if message:
try:
socket.handle(db, message)
# do something here
except Exception as e:
logging.error(e, exc_info=True)
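# ---------------------------------------------------------------------------
# Hedged client sketch: one way to exercise the socket route above from a
# separate process, using the third-party ``websocket-client`` package. The
# payload is a placeholder -- its real shape depends on Hub/socket.handle in
# src/hub.py, which is not shown here.
def _example_client(url="ws://localhost:5000/api/"):
    import json
    from websocket import create_connection  # pip install websocket-client
    ws = create_connection(url)
    ws.send(json.dumps({"type": "ping"}))  # placeholder message
    reply = ws.recv()
    ws.close()
    return reply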
|
StarcoderdataPython
|
3267931
|
"""
python-v2x.py
Sample Mcity OCTANE Python script for interacting with V2X AACE data.
"""
import os
import json
from dotenv import load_dotenv
import socketio
import requests
#Load environment variables
load_dotenv()
api_key = os.environ.get('MCITY_OCTANE_KEY', None)
server = os.environ.get('MCITY_OCTANE_SERVER', 'http://localhost:5000')
namespace = "/octane"
#If no API Key provided, exit.
if not api_key:
print ("No API KEY SPECIFIED. EXITING")
exit()
#Query all intersections to get a listing of possible intersections for use.
#First build a re-usable header for queries.
headers = {
'accept': 'application/json',
'X-API-KEY': api_key,
'Content-Type': 'application/json'
}
#Make the get query
uri = server + '/api/intersections'
response = requests.get(uri, headers=headers)
json_data = json.loads(response.text)
#Plymouth/Nixon is ID 157.
#We found this by browsing the above list.
#You can print the list with: print(json_data)
for item in json_data['intersections']:
if item['id'] == 157:
#For any given intersection that is V2X enabled
#A V2X intersection id is assigned. This identifier will be present
#on all messages from this infrastructure
#This ID can also be used to subscribe to a stream with only messages
#from this device.
v2xid = item['v2xIntersectionId']
#This is the intersection we'd like to listen to events from.
print (v2xid)
#Create an SocketIO Python client.
sio = socketio.Client()
# Async client is available also: sio = socketio.AsyncClient()
def send_auth():
"""
Emit an authentication event.
"""
sio.emit('auth', {'x-api-key': api_key}, namespace=namespace)
#Define event callbacks
@sio.on('connect', namespace=namespace)
def on_connect():
"""
Handle connection event and send authentication key
"""
send_auth()
@sio.on('join', namespace=namespace)
def on_join(data):
"""
Event fired when user joins a channel
"""
print('Join received with ', data)
@sio.on('channels', namespace=namespace)
def on_channels(data):
"""
Event fired when a user requests current channel information.
"""
print('Channel information', data)
#After the first channel list you have logged in.
#For this example we'll use this to signify the start of our session.
#At this point let's join the stream for the RSU we are interested in.
#The channel format is v2x_rsu_[id]_parsed (or raw)
channel = 'v2x_rsu_' + v2xid + '_parsed'
#Show all the rsu messages parsed:
#channel = 'v2x_rsu_parsed'
#Show all the obu messages raw:
#channel = 'v2x_obu_raw'
# let's join the V2X channel.
sio.emit('join', {'channel': channel}, namespace=namespace)
@sio.on('v2x_SPaT', namespace=namespace)
def on_spat(data):
"""
Event fired for each V2X Parsed SPaT message
"""
print(data)
@sio.on('v2x_raw', namespace=namespace)
def on_raw(data):
"""
Event fired for each V2X RAW message
"""
print(data)
@sio.on('disconnect', namespace=namespace)
def on_disconnect():
"""
Event fired on disconnect.
"""
print('disconnected from server')
#Make connection.
sio.connect(server, namespaces=[namespace])
sio.wait()
|
StarcoderdataPython
|
3531109
|
<reponame>shkyler/gmit-foda-trials<filename>Week 02/monty.py
#Monty Hall Game - <NAME> 2018-10-02 - created for fun based on this problem https://en.wikipedia.org/wiki/Monty_Hall_problem
import numpy as np
# Set up the 3 doors and randomly put a car behind one of them
doors = ['green', 'blue', 'red']
car = np.random.choice(doors)
# ask the user for the initial pick - value entered must be valid
print('There is a car behind one of the doors, please pick red, green or blue')
pick = input('Please pick a door!')
while pick not in doors:
pick = input("Please pick a valid door!")
# Randomly choose a door to open (it can't be the one with the car or the original pick)
show = np.random.choice([door for door in doors if door !=pick and door !=car])
print("I can now reveal to you that there is no car behind the " + show + " door")
# Define what the door is that has not been picked or showed
otherdoor = np.random.choice([door for door in doors if door !=pick and door !=show])
# Ask the user would they like to stay or switch - value entered must be valid
stay = input("Would you like to stay with your original door or switch?")
while stay not in ['stay', 'switch']:
stay = input ("Would you like to stay or switch")
# Define what the final answer is
if stay == 'stay':
final = pick
else:
final = otherdoor
# Check if they won and let them know
if final == car:
print('The car is behind '+ car + ' you win!')
else:
    print('There is a goat behind '+ final + ' you lose')
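# Hedged addition: a quick simulation of the classic result that switching wins
# about two thirds of the time, using the same random-choice style as the game
# above. Call monty_simulation() manually if curious; it is not run automatically.
def monty_simulation(trials=10000):
    switch_wins = 0
    for _ in range(trials):
        sim_car = np.random.choice(doors)
        sim_pick = np.random.choice(doors)
        sim_show = np.random.choice([d for d in doors if d != sim_pick and d != sim_car])
        sim_other = [d for d in doors if d != sim_pick and d != sim_show][0]
        if sim_other == sim_car:
            switch_wins += 1
    return switch_wins / float(trials)  # approaches 2/3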
|
StarcoderdataPython
|
5060116
|
import os
import cv2
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
import warnings
from PIL import Image
warnings.filterwarnings('ignore')
import torch.utils.data
from backbone import mobilefacenet, resnet, arcfacenet, cbam
from mtcnnalign.align_faces import warp_and_crop_face, get_reference_facial_points
from mtcnnalign.mtcnn.detector import detect_faces
import torchvision.transforms as transforms
from sklearn.externals import joblib
import pickle
def detectFace(img):
"""
input : image numpy array
output : two float numpy arrays of shapes [n_boxes, 4] and [n_boxes, 10],
bounding boxes and facial landmarks.
"""
img = Image.fromarray(img).convert('RGB')
bounding_boxes, facial5points = detect_faces(img)
return bounding_boxes, facial5points
def alignFace(raw, bounding_boxes, facial5points):
"""
input : raw : numpy array, bounding boxes and facial landmarks.
    output : aligned image
"""
if len(facial5points) == 0:
align_img = raw
else:
facial5points = np.reshape(facial5points[0], (2, 5))
crop_size = (112, 112)
default_square = True
inner_padding_factor = 0.25
outer_padding = (0, 0)
output_size = (112, 112)
reference_5pts = get_reference_facial_points(
output_size, inner_padding_factor, outer_padding, default_square)
align_img = warp_and_crop_face(raw, facial5points, reference_pts=reference_5pts, crop_size=crop_size)
return align_img
# load model
model_checkpoint_path = './model/IMFDB_MOBILEFACE_20190510_142512_Align_1.000/Iter_006000_net.ckpt'
model = mobilefacenet.MobileFaceNet()
model.load_state_dict(torch.load(model_checkpoint_path)['net_state_dict'])
model.eval()
# transform for input net
transform = transforms.Compose([
transforms.ToTensor(), # range [0, 255] -> [0.0,1.0]
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0]
])
# Read the video
cap = cv2.VideoCapture("/home/lab404/Desktop/myOpenCV/3idots.avi")
fps = cap.get(cv2.CAP_PROP_FPS)
totalFrameNumber = cap.get(cv2.CAP_PROP_FRAME_COUNT)
print(fps)
print(totalFrameNumber)
COUNT = 0
#load knn trained model.
knn_model = joblib.load('./model/knn_classifier.model')
#load star dict to get idx for star name
fid = open("./model/star_dict.pkl","rb")
star_dict = pickle.load(fid)
while COUNT < totalFrameNumber:
ret, frame = cap.read() #BGR
#bbox use to display, facial5points use to align face
bounding_boxes, facial5points = detectFace(frame)
    #filter the invalid bboxes
#vaild_bboxes = [bbox for bbox in bounding_boxes if bbox is not None]
if len(bounding_boxes) == 0 or len(facial5points)==0 :
cv2.imshow('video', frame)
COUNT = COUNT + 1
cv2.waitKey(1)
else:
#align_img use to face recognition
for idx in range(len(bounding_boxes)):
bbox = bounding_boxes[idx]
facial5 = facial5points[idx]
x1 = int(bbox[0]);y1 = int(bbox[1]); x2 = int(bbox[2]); y2 = int(bbox[3]); prob = bbox[4]
print(prob)
            # Align using this face's own landmarks (facial5) rather than always the first face's.
            align_img = alignFace(frame, bbox, [facial5])
align_img_tensor = transform(align_img)
align_img_tensor = torch.unsqueeze(align_img_tensor, 0)
feature = model(align_img_tensor)
predict = knn_model.predict(feature.detach().numpy())
star_name = star_dict[predict[0]]
cv2.rectangle(frame, (x1,y1), (x2,y2), (0,255,0), 2)
cv2.putText(frame, star_name, (x1-5,y1-5), cv2.FONT_HERSHEY_COMPLEX,0.5,(0,255,0),1)
cv2.imshow('video', frame)
COUNT = COUNT + 1
cv2.waitKey(1)
cap.release();
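# ---------------------------------------------------------------------------
# Hedged sketch: the knn_classifier.model loaded above is assumed to have been
# fit on embeddings produced by the same MobileFaceNet. The actual training
# script is not part of this excerpt, so treat this only as an illustration of
# that pipeline; paths and label handling here are made up.
def _train_knn_sketch(aligned_faces, labels, out_path='./model/knn_classifier.model'):
    from sklearn.neighbors import KNeighborsClassifier
    feats = []
    for face in aligned_faces:  # each face: 112x112 aligned RGB numpy array
        t = torch.unsqueeze(transform(face), 0)
        feats.append(model(t).detach().numpy()[0])
    clf = KNeighborsClassifier(n_neighbors=1)
    clf.fit(feats, labels)
    joblib.dump(clf, out_path)
    return clf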
|
StarcoderdataPython
|
6594299
|
<reponame>rpg711/Interview-Prep
'''https://practice.geeksforgeeks.org/problems/equilibrium-point/0'''
def find_equilibrium(A):
if len(A) < 3:
return -1
sum_before = A[0]
sum_after = sum(A) - A[1] - A[0]
idx = 1 # the equilibrium point
if sum_before == sum_after:
return idx
while idx < len(A) - 1:
idx += 1
sum_before += A[idx - 1]
sum_after -= A[idx]
if sum_before == sum_after:
return idx # 0 based
return -1
if __name__ == '__main__':
A = [1,3,5,2,2]
print(find_equilibrium(A))
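# Hedged addition: a brute-force cross-check of the O(n) scan above, handy for
# spot-testing; it is not part of the original snippet.
def find_equilibrium_bruteforce(A):
    for i in range(1, len(A) - 1):
        if sum(A[:i]) == sum(A[i + 1:]):
            return i
    return -1

assert find_equilibrium([1, 3, 5, 2, 2]) == find_equilibrium_bruteforce([1, 3, 5, 2, 2]) == 2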
|
StarcoderdataPython
|
5191325
|
from sklearn.linear_model import LinearRegression
import pandas as pd
df=pd.read_csv("corrected_data4.csv")
#Converting values to numeric
df["price"]=pd.to_numeric(df["price"],errors='coerce')
df["highway-mpg"]=pd.to_numeric(df["highway-mpg"],errors='coerce')
#declaring class of LinearRegression
lm=LinearRegression()
#fitting the data
x=df[['highway-mpg']]
y=df[['price']]
lm.fit(x, y)
#Getting the coefficients of the linear regression (coef_ is an attribute, not a method)
print(lm.coef_)
#getting the intercept of the linear regression
print(lm.intercept_)
|
StarcoderdataPython
|
9648157
|
<filename>examples/erm_data/erm_test.py<gh_stars>0
# This script uses LDLite to extract sample data from the FOLIO demo sites.
# Demo sites
current_release = 'https://folio-juniper-okapi.dev.folio.org/'
latest_snapshot = 'https://folio-snapshot-okapi.dev.folio.org/'
ldp_test = 'https://folio-test.ub.uni-mainz.de/okapi/'
###############################################################################
# Select a demo site here:
selected_site = ldp_test
###############################################################################
# Note that these demo sites are unavailable at certain times in the evening
# (Eastern time) or if a bug is introduced and makes one of them unresponsive.
# At the time of this writing, the "current release" demo site appears to be
# more stable than the "latest snapshot" site. For information about the
# status of the demo sites, please see the #hosted-reference-envs channel in
# the FOLIO Slack organization. For general information about FOLIO demo
# sites, see the "Demo Sites" section of the FOLIO Wiki at:
# https://wiki.folio.org
###############################################################################
import traceback
import ldlite
ld = ldlite.LDLite()
ld.connect_okapi(url=selected_site, tenant='diku', user='user', password='<PASSWORD>')
#db = ld.connect_db(filename='ldlite.db')
# For PostgreSQL, use connect_db_postgresql() instead of connect_db():
db = ld.connect_db_postgresql(dsn='dbname=ldlite_01 host=localhost user=ldlite password=<PASSWORD>')
queries = [
('folio_agreements.subscription_agreement', '/erm/sas', 'cql.allRecords=1 sortby id'),
('folio_agreements.entitlement', '/erm/entitlements', 'cql.allRecords=1 sortby id'),
('folio_agreements.files', '/erm/files', 'cql.allRecords=1 sortby id'),
('folio_agreements.contacts', '/erm/contacts', 'cql.allRecords=1 sortby id'),
('folio_agreements.package', '/erm/packages', 'cql.allRecords=1 sortby id'),
('folio_agreements.job', '/erm/jobs', 'cql.allRecords=1 sortby id'),
('folio_agreements.refdata_value', '/erm/refdata', 'cql.allRecords=1 sortby id'),
('folio_agreements.kbs', '/erm/kbs', 'cql.allRecords=1 sortby id'),
('folio_agreements.erm_resource', '/erm/resource', 'cql.allRecords=1 sortby id'),
('folio_agreements.title', '/erm/titles', 'cql.allRecords=1 sortby id'),
('folio_agreements.titles_entitled', '/erm/titles/entitled', 'cql.allRecords=1 sortby id'),
('folio_agreements.package_content_item', '/erm/pci', 'cql.allRecords=1 sortby id'),
('folio_agreements.platform', '/erm/platforms', 'cql.allRecords=1 sortby id'),
('folio_agreements.platform_title_instance', '/erm/pti', 'cql.allRecords=1 sortby id'),
('folio_agreements.custom_property', '/erm/custprops', 'cql.allRecords=1 sortby id'),
('folio_agreements.subscription_agreement_licences', '/erm/sas/linkedLicenses', 'cql.allRecords=1 sortby id'),
]
tables = []
for q in queries:
    try:
        if len(q) == 4:
            # a fourth tuple element, when present, is treated as the json_depth setting
            t = ld.query(table=q[0], path=q[1], query=q[2], json_depth=q[3])
        else:
            t = ld.query(table=q[0], path=q[1], query=q[2])
        tables += t
    except Exception as e:
        traceback.print_exception(type(e), e, e.__traceback__)
print()
print('Tables:')
for t in tables:
print(t)
print('('+str(len(tables))+' tables)')
|
StarcoderdataPython
|
357427
|
from datetime import date
d_maior = 0
for c in range(1, 8):
    ano = int(input('Enter your year of birth: '))
idade = date.today().year - ano
if idade >= 21:
d_maior = d_maior + 1
d_menor = 7 - d_maior
print(f'In this group of people, {d_maior} reached the age of majority and {d_menor} did not.')
|
StarcoderdataPython
|
8163021
|
import logging, unittest
from main import Url
class UrlTest(unittest.TestCase):
def test_sane_domain(self):
url = Url('google.com')
self.assertEqual('google.com', url.original_domain)
self.assertEqual('http://google.com', url.domain)
def test_domain_with_http(self):
url = Url('http://google.com')
self.assertEqual('http://google.com', url.original_domain)
self.assertEqual('http://google.com', url.domain)
def test_domain_with_http_encoded(self):
url = Url('http%3A//google.com')
self.assertEqual('google.com', url.original_domain)
self.assertEqual('http://google.com', url.domain)
|
StarcoderdataPython
|
12802757
|
<gh_stars>10-100
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from fbpcp.util.gcspath import GCSPath
class TestGCSPath(unittest.TestCase):
def test_gcspath_no_subfolder(self):
test_gcspath = GCSPath("https://storage.cloud.google.com/bucket-name/key-name")
self.assertEqual(test_gcspath.bucket, "bucket-name")
self.assertEqual(test_gcspath.key, "key-name")
    def test_gcspath_with_subfolder(self):
test_gcspath = GCSPath(
"https://storage.cloud.google.com/bucket-name/subfolder/key"
)
self.assertEqual(test_gcspath.bucket, "bucket-name")
self.assertEqual(test_gcspath.key, "subfolder/key")
def test_gcspath_invalid_fileURL(self):
test_url = "an invalid fileURL"
with self.assertRaises(ValueError):
GCSPath(test_url)
|
StarcoderdataPython
|
11311855
|
#!/usr/bin/python
## dinucleotide frequencies from sequences; input is multi-sequence fasta format;
import gzip
import os
import sys
import re
def dinuc_list(sequence):
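    # enumerate overlapping dinucleotides by pairing from offset 0 and offset 1,
    # e.g. "ACGTA" -> ['AC', 'GT', 'A'] + ['CG', 'TA']; trailing single bases are
    # harmless because they never match a two-letter pattern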
l1 = re.findall('.{1,2}', sequence)
l2 = re.findall('.{1,2}', sequence[1:])
sequence_list = l1 + l2
return sequence_list
def dinuc_counts(dnt_list, sequence_list):
freq = [0] * len(dnt_list)
for i, pattern in enumerate(dnt_list):
count = sequence_list.count(pattern) ## list.count(x)
freq[i] = float(count) # / float(len(seq)-1)
return freq
f = open(sys.argv[1], 'r')
## all possible sets:
nt = ['A', 'G', 'T', 'C']
dnt = [n + nt[i] for n in nt for i in range(4) ]
## write the header
dntw = [d + '\t' for d in dnt]
dntw = ''.join(dntw)
newline = '#name' + '\t' + dntw.strip()
print(newline)
for line in f:
line = line.strip();
if line.startswith('>'):
seqname = line[1:];
continue
else:
seq = line
seqlist = dinuc_list(seq)
dcounts = dinuc_counts(dnt, seqlist)
freq = [float(d) / float(len(seq)-1) for d in dcounts ]
# print freq
freq = ['%.4f' % i + '\t' for i in freq]
freq = ''.join(freq)
newline = seqname + '\t' + freq.strip()
        print(newline)
|
StarcoderdataPython
|
6650512
|
<gh_stars>1-10
import re
try:
    from Tkinter import *  # noqa
    from tkMessageBox import showinfo, showwarning, showerror
except ImportError:
    from tkinter import *  # noqa
    from tkinter.messagebox import showinfo, showwarning, showerror
import mudclientproto as mcp
from mcpgui.wizard import Wizard
from mcpgui.notebook import NoteBook
class McpFuzzballNotifyPkg(mcp.McpPackage):
def __init__(self):
mcp.McpPackage.__init__(self, 'org-fuzzball-notify', '1.0', '1.1')
def process_message(self, msg):
if self.connection.is_server:
self.process_message_server(msg)
else:
self.process_message_client(msg)
def process_message_client(self, msg):
pass
def process_message_server(self, msg):
if msg.name == 'org-fuzzball-notify-info':
topic = msg['topic']
text = msg['text']
showinfo(topic, text)
elif msg.name == 'org-fuzzball-notify-warning':
topic = msg['topic']
text = msg['text']
showwarning(topic, text)
elif msg.name == 'org-fuzzball-notify-error':
topic = msg['topic']
text = msg['text']
showerror(topic, text)
class McpFuzzballGuiPkg(mcp.McpPackage):
def __init__(self):
self.dlogs = {}
mcp.McpPackage.__init__(self, 'org-fuzzball-gui', '1.0', '1.3')
self.client_handlers = {
'org-fuzzball-gui-dlog-create': self.dlog_create,
'org-fuzzball-gui-dlog-show': self.dlog_show,
'org-fuzzball-gui-dlog-close': self.dlog_close,
'org-fuzzball-gui-error': self.error,
'org-fuzzball-gui-ctrl-command': self.ctrl_command,
'org-fuzzball-gui-ctrl-value': self.ctrl_value,
'org-fuzzball-gui-ctrl-frame': self.ctrl_create,
'org-fuzzball-gui-ctrl-datum': self.ctrl_create,
'org-fuzzball-gui-ctrl-hrule': self.ctrl_create,
'org-fuzzball-gui-ctrl-vrule': self.ctrl_create,
'org-fuzzball-gui-ctrl-text': self.ctrl_create,
'org-fuzzball-gui-ctrl-image': self.ctrl_create,
'org-fuzzball-gui-ctrl-button': self.ctrl_create,
'org-fuzzball-gui-ctrl-checkbox': self.ctrl_create,
'org-fuzzball-gui-ctrl-radio': self.ctrl_create,
'org-fuzzball-gui-ctrl-edit': self.ctrl_create,
'org-fuzzball-gui-ctrl-password': self.ctrl_create,
'org-fuzzball-gui-ctrl-spinner': self.ctrl_create,
'org-fuzzball-gui-ctrl-scale': self.ctrl_create,
'org-fuzzball-gui-ctrl-combobox': self.ctrl_create,
'org-fuzzball-gui-ctrl-multiedit': self.ctrl_create,
'org-fuzzball-gui-ctrl-listbox': self.ctrl_create,
'org-fuzzball-gui-ctrl-notebook': self.ctrl_create,
'org-fuzzball-gui-ctrl-tree': self.ctrl_create,
'org-fuzzball-gui-ctrl-menu': self.ctrl_create,
}
def process_message(self, msg):
if self.connection.is_server:
return
if msg.name not in self.client_handlers:
return
hndlr = self.client_handlers[msg.name]
hndlr(msg)
def _verify_existing_dlog(self, dlogid):
if dlogid in self.dlogs:
return True
self.send_error(
dlogid, '', 'ENODLOG',
"No dialog exists with that dialog id."
)
return False
def _get_dlog(self, msg):
dlogid = msg.get('dlogid', '')
if not self._verify_existing_dlog(dlogid):
return None
return self.dlogs[dlogid]
def dlog_create(self, msg):
dlogid = msg.get('dlogid', '')
if not re.match(r'^[A-Z0-9_]+$', dlogid, re.I):
self.send_error(
dlogid, '', 'EBADDLOGID',
"The given dialog id contains illegal characters."
)
return
self.dlogs[dlogid] = McpGuiDialog(self, msg)
def dlog_show(self, msg):
dlog = self._get_dlog(msg)
if not dlog:
return
dlog.deiconify()
def dlog_close(self, msg):
dlog = self._get_dlog(msg)
if not dlog:
return
dlog.destroy()
del self.dlogs[dlog.dlogid]
def error(self, msg):
dlogid = msg.get('dlogid', '')
ctrlid = msg.get('ctrlid', '')
errcode = msg.get('errcode', '')
errtext = msg.get('errtext', '')
showerror(
"MCP GUI Error",
(
"Dialog ID: %s\n"
"Control ID: %s\n"
"ErrorCode: %s\n"
"%s"
) % (
dlogid,
ctrlid,
errcode,
errtext
),
)
def ctrl_command(self, msg):
dlog = self._get_dlog(msg)
if not dlog:
return
dlog.ctrl_command(msg)
def ctrl_value(self, msg):
dlog = self._get_dlog(msg)
if not dlog:
return
dlog.ctrl_value(msg)
def ctrl_create(self, msg):
dlog = self._get_dlog(msg)
if not dlog:
return
pfx = 'org-fuzzball-gui-ctrl-'
ctrl_type = msg.name[len(pfx):]
        if ctrl_type not in dlog.control_classes:
            return
dlog.ctrl_create(ctrl_type, msg)
def send_event(self, dlogid, ctrlid, event, dismissed, data=''):
        msg = mcp.McpMessage(
'org-fuzzball-gui-ctrl-event',
dlogid=dlogid,
id=ctrlid,
dismissed=dismissed,
event=event,
data=data,
)
self.connection.send_message(msg)
def send_error(self, dlogid, ctrlid, errcode, errtext):
        msg = mcp.McpMessage(
'org-fuzzball-gui-error',
dlogid=dlogid,
id=ctrlid,
errcode=errcode,
errtext=errtext,
)
self.connection.send_message(msg)
class McpGuiControl(object):
def __init__(self, typ, dlog, pane, msg):
self.ctrl_type = typ
self.dlog = dlog
self.ctrlid = msg.get('id')
self.value = msg.get('value', '')
self.valname = msg.get('valname', self.ctrlid)
self.row = msg.get('row', -1, int)
self.column = msg.get('column', -1, int)
self.newline = msg.get('newline', True, bool)
self.colskip = msg.get('colskip', 0, int)
self.colspan = msg.get('colspan', 1, int)
self.rowspan = msg.get('rowspan', 1, int)
self.sticky = msg.get('sticky', 'w', str).lower()
self.sticky = ''.join(set(self.sticky) & set("nsew"))
self.minwidth = msg.get('minwidth', 0, int)
self.minheight = msg.get('minheight', 0, int)
self.hweight = msg.get('hweight', 0, int)
self.vweight = msg.get('vweight', 0, int)
self.leftpad = msg.get('leftpad', 10, int)
self.toppad = msg.get('toppad', 10, int)
self.sort = msg.get('sorted', False, bool)
self.pane = pane
self.ctrl = None
def send_event(self, event, dismissed, data=''):
self.dlog.send_event(self.ctrlid, event, dismissed, data)
def send_error(self, errcode, errtext):
self.dlog.send_error(self.ctrlid, errcode, errtext)
def set_value(self, val):
self.value = val
def get_value(self):
return self.value
def command(self, msg):
self.send_error(
'ECTRLCMDNOTSUPP',
"The given control-command is not recognized."
)
class McpGuiCtrlFrame(McpGuiControl):
def __init__(self, dlog, pane, msg):
self.visible = msg.get('visible', False, bool)
self.collapsible = msg.get('collapsible', False, bool)
self.collapsed = msg.get('collapsed', False, bool)
opts = {
k: v for k, v in msg.items()
if v and k in ['text', 'relief', 'width', 'height']
}
if self.visible:
opts['borderwidth'] = 2
else:
opts['borderwidth'] = 0
opts['relief'] = FLAT
if 'text' in opts:
del opts['text']
if 'text' in opts:
self.ctrl = LabelFrame(pane, **opts)
else:
self.ctrl = Frame(pane, **opts)
self.holder = Frame(self.ctrl, borderwidth=0)
if not self.collapsible or not self.collapsed:
self.holder.pack(side=TOP, fill=BOTH, expand=1)
        if self.collapsible:
            self.ctrl.bind('<1>', self._toggle)
        # initialize the base control before registering the pane, so self.ctrlid exists
        McpGuiControl.__init__(self, 'frame', dlog, pane, msg)
        dlog.panes[self.ctrlid] = self.holder
    def _toggle(self, event=None):
        self.collapsed = not self.collapsed
        if not self.collapsible:
            self.collapsed = False
        if self.collapsed:
            # hide the inner holder frame while collapsed
            self.holder.forget()
else:
self.holder.pack(side=TOP, fill=BOTH, expand=1)
def config(self, **kwargs):
pass
class McpGuiCtrlDatum(McpGuiControl):
def __init__(self, dlog, pane, msg):
        McpGuiControl.__init__(self, 'datum', dlog, pane, msg)
class McpGuiCtrlHRule(McpGuiControl):
    def __init__(self, dlog, pane, msg):
        # initialize the base control first so self.sticky and self.pane exist
        McpGuiControl.__init__(self, 'hrule', dlog, pane, msg)
        self.ctrl = Frame(
            pane.ctrl,
            height=msg.get('height', 2),
        )
        self.sticky = ''.join(set(self.sticky + 'ew'))
class McpGuiCtrlVRule(McpGuiControl):
    def __init__(self, dlog, pane, msg):
        McpGuiControl.__init__(self, 'vrule', dlog, pane, msg)
        self.ctrl = Frame(
            self.pane.ctrl,
            width=msg.get('width', 2),
        )
        self.sticky = ''.join(set(self.sticky + 'ns'))
class McpGuiCtrlText(McpGuiControl):
pass
class McpGuiCtrlImage(McpGuiControl):
pass
class McpGuiCtrlButton(McpGuiControl):
pass
class McpGuiCtrlCheckBox(McpGuiControl):
pass
class McpGuiCtrlRadio(McpGuiControl):
pass
class McpGuiCtrlEdit(McpGuiControl):
pass
class McpGuiCtrlPassWord(McpGuiControl):
pass
class McpGuiCtrlSpinner(McpGuiControl):
pass
class McpGuiCtrlScale(McpGuiControl):
pass
class McpGuiCtrlComboBox(McpGuiControl):
pass
class McpGuiCtrlMultiEdit(McpGuiControl):
pass
class McpGuiCtrlListBox(McpGuiControl):
pass
class McpGuiCtrlNoteBook(McpGuiControl):
pass
class McpGuiCtrlTree(McpGuiControl):
pass
class McpGuiCtrlMenu(McpGuiControl):
pass
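# McpGuiDialog owns all controls of one MCP GUI dialog: each incoming
# org-fuzzball-gui-ctrl-* message is routed to ctrl_create(), which looks up the
# matching control class above and instantiates it in the requested pane.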
class McpGuiDialog(Toplevel):
def __init__(self, pkg, msg):
self.pkg = pkg
self.currpane = self
self.dlogid = msg.get('dlogid')
        self.dlogtitle = msg.get('title')  # do not shadow the Toplevel.title() method
self.dlogtype = msg.get('type')
self.resize = msg.get('resizable', '').lower()
self.minwidth = int(msg.get('minwidth', '30'))
self.minheight = int(msg.get('minheight', '30'))
self.width = int(msg.get('width', '300'))
self.height = int(msg.get('height', '200'))
self.maxwidth = int(msg.get('maxwidth', '0'))
self.maxheight = int(msg.get('maxheight', '0'))
if not self.maxwidth:
self.maxwidth = self.winfo_screenwidth()
if not self.maxheight:
self.maxheight = self.winfo_screenheight()
        Toplevel.__init__(self)
        self.controls = {}
        self.panes = {}
        if self.dlogtitle:
            self.title(self.dlogtitle)
self.withdraw()
        xresize = 1 if self.resize in ["x", "xy", "both"] else 0
        yresize = 1 if self.resize in ["y", "xy", "both"] else 0
self.resizable(width=xresize, height=yresize)
self.minsize(width=self.minwidth, height=self.minheight)
self.maxsize(width=self.maxwidth, height=self.maxheight)
self.protocol('WM_DELETE_WINDOW', self._delete_window)
self.control_classes = {
'frame': McpGuiCtrlFrame,
'datum': McpGuiCtrlDatum,
'hrule': McpGuiCtrlHRule,
'vrule': McpGuiCtrlVRule,
'text': McpGuiCtrlText,
'image': McpGuiCtrlImage,
'button': McpGuiCtrlButton,
'checkbox': McpGuiCtrlCheckBox,
'radio': McpGuiCtrlRadio,
'edit': McpGuiCtrlEdit,
            'password': McpGuiCtrlPassWord,
'spinner': McpGuiCtrlSpinner,
'scale': McpGuiCtrlScale,
'combobox': McpGuiCtrlComboBox,
'multiedit': McpGuiCtrlMultiEdit,
'listbox': McpGuiCtrlListBox,
'notebook': McpGuiCtrlNoteBook,
'tree': McpGuiCtrlTree,
'menu': McpGuiCtrlMenu,
}
def send_event(self, ctrlid, event, dismissed, data=''):
self.pkg.send_event(self.dlogid, ctrlid, event, dismissed, data)
def send_error(self, ctrlid, errcode, errtext):
self.pkg.send_error(self.dlogid, ctrlid, errcode, errtext)
def setup_tabbed_dlog(self, msg):
panes = msg.get('panes', [])
names = msg.get('names', [])
self.ctrl_create(
'notebook', dict(
panes=panes,
names=names,
height=self.height,
width=self.width
)
)
self.ctrl_create(
'frame', dict(
id='__bframe',
text='',
sticky='wen',
toppad=3
)
)
self.ctrl_create(
'frame', dict(
id='__bfiller',
pane='__bframe',
newline=0,
hweight=1
)
)
self.ctrl_create(
'button', dict(
id='_ok',
width=8,
text='Okay',
dismiss=1,
newline=0
)
)
self.ctrl_create(
'button', dict(
id='_cancel',
width=8,
text='Cancel',
dismiss=1,
newline=0
)
)
self.ctrl_create(
'button', dict(
id='_apply',
width=8,
text='Apply',
dismiss=0,
newline=0
)
)
def setup_helper_dlog(self, msg):
wiz = Wizard(
width=msg.get('width', 640, int),
height=msg.get('height', 480, int),
finishcommand=self._helper_finish,
cancelcommand=self._helper_cancel,
)
for name, pane in zip(msg.get('names', []), msg.get('panes', [])):
self.panes[name] = wiz.add_pane(pane, name)
def _helper_finish(self, event=None):
self.send_event('_finish', 'buttonpress', 1)
def _helper_cancel(self, event=None):
self.send_event('_cancel', 'buttonpress', 1)
def _verify_existing_ctrl(self, ctrlid):
if ctrlid in self.controls:
return True
        self.send_error(
            ctrlid, 'ENOCONTROL',
            "No control named '%s' exists in the given dialog." % ctrlid
        )
return False
def _get_ctrl(self, msg):
ctrlid = msg.get('id', '')
        if not self._verify_existing_ctrl(ctrlid):
return None
return self.controls[ctrlid]
def _delete_window(self, *args):
self.send_event('_closed', 'buttonpress', 1)
def destroy_dlog(self):
self.destroy()
def ctrl_command(self, msg):
ctrl = self._get_ctrl(msg)
if not ctrl:
return
return ctrl.command(msg)
def ctrl_value(self, msg):
ctrl = self._get_ctrl(msg)
if not ctrl:
return
return ctrl.set_value(msg.get('value', ''))
def ctrl_create(self, ctrl_type, msg):
ctrl_class = self.control_classes[ctrl_type]
ctrlid = msg.get('id', '')
if not re.match(r'^[A-Z0-9_]+$', ctrlid, re.I):
            self.send_error(
                ctrlid, 'EBADCTRLID',
                "The given control id contains illegal characters."
            )
return
pane = msg.get('pane')
if pane:
if pane not in self.controls:
self.send_error(
ctrlid, 'EPANEINVALID',
"The given dialog doesn't contain a pane by that id."
)
return
pane = self.controls[pane]
self.currpane = pane
else:
pane = self.currpane
self.controls[ctrlid] = ctrl_class(self, pane, msg)
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
|
StarcoderdataPython
|
1839278
|
<reponame>teuben/QAC<gh_stars>0
# -*- python -*-
#
# Typical usage (see also Makefile)
# casa -c sky1.py
#
# Play with the skymodel
# - one or full pointing set
# - options for tp2vis, feather, ssc
#
# Reminder: at 115 GHz we have:
# 12m PB is 50" (FWHM) [FWHM" ~ 600/DishDiam]
# 7m PB is 85"
#
# Integration times:
# TP uses (nvgrp * npnt) seconds
# INT (per config) uses times[0] hours in total, times[1] minutes per pointing
# thus it is useful to make sure that (times[0]*60) / (times[1]*npnt) is integral
#
#
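# Worked example (illustration only, using the defaults below): times = [2.25, 1]
# gives 2.25*60 = 135 one-minute pointing visits per configuration, so a mosaic
# whose npnt divides 135 keeps that ratio integral.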
pdir = 'sky1' # name of directory within which everything will reside
model = 'skymodel-b.fits' # this has phasecenter with dec=-30 for ALMA sims
phasecenter = 'J2000 180.0deg -35.0deg' # where we want this model to be on the sky
# pick the piece of the model to image, and at what pixel size
# natively this model is 4096 pixels at 0.05"
imsize_m = 4096
pixel_m = 0.05
# pick the sky imaging parameters (for tclean)
# The product of these typically will be the same as that of the model (but don't need to)
# pick the pixel_s based on the largest array configuration (cfg[], see below) choosen
imsize_s = 256
pixel_s = 0.8
box = '17,17,238,238'
imsize_s = 1120
pixel_s = 0.21
box = '150,150,970,970'
# number of TP cycles
nvgrp = 4
# pick a few niter values for tclean to check flux convergence
niter = [0,500,1000,2000]
niter = [0,1000,4000]
# pick which ALMA configurations you want (0=7m ACA ; 1,2,3...=12m ALMA)
cfg = [0,1,2,3]
cfg = [0]
cfg = [0,1,4]
# pick integration times for cfg's
times = [2.25, 1] # 2 hrs in 1 min integrations
# TP dish size in m; uvmax will be taken as 5/6 of this
# @todo don't change this
dish = 12.0
maxuv = None
# Set grid to a positive arcsec grid spacing if the field needs to be covered
# 0 will force a single pointing
# ALMA normally uses lambda/2D; a hexagonal grid uses lambda/sqrt(3)D
grid = 30
# OTF: if selected, tp2vis will get an OTF, instead of the model jy/pixel map
otf = 0
# advanced features (0 or 1)
VP = 0
SCHWAB = 0
# scaling factors
wfactor = 1
# -- do not change parameters below this ---
import sys
for arg in qac_argv(sys.argv):
exec(arg)
# derived parameters
ptg = pdir + '.ptg' # pointing mosaic for the ptg
test = pdir # compat
psd = [] # accumulate images for qac_psd()
if maxuv is None:
maxuv = 5.0*dish/6.0 # for tp2vis
# extra tclean arguments
args = {}
args['usemask'] = 'pb'
args['pbmask'] = 0.5
args['deconvolver']= 'hogbom'
# report, add Dtime
qac_begin(pdir,False)
qac_project(pdir)
qac_log("REPORT")
qac_version()
tp2vis_version()
(phasecenter, imsize_m, pixel_m) = qac_image_desc(model, phasecenter, imsize_m, pixel_m)
if grid > 0:
# create a mosaic of pointings
p = qac_im_ptg(phasecenter,imsize_m,pixel_m,grid,rect=True,outfile=ptg)
else:
# create a single pointing
qac_ptg(phasecenter,ptg)
p = [phasecenter]
if True:
qac_tpdish('ALMATP', dish)
qac_tpdish('VIRTUAL',dish)
qac_vp(VP,SCHWAB)
# create a series of MS based on a model and antenna configuration for different ACA/ALMA configurations
# we do the INT first, so we get a better sized startmodel for tp2vis, in case we rescale the model
qac_log("ALMA 7m/12m")
ms1={}
for c in cfg:
ms1[c] = qac_alma(test,model,imsize_m,pixel_m,cycle=7,cfg=c,ptg=ptg, phasecenter=phasecenter, times=times)
# startmodel for later
startmodel = ms1[cfg[0]].replace('.ms','.skymodel')
psd.append(startmodel)
# get a list of MS we got for the INT
intms = list(ms1.values())
if False:
# generate two OTF
qac_log('OTF')
qac_tp_otf(test+'/clean1', startmodel, 24.0, label='24')
qac_tp_otf(test+'/clean1', startmodel, 12.0, label='12')
# smooth the skymodel with the feather beam so we can compare when they are in Jy/beam
qac_log('SMOOTH')
qac_smooth(test+'/clean1', startmodel, label='24', niteridx=0)
qac_smooth(test+'/clean1', startmodel, label='12', niteridx=0)
qac_log("TP2VIS")
if otf == 0:
tpms = qac_tp_vis(pdir,model,ptg,pixel_m,maxuv=maxuv,nvgrp=nvgrp,fix=0)
else:
tp_beam = 56.7 # @todo
beam = '%sarcsec' % tp_beam
otf = model.replace('.fits','.otf')
print("Creating %s" % otf)
imsmooth(model,'gaussian',beam,beam,pa='0deg',outfile=otf,overwrite=True)
tpms = qac_tp_vis(test,otf,ptg,pixel_m,maxuv=maxuv,nvgrp=nvgrp,fix=0)
psd.append(otf)
tp2viswt(tpms,wfactor,'multiply')
qac_log("CLEAN1:")
qac_clean1(test+'/clean0', tpms, imsize_s, pixel_s, phasecenter=phasecenter, **args)
qac_log("PLOT and STATS:")
for idx in range(1):
im1 = test+'/clean0/dirtymap%s.image' % QAC.label(idx)
im2 = test+'/clean0/dirtymap%s.image.pbcor' % QAC.label(idx)
qac_plot(im1,mode=1) # casa based plot w/ colorbar
qac_stats(im1) # noise flat
qac_stats(im2) # flux flat
if idx==0: psd.append(im2)
tp2vispl(intms+[tpms],outfig=test+'/tp2vispl.png')
qac_log("CLEAN with TP2VIS")
if False:
qac_clean(test+'/clean3',tpms,intms,imsize_s,pixel_s,niter=niter,phasecenter=phasecenter,do_int=True,do_concat=False, **args)
qac_tweak(test+'/clean3','int', niter)
qac_tweak(test+'/clean3','tpint',niter)
else:
qac_clean(test+'/clean3',tpms,intms,imsize_s,pixel_s,niter=niter,phasecenter=phasecenter,do_int=True,do_concat=False, **args)
qac_tweak(test+'/clean3','tpint',niter)
psd.append(test+'/clean3/tpint.image.pbcor')
psd.append(test+'/clean3/skymodel.smooth.image')
if False:
qac_clean(test+'/clean4',tpms,intms,imsize_s,pixel_s,niter=niter,phasecenter=phasecenter,do_int=True,do_concat=False,startmodel=startmodel)
qac_tweak(test+'/clean4','int',niter)
qac_tweak(test+'/clean4','tpint',niter)
if False:
# clean instead of tclean()
qac_clean(test+'/clean5',tpms,intms,imsize_s,pixel_s,niter=niter,phasecenter=phasecenter,do_int=True,do_concat=False,t=True)
qac_tweak(test+'/clean5','int',niter)
qac_tweak(test+'/clean5','tpint',niter)
qac_log("OTF")
# create an OTF TP map using a [12m] dish
qac_tp_otf(test+'/clean3', startmodel, dish, template=test+'/clean3/tpint.image')
if False:
qac_tp_otf(test+'/clean4', startmodel, dish,template=test+'/clean4/tpint.image')
if False:
qac_log("FEATHER")
# combine TP + INT using feather and ssc, for all niter's
for idx in range(len(niter)):
qac_feather(test+'/clean3', niteridx=idx, name="int")
qac_ssc (test+'/clean3', niteridx=idx, name="int")
qac_smooth (test+'/clean3', startmodel, niteridx=idx, name="int")
if False:
qac_feather(test+'/clean4', niteridx=idx, name="int")
qac_ssc (test+'/clean4', niteridx=idx, name="int")
qac_smooth (test+'/clean4', startmodel, niteridx=idx, name="int")
for idx in range(len(niter)):
qac_smooth (test+'/clean3', startmodel, niteridx=idx, name="tpint")
# the real flux
qac_stats(model)
qac_log("REGRESSION")
qac_stats(model)
qac_stats(test+'/clean0/dirtymap.image')
qac_stats(test+'/clean0/dirtymap.image.pbcor')
qac_stats(test+'/clean3/int.image')
qac_stats(test+'/clean3/tpint.image')
qac_stats(test+'/clean3/tpint_2.image')
qac_stats(test+'/clean3/tpint_3.image')
qac_stats(test+'/clean3/tpint.image.pbcor')
qac_stats(test+'/clean3/tpint_2.image.pbcor')
qac_stats(test+'/clean3/tpint_3.image.pbcor')
qac_stats(test+'/clean3/skymodel.smooth.image')
qac_stats(test+'/clean3/skymodel_2.smooth.image')
qac_stats(test+'/clean3/skymodel_3.smooth.image')
qac_stats(test+'/clean3/otf.image')
qac_stats(test+'/clean3/otf.image.pbcor')
if False:
qac_log("PLOT_GRID plot2/3")
a1 = test+'/clean1/dirtymap.image' # INT
a2 = test+'/clean1/dirtymap_2.image'
a3 = test+'/clean2/dirtymap.image' # INT w/ startmodel
a4 = test+'/clean2/dirtymap_2.image'
a31 = test+'/clean3/int.image' # INT/TPINT
a32 = test+'/clean3/int_2.image'
a33 = test+'/clean3/tpint.image'
a34 = test+'/clean3/tpint_2.image'
a41 = test+'/clean4/int.image' # INT/TPINT w/ startmodel
a42 = test+'/clean4/int_2.image'
a43 = test+'/clean4/tpint.image'
a44 = test+'/clean4/tpint_2.image'
qac_tp_otf(test+'/clean1', startmodel, 12.0)
# nitering
qac_plot_grid([a31,a32,a41,a42,a31,a32,a33,a34],diff=10, plot=test+'/plot1.cmp.png')
#
qac_plot_grid([a31,a41,a32,a42],diff=1, plot=test+'/plot2.cmp.png')
qac_plot_grid([a33,a43,a34,a44],diff=10, plot=test+'/plot3.cmp.png')
# qac_plot_grid([a1,a31,a2,a32,a3,a41,a4,a42],diff=1) these are all 0, as they should be
if False:
qac_log("PLOT_GRID plot1")
a1 = test+'/clean1/dirtymap.image'
a2 = test+'/clean1/dirtymap.image.pbcor'
a3 = test+'/clean1/skymodel.smooth.image'
a4 = test+'/clean1/feather.image/'
a5 = test+'/clean1/ssc.image/'
a6 = test+'/test1-alma.aca.cycle5.skymodel/'
a7 = test+'/clean3/tpint_2.image'
a8 = test+'/clean3/tpint_2.tweak.image'
qac_plot_grid([a1,a3,a4,a5,a7,a8],plot=test+'/plot1.cmp.png')
# PSD plot comparison of the images we accumulated in psd[]
if len(psd) > 0:
qac_log("QAC_PSD")
p2=qac_psd(psd, plot=pdir+'/psd.png')
qac_log("DONE!")
qac_end()
|
StarcoderdataPython
|
1918944
|
from django.contrib.auth.forms import PasswordResetForm
from django.contrib.auth.forms import UserChangeForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.forms import UsernameField
from django.contrib.auth.models import User
from django.forms import ModelForm, Form, ChoiceField, IntegerField, DateField, EmailField
from spendtrackapp.models import Entry, Plan, Category
from spendtrackapp.urls.converters import *
from spendtrackapp.views.utils import is_valid_iso_week
##############################################################
# FIELDS #
##############################################################
class NullableIntegerField(IntegerField):
    """An integer field that can be empty"""
    def clean(self, value):
        return super().clean(value) if value else None
class NullableDateField(DateField):
    """A date field that can be empty"""
    def clean(self, value):
        return super().clean(value) if value else None
##############################################################
# FORMS #
##############################################################
class EntryForm(ModelForm):
"""Form to validate entry info"""
class Meta:
model = Entry
fields = ['date', 'content', 'value', 'leaf_category', 'user']
def clean(self):
super().clean()
# validate leaf category
if 'leaf_category' not in self.errors:
leaf_category_id = self.data['leaf_category']
category = Category.get_leaf_category(leaf_category_id)
if category is None:
self.add_error('leaf_category', 'Invalid category')
return self.cleaned_data
def save(self, commit=True):
# TODO test this in add/edit entry
super().save(commit)
# set the list of categories after saving entry to database
if commit:
leaf_category = Category.get_leaf_category(self.instance.leaf_category_id)
if leaf_category is None:
raise ValueError('Invalid leaf category')
self.instance.change_category(leaf_category)
return self.instance
class PlanForm(ModelForm):
"""Form to validate plan info"""
class Meta:
model = Plan
fields = ['name', 'start_date', 'end_date',
'planned_total', 'compare', 'category', 'user']
def clean(self):
super().clean()
# validate dates
if 'start_date' in self.cleaned_data and 'end_date' in self.cleaned_data:
            start_date = self.cleaned_data['start_date']
            end_date = self.cleaned_data['end_date']
            # start_date must be <= end_date; compare the parsed dates, not raw strings
            if start_date > end_date:
                self.add_error('end_date', 'End date must be after start date')
# start date can't be in the past
# if start_date < date.today().isoformat():
# self.add_error('start_date', 'Start date cannot be in the past')
# validate planned_total
if 'planned_total' in self.cleaned_data:
planned_total = float(self.data['planned_total'])
if planned_total <= 0:
self.add_error('planned_total', 'Total must be positive')
return self.cleaned_data
class SearchTimeForm(Form):
"""Form to validate search time queries"""
search_type = ChoiceField(choices=[
['year', 'year'], ['month', 'month'], ['week', 'week'], ['date_range', 'date_range']
])
year = NullableIntegerField(min_value=1000, max_value=9999)
month = NullableIntegerField(min_value=1, max_value=12)
week = NullableIntegerField(min_value=1, max_value=53)
start_date = NullableDateField()
end_date = NullableDateField()
def clean(self):
super().clean()
# check that required fields according to 'search_type' are present
if 'search_type' in self.cleaned_data:
search_type = self.data['search_type']
year = self.cleaned_data.get('year', None)
month = self.cleaned_data.get('month', None)
week = self.cleaned_data.get('week', None)
start_date = self.cleaned_data.get('start_date', None)
end_date = self.cleaned_data.get('end_date', None)
if search_type == 'year' and year is None:
self.add_error('year', 'This field is required.')
elif search_type == 'month':
if year is None:
self.add_error('year', 'This field is required.')
if month is None:
self.add_error('month', 'This field is required.')
elif search_type == 'week':
if year is None:
self.add_error('year', 'This field is required.')
if week is None:
self.add_error('week', 'This field is required.')
if None not in [year, week] and not is_valid_iso_week(year, week):
self.add_error('week', 'Invalid ISO week')
elif search_type == 'date_range':
if start_date is None:
self.add_error('start_date', 'This field is required.')
if end_date is None:
self.add_error('end_date', 'This field is required.')
if None not in [start_date, end_date] and start_date > end_date:
self.add_error('end_date', 'End date must come after start date.')
return self.cleaned_data
_relevant_data_str = None
@property
# TODO consider make this a immutable dict
def relevant_data_str(self):
"""Return a dictionary contains strings of relevant data to the search type"""
# raise error when data is not clean
if not self.is_valid():
raise ValueError('Data is not clean')
if self._relevant_data_str is None:
search_type = self.cleaned_data['search_type']
if search_type == 'year':
year_converter = FourDigitYearConverter()
self._relevant_data_str = {
'year': year_converter.to_url(self.cleaned_data['year'])
}
elif search_type == 'month':
year_converter = FourDigitYearConverter()
month_converter = ThreeCharMonthConverter()
self._relevant_data_str = {
'year': year_converter.to_url(self.cleaned_data['year']),
'month': month_converter.to_url(self.cleaned_data['month'])
}
elif search_type == 'week':
year_converter = FourDigitYearConverter()
week_converter = TwoDigitWeekConverter()
self._relevant_data_str = {
'year': year_converter.to_url(self.cleaned_data['year']),
'week': week_converter.to_url(self.cleaned_data['week'])
}
else:
date_converter = DateConverter()
self._relevant_data_str = {
'start_date': date_converter.to_url(self.cleaned_data['start_date']),
'end_date': date_converter.to_url(self.cleaned_data['end_date'])
}
return self._relevant_data_str
class UserEditForm(UserChangeForm):
class Meta:
model = User
fields = ['username', 'first_name', 'last_name', 'email']
def __init__(self, *args, **kwargs):
super(UserChangeForm, self).__init__(*args, **kwargs)
if 'password' in self.fields:
del self.fields['password']
def clean(self):
super().clean()
if 'email' in self.data and self.data['email'].strip() == '':
self.add_error('email', 'Email cannot be empty.')
return self.cleaned_data
class CustomPasswordResetForm(PasswordResetForm):
def clean(self):
super().clean()
if self.errors == {}:
try:
self.user = User.objects.get(email=self.cleaned_data['email'])
except User.DoesNotExist:
self.add_error('email', 'Invalid email')
class RegisterForm(UserCreationForm):
class Meta:
model = User
fields = ("username", "email")
field_classes = {'username': UsernameField, 'email': EmailField}
def clean(self):
super().clean()
if 'email' in self.cleaned_data and self.cleaned_data['email'] == '':
self.add_error('email', 'Email cannot be empty')
return self.cleaned_data
|
StarcoderdataPython
|
1962709
|
<reponame>punitvara/daily-practice-DSA
import os
# directory = "Day "+i
parent_dir = "./"
# for i in range(100):
# os.makedirs()
for i in range(100):
directory = "Day "+str(i)
path = os.path.join(parent_dir, directory)
print (path)
if not os.path.exists(path):
os.mkdir(path)
|
StarcoderdataPython
|
5026721
|
import django.contrib.auth.urls
from django.urls import path, include
from . import views
urlpatterns = [
path(
'detail/<int:pk>',
views.ProjectDetailView.as_view(),
name='project_detail',
),
path(
'create/',
views.ProjectCreateView.as_view(),
name='project_create',
),
path(
'follow/<int:pk>',
views.follow_project,
name='project_follow',
),
path(
'unfollow/<int:pk>',
views.unfollow_project,
name='project_unfollow',
),
path(
'update/<int:pk>',
views.ProjectUpdateView.as_view(),
name='project_update',
),
path(
'rss_update/<int:pk>',
views.ProjectRSSUploadView.as_view(),
name='project_rss_upload',
),
path(
'list/user',
views.UserProjectListView.as_view(),
name='user_project_list',
),
path('list/following',
views.UserProjectsFollowedListView.as_view(),
name='user_project_following',
),
path('dictionary/list/<int:project_pk>',
views.ProjectDictionaryListView.as_view(),
name='project_dictionary_list',
),
]
|
StarcoderdataPython
|
1725100
|
from __future__ import absolute_import
import os
import sys
import numpy as np
from my_lib import Object
from my_lib import Object2
from my_lib import Object3
from third_party import lib1
from third_party import lib2
from third_party import lib3
from third_party import lib4
from third_party import lib5
from third_party import lib6
from third_party import lib7
from third_party import lib8
from third_party import lib9
from third_party import lib10
from third_party import lib11
from third_party import lib12
from third_party import lib13
from third_party import lib14
from third_party import lib15
print("Hey")
print("yo")
|
StarcoderdataPython
|
9764842
|
from __future__ import absolute_import, division, unicode_literals
locations = {
'urls_file': 'https://raw.githubusercontent.com/stephensolis/kameris-experiments/master/files.yml' # NOQA
}
|
StarcoderdataPython
|
375468
|
<filename>translate_description.py
#!/usr/bin/env python3
from json import dump, load
from os.path import isfile
from subprocess import run
import sys
class colors:
BOLD = "\033[1m"
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
END = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def save(obj, f):
    with open(f, "w") as out:
        dump(obj, out, sort_keys=True, indent=4)
def translate(translations, key, move):
temp_file = "/tmp/" + key
temp = open(temp_file, "w")
print("# Übersetzung oben eingeben.",
"# Ohne Eingabe wird die Übersetzung abgebrochen.",
"# ",
"# ================================================================================",
"# ID: %s" % key,
"# TITLE: %s" % move["name"],
"# CLASSES: %s" % ", ".join(move["classes"]) if "classes" in move else "All classes",
"# -- Zu Übersetzen ---------------------------------------------------------------",
"# %s" % move["description"].replace("\n", "\n# "),
"# --------------------------------------------------------------------------------",
"# ================================================================================",
sep="\n",
file=temp)
temp.flush()
temp.close()
proc = run(["$VISUAL %s" % temp_file], shell=True)
if proc.returncode == 0:
lines = []
temp = open(temp_file)
for line in temp.readlines():
if not line.startswith("#"):
line = line.strip()
print(line)
lines.append(line)
temp.close()
if len(lines) > 0 and lines[0] != "":
translations[key] = "\n".join(lines)
return True
return False
data_file = open("data.json", "r")
data = load(data_file)
translations = {}
if isfile("description_translations.json"):
in_file = open("description_translations.json", "r")
translations = load(in_file)
out_file = "description_translations.json"
# Translate basic moves
moves = data["basic_moves"]
for move in moves:
if not move["key"] in translations:
success = translate(translations, move["key"], move)
save(translations, out_file)
if not success:
sys.exit()
# Translate special moves
moves = data["special_moves"]
for move in moves:
if not move["key"] in translations:
success = translate(translations, move["key"], move)
save(translations, out_file)
if not success:
sys.exit()
save(translations, out_file)
# Reload moves to fix duplication bug
translations = {}
if isfile("description_translations.json"):
in_file = open("description_translations.json", "r")
translations = load(in_file)
# Translate all remaining moves
moves = data["moves"]
for key, move in moves.items():
if not key in translations:
success = translate(translations, key, move)
save(translations, out_file)
if not success:
sys.exit()
|
StarcoderdataPython
|
11379274
|
# coding:utf-8
import os
import json
from PIL import Image
from django.core.paginator import Paginator
from django.contrib import messages
from django.contrib.auth import login as auth_login
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import get_current_site
from django.conf import settings as django_settings
from django.utils.http import is_safe_url
from django.shortcuts import render, redirect, get_object_or_404, resolve_url
from django.http import HttpResponseRedirect, HttpResponse
from django.template.response import TemplateResponse
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from depotwork.decorators import ajax_required
from depotwork.core.forms import ProfileForm, ChangePasswordForm
from depotwork.feeds.views import feeds
from depotwork.feeds.models import Feed
from depotwork.feeds.views import FEEDS_NUM_PAGES
from depotwork.settings import REDIRECT_FIELD_NAME
def home(request):
if request.user.is_authenticated():
return feeds(request)
else:
return render(request, 'core/cover.html', {'next': request.REQUEST.get('next')})
@sensitive_post_parameters()
@csrf_protect
@never_cache
def login(request, template_name='registration/login.html',
redirect_field_name=REDIRECT_FIELD_NAME,
authentication_form=AuthenticationForm,
current_app=None, extra_context=None):
"""
Displays the login form and handles the login action.
"""
redirect_to = request.REQUEST.get(redirect_field_name, '')
if request.user.is_authenticated():
redirect_to = request.REQUEST.get(redirect_field_name, '')
if request.method == "POST":
form = authentication_form(request, data=request.POST)
if form.is_valid():
# Ensure the user-originating redirection url is safe.
if not is_safe_url(url=redirect_to, host=request.get_host()):
redirect_to = resolve_url(django_settings.LOGIN_REDIRECT_URL)
# Okay, security check complete. Log the user in.
auth_login(request, form.get_user())
return HttpResponseRedirect(redirect_to)
else:
form = authentication_form(request)
current_site = get_current_site(request)
context = {
'form': form,
redirect_field_name: redirect_to,
'site': current_site,
'site_name': current_site.name,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context,
current_app=current_app)
@login_required
def settings(request):
return render(request, 'core/settings.html', )
@login_required
@ajax_required
def update_avatar(request):
    response_data = []
    if not request.user.is_authenticated():
        # list.append() returns None, so build the payload first and then serialize it
        response_data.append({'status': 'fail'})
        return HttpResponse(json.dumps(response_data), content_type='application/json')
profile_pictures = django_settings.MEDIA_ROOT + '/profile_pictures/'
avatar_image_width = django_settings.AVATAR_IMAGE_WIDTH
if not os.path.exists(profile_pictures):
os.makedirs(profile_pictures)
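    # save the upload to MEDIA_ROOT/profile_pictures/<username>.jpg, then downscale
    # it with PIL so the width never exceeds AVATAR_IMAGE_WIDTH (the height is scaled
    # proportionally to keep the aspect ratio)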
f = request.FILES['avatar']
filename = profile_pictures + request.user.username + '.jpg'
with open(filename, 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
im = Image.open(filename)
width, height = im.size
if width > avatar_image_width:
new_width = avatar_image_width
new_height = (height * avatar_image_width) / width
new_size = new_width, new_height
im.thumbnail(new_size, Image.ANTIALIAS)
im.save(filename)
update_info = {}
update_info['status'] = "OK"
update_info['url'] = django_settings.MEDIA_URL + '/profile_pictures/' + request.user.username + '.jpg'
update_info['message'] = u'上传成功'
response_data.append(update_info)
return HttpResponse(json.dumps(response_data), content_type="application/json")
@login_required
@ajax_required
def update_profile(request):
response_data = {}
    # read the submitted data and write it to the database
return HttpResponse(json.dumps(response_data), content_type='application/json')
@login_required
def password(request):
user = request.user
if request.method == 'POST':
form = ChangePasswordForm(request.POST)
if form.is_valid():
new_password = form.cleaned_data.get('new_password')
            user.set_password(new_password)
user.save()
messages.add_message(request, messages.SUCCESS, 'Your password were successfully changed.')
else:
form = ChangePasswordForm(instance=user)
return render(request, 'core/password.html', {'form': form})
|
StarcoderdataPython
|
11221636
|
<reponame>Practical-Formal-Methods/clam-racetrack
#!/usr/bin/env python3.7
# Copyright 2019, Gurobi Optimization, LLC
# In this example we show the use of general constraints for modeling
# some common expressions. We use as an example a SAT-problem where we
# want to see if it is possible to satisfy at least four (or all) clauses
# of the logical for
#
# L = (x0 or ~x1 or x2) and (x1 or ~x2 or x3) and
# (x2 or ~x3 or x0) and (x3 or ~x0 or x1) and
# (~x0 or ~x1 or x2) and (~x1 or ~x2 or x3) and
# (~x2 or ~x3 or x0) and (~x3 or ~x0 or x1)
#
# We do this by introducing two variables for each literal (itself and its
# negated value), a variable for each clause, and then two
# variables for indicating if we can satisfy four, and another to identify
# the minimum of the clauses (so if it is one, we can satisfy all clauses)
# and put these two variables in the objective.
# i.e. the Objective function will be
#
# maximize Obj0 + Obj1
#
# Obj0 = MIN(Clause1, ... , Clause8)
# Obj1 = 1 -> Clause1 + ... + Clause8 >= 4
#
# thus, the objective value will be two if and only if we can satisfy all
# clauses; one if and only if at least four clauses can be satisfied, and
# zero otherwise.
import sys
import gurobipy as gp
from gurobipy import GRB
try:
NLITERALS = 4
n = NLITERALS
# Example data:
# e.g. {0, n+1, 2} means clause (x0 or ~x1 or x2)
Clauses = [[ 0, n+1, 2],
[ 1, n+2, 3],
[ 2, n+3, 0],
[ 3, n+0, 1],
[n+0, n+1, 2],
[n+1, n+2, 3],
[n+2, n+3, 0],
[n+3, n+0, 1]]
# Create a new model
model = gp.Model("Genconstr")
# initialize decision variables and objective
Lit = model.addVars(NLITERALS, vtype=GRB.BINARY, name="X")
NotLit = model.addVars(NLITERALS, vtype=GRB.BINARY, name="NotX")
Cla = model.addVars(len(Clauses), vtype=GRB.BINARY, name="Clause")
Obj0 = model.addVar(vtype=GRB.BINARY, name="Obj0")
Obj1 = model.addVar(vtype=GRB.BINARY, name="Obj1")
# Link Xi and notXi
model.addConstrs((Lit[i] + NotLit[i] == 1.0 for i in range(NLITERALS)),
name="CNSTR_X")
# Link clauses and literals
for i, c in enumerate(Clauses):
clause = []
for l in c:
if l >= n:
clause.append(NotLit[l-n])
else:
clause.append(Lit[l])
model.addConstr(Cla[i] == gp.or_(clause), "CNSTR_Clause" + str(i))
# Link objs with clauses
model.addConstr(Obj0 == gp.min_(Cla), name="CNSTR_Obj0")
model.addConstr((Obj1 == 1) >> (Cla.sum() >= 4.0), name="CNSTR_Obj1")
# Set optimization objective
model.setObjective(Obj0 + Obj1, GRB.MAXIMIZE)
# Save problem
model.write("genconstr.mps")
model.write("genconstr.lp")
# Optimize
model.optimize()
# Status checking
status = model.getAttr(GRB.Attr.Status)
if status in (GRB.INF_OR_UNBD, GRB.INFEASIBLE, GRB.UNBOUNDED):
print("The model cannot be solved because it is infeasible or "
"unbounded")
sys.exit(1)
if status != GRB.OPTIMAL:
print("Optimization was stopped with status ", status)
sys.exit(1)
# Print result
objval = model.getAttr(GRB.Attr.ObjVal)
if objval > 1.9:
print("Logical expression is satisfiable")
elif objval > 0.9:
print("At least four clauses can be satisfied")
else:
print("Not even three clauses can be satisfied")
except gp.GurobiError as e:
print('Error code ' + str(e.errno) + ": " + str(e))
except AttributeError:
print('Encountered an attribute error')
|
StarcoderdataPython
|
130005
|
<gh_stars>0
from datetime import datetime
import matplotlib.pyplot as plt
import pandas as pd
import pvlib
latitude = -23.313602
longitude = -46.221382
altitude = 655
tmz = 'America/Sao_Paulo'
temperatura = 20
pressao = 94000
agora = datetime.now()
dia = agora.today().day
mes = agora.today().month
ano = agora.today().year
hora = agora.today().hour
minuto = agora.today().minute
segundo = agora.today().second
data_inicio = datetime(ano, mes, dia, 0, 0, 0)
data_fim = datetime(ano, mes, dia, 23, 59, 59)
datas = pd.date_range(start=data_inicio, end=data_fim, freq='1Min', tz=tmz)
posicao_sol = pvlib.solarposition.get_solarposition(
datas,
latitude,
longitude,
altitude,
temperature=temperatura,
pressure=pressao
)
posicao_sol.plot()
dt = pd.DatetimeIndex([agora.isoformat()])
posicao_sol_atual = pvlib.solarposition.get_solarposition(
dt,
latitude,
longitude,
altitude,
temperature=temperatura,
pressure=pressao,
tz=tmz)
plt.scatter(posicao_sol_atual.index.values[0],
posicao_sol_atual['elevation'][0], linewidths=20, c='y')
plt.show()
|
StarcoderdataPython
|
1746097
|
import torch
import math
from Net import ActorCritic
from Utils import buffer
class PPOAgent():
def __init__(self, state_dim, action_dim, gamma, std, eps_clip, kepoch, lr, device):
self.gamma = gamma
self.std = std
self.eps_clip = eps_clip
self.kepoch = kepoch
self.device = device
self.declare_net(state_dim, action_dim)
self.opt = torch.optim.Adam(self.net.parameters(), lr=lr)
self.loss = torch.nn.MSELoss()
self.buffer = buffer()
def declare_net(self, state_dim, action_dim):
        self.net = ActorCritic(state_dim, action_dim, self.std).to(self.device)
        self.old_net = ActorCritic(state_dim, action_dim, self.std).to(self.device)
        self.old_net.load_state_dict(self.net.state_dict())
@torch.no_grad()
def choose_action(self, state):
state = torch.FloatTensor(state).view(1, -1).to(self.device)
mu, cov_mat, _ = self.old_net(state)
cov_mat = cov_mat.to(self.device)
dist = self.old_net.dist(mu, cov_mat)
action = dist.sample()
log_prob = dist.log_prob(action)
self.buffer.bs.append(state)
self.buffer.ba.append(action)
self.buffer.blogp.append(log_prob)
return action.cpu().data.detach().flatten()
def act(self, old_state, old_action):
mu, cov_mat, state_value = self.net(old_state)
cov_mat = cov_mat.to(self.device)
dist = self.net.dist(mu, cov_mat)
action_prob = dist.log_prob(old_action)
entropy = dist.entropy()
return action_prob, state_value, entropy
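    # PPO update: recompute log-probs for the stored rollout, form the probability
    # ratio against the old policy, and optimize the clipped surrogate objective
    # plus a value-regression term and an entropy bonus for kepoch epochs.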
def learn(self):
self.net.train()
bs, ba, br, blogp, bd = self.buffer.get_atr()
# Monte Carlo estimate of rewards:
rewards = []
discounted_reward = 0
for reward, is_terminal in zip(reversed(br), reversed(bd)):
if is_terminal:
discounted_reward = 0
discounted_reward = reward + (self.gamma * discounted_reward)
rewards.insert(0, discounted_reward)
# Normalizing the rewards:
rewards = torch.tensor(rewards, dtype=torch.float32).to(self.device).view((-1,1))
rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5)
old_state = torch.stack(bs).to(self.device).detach()
old_action = torch.stack(ba).to(self.device).detach()
old_logp = torch.stack(blogp).to(self.device).detach()
for e in range(self.kepoch):
new_logp, state_value, entropy = self.act(old_state, old_action)
state_value = state_value.squeeze(2)
ratio = torch.exp(new_logp - old_logp.detach())
advs = rewards - state_value.detach()
            # clipped surrogate objective: both terms scale the same advantage estimate
            surr1 = ratio * advs
            surr2 = torch.clamp(ratio, 1 - self.eps_clip, 1 + self.eps_clip) * advs
            # the value loss regresses the critic onto the discounted returns,
            # and the entropy bonus encourages exploration
            loss = -torch.min(surr1, surr2) + 0.5 * self.loss(state_value, rewards) - 0.01 * entropy
self.opt.zero_grad()
loss.mean().backward()
self.opt.step()
self.old_net.load_state_dict(self.net.state_dict())
|
StarcoderdataPython
|
236041
|
<reponame>RetailMeNotSandbox/dart
from dart.model.base import BaseModel, dictable
@dictable
class ApiKey(BaseModel):
def __init__(self, id, user_id, api_key, api_secret):
"""
:type user_id: str
:type api_key: str
:type api_secret: str
"""
self.id = id
self.user_id = user_id
self.api_key = api_key
self.api_secret = api_secret
|
StarcoderdataPython
|
4826556
|
#! /usr/bin/env python3
# We need this to define our package
from setuptools import setup
# We use this to find and deploy our unittests
import unittest
import os
# We need to know the version to backfill some dependencies
from sys import version_info, exit
# Define our list of installation dependencies
DEPENDS = ["pyjwt", "snowflake-connector-python", "furl", "cryptography"]
# If we're at version less than 3.4 - fail
if version_info[0] < 3 or version_info[1] < 4:
exit("Unsupported version of Python. Minimum version for the Ingest SDK is 3.4")
# If we're at version 3.4, backfill the typing library
elif version_info[1] == 4:
DEPENDS.append("typing")
# Python 3.5.0 and 3.5.1 have incompatible typing modules. Use typing_extensions instead.
elif version_info[1] == 5 and version_info[2] < 2:
DEPENDS.append("typing_extensions")
here = os.path.abspath(os.path.dirname(__file__))
def test_suite():
"""
Defines the test suite for the snowflake ingest SDK
"""
loader = unittest.TestLoader()
return loader.discover("tests", pattern="test_*.py")
about = {}
with open(os.path.join(here, 'snowflake', 'ingest', 'version.py'),
mode='r', encoding='utf-8') as f:
exec(f.read(), about)
__version__ = about['__version__']
if 'SF_BUILD_NUMBER' in os.environ:
__version__ += ('.' + str(os.environ['SF_BUILD_NUMBER']))
with open(os.path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='snowflake_ingest',
version=__version__,
description='Official SnowflakeDB File Ingest SDK',
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
url='https://www.snowflake.net',
packages=['snowflake.ingest',
'snowflake.ingest.utils'],
license='Apache',
keywords="snowflake ingest sdk copy loading",
package_data={
'snowflake.ingest':['*.rst', 'LICENSE']
},
# From here we describe the package classifiers
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Topic :: Database"
],
# Now we describe the dependencies
install_requires=DEPENDS,
# At last we set the test suite
test_suite="setup.test_suite"
)
|
StarcoderdataPython
|
251974
|
import pandas as pd
import numpy as np
import pylab as pl
import seaborn as sns
from sklearn import neighbors
from scipy.cluster import hierarchy
from scipy.spatial import distance
from scipy.spatial.distance import squareform,pdist
from scipy.stats import spearmanr
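# Metrics for how well a low-dimensional embedding preserves the neighborhood and
# cluster structure of the original point cloud: 1NN classification accuracy,
# k-nearest-neighbor overlap, and cophenetic (hierarchical clustering) distances.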
def one_nn_class_baseline(X,Y):
    ''' given a pointcloud X and labels Y, compute the classification accuracy of
    a 1NN-classifier
    '''
    one_nn = neighbors.kneighbors_graph(X,2)
    inds = np.zeros(len(X),dtype=int)
    for i in range(len(X)):
        inds[i] = [ind for ind in one_nn[i].indices if ind != i][0]
    preds = Y[inds]
    return 1.0*sum(preds==Y) / len(Y)
def one_nn_baseline(X,Y):
''' given two clouds of corresponding points, X and Y, report the fraction
of nearest-neighbors preserved.
Algorithm:
- each pair of corresponding points is labeled by an index i
- for each point in X, find its nearest neighbor, labeled j
- for the corresponding point in Y, find its nearest neighbor, labeled j'
- if j==j', then the nearest neighbors of i are preserved
- return number of preserved neighbors / the total number possible'''
# 2, since self is counted as a neighbor by the neighbors module
one_nn_X = neighbors.kneighbors_graph(X,2)
one_nn_Y = neighbors.kneighbors_graph(Y,2)
sames = 0
for i in range(len(X)):
neighbor_X = one_nn_X[i].indices[one_nn_X[i].indices!=i][0]
neighbor_Y = one_nn_Y[i].indices[one_nn_Y[i].indices!=i][0]
if neighbor_X == neighbor_Y:
sames+=1
same_frac = 1.0*sames / len(X)
return same_frac
def knn_baseline(X,Y,k=5):
''' generalization of the one_nn_baseline algorithm...
given two clouds of corresponding points, X and Y, and a parameter k,
compute the fraction of each k-nearest-neighborhood conserved
return the overall fraction of neighborhoods preserved, as well as the
fraction of each local neighborhood preserved
'''
k = k+1 # since self is counted as a neighbor in the kneighbors graph
knn_X = neighbors.kneighbors_graph(X,k)
knn_Y = neighbors.kneighbors_graph(Y,k)
sames = np.zeros(len(X))
for i in range(len(X)):
neighbors_X = set(knn_X[i].indices[knn_X[i].indices!=i])
neighbors_Y = set(knn_Y[i].indices[knn_Y[i].indices!=i])
sames[i] = len(neighbors_X.intersection(neighbors_Y))
same_frac = 1.0*sum(sames) / (len(X)*(k-1))
return same_frac, sames
def knn_baseline_curve(X,Y,ks=range(1,50)):
''' slightly less wasteful way to sweep over a range of ks
when computing knn_baseline, if computing the neighbors graph is expensive'''
max_k = max(ks)+1 # since self is counted as a neighbor in the kneighbors graph
knn_X = neighbors.kneighbors_graph(X,max_k)
knn_Y = neighbors.kneighbors_graph(Y,max_k)
sames = np.zeros(len(ks))
for ind,k in enumerate(ks):
for i in range(len(X)):
neighbors_X = set(knn_X[i].indices[knn_X[i].indices!=i][:k])
neighbors_Y = set(knn_Y[i].indices[knn_Y[i].indices!=i][:k])
sames[ind] += len(neighbors_X.intersection(neighbors_Y))
sames[ind] /= (len(X)*(k))
return sames
def plot_1nn_classification_comparison():
fig, ax = pl.subplots()
barlist = ax.bar(range(len(vec)),vec)
pl.hlines(one_nn_class_baseline(X,Y),0,len(vec),linestyles='--')
pl.xlabel('Algorithm')
pl.ylabel('1NN Classification Accuracy')
pl.title('1NN Classification in Low-Dimensional Embeddings')
baseline_names = ['PCA','Isomap','LLE']
pl.xticks(range(len(vec)), baseline_names + method_names,rotation=30)
#pl.ylim(0.25,1.0)
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., height-0.075, '{0:.2f}'.format(height),
ha='center', va='bottom',color='white')
autolabel(barlist)
for i in range(len(baseline_names)):
barlist[i].set_color('gray')
for i in range(len(baseline_names),len(vec)):
barlist[i].set_color('blue')
pl.savefig('../figures/embedding-comparison.pdf')
#def plot_neighborhood_preservation()
# metrics of cluster preservation
linkages = ['single','complete','ward','average','weighted','centroid','median']
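# Cophenetic distances are the merge heights at which two points first join the same
# cluster in a hierarchical clustering; rank-correlating them (Spearman) between the
# original data and the embedding measures how well cluster structure is preserved.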
def pairwise_cophenetic_distances(X,linkage='single'):
return hierarchy.cophenet(hierarchy.linkage(X,linkage))
def cophenetic_distance_preservation(orig,embedding,linkage='single'):
orig_d = pairwise_cophenetic_distances(orig,linkage)
embed_d = pairwise_cophenetic_distances(embedding,linkage)
return spearmanr(orig_d,embed_d)
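# Usage sketch (added illustration, not part of the original analysis script): exercises the
# neighborhood- and cluster-preservation metrics above on a random point cloud and a noisy
# 2-D copy of it. X_demo / Y_demo are made-up names; assumes numpy (np), sklearn's neighbors,
# scipy's hierarchy and spearmanr are already imported above, as the functions here require.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.randn(100, 10)                        # original high-dimensional points
    Y_demo = X_demo[:, :2] + 0.01 * rng.randn(100, 2)  # crude low-dimensional "embedding"
    print('1-NN neighborhoods preserved:', one_nn_baseline(X_demo, Y_demo))
    print('5-NN neighborhoods preserved:', knn_baseline(X_demo, Y_demo, k=5)[0])
    print('cophenetic correlation:', cophenetic_distance_preservation(X_demo, Y_demo, 'average'))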
|
StarcoderdataPython
|
103620
|
# -*- Mode:Python;indent-tabs-mode:nil; -*-
#
# File: psaExceptions.py
# Created: 05/09/2014
# Author: BSC
#
# Description:
# Custom exception class to manage errors in the PSC
#
class psaExceptions( object ):
class confRetrievalFailed( Exception ):
pass
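# Usage sketch (added illustration, not part of the original module): the nested exception
# type is raised and caught like any other exception.
if __name__ == "__main__":
    try:
        raise psaExceptions.confRetrievalFailed("could not retrieve the configuration")
    except psaExceptions.confRetrievalFailed as err:
        print("configuration retrieval failed:", err)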
|
StarcoderdataPython
|
9684143
|
from __future__ import absolute_import, division, print_function
import datetime
import os
import gzip
import json
from contextlib import contextmanager
import numpy as np
from odo.backends.json import json_dumps
from odo.utils import tmpfile, ignoring
from odo import odo, discover, JSONLines, resource, JSON, convert, append, drop
from odo.temp import Temp, _Temp
from datashape import dshape
@contextmanager
def json_file(data):
with tmpfile('.json') as fn:
with open(fn, 'w') as f:
json.dump(data, f, default=json_dumps)
yield fn
@contextmanager
def jsonlines_file(data):
with tmpfile('.json') as fn:
with open(fn, 'w') as f:
for item in data:
json.dump(item, f, default=json_dumps)
f.write('\n')
yield fn
dat = [{'name': 'Alice', 'amount': 100},
{'name': 'Bob', 'amount': 200}]
def test_discover_json():
with json_file(dat) as fn:
j = JSON(fn)
assert discover(j) == discover(dat)
def test_discover_jsonlines():
with jsonlines_file(dat) as fn:
j = JSONLines(fn)
assert discover(j) == discover(dat)
def test_discover_json_only_includes_datetimes_not_dates():
data = [{'name': 'Alice', 'dt': datetime.date(2002, 2, 2)},
{'name': 'Bob', 'dt': datetime.date(2000, 1, 1)}]
with json_file(data) as fn:
j = JSON(fn)
assert discover(j) == dshape('2 * {dt: datetime, name: string }')
def test_resource():
with tmpfile('json') as fn:
assert isinstance(resource('jsonlines://' + fn), JSONLines)
assert isinstance(resource('json://' + fn), JSON)
assert isinstance(resource(fn, expected_dshape=dshape('var * {a: int}')),
JSONLines)
def test_resource_guessing():
with json_file(dat) as fn:
assert isinstance(resource(fn), JSON)
with jsonlines_file(dat) as fn:
assert isinstance(resource(fn), JSONLines)
def test_append_jsonlines():
with tmpfile('json') as fn:
j = JSONLines(fn)
append(j, dat)
with open(j.path) as f:
lines = f.readlines()
assert len(lines) == 2
assert 'Alice' in lines[0]
assert 'Bob' in lines[1]
def test_append_json():
with tmpfile('json') as fn:
j = JSON(fn)
append(j, dat)
with open(j.path) as f:
lines = f.readlines()
assert len(lines) == 1
assert 'Alice' in lines[0]
assert 'Bob' in lines[0]
def test_convert_json_list():
with json_file(dat) as fn:
j = JSON(fn)
assert convert(list, j) == dat
def test_convert_jsonlines():
with jsonlines_file(dat) as fn:
j = JSONLines(fn)
assert convert(list, j) == dat
def test_tuples_to_json():
ds = dshape('var * {a: int, b: int}')
with tmpfile('json') as fn:
j = JSON(fn)
append(j, [(1, 2), (10, 20)], dshape=ds)
with open(fn) as f:
assert '"a": 1' in f.read()
with tmpfile('json') as fn:
j = JSONLines(fn)
append(j, [(1, 2), (10, 20)], dshape=ds)
with open(fn) as f:
assert '"a": 1' in f.read()
def test_datetimes():
from odo import into
import numpy as np
data = [{'a': 1, 'dt': datetime.datetime(2001, 1, 1)},
{'a': 2, 'dt': datetime.datetime(2002, 2, 2)}]
with tmpfile('json') as fn:
j = JSONLines(fn)
append(j, data)
assert str(into(np.ndarray, j)) == str(into(np.ndarray, data))
def test_json_encoder():
result = json.dumps([1, datetime.datetime(2000, 1, 1, 12, 30, 0)],
default=json_dumps)
assert result == '[1, "2000-01-01T12:30:00Z"]'
assert json.loads(result) == [1, "2000-01-01T12:30:00Z"]
def test_empty_line():
text = '{"a": 1}\n{"a": 2}\n\n' # extra endline
with tmpfile('.json') as fn:
with open(fn, 'w') as f:
f.write(text)
j = JSONLines(fn)
assert len(convert(list, j)) == 2
def test_multiple_jsonlines():
a, b = '_test_a1.json', '_test_a2.json'
try:
with ignoring(OSError):
os.remove(a)
with ignoring(OSError):
os.remove(b)
with open(a, 'w') as f:
json.dump(dat, f)
        with open(b, 'w') as f:
json.dump(dat, f)
r = resource('_test_a*.json')
result = convert(list, r)
assert len(result) == len(dat) * 2
finally:
with ignoring(OSError):
os.remove(a)
with ignoring(OSError):
os.remove(b)
def test_read_gzip_lines():
with tmpfile('json.gz') as fn:
f = gzip.open(fn, 'wb')
for item in dat:
s = json.dumps(item).encode('utf-8')
f.write(s)
f.write(b'\n')
f.close()
js = JSONLines(fn)
assert convert(list, js) == dat
def test_read_gzip():
with tmpfile('json.gz') as fn:
f = gzip.open(fn, 'wb')
s = json.dumps(dat).encode('utf-8')
f.write(s)
f.close()
js = JSON(fn)
assert convert(list, js) == dat
def test_write_gzip_lines():
with tmpfile('json.gz') as fn:
j = JSONLines(fn)
append(j, dat)
f = gzip.open(fn)
line = next(f)
f.close()
assert line.decode('utf-8').strip() == str(json.dumps(dat[0]))
def test_write_gzip():
with tmpfile('json.gz') as fn:
j = JSON(fn)
append(j, dat)
f = gzip.open(fn)
text = f.read()
f.close()
assert text.decode('utf-8').strip() == str(json.dumps(dat))
assert isinstance(resource(fn), (JSON, JSONLines))
def test_resource_gzip():
with tmpfile('json.gz') as fn:
assert isinstance(resource(fn), (JSON, JSONLines))
assert isinstance(resource('json://' + fn), (JSON, JSONLines))
assert isinstance(resource('jsonlines://' + fn), (JSON, JSONLines))
    with tmpfile('jsonlines.gz') as fn:
assert isinstance(resource('jsonlines://' + fn), (JSON, JSONLines))
def test_convert_to_temp_json():
js = convert(Temp(JSON), [1, 2, 3])
assert isinstance(js, JSON)
assert isinstance(js, _Temp)
assert convert(list, js) == [1, 2, 3]
def test_drop():
with tmpfile('json') as fn:
js = JSON(fn)
append(js, [1, 2, 3])
assert os.path.exists(fn)
drop(js)
assert not os.path.exists(fn)
def test_missing_to_csv():
data = [dict(a=1, b=2), dict(a=2, c=4)]
with tmpfile('.json') as fn:
js = JSON(fn)
js = odo(data, js)
with tmpfile('.csv') as csvf:
csv = odo(js, csvf)
with open(csv.path, 'rt') as f:
result = f.read()
expected = 'a,b,c\n1,2.0,\n2,,4.0\n'
assert result == expected
|
StarcoderdataPython
|
1753503
|
<filename>modeling_script.py<gh_stars>0
import os
import glob
import pandas as pd
import argparse
def args():
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--folder", required=True,
help="Enter into the folder")
parser.add_argument("-omega", "--omega_path", required=True,
help="Enter the OMEGA path")
parser.add_argument("-rocs", "--rocs_path", required=True,
help="Enter the ROCS path")
parser.add_argument("-temp_lig", "--template_ligand_path", required=True,
help="Enter the template ligand PDB path")
    parser.add_argument("-mol2params", required=True,
                        help="Enter the mol2params script path")
return parser.parse_args()
def conf_gen(smi, maxconfs=2000):
smi_prefix = os.path.splitext(os.path.basename(smi))[0]
cmd = f'{OMEGA} -in {smi} -out OMEGA/{smi_prefix}_omega.sdf \
-prefix OMEGA/{smi_prefix}_omega -warts true \
-maxconfs {maxconfs} -strict false'
os.system(cmd)
# ligand alignment using ROCS openeye
def lig_alignment(conformer, template_database, rocs_maxconfs_output=100):
sdf_prefix = os.path.basename(os.path.splitext(conformer)[0]).split('_')[0]
for template in template_database:
template_id = "_".join(os.path.basename(template).split("_")[0:3])
cmd = f'{ROCS} -dbase {conformer} -query {template} \
-prefix {sdf_prefix}_{template_id}_rocs -oformat mol2 \
-maxconfs {rocs_maxconfs_output} -outputquery false \
-qconflabel title -outputdir ROCS/'
os.system(cmd)
# Combine each report file into single file
def combine_report_files(report_file):
data = []
for rpt in report_file:
target_template_name = os.path.basename(rpt).replace("_1.rpt", "")
rpt = pd.read_csv(rpt, sep='\t')
rpt = rpt.loc[:, ~rpt.columns.str.match('Unnamed')]
rpt["ShapeQuery"] = target_template_name
data.append(rpt)
data = pd.concat(data)
data = data.sort_values(by=['TanimotoCombo'], ascending=False)
data["Rank"] = range(1, data.shape[0]+1)
data.to_csv('ROCS/single_report_file_sorted.csv', index=False)
data_100 = data.iloc[:100, :][["Name", "ShapeQuery"]]
data_100.to_csv("ROCS/top_100.txt", index=False)
# Separate the top 100 conformer hits from ROCS alignment sdf files
def sep_hits_from_rocs_sdf_file(top_100_hits_txt_path):
def extract_mol2_conf(file, conf_labels):
with open(f"ROCS/{file}_hits_1.mol2", "r") as f:
f = f.read().split("@<TRIPOS>MOLECULE\n")
f = {v.split("\n")[0]:"@<TRIPOS>MOLECULE\n" + v for v in f}
for label in conf_labels:
with open(f"top_100_conf/{label}_{file}_hits.mol2", "w") as fwr:
fwr.write(f[label])
fwr.close()
data = pd.read_csv(top_100_hits_txt_path)
data = data.groupby("ShapeQuery")["Name"].apply(list).to_dict()
for template, conformers in data.items():
extract_mol2_conf(template, conformers)
# Convert the aligned mol2 conformers to PDB/PARAMS files for Rosetta input
def sdftomol2(mol2params, top_hits_sdf_path):
for file in top_hits_sdf_path:
prefix = file.split("/")[-1].split(".")[0]
cmd = f'python {mol2params} -s {file} --prefix=mol2params/{prefix}'
os.system(cmd)
if __name__ == "__main__":
args = args()
OMEGA = args.omega_path
ROCS = args.rocs_path
MOL2PARAMS = args.mol2params
template_lig_library = args.template_ligand_path
template_lig_library = glob.glob(f"{template_lig_library}/*.pdb")
os.chdir(args.folder)
os.mkdir("OMEGA")
os.mkdir("ROCS")
os.mkdir("top_100_conf")
os.mkdir("mol2params")
smiles = glob.glob("*.smi")[0]
conf_gen(smiles, maxconfs=1000)
sdf = glob.glob("OMEGA/*.sdf")[0]
lig_alignment(sdf, template_lig_library, rocs_maxconfs_output=30)
combine_report_files(glob.glob("ROCS/*.rpt"))
sep_hits_from_rocs_sdf_file("ROCS/top_100.txt")
sdftomol2(MOL2PARAMS, glob.glob("top_100_conf/*.mol2"))
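# Example invocation (a sketch; every path below is a placeholder, not a real installation):
#   python modeling_script.py -f ./ligand_folder \
#       -omega /path/to/omega2 -rocs /path/to/rocs \
#       -temp_lig /path/to/template_ligand_pdbs -mol2params /path/to/mol2params_script.py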
|
StarcoderdataPython
|
1808761
|
<reponame>seigot/tetris_game_tutorial
#for x in [1,2,3,4]:
print("for x in [1,2,3,4]:")
for x in [1,2,3,4]:
print (x)
|
StarcoderdataPython
|
1705076
|
<reponame>Rey092/SwipeApp
from django.contrib import admin
from src.estate.models import Advertisement, Complex
from src.users.models import Contact
admin.site.register(Advertisement)
admin.site.register(Complex)
|
StarcoderdataPython
|
1605135
|
from ... import UP, DOWN, LEFT, RIGHT, UP_2, DOWN_2, LEFT_2, RIGHT_2
class Movable:
move_up = UP
move_up_alt = UP_2
move_down = DOWN
move_down_alt = DOWN_2
move_left = LEFT
move_left_alt = LEFT_2
move_right = RIGHT
move_right_alt = RIGHT_2
lr_step = 1
ud_step = 1
wrap_height = None
wrap_width = None
bounded = False
def on_press(self, key):
top, left = self.top, self.left
height, width = self.height, self.width
bounded = self.bounded
if key == self.move_up or key == self.move_up_alt:
if not bounded or top > 0:
self.top -= self.ud_step
elif key == self.move_down or key == self.move_down_alt:
if not bounded or top + height < self.parent.height:
self.top += self.ud_step
elif key == self.move_left or key == self.move_left_alt:
if not bounded or left > 0:
self.left -= self.lr_step
elif key == self.move_right or key == self.move_right_alt:
if not bounded or left + width < self.parent.width:
self.left += self.lr_step
else:
return super().on_press(key)
if self.wrap_height:
self.top %= self.wrap_height
if self.wrap_width:
self.left %= self.wrap_width
return True
|
StarcoderdataPython
|
11329269
|
<gh_stars>0
a, b, c = input().split()
a, b, c = int(a), int(b), int(c)
divisible = 0
for i in range(a, b+1):
if i % c == 0:
divisible += 1
print(divisible)
|
StarcoderdataPython
|
214660
|
import re
import os
import pathlib
from xml.dom import minidom
from xml.sax.saxutils import escape
from xml.parsers.expat import ExpatError
class Transcribe():
COMMENT = '<!--TEMPLATE-->'
SECTION_START = '<div style="border: 1px solid #EBECF0; margin-bottom: 10px;">'
SECTION_END = '</div>'
RECORDED_HEADERS = ['content-type', 'cookie']
def __init__(self):
self._filename = './CTERA_' + str(os.getpid()) + '.html'
def transcribe(self, request, response=None):
self._append_content(self._get_content(request, response))
@classmethod
def _get_content(cls, request, response):
return ''.join(
[
Transcribe.SECTION_START,
cls._get_request_content(request),
cls._get_response_content(response) if response else '',
Transcribe.SECTION_END
]
)
@classmethod
def _get_request_content(cls, request):
request_content_array = [
'<p class="h4">' + request.method + ' ' + cls._parse_url(request.url) + '</p>'
]
# Request headers:
headers = cls._get_headers_array(request.headers)
if len(headers) > 0:
request_content_array.append("<br/><p class=\"h6\">Request headers:</p>")
for key, value in headers:
request_content_array.append('<p>' + key + ': ' + value + '</p>')
# Request body:
if request.body:
request_content_array.append("<br/><p class=\"h6\">Request body:</p>")
request_content_array.append('<pre>' + cls._prettify(request.body.decode('utf-8')) + '</pre>')
return ''.join(request_content_array)
@staticmethod
def _parse_url(url):
uri = url[re.search("https?://[^/]*", url).end():]
return uri[:-1] if uri.endswith('?') else uri
@classmethod
def _get_headers_array(cls, request_headers):
return [
(key, value) for key, value in request_headers.items() if key.lower() in cls.RECORDED_HEADERS
]
@classmethod
def _get_response_content(cls, response):
response_content_array = []
reply = response.read()
if reply:
response_content_array.append("<br/><p class=\"h6\">Response body:</p>")
response_content_array.append('<pre>' + cls._prettify(reply) + '</pre>')
return ''.join(response_content_array)
def _append_content(self, content):
header, footer = self._get_header_footer()
with open(self._filename, 'w', encoding='utf-8') as f:
f.write(header + content + Transcribe.COMMENT + footer)
def _get_header_footer(self):
filename = self._filename if os.path.exists(self._filename) else \
os.path.join(pathlib.Path(__file__).parent.absolute(), 'apidoc.template')
with open(filename, 'r', encoding='utf-8') as f:
content = f.read()
array = content.split(Transcribe.COMMENT)
return array[0], array[1]
@staticmethod
def _prettify(data):
try:
data = minidom.parseString(data).toprettyxml(indent=" ")
data = ''.join(data.split('\n', 1)[1:])
data = escape(data)
return data
except ExpatError:
return data
def transcribe(request, response=None):
Transcribe().transcribe(request, response)
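# Usage sketch (added illustration, not part of the original module): any object exposing
# ``method``, ``url``, ``headers`` and ``body`` can be transcribed, and the response only
# needs a ``read()`` method. Writing the HTML assumes the bundled 'apidoc.template' file
# sits next to this module; the fake objects below are made up for the demo.
if __name__ == '__main__':
    class _FakeRequest:
        method = 'GET'
        url = 'https://portal.example.com/api/status'
        headers = {'Content-Type': 'application/xml'}
        body = None

    class _FakeResponse:
        @staticmethod
        def read():
            return '<status>ok</status>'

    transcribe(_FakeRequest(), _FakeResponse())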
|
StarcoderdataPython
|
1731284
|
from Messenger import Messenger
msg = Messenger()
msg.send_error_message('Error message 301')
|
StarcoderdataPython
|
11316169
|
def getfield(glyph, key):
import re
nwl = re.compile('\r?\n')
field = re.compile(r'^\s*([\w\-]+)\s*:\s*(.+?)\s*$')
rawlist = nwl.split(glyph.comment)
fields = {}
    for line in rawlist:
        match = field.match(line)
        if match:  # skip blank or malformed lines in the comment
            (name, value) = match.group(1, 2)
            fields[name] = value
return fields[key]
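# Usage sketch (added illustration, not part of the original snippet): ``glyph`` is assumed to
# be a FontForge-style object whose ``comment`` attribute holds "name: value" lines.
if __name__ == '__main__':
    class _FakeGlyph:
        comment = "width: 512\r\nunicode: U+0041"

    print(getfield(_FakeGlyph(), 'unicode'))  # -> U+0041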
|
StarcoderdataPython
|
292213
|
<filename>telethon/tl/types/messages.py<gh_stars>0
"""File generated by TLObjects' generator. All changes will be ERASED"""
from ...tl.tlobject import TLObject
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
if TYPE_CHECKING:
from ...tl.types import TypeEncryptedFile, TypeChat, TypeStickerPack, TypeBotInlineResult, TypeFoundGif, TypeDocument, TypeInlineBotSwitchPM, TypeUser, TypeStickerSetCovered, TypeChatFull, TypeDialog, TypeStickerSet, TypeHighScore, TypeMessage
from ...tl.types.updates import TypeState
class AffectedHistory(TLObject):
CONSTRUCTOR_ID = 0xb45c69d1
SUBCLASS_OF_ID = 0x2c49c116
def __init__(self, pts, pts_count, offset):
"""
:param int pts:
:param int pts_count:
:param int offset:
Constructor for messages.AffectedHistory: Instance of AffectedHistory.
"""
self.pts = pts # type: int
self.pts_count = pts_count # type: int
self.offset = offset # type: int
def to_dict(self):
return {
'_': 'AffectedHistory',
'pts': self.pts,
'pts_count': self.pts_count,
'offset': self.offset
}
def __bytes__(self):
return b''.join((
b'\xd1i\\\xb4',
struct.pack('<i', self.pts),
struct.pack('<i', self.pts_count),
struct.pack('<i', self.offset),
))
@classmethod
def from_reader(cls, reader):
_pts = reader.read_int()
_pts_count = reader.read_int()
_offset = reader.read_int()
return cls(pts=_pts, pts_count=_pts_count, offset=_offset)
class AffectedMessages(TLObject):
CONSTRUCTOR_ID = 0x84d19185
SUBCLASS_OF_ID = 0xced3c06e
def __init__(self, pts, pts_count):
"""
:param int pts:
:param int pts_count:
Constructor for messages.AffectedMessages: Instance of AffectedMessages.
"""
self.pts = pts # type: int
self.pts_count = pts_count # type: int
def to_dict(self):
return {
'_': 'AffectedMessages',
'pts': self.pts,
'pts_count': self.pts_count
}
def __bytes__(self):
return b''.join((
b'\x85\x91\xd1\x84',
struct.pack('<i', self.pts),
struct.pack('<i', self.pts_count),
))
@classmethod
def from_reader(cls, reader):
_pts = reader.read_int()
_pts_count = reader.read_int()
return cls(pts=_pts, pts_count=_pts_count)
class AllStickers(TLObject):
CONSTRUCTOR_ID = 0xedfd405f
SUBCLASS_OF_ID = 0x45834829
def __init__(self, hash, sets):
"""
:param int hash:
:param List[TypeStickerSet] sets:
Constructor for messages.AllStickers: Instance of either AllStickersNotModified, AllStickers.
"""
self.hash = hash # type: int
self.sets = sets # type: List[TypeStickerSet]
def to_dict(self):
return {
'_': 'AllStickers',
'hash': self.hash,
'sets': [] if self.sets is None else [None if x is None else x.to_dict() for x in self.sets]
}
def __bytes__(self):
return b''.join((
b'_@\xfd\xed',
struct.pack('<i', self.hash),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.sets)),b''.join(bytes(x) for x in self.sets),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_int()
reader.read_int()
_sets = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_sets.append(_x)
return cls(hash=_hash, sets=_sets)
class AllStickersNotModified(TLObject):
CONSTRUCTOR_ID = 0xe86602c3
SUBCLASS_OF_ID = 0x45834829
def to_dict(self):
return {
'_': 'AllStickersNotModified'
}
def __bytes__(self):
return b''.join((
b'\xc3\x02f\xe8',
))
@classmethod
def from_reader(cls, reader):
return cls()
class ArchivedStickers(TLObject):
CONSTRUCTOR_ID = 0x4fcba9c8
SUBCLASS_OF_ID = 0x7296d771
def __init__(self, count, sets):
"""
:param int count:
:param List[TypeStickerSetCovered] sets:
Constructor for messages.ArchivedStickers: Instance of ArchivedStickers.
"""
self.count = count # type: int
self.sets = sets # type: List[TypeStickerSetCovered]
def to_dict(self):
return {
'_': 'ArchivedStickers',
'count': self.count,
'sets': [] if self.sets is None else [None if x is None else x.to_dict() for x in self.sets]
}
def __bytes__(self):
return b''.join((
b'\xc8\xa9\xcbO',
struct.pack('<i', self.count),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.sets)),b''.join(bytes(x) for x in self.sets),
))
@classmethod
def from_reader(cls, reader):
_count = reader.read_int()
reader.read_int()
_sets = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_sets.append(_x)
return cls(count=_count, sets=_sets)
class BotCallbackAnswer(TLObject):
CONSTRUCTOR_ID = 0x36585ea4
SUBCLASS_OF_ID = 0x6c4dd18c
def __init__(self, cache_time, alert=None, has_url=None, native_ui=None, message=None, url=None):
"""
:param int cache_time:
:param Optional[bool] alert:
:param Optional[bool] has_url:
:param Optional[bool] native_ui:
:param Optional[str] message:
:param Optional[str] url:
Constructor for messages.BotCallbackAnswer: Instance of BotCallbackAnswer.
"""
self.cache_time = cache_time # type: int
self.alert = alert # type: Optional[bool]
self.has_url = has_url # type: Optional[bool]
self.native_ui = native_ui # type: Optional[bool]
self.message = message # type: Optional[str]
self.url = url # type: Optional[str]
def to_dict(self):
return {
'_': 'BotCallbackAnswer',
'cache_time': self.cache_time,
'alert': self.alert,
'has_url': self.has_url,
'native_ui': self.native_ui,
'message': self.message,
'url': self.url
}
def __bytes__(self):
return b''.join((
b'\xa4^X6',
struct.pack('<I', (0 if self.alert is None or self.alert is False else 2) | (0 if self.has_url is None or self.has_url is False else 8) | (0 if self.native_ui is None or self.native_ui is False else 16) | (0 if self.message is None or self.message is False else 1) | (0 if self.url is None or self.url is False else 4)),
b'' if self.message is None or self.message is False else (self.serialize_bytes(self.message)),
b'' if self.url is None or self.url is False else (self.serialize_bytes(self.url)),
struct.pack('<i', self.cache_time),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_alert = bool(flags & 2)
_has_url = bool(flags & 8)
_native_ui = bool(flags & 16)
if flags & 1:
_message = reader.tgread_string()
else:
_message = None
if flags & 4:
_url = reader.tgread_string()
else:
_url = None
_cache_time = reader.read_int()
return cls(cache_time=_cache_time, alert=_alert, has_url=_has_url, native_ui=_native_ui, message=_message, url=_url)
class BotResults(TLObject):
CONSTRUCTOR_ID = 0x947ca848
SUBCLASS_OF_ID = 0x3ed4d9c9
def __init__(self, query_id, results, cache_time, users, gallery=None, next_offset=None, switch_pm=None):
"""
:param int query_id:
:param List[TypeBotInlineResult] results:
:param int cache_time:
:param List[TypeUser] users:
:param Optional[bool] gallery:
:param Optional[str] next_offset:
:param Optional[TypeInlineBotSwitchPM] switch_pm:
Constructor for messages.BotResults: Instance of BotResults.
"""
self.query_id = query_id # type: int
self.results = results # type: List[TypeBotInlineResult]
self.cache_time = cache_time # type: int
self.users = users # type: List[TypeUser]
self.gallery = gallery # type: Optional[bool]
self.next_offset = next_offset # type: Optional[str]
self.switch_pm = switch_pm # type: Optional[TypeInlineBotSwitchPM]
def to_dict(self):
return {
'_': 'BotResults',
'query_id': self.query_id,
'results': [] if self.results is None else [None if x is None else x.to_dict() for x in self.results],
'cache_time': self.cache_time,
'users': [] if self.users is None else [None if x is None else x.to_dict() for x in self.users],
'gallery': self.gallery,
'next_offset': self.next_offset,
'switch_pm': None if self.switch_pm is None else self.switch_pm.to_dict()
}
def __bytes__(self):
return b''.join((
b'H\xa8|\x94',
struct.pack('<I', (0 if self.gallery is None or self.gallery is False else 1) | (0 if self.next_offset is None or self.next_offset is False else 2) | (0 if self.switch_pm is None or self.switch_pm is False else 4)),
struct.pack('<q', self.query_id),
b'' if self.next_offset is None or self.next_offset is False else (self.serialize_bytes(self.next_offset)),
b'' if self.switch_pm is None or self.switch_pm is False else (bytes(self.switch_pm)),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.results)),b''.join(bytes(x) for x in self.results),
struct.pack('<i', self.cache_time),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(bytes(x) for x in self.users),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_gallery = bool(flags & 1)
_query_id = reader.read_long()
if flags & 2:
_next_offset = reader.tgread_string()
else:
_next_offset = None
if flags & 4:
_switch_pm = reader.tgread_object()
else:
_switch_pm = None
reader.read_int()
_results = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_results.append(_x)
_cache_time = reader.read_int()
reader.read_int()
_users = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_users.append(_x)
return cls(query_id=_query_id, results=_results, cache_time=_cache_time, users=_users, gallery=_gallery, next_offset=_next_offset, switch_pm=_switch_pm)
class ChannelMessages(TLObject):
CONSTRUCTOR_ID = 0x99262e37
SUBCLASS_OF_ID = 0xd4b40b5e
def __init__(self, pts, count, messages, chats, users):
"""
:param int pts:
:param int count:
:param List[TypeMessage] messages:
:param List[TypeChat] chats:
:param List[TypeUser] users:
Constructor for messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
"""
self.pts = pts # type: int
self.count = count # type: int
self.messages = messages # type: List[TypeMessage]
self.chats = chats # type: List[TypeChat]
self.users = users # type: List[TypeUser]
def to_dict(self):
return {
'_': 'ChannelMessages',
'pts': self.pts,
'count': self.count,
'messages': [] if self.messages is None else [None if x is None else x.to_dict() for x in self.messages],
'chats': [] if self.chats is None else [None if x is None else x.to_dict() for x in self.chats],
'users': [] if self.users is None else [None if x is None else x.to_dict() for x in self.users]
}
def __bytes__(self):
return b''.join((
b'7.&\x99',
b'\0\0\0\0',
struct.pack('<i', self.pts),
struct.pack('<i', self.count),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.messages)),b''.join(bytes(x) for x in self.messages),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.chats)),b''.join(bytes(x) for x in self.chats),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(bytes(x) for x in self.users),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_pts = reader.read_int()
_count = reader.read_int()
reader.read_int()
_messages = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_messages.append(_x)
reader.read_int()
_chats = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_chats.append(_x)
reader.read_int()
_users = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_users.append(_x)
return cls(pts=_pts, count=_count, messages=_messages, chats=_chats, users=_users)
class ChatFull(TLObject):
CONSTRUCTOR_ID = 0xe5d7d19c
SUBCLASS_OF_ID = 0x225a5109
def __init__(self, full_chat, chats, users):
"""
:param TypeChatFull full_chat:
:param List[TypeChat] chats:
:param List[TypeUser] users:
Constructor for messages.ChatFull: Instance of ChatFull.
"""
self.full_chat = full_chat # type: TypeChatFull
self.chats = chats # type: List[TypeChat]
self.users = users # type: List[TypeUser]
def to_dict(self):
return {
'_': 'ChatFull',
'full_chat': None if self.full_chat is None else self.full_chat.to_dict(),
'chats': [] if self.chats is None else [None if x is None else x.to_dict() for x in self.chats],
'users': [] if self.users is None else [None if x is None else x.to_dict() for x in self.users]
}
def __bytes__(self):
return b''.join((
b'\x9c\xd1\xd7\xe5',
bytes(self.full_chat),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.chats)),b''.join(bytes(x) for x in self.chats),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(bytes(x) for x in self.users),
))
@classmethod
def from_reader(cls, reader):
_full_chat = reader.tgread_object()
reader.read_int()
_chats = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_chats.append(_x)
reader.read_int()
_users = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_users.append(_x)
return cls(full_chat=_full_chat, chats=_chats, users=_users)
class Chats(TLObject):
CONSTRUCTOR_ID = 0x64ff9fd5
SUBCLASS_OF_ID = 0x99d5cb14
def __init__(self, chats):
"""
:param List[TypeChat] chats:
Constructor for messages.Chats: Instance of either Chats, ChatsSlice.
"""
self.chats = chats # type: List[TypeChat]
def to_dict(self):
return {
'_': 'Chats',
'chats': [] if self.chats is None else [None if x is None else x.to_dict() for x in self.chats]
}
def __bytes__(self):
return b''.join((
b'\xd5\x9f\xffd',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.chats)),b''.join(bytes(x) for x in self.chats),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_chats = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_chats.append(_x)
return cls(chats=_chats)
class ChatsSlice(TLObject):
CONSTRUCTOR_ID = 0x9cd81144
SUBCLASS_OF_ID = 0x99d5cb14
def __init__(self, count, chats):
"""
:param int count:
:param List[TypeChat] chats:
Constructor for messages.Chats: Instance of either Chats, ChatsSlice.
"""
self.count = count # type: int
self.chats = chats # type: List[TypeChat]
def to_dict(self):
return {
'_': 'ChatsSlice',
'count': self.count,
'chats': [] if self.chats is None else [None if x is None else x.to_dict() for x in self.chats]
}
def __bytes__(self):
return b''.join((
b'D\x11\xd8\x9c',
struct.pack('<i', self.count),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.chats)),b''.join(bytes(x) for x in self.chats),
))
@classmethod
def from_reader(cls, reader):
_count = reader.read_int()
reader.read_int()
_chats = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_chats.append(_x)
return cls(count=_count, chats=_chats)
class DhConfig(TLObject):
CONSTRUCTOR_ID = 0x2c221edd
SUBCLASS_OF_ID = 0xe488ed8b
def __init__(self, g, p, version, random):
"""
:param int g:
:param bytes p:
:param int version:
:param bytes random:
Constructor for messages.DhConfig: Instance of either DhConfigNotModified, DhConfig.
"""
self.g = g # type: int
self.p = p # type: bytes
self.version = version # type: int
self.random = random # type: bytes
def to_dict(self):
return {
'_': 'DhConfig',
'g': self.g,
'p': self.p,
'version': self.version,
'random': self.random
}
def __bytes__(self):
return b''.join((
b'\xdd\x1e",',
struct.pack('<i', self.g),
self.serialize_bytes(self.p),
struct.pack('<i', self.version),
self.serialize_bytes(self.random),
))
@classmethod
def from_reader(cls, reader):
_g = reader.read_int()
_p = reader.tgread_bytes()
_version = reader.read_int()
_random = reader.tgread_bytes()
return cls(g=_g, p=_p, version=_version, random=_random)
class DhConfigNotModified(TLObject):
CONSTRUCTOR_ID = 0xc0e24635
SUBCLASS_OF_ID = 0xe488ed8b
def __init__(self, random):
"""
:param bytes random:
Constructor for messages.DhConfig: Instance of either DhConfigNotModified, DhConfig.
"""
self.random = random # type: bytes
def to_dict(self):
return {
'_': 'DhConfigNotModified',
'random': self.random
}
def __bytes__(self):
return b''.join((
b'5F\xe2\xc0',
self.serialize_bytes(self.random),
))
@classmethod
def from_reader(cls, reader):
_random = reader.tgread_bytes()
return cls(random=_random)
class Dialogs(TLObject):
CONSTRUCTOR_ID = 0x15ba6c40
SUBCLASS_OF_ID = 0xe1b52ee
def __init__(self, dialogs, messages, chats, users):
"""
:param List[TypeDialog] dialogs:
:param List[TypeMessage] messages:
:param List[TypeChat] chats:
:param List[TypeUser] users:
Constructor for messages.Dialogs: Instance of either Dialogs, DialogsSlice, DialogsNotModified.
"""
self.dialogs = dialogs # type: List[TypeDialog]
self.messages = messages # type: List[TypeMessage]
self.chats = chats # type: List[TypeChat]
self.users = users # type: List[TypeUser]
def to_dict(self):
return {
'_': 'Dialogs',
'dialogs': [] if self.dialogs is None else [None if x is None else x.to_dict() for x in self.dialogs],
'messages': [] if self.messages is None else [None if x is None else x.to_dict() for x in self.messages],
'chats': [] if self.chats is None else [None if x is None else x.to_dict() for x in self.chats],
'users': [] if self.users is None else [None if x is None else x.to_dict() for x in self.users]
}
def __bytes__(self):
return b''.join((
b'@l\xba\x15',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.dialogs)),b''.join(bytes(x) for x in self.dialogs),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.messages)),b''.join(bytes(x) for x in self.messages),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.chats)),b''.join(bytes(x) for x in self.chats),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(bytes(x) for x in self.users),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_dialogs = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_dialogs.append(_x)
reader.read_int()
_messages = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_messages.append(_x)
reader.read_int()
_chats = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_chats.append(_x)
reader.read_int()
_users = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_users.append(_x)
return cls(dialogs=_dialogs, messages=_messages, chats=_chats, users=_users)
class DialogsNotModified(TLObject):
CONSTRUCTOR_ID = 0xf0e3e596
SUBCLASS_OF_ID = 0xe1b52ee
def __init__(self, count):
"""
:param int count:
Constructor for messages.Dialogs: Instance of either Dialogs, DialogsSlice, DialogsNotModified.
"""
self.count = count # type: int
def to_dict(self):
return {
'_': 'DialogsNotModified',
'count': self.count
}
def __bytes__(self):
return b''.join((
b'\x96\xe5\xe3\xf0',
struct.pack('<i', self.count),
))
@classmethod
def from_reader(cls, reader):
_count = reader.read_int()
return cls(count=_count)
class DialogsSlice(TLObject):
CONSTRUCTOR_ID = 0x71e094f3
SUBCLASS_OF_ID = 0xe1b52ee
def __init__(self, count, dialogs, messages, chats, users):
"""
:param int count:
:param List[TypeDialog] dialogs:
:param List[TypeMessage] messages:
:param List[TypeChat] chats:
:param List[TypeUser] users:
Constructor for messages.Dialogs: Instance of either Dialogs, DialogsSlice, DialogsNotModified.
"""
self.count = count # type: int
self.dialogs = dialogs # type: List[TypeDialog]
self.messages = messages # type: List[TypeMessage]
self.chats = chats # type: List[TypeChat]
self.users = users # type: List[TypeUser]
def to_dict(self):
return {
'_': 'DialogsSlice',
'count': self.count,
'dialogs': [] if self.dialogs is None else [None if x is None else x.to_dict() for x in self.dialogs],
'messages': [] if self.messages is None else [None if x is None else x.to_dict() for x in self.messages],
'chats': [] if self.chats is None else [None if x is None else x.to_dict() for x in self.chats],
'users': [] if self.users is None else [None if x is None else x.to_dict() for x in self.users]
}
def __bytes__(self):
return b''.join((
b'\xf3\x94\xe0q',
struct.pack('<i', self.count),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.dialogs)),b''.join(bytes(x) for x in self.dialogs),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.messages)),b''.join(bytes(x) for x in self.messages),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.chats)),b''.join(bytes(x) for x in self.chats),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(bytes(x) for x in self.users),
))
@classmethod
def from_reader(cls, reader):
_count = reader.read_int()
reader.read_int()
_dialogs = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_dialogs.append(_x)
reader.read_int()
_messages = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_messages.append(_x)
reader.read_int()
_chats = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_chats.append(_x)
reader.read_int()
_users = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_users.append(_x)
return cls(count=_count, dialogs=_dialogs, messages=_messages, chats=_chats, users=_users)
class FavedStickers(TLObject):
CONSTRUCTOR_ID = 0xf37f2f16
SUBCLASS_OF_ID = 0x8e736fb9
def __init__(self, hash, packs, stickers):
"""
:param int hash:
:param List[TypeStickerPack] packs:
:param List[TypeDocument] stickers:
Constructor for messages.FavedStickers: Instance of either FavedStickersNotModified, FavedStickers.
"""
self.hash = hash # type: int
self.packs = packs # type: List[TypeStickerPack]
self.stickers = stickers # type: List[TypeDocument]
def to_dict(self):
return {
'_': 'FavedStickers',
'hash': self.hash,
'packs': [] if self.packs is None else [None if x is None else x.to_dict() for x in self.packs],
'stickers': [] if self.stickers is None else [None if x is None else x.to_dict() for x in self.stickers]
}
def __bytes__(self):
return b''.join((
b'\x16/\x7f\xf3',
struct.pack('<i', self.hash),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.packs)),b''.join(bytes(x) for x in self.packs),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.stickers)),b''.join(bytes(x) for x in self.stickers),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_int()
reader.read_int()
_packs = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_packs.append(_x)
reader.read_int()
_stickers = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_stickers.append(_x)
return cls(hash=_hash, packs=_packs, stickers=_stickers)
class FavedStickersNotModified(TLObject):
CONSTRUCTOR_ID = 0x9e8fa6d3
SUBCLASS_OF_ID = 0x8e736fb9
def to_dict(self):
return {
'_': 'FavedStickersNotModified'
}
def __bytes__(self):
return b''.join((
b'\xd3\xa6\x8f\x9e',
))
@classmethod
def from_reader(cls, reader):
return cls()
class FeaturedStickers(TLObject):
CONSTRUCTOR_ID = 0xf89d88e5
SUBCLASS_OF_ID = 0x2614b722
def __init__(self, hash, sets, unread):
"""
:param int hash:
:param List[TypeStickerSetCovered] sets:
:param List[int] unread:
Constructor for messages.FeaturedStickers: Instance of either FeaturedStickersNotModified, FeaturedStickers.
"""
self.hash = hash # type: int
self.sets = sets # type: List[TypeStickerSetCovered]
self.unread = unread # type: List[int]
def to_dict(self):
return {
'_': 'FeaturedStickers',
'hash': self.hash,
'sets': [] if self.sets is None else [None if x is None else x.to_dict() for x in self.sets],
'unread': [] if self.unread is None else self.unread[:]
}
def __bytes__(self):
return b''.join((
b'\xe5\x88\x9d\xf8',
struct.pack('<i', self.hash),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.sets)),b''.join(bytes(x) for x in self.sets),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.unread)),b''.join(struct.pack('<q', x) for x in self.unread),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_int()
reader.read_int()
_sets = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_sets.append(_x)
reader.read_int()
_unread = []
for _ in range(reader.read_int()):
_x = reader.read_long()
_unread.append(_x)
return cls(hash=_hash, sets=_sets, unread=_unread)
class FeaturedStickersNotModified(TLObject):
CONSTRUCTOR_ID = 0x4ede3cf
SUBCLASS_OF_ID = 0x2614b722
def to_dict(self):
return {
'_': 'FeaturedStickersNotModified'
}
def __bytes__(self):
return b''.join((
b'\xcf\xe3\xed\x04',
))
@classmethod
def from_reader(cls, reader):
return cls()
class FoundGifs(TLObject):
CONSTRUCTOR_ID = 0x450a1c0a
SUBCLASS_OF_ID = 0xe799ea7
def __init__(self, next_offset, results):
"""
:param int next_offset:
:param List[TypeFoundGif] results:
Constructor for messages.FoundGifs: Instance of FoundGifs.
"""
self.next_offset = next_offset # type: int
self.results = results # type: List[TypeFoundGif]
def to_dict(self):
return {
'_': 'FoundGifs',
'next_offset': self.next_offset,
'results': [] if self.results is None else [None if x is None else x.to_dict() for x in self.results]
}
def __bytes__(self):
return b''.join((
b'\n\x1c\nE',
struct.pack('<i', self.next_offset),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.results)),b''.join(bytes(x) for x in self.results),
))
@classmethod
def from_reader(cls, reader):
_next_offset = reader.read_int()
reader.read_int()
_results = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_results.append(_x)
return cls(next_offset=_next_offset, results=_results)
class FoundStickerSets(TLObject):
CONSTRUCTOR_ID = 0x5108d648
SUBCLASS_OF_ID = 0x40df361
def __init__(self, hash, sets):
"""
:param int hash:
:param List[TypeStickerSetCovered] sets:
Constructor for messages.FoundStickerSets: Instance of either FoundStickerSetsNotModified, FoundStickerSets.
"""
self.hash = hash # type: int
self.sets = sets # type: List[TypeStickerSetCovered]
def to_dict(self):
return {
'_': 'FoundStickerSets',
'hash': self.hash,
'sets': [] if self.sets is None else [None if x is None else x.to_dict() for x in self.sets]
}
def __bytes__(self):
return b''.join((
b'H\xd6\x08Q',
struct.pack('<i', self.hash),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.sets)),b''.join(bytes(x) for x in self.sets),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_int()
reader.read_int()
_sets = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_sets.append(_x)
return cls(hash=_hash, sets=_sets)
class FoundStickerSetsNotModified(TLObject):
CONSTRUCTOR_ID = 0xd54b65d
SUBCLASS_OF_ID = 0x40df361
def to_dict(self):
return {
'_': 'FoundStickerSetsNotModified'
}
def __bytes__(self):
return b''.join((
b']\xb6T\r',
))
@classmethod
def from_reader(cls, reader):
return cls()
class HighScores(TLObject):
CONSTRUCTOR_ID = 0x9a3bfd99
SUBCLASS_OF_ID = 0x6ccd95fd
def __init__(self, scores, users):
"""
:param List[TypeHighScore] scores:
:param List[TypeUser] users:
Constructor for messages.HighScores: Instance of HighScores.
"""
self.scores = scores # type: List[TypeHighScore]
self.users = users # type: List[TypeUser]
def to_dict(self):
return {
'_': 'HighScores',
'scores': [] if self.scores is None else [None if x is None else x.to_dict() for x in self.scores],
'users': [] if self.users is None else [None if x is None else x.to_dict() for x in self.users]
}
def __bytes__(self):
return b''.join((
b'\x99\xfd;\x9a',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.scores)),b''.join(bytes(x) for x in self.scores),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(bytes(x) for x in self.users),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_scores = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_scores.append(_x)
reader.read_int()
_users = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_users.append(_x)
return cls(scores=_scores, users=_users)
class MessageEditData(TLObject):
CONSTRUCTOR_ID = 0x26b5dde6
SUBCLASS_OF_ID = 0xfb47949d
def __init__(self, caption=None):
"""
:param Optional[bool] caption:
Constructor for messages.MessageEditData: Instance of MessageEditData.
"""
self.caption = caption # type: Optional[bool]
def to_dict(self):
return {
'_': 'MessageEditData',
'caption': self.caption
}
def __bytes__(self):
return b''.join((
b'\xe6\xdd\xb5&',
struct.pack('<I', (0 if self.caption is None or self.caption is False else 1)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_caption = bool(flags & 1)
return cls(caption=_caption)
class Messages(TLObject):
CONSTRUCTOR_ID = 0x8c718e87
SUBCLASS_OF_ID = 0xd4b40b5e
def __init__(self, messages, chats, users):
"""
:param List[TypeMessage] messages:
:param List[TypeChat] chats:
:param List[TypeUser] users:
Constructor for messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
"""
self.messages = messages # type: List[TypeMessage]
self.chats = chats # type: List[TypeChat]
self.users = users # type: List[TypeUser]
def to_dict(self):
return {
'_': 'Messages',
'messages': [] if self.messages is None else [None if x is None else x.to_dict() for x in self.messages],
'chats': [] if self.chats is None else [None if x is None else x.to_dict() for x in self.chats],
'users': [] if self.users is None else [None if x is None else x.to_dict() for x in self.users]
}
def __bytes__(self):
return b''.join((
b'\x87\x8eq\x8c',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.messages)),b''.join(bytes(x) for x in self.messages),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.chats)),b''.join(bytes(x) for x in self.chats),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(bytes(x) for x in self.users),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_messages = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_messages.append(_x)
reader.read_int()
_chats = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_chats.append(_x)
reader.read_int()
_users = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_users.append(_x)
return cls(messages=_messages, chats=_chats, users=_users)
class MessagesNotModified(TLObject):
CONSTRUCTOR_ID = 0x74535f21
SUBCLASS_OF_ID = 0xd4b40b5e
def __init__(self, count):
"""
:param int count:
Constructor for messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
"""
self.count = count # type: int
def to_dict(self):
return {
'_': 'MessagesNotModified',
'count': self.count
}
def __bytes__(self):
return b''.join((
b'!_St',
struct.pack('<i', self.count),
))
@classmethod
def from_reader(cls, reader):
_count = reader.read_int()
return cls(count=_count)
class MessagesSlice(TLObject):
CONSTRUCTOR_ID = 0xb446ae3
SUBCLASS_OF_ID = 0xd4b40b5e
def __init__(self, count, messages, chats, users):
"""
:param int count:
:param List[TypeMessage] messages:
:param List[TypeChat] chats:
:param List[TypeUser] users:
Constructor for messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
"""
self.count = count # type: int
self.messages = messages # type: List[TypeMessage]
self.chats = chats # type: List[TypeChat]
self.users = users # type: List[TypeUser]
def to_dict(self):
return {
'_': 'MessagesSlice',
'count': self.count,
'messages': [] if self.messages is None else [None if x is None else x.to_dict() for x in self.messages],
'chats': [] if self.chats is None else [None if x is None else x.to_dict() for x in self.chats],
'users': [] if self.users is None else [None if x is None else x.to_dict() for x in self.users]
}
def __bytes__(self):
return b''.join((
b'\xe3jD\x0b',
struct.pack('<i', self.count),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.messages)),b''.join(bytes(x) for x in self.messages),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.chats)),b''.join(bytes(x) for x in self.chats),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(bytes(x) for x in self.users),
))
@classmethod
def from_reader(cls, reader):
_count = reader.read_int()
reader.read_int()
_messages = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_messages.append(_x)
reader.read_int()
_chats = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_chats.append(_x)
reader.read_int()
_users = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_users.append(_x)
return cls(count=_count, messages=_messages, chats=_chats, users=_users)
class PeerDialogs(TLObject):
CONSTRUCTOR_ID = 0x3371c354
SUBCLASS_OF_ID = 0x3ac70132
def __init__(self, dialogs, messages, chats, users, state):
"""
:param List[TypeDialog] dialogs:
:param List[TypeMessage] messages:
:param List[TypeChat] chats:
:param List[TypeUser] users:
:param TypeState state:
Constructor for messages.PeerDialogs: Instance of PeerDialogs.
"""
self.dialogs = dialogs # type: List[TypeDialog]
self.messages = messages # type: List[TypeMessage]
self.chats = chats # type: List[TypeChat]
self.users = users # type: List[TypeUser]
self.state = state # type: TypeState
def to_dict(self):
return {
'_': 'PeerDialogs',
'dialogs': [] if self.dialogs is None else [None if x is None else x.to_dict() for x in self.dialogs],
'messages': [] if self.messages is None else [None if x is None else x.to_dict() for x in self.messages],
'chats': [] if self.chats is None else [None if x is None else x.to_dict() for x in self.chats],
'users': [] if self.users is None else [None if x is None else x.to_dict() for x in self.users],
'state': None if self.state is None else self.state.to_dict()
}
def __bytes__(self):
return b''.join((
b'T\xc3q3',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.dialogs)),b''.join(bytes(x) for x in self.dialogs),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.messages)),b''.join(bytes(x) for x in self.messages),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.chats)),b''.join(bytes(x) for x in self.chats),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(bytes(x) for x in self.users),
bytes(self.state),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_dialogs = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_dialogs.append(_x)
reader.read_int()
_messages = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_messages.append(_x)
reader.read_int()
_chats = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_chats.append(_x)
reader.read_int()
_users = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_users.append(_x)
_state = reader.tgread_object()
return cls(dialogs=_dialogs, messages=_messages, chats=_chats, users=_users, state=_state)
class RecentStickers(TLObject):
CONSTRUCTOR_ID = 0x22f3afb3
SUBCLASS_OF_ID = 0xf76f8683
def __init__(self, hash, packs, stickers, dates):
"""
:param int hash:
:param List[TypeStickerPack] packs:
:param List[TypeDocument] stickers:
:param List[int] dates:
Constructor for messages.RecentStickers: Instance of either RecentStickersNotModified, RecentStickers.
"""
self.hash = hash # type: int
self.packs = packs # type: List[TypeStickerPack]
self.stickers = stickers # type: List[TypeDocument]
self.dates = dates # type: List[int]
def to_dict(self):
return {
'_': 'RecentStickers',
'hash': self.hash,
'packs': [] if self.packs is None else [None if x is None else x.to_dict() for x in self.packs],
'stickers': [] if self.stickers is None else [None if x is None else x.to_dict() for x in self.stickers],
'dates': [] if self.dates is None else self.dates[:]
}
def __bytes__(self):
return b''.join((
b'\xb3\xaf\xf3"',
struct.pack('<i', self.hash),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.packs)),b''.join(bytes(x) for x in self.packs),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.stickers)),b''.join(bytes(x) for x in self.stickers),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.dates)),b''.join(struct.pack('<i', x) for x in self.dates),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_int()
reader.read_int()
_packs = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_packs.append(_x)
reader.read_int()
_stickers = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_stickers.append(_x)
reader.read_int()
_dates = []
for _ in range(reader.read_int()):
_x = reader.read_int()
_dates.append(_x)
return cls(hash=_hash, packs=_packs, stickers=_stickers, dates=_dates)
class RecentStickersNotModified(TLObject):
CONSTRUCTOR_ID = 0xb17f890
SUBCLASS_OF_ID = 0xf76f8683
def to_dict(self):
return {
'_': 'RecentStickersNotModified'
}
def __bytes__(self):
return b''.join((
b'\x90\xf8\x17\x0b',
))
@classmethod
def from_reader(cls, reader):
return cls()
class SavedGifs(TLObject):
CONSTRUCTOR_ID = 0x2e0709a5
SUBCLASS_OF_ID = 0xa68b61f5
def __init__(self, hash, gifs):
"""
:param int hash:
:param List[TypeDocument] gifs:
Constructor for messages.SavedGifs: Instance of either SavedGifsNotModified, SavedGifs.
"""
self.hash = hash # type: int
self.gifs = gifs # type: List[TypeDocument]
def to_dict(self):
return {
'_': 'SavedGifs',
'hash': self.hash,
'gifs': [] if self.gifs is None else [None if x is None else x.to_dict() for x in self.gifs]
}
def __bytes__(self):
return b''.join((
b'\xa5\t\x07.',
struct.pack('<i', self.hash),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.gifs)),b''.join(bytes(x) for x in self.gifs),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_int()
reader.read_int()
_gifs = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_gifs.append(_x)
return cls(hash=_hash, gifs=_gifs)
class SavedGifsNotModified(TLObject):
CONSTRUCTOR_ID = 0xe8025ca2
SUBCLASS_OF_ID = 0xa68b61f5
def to_dict(self):
return {
'_': 'SavedGifsNotModified'
}
def __bytes__(self):
return b''.join((
b'\xa2\\\x02\xe8',
))
@classmethod
def from_reader(cls, reader):
return cls()
class SentEncryptedFile(TLObject):
CONSTRUCTOR_ID = 0x9493ff32
SUBCLASS_OF_ID = 0xc99e3e50
def __init__(self, date, file):
"""
:param Optional[datetime] date:
:param TypeEncryptedFile file:
Constructor for messages.SentEncryptedMessage: Instance of either SentEncryptedMessage, SentEncryptedFile.
"""
self.date = date # type: Optional[datetime]
self.file = file # type: TypeEncryptedFile
def to_dict(self):
return {
'_': 'SentEncryptedFile',
'date': self.date,
'file': None if self.file is None else self.file.to_dict()
}
def __bytes__(self):
return b''.join((
b'2\xff\x93\x94',
self.serialize_datetime(self.date),
bytes(self.file),
))
@classmethod
def from_reader(cls, reader):
_date = reader.tgread_date()
_file = reader.tgread_object()
return cls(date=_date, file=_file)
class SentEncryptedMessage(TLObject):
CONSTRUCTOR_ID = 0x560f8935
SUBCLASS_OF_ID = 0xc99e3e50
def __init__(self, date):
"""
:param Optional[datetime] date:
Constructor for messages.SentEncryptedMessage: Instance of either SentEncryptedMessage, SentEncryptedFile.
"""
self.date = date # type: Optional[datetime]
def to_dict(self):
return {
'_': 'SentEncryptedMessage',
'date': self.date
}
def __bytes__(self):
return b''.join((
b'5\x89\x0fV',
self.serialize_datetime(self.date),
))
@classmethod
def from_reader(cls, reader):
_date = reader.tgread_date()
return cls(date=_date)
class StickerSet(TLObject):
CONSTRUCTOR_ID = 0xb60a24a6
SUBCLASS_OF_ID = 0x9b704a5a
def __init__(self, set, packs, documents):
"""
:param TypeStickerSet set:
:param List[TypeStickerPack] packs:
:param List[TypeDocument] documents:
Constructor for messages.StickerSet: Instance of StickerSet.
"""
self.set = set # type: TypeStickerSet
self.packs = packs # type: List[TypeStickerPack]
self.documents = documents # type: List[TypeDocument]
def to_dict(self):
return {
'_': 'StickerSet',
'set': None if self.set is None else self.set.to_dict(),
'packs': [] if self.packs is None else [None if x is None else x.to_dict() for x in self.packs],
'documents': [] if self.documents is None else [None if x is None else x.to_dict() for x in self.documents]
}
def __bytes__(self):
return b''.join((
b'\xa6$\n\xb6',
bytes(self.set),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.packs)),b''.join(bytes(x) for x in self.packs),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.documents)),b''.join(bytes(x) for x in self.documents),
))
@classmethod
def from_reader(cls, reader):
_set = reader.tgread_object()
reader.read_int()
_packs = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_packs.append(_x)
reader.read_int()
_documents = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_documents.append(_x)
return cls(set=_set, packs=_packs, documents=_documents)
class StickerSetInstallResultArchive(TLObject):
CONSTRUCTOR_ID = 0x35e410a8
SUBCLASS_OF_ID = 0x67cb3fe8
def __init__(self, sets):
"""
:param List[TypeStickerSetCovered] sets:
Constructor for messages.StickerSetInstallResult: Instance of either StickerSetInstallResultSuccess, StickerSetInstallResultArchive.
"""
self.sets = sets # type: List[TypeStickerSetCovered]
def to_dict(self):
return {
'_': 'StickerSetInstallResultArchive',
'sets': [] if self.sets is None else [None if x is None else x.to_dict() for x in self.sets]
}
def __bytes__(self):
return b''.join((
b'\xa8\x10\xe45',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.sets)),b''.join(bytes(x) for x in self.sets),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_sets = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_sets.append(_x)
return cls(sets=_sets)
class StickerSetInstallResultSuccess(TLObject):
CONSTRUCTOR_ID = 0x38641628
SUBCLASS_OF_ID = 0x67cb3fe8
def to_dict(self):
return {
'_': 'StickerSetInstallResultSuccess'
}
def __bytes__(self):
return b''.join((
b'(\x16d8',
))
@classmethod
def from_reader(cls, reader):
return cls()
class Stickers(TLObject):
CONSTRUCTOR_ID = 0xe4599bbd
SUBCLASS_OF_ID = 0xd73bb9de
def __init__(self, hash, stickers):
"""
:param int hash:
:param List[TypeDocument] stickers:
Constructor for messages.Stickers: Instance of either StickersNotModified, Stickers.
"""
self.hash = hash # type: int
self.stickers = stickers # type: List[TypeDocument]
def to_dict(self):
return {
'_': 'Stickers',
'hash': self.hash,
'stickers': [] if self.stickers is None else [None if x is None else x.to_dict() for x in self.stickers]
}
def __bytes__(self):
return b''.join((
b'\xbd\x9bY\xe4',
struct.pack('<i', self.hash),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.stickers)),b''.join(bytes(x) for x in self.stickers),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_int()
reader.read_int()
_stickers = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_stickers.append(_x)
return cls(hash=_hash, stickers=_stickers)
class StickersNotModified(TLObject):
CONSTRUCTOR_ID = 0xf1749a22
SUBCLASS_OF_ID = 0xd73bb9de
def to_dict(self):
return {
'_': 'StickersNotModified'
}
def __bytes__(self):
return b''.join((
b'"\x9at\xf1',
))
@classmethod
def from_reader(cls, reader):
return cls()
|
StarcoderdataPython
|
3429820
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Fixed IP action implementations"""
import logging
from osc_lib.command import command
from osc_lib import utils
from openstackclient.i18n import _
class AddFixedIP(command.Command):
_description = _("Add fixed IP address to server")
# TODO(tangchen): Remove this class and ``ip fixed add`` command
# two cycles after Mitaka.
# This notifies cliff to not display the help for this command
deprecated = True
log = logging.getLogger('deprecated')
def get_parser(self, prog_name):
parser = super(AddFixedIP, self).get_parser(prog_name)
parser.add_argument(
"network",
metavar="<network>",
help=_("Network to fetch an IP address from (name or ID)"),
)
parser.add_argument(
"server",
metavar="<server>",
help=_("Server to receive the IP address (name or ID)"),
)
return parser
def take_action(self, parsed_args):
self.log.warning(_('This command has been deprecated. '
'Please use "server add fixed ip" instead.'))
compute_client = self.app.client_manager.compute
network = utils.find_resource(
compute_client.networks, parsed_args.network)
server = utils.find_resource(
compute_client.servers, parsed_args.server)
server.add_fixed_ip(network.id)
class RemoveFixedIP(command.Command):
_description = _("Remove fixed IP address from server")
# TODO(tangchen): Remove this class and ``ip fixed remove`` command
# two cycles after Mitaka.
# This notifies cliff to not display the help for this command
deprecated = True
log = logging.getLogger('deprecated')
def get_parser(self, prog_name):
parser = super(RemoveFixedIP, self).get_parser(prog_name)
parser.add_argument(
"ip_address",
metavar="<ip-address>",
help=_("IP address to remove from server (name only)"),
)
parser.add_argument(
"server",
metavar="<server>",
help=_("Server to remove the IP address from (name or ID)"),
)
return parser
def take_action(self, parsed_args):
self.log.warning(_('This command has been deprecated. '
'Please use "server remove fixed ip" instead.'))
compute_client = self.app.client_manager.compute
server = utils.find_resource(
compute_client.servers, parsed_args.server)
server.remove_fixed_ip(parsed_args.ip_address)
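# Hedged CLI usage of the two deprecated commands above (they back the
# "ip fixed add/remove" commands noted in the TODOs; the preferred forms
# are "server add fixed ip" and "server remove fixed ip"):
#   openstack ip fixed add <network> <server>
#   openstack ip fixed remove <ip-address> <server>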
# SPDX-FileCopyrightText: Copyright 2022-present Open Networking Foundation.
# SPDX-License-Identifier: Apache-2.0
from scapy.contrib.gtp import GTP_U_Header, GTPPDUSessionContainer
from scapy.layers.inet import IP, UDP
from scapy.layers.l2 import Ether
GTPU_PORT = 2152
def pkt_add_gtpu(
pkt,
out_ipv4_src,
out_ipv4_dst,
teid,
sport=GTPU_PORT,
dport=GTPU_PORT,
ext_psc_type=None,
ext_psc_qfi=None,
):
"""
Encapsulates the given pkt with GTPU tunnel headers.
"""
gtp_pkt = (
Ether(src=pkt[Ether].src, dst=pkt[Ether].dst)
/ IP(src=out_ipv4_src, dst=out_ipv4_dst, tos=0, id=0x1513, flags=0, frag=0,)
/ UDP(sport=sport, dport=dport, chksum=0)
/ GTP_U_Header(gtp_type=255, teid=teid)
)
if ext_psc_type is not None:
# Add QoS Flow Identifier (QFI) as an extension header (required for 5G RAN)
gtp_pkt = gtp_pkt / GTPPDUSessionContainer(type=ext_psc_type, QFI=ext_psc_qfi)
return gtp_pkt / pkt[Ether].payload
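# Minimal, hedged usage sketch of pkt_add_gtpu (not part of the original
# module): the MAC/IP addresses, TEID and QFI below are made-up example
# values, and type=0 for the PDU session container is only an assumption.
if __name__ == "__main__":
    inner = (
        Ether(src="00:00:00:00:00:01", dst="00:00:00:00:00:02")
        / IP(src="10.0.0.1", dst="10.0.0.2")
        / UDP(sport=1234, dport=80)
    )
    tunneled = pkt_add_gtpu(
        inner,
        out_ipv4_src="192.168.0.1",
        out_ipv4_dst="192.168.0.2",
        teid=0xBEEF,
        ext_psc_type=0,  # assumed PDU type value for this illustration
        ext_psc_qfi=1,
    )
    tunneled.show()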
# 20200517 Angold4
import os
class Computer:
def __init__(self, name):
self._name = name
self._storage = []
self._addrstorage = []
def get_name(self):
return self._name
    def received(self, packages, path):
        if packages not in self._storage:
            self._storage.append(packages)
            self._addrstorage.append(path)
        else:
            print("Error: storage already contains a duplicate package")
    def send_packages(self, aims, packages, path=''):
        try:
            aims.received(packages, path)
        except NameError:
            print("undefined name")
def check(self):
print("Here are the packages:")
for i in range(len(self._storage)):
print(self._storage[i], ':', self._addrstorage[i])
def check_position(self, packages):
j = 0
if len(self._storage) == 0:
return 'Empty!'
for i in self._storage:
if i == packages:
addr = self._addrstorage[j]
Path = os.path.join(addr, packages)
return Path
else:
j += 1
def read(self, packages):
print("The file will be delete after open it")
if packages in self._storage:
for i in range(len(self._storage)):
if self._storage[i] == packages:
_path = os.path.join(self._addrstorage[i], self._storage[i])
_file = open(_path)
del self._storage[i]
del self._addrstorage[i]
return _file.read()
else:
return "Not found"
if __name__ == "__main__":
A = Computer('Alice')
B = Computer('Bob')
print(A.get_name())
A.send_packages(B, '2.07.py', '/Users/Angold4/WorkSpace/algorithms_in_python/Chapters/Chapter_2/Answer')
B.check()
print(B.check_position('2.07.py'))
print(B.read('2.07.py'))
B.check()
"""
Alice
Here are the packages:
2.07.py : /Users/Angold4/WorkSpace/algorithms_in_python/Chapters/Chapter_2/Answer
/Users/Angold4/WorkSpace/algorithms_in_python/Chapters/Chapter_2/Answer/2.07.py
The file will be delete after open it
# Angold4 20200517 2.07.py
import sys; sys.path.append('..') # Change the relative path
from CreditCard import CreditCard
class BCreditCard(CreditCard):
def __init__(self, customer, bank, account, limit, balance=0):
super().__init__(customer, bank, account, limit)
self._balance = balance
if __name__ == "__main__":
B = BCreditCard('<NAME>', 'California Savings', '5391 0375 9387 5309', 2500, 100)
print(B.get_balance())
Here are the packages:
"""
from swsscommon import swsscommon
import os
import sys
import time
import json
import pytest
from distutils.version import StrictVersion
def create_entry(tbl, key, pairs):
fvs = swsscommon.FieldValuePairs(pairs)
tbl.set(key, fvs)
# FIXME: better to wait until DB create them
time.sleep(1)
def remove_entry_tbl(db, table, key):
tbl = swsscommon.Table(db, table)
tbl._del(key)
# FIXME: better to wait until DB create them
time.sleep(1)
def create_entry_tbl(db, table, key, pairs):
tbl = swsscommon.Table(db, table)
create_entry(tbl, key, pairs)
def how_many_entries_exist(db, table):
tbl = swsscommon.Table(db, table)
return len(tbl.getKeys())
def test_negativeFDB(dvs, testlog):
dvs.setup_db()
#dvs.runcmd("sonic-clear fdb all")
time.sleep(2)
#Find switch_id
switch_id = dvs.getSwitchOid()
print("Switch_id="+str(switch_id))
vlan_before = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
bp_before = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT")
vm_before = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
# create vlan
dvs.create_vlan("2")
dvs.create_vlan_member("2", "Ethernet0")
dvs.create_vlan_member("2", "Ethernet4")
# Find the vlan_oid_2 to be used in DB communications
vlan_oid_2 = dvs.getVlanOid("2")
assert vlan_oid_2 is not None, "Could not find Vlan_oid"
print("VLan-2 vlan_oid="+str(vlan_oid_2))
# create vlan
dvs.create_vlan("4")
dvs.create_vlan_member("4", "Ethernet8")
# Find the vlan_oid_4 to be used in DB communications
vlan_oid_4 = dvs.getVlanOid("4")
assert vlan_oid_4 is not None, "Could not find Vlan_oid"
print("VLan-4 vlan_oid="+str(vlan_oid_4))
dvs.create_vlan("10")
dvs.create_vlan_member("10", "Ethernet12")
# Find the vlan_oid_10 to be used in DB communications
vlan_oid_10 = dvs.getVlanOid("10")
assert vlan_oid_10 is not None, "Could not find Vlan_oid"
print("VLan-10 vlan_oid="+str(vlan_oid_10))
# check that the vlan information was propagated
vlan_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
bp_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT")
vm_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
assert vlan_after - vlan_before == 3, "The Vlan2/Vlan4 wasn't created"
assert bp_after - bp_before == 4, "The bridge port wasn't created"
assert vm_after - vm_before == 4, "The vlan member wasn't added"
# Get mapping between interface name and its bridge port_id
iface_2_bridge_port_id = dvs.get_map_iface_bridge_port_id(dvs.adb)
#dvs.runcmd("swssloglevel -l DEBUG -c orchagent")
#dvs.runcmd("swssloglevel -l DEBUG -c vlanmgrd")
print("NEG1 - Add MAC address to an out of range vlan and later delete it")
mac = "52:54:00:25:06:E9"
#dvs.runcmd("config mac add " + mac.lower() + " 3 Ethernet0")
print("ACTION: Creating static FDB Vlan33333|"+mac.lower()+"|Ethernet0 in CONFIG-DB")
create_entry_tbl(
dvs.cdb,
"FDB", "Vlan33333|"+mac.lower(),
[
("port", "Ethernet0"),
]
)
time.sleep(2)
# check that the FDB entry was added in Config DB
print("CHECK: Static FDB Vlan33333:"+mac.lower()+":Ethernet0 is created in Config-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.cdb, "FDB",
"Vlan33333\|"+mac.lower(),
[("port", "Ethernet0")]
)
assert mac1_found, str(extra)
print("CONFIRM: Static FDB Vlan33333:"+mac.lower()+":Ethernet0 is created in Config-DB")
# check that the FDB entry was not added in APP DB
print("CHECK: Static FDB Vlan33333:"+mac.lower()+":Ethernet0 is not created in APP-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.pdb, "FDB_TABLE",
"Vlan33333:"+mac.lower(),
[("port", "Ethernet0"),
("type", "static"),
]
)
assert mac1_found == False, str(extra)
print("CONFIRM: Static FDB Vlan33333:"+mac.lower()+":Ethernet0 is not created in APP-DB")
print("ACTION: Deleting Static FDB Vlan33333:"+mac.lower()+":Ethernet0")
remove_entry_tbl(dvs.cdb, "FDB", "Vlan33333|"+mac.lower())
time.sleep(2)
#Check the mac is removed from config-db
print("CHECK: Static FDB Vlan33333:"+mac.lower()+":Ethernet0 is deleted from Config-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.cdb, "FDB",
"Vlan33333\|"+mac.lower(),
[("port", "Ethernet0")]
)
assert mac1_found == False, str(extra)
print("CONFIRM: Static FDB Vlan33333:"+mac.lower()+":Ethernet0 is deleted from Config-DB")
print("NEG2 - Add MAC address to a vlan which does not exist and later delete it")
mac = "52:54:00:25:06:E9"
#dvs.runcmd("config mac add " + mac.lower() + " 3 Ethernet0")
print("ACTION: Creating static FDB Vlan3:"+mac.lower()+":Ethernet0 in CONFIG-DB")
create_entry_tbl(
dvs.cdb,
"FDB", "Vlan3|"+mac.lower(),
[
("port", "Ethernet0"),
]
)
time.sleep(2)
# check that the FDB entry was added in Config DB
print("CHECK: Static FDB Vlan3:"+mac.lower()+":Ethernet0 is created in Config-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.cdb, "FDB",
"Vlan3\|"+mac.lower(),
[("port", "Ethernet0")]
)
assert mac1_found, str(extra)
print("CONFIRM: Static FDB Vlan3:"+mac.lower()+":Ethernet0 is created in Config-DB")
# check that the FDB entry was added in APP DB
print("CHECK: Static FDB Vlan3:"+mac.lower()+":Ethernet0 is created in APP-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.pdb, "FDB_TABLE",
"Vlan3:"+mac.lower(),
[("port", "Ethernet0"),
("type", "static"),
]
)
assert mac1_found, str(extra)
print("CONFIRM: Static FDB Vlan3:"+mac.lower()+":Ethernet0 is created in APP-DB")
# check that the FDB entry is not inserted into ASIC DB
print("CHECK: Static FDB Vlan3:"+mac.lower()+":Ethernet0 is not created in ASIC-DB")
ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY",
[("mac", mac.lower())],
[("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_STATIC")]
)
assert ok == False, str(extra)
print("CONFIRM: Static FDB Vlan3:"+mac.lower()+":Ethernet0 is not created in ASIC-DB")
print("ACTION: Deleting Static FDB Vlan3:"+mac.lower()+"Ethernet0")
remove_entry_tbl(dvs.cdb, "FDB", "Vlan3|"+mac.lower())
time.sleep(2)
#Check the mac is removed from config-db
print("CHECK: Static FDB Vlan3:"+mac.lower()+":Ethernet0 is deleted from Config-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.cdb, "FDB",
"Vlan3\|"+mac.lower(),
[("port", "Ethernet0")]
)
assert mac1_found == False, str(extra)
print("CONFIRM: Static FDB Vlan3:"+mac.lower()+":Ethernet0 is deleted from Config-DB")
# check that the FDB entry is removed from APP DB
print("CHECK: Static FDB Vlan3:"+mac.lower()+":Ethernet0 is deleted from APP-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.pdb, "FDB_TABLE",
"Vlan3:"+mac.lower(),
[("port", "Ethernet0"),
("type", "static"),
]
)
assert mac1_found == False, str(extra)
print("CONFIRM: Static FDB Vlan3:"+mac.lower()+":Ethernet0 is deleted from APP-DB")
print("NEG3 - Add MAC address to an invalid port which does not exist and later delete it")
mac = "52:54:00:25:06:E9"
#dvs.runcmd("config mac add " + mac.lower() + " 3 Ethernet0")
print("ACTION: Creating static FDB Vlan2:"+mac.lower()+":Port0 in CONFIG-DB")
create_entry_tbl(
dvs.cdb,
"FDB", "Vlan2|"+mac.lower(),
[
("port", "Port0"),
]
)
time.sleep(2)
# check that the FDB entry was added in Config DB
print("CHECK: Static FDB Vlan2:"+mac.lower()+":Port0 is created in Config-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.cdb, "FDB",
"Vlan2\|"+mac.lower(),
[("port", "Port0")]
)
assert mac1_found, str(extra)
print("CONFIRM: Static FDB Vlan2:"+mac.lower()+":Port0 is created in Config-DB")
# check that the FDB entry was added in APP DB
print("CHECK: Static FDB Vlan2:"+mac.lower()+":Port0 is created in APP-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.pdb, "FDB_TABLE",
"Vlan2:"+mac.lower(),
[("port", "Port0"),
("type", "static"),
]
)
assert mac1_found, str(extra)
print("CONFIRM: Static FDB Vlan2:"+mac.lower()+"Port0 is created in APP-DB")
# check that the FDB entry is not inserted into ASIC DB
print("CHECK: Static FDB Vlan2:"+mac.lower()+":Port0 is not created in ASIC-DB")
ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY",
[("mac", mac.lower())],
[("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_STATIC")]
)
assert ok == False, str(extra)
print("CONFIRM: Static FDB Vlan2:"+mac.lower()+":Port0 is not created in ASIC-DB")
print("ACTION: Removing static FDB Vlan2:"+mac.lower()+":Port0 from CONFIG-DB")
remove_entry_tbl(dvs.cdb, "FDB", "Vlan2|"+mac.lower())
time.sleep(2)
#Check the mac is removed from config-db
print("CHECK: Static FDB Vlan2:"+mac.lower()+":Port0 is deleted from Config-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.cdb, "FDB",
"Vlan2\|"+mac.lower(),
[("port", "Port0")]
)
assert mac1_found == False, str(extra)
print("CONFIRM: Static FDB Vlan2:"+mac.lower()+":Port0 is deleted from Config-DB")
# check that the FDB entry is removed from APP DB
print("CHECK: Static FDB Vlan2:"+mac.lower()+":Port0 is deleted from APP-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.pdb, "FDB_TABLE",
"Vlan2:"+mac.lower(),
[("port", "Port0"),
("type", "static"),
]
)
assert mac1_found == False, str(extra)
print("CONFIRM: Static FDB Vlan2:"+mac.lower()+":Port0 is deleted from APP-DB")
print("NEG4 - simulate mac learn event for a port which is not part of vlan")
bp_eth8 = iface_2_bridge_port_id["Ethernet8"]
dvs.remove_vlan_member("4", "Ethernet8")
print("ACTION Creating FDB Vlan4:52-54-00-25-06-E9:Ethernet8 in ASIC-DB")
create_entry_tbl(
dvs.adb,
"ASIC_STATE", "SAI_OBJECT_TYPE_FDB_ENTRY:{\"bvid\":\""+vlan_oid_4+"\",\"mac\":\"52:54:00:25:06:E9\",\"switch_id\":\""+switch_id+"\"}",
[
("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC"),
("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", bp_eth8),
]
)
ntf = swsscommon.NotificationProducer(dvs.adb, "FDB_NOTIFICATIONS")
fvp = swsscommon.FieldValuePairs()
ntf_data = "[{\"fdb_entry\":\"{\\\"bvid\\\":\\\""+vlan_oid_4+"\\\",\\\"mac\\\":\\\"52:54:00:25:06:E9\\\",\\\"switch_id\\\":\\\""+switch_id+"\\\"}\",\"fdb_event\":\"SAI_FDB_EVENT_LEARNED\",\"list\":[{\"id\":\"SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID\",\"value\":\""+bp_eth8+"\"}]}]"
ntf.send("fdb_event", ntf_data, fvp)
time.sleep(2)
# check that the FDB entry was added in ASIC DB
print("CHECK: FDB Vlan4:52-54-00-25-06-E9:Ethernet8 is created in ASIC-DB")
ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY",
[("mac", "52:54:00:25:06:E9"), ("bvid", vlan_oid_4)],
[("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC"),
("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", bp_eth8)]
)
assert ok, str(extra)
print("CONFIRM: FDB Vlan4:52-54-00-25-06-E9:Ethernet8 is created in ASIC-DB")
# check that the FDB entry was not added in STATE DB
print("CHECK: FDB Vlan4:52-54-00-25-06-E9:Ethernet8 is not created in STATE-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.sdb, "FDB_TABLE",
"Vlan4:52:54:00:25:06:e9",
[("port", "Ethernet8"),
("type", "dynamic"),
]
)
assert mac1_found == False, str(extra)
print("CONFIRM: FDB Vlan4:52-54-00-25-06-E9:Ethernet8 is not created in STATE-DB")
print("NEG5 - simulate mac learn event for a vlan which does not exist")
bp_eth12 = iface_2_bridge_port_id["Ethernet12"]
dvs.remove_vlan_member("10", "Ethernet12")
dvs.remove_vlan("10")
print("ACTION: Creating FDB Vlan10:52-54-00-25-06-E9:Ethernet12 in ASIC-DB")
create_entry_tbl(
dvs.adb,
"ASIC_STATE", "SAI_OBJECT_TYPE_FDB_ENTRY:{\"bvid\":\""+vlan_oid_10+"\",\"mac\":\"52:54:00:25:06:E9\",\"switch_id\":\""+switch_id+"\"}",
[
("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC"),
("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", bp_eth12),
]
)
ntf = swsscommon.NotificationProducer(dvs.adb, "FDB_NOTIFICATIONS")
fvp = swsscommon.FieldValuePairs()
ntf_data = "[{\"fdb_entry\":\"{\\\"bvid\\\":\\\""+vlan_oid_10+"\\\",\\\"mac\\\":\\\"52:54:00:25:06:E9\\\",\\\"switch_id\\\":\\\""+switch_id+"\\\"}\",\"fdb_event\":\"SAI_FDB_EVENT_LEARNED\",\"list\":[{\"id\":\"SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID\",\"value\":\""+bp_eth12+"\"}]}]"
ntf.send("fdb_event", ntf_data, fvp)
time.sleep(2)
# check that the FDB entry was added in ASIC DB
print("CHECK: FDB Vlan10:52-54-00-25-06-E9:Ethernet12 is created in ASIC-DB")
ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY",
[("mac", "52:54:00:25:06:E9"), ("bvid", vlan_oid_10)],
[("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC"),
("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", bp_eth12)]
)
assert ok, str(extra)
print("CONFIRM: FDB Vlan10:52-54-00-25-06-E9:Ethernet12 is created in ASIC-DB")
# check that the FDB entry was not added in STATE DB
print("CHECK: FDB Vlan10:52-54-00-25-06-E9:Ethernet12 is not created in STATE-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.sdb, "FDB_TABLE",
"Vlan10:52:54:00:25:06:e9",
[("port", "Ethernet12"),
("type", "dynamic"),
]
)
assert mac1_found == False, str(extra)
print("CONFIRM: FDB Vlan10:52-54-00-25-06-E9:Ethernet12 is not created in STATE-DB")
print("NEG6 - simulate mac age event for a vlan which does not exist")
print("ACTION: Deleting FDB Vlan10:52-54-00-25-06-E9:Ethernet12 from ASIC-DB")
remove_entry_tbl(dvs.adb, "ASIC_STATE", "SAI_OBJECT_TYPE_FDB_ENTRY:{\"bvid\":\""+vlan_oid_10+"\",\"mac\":\"52:54:00:25:06:E9\",\"switch_id\":\""+switch_id+"\"}")
ntf = swsscommon.NotificationProducer(dvs.adb, "FDB_NOTIFICATIONS")
fvp = swsscommon.FieldValuePairs()
ntf_data = "[{\"fdb_entry\":\"{\\\"bvid\\\":\\\""+vlan_oid_10+"\\\",\\\"mac\\\":\\\"52:54:00:25:06:E9\\\",\\\"switch_id\\\":\\\""+switch_id+"\\\"}\",\"fdb_event\":\"SAI_FDB_EVENT_AGED\",\"list\":[{\"id\":\"SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID\",\"value\":\""+bp_eth12+"\"}]}]"
ntf.send("fdb_event", ntf_data, fvp)
time.sleep(2)
# check that the FDB entry is not present ASIC DB
print("CHECK: FDB Vlan10:52-54-00-25-06-E9:Ethernet12 is not found in ASIC-DB")
ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY",
[("mac", "52:54:00:25:06:E9"), ("bvid", vlan_oid_10)],
[("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC"),
("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", bp_eth12)]
)
assert ok == False, str(extra)
print("CONFIRM: FDB Vlan10:52-54-00-25-06-E9:Ethernet12 is not found in ASIC-DB")
# check that the FDB entry was not found in STATE DB
print("CHECK: FDB Vlan10:52-54-00-25-06-E9:Ethernet12 is not found in STATE-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.sdb, "FDB_TABLE",
"Vlan10:52:54:00:25:06:e9",
[("port", "Ethernet12"),
("type", "dynamic"),
]
)
assert mac1_found == False, str(extra)
print("CONFIRM: FDB Vlan10:52-54-00-25-06-E9:Ethernet12 is not found in STATE-DB")
print("NEG7 - simulate mac age event for a port which is not part of vlan")
print("ACTION: Deleting FDB Vlan4:52-54-00-25-06-E9:Ethernet8 from ASIC-DB")
remove_entry_tbl(dvs.adb, "ASIC_STATE", "SAI_OBJECT_TYPE_FDB_ENTRY:{\"bvid\":\""+vlan_oid_4+"\",\"mac\":\"52:54:00:25:06:E9\",\"switch_id\":\""+switch_id+"\"}")
ntf = swsscommon.NotificationProducer(dvs.adb, "FDB_NOTIFICATIONS")
fvp = swsscommon.FieldValuePairs()
ntf_data = "[{\"fdb_entry\":\"{\\\"bvid\\\":\\\""+vlan_oid_4+"\\\",\\\"mac\\\":\\\"52:54:00:25:06:E9\\\",\\\"switch_id\\\":\\\""+switch_id+"\\\"}\",\"fdb_event\":\"SAI_FDB_EVENT_AGED\",\"list\":[{\"id\":\"SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID\",\"value\":\""+bp_eth8+"\"}]}]"
ntf.send("fdb_event", ntf_data, fvp)
time.sleep(2)
# check that the FDB entry is not present ASIC DB
print("CHECK: FDB Vlan4:52-54-00-25-06-E9:Ethernet8 is not found in ASIC-DB")
ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY",
[("mac", "52:54:00:25:06:E9"), ("bvid", vlan_oid_4)],
[("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC"),
("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", bp_eth8)]
)
assert ok == False, str(extra)
print("CONFIRM: FDB Vlan4:52-54-00-25-06-E9:Ethernet8 is not found in ASIC-DB")
# check that the FDB entry was not found in STATE DB
print("CHECK: FDB Vlan4:52-54-00-25-06-E9:Ethernet8 is not found in STATE-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.sdb, "FDB_TABLE",
"Vlan4:52:54:00:25:06:e9",
[("port", "Ethernet8"),
("type", "dynamic"),
]
)
assert mac1_found == False, str(extra)
print("CONFIRM: FDB Vlan4:52-54-00-25-06-E9:Ethernet8 is not found in STATE-DB")
print("NEG8 - simulate mac age event for a mac which does not exist")
bp_eth0 = iface_2_bridge_port_id["Ethernet0"]
print("ACTION: Deleting FDB Vlan2:52-54-00-25-06-E9:Ethernet0 from ASIC-DB")
remove_entry_tbl(dvs.adb, "ASIC_STATE", "SAI_OBJECT_TYPE_FDB_ENTRY:{\"bvid\":\""+vlan_oid_2+"\",\"mac\":\"52:54:00:25:06:E9\",\"switch_id\":\""+switch_id+"\"}")
ntf = swsscommon.NotificationProducer(dvs.adb, "FDB_NOTIFICATIONS")
fvp = swsscommon.FieldValuePairs()
ntf_data = "[{\"fdb_entry\":\"{\\\"bvid\\\":\\\""+vlan_oid_2+"\\\",\\\"mac\\\":\\\"52:54:00:25:06:E9\\\",\\\"switch_id\\\":\\\""+switch_id+"\\\"}\",\"fdb_event\":\"SAI_FDB_EVENT_AGED\",\"list\":[{\"id\":\"SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID\",\"value\":\""+bp_eth0+"\"}]}]"
ntf.send("fdb_event", ntf_data, fvp)
time.sleep(2)
# check that the FDB entry is not present ASIC DB
print("CHECK: FDB Vlan2:52-54-00-25-06-E9:Ethernet0 is not found in ASIC-DB")
ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY",
[("mac", "52:54:00:25:06:E9"), ("bvid", vlan_oid_2)],
[("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC"),
("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", bp_eth0)]
)
assert ok == False, str(extra)
print("CONFIRM: FDB Vlan2:52-54-00-25-06-E9:Ethernet0 is not found in ASIC-DB")
# check that the FDB entry was not found in STATE DB
print("CHECK: FDB Vlan2:52-54-00-25-06-E9:Ethernet0 is not found in STATE-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.sdb, "FDB_TABLE",
"Vlan2:52:54:00:25:06:e9",
[("port", "Ethernet0"),
("type", "dynamic"),
]
)
assert mac1_found == False, str(extra)
print("CONFIRM: FDB Vlan2:52-54-00-25-06-E9:Ethernet0 is not found in STATE-DB")
print("NEG9 - Add Static MAC M1 to Vlan V1 and Port P1; create V1; assign V1 to P1; remove V1 from P1; remove V1")
mac = "52:54:00:25:06:EF"
#dvs.runcmd("config mac add " + mac.lower() + " 10 Ethernet12")
print("ACTION: Creating static FDB Vlan10|"+mac.lower()+"|Ethernet12 in CONFIG-DB")
create_entry_tbl(
dvs.cdb,
"FDB", "Vlan10|"+mac.lower(),
[
("port", "Ethernet12"),
]
)
time.sleep(5)
# check that the FDB entry was added in Config DB
print("CHECK: Static FDB Vlan10:"+mac.lower()+":Ethernet12 is created in Config-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.cdb, "FDB",
"Vlan10\|"+mac.lower(),
[("port", "Ethernet12")]
)
assert mac1_found, str(extra)
print("CONFIRM: Static FDB Vlan10:"+mac.lower()+":Ethernet12 is created in Config-DB")
# check that the FDB entry was added in APP DB
print("CHECK: Static FDB Vlan10:"+mac.lower()+":Ethernet12 is created in APP-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.pdb, "FDB_TABLE",
"Vlan10:"+mac.lower(),
[("port", "Ethernet12"),
("type", "static")
]
)
assert mac1_found, str(extra)
print("CONFIRM: Static FDB Vlan10:"+mac.lower()+":Ethernet12 is created in APP-DB")
# check that the FDB entry is not inserted into ASIC DB
print("CHECK: Static FDB Vlan10:"+mac.lower()+":Ethernet12 is not created in ASIC-DB")
ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY",
[("mac", mac.lower())],
[("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_STATIC")]
)
assert ok == False, str(extra)
print("CONFIRM: Static FDB Vlan10:"+mac.lower()+":Ethernet12 is not created in ASIC-DB")
dvs.create_vlan("10")
time.sleep(1)
dvs.create_vlan_member("10", "Ethernet12")
time.sleep(1)
# Find the vlan_oid_10 to be used in DB communications
vlan_oid_10 = dvs.getVlanOid("10")
assert vlan_oid_10 is not None, "Could not find Vlan_oid"
print("VLan-10 vlan_oid="+str(vlan_oid_10))
iface_2_bridge_port_id = dvs.get_map_iface_bridge_port_id(dvs.adb)
bp_eth12 = iface_2_bridge_port_id["Ethernet12"]
print("bp_eth12="+str(bp_eth12))
print("CHECK: Static FDB Vlan10:"+mac+":Ethernet12 is created in ASIC-DB")
ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY",
[("mac", mac), ("bvid", str(vlan_oid_10))],
[("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_STATIC"),
("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", bp_eth12)]
)
assert ok, str(extra)
print("CONFIRM: Static FDB Vlan10:"+mac+":Ethernet12 is created in ASIC-DB")
# check that the FDB entry was added in STATE DB
print("CHECK: Static FDB Vlan10:"+mac.lower()+":Ethernet12 is created in STATE-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.sdb, "FDB_TABLE",
"Vlan10:"+mac.lower(),
[("port", "Ethernet12"),
("type", "static"),
]
)
assert mac1_found, str(extra)
print("CONFIRM: Static FDB Vlan10:"+mac.lower()+":Ethernet12 is created in STATE-DB")
print("ACTION: Remove vlan member Ethernet12")
dvs.remove_vlan_member("10", "Ethernet12")
time.sleep(2)
print("CHECK: Static FDB Vlan10:"+mac+":Ethernet12 is deleted from ASIC-DB")
ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY",
[("mac", mac), ("bvid", str(vlan_oid_10))],
[("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_STATIC"),
("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", bp_eth12)]
)
assert ok == False, str(extra)
print("CONFIRM: Static FDB Vlan10:"+mac+":Ethernet12 is deleted from ASIC-DB")
# check that the FDB entry was deleted from STATE DB
print("CHECK: Static FDB Vlan10:"+mac.lower()+":Ethernet12 is deleted from STATE-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.sdb, "FDB_TABLE",
"Vlan10:"+mac.lower(),
[("port", "Ethernet12"),
("type", "static"),
]
)
assert mac1_found == False, str(extra)
print("CONFIRM: Static FDB Vlan10:"+mac.lower()+":Ethernet12 is deleted from STATE-DB")
print("ACTION: Remove vlan Vlan10")
dvs.remove_vlan("10")
time.sleep(2)
print("ACTION: Remove FDB Vlan10|"+mac.lower()+" from Config DB")
remove_entry_tbl(dvs.cdb, "FDB", "Vlan10|"+mac.lower())
time.sleep(2)
#Check the mac is removed from config-db
print("CHECK: Static FDB Vlan10:"+mac.lower()+":Ethernet12 is deleted from Config-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.cdb, "FDB",
"Vlan10\|"+mac.lower(),
[("port", "Ethernet12")]
)
assert mac1_found == False, str(extra)
print("CONFIRM: Static FDB Vlan10:"+mac.lower()+":Ethernet12 is deleted from Config-DB")
# check that the FDB entry is removed from APP DB
print("CHECK: Static FDB Vlan10:"+mac.lower()+":Ethernet12 is deleted from APP-DB")
mac1_found, extra = dvs.is_table_entry_exists(dvs.pdb, "FDB_TABLE",
"Vlan10:"+mac.lower(),
[("port", "Ethernet12"),
("type", "static"),
]
)
assert mac1_found == False, str(extra)
print("CONFIRM: Static FDB Vlan10:"+mac.lower()+":Ethernet12 is deleted from APP-DB")
print("NEG10 - Received move event with invalid bridge-port")
# Move a FDB entry in ASIC DB
print("Action: Creating FDB Vlan2:52-54-00-25-06-EB:Ethernet0 in ASIC-DB")
create_entry_tbl(
dvs.adb,
"ASIC_STATE", "SAI_OBJECT_TYPE_FDB_ENTRY:{\"bvid\":\""+vlan_oid_2+"\",\"mac\":\"52:54:00:25:06:EB\",\"switch_id\":\""+switch_id+"\"}",
[
("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC"),
("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", iface_2_bridge_port_id["Ethernet0"])]
)
ntf = swsscommon.NotificationProducer(dvs.adb, "FDB_NOTIFICATIONS")
fvp = swsscommon.FieldValuePairs()
ntf_data = "[{\"fdb_entry\":\"{\\\"bvid\\\":\\\""+vlan_oid_2+"\\\",\\\"mac\\\":\\\"52:54:00:25:06:EB\\\",\\\"switch_id\\\":\\\""+switch_id+"\\\"}\",\"fdb_event\":\"SAI_FDB_EVENT_LEARNED\",\"list\":[{\"id\":\"SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID\",\"value\":\""+iface_2_bridge_port_id["Ethernet0"]+"\"}]}]"
ntf.send("fdb_event", ntf_data, fvp)
time.sleep(2)
print("Action: Moving FDB Vlan2:52-54-00-25-06-EB:Ethernet0 to non-existing bridge-port Ethernet12")
ntf = swsscommon.NotificationProducer(dvs.adb, "FDB_NOTIFICATIONS")
fvp = swsscommon.FieldValuePairs()
ntf_data = "[{\"fdb_entry\":\"{\\\"bvid\\\":\\\""+vlan_oid_2+"\\\",\\\"mac\\\":\\\"52:54:00:25:06:EB\\\",\\\"switch_id\\\":\\\""+switch_id+"\\\"}\",\"fdb_event\":\"SAI_FDB_EVENT_MOVE\",\"list\":[{\"id\":\"SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID\",\"value\":\""+bp_eth12+"\"}]}]"
ntf.send("fdb_event", ntf_data, fvp)
time.sleep(2)
print("CHECK: FDB Vlan2:52-54-00-25-06-EB is not Moved in STATE-DB")
# check that the FDB entry was not moved in STATE DB
mac2_found, extra = dvs.is_table_entry_exists(dvs.sdb, "FDB_TABLE",
"Vlan2:52:54:00:25:06:eb",
[("port", "Ethernet0"),
("type", "dynamic"),
]
)
assert mac2_found, str(extra)
print("CONFIRM: FDB Vlan2:52-54-00-25-06-EB is not Moved in STATE-DB")
#raw_input("Check at the end")
dvs.runcmd("sonic-clear fdb all")
time.sleep(10)
dvs.remove_vlan_member("2", "Ethernet0")
dvs.remove_vlan_member("2", "Ethernet4")
dvs.remove_vlan("2")
dvs.remove_vlan("4")
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for PNASNet features.
Based on PNASNet ImageNet model: https://arxiv.org/abs/1712.00559
"""
import tensorflow as tf
from meta_architectures import ssd_meta_arch
from models import feature_map_generators
from utils import context_manager
from utils import ops
from nets.nasnet import pnasnet
slim = tf.contrib.slim
def pnasnet_large_arg_scope_for_detection(is_batch_norm_training=False):
"""Defines the default arg scope for the PNASNet Large for object detection.
This provides a small edit to switch batch norm training on and off.
Args:
is_batch_norm_training: Boolean indicating whether to train with batch norm.
Default is False.
Returns:
An `arg_scope` to use for the PNASNet Large Model.
"""
imagenet_scope = pnasnet.pnasnet_large_arg_scope()
with slim.arg_scope(imagenet_scope):
with slim.arg_scope([slim.batch_norm],
is_training=is_batch_norm_training) as sc:
return sc
class SSDPNASNetFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using PNASNet features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""PNASNet Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_depthwise: Whether to use depthwise convolutions.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDPNASNetFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
feature_map_layout = {
'from_layer': ['Cell_7', 'Cell_11', '', '', '', ''],
'layer_depth': [-1, -1, 512, 256, 256, 128],
'use_explicit_padding': self._use_explicit_padding,
'use_depthwise': self._use_depthwise,
}
with slim.arg_scope(
pnasnet_large_arg_scope_for_detection(
is_batch_norm_training=self._is_training)):
with slim.arg_scope([slim.conv2d, slim.batch_norm, slim.separable_conv2d],
reuse=self._reuse_weights):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams else
context_manager.IdentityContextManager()):
_, image_features = pnasnet.build_pnasnet_large(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
num_classes=None,
is_training=self._is_training,
final_endpoint='Cell_11')
with tf.variable_scope('SSD_feature_maps', reuse=self._reuse_weights):
with slim.arg_scope(self._conv_hyperparams_fn()):
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return feature_maps.values()
def restore_from_classification_checkpoint_fn(self, feature_extractor_scope):
"""Returns a map of variables to load from a foreign checkpoint.
Note that this overrides the default implementation in
ssd_meta_arch.SSDFeatureExtractor which does not work for PNASNet
checkpoints.
Args:
feature_extractor_scope: A scope name for the first stage feature
extractor.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
"""
variables_to_restore = {}
for variable in tf.global_variables():
if variable.op.name.startswith(feature_extractor_scope):
var_name = variable.op.name.replace(feature_extractor_scope + '/', '')
var_name += '/ExponentialMovingAverage'
variables_to_restore[var_name] = variable
return variables_to_restore
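# Hedged construction sketch (not part of the original file): in practice the
# extractor is built by the object detection model builder and
# conv_hyperparams_fn comes from the hyperparams builder, not hand-written code.
#   extractor = SSDPNASNetFeatureExtractor(
#       is_training=False, depth_multiplier=1.0, min_depth=16,
#       pad_to_multiple=1, conv_hyperparams_fn=conv_hyperparams_fn)
#   images = tf.placeholder(tf.float32, [1, 640, 640, 3])
#   feature_maps = extractor.extract_features(extractor.preprocess(images))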
#!/usr/bin/env python3
import argparse
parser = argparse.ArgumentParser(description='Compression springs (Druckfedern)')
parser.add_argument('-d', type=float, help='wire diameter')
parser.add_argument('-D', type=float, help='outer coil diameter')
parser.add_argument('-n', type=float, help='number of active coils')
parser.add_argument('-L', type=float, help='unloaded (free) length')
args = parser.parse_args()
# Assume cold worked: total coils = active coils + 2
n_t = args.n + 2
# Minimal length: L_n = L_c (solid length) + S_a (minimum clearance)
L_c = n_t * args.d
S_a = (0.0015 * (args.D ** 2 / args.d) + 0.1 * args.d) * args.n
print("minimum length: {}".format(L_c + S_a))
#!/usr/bin/env python3
import argparse
import os
from os import path
import pandas as pd
from cobra.io import read_sbml_model, write_sbml_model
from cobra.util import solvers
from corda import CORDA
from corda.util import reaction_confidence
from fastcore import Fastcore
from csm4cobra.io import read_json
from csm4cobra.manipulate import set_medium
MEM_METHODS = ['corda', 'fastcore']
SOLVERS = list(solvers.keys())
if 'cglpk' in SOLVERS:
SOLVERS.remove('cglpk')
SOLVERS.append('glpk')
def create_parser():
parser = argparse.ArgumentParser(description='Build a context specific genome-scale metabolic model.')
parser.add_argument('sbml_fname', action="store", help='SBML file to use a the model reference')
parser.add_argument('json_confidences', action="store", help='CSV file storing the gene confidences')
parser.add_argument('model_id', action="store", help='Model id for the builded CSM')
parser.add_argument('--gene-id', action="store", dest="gene_id", default="gene_symbol",
help='Gene identifier name used as column name')
parser.add_argument('--media', action="store", dest="json_exchanges", default=None,
help='JSON file storing the exchange bounds')
parser.add_argument('--out', action="store", dest="output_folder", default=".",
help='Output folder to store the builded CSM')
parser.add_argument('--method', action='store', dest='method', choices=MEM_METHODS, default='corda',
help='Method used to build the CSM: corda / fastcore')
parser.add_argument('--solver', action="store", dest="solver", choices=SOLVERS,
default='glpk', help='LP solver to perform optimizations')
parser.add_argument('--force-biomass', action='store_true', dest='force_biomass',
help='Biomass reactions are set to have the highest confidence')
parser.add_argument('--use-fbc', action='store', dest='use_fbc', default=True, type=bool,
help='Write SBML using FBC package')
parser.add_argument('--write-confidences', action='store_true', dest='write_confidences',
help='Store computed reaction confidences in CSV format')
curdir = path.dirname(path.realpath(__file__))
fastcore_params_fname = path.join(curdir, "config/fastcore.json")
parser.add_argument('--fastcore-params', action="store", dest="fastcore_params_fname",
default=fastcore_params_fname, help='JSON file with fastcore parameters')
return parser
def create_reactions_confidences(model, gene_confidences, column='confidence'):
# Creating a reactions confidence dict using gene confidendes (df_HPAC.CNS)
# and the corda function reaction_confidence
rxn_confidences = dict()
for r in model.reactions:
# Creating a confidence dict for each particular reaction
conf_genes = {g.id: gene_confidences[g.id] for g in r.genes}
rxn_confidences[r.id] = reaction_confidence(r.gene_reaction_rule, conf_genes)
return rxn_confidences
def create_reactions_penalties(reactions_confidence, confidence_penalty_map):
reactions_penalties = {}
for rxn, conf in reactions_confidence.items():
reactions_penalties[rxn] = confidence_penalty_map[conf]
return reactions_penalties
def create_confidences(model, rxns_confidence):
model_rxns = {r.id for r in model.reactions}
df = pd.DataFrame(columns=['confidence', 'in_model'], index=rxns_confidence.keys())
for r, conf in rxns_confidence.items():
df.confidence[r] = conf
df.in_model[r] = r in model_rxns
df.index.name = 'ID'
return df
def generate_model_id(args, sep="_"):
if args.force_biomass:
model_id = sep.join([args.model_id, args.method, args.solver, "biomass"])
else:
model_id = sep.join([args.model_id, args.method, args.solver])
return model_id
def generate_output_fname(args, file_type):
fname = generate_model_id(args)
fname = ".".join([fname, file_type])
fname = os.path.join(args.output_folder, fname)
return fname
def create_fastcore(model, reactions_confidences, fastcore_params, check_consistency=False):
core_rxn_cutoff = fastcore_params['core_rxn_cutoff']
core_reactions = [rxn for rxn, conf in reactions_confidences.items() if conf >= core_rxn_cutoff]
conf_penalty_map = {int(k): v for k, v in fastcore_params['conf_penalty_map'].items()}
penalties = create_reactions_penalties(reactions_confidences, confidence_penalty_map=conf_penalty_map)
for r in model.exchanges:
penalties[r.id] = 0
return Fastcore(model, core_reactions, penalties=penalties, check_consistency=check_consistency)
def main():
parser = create_parser()
args = parser.parse_args()
biomass_reaction = "biomass_reaction"
assert os.path.isfile(args.sbml_fname)
assert os.path.isfile(args.json_confidences)
assert os.path.isdir(args.output_folder)
# Reading Reference Genome-Scale Model
print("Reading SBML Model from %s:" % args.sbml_fname, end=" ")
model = read_sbml_model(args.sbml_fname)
print("OK!")
# Setting optimization solver
print("Setting optimization solver %s:" % args.solver, end=" ")
model.solver = args.solver
print("OK!")
# Reading DataFrame including gene confidence
print("Reading gene confidence from %s:" % args.json_confidences, end=" ")
gene_confidence = read_json(args.json_confidences)
# gene_confidence = pd.read_csv(args.csv_fname, delimiter='\t', index_col=args.gene_id)
# gene_confidence = gene_confidence[args.confidence_column].to_dict()
print("OK!")
print("Computing reactions confidences:", end=" ")
reaction_confidences = create_reactions_confidences(model, gene_confidence)
print("OK!")
# If True: biomass reactions are set to have the highest confidence
if args.force_biomass:
reaction_confidences[biomass_reaction] = 3
print("Adding biomass reactions %s to the core set: OK!" % biomass_reaction)
if args.json_exchanges:
print("Reading exchange fluxes bounds: %s:" % args.json_exchanges, end=" ")
media_dict = read_json(args.json_exchanges)
print("OK!")
print("Setting exchange fluxes bounds:", end=" ")
set_medium(model, media_dict, inplace=True)
print("OK!")
print("Building context-specific model using \"%s\":" % args.method)
if args.method == "corda":
corda = CORDA(model, reaction_confidences)
corda.build()
cs_model = corda.cobra_model()
elif args.method == "fastcore":
print("Reading fastcore parameters: %s:" % args.fastcore_params_fname, end=" ")
fastcore_params = read_json(args.fastcore_params_fname)
print("OK!")
fc_builder = create_fastcore(model, reaction_confidences, fastcore_params)
fc_builder.fast_core()
cs_model = fc_builder.build_context_specific_model()
print("OK!")
# Removing isolated metabolites
isolated_metabolites = [m for m in cs_model.metabolites if len(m.reactions) == 0]
cs_model.remove_metabolites(isolated_metabolites)
isolated_genes = [g for g in cs_model.genes if len(g.reactions) == 0]
for g in isolated_genes:
cs_model.genes.remove(g)
model_id = generate_model_id(args)
cs_model.id = model_id
cs_model.repair()
sbml_output = generate_output_fname(args, 'xml')
print("Writing context-specific model to %s:" % sbml_output, end=" ")
# write_sbml_model(cs_model, sbml_output, use_fbc_package=args.use_fbc)
write_sbml_model(cs_model, sbml_output)
print("OK!")
if args.write_confidences:
csv_output = generate_output_fname(args, 'tsv')
df_rxn_conf = create_confidences(cs_model, reaction_confidences)
print("Writing reaction confidences to %s:" % csv_output, end=" ")
df_rxn_conf.to_csv(csv_output, sep='\t')
print("OK!")
if __name__ == "__main__":
    main()
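# Hedged example invocation (file names and model id are placeholders):
#   ./build_csm.py Recon3D.xml gene_confidences.json MY_MODEL \
#       --method corda --solver glpk --force-biomass --write-confidences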
from django.contrib import admin
from .models import *
admin.site.register(StatusPoint)
admin.site.register(Indicator)
# Register your models here.
# Enter your code here. Read input from STDIN. Print output to STDOUT
a = float(input())
b = float(input())
print(int(a//b))
print(a/b)
ies = []
ies.append({ "iei" : "2D", "value" : "Authentication response parameter", "type" : "Authentication response parameter", "reference" : "172.16.58.3", "presence" : "O", "format" : "TLV", "length" : "6-18"})
ies.append({ "iei" : "78", "value" : "EAP message", "type" : "EAP message", "reference" : "192.168.3.11", "presence" : "O", "format" : "TLV-E", "length" : "7-1503"})
msg_list[key]["ies"] = ies
#!/usr/bin/env python
# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import monasca_analytics.banana.grammar.ast as ast
import monasca_analytics.banana.typeck.type_util as util
import monasca_analytics.exception.banana as exception
import monasca_analytics.util.string_util as strut
import six
class TypeTable(object):
"""
    Type table. Supports lookups for JSON-like objects.
    JSON-like objects have properties that need to be
    type-checked, and the TypeTable stores that information
    as well. All type values are rooted at their variable name.
    Every time a variable's type is erased, we create a new
    snapshot of the variable types. This allows a variable's
    type to change as the statements are executed.
"""
def __init__(self):
self._variables_snapshots = [(0, {})]
self._variables = self._variables_snapshots[0][1]
def get_type(self, var, statement_index=None):
variables = self.get_variables(statement_index)
if isinstance(var, ast.Ident):
if var in variables:
return variables[var]
else:
raise exception.BananaUnknown(var)
# If we encounter a dot path:
if isinstance(var, ast.DotPath):
if var.varname in variables:
if len(var.properties) > 0:
return variables[var.varname][var.next_dot_path()]
else:
return variables[var.varname]
else:
raise exception.BananaUnknown(var.varname)
raise exception.BananaTypeCheckerBug("Unkown type for {}".format(var))
def set_type(self, var, _type, statement_index):
"""
Set the type for the given var to _type.
:type var: ast.Ident | ast.DotPath
:param var: The var to set a type.
:type _type: util.Object | util.Component | util.String | util.Number
:param _type: The type for the var.
:type statement_index: int
:param statement_index: The statement at which this assignment was
made.
"""
if _type is None:
raise exception.BananaTypeCheckerBug(
"'None' is not a valid banana type"
)
if isinstance(var, ast.Ident):
self._check_needs_for_snapshot(var, _type, statement_index)
self._variables[var] = _type
return
if isinstance(var, ast.DotPath):
if util.is_comp(_type) and len(var.properties) > 0:
raise exception.BananaAssignCompError(var.span)
if len(var.properties) == 0:
self._check_needs_for_snapshot(
var.varname,
_type,
statement_index
)
self._variables[var.varname] = _type
else:
if var.varname in self._variables:
var_type = self._variables[var.varname]
if isinstance(var_type, util.Object):
new_type = util.create_object_tree(
var.next_dot_path(), _type)
util.attach_to_root(var_type, new_type, var.span,
erase_existing=True)
elif isinstance(var_type, util.Component):
var_type[var.next_dot_path()] = _type
else:
raise exception.BananaTypeError(
expected_type=util.Object,
found_type=type(var)
)
# Var undeclared, declare its own type
else:
new_type = util.create_object_tree(var.next_dot_path(),
_type)
self._variables[var.varname] = new_type
return
raise exception.BananaTypeCheckerBug("Unreachable code reached.")
def get_variables(self, statement_index=None):
"""
Returns the list of variables with their associated type.
:type statement_index: int
:param: Statement index.
:rtype: dict[str, util.Object|util.Component|util.String|util.Number]
"""
if statement_index is None:
return self._variables
variables = {}
for created_at, snap in self._variables_snapshots:
if created_at < statement_index:
variables = snap
else:
break
return variables
def get_variables_snapshots(self):
return self._variables_snapshots
def _check_needs_for_snapshot(self, var, _type, statement_index):
if var in self._variables:
# If we shadow a component, we need to raise an error
if util.is_comp(self._variables[var]):
raise exception.BananaShadowingComponentError(
where=var.span,
comp=self._variables[var].class_name
)
# If we change the type of the variable, we create a new snapshot:
# This is very strict but will allow to know exactly how
# the type of a variable (or a property) changed.
if self._variables[var] != _type:
self._create_snapshot(statement_index)
def _create_snapshot(self, statement_index):
"""
Create a new snapshot of the variables.
:type statement_index: int
:param statement_index: index of the statement
(should be strictly positive)
"""
new_snapshot = copy.deepcopy(self._variables)
self._variables_snapshots.append((
statement_index, new_snapshot
))
self._variables = new_snapshot
def to_json(self):
"""
Convert this type table into a dictionary.
Useful to serialize the type table.
:rtype: dict
:return: Returns this type table as a dict.
"""
res = {}
for key, val in six.iteritems(self._variables):
res[key.inner_val()] = val.to_json()
return res
def __contains__(self, key):
"""
Test if the type table contains or not the provided
path. This function is more permissive than the other two.
It will never raise any exception (or should aim not to).
        :type key: six.string_types | ast.Ident | ast.DotPath
:param key: The key to test.
:return: Returns True if the TypeTable contains a type for the
given path or identifier.
"""
if isinstance(key, six.string_types):
return key in self._variables
if isinstance(key, ast.Ident):
return key.val in self._variables
if isinstance(key, ast.DotPath):
res = key.varname in self._variables
if not res:
return False
val = self._variables[key.varname]
for prop in key.properties:
if isinstance(val, util.Object):
if prop in val.props:
val = val[prop]
else:
return False
return True
return False
def __str__(self):
return strut.dict_to_str(self._variables)
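# Hedged illustration of the snapshot behaviour described in the class
# docstring (identifiers are shown as plain names for brevity; real callers
# pass ast.Ident / ast.DotPath nodes and util.* type objects):
#   table.set_type(x, Number, statement_index=1)   # first assignment, no snapshot
#   table.set_type(x, String, statement_index=2)   # type changes -> new snapshot
#   table.get_type(x, statement_index=1)           # still resolves to Number
#   table.get_type(x)                              # latest type: String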
'''
Utility to assist in converting CSV format of Kaiwan Syllabus to
a technique list. Expects CSV format in a particular format that
requires the Excel spreadsheet to be altered according to
example below.
Example
-------------
THROWING TECHNIQUES (NAGE WAZA),,,,,,,,,,,,,,,,
,STANDING TECHNIQUES (TACHI WAZA),,,,,,,,,,,,,,,
,,HIP TECHNIQUES (KOSHI WAZA) ,,,,,,,,,,,,,,
"Kubi Nage (Neck Throw) [regular, aiki]",66,70,x,,x,,x,,x,,x,,x,,x,
O Goshi (Major Hip Throw),,65,x,,x,,x,,x,,x,,x,,x,
Given the lines of CSV above, the first three lines are interpreted
as category and subcategories based on position in column 1, 2 or 3.
The last two lines are interpreted as techniques and use various columns
to populate technique information as shown below:
Technique name, Kirby '83 pages, Kirby '99 pages, Kodokan pages, Yellow technique,,Green technique,,Yellow-Green technique,,Purple technique,,Brown3 technique,,Brown2 technique,,Brown3 technique,
'''
from csv import reader
import json
def isCategory(row):
if not row[15]:
return True
return False
def beltTest(row):
if row[3]:
return "Yellow"
if row[5]:
return "Green"
if row[7]:
return "Green Yellow"
if row[9]:
return "Purple"
if row[11]:
return "Brown 3"
if row[13]:
return "Brown 2"
if row[15]:
return "Brown 1"
raise ("Can't find belt test")
def getKirby83Pages(row):
col = row[1]
if not col:
return ''
toks = col.split('[')
# 1 or 1[2] or [2]
if len(toks) == 1:
return toks[0].strip()
else:
if toks[0]:
return toks[0].strip()
return ''
def getKirby99Pages(row):
col = row[1]
if not col:
return ''
toks = col.split('[')
# 1 or 1[2] or [2]
if len(toks) == 2:
return (toks[1])[:-1].strip()
return ''
def getKodokan97Pages(row):
if row[2]:
return row[2]
return ''
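# Hedged illustration of the page-parsing helpers above, using a made-up row
# where column 1 holds the Kirby '83 pages with the '99 pages in brackets and
# column 2 holds the Kodokan pages:
#   row = ["Kubi Nage (Neck Throw)", "66[70]", "123", "x", ...]
#   getKirby83Pages(row)   -> "66"
#   getKirby99Pages(row)   -> "70"
#   getKodokan97Pages(row) -> "123"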
with open('Jujitsu-Syllabus-201701.csv', 'r') as read_obj:
csv_reader = reader(read_obj)
category1=''
category2=''
category3=''
techniques = []
books = []
id=1
currRow=1
for row in csv_reader:
# print("Reading line: ", currRow)
# currRow+=1
#Retrieve category
if isCategory(row):
if row[0]:
category1 = row[0]
category2 = ''
category3 = ''
if row[1]:
category2 = row[1]
if row[2]:
category3 = row[2]
else:
if not isCategory(row):
#Build technique JSON
t = {}
#id used by reactjs
t['id'] = id
id += 1
# technique name
t['name'] = row[0].strip()
# categories
categories = []
if category1:
categories.append(category1.strip().title())
if category2:
categories.append(category2.strip().title())
if category3:
categories.append(category3.strip().title())
t['categories'] = categories
# belt tested
t['belt'] = beltTest(row)
# placeholder for video
t['video'] = ''
# placeholder for description
t['description'] = ''
# book pages
t['pages_kirby83'] = getKirby83Pages(row)
t['pages_kirby99'] = getKirby99Pages(row)
t['pages_kodokan97'] = getKodokan97Pages(row)
# add technique to list
techniques.append(t)
print(json.dumps(techniques, indent=4))
import numpy as np
import tensorflow as tf
from tensorflow import keras
import h5py
import json
import cv2
import math
import logging
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.xception import preprocess_input, decode_predictions
logging.basicConfig(level = logging.INFO)
sampling_rate = 5
# Use separate list objects: chained assignment would alias both names to one list.
sampled_frames = []
frame_stamps = []
top1_labels = []
top1_scores = []
def sampling_time_stamps(_sample_path):
    cap = cv2.VideoCapture(_sample_path)
    total_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    logging.info('Total no. of frames in video: %d', total_frame_count)
    for i in range(sampling_rate):
        val = round(total_frame_count / sampling_rate) * (i + 1)
        frame_stamps.append(val)
    # Return the capture handle so the frame-sampling step can reuse it.
    return cap
def sampling_frames(cap):
    frameId, frame_count = sampling_rate, 0
    success, frame = cap.read()
    while success:
        frame_count += 1
        if frame_count in frame_stamps and frameId >= 1:
            # Resize to the 299x299 input size expected by Xception.
            frame = cv2.resize(frame, (299, 299))
            sampled_frames.append(frame)
            frameId -= 1
        success, frame = cap.read()
def generate_and_average_predictions():
base_model = keras.applications.Xception(
weights='imagenet') # Load weights pre-trained on ImageNet.
for i in range(len(sampled_frames)):
img = sampled_frames[i]
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = base_model.predict(x)
print('Prediction level:', (i+1), decode_predictions(preds, top=5)[0])
top1_labels.append(decode_predictions(preds, top=1)[0][0][1])
top1_scores.append(decode_predictions(preds, top=1)[0][0][2])
return top1_labels, top1_scores
def run(sample_path):
    cap = sampling_time_stamps(sample_path)
    sampling_frames(cap)
    cap.release()
    labels, scores = generate_and_average_predictions()
    return labels, scores
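# Hedged usage sketch (the video path is a made-up placeholder):
#   labels, scores = run("sample_video.mp4")
#   print(list(zip(labels, scores)))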
#!/usr/bin/python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
## _
## | |
## __| | __ ___ ___ ___
## / _` |/ _` \ \ /\ / / '_ |
## | (_| | (_| |\ V V /| | | |
## \__,_|\__,_| \_/\_/ |_| |_| - Compiler Toolchain
##
##
## This file is distributed under the MIT License (MIT).
## See LICENSE.txt for details.
##
##===------------------------------------------------------------------------------------------===##
if __name__ != '__main__':
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
"""Initialize CommissioningIssues into database."""
import re
import os
import copy
import github3
import logging # noqa
import numpy as np
import pandas as pd
from astropy.time import Time
from dateutil import parser as dateparser
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from heranow import settings
from dashboard.models import CommissioningIssue
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""Command to add issues to DB."""
help = "Read data from redis databse and update local django database."
def handle(self, *args, **options):
"""Access github API and Initialize DB with all daily issues."""
key = settings.GITHUB_APP_KEY
app_id = settings.GITHUB_APP_ID
gh = github3.github.GitHub()
gh.login_as_app(key.encode(), app_id)
ap = gh.authenticated_app()
inst = gh.app_installation_for_repository("HERA-Team", "HERA_Commissioning")
gh.login_as_app_installation(key.encode(), ap.id, inst.id)
repo = gh.repository("HERA-Team", "HERA_Commissioning")
issues = repo.issues(labels="Daily", state="all")
local_issue_regex = r"[^a-zA-Z0-9]#(\d+)"
# the foreign issue reference may be useful in the future
# foreign_issue_regex = r"[a-zA-Z0-9]#(\d+)"
jd_list = []
issue_list = []
for cnt, issue in enumerate(issues):
try:
jd = int(issue.title.split(" ")[-1])
except ValueError:
match = re.search(r"\d{7}", issue.title)
if match is not None:
jd = int(match.group())
else:
continue
obs_date = Time(jd, format="jd")
try:
obs_date = timezone.make_aware(obs_date.datetime)
except ValueError:
                # there's a weirdly named issue that breaks this
continue
jd_list.insert(0, jd)
obs_end = obs_date + timedelta(days=1)
num_opened = len(
list(repo.issues(state="all", sort="created", since=obs_date))
) - len(list(repo.issues(state="all", sort="created", since=obs_end)))
other_labels = [lab.name for lab in issue.labels() if lab.name != "Daily"]
iss_nums = map(int, re.findall(local_issue_regex, issue.body))
related_issues = set()
related_issues.update(iss_nums)
for comm in issue.comments():
                nums = map(int, re.findall(local_issue_regex, comm.body))
related_issues.update(nums)
related_issues = sorted(related_issues)
iss = CommissioningIssue(
julian_date=jd,
number=issue.number,
related_issues=related_issues,
labels=other_labels,
new_issues=num_opened,
)
issue_list.append(iss)
CommissioningIssue.objects.bulk_create(issue_list, ignore_conflicts=True)
jd_list = np.sort(jd_list).astype(int)
full_jd_range = np.arange(jd_list.min(), int(np.floor(Time.now().jd)) + 1)
new_issues = []
for jd in np.setdiff1d(full_jd_range, jd_list):
iss = CommissioningIssue(julian_date=jd,)
new_issues.append(iss)
CommissioningIssue.objects.bulk_create(new_issues, ignore_conflicts=True)
|
StarcoderdataPython
|
1969462
|
from .queue import Queue
class AnimalShelter:
""" AnimalShelter class"""
def __init__(self):
self.pseudo_queue = Queue()
self._length = 0
def enqueue(self, obj):
""" add either a dog or cat object """
self.pseudo_queue.enqueue(obj)
self._length += 1
def dequeue(self, pref):
""" return either the longest-waiting cat or dog"""
pref = pref.lower()
pref = '{}{}'.format(pref[0].upper, pref[1:])
if type(self.pseudo_queue.dequeue()) == pref:
return self.pseudo_queue.dequeue()
current = self.pseudo_queue.dequeue()
first_obj_name = current.name
while type(current) != pref:
self.pseudo_queue.enqueue(current)
current = self.pseudo_queue.dequeue()
if self.pseudo_queue.dequeue().name == first_obj_name:
return
while True:
if self.pseudo_queue.dequeue().name != first_obj_name:
self.pseudo_queue.enqueue(self.pseudo_queue.dequeue())
break
return current
|
StarcoderdataPython
|
11211014
|
# Copyright (c) 2015 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
"""
Authentication and session renewal handling.
This module handles asking the user for their password, login etc.
It will try to use a QT UI to prompt the user if possible, but may
fall back on a console (stdin/stdout) based workflow if QT isn't available.
--------------------------------------------------------------------------------
NOTE! This module is part of the authentication library internals and should
not be called directly. Interfaces and implementation of this module may change
at any point.
--------------------------------------------------------------------------------
"""
# Using "with" with the lock to make sure it is always released.
from __future__ import with_statement
from .errors import AuthenticationCancelled
from .console_authentication import ConsoleLoginHandler, ConsoleRenewSessionHandler
from .ui_authentication import UiAuthenticationHandler
from .. import LogManager
import threading
import sys
import os
logger = LogManager.get_logger(__name__)
###############################################################################################
# internal classes and methods
def _get_current_os_user():
"""
Gets the current operating system username.
:returns: The username string.
"""
if sys.platform == "win32":
# http://stackoverflow.com/questions/117014/how-to-retrieve-name-of-current-windows-user-ad-or-local-using-python
return os.environ.get("USERNAME", None)
else:
try:
import pwd
pwd_entry = pwd.getpwuid(os.geteuid())
return pwd_entry[0]
except:
return None
def _get_qt_state():
"""
Returns the state of Qt: the libraries available and if we have a ui or not.
:returns: If Qt is available, a tuple of (QtCore, QtGui, has_ui_boolean_flag).
Otherwise, (None, None, False)
"""
qt_core = None
qt_gui = None
qapp_instance_active = False
try:
from .ui.qt_abstraction import QtGui, QtCore
qt_core = QtCore
qt_gui = QtGui
qapp_instance_active = (QtGui.QApplication.instance() is not None)
except:
pass
return (qt_core, qt_gui, qapp_instance_active)
class SessionRenewal(object):
"""
Handles multi-threaded session renewal. This class handles the use case when
multiple threads simultaneously try to ask the user for a password.
Use this class by calling the static method renew_session(). Please see this method
for more details.
"""
    # Lock that assures only one thread at a time can execute the authentication logic.
_renew_session_internal_lock = threading.Lock()
# List of possible states for session renewal.
WAITING, CANCELLED, SUCCESS = range(3)
# When a thread cancels session renewal, this flag is set so other threads know
# to raise an exception as well.
_auth_state = WAITING
    # Makes access to the thread count, and the logic that depends on it,
    # thread-safe.
_renew_session_lock = threading.Lock()
# Number of threads who are trying to renew the session.
_renew_session_thread_count = 0
@staticmethod
def _renew_session_internal(user, credentials_handler):
"""
Prompts the user for the password. This method should never be called directly
and _renew_session should be called instead.
:param user: SessionUserImpl instance of the user that needs its session
renewed.
:param credentials_handler: Object that actually prompts the user for
credentials.
:raises AuthenticationCancelled: Raised if the authentication is cancelled.
"""
logger.debug("About to take the authentication lock.")
with SessionRenewal._renew_session_internal_lock:
logger.debug("Took the authentication lock.")
            # When authentication is cancelled, every thread that enters the authentication
# critical section should throw as well.
if SessionRenewal._auth_state == SessionRenewal.CANCELLED:
raise AuthenticationCancelled()
# If authentication was successful, simply return.
elif SessionRenewal._auth_state == SessionRenewal.SUCCESS:
return
# We're the first thread, so authenticate.
try:
logger.debug("Not authenticated, requesting user input.")
hostname, login, session_token = credentials_handler.authenticate(
user.get_host(),
user.get_login(),
user.get_http_proxy()
)
SessionRenewal._auth_state = SessionRenewal.SUCCESS
logger.debug("Login successful!")
user.set_session_token(session_token)
except AuthenticationCancelled:
SessionRenewal._auth_state = SessionRenewal.CANCELLED
logger.debug("Authentication cancelled")
raise
@staticmethod
def renew_session(user, credentials_handler):
"""
Prompts the user for the password. This method is thread-safe, meaning if
multiple users call this method at the same time, it will keep track of
how many threads are currently running inside it and all threads waiting
for the authentication to complete will return with the same result
as the thread that actually did the authentication, either returning or
raising an exception.
:param user: SessionUser we are re-authenticating.
:param credentials_handler: Object that actually prompts the user for
credentials.
:raises AuthenticationCancelled: If the user cancels the authentication,
this exception is raised.
"""
# One more renewer.
with SessionRenewal._renew_session_lock:
SessionRenewal._renew_session_thread_count += 1
try:
# Renew the session
SessionRenewal._renew_session_internal(user, credentials_handler)
finally:
# We're leaving the method somehow, cleanup!
with SessionRenewal._renew_session_lock:
# Decrement the thread count
SessionRenewal._renew_session_thread_count -= 1
# If we're the last one, clear the cancel flag.
if SessionRenewal._renew_session_thread_count == 0:
SessionRenewal._auth_state = SessionRenewal.WAITING
# At this point, if the method _renew_session_internal simply
# returned, this method returns. If the method raised an exception,
# it will keep being propagated.
###############################################################################################
# public methods
def renew_session(user):
"""
    Prompts the user to enter their password on the console or in a UI to
    retrieve a new session token.
:param user: SessionUser that needs its session token refreshed.
:raises AuthenticationCancelled: If the user cancels the authentication,
this exception is raised.
"""
logger.debug("Credentials were out of date, renewing them.")
QtCore, QtGui, has_ui = _get_qt_state()
# If we have a gui, we need gui based authentication
if has_ui:
authenticator = UiAuthenticationHandler(is_session_renewal=True)
else:
authenticator = ConsoleRenewSessionHandler()
SessionRenewal.renew_session(user, authenticator)
def authenticate(default_host, default_login, http_proxy, fixed_host):
"""
    Prompts the user for their user name and password. If the host is not fixed,
it is also possible to edit the host. If Qt is available and an QApplication
instantiated, a dialog will prompt for user input. If not, the console will
prompt instead.
:param default_host: Default host to present to the user.
:param default_login: Default login to present to the user.
:param http_proxy: Proxy to use to connect to the host.
:param fixed_host: If True, the host won't be editable.
:returns: The (hostname, login, session token) tuple for this authenticated
user.
:raises AuthenticationCancelled: If the user cancels the authentication,
this exception is raised.
"""
# If there is no default login, let's provide the os user's instead.
default_login = default_login or _get_current_os_user()
QtCore, QtGui, has_ui = _get_qt_state()
# If we have a gui, we need gui based authentication
if has_ui:
# If we are renewing for a background thread, use the invoker
authenticator = UiAuthenticationHandler(is_session_renewal=False, fixed_host=fixed_host)
else:
authenticator = ConsoleLoginHandler(fixed_host=fixed_host)
return authenticator.authenticate(default_host, default_login, http_proxy)
|
StarcoderdataPython
|
3442124
|
<reponame>trickeydan/mqtt-automate
"""MQTT Automate API."""
import argparse
import asyncio
import logging
from pathlib import Path
from typing import Callable, Dict, Match, Optional
from .engine import AutomationEngine, OnMessageHandler
from .mqtt import Topic
loop = asyncio.get_event_loop()
LOGGER = logging.getLogger(__name__)
class MQTTAutomate:
"""MQTT Automation."""
def __init__(self) -> None:
self._handlers: Dict[Topic, OnMessageHandler] = {}
def app(self, verbose: bool, config_file: Optional[str]) -> None:
"""Main function for MQTTAutomate."""
mqtt = AutomationEngine(verbose, config_file, self._handlers)
loop.run_until_complete(mqtt.run())
def run(self) -> None:
"""Start the automation engine."""
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-c", "--config-file", type=Path, default=None)
args = parser.parse_args()
self.app(args.verbose, args.config_file)
def on_message(self, topic: str) -> Callable[[OnMessageHandler], OnMessageHandler]:
"""Register a topic to react to."""
def decorator(func: OnMessageHandler) -> OnMessageHandler:
async def wrapper(
engine: AutomationEngine,
match: Match[str],
payload: str,
) -> None:
LOGGER.info(f"INVOKE {topic} -> {func.__name__}")
await func(engine, match, payload)
# Register handler
self._handlers[Topic.parse(topic)] = wrapper
return wrapper
return decorator
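# Usage sketch added for illustration (not part of the original module): the topic string and
# handler below are hypothetical and only assume that Topic.parse() accepts MQTT-style wildcards
# and that on_message()/run() behave as defined above.
if __name__ == "__main__":
    automate = MQTTAutomate()
    @automate.on_message("home/+/light")
    async def on_light(engine: AutomationEngine, match: Match[str], payload: str) -> None:
        # React to a light state change published on the matched topic.
        LOGGER.info("light payload: %s", payload)
    automate.run()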
|
StarcoderdataPython
|
6681969
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import re
from itertools import combinations
#import sys
print, rrr, profile = ut.inject2(__name__, '[rules]')
# TODO: remove static class. just use the module
#INF = sys.maxint
#HAS_NUMPY = False
#if HAS_NUMPY:
#INF = np.inf
INF = 1000 # pretty much infinite
def english_number(numtext):
try:
return int(numtext)
except Exception:
lookup = {'one': 1, 'two': 2, 'three': 3, 'four': 4} # , '∞': INF}
return lookup[numtext]
ETB = ' enters the battlefield '
COLORLESS_SYMS = 'C'
COLOR_SYMS = 'WUBRG'
ALL_MANA_SYM = COLOR_SYMS + COLORLESS_SYMS
COLORLESS_MANASYM = ut.named_field('colorless_manasym', '{[0-9]+}')
MANASYM = ut.named_field('manasym', '({[' + ALL_MANA_SYM + ']})+')
def _fill(name=None):
return ut.named_field(name, '.*?')
def is_ability_block(block):
return ':' in block or (len(block) == 1 and (block in ALL_MANA_SYM))
def mana_generated(block, card, new=False, debug=False):
r"""
Parse the string representation of mana generated
CommandLine:
python -m mtgmonte.mtgrules --exec-mana_generated
python -m mtgmonte.mtgrules --exec-mana_generated --cards "Reflecting Pool" --debug-mana
Example:
>>> # ENABLE_DOCTEST
>>> from mtgmonte.mtgrules import * # NOQA
>>> from mtgmonte import mtgobjs
>>> testmana_cards_ = [
>>> 'Flooded Strand',
>>> 'Tundra',
>>> 'Island',
>>> 'Shivan Reef',
>>> 'Ancient Tomb',
>>> 'Black Lotus',
>>> 'Mox Lotus',
>>> 'Mox Ruby',
>>> 'Mox Diamond', 'Chrome Mox',
>>> 'Elvish Mystic', 'Lion\'s Eye Diamond',
>>> 'Grim Monolith', 'Tolarian Academy',
>>> 'City of Brass', 'Mana Confluence',
>>> 'Lake of the Dead', 'Snow-Covered Island',
>>> 'Reflecting Pool',
>>> 'Mirrorpool', 'Wastes',
>>> # 'Dark Ritual', (dark rit does not have ability blocks)
>>> ]
>>> testmana_cards = ut.get_argval('--cards', type_=list, default=testmana_cards_)
>>> DEBUG = ut.get_argflag('--debug-mana')
>>> print('testmana_cards = %r' % (testmana_cards,))
>>> cards = mtgobjs.load_cards(testmana_cards)
>>> for card in cards:
>>> print('\n-----')
>>> print(card)
>>> if DEBUG:
>>> card.printinfo()
>>> for block in card.ability_blocks:
>>> #print('block = %r' % (block,))
>>> print(mana_generated(block, card, debug=DEBUG))
Ignore:
>>> card = cards[1]
>>> card = cards[-1]
>>> block = card.ability_blocks[0]
>>> result = mana_generated(block, card)
>>> print(result)
"""
if debug:
print('block = %s' % (block,))
#esc = re.escape
if block in MANASYM:
mana_generated = ['{' + block + '}']
else:
        # Most mana generators look like this
managen_line1 = 'Add ' + _fill('managen') + ' to your mana pool ?' + _fill('modifier') + '$'
# Some (like reflecting pool look like this)
managen_line2 = 'Add to your mana pool ' + _fill('managen') + ' mana ?' + _fill('modifier') + '$'
managen_line_regexes = [managen_line1, managen_line2]
#,
#print('block = %r' % (block,))
#esc('(') + managen_line + esc(')')]
any_matched = False
mana_generated = []
for count, managen_line in enumerate(managen_line_regexes):
match = re.search(managen_line, block)
if match is None:
if debug:
print('Did not match managen_line %d' % (count + 1))
elif match is not None:
any_matched = True
manatext = match.groupdict()['managen']
modifier = match.groupdict()['modifier']
if debug:
print(' * Matched managen_line=%r (%d)' % (managen_line, count + 1))
print(' * modifier = %r' % (modifier,))
print(' * manatext = %r' % (manatext,))
for x in re.finditer(MANASYM, manatext):
z = x.groupdict()
mana_generated += [z['manasym']]
for x in re.finditer(_fill('num') + ' mana of any one color', manatext):
numtxt = x.groupdict()['num']
num = english_number(numtxt)
mana_generated += ['{' + (c * num) + '}' for c in COLOR_SYMS]
for x in re.finditer(_fill('num') + ' mana of any color', manatext):
numtxt = x.groupdict()['num']
num = english_number(numtxt)
mana_generated += ['{' + ''.join(comb) + '}' for comb in combinations(COLOR_SYMS, num)]
for x in re.finditer(_fill('num') + ' mana of any of the ' +
_fill('refcard') + ' colors', manatext):
print('Refer card hack')
num = english_number(x.groupdict()['num'])
refcard = x.groupdict()['refcard']
# chrome mox hack
if refcard == 'exiled card\'s':
# TODO: Refers to part of the game state
# Assume any color for now
mana_generated += ['{' + ('*' * num) + '}'] # for c in COLOR_SYMS]
#print('num = %r' % (num,))
#print('refcard = %r' % (refcard,))
#mana_generated += ['{' + (c * num) + '}' for c in COLOR_SYMS]
if manatext == 'one':
num = 1
# Deal with colorless mana
for x in re.finditer(COLORLESS_MANASYM, manatext):
num = english_number(x.groupdict()['colorless_manasym'].strip('{}'))
mana_generated += ['{' + ('C' * num) + '}']
# Mox lotus hack
if manatext.strip('{}') == '∞':
num = INF
mana_generated += [{'C': '∞'}]
# Tolarian acadamy hack
if modifier == 'for each artifact you control.':
#print('modifier = %r' % (modifier,))
# TODO: Refers to part of the game state
mana_generated = [{c.strip('{}'): '*'} for c in mana_generated]
print('Refer card hack')
# Reflecting pool hack
if modifier == 'of any type that a land you control could produce.':
mana_generated += ['{' + ('*' * num) + '}'] # for c in COLOR_SYMS]
pass
if not any_matched and len(mana_generated) == 0:
#mana_generated = []
mana_generated = None
if mana_generated is not None:
from mtgmonte import mtgobjs
# sources = None
sources = [card]
options = [mtgobjs.ManaSet(manas, sources) for manas in mana_generated]
mana_generated = mtgobjs.ManaOption(options)
return mana_generated
class RuleHeuristics(object):
"""
    Defines simple heuristics to determine
    relevant info about a block of rules text.
cls = RuleHeuristics
"""
ETB = ' enters the battlefield '
COLOR_SYMS = 'WUBRG'
COLORLESS_SYMS = 'C'
MANASYM = ut.named_field('manasym', '{[' + COLOR_SYMS + COLORLESS_SYMS + ']}')
@classmethod
def _iter_blocks(cls, card):
rule_blocks = card.rules_text.split(';')
for block in rule_blocks:
block = block.strip(' ')
yield block
@classmethod
def _fill(cls, name=None):
return _fill(name)
@classmethod
def mana_generated(cls, block, card):
return mana_generated(block, card)
@classmethod
def is_triland(cls, block, card):
mana = mana_generated(block, card)
return mana is not None and len(mana) == 3
@classmethod
def is_fetchland(cls, block, card):
return is_fetchland(block, card)
@classmethod
def get_fetched_lands(cls, block, card):
return get_fetched_lands(block, card)
@classmethod
def is_tapland(cls, block, card):
return block == (card.name + cls.ETB + 'tapped.')
@classmethod
def is_tangoland(cls, block, card):
return block == (
card.name + cls.ETB +
'tapped unless you control two or more basic lands.')
@classmethod
def is_painland(cls, block, card):
pain_regex = (
'{T}: Add ' + cls._fill() + ' to your mana pool. ' + card.name +
' deals 1 damage to you.'
)
match = re.search(pain_regex, block)
return match is not None
@classmethod
def is_mana_ability(cls, effect):
pain_regex = (
'Add ' + cls._fill() + ' to your mana pool'
)
match = re.search(pain_regex, effect)
return match is not None
def get_fetch_search_targets(effect, card, deck=None):
from mtgmonte import mtgobjs
valid_types = RuleHeuristics.get_fetched_lands(effect, card)
targets = []
for type_ in valid_types:
if deck is None:
# Infer normal sort of thing out of deck context
card = mtgobjs.lookup_card(type_)
            targets.append(card)  # 'targets' is a list, so append rather than add
else:
for card in deck.library:
alltypes = card.subtypes + card.types
alltypes = [x.lower() for x in alltypes]
if ut.is_subset(type_, alltypes):
targets += [card]
return targets
def get_fetched_lands(block, card):
fetch_regex = (
'Search your library for an? ' +
_fill('landtypes') +
' card and put it onto the battlefield' +
ut.named_field('istapped', ' tapped') +
'?')
match = re.search(fetch_regex, block)
valid_types = None
if match is not None:
groupdict = match.groupdict()
landtypes = groupdict['landtypes'].split(' or ')
valid_types = [
[x.lower() for x in type_.split(' ')]
for type_ in landtypes
]
#landtypes = groupdict['landtypes'].split(' or ')
#if groupdict['istapped']:
# landtypes = ['tap-' + type_ for type_ in landtypes]
return valid_types
def is_fetchland(block, card):
return get_fetched_lands(block, card) is not None
if __name__ == '__main__':
r"""
CommandLine:
python -m mtgmonte.mtgrules
python -m mtgmonte.mtgrules --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
StarcoderdataPython
|
48663
|
from django import forms
class UserForm(forms.Form):
name = forms.CharField(max_length=30)
email = forms.CharField(max_length=30, widget = forms.EmailInput)
password = forms.CharField(widget = forms.PasswordInput)
class Meta:
fields = ['name', 'email', 'password']
# class HandlerForm(forms.ModelForm):
# class Meta:
# model = Handlers
# fields = ['handle', 'handler_name']
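# Usage sketch added for illustration: binding and validating the form with hypothetical data.
# form = UserForm({'name': 'alice', 'email': 'alice@example.com', 'password': 'secret'})
# if form.is_valid():
#     print(form.cleaned_data['name'])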
|
StarcoderdataPython
|
11258245
|
import gzip
import codecs
import json
import pickle
from typing import Any, Iterator, Callable, Iterable
import xmltodict
def load_xml_gz(filename: str, func: Callable, depth: int) -> Any:
with gzip.open(filename) as f:
return xmltodict.parse(f, item_depth=depth, item_callback=func)
def load_xml(filename: str, func: Callable, depth: int) -> Any:
with open(filename, "rb") as f:
return xmltodict.parse(f, item_depth=depth, item_callback=func)
def save_pickle_gz(data: Any, filename: str) -> None:
with gzip.GzipFile(filename, "wb") as outfile:
pickle.dump(data, outfile)
def iteratate_jsonl_gz(filename: str) -> Iterator[Any]:
reader = codecs.getreader("utf-8")
with gzip.open(filename) as f:
for line in reader(f):
yield json.loads(line)
def save_jsonl_gz(filename: str, data: Iterable[Any]) -> None:
with gzip.GzipFile(filename, "wb") as out_file:
writer = codecs.getwriter("utf-8")
for element in data:
writer(out_file).write(json.dumps(element))
writer(out_file).write("\n")
def load_gz_per_line(filename: str) -> Iterator[str]:
reader = codecs.getreader("utf-8")
with gzip.open(filename) as f:
yield from reader(f)
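# Usage sketch added for illustration (the file names are hypothetical): stream records out of a
# .jsonl.gz file lazily and write them back with save_jsonl_gz.
# records = list(iteratate_jsonl_gz("input.jsonl.gz"))
# save_jsonl_gz("copy.jsonl.gz", records)
# for line in load_gz_per_line("notes.txt.gz"):
#     print(line.rstrip("\n"))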
|
StarcoderdataPython
|
1856230
|
<reponame>WatsonWangZh/CodingPractice
# Calculate the sum of two integers a and b, but you are not allowed to use the operators + and -.
# Example 1:
# Input: a = 1, b = 2
# Output: 3
# Example 2:
# Input: a = -2, b = 3
# Output: 1
class Solution(object):
def getSum(self, a, b):
"""
:type a: int
:type b: int
:rtype: int
"""
        # Bit manipulation, O(1)
        # XOR gives the sum without carries (0^0=0, 1^0=1, 0^1=1, 1^1=0)
        # AND followed by a left shift of one gives the carry
        # Keep adding the carry-free sum and the carry until no carry remains
        # set MASK and MAX_INT since python will dynamically change the precision of an integer
MASK = 0xFFFFFFFF
MAX_INT = 0x7FFFFFFF
while b!= 0:
carry = (a & b) & MASK
a = (a ^ b) & MASK
b = carry << 1
        # Fix up negative results by undoing the 32-bit mask
return a if a < MAX_INT else ~(a ^ MASK)
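# Worked example added for illustration, tracing the loop above with a = 1 (0b01), b = 2 (0b10):
#   carry = (1 & 2) << 1 = 0, a = 1 ^ 2 = 3, b = 0 -> loop exits and 3 is returned (Example 1).
# For a negative result such as getSum(-2, -3), the loop ends with a = 0xFFFFFFFB >= MAX_INT,
#   so ~(a ^ MASK) = ~(0x4) = -5 restores the signed value.
# print(Solution().getSum(1, 2))   # 3
# print(Solution().getSum(-2, 3))  # 1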
|
StarcoderdataPython
|
1887968
|
<filename>src/service/tasks/__init__.py
# encoding: utf-8
from .tasks import *
|
StarcoderdataPython
|
5078633
|
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
[概要]
セッションファイル削除コマンド
削除対象の判定となるファイルのタイムスタンプは、
サーバー設定のタイムゾーンに関わらず、UTCで判定する
[引数]
[戻り値]
"""
import os
import pytz
import time
import glob
import datetime
import traceback
from django.core.management.base import BaseCommand
from django.conf import settings
class Command(BaseCommand):
    help = 'Session file deletion command'
def add_arguments(self, parser):
        parser.add_argument('-e', '--expire', action='store', default=14, type=int, dest='expire', help='Number of days since last modification after which a session file is deleted')
def handle(self, *args, **options):
        try:
            self.now = datetime.datetime.now(pytz.timezone('UTC'))
            # Read parameters
            expire_day = options['expire']
            if expire_day < 0:
                expire_day = 0
            # Set the cutoff datetime for deletion
            self.delete_dt = self.now - datetime.timedelta(days=expire_day)
            # Check the session engine
            str_session_engine = getattr(settings, 'SESSION_ENGINE', 'None')
            if not str_session_engine.endswith('.file'):
                print('The session engine is not file-based engine=%s' % (str_session_engine))
                return
            # Check the session file path
            session_dir = getattr(settings, 'SESSION_FILE_PATH', None)
            if not session_dir:
                print('The session file path is not configured path=%s' % (session_dir))
                return
            if not session_dir.endswith('/'):
                session_dir = '%s/' % (session_dir)
            # Confirm before deleting
            msg = 'Session files whose last modification is more than %s day(s) old will be deleted.\nAre you sure? (y/n)\n' % (expire_day)
            if expire_day <= 0:
                msg = 'All session files will be deleted.\nLogged-in users will be disconnected.\nAre you sure? (y/n)\n'
            while True:
                choice = input(msg).lower()
                if choice in ['y', 'yes']:
                    break
                elif choice in ['n', 'no']:
                    return
            # Collect the session files
            session_files = glob.glob('%s*' % (session_dir))
            for sf in session_files:
                if not os.path.isfile(sf):
                    continue
                # Get the file's last modification time
                updated_epoch = os.path.getmtime(sf)
                updated_dt = datetime.datetime.fromtimestamp(updated_epoch, datetime.timezone.utc)
                # Delete files older than the configured number of days
                if updated_dt < self.delete_dt:
                    os.remove(sf)
                    print('Deleted file=%s' % (os.path.split(sf)[1]))
        except Exception as e:
            print(e)
|
StarcoderdataPython
|
3521757
|
from .resnet101_baseline import get_resnet101_baseline
from .resnet101_base_oc import get_resnet101_base_oc_dsn
from .resnet101_pyramid_oc import get_resnet101_pyramid_oc_dsn
from .resnet101_asp_oc import get_resnet101_asp_oc_dsn
from .resnet101_aa_dsn import get_resnet101_aa_dsn
networks = {
'resnet101_baseline': get_resnet101_baseline,
'resnet101_base_oc_dsn': get_resnet101_base_oc_dsn,
'resnet101_pyramid_oc_dsn': get_resnet101_pyramid_oc_dsn,
'resnet101_asp_oc_dsn': get_resnet101_asp_oc_dsn,
'resnet101_aa_dsn': get_resnet101_aa_dsn,
}
def get_segmentation_model(name, **kwargs):
return networks[name.lower()](**kwargs)
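# Usage sketch added for illustration: the keyword arguments are hypothetical and depend on the
# signatures of the get_resnet101_* constructors imported above.
# model = get_segmentation_model('resnet101_asp_oc_dsn', num_classes=19)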
|
StarcoderdataPython
|
5103354
|
<reponame>Ace-Ma/LSOracle
import argparse
import sys
import glob
import math
import numpy as np
import os
import shutil
import subprocess
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import re
import time
import timeit
from datetime import datetime
import logging
#Set up command line parser
parser = argparse.ArgumentParser(prog='lstools_test', description='Test script for LSOracle')
parser.add_argument('--log_to_file', action='store_true', help='print log information to specified filename in LSOracle directory')
parser.add_argument('--verbose', '-v', action='count', default=0, help='output detail. Default (unset) = warning; -v = info, -vv = debug')
parser.add_argument('--test_directory', '-t', help='If you have a custom set of test files, specify path here. Default LSOracle/tests. The directory you specify should have 2 subdirectories: end_to_end and unit_tests, and the input files should be .aig format')
parser.add_argument('--training_model', '-m', default='/LSOracle/deep_learn_model.json', help='if you have a custom tensorflow model for the classifier, specify it here.')
parser.add_argument('--travis', action='store_true', help='sets paths, envs, etc, to appropriate values for travis ci')
args = parser.parse_args()
#saving paths for future use
home_path = os.getenv('HOME')
if args.travis:
lstools_path = home_path + '/build/LNIS-Projects/LSOracle/build/core'
training_file = lstools_path + '/../../deep_learn_model.json'
else:
lstools_path = home_path + '/LSOracle/build/core'
training_file = home_path + args.training_model
#configure logging
timestamp = datetime.now()
timestamp_format = timestamp.strftime('%Y%m%d%H%M%S')
log_level = 'WARNING'
if args.verbose == 1:
log_level = 'INFO'
if args.verbose > 1:
log_level = 'DEBUG'
if args.log_to_file:
log_filename = timestamp_format + '_lsoracle_test.log'
logging.basicConfig(filename=log_filename,format='%(asctime)s:%(levelname)s:%(message)s', level=log_level)
else:
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=log_level)
#Define our function to call the lstools executable
def optimize(filename, mode, part_num, suffix):
opt_file = filename + suffix + '.v'
cmd = ['./lsoracle','-c', 'read_aig ' + filename + '; partitioning ' + str(part_num) + '; ' + mode + ' -o ' + opt_file + ';']
process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
string_stdout = str(stdout).splitlines()
string_stderr = str(stderr)
if 'None' not in string_stderr:
logging.warning(string_stderr)
return [int(s) for s in string_stdout[-5].split() if s.isdigit()]
def compare(filename, suffix):
new_file = filename + '.v'
opt_file = filename + suffix + '.v'
#need to create verilog file to compare to
    cmd = ['./lsoracle','-c', 'read_aig ' + filename + '; write_verilog ' + new_file + ';']
subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
#use cec to compare the pre and post optimization files
cmd = ['abc', '-c', 'cec ' + new_file +' '+ opt_file + ';']
abc_process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
abc_stdout, abc_stderr = abc_process.communicate()
if "None" not in str(abc_stderr):
logging.warning(str(abc_stderr))
intermediate_string = str(abc_stdout)
string_abc = intermediate_string.splitlines()
print("str_abc \n")
print(string_abc[-1])
return string_abc[-1]
#Begin tests
print('LSOracle test suite ' + str(timestamp))
logging.debug(home_path)
logging.debug('Home path: %s', home_path)
#End to end tests
#Grab my test files
test_path = lstools_path + '/../../tests/end_to_end'
test_path_glob = test_path + '/*.aig'
print('\nEnd to end tests: ')
logging.info('Test path: %s', test_path)
files = glob.glob(test_path_glob)
logging.debug("List of test files: ")
logging.debug(files)
#Actual testing
#we'll have to do some more thinking about what a good end to end test looks like. For now I'm going to optimize a couple benchmarks
#using aig, mig, mixed, and brute force, and report those. I'll have a failure message if our method is slower than
#both mig and aig. It ought to, at least, be between them.
for curr_file in files:
print('\n' + curr_file)
os.chdir(lstools_path)
#report statistics
cmd = ['./lsoracle','-c', 'read_aig ' + curr_file + '; ps -a;']
process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
string_stdout = str(stdout)
#calculate number of nodes
unoptimized_size = float(string_stdout[7:string_stdout.find('\n')])
num_part = math.ceil(unoptimized_size / 300)
print('Size (# nodes before optimization): ' + str(unoptimized_size) +' partitions = size/300: ' + str(num_part))
#mixed synthesis with classifier
cmdstr = 'optimization -n ' + training_file
mixed_size = optimize(curr_file, cmdstr, num_part, '_mixed_out')
print (mixed_size)
print('ntk size after mixed synthesis: ' + str(mixed_size[0]) + ' depth: ' + str(mixed_size[1]))
abcout = compare(curr_file, '_mixed_out')
assert('Networks are equivalent' in abcout)
#Brute Force
cmdstr = 'optimization -b'
brute_size = optimize(curr_file, cmdstr, num_part, '_brute_out')
print('ntk size after brute force: ' + str(brute_size[0]) + ' depth: ' + str(brute_size[1]))
abcout = compare(curr_file, '_brute_out')
assert('Networks are equivalent' in abcout)
#AIG only
cmdstr = 'optimization -a'
aig_size = optimize(curr_file, cmdstr, num_part, '_aig_out')
print('ntk size after aig optimization: ' + str(aig_size[0]) + ' depth: ' + str(aig_size[1]))
abcout = compare(curr_file, '_aig_out')
assert('Networks are equivalent' in abcout)
#MIG only
cmdstr = 'optimization -m'
mig_size = optimize(curr_file, cmdstr, num_part, '_mig_out')
print('ntk size after mig optimization: ' + str(mig_size[0]) + ' depth: ' + str(mig_size[1]))
abcout = compare(curr_file, '_mig_out')
assert('Networks are equivalent' in abcout)
assert (mixed_size[0] <= aig_size[0] or mixed_size[0] <= mig_size[0]) or (brute_size[0] <= aig_size[0] or brute_size[0] <= mig_size[0])
#unit tests. This is a stub.
#Grab my test files
print('\nUnit tests:')
test_path = lstools_path + '/../../tests/unit'
test_path_glob = test_path + '/*.aig'
logging.info('Unit tests\n')
logging.info('Test path: %s', test_path)
files = glob.glob(test_path_glob)
logging.debug("List of test files: ")
logging.debug(files)
for curr_file in files:
print(curr_file + '\n')
os.chdir(lstools_path)
|
StarcoderdataPython
|
5126184
|
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseNotAllowed
import gripcontrol
import smartfeed
import smartfeed.django
def items(req, **kwargs):
if req.method == 'GET':
mapper_class = kwargs.get('mapper_class')
if mapper_class:
mapper = smartfeed.django.get_class(mapper_class)
else:
mapper = smartfeed.django.get_default_mapper()
model_class = kwargs.get('model_class')
if not model_class:
model_class = mapper.get_model_class(req, kwargs)
if model_class:
model = smartfeed.django.get_class(model_class)
else:
model = smartfeed.django.get_default_model()
feed_id = mapper.get_feed_id(req, kwargs)
max_count = req.GET.get('max')
if max_count:
try:
max_count = int(max_count)
if max_count < 1:
raise ValueError('max too small')
except ValueError as e:
return HttpResponseBadRequest('Bad Request: Invalid max value: %s\n' % e.message)
if not max_count or max_count > 50:
max_count = 50
since = req.GET.get('since')
if since:
try:
since = smartfeed.parse_spec(since)
except ValueError as e:
return HttpResponseBadRequest('Bad Request: Invalid since value: %s\n' % e.message)
until = req.GET.get('until')
if until:
try:
until = smartfeed.parse_spec(until)
except ValueError as e:
return HttpResponseBadRequest('Bad Request: Invalid until value: %s\n' % e.message)
wait = req.GET.get('wait')
if wait is not None:
if wait in ('true', 'false'):
wait = (wait == 'true')
else:
return HttpResponseBadRequest('Bad Request: Invalid wait value\n')
else:
wait = False
rformat = 'json'
accept = req.META.get('HTTP_ACCEPT')
if accept:
try:
rformat = smartfeed.get_accept_format(accept)
except:
pass
try:
result = model.get_items(feed_id, since, until, max_count)
except NotImplementedError as e:
return HttpResponse('Not Implemented: %s\n' % e.message, status=501)
except smartfeed.UnsupportedSpecError as e:
return HttpResponseBadRequest('Bad Request: %s' % e.message)
except smartfeed.InvalidSpecError:
return HttpResponseBadRequest('Bad Request: Invalid spec\n')
except smartfeed.SpecMismatchError as e:
return HttpResponseBadRequest('Bad Request: %s' % e.message)
except smartfeed.FeedDoesNotExist as e:
return HttpResponseNotFound('Not Found: %s\n' % e.message)
except smartfeed.ItemDoesNotExist as e:
return HttpResponseNotFound('Not Found: %s\n' % e.message)
if not wait or result.last_cursor is None or not since or len(result.items) > 0:
content_type, body = smartfeed.create_items_body(rformat, result.items, total=result.total, last_cursor=result.last_cursor, formatter=mapper.get_formatter(req, kwargs))
return HttpResponse(body, content_type=content_type)
if not smartfeed.django.check_grip_sig(req):
return HttpResponse('Error: Realtime endpoint not supported. Set up Pushpin or Fanout.io\n', status=501)
grip_prefix = mapper.get_grip_prefix(req, kwargs)
channel = gripcontrol.Channel(grip_prefix + smartfeed.encode_id_part(feed_id) + '-' + smartfeed.encode_id_part(rformat), result.last_cursor)
theaders = dict()
content_type, tbody = smartfeed.create_items_body(rformat, [], last_cursor=result.last_cursor)
theaders['Content-Type'] = content_type
tresponse = gripcontrol.Response(headers=theaders, body=tbody)
instruct = gripcontrol.create_hold_response(channel, tresponse)
return HttpResponse(instruct, content_type='application/grip-instruct')
else:
return HttpResponseNotAllowed(['GET'])
def stream(req, **kwargs):
if req.method == 'GET':
mapper_class = kwargs.get('mapper_class')
if mapper_class:
mapper = smartfeed.django.get_class(mapper_class)
else:
mapper = smartfeed.django.get_default_mapper()
feed_id = mapper.get_feed_id(req, kwargs)
rformat = 'json'
accept = req.META.get('HTTP_ACCEPT')
if accept:
try:
rformat = smartfeed.get_accept_format(accept)
except:
pass
if not smartfeed.django.check_grip_sig(req):
return HttpResponse('Error: Realtime endpoint not supported. Set up Pushpin or Fanout.io\n', status=501)
grip_prefix = mapper.get_grip_prefix(req, kwargs)
channel = gripcontrol.Channel(grip_prefix + smartfeed.encode_id_part(feed_id) + '-' + smartfeed.encode_id_part(rformat))
iheaders = dict()
iheaders['Content-Type'] = 'text/plain'
iresponse = gripcontrol.Response(headers=iheaders)
instruct = gripcontrol.create_hold_stream(channel, iresponse)
return HttpResponse(instruct, content_type='application/grip-instruct')
else:
return HttpResponseNotAllowed(['GET'])
def subscriptions(req, **kwargs):
# TODO
return HttpResponse('Not Implemented: %s\n' % 'Persistent subscriptions not implemented', status=501)
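# URLconf sketch added for illustration (not part of the original module): the patterns and the
# 'base' kwarg name are hypothetical; whatever kwargs the patterns capture are simply passed
# through to the mapper via **kwargs in the views above.
# from django.conf.urls import url
# urlpatterns = [
#     url(r'^feeds/(?P<base>[^/]+)/items/$', items),
#     url(r'^feeds/(?P<base>[^/]+)/stream/$', stream),
#     url(r'^feeds/(?P<base>[^/]+)/subscriptions/$', subscriptions),
# ]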
|
StarcoderdataPython
|
3349273
|
<filename>Algo_practice/BinaryGap.py
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
'''
A binary gap within a positive integer N is any maximal sequence of consecutive zeros that is surrounded by ones at both ends in the binary representation of N.
For example, number 9 has binary representation 1001 and contains a binary gap of length 2. The number 529 has binary representation 1000010001 and contains two binary gaps: one of length 4 and one of length 3. The number 20 has binary representation 10100 and contains one binary gap of length 1. The number 15 has binary representation 1111 and has no binary gaps. The number 32 has binary representation 100000 and has no binary gaps.
Write a function:
def solution(N)
that, given a positive integer N, returns the length of its longest binary gap. The function should return 0 if N doesn't contain a binary gap.
For example, given N = 1041 the function should return 5, because N has binary representation 10000010001 and so its longest binary gap is of length 5. Given N = 32 the function should return 0, because N has binary representation '100000' and thus no binary gaps.
Write an efficient algorithm for the following assumptions:
N is an integer within the range [1..2,147,483,647].
'''
def solution(N):
binary = bin(N)[2:]
temp = 0
flag = 0
count = 0
#print(binary)
for i in range(len(binary)):
if(binary[i]=='1'):
flag = 1
if(temp>count):
count = temp
temp = 0
# print(count)
elif(binary[i]=='0' and flag == 1):
temp += 1
#print(temp)
return count
ans=solution(1073741825)
print(ans)
|
StarcoderdataPython
|
3468577
|
# Student ID: 1827402018
# Name: 王浩南
# IP: 192.168.157.238
# Upload time: 2018/11/12 15:19:26
import math
def func1(a,b):
    # Takes two integers a and b and counts the trailing zeros of the product a*(a+1)*...*b
    if a>=b:
        a,b=b,a
    # c accumulates the product
    c=1
    while a<=b:
        c=c*a
        a+=1
    count=0
    if c%10!=0:
        return None
    else:
        while c%10==0:
            count+=1
            c=c//10  # integer division so the remainder test stays exact for large products
        return(count)
def func2(a,b):
    # Counts the palindromic numbers in the range [a, b]
    count=0
    if a>=b:
        a,b=b,a
    for x in range(a,b+1):
        c=str(x)
        if list(c)==list(reversed(c)):
            count+=1
    return(count)
def func3(lst):
list_first=sorted(lst)
list_second=[i for i in list_first if i%3!=0 and i>=0]
return list_second
if __name__=="__main__":
print(func1(1,10))
|
StarcoderdataPython
|
8068806
|
# In this example, which is a variation of 'node_setup.py',
# dispy's "resetup_node" method is used to replace in-memory data to illustrate
# how to change working dataset. Although "resetup_node" feature can be used
# with any platform, this example doesn't work with Windows, as in-memory
# feature doesn't work with Windows.
# this function is executed by a node to initialize for computation; in this case, this function
# loads data in given file into global variable, which is available for computation jobs
# (as read-only data)
def setup(data_file, n):
global data, algorithms, hashlib, file_name
import hashlib, os, sys
data = open(data_file, 'rb').read() # read data in file to global variable
os.remove(data_file) # data_file can now be deleted
file_name = data_file
if sys.version_info.major > 2:
algorithms = list(hashlib.algorithms_guaranteed)
else:
algorithms = hashlib.algorithms
algorithms = [alg for alg in algorithms if (not alg.startswith('shake'))]
return 0
# this function is executed by node when closing for computation (after all jobs are finished);
# in this case, the function removes global variables initialized with 'setup'
def cleanup(data_file, n):
global data, algorithms, hashlib, file_name
del data, algorithms, file_name
# this function is executed by each computation job; in this case, the function uses
# global variables (in memory) initialized in 'setup'.
def compute(i, n):
    # 'hashlib', 'data' and 'algorithms' global variables are initialized in 'setup'
    import time  # 'time' is not imported by 'setup', so import it here for time.sleep below
alg = algorithms[i % len(algorithms)]
csum = getattr(hashlib, alg)()
csum.update(data)
time.sleep(n)
return (dispy_node_ip_addr, file_name, alg, csum.hexdigest())
# this function is executed at client (this program) when a job's status has changed
def job_status(job):
if job.status == dispy.DispyJob.Finished:
print('\njob %s finished by %s, %s of %s is %s' % (job.id, job.result[0], job.result[2],
job.result[1], job.result[3]))
else:
print('\njob %s failed: %s' % (job.id, job.exception))
if __name__ == '__main__':
import dispy, sys, os, glob, random
data_files = glob.glob(os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '*.py'))
file_id = 0
nodes = {}
class NodeAllocate(dispy.NodeAllocate):
def allocate(self, cluster, ip_addr, name, cpus, avail_info=None, platform='*'):
global file_id
if len(nodes) == 2:
# use at most 2 nodes, to illustrate this example
return 0
if platform.startswith('Windows'):
# In-memory is not supported with Windows
print('Ignoring node %s as in-memory data is not supported with Windows' % ip_addr)
return 0
data_file = data_files[file_id % len(data_files)]
file_id += 1
nodes[ip_addr] = data_file # keep track of which node processes which file
print('Node %s (%s) processes "%s"' % (ip_addr, name, data_file))
# files are saved at server under computation's directory so send just basename
self.setup_args = (os.path.basename(data_file), file_id)
self.depends = [data_file]
return max(2, cpus) # use at most 2 cpus (for illustration)
cluster = dispy.JobCluster(compute, nodes=[NodeAllocate(host='*')],
setup=setup, cleanup=cleanup, job_status=job_status,
loglevel=dispy.logger.DEBUG)
    for i in range(1, 7): # submit 6 jobs
job = cluster.submit(i, random.uniform(2, 5))
job.id = i
cluster.wait() # alternately, job_status above can decide when to call resetup_node
# call 'resetup_node' on nodes to process more files
for ip_addr in nodes.keys():
cluster.resetup_node(ip_addr)
    for i in range(i+1, i+7): # submit 6 more jobs
job = cluster.submit(i, random.uniform(2, 5))
job.id = i
cluster.wait()
cluster.print_status()
cluster.close()
|
StarcoderdataPython
|
4952714
|
from scrapy.spiders import Spider
from scrapy.selector import Selector
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Join, MapCompose
from scraper_app.items import LivingSocialDeal
class LivingSocialSpider(Spider):
"""Spider for regularly updated livingsocial.com site, Austin Page"""
name = "livingsocial"
allowed_domains = ['livingsocial.com']
start_urls = ["https://www.livingsocial.com/cities/18-austin"]
deals_list_xpath = '//li[@dealid]'
item_fields = {
'title': './/a/div[@class="deal-details"]/h3/text()',
'link': './/a/@href',
'location': './/a/div[@class="deal-details"]/p[@class="location"]/text()',
'original_price': './/a/div[@class="deal-prices"]/div[@class="deal-strikethrough-price"]/div[@class="strikethrough-wrapper"]/text()',
'price': './/a/div[@class="deal-prices"]/div[@class="deal-price"]/text()',
'end_date': './/span[@itemscope]/meta[@itemprop="availabilityEnds"]/@content'
}
'//table[@class="data-table"]'
def parse(self, response):
"""
Default callback used by Scrapy to process downloaded responses
"""
sel = Selector(response)
# iterate over deals
for deal in sel.xpath(self.deals_list_xpath):
loader = ItemLoader(LivingSocialDeal(), selector=deal)
# define processors
            loader.default_input_processor = MapCompose()
loader.default_output_processor = Join()
# iterate over fields and add xpaths to the Loader
for field, xpath in self.item_fields.items():
loader.add_xpath(field, xpath)
yield loader.load_item()
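# Run note added for illustration: with the usual Scrapy project layout (a scraper_app package
# plus scrapy.cfg), this spider is started from the project root with:
#   scrapy crawl livingsocial -o deals.json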
|
StarcoderdataPython
|
11359051
|
<gh_stars>10-100
size(800, 800)
background(255)
noStroke()
# Size of each square
size = 20
# Go through all columns
for y in range(0, height / size):
    # Go through all rows
    for x in range(0, width / size):
        # Random fill color
        fill(random(0, 255), random(0, 255), random(0, 255))
        # Random rotation angle
        angle = random(-PI / 32, PI / 32)
        # Rotate forward
        rotate(angle)
        # Draw the rectangle
        rect(0, 0, size, size)
        # Rotate back
        rotate(-angle)
        # Move the coordinate system to the right
        translate(size, 0)
    # Move the coordinate system down and all the way back to the left
    translate(-height, size)
|
StarcoderdataPython
|
3501129
|
<gh_stars>10-100
import requests
from mockserver_friendly import request, response, times
from test import MOCK_SERVER_URL, MockServerClientTestCase
class TestBasicVerifying(MockServerClientTestCase):
def test_verify_request_received_once(self):
self.client.stub(request(), response())
requests.get(MOCK_SERVER_URL)
self.client.verify(request(), times(1))
def test_verify_request_never_received(self):
self.client.verify(request(), times(0))
def test_verify_request_not_received_fail(self):
with self.assertRaises(AssertionError):
self.client.verify(request(), times(1))
def test_verify_all_expectations(self):
self.client.expect(request(), response(), times(1))
requests.get(MOCK_SERVER_URL)
self.client.verify_expectations()
|
StarcoderdataPython
|