Dataset schema (one row per source file):

| column | dtype | stats |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |

The data rows below follow this column order:

blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3d1adc70cd541480ba5036a9efa4b5fee148a93d | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /YcqAY72nZNPtvofuJ_8.py | b8e97a2438ad8150983e02cf9849e462c2089ed7 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py |
def quad_sequence(lst):
    # find the pattern: the last two first differences and the constant second difference
difference = [lst[len(lst)-2] - lst[len(lst)-3], lst[len(lst)-1] - lst[len(lst)-2]]
difference_of_difference = difference[1] - difference[0]
    # work out the next len(lst) terms by repeatedly adding the growing difference
last_num = lst[len(lst)-1]
last_diff = difference[1]
next_nums = []
for _ in range(len(lst)):
last_diff+=difference_of_difference
last_num +=last_diff
next_nums.append(last_num)
return next_nums
| [
"[email protected]"
] | |
eaaf216be821853937268b4966d0219606e5dc83 | 86f026df0f9c5734ffd7266e08e45a5c8f855359 | /dataapp/migrations/0001_initial.py | e1531da8975c84cddbfa3ea68c7c488015ea2500 | [] | no_license | stam007/api_shop | 38e23795a305f31d1bf1260cf6c4f118c99c9c92 | 443c2a2f9b6f204a2b194b1c6dd61b8b29d23c1c | refs/heads/master | 2020-06-16T03:39:38.137973 | 2019-07-05T21:37:03 | 2019-07-05T21:37:03 | 195,469,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Movie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=32)),
],
),
]
| [
"[email protected]"
] | |
595d87247e2461ae9ffc07c514269c1026c31b6b | d499057c555f2c9217cdfa5052207b67ea54b5cc | /server.py | d28eb56439d3b0a8e280d4903de84f66fd06cc9b | [
"Apache-2.0"
] | permissive | defnngj/movie-website | 6fe1fcc2571c75dd7f423137833eb46c4ac7db1d | d1ffaf209b4c689cd5180b8a8bb1866ad6d0f0e8 | refs/heads/main | 2023-08-11T10:37:38.783093 | 2021-09-27T14:39:56 | 2021-09-27T14:39:56 | 410,204,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,250 | py | import os
import sqlite3
from flask import g
from flask import Flask
from flask import render_template
app = Flask(__name__)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATABASE = os.path.join(BASE_DIR, "dev.db")
def connect_db():
return sqlite3.connect(DATABASE)
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
def query_db(query, args=(), one=False):
"""
    Query the db
:param query:
:param args:
:param one:
:return:
"""
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
@app.route("/")
def hello_world():
return "<p>Hello, World!</p>"
@app.route("/top")
def move_top():
return "<p>250经典电影</p>"
@app.route("/movie")
def movie_page():
"""
    Movie page
:return:
"""
movie_list = query_db('select * from movie')
# for movie in movie_list:
# print("db-data\n", type(movie), movie)
return render_template("hello.html", moves=movie_list)
| [
"[email protected]"
] | |
74da5a87b5ec42b3916f337f6510325ceb0175cc | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_223/ch147_2020_04_12_20_47_12_558411.py | 48a99e9557a4ecf4ea235c6fb2d47c61fe37004e | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | def mais_frequente(l1):
dic={}
for e in l1:
if e in dic:
dic[e]+=1
else:
dic[e]=1
    # track the highest occurrence count seen so far
    ocorrencias = 0
    for n in dic.values():
        if n > ocorrencias:
            ocorrencias = n
    # collect every element that reaches the highest count
    palavra = []
    for p in dic.keys():
        if dic[p] == ocorrencias:
            palavra.append(p)
return palavra | [
"[email protected]"
] | |
cfbbccfbb28499d825414a4c03770d71a0783f86 | 0ad5abffdd15bca072ab8db068aab7e1bc6df167 | /NanoGardener/python/modules/LeptonMaker.py | c081cb86e230635a3145a1ee358104a8582dccfd | [] | no_license | pfackeldey/LatinoAnalysis | bf603af9c370b079c3d92e3ed49a5d7d05b87379 | 484a48ec6bfdb7edb06897be984eecfd1aae62fd | refs/heads/master | 2020-03-14T22:42:22.226962 | 2018-04-27T16:02:56 | 2018-04-27T16:02:56 | 131,827,114 | 0 | 0 | null | 2018-05-02T09:16:59 | 2018-05-02T09:16:59 | null | UTF-8 | Python | false | false | 8,092 | py | import ROOT
import os
import re
ROOT.PyConfig.IgnoreCommandLineOptions = True
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
from LatinoAnalysis.NanoGardener.data.LeptonMaker_cfg import List_newVar, Lep_var
from LatinoAnalysis.NanoGardener.data.common_cfg import Type_dict
#from LatinoAnalysis.NanoGardener.data.Trigger_names import TrigNames, SPTrigNames
class LeptonMaker(Module):
'''
put this file in LatinoAnalysis/NanoGardener/python/modules/
Add extra variables to NANO tree
'''
def __init__(self):
pass
def beginJob(self):
pass
def endJob(self):
pass
def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
self.initReaders(inputTree) # initReaders must be called in beginFile
self.out = wrappedOutputTree
# New branches
for typ in List_newVar:
for var in List_newVar[typ]:
if 'Lepton_' in var: self.out.branch(var, typ, lenVar='nLepton')
elif 'SPTrigger' in var: self.out.branch(var, typ, len(SPTrigNames))
elif 'Trigger' in var: self.out.branch(var, typ, len(TrigNames))
else: self.out.branch(var, typ)
# Old branches to reorder
self.list_old_br = {}
self.list_old_br['Electron'] = []
self.list_old_br['Muon'] = []
self.list_old_br['Jet'] = []
for br in inputTree.GetListOfBranches():
bname = br.GetName()
btype = Type_dict[br.GetListOfLeaves()[0].GetTypeName()]
if re.match('\AElectron_', bname):
self.list_old_br['Electron'].append(bname)
self.out.branch(bname, btype, lenVar='nElectron')
if re.match('\AMuon_', bname):
self.list_old_br['Muon'].append(bname)
self.out.branch(bname, btype, lenVar='nMuon')
if re.match('\AJet_', bname):
self.list_old_br['Jet'].append(bname)
self.out.branch(bname, btype, lenVar='nJet')
def endFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
pass
def initReaders(self,tree): # this function gets the pointers to Value and ArrayReaders and sets them in the C++ worker class
self.electron_var = {}
self.muon_var = {}
self.jet_var = {}
for br in tree.GetListOfBranches():
bname = br.GetName()
if re.match('\AElectron_', bname): self.electron_var[bname] = tree.arrayReader(bname)
if re.match('\AMuon_', bname): self.muon_var[bname] = tree.arrayReader(bname)
if re.match('\AJet_', bname): self.jet_var[bname] = tree.arrayReader(bname)
self.nElectron = tree.valueReader('nElectron')
self.nMuon = tree.valueReader('nMuon')
self.nJet = tree.valueReader('nJet')
self._ttreereaderversion = tree._ttreereaderversion # self._ttreereaderversion must be set AFTER all calls to tree.valueReader or tree.arrayReader
def analyze(self, event):
"""process event, return True (go to next module) or False (fail, go to next event)"""
if event._tree._ttreereaderversion > self._ttreereaderversion: # do this check at every event, as other modules might have read further branches
self.initReaders(event._tree)
# do NOT access other branches in python between the check/call to initReaders and the call to C++ worker code
#--- Set vars
nEl = int(self.nElectron)
nMu = int(self.nMuon)
nJt = int(self.nJet)
nLep = nMu + nEl
lep_dict = {}
for lv in Lep_var:
lep_dict[lv] = [0]*nLep
lep_dict['instance'] = [0]*nLep
ele_dict = {}
for lv in self.list_old_br['Electron']:
ele_dict[lv] = [0]*nEl
muo_dict = {}
for lv in self.list_old_br['Muon']:
muo_dict[lv] = [0]*nMu
jet_dict = {}
for lv in self.list_old_br['Jet']:
jet_dict[lv] = [0]*nJt
#--- Electron Loops
for iEle1 in range(nEl):
pt_idx = 0
pt1 = self.electron_var['Electron_pt'][iEle1]
# Start comparing electrons
for iEle2 in range(nEl):
if iEle2 == iEle1: continue
pt2 = self.electron_var['Electron_pt'][iEle2]
if pt1 < pt2:
pt_idx += 1
#if pt_idx != iEle1: print('Electrons reordered')
# Now index is set, fill the vars
for var in ele_dict:
if type(self.electron_var[var][iEle1]) is str:
ele_dict[var][pt_idx] = ord(self.electron_var[var][iEle1])
else:
ele_dict[var][pt_idx] = self.electron_var[var][iEle1]
#--- Muon Loops
for iMu1 in range(nMu):
pt_idx = 0
pt1 = self.muon_var['Muon_pt'][iMu1]
# Start comparing muons
for iMu2 in range(nMu):
if iMu2 == iMu1: continue
pt2 = self.muon_var['Muon_pt'][iMu2]
if pt1 < pt2:
pt_idx += 1
#if pt_idx != iMu1: print('Muons reordered')
# Now index is set, fill the vars
for var in muo_dict:
if type(self.muon_var[var][iMu1]) is str:
muo_dict[var][pt_idx] = ord(self.muon_var[var][iMu1])
else:
muo_dict[var][pt_idx] = self.muon_var[var][iMu1]
#--- Lepton Loops
for iLep1 in range(nLep):
pt_idx = 0
if iLep1 < nEl:
pt1 = ele_dict['Electron_pt'][iLep1]
pdgId1 = ele_dict['Electron_pdgId'][iLep1]
else:
pt1 = muo_dict['Muon_pt'][iLep1 - nEl]
pdgId1 = muo_dict['Muon_pdgId'][iLep1 - nEl]
# Start comparing leptons
for iLep2 in range(nLep):
if iLep2 == iLep1: continue
if iLep2 < nEl:
pt2 = ele_dict['Electron_pt'][iLep2]
else:
pt2 = muo_dict['Muon_pt'][iLep2 - nEl]
if pt1 < pt2:
pt_idx += 1
# Now index is set, fill the vars
if abs(pdgId1) == 11:
for var in lep_dict:
if not 'instance' in var:
lep_dict[var][pt_idx] = ele_dict['Electron_'+var][iLep1]
else:
lep_dict[var][pt_idx] = iLep1
elif abs(pdgId1) == 13:
for var in lep_dict:
if not 'instance' in var and not 'eCorr' in var:
lep_dict[var][pt_idx] = muo_dict['Muon_'+var][iLep1 - nEl]
elif 'eCorr' in var:
lep_dict[var][pt_idx] = 1.
else:
lep_dict[var][pt_idx] = iLep1 - nEl
#--- Jet Loops
for iJ1 in range(nJt):
pt_idx = 0
pt1 = self.jet_var['Jet_pt'][iJ1]
# Start comparing jets
for iJ2 in range(nJt):
if iJ2 == iJ1: continue
pt2 = self.jet_var['Jet_pt'][iJ2]
if pt1 < pt2:
pt_idx += 1
#if pt_idx != iJ1: print('Jets reordered')
# Now index is set, fill the vars
for var in jet_dict:
if type(self.jet_var[var][iJ1]) is str:
jet_dict[var][pt_idx] = ord(self.jet_var[var][iJ1])
else:
jet_dict[var][pt_idx] = self.jet_var[var][iJ1]
#--- Fill branches
for var in lep_dict:
self.out.fillBranch('Lepton_' + var, lep_dict[var])
for var in ele_dict:
self.out.fillBranch(var, ele_dict[var])
for var in muo_dict:
self.out.fillBranch(var, muo_dict[var])
for var in jet_dict:
self.out.fillBranch(var, jet_dict[var])
return True
# define modules using the syntax 'name = lambda : constructor' to avoid having them loaded when not needed
lepMkr = lambda : LeptonMaker()
| [
"[email protected]"
] | |
2faa1a2aaf34ff00d50c35afead93ace9bc949fb | 993ef8924418866f932396a58e3ad0c2a940ddd3 | /Production/test/condorSub/dict_Summer20UL16APV_gjets_dr0p4.py | 817994fbf5c7ee0aa69b211e2a51ecb254f23838 | [] | no_license | TreeMaker/TreeMaker | 48d81f6c95a17828dbb599d29c15137cd6ef009a | 15dd7fe9e9e6f97d9e52614c900c27d200a6c45f | refs/heads/Run2_UL | 2023-07-07T15:04:56.672709 | 2023-07-03T16:43:17 | 2023-07-03T16:43:17 | 29,192,343 | 16 | 92 | null | 2023-07-03T16:43:28 | 2015-01-13T13:59:30 | Python | UTF-8 | Python | false | false | 446 | py | flist = {
"scenario": "Summer20UL16APV",
"args": "emerging=True",
"samples": [
['Summer20UL16APV.GJets_DR-0p4_HT-100To200_TuneCP5_13TeV-madgraphMLM-pythia8'],
['Summer20UL16APV.GJets_DR-0p4_HT-200To400_TuneCP5_13TeV-madgraphMLM-pythia8'],
['Summer20UL16APV.GJets_DR-0p4_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8'],
['Summer20UL16APV.GJets_DR-0p4_HT-600ToInf_TuneCP5_13TeV-madgraphMLM-pythia8'],
]
}
| [
"[email protected]"
] | |
190d1b1092d241c85f0feb82ec4fbde905277a25 | 871e1b0295c0fbbfca8191236d674866cf62ff01 | /TrainB5_NAR1_imagenet_64.py | 532b33d65353507986ad9cfe7bb6f9818cee5de2 | [] | no_license | Peckkie/USAI_ABnormal_Screening | ce31a813e9303a7d43def912ab731cc633268cb7 | 82cd63ac9ab72fbe68eae254c15c7bf7ef906022 | refs/heads/master | 2023-02-16T13:32:33.678500 | 2021-01-07T02:36:35 | 2021-01-07T02:36:35 | 277,981,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,080 | py | import PIL
from keras import models
from keras import layers
from tensorflow.keras import optimizers
import os
import glob
import shutil
import sys
import numpy as np
from skimage.io import imread
import matplotlib.pyplot as plt
import os
from tensorflow.keras import callbacks
import pandas as pd
from keras.utils import generic_utils
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
batch_size = 64
epochs = 200
#Train
dataframe = pd.read_csv( '/media/tohn/SSD/ImageForTrainTest/train.csv')
base_dir = '/media/tohn/SSD/ImageForTrainTest/'
os.chdir(base_dir)
train_dir = os.path.join(base_dir, 'train')
#validation
valframe = pd.read_csv( '/media/tohn/SSD/ImageForTrainTest/validation.csv')
validation_dir = os.path.join(base_dir, 'validation')
from efficientnet.keras import EfficientNetB5 as Net
from efficientnet.keras import center_crop_and_resize, preprocess_input
conv_base = Net(weights='imagenet')
height = width = conv_base.input_shape[1]
input_shape = (height, width, 3)
# loading pretrained conv base model
conv_base = Net(weights='imagenet', include_top=False, input_shape=input_shape)
# create new model with a new classification layer
x = conv_base.output
global_average_layer = layers.GlobalAveragePooling2D(name = 'head_pooling')(x)
dropout_layer_1 = layers.Dropout(0.50,name = 'head_dropout')(global_average_layer)
prediction_layer = layers.Dense(2, activation='softmax',name = 'prediction_layer')(dropout_layer_1)
model = models.Model(inputs= conv_base.input, outputs=prediction_layer)
model.summary()
#showing before&after freezing
print('This is the number of trainable layers '
'before freezing the conv base:', len(model.trainable_weights))
#conv_base.trainable = False # freeze to preserve the convolutional base's weights
for layer in conv_base.layers:
layer.trainable = False
print('This is the number of trainable layers '
      'after freezing the conv base:', len(model.trainable_weights)) # after freezing, only the pooling and dense layers remain
from tensorflow.keras.preprocessing.image import ImageDataGenerator
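# augment the training images on the fly; validation images are only rescaled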
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
brightness_range=[0.5,1.5],
shear_range=0.4,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_dataframe(
dataframe = dataframe,
directory = train_dir,
x_col = 'Path Crop',
y_col = 'Class',
target_size = (height, width),
batch_size=batch_size,
color_mode= 'rgb',
class_mode='categorical')
test_generator = test_datagen.flow_from_dataframe(
dataframe = valframe,
directory = validation_dir,
x_col = 'Path Crop',
y_col = 'Class',
target_size = (height, width),
batch_size=batch_size,
color_mode= 'rgb',
class_mode='categorical')
os.chdir('/media/tohn/SSD/trainEffbyB/R1')
root_logdir = '/media/tohn/SSD/trainEffbyB/R1/my_logsB5imagenet_64'
def get_run_logdir():
import time
run_id = time.strftime("run_%Y_%m_%d_%H_%M_%S")
return os.path.join(root_logdir,run_id)
run_logdir = get_run_logdir()
tensorboard_cb = callbacks.TensorBoard(log_dir = run_logdir)
# os.makedirs("./models", exist_ok=True)
def avoid_error(gen):
while True:
try:
data, labels = next(gen)
yield data, labels
except:
pass
#Training
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.RMSprop(lr=2e-5),
metrics=['acc'])
history = model.fit_generator(
avoid_error(train_generator),
steps_per_epoch= len(dataframe)//batch_size,
epochs=epochs,
validation_data=avoid_error(test_generator),
validation_steps= len(valframe) //batch_size,
callbacks = [tensorboard_cb])
model.save('./models/B5_R1_imnet_64.h5')
| [
"[email protected]"
] | |
b9a48a3fa6173aaf6e71b3ae6f50b4791ceb6e34 | e49a07ad215172e9c82cb418b10371bf0ce1c0f7 | /第1章 python基础/Python基础09/1-创建模块/msgnew.py | c70f8f258a39e5f8bc8e8298c973427f1890cdb5 | [] | no_license | taogangshow/python_Code | 829c25a7e32ead388c8b3ffa763cb9cf587bfd7b | 4b3d6992ec407d6069f3187ca7e402a14d863fff | refs/heads/master | 2022-12-16T01:26:17.569230 | 2018-11-16T10:07:59 | 2018-11-16T10:07:59 | 157,832,985 | 0 | 1 | null | 2022-11-25T09:55:32 | 2018-11-16T08:00:13 | Python | UTF-8 | Python | false | false | 142 | py | __all__ = ["test2","Test"]
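# only test2 and Test are exported when this module is imported with "from msgnew import *"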
def test1():
print("---test1---")
def test2():
print("---test2---")
class Test(object):
pass
num = 100
| [
"[email protected]"
] | |
fdd59d240a4c0bb10c89d75d4e9a62b0b1c7f939 | 2e990ff03f23c3f82e1f3fb7acee1ddd8fb72e0e | /whoislive.py | 450d38b0d33cacce38abf02fe08ffd66a715315d | [] | no_license | HeNine/ekimbot_plugins | e25bd5326b13603a8671d4089317185bb7a7821c | 354978cc8a632aec57ef79d2948ada21dc2502cd | refs/heads/master | 2021-01-24T18:26:28.146480 | 2017-03-06T17:37:11 | 2017-03-06T17:37:11 | 84,441,447 | 0 | 0 | null | 2017-03-09T12:47:12 | 2017-03-09T12:47:12 | null | UTF-8 | Python | false | false | 4,547 | py |
import functools
import itertools
import gevent
import gtools
import requests
import twitch
from ekimbot.botplugin import ClientPlugin
from ekimbot.commands import CommandHandler
from ekimbot.utils import reply_target
def encode_recursive(o, encoding='utf-8'):
if isinstance(o, unicode):
return o.encode(encoding)
elif isinstance(o, dict):
return {encode_recursive(k): encode_recursive(v) for k, v in o.items()}
elif isinstance(o, list):
return [encode_recursive(x) for x in o]
else:
return o
def requires_oauth(fn):
@functools.wraps(fn)
def wrapper(self, msg, *args):
if self.config.oauth is None or self.config.target is None:
self.reply(msg, "No twitch login configured")
return
return fn(self, msg, *args)
return wrapper
class TwitchPlugin(ClientPlugin):
"""Should be a client plugin for a client logged into twitch.
Upon request, will list all live channels out of the list of channels that config.target
(default client.nick) is following.
"""
name = 'whoislive'
defaults = {
'target': None, # None makes no args an error
'limit': 3,
'private_limit': 10,
'client_id': None,
'oauth': None, # if not none, can do follow actions
}
def init(self):
self.api = twitch.TwitchClient(oauth=self.config.oauth, client_id=self.config.client_id)
def limit(self, msg):
if msg.target == reply_target(self.client, msg):
# public channel
return self.config.limit
else:
# private message
return self.config.private_limit
@CommandHandler("live", 0)
def live(self, msg, *channels):
"""List currently live streamers
Specify list of channels, or list of all channels followed by a channel by prepending a ~
If nothing given, a default follow list is used depending on bot config
"""
found = []
errors = False
if not channels:
if self.config.target:
channels = ['~{}'.format(self.config.target)]
else:
self.reply(msg, "Please list some channels to check")
return
limit = self.limit(msg)
try:
# flatten iterators of follows and direct channel names into single iterable
# TODO this could be better parallelised so follow fetches happen in parallel
# but we need to refactor to use gevent queues or it gets real ugly real fast
channels = itertools.chain(*[
self.following(channel.lstrip('~')) if channel.startswith('~') else (channel,)
for channel in channels
])
for name, channel in gtools.gmap_unordered(self.get_channel_if_live, channels):
if not channel:
continue
found.append(name)
if len(found) < limit:
self.reply(msg, "https://twitch.tv/{name} is playing {game}: {status}".format(**channel))
except Exception:
self.logger.exception("Error while checking who is live")
errors = True
if errors:
self.reply(msg, "I had some issues talking to twitch, maybe try again later?")
elif len(found) >= limit:
found = found[limit - 1:]
self.reply(msg, "And also {}".format(', '.join(found)))
elif not found:
self.reply(msg, "No-one is live right now, sorry!")
def following(self, target):
"""Yields channel names that target is following"""
for result in self.api.get_all("follows", "users", target, "follows", "channels"):
yield encode_recursive(result['channel']['name'])
def get_channel_if_live(self, name):
"""Returns an up-to-date channel object if channel is currently live, else None"""
stream = gevent.spawn(lambda: self.api.get("streams", name))
channel = gevent.spawn(lambda: self.api.get("channels", name))
if stream.get().get("stream") is None:
return
return encode_recursive(channel.get())
def _follow_op(self, msg, channels, method, op_name):
channels = sorted(list(set(channels)))
failures = {}
for channel in channels:
try:
self.api.request(method, 'users', self.config.target, 'follows', 'channels', channel, json=False)
except requests.HTTPError as e:
failures[channel] = str(e)
if len(failures) == 0:
self.reply(msg, "{}ed channels: {}".format(op_name, ' '.join(channels)))
elif len(failures) == 1:
(channel, error), = failures.items()
self.reply(msg, "failed to {} channel {}: {}".format(op_name, channel, error))
else:
self.reply(msg, "failed to {} channels: {}".format(op_name, ' '.join(sorted(failures))))
@CommandHandler("twitch follow", 1)
@requires_oauth
def follow(self, msg, *channels):
self._follow_op(msg, channels, 'PUT', 'follow')
@CommandHandler("twitch unfollow", 1)
@requires_oauth
def unfollow(self, msg, *channels):
self._follow_op(msg, channels, 'DELETE', 'unfollow')
| [
"[email protected]"
] | |
7a2c9eb7044540d777bca9c0f68a4a888895eb00 | 06904f68018fbd42bba1909e12a79c2106af71f4 | /mirror_en.py | 733cf287ae4ed857491c9bb00206dfa953eb9428 | [] | no_license | rzbfreebird/MCDR-Mirror-Server | 2d079ac30c073805045f97302b2379937b8f95e2 | fbaebc8eeddaefe3675efff8abe98e7e69d83e30 | refs/heads/master | 2022-12-07T01:14:01.603244 | 2020-09-03T14:30:43 | 2020-09-03T14:30:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,443 | py | # -*- coding: utf-8 -*-
import shutil
import datetime
import os
import json as js
import platform
from os.path import abspath, dirname
from utils import rcon
current_path = abspath(dirname(__file__))
def read_config():
with open("config/mirror.json") as json_file:
config = js.load(json_file)
return config
conf=read_config()
mirror_folder=conf['path']
remote_enable=conf['remote']['enable']
address=conf['remote']['address']
port=conf['remote']['port']
secret=conf['remote']['secret']
start_command=conf['command']
world=conf["world"]
source=[]
target=[]
mirror_started=False
MCDRJudge=os.path.exists("{}MCDReforged.py".format(mirror_folder))
for i in range(len(world)):
source.append('./server/{}'.format(world[i-1]))
if(MCDRJudge):
for i in range(len(world)):
target.append('{}/server/{}'.format(mirror_folder,world[i-1]))
else:
for i in range(len(world)):
target.append('{}/{}'.format(mirror_folder,world[i-1]))
if(remote_enable):
connection=rcon.Rcon(address,port,secret)
remote_info='''
§6[Mirror]§bRemote Information:
§5Rcon Address: §b{}
§5Rcon Port: §b{}
'''.format(address,port)
help_msg='''
§r======= §6Minecraft Mirror Plugin §r=======
Use §6!!mirror sync§r to sync the main server's world to the mirror one
Use §6!!mirror start§r to turn on the mirror server
§4BE CAUTIOUS: IF YOU DON'T ENABLE THE RCON FREATURE OF THE MIRROR SERVER, YOU CANNOT SHUTDOWN THE SERVER BY REMOTE COMMAND
§4YOU CAN ONLY SHUTDOWN IT IN THE MIRROR SERVER, TO DO THIS, YOU CAN CHECKOUT THE FOLLOWING MCDR PLUGINS
§4SimpleOP without MCDR-Admin permission required
§4StartStopHelper with MCDR-Admin permission required
-----Rcon Features-----
Use §6!!mirror info§r to checkout rcon information(MCDR-Admin Permission is Required)
Use §6!!mirror stop§r to stop mirror server
Use §6!!mirror status§r to checkout whether the mirror has been turned on or not
Use §6!!mirror rcon <command>§r to send command to mirror server(MCDR-Admin Permission is Required, use it WITHOUT SLASH)
'''
SimpleOP=' {"text":"§6Checkout SimpleOP","clickEvent":{"action":"open_url","value":"https://github.com/GamerNoTitle/SimpleOP"}}'
StartStopHelper=' {"text":"§6Checkout StartStopHelper","clickEvent":{"action":"open_url","value":"https://github.com/MCDReforged-Plugins/StartStopHelper"}}'
def helpmsg(server,info):
if info.is_player and info.content == '!!mirror':
server.reply(info, help_msg, encoding=None)
server.execute('tellraw '+ info.player + SimpleOP)
server.execute('tellraw '+ info.player + StartStopHelper)
def sync(server,info):
start_time=datetime.datetime.now()
server.execute('save-all')
server.say('§6[Mirror]Syncing...')
i=0
try:
while True:
if(i>len(world)-1): break
shutil.copytree(source[i],target[i])
i=i+1
except:
try:
while True:
if(i>len(world)-1): break
shutil.rmtree(target[i],True)
shutil.copytree(source[i],target[i])
i=i+1
except Exception:
while True:
if(i>len(world)-1): break
shutil.rmtree(target[i],True)
ignore=shutil.ignore_patterns('session.lock')
shutil.copytree(source[i],target[i],ignore=ignore)
i=i+1
end_time=datetime.datetime.now()
server.say('§6[Mirror]Sync completed in {}'.format(end_time-start_time))
def start(server,info):
server.say('§6[Mirror]Mirror server is launching, please wait...')
if platform.system()=='Windows':
os.system('cd {} && powershell {}'.format(mirror_folder,start_command))
else:
os.system('cd {} && {}'.format(mirror_folder,start_command))
os.system('cd {}'.format(current_path))
global mirror_started
mirror_started=False
server.say('§6[Mirror]Mirror server has been shutdown!')
def command(server,info):
if(conf['remote']['command']):
if(server.get_permission_level(info)>2):
try:
connection.connect()
connection.send_command(info.content[14:])
connection.disconnect()
server.reply(info,'§6[Mirror]Command Sent!', encoding=None)
except Exception as e:
server.reply(info,'§6[Mirror]§4Error: {}'.format(e), encoding=None)
else:
server.reply(info,'§6[Mirror]§4Error: Permission Denied!', encoding=None)
else:
server.reply(info,' §6[Mirror]§4Error: Rcon feature is disabled!', encoding=None)
def stop(server,info):
try:
connection.connect()
connection.send_command('stop')
connection.disconnect()
except Exception as e:
server.reply(info,'§6[Mirror]§4Connection Failed: {}'.format(e), encoding=None)
def information(server,info):
if(server.get_permission_level(info)>2):
server.reply(info,remote_info)
else:
server.reply(info,"§6[Mirror]§4Error: Permission Denied!", encoding=None)
def status(server,info):
global mirror_started
try:
connection.connect()
server.reply(info,'§6[Mirror]§lMirror Server is online!', encoding=None)
connection.disconnect()
except:
if mirror_started:
server.reply(info,'§6[Mirror]§lMirror Server is Starting...(or mirror has been started but rcon feature didn\'t work well', encoding=None)
else:
server.reply(info,'§4[Mirror]§lMirror Server is offline!', encoding=None)
def on_load(server, old_module):
server.add_help_message('!!mirror', '§6Get the usage of Mirror')
def on_info(server,info):
if info.is_player and info.content == '!!mirror':
helpmsg(server,info)
if info.content == '!!mirror sync':
sync(server,info)
if info.content == '!!mirror start':
global mirror_started
if mirror_started:
server.reply(info,'§b[Mirror]Mirror server has already started, please don\'t run the command again!', encoding=None)
else:
mirror_started=True
start(server,info)
if('!!mirror rcon' in info.content):
command(server,info)
if(info.content=='!!mirror info'):
information(server,info)
if(info.content=='!!mirror stop'):
stop(server,info)
if(info.content=='!!mirror status'):
status(server,info)
| [
"[email protected]"
] | |
0f6b4c0e8a7fc2507d68d242905734ba1e2e2592 | 6b033e3dddc280417bb97500f72e68d7378c69d6 | /IV. COLAB/Enemy-Spotted/2. Uniform Classification/crawling/crawling_version_2_deprecated.py | fa6711bbd2b14e88c54793181f0ffa2d0b600bb1 | [] | no_license | inyong37/Study | e5cb7c23f7b70fbd525066b6e53b92352a5f00bc | e36252a89b68a5b05289196c03e91291dc726bc1 | refs/heads/master | 2023-08-17T11:35:01.443213 | 2023-08-11T04:02:49 | 2023-08-11T04:02:49 | 128,149,085 | 11 | 0 | null | 2022-10-07T02:03:09 | 2018-04-05T02:17:17 | Jupyter Notebook | UTF-8 | Python | false | false | 903 | py | from urllib.request import urlopen
import argparse
import requests as req
from bs4 import BeautifulSoup
# reference: https://enjoysomething.tistory.com/42
parser = argparse.ArgumentParser()
parser.add_argument("-data", required=False, default='acu pattern')
args = parser.parse_args()
data = args.data
def main():
url_info = "https://www.google.com/search?"
params = {
"q": data
}
html_object = req.get(url_info, params)
if html_object.status_code == 200:
bs_object = BeautifulSoup(html_object.text, "html.parser")
img_data = bs_object.find_all("img")
for i in enumerate(img_data[1:]):
t = urlopen(i[1].attrs['src']).read()
filename = "img_" + str(i[0] + 1) + '.jpg'
with open(filename, "wb") as f:
f.write(t)
print("Image Save Success")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
5901cd761f795addb37355ab5dfb91b136524937 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/45/usersdata/118/15614/submittedfiles/lista1.py | e7b973d29fb37d041373635daf0586e519cab283 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # -*- coding: utf-8 -*-
from __future__ import division
n = input('Digite o número de termos:')
a = []
for i in range(0,n+1,1):
    a.append(input('Digite o valor:'))
somap = 0
somai = 0
contp = 0
conti = 0
for j in range(0,len(a),1):
    if a[j]%2 == 0:
        contp = contp + 1
        somap = somap + a[j]
    else:
        conti = conti + 1
        somai = somai + a[j]
print(somai)
print(somap)
print(conti)
print(contp)
print(a) | [
"[email protected]"
] | |
65862506e7c2a0b1eba9b24168fb76d1f57c32fd | 87fb0ae5563512bf4cfe2754ea92e7f4173f753f | /Chap_05/Ex_129.py | 67451fbd6333873e212e51249f4b024c92250365 | [] | no_license | effedib/the-python-workbook-2 | 87291f5dd6d369360288761c87dc47df1b201aa7 | 69532770e6bbb50ea507e15f7d717028acc86a40 | refs/heads/main | 2023-08-21T13:43:59.922037 | 2021-10-12T20:36:41 | 2021-10-12T20:36:41 | 325,384,405 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | # Tokenizing a String
# Tokenizing is the process of converting a string into a list of substrings, known as tokens.
def tokenbystring(string: str) -> list:
string = string.replace(' ', '')
tokens = []
dgt = ''
for s in string:
if s in ['*', '/', '^', '+', '-', '(', ')']:
if dgt != '':
tokens.append(dgt)
dgt = ''
tokens.append(s)
elif 0 <= int(s) <= 9:
            dgt += s
    # flush any number still being accumulated when the string ends
    if dgt != '':
        tokens.append(dgt)
    return tokens
def main():
# exp = input("Enter a mathematical expressione: ")
exp = '52 + 3 - 86 * (936 / 2)'
print('The tokens are: {}'.format(tokenbystring(exp)))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c69d55d3f7500378e3a928dff4e8a0e47d70916b | 09db0d94ef90ff4df3b17cf8d9c2cca7f79b2c65 | /buffer.py | 317b3835a2a7a73b712441fc4f3f631cdf1c3eb1 | [] | no_license | tgbugs/desc | 5e17e7e35445908b14c7cbaed766764bb3cbab6b | b68a07af90f87f55c4b5be6ff433f310a0bc7e2c | refs/heads/master | 2020-04-09T12:20:02.650756 | 2019-05-08T07:34:29 | 2019-05-08T07:34:29 | 20,045,270 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | #!/usr/bin/env python3.4
""" Example for how to load vertex data from numpy directly
"""
import numpy as np
from panda3d.core import Geom, GeomVertexFormat, GeomVertexData
from .util.ipython import embed
size = 1000
data = np.random.randint(0,1000,(size,3))
#color = np.random.randint(0,255,(size,4))
color = np.repeat(np.random.randint(0,255,(1,4)), size, 0)
#full = np.hstack((data,color))
full = [tuple(d) for d in np.hstack((data,color))]
#full = [tuple(*d,*color) for d in data]
geom = GeomVertexData('points', GeomVertexFormat.getV3c4(), Geom.UHDynamic)
geom.setNumRows(len(full))
array = geom.modifyArray(0) # need a writeable version
handle = array.modifyHandle()
#options are then the following:
view = memoryview(array)
arr = np.asarray(view)
arr[:] = full
embed()
#OR
#handle.copyDataFrom('some other handle to a GVDA')
#handle.copySubataFrom(to_start, to_size, buffer, from_start, from_size)
| [
"[email protected]"
] | |
b7dd7a197154d308863a5d0f9d1d548a6a166d6e | dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5 | /eggs/plone.app.controlpanel-2.1.1-py2.7.egg/plone/app/controlpanel/skins.py | a649d961b9669e9e19a497770d9f1e3f809ad3e2 | [] | no_license | nacho22martin/tesis | ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5 | e137eb6225cc5e724bee74a892567796166134ac | refs/heads/master | 2020-12-24T13:20:58.334839 | 2013-11-09T12:42:41 | 2013-11-09T12:42:41 | 14,261,570 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,314 | py | from zope.interface import Interface
from zope.component import adapts
from zope.formlib.form import FormFields
from zope.interface import implements
from zope.schema import Bool
from zope.schema import Choice
from Products.CMFCore.utils import getToolByName
from Products.CMFDefault.formlib.schema import SchemaAdapterBase
from Products.CMFPlone import PloneMessageFactory as _
from Products.CMFPlone.interfaces import IPloneSiteRoot
from form import ControlPanelForm
from widgets import DropdownChoiceWidget
from zope.schema.vocabulary import SimpleTerm
from zope.schema.vocabulary import SimpleVocabulary
ICON_VISIBILITY_CHOICES = {
_(u"Only for users who are logged in"): 'authenticated',
_(u"Never show icons"): 'disabled',
_(u"Always show icons"): 'enabled',
}
ICON_VISIBILITY_VOCABULARY = SimpleVocabulary(
[SimpleTerm(v, v, k) for k, v in ICON_VISIBILITY_CHOICES.items()]
)
class ISkinsSchema(Interface):
theme = Choice(title=_(u'Default theme'),
description=_(u'''Select the default theme for the site.'''),
required=True,
missing_value=tuple(),
vocabulary="plone.app.vocabularies.Skins")
mark_special_links = Bool(title=_(u'Mark external links'),
description=_(u"If enabled all external links "
"will be marked with link type "
"specific icons."),
default=True)
ext_links_open_new_window = Bool(title=_(u"External links open in new "
"window"),
description=_(u"If enabled all external "
"links in the content "
"region open in a new "
"window."),
default=False)
icon_visibility = Choice(title=_(u'Show content type icons'),
description=_(u"If disabled the content icons "
"in folder listings and portlets "
"won't be visible."),
vocabulary=ICON_VISIBILITY_VOCABULARY)
use_popups = Bool(title=_(u'Use popup overlays for simple forms'),
description=_(u"If enabled popup overlays will be "
"used for simple forms like login, "
"contact and delete confirmation."),
default=True)
class SkinsControlPanelAdapter(SchemaAdapterBase):
adapts(IPloneSiteRoot)
implements(ISkinsSchema)
def __init__(self, context):
super(SkinsControlPanelAdapter, self).__init__(context)
self.context = getToolByName(context, 'portal_skins')
self.jstool = getToolByName(context, 'portal_javascripts')
self.csstool = getToolByName(context, 'portal_css')
self.ksstool = getToolByName(context, 'portal_kss')
ptool = getToolByName(context, 'portal_properties')
self.props = ptool.site_properties
self.themeChanged = False
def get_theme(self):
return self.context.getDefaultSkin()
def set_theme(self, value):
self.themeChanged = True
self.context.default_skin = value
theme = property(get_theme, set_theme)
def _update_jsreg_mark_special(self):
self.jstool.getResource('mark_special_links.js').setEnabled(
self.mark_special_links or self.ext_links_open_new_window
)
self.jstool.cookResources()
def get_mark_special_links(self):
msl = getattr(self.props, 'mark_special_links', False)
if msl == 'true':
return True
return False
# return self.jstool.getResource('mark_special_links.js').getEnabled()
def set_mark_special_links(self, value):
if value:
mark_special_links='true'
else:
mark_special_links='false'
if self.props.hasProperty('mark_special_links'):
self.props.manage_changeProperties(mark_special_links=mark_special_links)
else:
self.props.manage_addProperty('mark_special_links', mark_special_links, 'string')
self._update_jsreg_mark_special()
mark_special_links = property(get_mark_special_links,
set_mark_special_links)
def get_ext_links_open_new_window(self):
elonw = self.props.external_links_open_new_window
if elonw == 'true':
return True
return False
def set_ext_links_open_new_window(self, value):
if value:
self.props.manage_changeProperties(external_links_open_new_window='true')
else:
self.props.manage_changeProperties(external_links_open_new_window='false')
self._update_jsreg_mark_special()
ext_links_open_new_window = property(get_ext_links_open_new_window,
set_ext_links_open_new_window)
def get_icon_visibility(self):
return self.props.icon_visibility
def set_icon_visibility(self, value):
self.props.manage_changeProperties(icon_visibility=value)
icon_visibility = property(get_icon_visibility,set_icon_visibility)
def get_use_popups(self):
return self.jstool.getResource('popupforms.js').getEnabled()
def set_use_popups(self, value):
self.jstool.getResource('popupforms.js').setEnabled(value)
self.jstool.cookResources()
use_popups = property(get_use_popups, set_use_popups)
class SkinsControlPanel(ControlPanelForm):
form_fields = FormFields(ISkinsSchema)
form_fields['theme'].custom_widget = DropdownChoiceWidget
label = _("Theme settings")
description = _("Settings that affect the site's look and feel.")
form_name = _("Theme settings")
def _on_save(self, data=None):
# Force a refresh of the page so that a new theme choice fully takes
# effect.
if not self.errors and self.adapters['ISkinsSchema'].themeChanged:
self.request.response.redirect(self.request.URL)
| [
"ignacio@plone.(none)"
] | ignacio@plone.(none) |
9ee36689f1628a59d8a7f28c1af469ca7adedfe2 | b5e15fc6fe0132f18c72a1bf035b3edab618e35c | /microfinance/project_data/helpers.py | 4e75b923715a09285f8ea6047a5c9c702562fcbf | [] | no_license | Jubair70/BRAC-Customer-Service-Assisstant | ced72b4c81e0f4670c4be9efdb7d0d113f285b28 | fe35de8b96e2d8a44bf8ed811faa628ea27861d2 | refs/heads/master | 2021-06-27T06:38:35.239131 | 2020-01-13T05:17:48 | 2020-01-13T05:17:48 | 233,516,095 | 0 | 0 | null | 2021-06-10T22:28:56 | 2020-01-13T05:12:26 | JavaScript | UTF-8 | Python | false | false | 608 | py | import paho.mqtt.client as mqtt
from microfinance.settings import MQTT_SERVER_PATH, MQTT_SERVER_PORT
def send_push_msg(topic = "/CSA/1/11111", payload = None, qos = 1, retained = False):
# MQTT_SERVER_PATH = "192.168.22.114"
# MQTT_SERVER_PORT = 1884
# MQTT_SUBSCRIBE_TOKEN = "/CSA/1/11111"
# MQTT_SERVER_RESPONSE = "response from view=> ayayayayya :)"
mqttc = mqtt.Client("",True)
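    # empty client id with clean_session=True: a fresh, non-persistent session is used for every call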
mqttc.connect(MQTT_SERVER_PATH, MQTT_SERVER_PORT,100)
print "sending.. token: %s: response text: %s" % (topic, payload)
mqttc.publish(topic, payload, qos , retained)
mqttc.disconnect() | [
"[email protected]"
] | |
e7bc5b408596623a5bf610c7bba934e4da24efab | 197420c1f28ccb98059888dff214c9fd7226e743 | /elements, blocks and directions/classes/class5_A_funcs.py | 3f2a7d6da1786dea286652d45ddc788ab0d67f48 | [] | no_license | Vovanuch/python-basics-1 | fc10b6f745defff31364b66c65a704a9cf05d076 | a29affec12e8b80a1d3beda3a50cde4867b1dee2 | refs/heads/master | 2023-07-06T17:10:46.341121 | 2021-08-06T05:38:19 | 2021-08-06T05:38:19 | 267,504,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | ''' class A '''
class A:
val = 1
def foo(self):
A.val += 2
def bar(self):
self.val += 1
a = A()
b = A()
a.bar()
a.foo()
c = A()
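# a.bar() created an instance attribute on a (a.val == 2), while a.foo() bumped the class attribute A.val to 3,
# which b and c still share, so the prints below output 2, 3 and 3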
print(a.val)
print(b.val)
print(c.val)
| [
"[email protected]"
] | |
af8ba639185f3e1cad576566a26e97b93daee28c | a63d907ad63ba6705420a6fb2788196d1bd3763c | /src/api/datahub/storekit/druid.py | 73bd116ab7896d4819f9a8cc6250460549d55a6b | [
"MIT"
] | permissive | Tencent/bk-base | a38461072811667dc2880a13a5232004fe771a4b | 6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2 | refs/heads/master | 2022-07-30T04:24:53.370661 | 2022-04-02T10:30:55 | 2022-04-02T10:30:55 | 381,257,882 | 101 | 51 | NOASSERTION | 2022-04-02T10:30:56 | 2021-06-29T06:10:01 | Python | UTF-8 | Python | false | false | 44,065 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import random
import threading
import time
import uuid
from datetime import datetime, timedelta
import requests
from common.http import get, post
from common.log import logger
from datahub.common.const import (
APPEND_FIELDS,
BAD_FIELDS,
BIGINT,
CHECK_DIFF,
CHECK_RESULT,
CLUSTER_NAME,
CONNECTION_INFO,
COUNT,
DATASOURCE,
DRUID,
EXPIRES,
FAILED,
FIELD_NAME,
FIELD_TYPE,
FIELDS,
HOST,
ID,
INFO,
INTERVAL,
JSON_HEADERS,
LOCATION,
LONG,
MESSAGE,
MINTIME,
NAME,
PENDING,
PERIOD,
PHYSICAL_TABLE_NAME,
PORT,
REPORT_TIME,
RESULT_TABLE_ID,
RT_FIELDS,
RUNNING,
SAMPLE,
SEGMENTS,
SIZE,
STATUS,
STORAGE_CLUSTER,
STORAGE_CONFIG,
STORAGES,
STRING,
SUCCESS,
TABLE,
TABLE_RECORD_NUMS,
TABLE_SIZE_MB,
TASK,
TASK_TYPE,
TIMESTAMP,
TYPE,
UNKNOWN,
VARCHAR,
VERSION,
WAITING,
ZOOKEEPER_CONNECT,
)
from datahub.storekit import model_manager
from datahub.storekit.exceptions import (
DruidCreateTaskErrorException,
DruidDeleteDataException,
DruidHttpRequestException,
DruidQueryDataSourceException,
DruidQueryExpiresException,
DruidQueryHistoricalException,
DruidQueryTaskErrorException,
DruidQueryWorkersException,
DruidShutDownTaskException,
DruidUpdateExpiresException,
DruidZkConfException,
DruidZKPathException,
NotSupportTaskTypeException,
)
from datahub.storekit.settings import (
CLEAN_DELTA_DAY,
COORDINATOR,
DEFAULT_DRUID_EXPIRES,
DEFAULT_EXPIRES_RULE,
DEFAULT_MAX_IDLE_TIME,
DEFAULT_SEGMENT_GRANULARITY,
DEFAULT_TASK_MEMORY,
DEFAULT_TIMESTAMP_COLUMN,
DEFAULT_WINDOW_PERIOD,
DRUID_CLEAN_DEEPSTORAGE_TASK_CONFIG_TEMPLATE,
DRUID_COMPACT_SEGMENTS_TASK_CONFIG_TEMPLATE,
DRUID_MAINTAIN_TIMEOUT,
DRUID_VERSION_V1,
DRUID_VERSION_V2,
ENDPOINT_DATASOURCE_RULE,
ENDPOINT_GET_ALL_DATASOURCES,
ENDPOINT_GET_DATASOURCES,
ENDPOINT_GET_PENDING_TASKS,
ENDPOINT_GET_RUNNING_TASKS,
ENDPOINT_GET_RUNNING_WORKERS,
ENDPOINT_HISTORICAL_SIZES,
ENDPOINT_PUSH_EVENTS,
ENDPOINT_RUN_TASK,
ENDPOINT_SHUTDOWN_TASK,
EXCEPT_FIELDS,
EXECUTE_TIMEOUT,
HTTP_REQUEST_TIMEOUT,
INT_MAX_VALUE,
MAINTAIN_DELTA_DAY,
MERGE_BYTES_LIMIT,
MERGE_DAYS_DEFAULT,
OVERLORD,
TASK_CONFIG_TEMPLATE,
TASK_TYPE_PENDING,
TASK_TYPE_RUNNING,
TIME_ZONE_DIFF,
UTC_BEGIN_TIME,
UTC_FORMAT,
ZK_DRUID_PATH,
)
from datahub.storekit.util import translate_expires_day
from django.template import Context, Template
from kazoo.client import KazooClient
def initialize(rt_info):
"""
    Initialize the druid storage for the rt
    :param rt_info: fields and config info of the rt
    :return: result of the initialize operation
"""
return prepare(rt_info)
def info(rt_info):
"""
    Get the druid storage related info of the rt
    :param rt_info: fields and config info of the rt
    :return: druid related info of the rt
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
coordinator = _get_role_leader(zk_addr, COORDINATOR, druid[STORAGE_CLUSTER][VERSION])
    # fetch the dimension and metric info
broker_host, broker_port = conn_info[HOST], conn_info[PORT]
schema_url = f"http://{broker_host}:{broker_port}/druid/v2/sql/"
schema_sql = (
'{"query": "SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE '
"TABLE_NAME = '%s'\"}" % physical_tn
)
ok, schema = post(schema_url, params=json.loads(schema_sql))
table_schema = {}
if ok and schema:
for e in schema:
table_schema[e["COLUMN_NAME"].lower()] = e["DATA_TYPE"].lower()
logger.info(f"physical_tn: {physical_tn}, schema_url: {schema_url}, schema: {table_schema}")
    # fetch segments info: curl -XGET http://{router_ip:port}/druid/coordinator/v1/datasources/{datasource}
segments_url = f"http://{coordinator}/druid/coordinator/v1/datasources/{physical_tn}"
ok, segments = get(segments_url)
logger.info(f"physical_tn: {physical_tn}, segments_url: {segments_url}, segments: {segments}")
    # fetch sample data
sample_url = f"http://{broker_host}:{broker_port}/druid/v2/sql/"
sample_sql = '{"query": "SELECT * FROM \\"%s\\" ORDER BY __time DESC LIMIT 10"}' % physical_tn
ok, sample = post(sample_url, params=json.loads(sample_sql))
logger.info(f"physical_tn: {physical_tn}, sample_url: {sample_url}, sample_sql: {sample_sql}, sample: {sample}")
druid[INFO] = {TABLE: table_schema, SEGMENTS: segments, SAMPLE: sample}
return druid
def get_task_status(overlord, task_id, druid_version):
"""
    Get the status of the task with the given task_id
    :param druid_version: version of the druid cluster
    :param overlord: overlord role leader, in ip:port format
    :param task_id: id of the index task
    :return: status of the index task
"""
    # query the task status: curl -XGET http://{router_ip:port}/druid/indexer/v1/task/{task_id}/status
status_url = f"http://{overlord}/druid/indexer/v1/task/{task_id}/status"
    # 5 possible states: RUNNING, PENDING, WAITING, SUCCESS, FAILED
ok, status = get(status_url)
if not ok:
return UNKNOWN
logger.info(f"task_id: {task_id}, status_url: {status_url}, status: {status}")
runner_status = status[STATUS][STATUS]
if druid_version == DRUID_VERSION_V1:
return runner_status
else:
return runner_status if runner_status in [SUCCESS, FAILED] else status[STATUS]["runnerStatusCode"]
def shutdown_index_task(overlord, task_id):
"""
    Forcibly shut down the task with the given task_id; this may lose peon data, use with caution
    :param overlord: overlord role, in ip:port format
    :param task_id: id of the index task
    :return: status of the index task
"""
    # shut down the task: curl -XPOST http://{router_ip:port}/druid/overlord/v1/task/{task_id}/shutdown
shutdown_url = f"http://{overlord}/druid/indexer/v1/task/{task_id}/shutdown"
    # best-effort shutdown of the druid index task, retry 3 times
for i in range(3):
try:
resp = requests.post(shutdown_url, headers=JSON_HEADERS, timeout=HTTP_REQUEST_TIMEOUT)
if resp.status_code == 200:
break
except Exception:
logger.error(
f"{i} times, shutdown index task failed with task_id: {task_id}, shutdown_url: {shutdown_url}, "
f"resp.text: {resp.text}"
)
def merge_segments(zk_addr, datasource, begin_date, end_date, druid_version, timeout, merge_days):
"""
    Merge the segments of the given datasource within the given time range at day granularity
    :param merge_days: number of days to merge
    :param zk_addr: zk connection info
    :param datasource: datasource to merge
    :param begin_date: begin date of the merge operation
    :param end_date: end date of the merge operation
    :param druid_version: version of the druid cluster
    :param timeout: execution timeout of the merge task, in minutes
"""
coordinator = _get_role_leader(zk_addr, COORDINATOR, druid_version)
    # check whether a merge is needed
if not should_merge(coordinator, datasource, begin_date, end_date, merge_days):
return
interval = f"{begin_date}/{end_date}"
overlord = _get_role_leader(zk_addr, OVERLORD, druid_version)
execute_task(DRUID_COMPACT_SEGMENTS_TASK_CONFIG_TEMPLATE, overlord, datasource, interval, druid_version, timeout)
def execute_task(task_template, overlord, datasource, interval, druid_version, timeout=60):
"""
    :param task_template: task config template
    :param overlord: overlord leader process, in ip:port format
    :param datasource: druid datasource name
    :param interval: time interval
    :param druid_version: version of the druid cluster
    :param timeout: execution timeout of the task, in minutes
"""
data = Template(task_template)
context = Context({DATASOURCE: datasource, INTERVAL: interval})
body = data.render(context)
task_url = f"http://{overlord}/druid/indexer/v1/task"
ok, task = post(task_url, params=json.loads(body))
task_id = task["task"] if ok else ""
logger.info(
f"datasource: {datasource}, overlord: {overlord}, interval: {interval}, task config: {body}, task_id: {task_id}"
)
begin_time = datetime.now()
time_delta = timedelta(minutes=timeout)
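    # poll the task status every 10 seconds until it finishes, gets stuck pending/waiting, or exceeds the timeout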
while True:
time.sleep(10)
status = get_task_status(overlord, task_id, druid_version)
if status == RUNNING:
if datetime.now() - begin_time > time_delta:
shutdown_index_task(overlord, task_id)
logger.warning(f"datasource: {datasource}, task_id {task_id} timeout, has been shutdown")
return
elif status in [PENDING, WAITING]:
shutdown_index_task(overlord, task_id)
return
else:
return
def clean_unused_segments(cluster_name, druid_version, timeout=60):
"""
    Clean up the unused segments of a single cluster
    :param cluster_name: cluster name
    :param druid_version: version of the druid cluster
    :param timeout: execution timeout of the clean task, in minutes
:return:
"""
coordinator = get_leader(cluster_name, COORDINATOR)
ok, datasources_all = get(f"http://{coordinator}{ENDPOINT_GET_ALL_DATASOURCES}")
if not ok or not datasources_all:
return False
ok, datasources_used = get(f"http://{coordinator}{ENDPOINT_GET_DATASOURCES}")
if not ok:
return False
logger.info(f"datasources_all: {datasources_all}, datasources_used: {datasources_used}")
for datasource in datasources_all:
try:
begin_date, end_date = "1000-01-01", "3000-01-01"
if datasource in datasources_used:
coordinator = get_leader(cluster_name, COORDINATOR)
ok, resp = get(f"http://{coordinator}/druid/coordinator/v1/datasources/{datasource}/")
if not ok:
continue
end_date = (
datetime.strptime(resp[SEGMENTS][MINTIME], "%Y-%m-%dT%H:%M:%S.000Z") - timedelta(CLEAN_DELTA_DAY)
).strftime("%Y-%m-%d")
interval = f"{begin_date}/{end_date}"
overlord = get_leader(cluster_name, OVERLORD)
logger.info(f"datasource: {datasource}, overlord: {overlord}, interval: {interval}")
execute_task(
DRUID_CLEAN_DEEPSTORAGE_TASK_CONFIG_TEMPLATE, overlord, datasource, interval, druid_version, timeout
)
except Exception:
logger.warning(f"clean unused segments failed for datasource {datasource}", exc_info=True)
return True
def should_merge(coordinator, datasource, begin_date, end_date, merge_days=MERGE_DAYS_DEFAULT):
"""
    Check whether the segments of the given datasource within the given time range need merging (the interval is
    one day). Merging is NOT needed when:
    1) the average segment size is larger than 300MB
    2) the average number of segment files per day is less than 2
    :param merge_days: number of days to merge
    :param coordinator: coordinator role leader node
    :param datasource: druid datasource name
    :param begin_date: left boundary of the merge interval
    :param end_date: right boundary of the merge interval
:return:
"""
segments_url = (
f"http://{coordinator}/druid/coordinator/v1/datasources/{datasource}/intervals/"
f"{begin_date}_{end_date}?simple"
)
ok, segments = get(segments_url)
    # segments are merged by day; after merging, at most one segment per day is expected
if not ok or len(segments) <= merge_days:
return False
size = 0
file_count = 0
for value in segments.values():
size += value[SIZE]
file_count += value[COUNT]
logger.info(
f"datasource: {datasource}, segments_url: {segments_url}, segments: {segments}, size: {size}, "
f"file_count: {file_count}, status: True"
)
if file_count <= 1 or size > MERGE_BYTES_LIMIT:
return False
return True
def alter(rt_info):
"""
    Modify the druid storage related info of the rt
    :param rt_info: fields and config info of the rt
    :return: result of altering the rt's druid storage
"""
return prepare(rt_info)
def prepare(rt_info):
"""
    Prepare the druid storage associated with the rt (create a new table or add fields to an existing table)
    :param rt_info: config info of the rt
    :return: True/False
"""
return True
def maintain_merge_segments(zk_addr, physical_tn, expires_day, delta_day, druid_version, timeout, merge_days):
"""
    Merge segment logic executed from maintain and maintain_all
    :param zk_addr: zk connection info
    :param physical_tn: physical table name
    :param expires_day: data retention days
    :param delta_day: number of days to skip
    :param druid_version: version of the druid cluster
    :param timeout: execution timeout of the druid task
"""
expires_date = (datetime.today() - timedelta(expires_day)).strftime("%Y-%m-%d")
end_date = (datetime.today() - timedelta(delta_day)).strftime("%Y-%m-%d")
begin_date = (datetime.today() - timedelta(delta_day + merge_days)).strftime("%Y-%m-%d")
logger.info(
f"physical_tn: {physical_tn}, expires_day: {expires_day}, begin_date: {begin_date}, end_date: {end_date}"
)
if end_date >= expires_date:
merge_segments(zk_addr, physical_tn, begin_date, end_date, druid_version, timeout, merge_days)
def set_retain_rule(coordinator, cluster_name, physical_tn, expires_day, druid_version):
"""
    Set the data retention rule for the druid datasource
    :param coordinator: coordinator role leader, in hostname:port format
    :param cluster_name: cluster name
    :param physical_tn: physical table name
    :param expires_day: data retention days
    :param druid_version: version of the druid cluster
    :return: whether the retention rule was set successfully, True or False
"""
rules = build_retain_rule(druid_version, expires_day)
url = f"http://{coordinator}/druid/coordinator/v1/rules/{physical_tn}"
resp = requests.post(url, data=rules, headers=JSON_HEADERS)
if resp.status_code != 200:
logger.warning(
f"{cluster_name}: failed to set retention rule for datasource {physical_tn}. "
f"status_code: {resp.status_code}, response: {resp.text}"
)
return False
return True
def build_retain_rule(druid_version, expires_day):
"""
    Build the data retention rule
    :param expires_day: data retention days
    :param druid_version: version of the druid cluster
    :return: json string
"""
load_rule = {
PERIOD: f"P{expires_day}D",
"includeFuture": True,
"tieredReplicants": {"_default_tier": 2},
TYPE: "loadByPeriod",
}
if druid_version == DRUID_VERSION_V1:
load_rule["tieredReplicants"]["tier_hot"] = 2
rules = [load_rule, {"type": "dropForever"}]
return json.dumps(rules)
def kill_waiting_tasks(cluster_name):
"""
    Kill all waiting tasks of the druid cluster
    :param cluster_name: cluster name
"""
try:
overlord = get_leader(cluster_name, OVERLORD)
waiting_tasks_url = "http://" + overlord + "/druid/indexer/v1/waitingTasks"
res = requests.get(waiting_tasks_url, verify=False, timeout=HTTP_REQUEST_TIMEOUT)
pending_tasks = json.loads(res.text, encoding="utf-8")
for task_json in pending_tasks:
kill_task_url = "http://" + overlord + "/druid/indexer/v1/task/" + task_json[ID] + "/shutdown"
headers = JSON_HEADERS
requests.post(kill_task_url, headers=headers, verify=False)
except Exception:
logger.warning("failed to kill waiting tasks", exc_info=True)
def kill_pending_tasks(cluster_name):
"""
    Kill all pending tasks of the druid cluster
    :param cluster_name: cluster name
"""
try:
overlord = get_leader(cluster_name, OVERLORD)
pending_tasks_url = "http://" + overlord + "/druid/indexer/v1/pendingTasks"
res = requests.get(pending_tasks_url, verify=False, timeout=HTTP_REQUEST_TIMEOUT)
pending_tasks = json.loads(res.text, encoding="utf-8")
for task_json in pending_tasks:
kill_task_url = "http://" + overlord + "/druid/indexer/v1/task/" + task_json[ID] + "/shutdown"
headers = JSON_HEADERS
requests.post(kill_task_url, headers=headers, verify=False)
except Exception:
logger.warning("failed to kill pending tasks", exc_info=True)
def maintain(rt_info, delta_day=MAINTAIN_DELTA_DAY, timeout=EXECUTE_TIMEOUT, merge_days=MERGE_DAYS_DEFAULT):
"""
    Maintain the data retention rule of the druid table according to the user-defined retention time
    :param merge_days: number of days to merge
    :param rt_info: config info of the rt
    :param delta_day: date offset for merging segments
    :param timeout: execution timeout of the druid index task
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
cluster_name, version = druid[STORAGE_CLUSTER][CLUSTER_NAME], druid[STORAGE_CLUSTER][VERSION]
coordinator = get_leader(cluster_name, COORDINATOR)
expires_day = translate_expires_day(druid[EXPIRES])
    # set the data retention rule
set_retain_rule(coordinator, cluster_name, physical_tn, expires_day, version)
# merge segments
zk_addr = conn_info[ZOOKEEPER_CONNECT]
maintain_merge_segments(zk_addr, physical_tn, expires_day, delta_day, version, timeout, merge_days)
return True
def maintain_all(delta_day=MAINTAIN_DELTA_DAY):
"""
    Maintain the data retention rules of druid tables according to the user-defined retention time
"""
start = time.time()
    # rt-level maintain, mainly setting the data retention time
storage_rt_list = model_manager.get_storage_rt_objs_by_type(DRUID)
for rt_storage in storage_rt_list:
try:
conn_info = json.loads(rt_storage.storage_cluster_config.connection_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
coordinator = _get_role_leader(zk_addr, COORDINATOR, rt_storage.storage_cluster_config.version)
expires_day = translate_expires_day(rt_storage.expires)
physical_tn = rt_storage.physical_table_name
cluster_name = rt_storage.storage_cluster_config.cluster_name
            # set the data retention rule
set_retain_rule(
coordinator, cluster_name, physical_tn, expires_day, rt_storage.storage_cluster_config.version
)
except Exception:
logger.warning(
f"{rt_storage.storage_cluster_config.cluster_name}: failed to maintain the retention rule of "
f"datasource {rt_storage.physical_table_name}",
exc_info=True,
)
set_rule_finish = time.time()
    # cluster-level maintenance: clean up deep storage and compact segments
cluster_list = model_manager.get_storage_cluster_configs_by_type(DRUID)
check_threads = []
for cluster in cluster_list:
cluster_name = cluster[CLUSTER_NAME]
thread = threading.Thread(target=maintain_druid_cluster, name=cluster_name, args=(cluster_name,))
        # mark the thread as a daemon so child threads end when the main thread finishes
thread.setDaemon(True)
check_threads.append(thread)
thread.start()
    # join all threads and wait until every cluster check has finished
    # use a timeout so a problematic cluster does not block forever and delay later maintenance tasks
for th in check_threads:
th.join(timeout=DRUID_MAINTAIN_TIMEOUT)
end = time.time()
logger.info(
f"druid maintain_all total time: {end - start}(s), set rule take {set_rule_finish - start}(s), "
f"cluster maintain takes {end - set_rule_finish}(s)"
)
return True
def maintain_druid_cluster(cluster_name):
"""
    Serially maintain the rts of a single cluster: clean unused data on deep storage and merge small segments
    :param cluster_name: cluster name
"""
cluster = model_manager.get_storage_cluster_config(cluster_name, DRUID)
version = cluster[VERSION]
    start = time.time()
    clean_unused_segments(cluster_name, version, EXECUTE_TIMEOUT)
    clean_finish = time.time()
    # compaction cannot be executed on druid 0.11 clusters
    if version == DRUID_VERSION_V2:
        segments_compaction(cluster_name, MAINTAIN_DELTA_DAY, MERGE_DAYS_DEFAULT, EXECUTE_TIMEOUT)
    end = time.time()
    logger.info(
        f"{cluster_name}: maintain_druid_cluster total time: {end - start}(s), clean_unused_segments takes "
        f"{clean_finish - start}(s), compaction takes {end - clean_finish}(s)"
    )
def check_schema(rt_info):
"""
    Check whether changes to the RT fields (names, types) satisfy the storage constraints
    :param rt_info: configuration info of the result table (rt)
    :return: schema comparison between the rt fields and the storage fields
"""
result = {RT_FIELDS: {}, "druid_fields": {}, CHECK_RESULT: True, CHECK_DIFF: {}}
for field in rt_info[FIELDS]:
if field[FIELD_NAME].lower() in EXCEPT_FIELDS:
continue
result[RT_FIELDS][field[FIELD_NAME]] = field[FIELD_TYPE]
_, physical_tn, conn_info = _get_druid_storage_info(rt_info)
broker_host, broker_port = conn_info[HOST], conn_info[PORT]
druid_schema_url = f"http://{broker_host}:{broker_port}/druid/v2/sql/"
druid_schema_sql = (
'{"query": "SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS '
"WHERE TABLE_NAME = '%s'\"}" % physical_tn
)
ok, druid_schema = post(druid_schema_url, params=json.loads(druid_schema_sql))
if not ok or not druid_schema:
return result
logger.info(f"physical_tn: {physical_tn}, druid_schema_url: {druid_schema_url}, druid_schema: {druid_schema}")
for e in druid_schema:
result["druid_fields"][e["COLUMN_NAME"].lower()] = e["DATA_TYPE"].lower()
append_fields, bad_fields = check_rt_druid_fields(result[RT_FIELDS], result["druid_fields"])
result[CHECK_DIFF] = {APPEND_FIELDS: append_fields, BAD_FIELDS: bad_fields}
if bad_fields:
result[CHECK_RESULT] = False
logger.info(f"diff result: {result}")
return result
def check_rt_druid_fields(rt_table_columns, druid_columns):
"""
    Compare the rt fields with the fields of the druid physical table
    :param rt_table_columns: rt fields after conversion to druid field types
    :param druid_columns: fields of the druid physical table
    :return: (append_fields, bad_fields), fields that need to be added and fields whose type has changed
"""
append_fields, bad_fields = [], []
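    # type equivalences accepted below: string <-> varchar and long <-> bigint; any other
    # mismatch is reported as a bad field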
for key, value in rt_table_columns.items():
col_name, col_type = key.lower(), value.lower()
        if col_name in druid_columns:
            # then compare the types
            druid_col_type = druid_columns[col_name]
ok = (
(col_type == druid_col_type)
or (col_type == STRING and druid_col_type == VARCHAR)
or (col_type == LONG and druid_col_type == BIGINT)
)
if not ok:
bad_fields.append({col_name: f"difference between rt and druid({col_type} != {druid_col_type})"})
else:
append_fields.append({FIELD_NAME: col_name, FIELD_TYPE: col_type})
return append_fields, bad_fields
def clusters():
"""
    Get the list of druid storage clusters
    :return: list of druid storage clusters
"""
result = model_manager.get_storage_cluster_configs_by_type(DRUID)
return result
def create_task(rt_info):
"""
    Create an indexing task
    :param rt_info: configuration info of the result table (rt)
    :return: location of the created task
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
zk_addr = conn_info.get(ZOOKEEPER_CONNECT)
overlord = _get_role_leader(zk_addr, OVERLORD, druid[STORAGE_CLUSTER][VERSION])
task_config = _get_task_config(rt_info)
url = f"http://{overlord}{ENDPOINT_RUN_TASK}"
result, resp = post(url=url, params=json.loads(task_config))
if not result or not resp[TASK]:
logger.error(f"create task error, url: {url}, param: {task_config}, result: {resp}")
raise DruidCreateTaskErrorException(message_kv={RESULT_TABLE_ID: rt_info[RESULT_TABLE_ID]})
    # get the address of the task that is currently running
task_id = resp[TASK]
    # poll for the result
return _get_task_location(overlord, task_id)
def _get_task_location(overlord, task_id, max_times=3):
"""
    :param overlord: overlord node
    :param task_id: task id
    :param max_times: maximum number of retries
    :return: task location
"""
if max_times < 0:
return ""
running_tasks = _get_tasks(overlord, TASK_TYPE_RUNNING)
for task in running_tasks:
if task[ID] == task_id:
task_location = f"http://{task[LOCATION][HOST]}:{task[LOCATION][PORT]}{ENDPOINT_PUSH_EVENTS}"
return task_location
time.sleep(5)
max_times = max_times - 1
return _get_task_location(overlord, task_id, max_times)
def shutdown_task(rt_info):
"""
    :param rt_info: result table info
    :return: whether the shutdown succeeded or failed
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
overlord = _get_role_leader(zk_addr, OVERLORD, druid[STORAGE_CLUSTER][VERSION])
return _shutdown_task_with_retry(overlord, physical_tn)
def _shutdown_task_with_retry(overlord, data_source, max_times=3):
"""
    Shut down tasks, with retry
    :param overlord: overlord node
    :param data_source: datasource name
    :param max_times: maximum number of retries
    :return: whether the tasks were shut down
"""
if max_times < 0:
raise DruidShutDownTaskException(message_kv={MESSAGE: "shut down overtime"})
running_tasks = _get_tasks(overlord, TASK_TYPE_RUNNING)
pending_tasks = _get_tasks(overlord, TASK_TYPE_PENDING)
tasks = running_tasks + pending_tasks
counter = 0
for task in tasks:
if task[ID].find(data_source) > 0:
peon_url = f"http://{task[LOCATION][HOST]}:{task[LOCATION][PORT]}{ENDPOINT_SHUTDOWN_TASK}"
resp = requests.post(peon_url)
logger.info(f"shutdown task info, url: {peon_url}, result: {resp.content}")
if resp.status_code != 200:
logger.error(f"shutdown task exception, {resp}")
raise DruidShutDownTaskException(message_kv={MESSAGE: resp})
logger.info(f"shutdown task success, peon_url: {peon_url}, task_id: {task[ID]}")
else:
counter = counter + 1
if counter == len(tasks):
return True
time.sleep(5)
max_times = max_times - 1
return _shutdown_task_with_retry(overlord, data_source, max_times)
def _get_druid_storage_info(rt_info):
"""
    Get basic storage info
    :param rt_info: rt info
:return: druid, physical_tn, conn_info
"""
druid = rt_info[STORAGES][DRUID]
physical_tn = druid[PHYSICAL_TABLE_NAME]
conn_info = json.loads(druid[STORAGE_CLUSTER][CONNECTION_INFO])
return (
druid,
physical_tn,
conn_info,
)
def _get_role_leader(zk_addr, zk_node, druid_version):
"""
    :param zk_addr: zk connection info
    :param zk_node: zk node type
    :param druid_version: druid version
    :return: leader of the given role
"""
path = f"{ZK_DRUID_PATH}/{zk_node.lower() if druid_version == DRUID_VERSION_V1 else zk_node.upper()}"
zk = KazooClient(hosts=zk_addr, read_only=True)
zk.start()
result = zk.get_children(path)
zk.stop()
if not result or len(result) == 0:
logger.error(f"not found any zk path {path}, or this path is empty")
raise DruidZkConfException()
role = random.sample(result, 1)[0]
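    # each child znode is expected to be a host:port announcement of that role; any instance
    # can be asked for the current leader via its /leader endpoint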
if zk_node in ["overlord", "OVERLORD"]:
leader_url = f"http://{role}/druid/indexer/v1/leader"
elif zk_node in ["coordinator", "COORDINATOR"]:
leader_url = f"http://{role}/druid/coordinator/v1/leader"
else:
logger.error(f"the zk path {path} is not for overlord or coordinator, please input a correct path")
raise DruidZKPathException()
resp = requests.get(leader_url, timeout=HTTP_REQUEST_TIMEOUT)
if resp.status_code != 200:
logger.error(f"failed to get leader from url: {leader_url}")
raise DruidHttpRequestException()
    # str.strip("http://") strips characters, not the prefix; remove the scheme explicitly
    leader = resp.text.strip().replace("http://", "", 1)
return leader
def _get_task_config(rt_info):
"""
    :param rt_info: result table info
    :return: druid task configuration (JSON string)
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
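    # the "context" below splits DEFAULT_TASK_MEMORY (assumed to be in MB) into heap (~6/11) and
    # direct memory (~5/11), and gives the processing buffer roughly 1/11 of it, converted to bytes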
task_config_dict = {
"availability_group": f"availability-group-{str(uuid.uuid4())[0:8]}",
"required_capacity": DEFAULT_TASK_MEMORY,
"data_source": physical_tn,
"metrics_spec": _get_dimensions_and_metrics(rt_info)["metrics_fields"],
"segment_granularity": DEFAULT_SEGMENT_GRANULARITY,
"timestamp_column": DEFAULT_TIMESTAMP_COLUMN,
"dimensions_spec": _get_dimensions_and_metrics(rt_info)["dimensions_fields"],
"dimension_exclusions": [],
"max_idle_time": DEFAULT_MAX_IDLE_TIME,
"window_period": DEFAULT_WINDOW_PERIOD,
"partition_num": random.randint(1, INT_MAX_VALUE),
"context": {
"druid.indexer.fork.property.druid.processing.buffer.sizeBytes": DEFAULT_TASK_MEMORY * 1024 * 1024 / 11,
"druid.indexer.runner.javaOpts": "-Xmx%dM -XX:MaxDirectMemorySize=%dM"
% (DEFAULT_TASK_MEMORY * 6 / 11 + 1, DEFAULT_TASK_MEMORY * 5 / 11 + 1),
},
}
task_config = TASK_CONFIG_TEMPLATE.format(**task_config_dict).replace("'", '"')
return task_config
def _get_dimensions_and_metrics(rt_info):
"""
    :param rt_info: result table info
    :return: dimension and metric fields
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
storage_config = json.loads(druid.get(STORAGE_CONFIG, "{}"))
dimensions_fields = storage_config.get("dimensions_fields", [])
metrics_fields = storage_config.get("metrics_fields", [])
default_dimensions = [{NAME: str(field[FIELD_NAME]), TYPE: str(field[FIELD_TYPE])} for field in rt_info[FIELDS]]
default_metrics = [{TYPE: "count", NAME: "__druid_reserved_count", "fieldName": ""}]
dimensions_fields = dimensions_fields if dimensions_fields else default_dimensions
metrics_fields = metrics_fields if metrics_fields else default_metrics
return {"dimensions_fields": dimensions_fields, "metrics_fields": metrics_fields}
def _get_tasks(overlord_conn_info, task_type):
"""
    :param overlord_conn_info: overlord connection info
    :param task_type: task type
    :return: result set for the given task type
"""
if task_type not in [TASK_TYPE_RUNNING, TASK_TYPE_PENDING]:
        raise NotSupportTaskTypeException(message_kv={TASK_TYPE: task_type})
if task_type == TASK_TYPE_RUNNING:
result, resp = get(f"http://{overlord_conn_info}{ENDPOINT_GET_RUNNING_TASKS}")
else:
result, resp = get(f"http://{overlord_conn_info}{ENDPOINT_GET_PENDING_TASKS}")
if not result:
raise DruidQueryTaskErrorException()
return resp
def get_roles(cluster_name):
"""
    :param cluster_name: cluster name
    :return: mapping from each druid role to its nodes
"""
cluster = model_manager.get_cluster_obj_by_name_type(cluster_name, DRUID)
conn_info = json.loads(cluster.connection_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
zk = KazooClient(hosts=zk_addr, read_only=True)
zk.start()
result = zk.get_children(ZK_DRUID_PATH)
if not result or len(result) == 0:
logger.error("Failed to get overload node")
zk.stop()
raise DruidZkConfException()
data = dict()
for role in result:
data[role] = zk.get_children(f"{ZK_DRUID_PATH}/{role}")
zk.stop()
return data
def get_datasources(cluster_name):
"""
    :param cluster_name: cluster name
    :return: list of datasources in the cluster
"""
cluster = model_manager.get_cluster_obj_by_name_type(cluster_name, DRUID)
conn_info = json.loads(cluster.connection_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
coordinator = _get_role_leader(zk_addr, COORDINATOR, cluster.version)
result, resp = get(f"http://{coordinator}{ENDPOINT_GET_DATASOURCES}")
if not result:
raise DruidQueryDataSourceException(message_kv={MESSAGE: resp})
return resp
def get_workers(cluster_name):
"""
    :param cluster_name: cluster name
    :return: workers info
"""
overlord = get_leader(cluster_name, OVERLORD)
result, resp = get(f"http://{overlord}{ENDPOINT_GET_RUNNING_WORKERS}")
if not result:
raise DruidQueryWorkersException(message_kv={MESSAGE: resp})
return resp
def get_historical(cluster_name):
"""
    :param cluster_name: cluster name
    :return: historical capacity info
"""
coordinator = get_leader(cluster_name, COORDINATOR)
result, resp = get(f"http://{coordinator}{ENDPOINT_HISTORICAL_SIZES}")
if not result:
raise DruidQueryHistoricalException(message_kv={MESSAGE: resp})
return resp
def get_cluster_capacity(cluster_name):
"""
    :param cluster_name: cluster name
    :return: capacity info
"""
cluster_capacity = {
"slot_capacity": 0,
"slot_capacity_used": 0,
"slot_usage": 0,
"used_size": 0,
"max_size": 0,
"storage_usage": 0,
"segments_count": 0,
"timestamp": time.time(),
}
try:
        # get druid slot info
worker_info = get_workers(cluster_name)
if worker_info:
for worker in worker_info:
cluster_capacity["slot_capacity"] = cluster_capacity["slot_capacity"] + worker["worker"]["capacity"]
cluster_capacity["slot_capacity_used"] = (
cluster_capacity["slot_capacity_used"] + worker["currCapacityUsed"]
)
        # get historical capacity info
historical_info = get_historical(cluster_name)
if historical_info:
for historical in historical_info:
if historical[TYPE] == "historical":
cluster_capacity["used_size"] = cluster_capacity["used_size"] + historical["currSize"]
cluster_capacity["max_size"] = cluster_capacity["max_size"] + historical["maxSize"]
        # get the total number of segments
coordinator = get_leader(cluster_name, COORDINATOR)
datasource_list_url = f"http://{coordinator}/druid/coordinator/v1/datasources/"
ok, datasource_list = get(datasource_list_url)
segments_sum = 0
for physical_tn in datasource_list:
segments_url = f"http://{coordinator}/druid/coordinator/v1/datasources/{physical_tn}"
ok, datasource_meta = get(segments_url)
segments_sum += datasource_meta[SEGMENTS][COUNT]
cluster_capacity["segments_count"] = segments_sum
cluster_capacity["slot_usage"] = (
int(100 * cluster_capacity["slot_capacity_used"] / cluster_capacity["slot_capacity"])
if cluster_capacity["slot_capacity"] > 0
else 0
)
cluster_capacity["storage_usage"] = (
int(100 * cluster_capacity["used_size"] / cluster_capacity["max_size"])
if cluster_capacity["max_size"] > 0
else 0
)
cluster_capacity[TIMESTAMP] = time.time()
except Exception:
logger.warning("failed to execute function druid.get_cluster_capacity", exc_info=True)
return cluster_capacity
def get_table_capacity(conn_info):
"""
    Read capacity data from the druid cluster
    :param conn_info: cluster connection info
    :return: dict mapping datasource to its size and record count
"""
url = f"http://{conn_info[HOST]}:{conn_info[PORT]}/druid/v2/sql/"
sql = (
'{"query": "SELECT datasource, sum(size * num_replicas)/1000000 as total_size, sum(num_rows) as total_nums '
'FROM sys.segments WHERE is_available = 1 GROUP BY datasource"} '
)
rt_size = {}
try:
ok, table_capacity_list = post(url, params=json.loads(sql))
if not ok or not table_capacity_list:
return rt_size
for table_capacity in table_capacity_list:
rt_size[table_capacity[DATASOURCE]] = {
TABLE_SIZE_MB: table_capacity["total_size"],
TABLE_RECORD_NUMS: table_capacity["total_nums"],
REPORT_TIME: time.time(),
}
except Exception:
logger.warning("failed to execute function druid.get_table_capacity", exc_info=True)
return rt_size
def get_leader(cluster_name, role_type):
"""
    :param cluster_name: cluster name
    :param role_type: role type
    :return: overlord or coordinator leader
"""
cluster = model_manager.get_cluster_obj_by_name_type(cluster_name, DRUID)
conn_info = json.loads(cluster.connection_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
return _get_role_leader(zk_addr, role_type, cluster.version)
def get_tasks(cluster_name, task_type):
"""
    :param cluster_name: cluster name
    :param task_type: task type
    :return: tasks of the given type
"""
cluster = model_manager.get_cluster_obj_by_name_type(cluster_name, DRUID)
conn_info = json.loads(cluster.connection_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
overlord = _get_role_leader(zk_addr, OVERLORD, cluster.version)
if task_type != TASK_TYPE_RUNNING and task_type != TASK_TYPE_PENDING:
raise NotSupportTaskTypeException(message_kv={TASK_TYPE: task_type})
elif task_type == TASK_TYPE_RUNNING:
result, resp = get(f"http://{overlord}{ENDPOINT_GET_RUNNING_TASKS}")
else:
result, resp = get(f"http://{overlord}{ENDPOINT_GET_PENDING_TASKS}")
if not result:
raise DruidQueryTaskErrorException()
return resp
def update_expires(rt_info, expires):
"""
    Update the data expiration rule of the datasource
    :param rt_info: result table
    :param expires: expiration time
    :return: True if the rule was updated
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
expires = druid.get(EXPIRES, DEFAULT_DRUID_EXPIRES) if not expires else expires
zk_addr = conn_info[ZOOKEEPER_CONNECT]
coordinator = _get_role_leader(zk_addr, COORDINATOR, druid[STORAGE_CLUSTER][VERSION])
rule_path = f"{ENDPOINT_DATASOURCE_RULE}/{physical_tn}"
rule_url = f"http://{coordinator}{rule_path}"
result, resp = get(rule_url)
if not result:
        raise DruidQueryExpiresException(message_kv={MESSAGE: f"failed to get data expiration time for {physical_tn}"})
rule = resp
if not rule or len(rule) == 0:
        # no expiration rule found, fall back to the default rule
rule = DEFAULT_EXPIRES_RULE
    # update the data expiration time in the datasource rules
rule[0]["period"] = f"P{expires.upper()}"
resp = requests.post(rule_url, json=rule)
if resp.status_code != 200:
        raise DruidUpdateExpiresException(message_kv={MESSAGE: f"failed to update data expiration time for {physical_tn}"})
return True
def delete(rt_info, expires):
"""
    Delete expired data
    :param rt_info: result table
    :param expires: expiration time
    :return: whether the kill task finished successfully
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
expires = druid.get(EXPIRES, "360d") if not expires else expires
overlord = _get_role_leader(zk_addr, OVERLORD, druid[STORAGE_CLUSTER][VERSION])
expires = translate_expires_day(expires)
kill_interval = _get_kill_interval(expires)
task_id = f'kill_{rt_info[RESULT_TABLE_ID]}_{kill_interval.replace("/", "_")}_{str(uuid.uuid4())[0:8]}'
data = {TYPE: "kill", ID: task_id, "dataSource": physical_tn, INTERVAL: kill_interval}
url = f"http://{overlord}{ENDPOINT_RUN_TASK}"
logger.info(f"start delete data, url:{url}, params: {json.dumps(data)}")
result, resp = post(url, data)
if not result:
raise DruidDeleteDataException(message_kv={MESSAGE: resp})
return _check_delete_result(overlord, rt_info[RESULT_TABLE_ID], task_id)
def _get_kill_interval(expires):
"""
    Get the time interval for the kill task
    :param expires: expiration time in days
    :return: interval string in UTC
"""
date_diff = (datetime.today() + timedelta(-expires + 1)).strftime("%Y-%m-%dT00:00:00.000Z")
time_utc = datetime.strptime(date_diff, UTC_FORMAT) - timedelta(hours=TIME_ZONE_DIFF)
return f"{UTC_BEGIN_TIME}/{time_utc.strftime(UTC_FORMAT)}"
def _check_delete_result(overlord, result_table_id, task_id, max_times=60):
"""
    :param overlord: overlord node
    :param result_table_id: result table id
    :param task_id: task id
    :param max_times: maximum number of polls
    :return: True when the kill task succeeds
"""
if max_times < 0:
logger.error(f"deleting expired data failed, rt: {result_table_id}, task_id: {task_id}")
        raise DruidDeleteDataException(message_kv={MESSAGE: "failed to delete expired data, exceeded the maximum number of retries"})
time.sleep(5)
result, resp = get(f"http://{overlord}{ENDPOINT_RUN_TASK}/{task_id}/status")
if not result:
        raise DruidDeleteDataException(message_kv={MESSAGE: "failed to check the task running status"})
result = resp
if result.get(STATUS, {}).get(STATUS, "") == SUCCESS:
return True
else:
max_times = max_times - 1
logger.info(f"Enter the next poll, max_times: {max_times}, current result: {result}")
return _check_delete_result(overlord, result_table_id, task_id, max_times)
def segments_compaction(cluster_name, delta_day, merge_days, timeout):
"""
    Compact segments
    :param cluster_name: druid cluster name
    :param delta_day: number of recent days skipped by the compaction
    :param merge_days: number of days to compact
    :param timeout: timeout of the compaction operation
    :return: whether compaction was triggered for all datasources
"""
cluster = model_manager.get_storage_cluster_config(cluster_name, DRUID)
zk_addr = json.loads(cluster[CONNECTION_INFO])[ZOOKEEPER_CONNECT]
version = cluster[VERSION]
coordinator = _get_role_leader(zk_addr, COORDINATOR, version)
ok, datasources_used = get(f"http://{coordinator}{ENDPOINT_GET_DATASOURCES}")
if not ok:
return False
for datasource in datasources_used:
try:
coordinator = _get_role_leader(zk_addr, COORDINATOR, version)
ok, resp = get(f"http://{coordinator}/druid/coordinator/v1/datasources/{datasource}/")
if not ok:
continue
last_day = datetime.strptime(resp[SEGMENTS][MINTIME], "%Y-%m-%dT%H:%M:%S.000Z").strftime("%Y-%m-%d")
end_date = (datetime.today() - timedelta(delta_day)).strftime("%Y-%m-%d")
begin_date = (datetime.today() - timedelta(delta_day + merge_days)).strftime("%Y-%m-%d")
if end_date <= last_day:
continue
begin_date = last_day if last_day > begin_date else begin_date
merge_segments(zk_addr, datasource, begin_date, end_date, version, timeout, merge_days)
except Exception:
logger.warning(f"segments compaction failed for datasource {datasource}", exc_info=True)
return True
| [
"[email protected]"
] | |
7c7e67d27b764ca813e58971be7ee5ec46ca05c5 | e49a07ad215172e9c82cb418b10371bf0ce1c0f7 | /第1章 python基础/Python基础01/19-打印1-100之间的偶数.py | 4549dced2d7a0a0a558734f64134b9b56b6a40e8 | [] | no_license | taogangshow/python_Code | 829c25a7e32ead388c8b3ffa763cb9cf587bfd7b | 4b3d6992ec407d6069f3187ca7e402a14d863fff | refs/heads/master | 2022-12-16T01:26:17.569230 | 2018-11-16T10:07:59 | 2018-11-16T10:07:59 | 157,832,985 | 0 | 1 | null | 2022-11-25T09:55:32 | 2018-11-16T08:00:13 | Python | UTF-8 | Python | false | false | 61 | py | i = 1
while i<=100:
if i%2==0:
print(i)
i+=1
| [
"[email protected]"
] | |
73a34062044e8bbacbf5e735782bef6c3a6cbc5a | 85df75bec1ea604c21db36b8892c90e0d7b7574f | /armstrong/core/arm_layout/utils.py | c7bb882623ba0881a93e8ae89a446d49251f0d1a | [
"Apache-2.0"
] | permissive | niran/armstrong.core.arm_layout | a569a64f84085b55509b26c004a9a41af3952047 | 229106581439c370ba51b1395e5e5e4db111a0bc | refs/heads/master | 2021-01-16T19:29:16.017160 | 2012-03-16T16:29:58 | 2012-03-16T16:29:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
def get_layout_template_name(model, name):
ret = []
for a in model.__class__.mro():
if not hasattr(a, "_meta"):
continue
ret.append("layout/%s/%s/%s.html" % (a._meta.app_label,
a._meta.object_name.lower(), name))
return ret
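# Illustrative example (hypothetical model): for an Article instance in a "news" app,
# get_layout_template_name(article, "full") would yield ["layout/news/article/full.html", ...]
# with one entry per ancestor model class that defines _meta.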
def render_model(object, name, dictionary=None, context_instance=None):
dictionary = dictionary or {}
dictionary["object"] = object
return mark_safe(render_to_string(get_layout_template_name(object, name),
dictionary=dictionary, context_instance=context_instance))
| [
"[email protected]"
] | |
60c721e6c7d21277963b95af8fdc2aa107b72302 | 21df7cd93e156af8357596143792c22b44e14747 | /regression/SimpleLinearRegression.py | 963a2797127498672b735dbc7c59e572c6b024fa | [] | no_license | yanyongyong/machineLearn | 0cac90c1d0b4f7021e3f9ca658268f3c433b481f | d77a13f83679ba4b06bf24c6c6019dc2af55986f | refs/heads/master | 2021-09-03T08:25:33.933996 | 2018-01-07T14:15:52 | 2018-01-07T14:15:52 | 107,839,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | import numpy as np
# simple linear regression
def fitSLR(x,y):
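    # least-squares estimates: b1 = sum((x-mean(x))*(y-mean(y))) / sum((x-mean(x))**2), b0 = mean(y) - b1*mean(x)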
n = len(x)
    denominator = 0 # denominator of the slope estimate
    numerator = 0  # numerator of the slope estimate
for i in range(0,n):
numerator += (x[i]- np.mean(x))*(y[i] - np.mean(y))
denominator += (x[i] - np.mean(x))**2
b1 = numerator/float(denominator)
b0 = np.mean(y) - b1*np.mean(x)
# b0 = np.mean(y)/float(np.mean(x))
return b0, b1
def predict(x,bo,b1):
return bo + x*b1
x = [1,3,2,1,3]
y = [14,24,18,17,27]
b0,b1 = fitSLR(x,y)
x_test = 8
y_test = predict(8,b0,b1)
print(y_test) | [
"123456"
] | 123456 |
465b87dd2605a4e591b7693d9ff7ef6ed379c2e6 | f39c2c500873180d953ab9a7b22a4f6df95fb1c3 | /Amazon/Pyramid Transition Matrix.py | 24f0dd8cd85fc636e5f6ed3c3ff56adc101c0a4e | [] | no_license | Jason003/interview | 458516f671d7da0d3585f89b098c5370edcd9f04 | e153306b85c3687b23a332812a0885d25ecce904 | refs/heads/master | 2021-07-15T15:28:07.175276 | 2021-02-05T03:21:59 | 2021-02-05T03:21:59 | 224,898,150 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | '''
Input: bottom = "BCD", allowed = ["BCG", "CDE", "GEA", "FFF"]
Output: true
Explanation:
We can stack the pyramid like this:
A
/ \
G E
/ \ / \
B C D
We are allowed to place G on top of B and C because BCG is an allowed triple. Similarly, we can place E on top of C and D, then A on top of G and E.
'''
import collections
class Solution:
def pyramidTransition(self, bottom: str, allowed: List[str]) -> bool:
d = collections.defaultdict(set)
for s in allowed:
d[s[:2]].add(s[2])
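        # d now maps each bottom pair to its allowed tops, e.g. {"BC": {"G"}, "CD": {"E"}, "GE": {"A"}, "FF": {"F"}}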
def helper(bottom, idx, nxt):
if len(bottom) == 1: return True
if idx == len(bottom) - 1: return helper(nxt, 0, '')
s = bottom[idx: idx + 2]
for c in d[s]:
if helper(bottom, idx + 1, nxt + c): return True
return False
return helper(bottom, 0, '')
| [
"[email protected]"
] | |
983a777eea0b5f999dc64520c81090b60c106a85 | cd6c6298fb407b7158e25aba2ab28e58517b1bd0 | /tests/test_plugins.py | 54f26a7f79028ccfeae9522314b326c15c0de4a7 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | mociepka/coveragepy | dc58ef4b6072af0e55edb5d920d8a58d4cbeef0c | bc31b68776bb76ac9a650caa3c7a04c84817093d | refs/heads/master | 2021-01-17T20:16:31.014696 | 2016-01-02T21:30:47 | 2016-01-02T21:30:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,439 | py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Tests for plugins."""
import os.path
import coverage
from coverage import env
from coverage.backward import StringIO
from coverage.control import Plugins
from coverage.misc import CoverageException
import coverage.plugin
from tests.coveragetest import CoverageTest
from tests.helpers import CheckUniqueFilenames
class FakeConfig(object):
"""A fake config for use in tests."""
def __init__(self, plugin, options):
self.plugin = plugin
self.options = options
self.asked_for = []
def get_plugin_options(self, module):
"""Just return the options for `module` if this is the right module."""
self.asked_for.append(module)
if module == self.plugin:
return self.options
else:
return {}
class LoadPluginsTest(CoverageTest):
"""Test Plugins.load_plugins directly."""
def test_implicit_boolean(self):
self.make_file("plugin1.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
pass
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
config = FakeConfig("plugin1", {})
plugins = Plugins.load_plugins([], config)
self.assertFalse(plugins)
plugins = Plugins.load_plugins(["plugin1"], config)
self.assertTrue(plugins)
def test_importing_and_configuring(self):
self.make_file("plugin1.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
def __init__(self, options):
self.options = options
self.this_is = "me"
def coverage_init(reg, options):
reg.add_file_tracer(Plugin(options))
""")
config = FakeConfig("plugin1", {'a': 'hello'})
plugins = list(Plugins.load_plugins(["plugin1"], config))
self.assertEqual(len(plugins), 1)
self.assertEqual(plugins[0].this_is, "me")
self.assertEqual(plugins[0].options, {'a': 'hello'})
self.assertEqual(config.asked_for, ['plugin1'])
def test_importing_and_configuring_more_than_one(self):
self.make_file("plugin1.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
def __init__(self, options):
self.options = options
self.this_is = "me"
def coverage_init(reg, options):
reg.add_file_tracer(Plugin(options))
""")
self.make_file("plugin2.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
def __init__(self, options):
self.options = options
def coverage_init(reg, options):
reg.add_file_tracer(Plugin(options))
""")
config = FakeConfig("plugin1", {'a': 'hello'})
plugins = list(Plugins.load_plugins(["plugin1", "plugin2"], config))
self.assertEqual(len(plugins), 2)
self.assertEqual(plugins[0].this_is, "me")
self.assertEqual(plugins[0].options, {'a': 'hello'})
self.assertEqual(plugins[1].options, {})
self.assertEqual(config.asked_for, ['plugin1', 'plugin2'])
# The order matters...
config = FakeConfig("plugin1", {'a': 'second'})
plugins = list(Plugins.load_plugins(["plugin2", "plugin1"], config))
self.assertEqual(len(plugins), 2)
self.assertEqual(plugins[0].options, {})
self.assertEqual(plugins[1].this_is, "me")
self.assertEqual(plugins[1].options, {'a': 'second'})
def test_cant_import(self):
with self.assertRaises(ImportError):
_ = Plugins.load_plugins(["plugin_not_there"], None)
def test_plugin_must_define_coverage_init(self):
self.make_file("no_plugin.py", """\
from coverage import CoveragePlugin
Nothing = 0
""")
msg_pat = "Plugin module 'no_plugin' didn't define a coverage_init function"
with self.assertRaisesRegex(CoverageException, msg_pat):
list(Plugins.load_plugins(["no_plugin"], None))
class PluginTest(CoverageTest):
"""Test plugins through the Coverage class."""
def test_plugin_imported(self):
# Prove that a plugin will be imported.
self.make_file("my_plugin.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
pass
def coverage_init(reg, options):
reg.add_noop(Plugin())
with open("evidence.out", "w") as f:
f.write("we are here!")
""")
self.assert_doesnt_exist("evidence.out")
cov = coverage.Coverage()
cov.set_option("run:plugins", ["my_plugin"])
cov.start()
cov.stop() # pragma: nested
with open("evidence.out") as f:
self.assertEqual(f.read(), "we are here!")
def test_missing_plugin_raises_import_error(self):
# Prove that a missing plugin will raise an ImportError.
with self.assertRaises(ImportError):
cov = coverage.Coverage()
cov.set_option("run:plugins", ["does_not_exist_woijwoicweo"])
cov.start()
cov.stop()
def test_bad_plugin_isnt_hidden(self):
# Prove that a plugin with an error in it will raise the error.
self.make_file("plugin_over_zero.py", """\
1/0
""")
with self.assertRaises(ZeroDivisionError):
cov = coverage.Coverage()
cov.set_option("run:plugins", ["plugin_over_zero"])
cov.start()
cov.stop()
def test_plugin_sys_info(self):
self.make_file("plugin_sys_info.py", """\
import coverage
class Plugin(coverage.CoveragePlugin):
def sys_info(self):
return [("hello", "world")]
def coverage_init(reg, options):
reg.add_noop(Plugin())
""")
debug_out = StringIO()
cov = coverage.Coverage(debug=["sys"])
cov._debug_file = debug_out
cov.set_option("run:plugins", ["plugin_sys_info"])
cov.load()
out_lines = debug_out.getvalue().splitlines()
expected_end = [
"-- sys: plugin_sys_info.Plugin -------------------------------",
" hello: world",
"-- end -------------------------------------------------------",
]
self.assertEqual(expected_end, out_lines[-len(expected_end):])
def test_plugin_with_no_sys_info(self):
self.make_file("plugin_no_sys_info.py", """\
import coverage
class Plugin(coverage.CoveragePlugin):
pass
def coverage_init(reg, options):
reg.add_noop(Plugin())
""")
debug_out = StringIO()
cov = coverage.Coverage(debug=["sys"])
cov._debug_file = debug_out
cov.set_option("run:plugins", ["plugin_no_sys_info"])
cov.load()
out_lines = debug_out.getvalue().splitlines()
expected_end = [
"-- sys: plugin_no_sys_info.Plugin ----------------------------",
"-- end -------------------------------------------------------",
]
self.assertEqual(expected_end, out_lines[-len(expected_end):])
def test_local_files_are_importable(self):
self.make_file("importing_plugin.py", """\
from coverage import CoveragePlugin
import local_module
class MyPlugin(CoveragePlugin):
pass
def coverage_init(reg, options):
reg.add_noop(MyPlugin())
""")
self.make_file("local_module.py", "CONST = 1")
self.make_file(".coveragerc", """\
[run]
plugins = importing_plugin
""")
self.make_file("main_file.py", "print('MAIN')")
out = self.run_command("coverage run main_file.py")
self.assertEqual(out, "MAIN\n")
out = self.run_command("coverage html")
self.assertEqual(out, "")
class PluginWarningOnPyTracer(CoverageTest):
"""Test that we get a controlled exception with plugins on PyTracer."""
def test_exception_if_plugins_on_pytracer(self):
if env.C_TRACER:
self.skip("This test is only about PyTracer.")
self.make_file("simple.py", """a = 1""")
cov = coverage.Coverage()
cov.set_option("run:plugins", ["tests.plugin1"])
expected_warnings = [
r"Plugin file tracers \(tests.plugin1.Plugin\) aren't supported with PyTracer",
]
with self.assert_warnings(cov, expected_warnings):
self.start_import_stop(cov, "simple")
class FileTracerTest(CoverageTest):
"""Tests of plugins that implement file_tracer."""
def setUp(self):
super(FileTracerTest, self).setUp()
if not env.C_TRACER:
self.skip("Plugins are only supported with the C tracer.")
class GoodPluginTest(FileTracerTest):
"""Tests of plugin happy paths."""
def test_plugin1(self):
self.make_file("simple.py", """\
import try_xyz
a = 1
b = 2
""")
self.make_file("try_xyz.py", """\
c = 3
d = 4
""")
cov = coverage.Coverage()
CheckUniqueFilenames.hook(cov, '_should_trace')
CheckUniqueFilenames.hook(cov, '_check_include_omit_etc')
cov.set_option("run:plugins", ["tests.plugin1"])
# Import the Python file, executing it.
self.start_import_stop(cov, "simple")
_, statements, missing, _ = cov.analysis("simple.py")
self.assertEqual(statements, [1, 2, 3])
self.assertEqual(missing, [])
zzfile = os.path.abspath(os.path.join("/src", "try_ABC.zz"))
_, statements, _, _ = cov.analysis(zzfile)
self.assertEqual(statements, [105, 106, 107, 205, 206, 207])
def make_render_and_caller(self):
"""Make the render.py and caller.py files we need."""
# plugin2 emulates a dynamic tracing plugin: the caller's locals
# are examined to determine the source file and line number.
# The plugin is in tests/plugin2.py.
self.make_file("render.py", """\
def render(filename, linenum):
# This function emulates a template renderer. The plugin
# will examine the `filename` and `linenum` locals to
# determine the source file and line number.
fiddle_around = 1 # not used, just chaff.
return "[{0} @ {1}]".format(filename, linenum)
def helper(x):
# This function is here just to show that not all code in
# this file will be part of the dynamic tracing.
return x+1
""")
self.make_file("caller.py", """\
import sys
from render import helper, render
assert render("foo_7.html", 4) == "[foo_7.html @ 4]"
# Render foo_7.html again to try the CheckUniqueFilenames asserts.
render("foo_7.html", 4)
assert helper(42) == 43
assert render("bar_4.html", 2) == "[bar_4.html @ 2]"
assert helper(76) == 77
# quux_5.html will be omitted from the results.
assert render("quux_5.html", 3) == "[quux_5.html @ 3]"
# In Python 2, either kind of string should be OK.
if sys.version_info[0] == 2:
assert render(u"uni_3.html", 2) == "[uni_3.html @ 2]"
""")
# will try to read the actual source files, so make some
# source files.
def lines(n):
"""Make a string with n lines of text."""
return "".join("line %d\n" % i for i in range(n))
self.make_file("bar_4.html", lines(4))
self.make_file("foo_7.html", lines(7))
def test_plugin2(self):
self.make_render_and_caller()
cov = coverage.Coverage(omit=["*quux*"])
CheckUniqueFilenames.hook(cov, '_should_trace')
CheckUniqueFilenames.hook(cov, '_check_include_omit_etc')
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
# The way plugin2 works, a file named foo_7.html will be claimed to
# have 7 lines in it. If render() was called with line number 4,
# then the plugin will claim that lines 4 and 5 were executed.
_, statements, missing, _ = cov.analysis("foo_7.html")
self.assertEqual(statements, [1, 2, 3, 4, 5, 6, 7])
self.assertEqual(missing, [1, 2, 3, 6, 7])
self.assertIn("foo_7.html", cov.data.line_counts())
_, statements, missing, _ = cov.analysis("bar_4.html")
self.assertEqual(statements, [1, 2, 3, 4])
self.assertEqual(missing, [1, 4])
self.assertIn("bar_4.html", cov.data.line_counts())
self.assertNotIn("quux_5.html", cov.data.line_counts())
if env.PY2:
_, statements, missing, _ = cov.analysis("uni_3.html")
self.assertEqual(statements, [1, 2, 3])
self.assertEqual(missing, [1])
self.assertIn("uni_3.html", cov.data.line_counts())
def test_plugin2_with_branch(self):
self.make_render_and_caller()
cov = coverage.Coverage(branch=True, omit=["*quux*"])
CheckUniqueFilenames.hook(cov, '_should_trace')
CheckUniqueFilenames.hook(cov, '_check_include_omit_etc')
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
# The way plugin2 works, a file named foo_7.html will be claimed to
# have 7 lines in it. If render() was called with line number 4,
# then the plugin will claim that lines 4 and 5 were executed.
analysis = cov._analyze("foo_7.html")
self.assertEqual(analysis.statements, set([1, 2, 3, 4, 5, 6, 7]))
# Plugins don't do branch coverage yet.
self.assertEqual(analysis.has_arcs(), True)
self.assertEqual(analysis.arc_possibilities(), [])
self.assertEqual(analysis.missing, set([1, 2, 3, 6, 7]))
def test_plugin2_with_text_report(self):
self.make_render_and_caller()
cov = coverage.Coverage(branch=True, omit=["*quux*"])
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
repout = StringIO()
total = cov.report(file=repout, include=["*.html"], omit=["uni*.html"])
report = repout.getvalue().splitlines()
expected = [
'Name Stmts Miss Branch BrPart Cover Missing',
'--------------------------------------------------------',
'bar_4.html 4 2 0 0 50% 1, 4',
'foo_7.html 7 5 0 0 29% 1-3, 6-7',
'--------------------------------------------------------',
'TOTAL 11 7 0 0 36% ',
]
self.assertEqual(report, expected)
self.assertAlmostEqual(total, 36.36, places=2)
def test_plugin2_with_html_report(self):
self.make_render_and_caller()
cov = coverage.Coverage(branch=True, omit=["*quux*"])
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
total = cov.html_report(include=["*.html"], omit=["uni*.html"])
self.assertAlmostEqual(total, 36.36, places=2)
self.assert_exists("htmlcov/index.html")
self.assert_exists("htmlcov/bar_4_html.html")
self.assert_exists("htmlcov/foo_7_html.html")
def test_plugin2_with_xml_report(self):
self.make_render_and_caller()
cov = coverage.Coverage(branch=True, omit=["*quux*"])
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
total = cov.xml_report(include=["*.html"], omit=["uni*.html"])
self.assertAlmostEqual(total, 36.36, places=2)
with open("coverage.xml") as fxml:
xml = fxml.read()
for snip in [
'filename="bar_4.html" line-rate="0.5" name="bar_4.html"',
'filename="foo_7.html" line-rate="0.2857" name="foo_7.html"',
]:
self.assertIn(snip, xml)
def test_defer_to_python(self):
# A plugin that measures, but then wants built-in python reporting.
self.make_file("fairly_odd_plugin.py", """\
# A plugin that claims all the odd lines are executed, and none of
# the even lines, and then punts reporting off to the built-in
# Python reporting.
import coverage.plugin
class Plugin(coverage.CoveragePlugin):
def file_tracer(self, filename):
return OddTracer(filename)
def file_reporter(self, filename):
return "python"
class OddTracer(coverage.plugin.FileTracer):
def __init__(self, filename):
self.filename = filename
def source_filename(self):
return self.filename
def line_number_range(self, frame):
lineno = frame.f_lineno
if lineno % 2:
return (lineno, lineno)
else:
return (-1, -1)
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.make_file("unsuspecting.py", """\
a = 1
b = 2
c = 3
d = 4
e = 5
f = 6
""")
cov = coverage.Coverage(include=["unsuspecting.py"])
cov.set_option("run:plugins", ["fairly_odd_plugin"])
self.start_import_stop(cov, "unsuspecting")
repout = StringIO()
total = cov.report(file=repout)
report = repout.getvalue().splitlines()
expected = [
'Name Stmts Miss Cover Missing',
'-----------------------------------------------',
'unsuspecting.py 6 3 50% 2, 4, 6',
]
self.assertEqual(report, expected)
self.assertEqual(total, 50)
class BadPluginTest(FileTracerTest):
"""Test error handling around plugins."""
def run_plugin(self, module_name):
"""Run a plugin with the given module_name.
Uses a few fixed Python files.
Returns the Coverage object.
"""
self.make_file("simple.py", """\
import other, another
a = other.f(2)
b = other.f(3)
c = another.g(4)
d = another.g(5)
""")
# The names of these files are important: some plugins apply themselves
# to "*other.py".
self.make_file("other.py", """\
def f(x):
return x+1
""")
self.make_file("another.py", """\
def g(x):
return x-1
""")
cov = coverage.Coverage()
cov.set_option("run:plugins", [module_name])
self.start_import_stop(cov, "simple")
return cov
def run_bad_plugin(self, module_name, plugin_name, our_error=True, excmsg=None):
"""Run a file, and see that the plugin failed.
`module_name` and `plugin_name` is the module and name of the plugin to
use.
`our_error` is True if the error reported to the user will be an
explicit error in our test code, marked with an '# Oh noes!' comment.
`excmsg`, if provided, is text that should appear in the stderr.
The plugin will be disabled, and we check that a warning is output
explaining why.
"""
self.run_plugin(module_name)
stderr = self.stderr()
print(stderr) # for diagnosing test failures.
if our_error:
errors = stderr.count("# Oh noes!")
# The exception we're causing should only appear once.
self.assertEqual(errors, 1)
# There should be a warning explaining what's happening, but only one.
# The message can be in two forms:
# Disabling plugin '...' due to previous exception
# or:
# Disabling plugin '...' due to an exception:
msg = "Disabling plugin '%s.%s' due to " % (module_name, plugin_name)
warnings = stderr.count(msg)
self.assertEqual(warnings, 1)
if excmsg:
self.assertIn(excmsg, stderr)
def test_file_tracer_has_no_file_tracer_method(self):
self.make_file("bad_plugin.py", """\
class Plugin(object):
pass
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)
def test_file_tracer_has_inherited_sourcefilename_method(self):
self.make_file("bad_plugin.py", """\
import coverage
class Plugin(coverage.CoveragePlugin):
def file_tracer(self, filename):
# Just grab everything.
return FileTracer()
class FileTracer(coverage.FileTracer):
pass
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin(
"bad_plugin", "Plugin", our_error=False,
excmsg="Class 'bad_plugin.FileTracer' needs to implement source_filename()",
)
def test_plugin_has_inherited_filereporter_method(self):
self.make_file("bad_plugin.py", """\
import coverage
class Plugin(coverage.CoveragePlugin):
def file_tracer(self, filename):
# Just grab everything.
return FileTracer()
class FileTracer(coverage.FileTracer):
def source_filename(self):
return "foo.xxx"
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
cov = self.run_plugin("bad_plugin")
expected_msg = "Plugin 'bad_plugin.Plugin' needs to implement file_reporter()"
with self.assertRaisesRegex(NotImplementedError, expected_msg):
cov.report()
def test_file_tracer_fails(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
17/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin")
def test_file_tracer_returns_wrong(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return 3.14159
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)
def test_has_dynamic_source_filename_fails(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def has_dynamic_source_filename(self):
23/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin")
def test_source_filename_fails(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
42/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin")
def test_source_filename_returns_wrong(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return 17.3
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)
def test_dynamic_source_filename_fails(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def has_dynamic_source_filename(self):
return True
def dynamic_source_filename(self, filename, frame):
101/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin")
def test_line_number_range_returns_non_tuple(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return "something.foo"
def line_number_range(self, frame):
return 42.23
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)
def test_line_number_range_returns_triple(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return "something.foo"
def line_number_range(self, frame):
return (1, 2, 3)
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)
def test_line_number_range_returns_pair_of_strings(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return "something.foo"
def line_number_range(self, frame):
return ("5", "7")
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)
| [
"[email protected]"
] | |
ae60e9b1424a37519eecbabcfeb13e32fe0e0f59 | df1348a67a54fa530f620ba1145c34d914710fde | /examples/sandbox/sandbox_export.py | 0279085899b7e8a7bfb5c5464169c3afb8f28481 | [
"MIT"
] | permissive | SilverLabUCL/netpyne | bf00991cec1ca44c44476e0a0fff2a15bc28b08c | 72ce78d8c79c060d44513bafa7843756ee06cc45 | refs/heads/master | 2020-07-12T12:45:39.959342 | 2016-11-16T10:26:23 | 2016-11-16T10:26:23 | 73,908,592 | 0 | 0 | null | 2016-11-16T10:21:48 | 2016-11-16T10:21:48 | null | UTF-8 | Python | false | false | 293 | py | import sandbox # import parameters file
from netpyne import sim # import netpyne sim module
sim.createExportNeuroML2(netParams = sandbox.netParams,
simConfig = sandbox.simConfig,
reference = 'sandbox') # create and export network to NeuroML 2 | [
"[email protected]"
] | |
e0c05f71ba2c1ec6b84d1cee2e49a9f3fd585618 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_clutched.py | e70757cfd80eae64be874dd7819e132a2b0a95da | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
from xai.brain.wordbase.nouns._clutch import _CLUTCH
# class header
class _CLUTCHED(_CLUTCH, ):
def __init__(self,):
_CLUTCH.__init__(self)
self.name = "CLUTCHED"
self.specie = 'nouns'
self.basic = "clutch"
self.jsondata = {}
| [
"[email protected]"
] | |
59d94563bfa6f5937003f4b1bdd3072c24cc7d4c | e9f111b913255e2a8963556a638017c6c4729f01 | /randomize_four_digits.py | 0384492097a4e58757931549c4dab66f38246c1c | [] | no_license | dojinkimm/daddy | d609c38333358a6119ad71b4c89f418ae8c071eb | 77e79324da3e7deb11d0a045d888e432a499d388 | refs/heads/master | 2023-01-31T08:21:26.544482 | 2020-12-15T12:25:26 | 2020-12-15T12:25:26 | 285,579,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | """
문장 리스트에서 4자리 혹은 3자리 숫자를 찾아서 랜덤 숫자로 변경해주는 GUI 프로그램
GUI Program that finds four digit or three digit number in a list of sentences,
and changes to random number
"""
import random
import re
import pandas as pd
import PySimpleGUI as sg
def arg_parse():
layout = [
[sg.Text("문장을 입력하세요", size=(25, 1))],
[sg.InputText()],
[sg.Text("변경할 숫자의 길이를 입력해주세요")],
[sg.InputText()],
[sg.Text("저장할 파일의 이름을 입력하세요")],
[sg.InputText()],
[sg.Submit(), sg.Cancel()],
]
window = sg.Window("문장 숫자 랜덤 생성기", layout)
event, values = window.read()
window.close()
if event is None or event == "Cancel":
exit()
return values
args = arg_parse()
phrases = args[0].split("\n")
digit = args[1]
file_name = args[2] + ".csv"
if args[2] == "":
file_name = "test.csv"
generated_words = []
digit_regexp = "\d\d\d\d((?=[^kg|^Kg|^ml|^cm|^mm|^MM|^WT]))|\d\d\d\d$"
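# intended to match a 4-digit run that is not followed by a unit suffix (kg/Kg/ml/cm/mm/MM/WT),
# or a 4-digit run at the end of the string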
if digit != "" and int(digit) == 3:
    digit_regexp = "\d\d\d((?=[^kg|^Kg|^ml|^cm|^mm|^MM|^WT]))|\d\d\d$"
for p in phrases:
if p == "":
continue
match = re.search(digit_regexp, p)
if match is None:
generated_words.append(p)
continue
rand = random.randint(1000, 9999)
if digit != "" and int(digit) == 3:
rand = random.randint(100, 999)
random.seed(p)
new_p = re.sub(digit_regexp, str(rand), p)
generated_words.append(new_p)
df = pd.DataFrame(generated_words)
df.to_csv(file_name, encoding="utf-8-sig")
| [
"[email protected]"
] | |
125c76db9f1f9f7db1a60cc1fac82e87519e6ac9 | c342df24a9e2a94c5b952b57d73e45ee35adea80 | /dqn_bullet_cartpole.py | f1a52ad5b053b903b878f9a354642da5683ba6ec | [
"MIT"
] | permissive | vyraun/cartpoleplusplus | 4b652d4ba0210e5abdb78931153d6076839cf6df | 87c0f1b896e6d6919c4dbfcd0bf4306f807b60ef | refs/heads/master | 2020-12-31T02:48:49.650551 | 2016-08-29T03:29:05 | 2016-08-29T03:29:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,734 | py | #!/usr/bin/env python
# copy pasta from https://github.com/matthiasplappert/keras-rl/blob/master/examples/dqn_cartpole.py
# with some extra arg parsing
import numpy as np
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
import bullet_cartpole
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--gui', action='store_true')
parser.add_argument('--initial-force', type=float, default=55.0,
help="magnitude of initial push, in random direction")
parser.add_argument('--action-force', type=float, default=50.0,
help="magnitude of action push")
parser.add_argument('--num-train', type=int, default=100)
parser.add_argument('--num-eval', type=int, default=0)
parser.add_argument('--load-file', type=str, default=None)
parser.add_argument('--save-file', type=str, default=None)
parser.add_argument('--delay', type=float, default=0.0)
opts = parser.parse_args()
print "OPTS", opts
ENV_NAME = 'BulletCartpole'
# Get the environment and extract the number of actions.
env = bullet_cartpole.BulletCartpole(gui=opts.gui, action_force=opts.action_force,
initial_force=opts.initial_force, delay=opts.delay)
nb_actions = env.action_space.n
# Next, we build a very simple model.
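# (the leading (1,) in input_shape below is keras-rl's observation window axis; window_length is 1 by default)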
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(32))
model.add(Activation('tanh'))
#model.add(Dense(16))
#model.add(Activation('relu'))
#model.add(Dense(16))
#model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
memory = SequentialMemory(limit=50000)
policy = BoltzmannQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
if opts.load_file is not None:
print "loading weights from from [%s]" % opts.load_file
dqn.load_weights(opts.load_file)
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
dqn.fit(env, nb_steps=opts.num_train, visualize=True, verbose=2)
# After training is done, we save the final weights.
if opts.save_file is not None:
print "saving weights to [%s]" % opts.save_file
dqn.save_weights(opts.save_file, overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
dqn.test(env, nb_episodes=opts.num_eval, visualize=True)
| [
"[email protected]"
] | |
a2ee918ee914a6a2440aeba1db575f22ba3e78bf | 458b1133df5b38a017f3a690a624a54f0f43fda7 | /PaperExperiments/XHExp041/parameters.py | 62f97ccd29ad9f45eebb6360c8de059e6a0f209d | [
"MIT"
] | permissive | stefan-c-kremer/TE_World2 | 9c7eca30ee6200d371183c5ba32b3345a4cc04ee | 8e1fae218af8a1eabae776deecac62192c22e0ca | refs/heads/master | 2020-12-18T14:31:00.639003 | 2020-02-04T15:55:49 | 2020-02-04T15:55:49 | 235,413,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,604 | py |
# parameters.py
"""
Exp 41 - {'Initial_genes': '5000', 'Host_mutation_rate': '0.30', 'TE_progeny': '0.15, 0, 0.55, 1, 0.30, 2', 'TE_Insertion_Distribution': 'Flat()', 'Carrying_capacity': '30', 'TE_excision_rate': '0.1', 'Junk_BP': '1.4', 'Gene_Insertion_Distribution': 'Flat()', 'mutation_effect': '0.10', 'TE_death_rate': '0.0005'}
"""
from TEUtil import *;
# note that "#" indicates a comment
# set the following to True if you want messages printed to the screen
# while the program runs - search for these keywords in TESim.py to see
# what each one prints out
output = {
"SPLAT": False,
"SPLAT FITNESS": False,
"INITIALIZATION": False,
"GENERATION": True,
"HOST EXTINCTION": True,
"TE EXTINCTION": True,
"TRIAL NO": True,
"GENE INIT": False,
"TE INIT": False,
};
TE_Insertion_Distribution = Flat();
Gene_Insertion_Distribution = Flat();
# Triangle( pmax, pzero ) generates values between pmax and pzero with
# a triangular probability distribution, where pmax is the point of highest
# probability, and pzero is the point of lowest probability
# - you can change the orientation of the triangle by reversing the values
# of pmax and pzero
# Flat() generates values between 0 and 1 with uniform probability
Gene_length = 1000; # use 1000?
TE_length = 1000; # use 1000?
TE_death_rate = 0.0005;
TE_excision_rate = 0.1; # set this to zero for retro transposons
# for retro transposons this is the probability of the given number of progeny
# for dna transposons this is the probability of the given number of progeny
# ___PLUS___ the original re-inserting
TE_progeny = ProbabilityTable( 0.15, 0, 0.55, 1, 0.30, 2 );
Initial_genes = 5000;
Append_gene = True;     # True: when the initialization routine tries to place
                        # a gene inside another gene, it instead appends it
                        # at the end of the original gene (use this with small
                        # amounts of Junk_BP).
                        # False: when the initialization routine tries to place
                        # a gene inside another gene, try to place it somewhere
                        # else again (don't use this option with small amounts
                        # of Junk_BP).
Initial_TEs = 1;
MILLION = 1000000;
Junk_BP = 1.4 * MILLION;
Host_start_fitness = 1.0;
Host_mutation_rate = 0.30;
Host_mutation = ProbabilityTable( 0.40, lambda fit: 0.0,
0.30, lambda fit: fit - random.random()*0.10,
0.15, lambda fit: fit,
0.15, lambda fit: fit + random.random()*0.10
);
# what happens when a TA hits a gene
Insertion_effect = ProbabilityTable(0.30, lambda fit: 0.0,
0.20, lambda fit: fit - random.random()*0.10,
0.30, lambda fit: fit,
0.20, lambda fit: fit + random.random()*0.10
);
Carrying_capacity = 30;
Host_reproduction_rate = 1; # how many offspring each host has
Host_survival_rate = lambda propfit: min( Carrying_capacity * propfit, 0.95 );
# propfit = proportion of fitness owned by this individual
Maximum_generations = 1500;
Terminate_no_TEs = True; # end simulation if there are no TEs left
# seed = 0;
seed = None; # if seed = None, the random number generator's initial state is
# set "randomly"
save_frequency = 50; # Frequency with which to save state of experiment
saved = None; # if saved = None then we start a new simulation from scratch
              # if saved = string, then we open that file and resume a simulation
| [
"[email protected]"
] | |
a03230f460994f28b677a293aea19162a7708eb2 | 8ff12c53e31f134b9f39f59b9a6f7d4f9142cea7 | /lvlist/teacherPython/lianxi.py | bf2cc0047479923ed84cd01299189d22e12ed361 | [] | no_license | quhuohuo/python | 5b0a80dbec7d22a0b274e4a32d269e85d254718c | 5732c5974519da8e8919dab42b36ab0ab2c99b37 | refs/heads/master | 2021-06-13T11:41:12.356329 | 2017-04-07T08:58:05 | 2017-04-07T08:58:05 | 75,054,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | #!/usr/bin/python
def fun(char):
l = char.split(" ")
char = ''.join(l)
return char
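
# fun() above removes every space from the input string, e.g. "a b  c" -> "abc"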
while True:
s = raw_input()
if not len(s):
break
print "before:",s
s = fun(s)
print "after:",s
| [
"[email protected]"
] | |
19774af108915387eb5f2ee44608d270c5137efc | 6d4d69c91eb14150778468f7cf73d8e2a9aa9124 | /t/variant/read_expresses_del.py | a0eba72d31d3b41050cde4c2746b8eee0690634a | [] | no_license | phonybone/Nof1 | 847acf7ce785319590f99271d20b7c126b59b699 | 22d877a96cd4481fdb7bf860c4d0721fcb34ddbe | refs/heads/master | 2021-01-23T13:56:53.606351 | 2013-09-20T23:50:30 | 2013-09-20T23:50:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | import unittest, sys, os
from warnings import warn
libdir=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','lib'))
sys.path.append(libdir)
from variant import *
class TestExpressesSNP(unittest.TestCase):
def setUp(self):
print
def test_expresses_del(self):
var=Variant('ABC', 23, 'center', 'hg45', 'chr1', 3827, 3836, '+', 'Missense_Mutation', 'DEL',
'GTATCCGTCA', 'GTATCCGTCA', '')
        seq='AAAAACCGAGCCCGGGGGTT'*4 # the deleted ref bases 'GTATCCGTCA' are absent at the variant position, so the read expresses the deletion
pos=3820 # has to encompass variant position of 3829
self.assertTrue(var.is_expressed_in_seq(seq, pos))
        seq='AAAAACGGTATCCGTCAAGC'*4 # the deleted ref bases 'GTATCCGTCA' are present at the variant position, so the deletion is not expressed
self.assertFalse(var.is_expressed_in_seq(seq, pos))
#-----------------------------------------------------------------------
if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(TestExpressesDel)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
"[email protected]"
] | |
c310f33e1c8dbb6251814466ec5e07be15b0a61f | 637fe43cb3b858be426e9b9ce10485430ae1f146 | /fsleyes/gl/gl14/glmask_funcs.py | 3d9bfb590cf1c45bff2b61b253fd436eaac571e6 | [
"BSD-3-Clause",
"CC-BY-3.0",
"Apache-2.0"
] | permissive | laurenpan02/fsleyes | 9dda45c1b1b77f0f042488ddf40fed46e5c77360 | eed8940d422994b6c1f1787381ebac2361b81408 | refs/heads/master | 2023-03-11T16:49:16.994945 | 2021-02-25T18:07:39 | 2021-02-25T18:07:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,363 | py | #!/usr/bin/env python
#
# glmask_funcs.py - OpenGL 1.4 functions used by the GLMask class.
#
# Author: Paul McCarthy <[email protected]>
#
"""This module provides functions which are used by the :class:`.GLMask`
class to render :class:`.Image` overlays in an OpenGL 1.4 compatible manner.
"""
import fsleyes.gl.shaders as shaders
from . import glvolume_funcs
def init(self):
"""Calls the :func:`compileShaders` and :func:`updateShaderState`
functions.
"""
self.shader = None
    compileShaders(self)
updateShaderState(self)
def destroy(self):
"""Destroys the shader programs. """
self.shader.destroy()
self.shader = None
def compileShaders(self):
"""Loads the vertex/fragment shader source code, and creates a
:class:`.ARBPShader` program.
"""
if self.shader is not None:
self.shader.destroy()
vertSrc = shaders.getVertexShader( 'glvolume')
fragSrc = shaders.getFragmentShader('glmask')
textures = {
'imageTexture' : 0,
}
self.shader = shaders.ARBPShader(vertSrc,
fragSrc,
shaders.getShaderDir(),
textures)
def updateShaderState(self):
"""Updates all shader program variables. """
if not self.ready():
return
opts = self.opts
shader = self.shader
colour = self.getColour()
threshold = list(self.getThreshold())
if opts.invert: threshold += [ 1, 0]
else: threshold += [-1, 0]
shader.load()
shader.setFragParam('threshold', threshold)
shader.setFragParam('colour', colour)
shader.unload()
return True
def draw2D(self, zpos, axes, xform=None, bbox=None):
"""Draws a 2D slice at the given ``zpos``. Uses the
:func:`.gl14.glvolume_funcs.draw2D` function.
"""
self.shader.load()
self.shader.loadAtts()
glvolume_funcs.draw2D(self, zpos, axes, xform, bbox)
self.shader.unloadAtts()
self.shader.unload()
def drawAll(self, axes, zposes, xforms):
"""Draws all specified slices. Uses the
:func:`.gl14.glvolume_funcs.drawAll` function.
"""
self.shader.load()
self.shader.loadAtts()
glvolume_funcs.drawAll(self, axes, zposes, xforms)
self.shader.unloadAtts()
self.shader.unload()
| [
"[email protected]"
] | |
7641a1c1f9068abb40afb542114f32591bf63472 | f645ebae84e973cb42cffbe7f1d112ff2e3b0597 | /no/edgebox_final/edgebox_final/settings.py | 8cc80e236c92caef201e903858278cbcd6d1bf38 | [] | no_license | bopopescu/file_trans | 709ce437e7aa8ce15136aa6be2f5d696261c30bd | fadc3faf6473539ed083ccd380df92f43115f315 | refs/heads/master | 2022-11-19T18:54:17.868828 | 2020-03-11T04:30:41 | 2020-03-11T04:30:41 | 280,964,974 | 0 | 0 | null | 2020-07-19T22:57:41 | 2020-07-19T22:57:40 | null | UTF-8 | Python | false | false | 3,754 | py | """
Django settings for edgebox_final project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i(67r0=ud0l6ti(1sr&d0)m6fl6+_^bus41y&h92%i_ynp(-ov'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
"Agent",
"Device",
"Drive",
"SmartDevice",
'djcelery',
'rest_framework',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'edgebox_final.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'edgebox_final.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/10",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
# Pagination settings (Django REST framework)
REST_FRAMEWORK = {
    'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning',  # enable DRF namespace-based API versioning
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 5
}
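# With PageNumberPagination and PAGE_SIZE = 5, list endpoints return five results per
# page and take a ?page=N query parameter (e.g. <list-endpoint>?page=2 for items 6-10).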
from .celery_config import * | [
"[email protected]"
] | |
bed8fdd79205932c1f16505cfd9077aa45156f68 | 2d9a17e2b896d2f6a90913a4ba02d41f0ede5dd0 | /_58job/page_store.py | ab4764e39b62286c71dc816045dbe148722d6785 | [] | no_license | wolfwhoami/xxxxx | 1cf2ed2c8ed78048d87cccf2953ca86c0871a783 | 670787ec71127bc05c1645cc3d8ef7c3a91fe84b | refs/heads/master | 2020-03-30T00:44:55.864817 | 2016-12-16T01:45:03 | 2016-12-16T01:45:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,600 | py | #!/usr/bin/env python
# -*- coding:utf8 -*-
from spider.ipin.savedb import PageStoreBase
from spider.runtime import Log
from spider.util import htmlfind
from spider.util import TimeHandler
import spider
import time
import re
class Jd58PageStore(PageStoreBase):
def __init__(self):
super(Jd58PageStore, self).__init__('jd_58job')
def extract_content(self):
content = htmlfind.findTag(self.get_cur_doc().cur_content, 'div', 'posMsg borb')
try:
content = htmlfind.remove_tag(content[0], 1)
except:
Log.errorbin("invalid jd content %s" % self.get_cur_doc().cur_url, self.get_cur_doc().cur_content)
return None
return content
def page_time(self):
tag = htmlfind.findTag(self.get_cur_doc().cur_content, 'ul', 'class="headTag"')
try:
tag = htmlfind.remove_tag(tag[0], 1)
except:
Log.errorbin("invalid jd pubtime %s" % self.get_cur_doc().cur_url, self.get_cur_doc().cur_content)
raise
if isinstance(tag, unicode):
tag = tag.encode('utf-8')
if "天前" not in tag:
return int(time.time() * 1000)
else:
find = re.search('(\d+).*?(\d+).*?(\d+)', tag, re.S)
if find:
day = find.group(1)
return TimeHandler.getTimeOfNDayBefore(day)
raise Exception("not copy time pattern: {}".format(tag))
def check_should_fetch(self, jobid):
if not super(Jd58PageStore, self).check_should_fetch(jobid):
return False
return True | [
"[email protected]"
] | |
217bd2af0238293662a1d0bef1aaf8b835af57ff | a4830a0189c325c35c9021479a5958ec870a2e8b | /lib/pyutil/django/mixins.py | 1f6e5aee271e26288ffc4fda4263d7ba951ea772 | [] | no_license | solutionprovider9174/steward | 044c7d299a625108824c854839ac41f51d2ca3fd | fd681593a9d2d339aab0f6f3688412d71cd2ae32 | refs/heads/master | 2022-12-11T06:45:04.544838 | 2020-08-21T02:56:55 | 2020-08-21T02:56:55 | 289,162,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,931 | py | # Django
from django.http import JsonResponse
from django.forms import BaseFormSet, formset_factory
from django.forms.models import model_to_dict
from django.views.generic.edit import FormMixin
from django.core.exceptions import ImproperlyConfigured
from django.views.generic.detail import SingleObjectTemplateResponseMixin
class JSONResponseMixin(object):
"""
A mixin that can be used to render a JSON response.
"""
def render_to_response(self, context, **response_kwargs):
"""
Returns a JSON response, transforming 'context' to make the payload.
"""
return JsonResponse(
self.get_data(context),
**response_kwargs
)
def get_data(self, context):
"""
Returns an object that will be serialized as JSON by json.dumps().
"""
# Note: This is *EXTREMELY* naive; in reality, you'll need
# to do much more complex handling to ensure that arbitrary
# objects -- such as Django model instances or querysets
# -- can be serialized as JSON.
return context
class JSONModelMixin(object):
"""
A mixin that can be used to render a Model as a JSON response.
"""
def render_to_response(self, context):
if self.request.is_ajax() or self.request.GET.get('format') == 'json':
return JSONResponseMixin.render_to_response(self, model_to_dict(self.get_object()))
else:
return SingleObjectTemplateResponseMixin.render_to_response(self, context)
class ProcessFormMixin(FormMixin):
"""
Handles POST requests, instantiating a form instance with the passed
POST variables and then checked for validity.
"""
formset_class = None
formset_extra = 0
def get_formset_class(self):
return self.formset_class
def form_invalid(self, form, formset):
return self.render_to_response(self.get_context_data(form=form, formset=formset))
def get_formset(self, formset_class=None, formset_extra=None):
if formset_class is None:
formset_class = self.get_formset_class()
if formset_extra is None:
formset_extra = self.formset_extra
if formset_class is None:
return None
else:
formset = formset_factory(formset_class, extra=formset_extra)
return formset(**self.get_form_kwargs())
def get_context_data(self, **kwargs):
if 'formset' not in kwargs:
kwargs['formset'] = self.get_formset()
return super(ProcessFormMixin, self).get_context_data(**kwargs)
def post(self, request, *args, **kwargs):
form = self.get_form()
formset = self.get_formset()
if formset:
if form.is_valid() and formset.is_valid():
return self.form_valid(form, formset)
else:
if form.is_valid():
return self.form_valid(form, None)
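
# Illustrative usage sketch (OrderView, OrderForm and OrderLineForm are hypothetical,
# not part of this module):
#
#   class OrderView(ProcessFormMixin, TemplateResponseMixin, View):
#       form_class = OrderForm          # handled by the inherited FormMixin
#       formset_class = OrderLineForm   # wrapped into a formset by get_formset()
#       formset_extra = 2               # render two blank extra rows
#
#       def form_valid(self, form, formset):
#           ...                         # both were validated together in post()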
| [
"[email protected]"
] | |
4bbfd3063d60db8bdd0ba24404b6cba6e8214f32 | d916a3a68980aaed1d468f30eb0c11bfb04d8def | /2021_06_14_Linked_list_cycle.py | 2cfffe4d21e1cf1685d43336acfba01f596912c7 | [] | no_license | trinhgliedt/Algo_Practice | 32aff29ca6dc14f9c74308af1d7eaaf0167e1f72 | 480de9be082fdcbcafe68e2cd5fd819dc7815e64 | refs/heads/master | 2023-07-10T23:49:16.519671 | 2021-08-11T05:11:34 | 2021-08-11T05:11:34 | 307,757,861 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,882 | py | # https://leetcode.com/problems/linked-list-cycle/
# Given head, the head of a linked list, determine if the linked list has a cycle in it.
# There is a cycle in a linked list if there is some node in the list that can be reached again by continuously following the next pointer. Internally, pos is used to denote the index of the node that tail's next pointer is connected to. Note that pos is not passed as a parameter.
# Return true if there is a cycle in the linked list. Otherwise, return false.
# Example 1:
# Input: head = [3,2,0,-4], pos = 1
# Output: true
# Explanation: There is a cycle in the linked list, where the tail connects to the 1st node (0-indexed).
# Example 2:
# Input: head = [1,2], pos = 0
# Output: true
# Explanation: There is a cycle in the linked list, where the tail connects to the 0th node.
# Example 3:
# Input: head = [1], pos = -1
# Output: false
# Explanation: There is no cycle in the linked list.
# Constraints:
# The number of the nodes in the list is in the range [0, 104].
# -105 <= Node.val <= 105
# pos is -1 or a valid index in the linked-list.
# Follow up: Can you solve it using O(1) (i.e. constant) memory?
# Definition for singly-linked list.
from typing import List
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def hasCycle(self, head: ListNode) -> bool:
hare = head
turtle = head
while turtle and hare and hare.next:
hare = hare.next.next
turtle = turtle.next
if turtle == hare:
return True
return False
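
# Floyd's "tortoise and hare" cycle detection: the hare advances two nodes per step,
# the turtle one; they can only meet if the list loops back on itself. This runs in
# O(n) time with O(1) extra memory, which answers the follow-up question.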
s = Solution()
node1 = ListNode(1)
node5 = ListNode(5)
node11 = ListNode(11)
node8 = ListNode(8)
node9 = ListNode(9)
node1.next = node5
node5.next = node11
node11.next = node8
node8.next = node9
node9.next = node5
answer = s.hasCycle(node1)
print(answer)
| [
"[email protected]"
] | |
7f976f5b8142c14de1f5a2d2cbea50a1fe36c512 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/appconfiguration/azure-appconfiguration/azure/appconfiguration/aio/_sync_token_async.py | 9d2441dc438ea9e84f222b0768eefed6c3454998 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 4,236 | py | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from typing import Any, Dict
from asyncio import Lock
from azure.core.pipeline import PipelineRequest, PipelineResponse
from azure.core.pipeline.policies import SansIOHTTPPolicy
from .._sync_token import SyncToken
class AsyncSyncTokenPolicy(SansIOHTTPPolicy):
"""A simple policy that enable the given callback with the response.
:keyword callback raw_response_hook: Callback function. Will be invoked on response.
"""
def __init__(self, **kwargs: Any) -> None: # pylint: disable=unused-argument
self._sync_token_header = "Sync-Token"
self._sync_tokens = {} # type: Dict[str, Any]
self._lock = Lock()
async def on_request(self, request: PipelineRequest) -> None: # type: ignore # pylint: disable=arguments-differ, invalid-overridden-method
"""This is executed before sending the request to the next policy.
:param request: The PipelineRequest object.
:type request: ~azure.core.pipeline.PipelineRequest
"""
async with self._lock:
sync_token_header = ",".join(str(x) for x in self._sync_tokens.values())
if sync_token_header:
request.http_request.headers.update({self._sync_token_header: sync_token_header})
async def on_response(self, request: PipelineRequest, response: PipelineResponse) -> None: # type: ignore # pylint: disable=arguments-differ, invalid-overridden-method
"""This is executed after the request comes back from the policy.
:param request: The PipelineRequest object.
:type request: ~azure.core.pipeline.PipelineRequest
:param response: The PipelineResponse object.
:type response: ~azure.core.pipeline.PipelineResponse
"""
sync_token_header = response.http_response.headers.get(self._sync_token_header)
if not sync_token_header:
return
sync_token_strings = sync_token_header.split(",")
if not sync_token_strings:
return
for sync_token_string in sync_token_strings:
sync_token = SyncToken.from_sync_token_string(sync_token_string)
await self._update_sync_token(sync_token)
async def add_token(self, full_raw_tokens: str) -> None:
raw_tokens = full_raw_tokens.split(",")
for raw_token in raw_tokens:
sync_token = SyncToken.from_sync_token_string(raw_token)
await self._update_sync_token(sync_token)
async def _update_sync_token(self, sync_token: SyncToken) -> None:
if not sync_token:
return
async with self._lock:
existing_token = self._sync_tokens.get(sync_token.token_id, None)
if not existing_token:
self._sync_tokens[sync_token.token_id] = sync_token
return
if existing_token.sequence_number < sync_token.sequence_number:
self._sync_tokens[sync_token.token_id] = sync_token
| [
"[email protected]"
] | |
ae59f02eab72110000b74d8503fae65c3fc36ecd | e164fd9dce5fef093f85ca009f78570ec2b1c492 | /324. Wiggle Sort II.py | c63081d423ce9f82a653401f08c2dc5fb6ed93ff | [] | no_license | havenshi/leetcode | 58fde93a1f1cbdd3c2faa9566c00383e5812f3a7 | bcb79f329bcb133e6421db8fc1f4780a4eedec39 | refs/heads/master | 2021-01-22T04:15:23.748793 | 2019-11-30T04:25:54 | 2019-11-30T04:25:54 | 92,447,327 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,541 | py | # Sorting and reoder solution. (92ms)
class Solution(object):
def wiggleSort(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
nums.sort()
med = (len(nums) - 1) / 2
nums[::2], nums[1::2] = nums[med::-1], nums[:med:-1]
        # nums[med::-1] is the lower half of the sorted list reversed; nums[:med:-1] is the upper half reversed
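        # e.g. nums = [1, 5, 1, 1, 6, 4]: sorted -> [1, 1, 1, 4, 5, 6], med = 2,
        # even slots get [1, 1, 1] and odd slots get [6, 5, 4] -> [1, 6, 1, 5, 1, 4]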
# Time: O(n) ~ O(n^2)
# Space: O(1)
# Tri Partition (aka Dutch National Flag Problem) with virtual index solution. (TLE)
from random import randint
class Solution2(object):
def wiggleSort(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
def findKthLargest(nums, k):
left, right = 0, len(nums) - 1
while left <= right:
pivot_idx = randint(left, right)
new_pivot_idx = partitionAroundPivot(left, right, pivot_idx, nums)
if new_pivot_idx == k - 1:
return nums[new_pivot_idx]
elif new_pivot_idx > k - 1:
right = new_pivot_idx - 1
else: # new_pivot_idx < k - 1.
left = new_pivot_idx + 1
def partitionAroundPivot(left, right, pivot_idx, nums):
pivot_value = nums[pivot_idx]
new_pivot_idx = left
nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
for i in xrange(left, right):
if nums[i] > pivot_value:
nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
new_pivot_idx += 1
nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
return new_pivot_idx
def reversedTriPartitionWithVI(nums, val):
def idx(i, N):
return (1 + 2 * (i)) % N
N = len(nums) / 2 * 2 + 1
i, j, n = 0, 0, len(nums) - 1
while j <= n:
if nums[idx(j, N)] > val:
nums[idx(i, N)], nums[idx(j, N)] = nums[idx(j, N)], nums[idx(i, N)]
i += 1
j += 1
elif nums[idx(j, N)] < val:
nums[idx(j, N)], nums[idx(n, N)] = nums[idx(n, N)], nums[idx(j, N)]
n -= 1
else:
j += 1
mid = (len(nums) - 1) / 2
findKthLargest(nums, mid + 1)
reversedTriPartitionWithVI(nums, nums[mid]) | [
"[email protected]"
] | |
91577320a6ad2fab7a30f0640acbdbcf621586e1 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-enumeration-3-5.py | 41c46ea0f9f6ec5c12819d5834a5ba585aeda8a2 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 717 | py | from output.models.nist_data.list_pkg.nmtoken.schema_instance.nistschema_sv_iv_list_nmtoken_enumeration_3_xsd.nistschema_sv_iv_list_nmtoken_enumeration_3 import NistschemaSvIvListNmtokenEnumeration3
from output.models.nist_data.list_pkg.nmtoken.schema_instance.nistschema_sv_iv_list_nmtoken_enumeration_3_xsd.nistschema_sv_iv_list_nmtoken_enumeration_3 import NistschemaSvIvListNmtokenEnumeration3Type
obj = NistschemaSvIvListNmtokenEnumeration3(
value=NistschemaSvIvListNmtokenEnumeration3Type.IDENTIFY_THE_FURTHERMORE_PARTNERS_VERSIONS_TO_TECHNOL_THAT_COMMERCE_D_FROM_FRAMEWORKS_WOULD_PA_SAME_FIVE_SIMULATION_COMPLEX_OASIS_TO_THE_NAVAL_DATA_IN_AROMA_DESCRIPTION_BASE_EC_RECOMMEN_SOME_THESE_TOOLS_CO_RELATED
)
| [
"[email protected]"
] | |
61ffe08a041bf9ab8125c750c6710d2416c6f292 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17s_1_02/mpls_state/ldp/statistics/ldp_protocol_stats_instance_since_clear/__init__.py | 9db90b9b03d86d82c89d8808869b01b69cf370e7 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,549 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import protocol_stats
class ldp_protocol_stats_instance_since_clear(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls-operational - based on the path /mpls-state/ldp/statistics/ldp-protocol-stats-instance-since-clear. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__protocol_stats',)
_yang_name = 'ldp-protocol-stats-instance-since-clear'
_rest_name = 'ldp-protocol-stats-instance-since-clear'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__protocol_stats = YANGDynClass(base=YANGListType("stat_type",protocol_stats.protocol_stats, yang_name="protocol-stats", rest_name="protocol-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='stat-type', extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="protocol-stats", rest_name="protocol-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mpls-state', u'ldp', u'statistics', u'ldp-protocol-stats-instance-since-clear']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'mpls-state', u'ldp', u'statistics', u'ldp-protocol-stats-instance-since-clear']
def _get_protocol_stats(self):
"""
Getter method for protocol_stats, mapped from YANG variable /mpls_state/ldp/statistics/ldp_protocol_stats_instance_since_clear/protocol_stats (list)
YANG Description: protocol stats rx/tx
"""
return self.__protocol_stats
def _set_protocol_stats(self, v, load=False):
"""
Setter method for protocol_stats, mapped from YANG variable /mpls_state/ldp/statistics/ldp_protocol_stats_instance_since_clear/protocol_stats (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_protocol_stats is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_protocol_stats() directly.
YANG Description: protocol stats rx/tx
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("stat_type",protocol_stats.protocol_stats, yang_name="protocol-stats", rest_name="protocol-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='stat-type', extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="protocol-stats", rest_name="protocol-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """protocol_stats must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("stat_type",protocol_stats.protocol_stats, yang_name="protocol-stats", rest_name="protocol-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='stat-type', extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="protocol-stats", rest_name="protocol-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
})
self.__protocol_stats = t
if hasattr(self, '_set'):
self._set()
def _unset_protocol_stats(self):
self.__protocol_stats = YANGDynClass(base=YANGListType("stat_type",protocol_stats.protocol_stats, yang_name="protocol-stats", rest_name="protocol-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='stat-type', extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="protocol-stats", rest_name="protocol-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
protocol_stats = __builtin__.property(_get_protocol_stats)
_pyangbind_elements = {'protocol_stats': protocol_stats, }
| [
"[email protected]"
] | |
2b60f88f7128b020f21fa8e9351b9fb82c26385d | 13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab | /home--tommy--mypy/mypy/lib/python2.7/site-packages/gensim/corpora/lowcorpus.py | e293c998a14d288506947a9fd241acf64a343952 | [
"Unlicense"
] | permissive | tommybutler/mlearnpy2 | 8ec52bcd03208c9771d8d02ede8eaa91a95bda30 | 9e5d377d0242ac5eb1e82a357e6701095a8ca1ff | refs/heads/master | 2022-10-24T23:30:18.705329 | 2022-10-17T15:41:37 | 2022-10-17T15:41:37 | 118,529,175 | 0 | 2 | Unlicense | 2022-10-15T23:32:18 | 2018-01-22T23:27:10 | Python | UTF-8 | Python | false | false | 7,100 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Corpus in GibbsLda++ format of List-Of-Words.
"""
from __future__ import with_statement
import logging
from gensim import utils
from gensim.corpora import IndexedCorpus
from six import iterkeys
from six.moves import xrange, zip as izip
logger = logging.getLogger('gensim.corpora.lowcorpus')
def split_on_space(s):
return [word for word in utils.to_unicode(s).strip().split(' ') if word]
class LowCorpus(IndexedCorpus):
"""
List_Of_Words corpus handles input in GibbsLda++ format.
Quoting http://gibbslda.sourceforge.net/#3.2_Input_Data_Format::
Both data for training/estimating the model and new data (i.e., previously
unseen data) have the same format as follows:
[M]
[document1]
[document2]
...
[documentM]
in which the first line is the total number for documents [M]. Each line
after that is one document. [documenti] is the ith document of the dataset
that consists of a list of Ni words/terms.
[documenti] = [wordi1] [wordi2] ... [wordiNi]
in which all [wordij] (i=1..M, j=1..Ni) are text strings and they are separated
by the blank character.
"""
def __init__(self, fname, id2word=None, line2words=split_on_space):
"""
Initialize the corpus from a file.
`id2word` and `line2words` are optional parameters.
If provided, `id2word` is a dictionary mapping between word_ids (integers)
and words (strings). If not provided, the mapping is constructed from
the documents.
`line2words` is a function which converts lines into tokens. Defaults to
simple splitting on spaces.
"""
IndexedCorpus.__init__(self, fname)
logger.info("loading corpus from %s", fname)
self.fname = fname # input file, see class doc for format
self.line2words = line2words # how to translate lines into words (simply split on space by default)
self.num_docs = self._calculate_num_docs()
if not id2word:
# build a list of all word types in the corpus (distinct words)
logger.info("extracting vocabulary from the corpus")
all_terms = set()
self.use_wordids = False # return documents as (word, wordCount) 2-tuples
for doc in self:
all_terms.update(word for word, wordCnt in doc)
all_terms = sorted(all_terms) # sort the list of all words; rank in that list = word's integer id
# build a mapping of word id(int) -> word (string)
self.id2word = dict(izip(xrange(len(all_terms)), all_terms))
else:
logger.info("using provided word mapping (%i ids)", len(id2word))
self.id2word = id2word
self.num_terms = len(self.word2id)
self.use_wordids = True # return documents as (wordIndex, wordCount) 2-tuples
logger.info(
"loaded corpus with %i documents and %i terms from %s",
self.num_docs, self.num_terms, fname
)
def _calculate_num_docs(self):
# the first line in input data is the number of documents (integer). throws exception on bad input.
with utils.smart_open(self.fname) as fin:
try:
result = int(next(fin))
except StopIteration:
result = 0
return result
def __len__(self):
return self.num_docs
def line2doc(self, line):
words = self.line2words(line)
if self.use_wordids:
# get all distinct terms in this document, ignore unknown words
uniq_words = set(words).intersection(iterkeys(self.word2id))
# the following creates a unique list of words *in the same order*
# as they were in the input. when iterating over the documents,
# the (word, count) pairs will appear in the same order as they
# were in the input (bar duplicates), which looks better.
# if this was not needed, we might as well have used useWords = set(words)
use_words, marker = [], set()
for word in words:
if (word in uniq_words) and (word not in marker):
use_words.append(word)
marker.add(word)
# construct a list of (wordIndex, wordFrequency) 2-tuples
doc = [(self.word2id.get(w), words.count(w)) for w in use_words]
else:
uniq_words = set(words)
# construct a list of (word, wordFrequency) 2-tuples
doc = [(w, words.count(w)) for w in uniq_words]
# return the document, then forget it and move on to the next one
# note that this way, only one doc is stored in memory at a time, not the whole corpus
return doc
def __iter__(self):
"""
Iterate over the corpus, returning one bag-of-words vector at a time.
"""
with utils.smart_open(self.fname) as fin:
for lineno, line in enumerate(fin):
if lineno > 0: # ignore the first line = number of documents
yield self.line2doc(line)
@staticmethod
def save_corpus(fname, corpus, id2word=None, metadata=False):
"""
Save a corpus in the List-of-words format.
This function is automatically called by `LowCorpus.serialize`; don't
call it directly, call `serialize` instead.
"""
if id2word is None:
logger.info("no word id mapping provided; initializing from corpus")
id2word = utils.dict_from_corpus(corpus)
logger.info("storing corpus in List-Of-Words format into %s" % fname)
truncated = 0
offsets = []
with utils.smart_open(fname, 'wb') as fout:
fout.write(utils.to_utf8('%i\n' % len(corpus)))
for doc in corpus:
words = []
for wordid, value in doc:
if abs(int(value) - value) > 1e-6:
truncated += 1
words.extend([utils.to_unicode(id2word[wordid])] * int(value))
offsets.append(fout.tell())
fout.write(utils.to_utf8('%s\n' % ' '.join(words)))
if truncated:
logger.warning(
"List-of-words format can only save vectors with integer elements; "
"%i float entries were truncated to integer value", truncated
)
return offsets
def docbyoffset(self, offset):
"""
Return the document stored at file position `offset`.
"""
with utils.smart_open(self.fname) as f:
f.seek(offset)
return self.line2doc(f.readline())
@property
def id2word(self):
return self._id2word
@id2word.setter
def id2word(self, val):
self._id2word = val
self.word2id = utils.revdict(val)
| [
"[email protected]"
] | |
d95b85d157c5e47a6a21e27eabf4525b5afea52e | d0a84d97aaa8dcc2dff4a6b33ce98dee6d474496 | /com.CheckProofing/Test_Campaign_2021/scripts/python/Page/extract_images.py | 3b80f87b21db865b5932d0164080417339bd2fe7 | [] | no_license | ahmed-test001/python | 21a27248c4571a13c0ed4dccab256aede1beea3a | eab59b9a54fae1a51fbc18c391599eb3b0e28b3d | refs/heads/master | 2023-03-10T21:00:54.634028 | 2021-02-27T05:31:58 | 2021-02-27T05:31:58 | 342,778,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,915 | py | # import json
# import re
# import os
# import sys
# import requests
# import pytesseract
# # import cv2
# from urllib.parse import urlparse
#
# from bs4 import BeautifulSoup
# from selenium.webdriver.support.wait import WebDriverWait
#
# from Test_Campaign_2021.scripts.python.Util_Data import ReadConfig
#
#
# class extract_images:
# output_dir = "../../data/output/"
#
# def __init__(self, driver):
# self.driver = driver
# self.wait = WebDriverWait(self.driver, 10)
#
# def check_key_exist(self, test_dict, key):
# try:
# value = test_dict[key]
# return True
# except KeyError:
# return False
#
# def extract_images(self):
#
# with open(ReadConfig.readFilePathData('FilePaths', 'url_list'), 'w') as f:
# urls = f.read().splitlines()
# contents = urls[0]
# input_html_file = BeautifulSoup(contents, 'html.parser')
# f.close()
# print("#################### Extract Images Start ####################")
# pytesseract.pytesseract.tesseract_cmd = (r"C:\\Program Files\\Tesseract-OCR\\tesseract.exe")
#
# png_images = input_html_file.find_all('img', {'src': re.compile('.png')})
# jpg_images = input_html_file.find_all('img', {'src': re.compile('.jpg')})
# ahref_links = []
# hyper_links_json = {}
# for image in jpg_images:
# d_cols = {}
# d_cols['src'] = image['src']
# source = urlparse(image['src'])
# print("Image Source: ", source)
# filename = os.path.basename(source.path)
# response = requests.get(image['src'])
# image_file = open(self.output_dir+"/proof_images/" + filename, "wb+")
# image_file.write(response.content)
# image_file.close()
# d_cols['filename'] = filename
# # if image['alt'] == "":
# # continue
# d_cols['alt'] = image['alt'] if self.check_key_exist(image, 'alt') else ""
# # d_cols['alt'] = image['alt']
# img = cv2.imread(self.output_dir+"/proof_images/" + filename)
# img = cv2.resize(img, None, fx=7, fy=7)
# data = pytesseract.image_to_string(img)
# d_cols['data'] = data.strip()
# ahref_links.append(d_cols)
#
# for image in png_images:
# d_cols = {}
# d_cols['src'] = image['src']
# source = urlparse(image['src'])
# print("Image Source: ", source)
# filename = os.path.basename(source.path)
# response = requests.get(image['src'])
# image_file = open(self.output_dir+"/proof_images/" + filename, "wb+")
# image_file.write(response.content)
# image_file.close()
# d_cols['filename'] = filename
#
# # if image['alt']=="":
# # continue
# d_cols['alt'] = image['alt'] if self.check_key_exist(image, 'alt') else ""
# # d_cols['alt'] = image['alt']
# img = cv2.imread(self.output_dir+"/proof_images/" + filename)
# img = cv2.resize(img, None, fx=7, fy=7)
# data = pytesseract.image_to_string(img)
# d_cols['data'] = data
# ahref_links.append(d_cols)
#
# # hyper_links_json['alerts'] = ahref_links
# # final_hyber_links = json.dumps(hyper_links_json, indent=4, sort_keys=False, ensure_ascii=False)
# # file = open(self.output_dir+"proof_files/" + "abc" + ".json", "w", encoding="utf-8")
# # # file = open(self.output_dir+"proof_files/" + self.output_file_name + '_' + '-'.join(self.filename.split('-')[-3:-1]) + ".json", "w", encoding="utf-8")
# # # file.write(final_hyber_links)
# # file.close()
# print("#################### Extract Images End ####################")
| [
"[email protected]"
] | |
b9063f096b96d5a75a310bc8ea0a8636adf03b5a | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /BHBXNfeMsA43d8Tys_22.py | a4efdfcb90caae2db151d39c9a348261e7d74a67 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,417 | py | """
As far as we currently know, approximations for the mathematical constant
**pi** (π) in the history of mathematics started surfacing with Ancient
Babylonians, who found its correct truncation up to 1 decimal place. During
the 5th century, the Chinese mathematician Zu Chongzhi raised it to 7 decimal
places and from the 18th century onwards the number of correct pi decimal
places has seen steady growth.
Since the middle of the 20th century, the approximation of pi has been the
task of electronic digital computers. During the 2019 Pi Day on the 14th of
March, the Japanese computer scientist _Emma Haruka Iwao_ released the
currently most accurate value of pi with more than 31.4 trillion digits, using
170 Terabytes of data.
Your task is to create a function that takes a positive integer `n` as an
argument and returns the value of **pi** with its first `n` decimal digits.
Taylor series are usually used to get finer approximations. To make this
challenge approachable to anyone, the following formula is suggested:
[The suggested formula appears as an image in the original challenge and is not reproduced here.]
### Examples
pi(1) ➞ "3.1"
pi(2) ➞ "3.14"
pi(30) ➞ "3.141592653589793238462643383279"
### Notes
N/A
"""
def pi(n):
i = 1
p = x = 3 * 10 ** (n + 10)
while x:
x = x * i // ((i + 1) * 4)
i += 2
p += x // i
return '3.' + str(p // 10 ** 10)[1:]
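
# e.g. (matching the docstring examples) pi(2) -> "3.14" and pi(5) -> "3.14159"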
| [
"[email protected]"
] | |
72fb6a38c5f5d698ef3de0e95fd431195f0c6c1c | 4522fc52bc43654aadd30421a75bae00a09044f0 | /riley/dev.py | 1c924685ce9976e08ff5b678bf63dcb402aa2ce4 | [] | no_license | qesoalpe/anelys | 1edb8201aa80fedf0316db973da3a58b67070fca | cfccaa1bf5175827794da451a9408a26cd97599d | refs/heads/master | 2020-04-07T22:39:35.344954 | 2018-11-25T05:23:21 | 2018-11-25T05:23:21 | 158,779,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,321 | py | from dict import Dict as dict
import os
import os.path
from isodate import datetime_isoformat
from datetime import datetime
from pathlib import Path
path_root = Path(r'/home/picazo/anelys')
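path_relative = True  # assumed default (build paths relative to path_root); parse_dir() below relies on this flag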
if os.path.sep != '/':
os.path.sep = '/'
from katherine import d6
def get_datetime(timestamp):
return datetime_isoformat(datetime.fromtimestamp(timestamp))
def parse_dir(dirpath):
dir = Dict()
dir.children = list()
dir.path = '/' + dirpath + '/'
dir.type = 'riley/directory'
if path_relative:
paths = os.listdir(dirpath)
if dirpath != '.':
paths = [os.path.join(dirpath, path).replace('\\', '/') for path in paths]
else:
paths = [path.replace('\\', '/') for path in paths]
for path in paths:
if os.path.isdir(path) and os.path.basename(path) not in ['__cache__', '__pycache__']:
dir.children.append(parse_dir(path))
elif os.path.isfile(path) and os.path.splitext(path)[1] in ['.py', '.pyw']:
f = open(path, 'rb')
import hashlib
md5_hashlib = hashlib.md5()
for chunk in iter(lambda: f.read(4096), b''):
md5_hashlib.update(chunk)
f.close()
file = Dict()
file.md5 = md5_hashlib.hexdigest().upper()
file.path = '/' + path
file.size = os.path.getsize(path)
file.modified_datetime = get_datetime(os.path.getmtime(path))
file.type = 'riley/file'
dir.children.append(file)
return dir
os.chdir(path_root)
tree = parse_dir('.')
def get_locals(dir):
rr = [child for child in dir.children if child.type == 'riley/file']
for m in [child for child in dir.children if child.type == 'riley/directory']:
rr.extend(get_locals(m))
from copy import deepcopy
m = deepcopy(m)
for k in list(m.keys()):
if k not in ['path', 'type']:
del m[k]
rr.append(m)
return rr
locals = get_locals(tree)
# cursor_db = db_mariadb.cursor()
# from pprint import pprint
#
# cursor_db = db_mariadb.cursor(pymysql.cursors.DictCursor)
# cursor_db.execute('select filepath as path, md5, size, modified_datetime from riley.file;')
#
# remotes = [Dict(file) for file in cursor_db]
#
# for file in remotes:
# file.modified_datetime = datetime_isoformat(file.modified_datetime)
#
#
#
# for katherine in locals:
# if 'path' in katherine:
# if katherine.path[0] != '/':
# katherine.path = '/' + katherine.path
#
# from pymongo import MongoClient
# db_mongo_local = MongoClient(port=27020)
# db_riley = db_mongo_local.get_database('riley')
# coll_snapshot_sync = db_riley.get_collection('snapshot_sync')
#
# snapshot = coll_snapshot_sync.find_one(projection={'_id': False},
# sort=[('datetime', -1)])
# if snapshot is not None:
# snapshots = snapshot.snapshots
# else:
# snapshots = None
#
# persisted_path = [file.path for file in persisted]
# locals_path = [file.path for file in locals]
#
#
# def persist_file(file):
# pass
#
# pprint(locals_path)
# pprint(persisted_path)
#
#
# snapshots = Dict({'snapshot': locals, 'datetime': datetime_isoformat(datetime.now())}) | [
"[email protected]"
] | |
92f6925b2a9cfb31a62f32e59f35f03425e5c4ee | fd25231975acd147e04dc3ed3627c92cb1a4f86c | /FlaskAPI/vir_env/lib/python3.7/site-packages/scipy/io/matlab/tests/test_mio.py | a2fff9f37f188018118bed9ef6dc4f9d6725e5b8 | [] | no_license | sumitkutty/Flight-Price-Prediction | 832a2802a3367e655b46d3b44f073d917abd2320 | d974a8b75fbcbfa42f11703602af3e45a3f08b3c | refs/heads/master | 2022-12-25T07:13:06.375888 | 2020-10-08T18:46:44 | 2020-10-08T18:46:44 | 302,366,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:8ddd2d36df500761f2d9af0be22308dd2450ebd281c3b0e945bc89b26ebb413d
size 42136
| [
"[email protected]"
] | |
9a0a32664eb32200ecc56fb66c1444ceee1270a9 | 7dc65b6d2e857c807bd2f75e2586af5f8e933fe5 | /fixtures/vcenter_gateway.py | 1c031b4ea91e3d21c538dd73e06dfdc6d23c41d9 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | vkolli/contrail-test-perf | d6fdc20f4a2004066c5a6316afd915ecdc9366c2 | db04b8924a2c330baabe3059788b149d957a7d67 | refs/heads/master | 2021-01-18T15:36:18.120487 | 2017-03-30T19:19:30 | 2017-03-30T19:19:30 | 86,661,522 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,851 | py | from vcenter import *
from vnc_api.vnc_api import *
from lif_fixture import LogicalInterfaceFixture
from physical_device_fixture import PhysicalDeviceFixture
from pif_fixture import PhysicalInterfaceFixture
from port_fixture import PortFixture
from openstack import OpenstackAuth, OpenstackOrchestrator
from contrailapi import ContrailVncApi
class VcenterGatewayOrch(VcenterOrchestrator):
def __init__(self, inputs, host, port, user, pwd, dc_name, vnc, logger):
super(VcenterGatewayOrch, self).__init__(inputs, host, port, user, pwd, dc_name, vnc, logger)
self.plug_api = ContrailPlugApi(inputs,vnc,logger)
def create_vn(self, name, subnets, **kwargs):
vn_obj = super(VcenterGatewayOrch, self).create_vn(name, subnets, **kwargs)
self.plug_api.create_network_in_contrail_cluster(name,subnets,**kwargs)
return vn_obj
def delete_vn(self, vn_obj, **kwargs):
super(VcenterGatewayOrch, self).delete_vn(vn_obj, **kwargs)
self.plug_api.delete_network_from_contrail_cluster(vn_obj.name,**kwargs)
def create_vm(self, vm_name, image_name, vn_objs, count=1, zone=None, node_name=None, **kwargs):
vm_objs = super(VcenterGatewayOrch, self).create_vm(vm_name, image_name, vn_objs, count=1, zone=None, node_name=None, **kwargs)
retry_vms = []
retry_vms = vm_objs[:]
for vm in retry_vms:
if self.get_vm_detail(vm):
retry_vms.remove(vm)
else:
continue
for vm in vm_objs:
for network in vm.networks:
vlanId = network.config.defaultPortConfig.vlan.vlanId
net_name = network.name
if net_name in vm.macs:
mac = vm.macs[net_name]
else:
mac = None
self.plug_api.create_vmi_lif_and_attach_vmi_to_lif(vn_name=net_name,mac_address=mac,vlan=vlanId,vm=vm)
for vm in vm_objs:
vm.bring_up_interfaces(self,vm,intfs=['eth0'])
for vm in vm_objs:
vm.get()
self.plug_api.create_vmobj_in_api_server(vm)
return vm_objs
def create_vn_vmi_for_stp_bpdu_to_be_flooded(self,**kwargs):
self.plug_api.create_network_in_contrail_cluster(name='stp_vn',subnet=[{'cidr':'122.121.123.0/24'}],**kwargs)
#The below code is needed for not to
#create the stp vmi port if already exists
#
interfaces = self._vnc.virtual_machine_interfaces_list()
for intf in interfaces['virtual-machine-interfaces']:
uuid = intf['uuid']
intf_obj = self._vnc.virtual_machine_interface_read(id=uuid)
mac_obj = intf_obj.get_virtual_machine_interface_mac_addresses()
macs = mac_obj.mac_address
if macs:
for mac in macs:
if mac == '02:02:03:04:05:06':
return
self.plug_api.create_vmi_lif_and_attach_vmi_to_lif(vn_name='stp_vn',mac_address='02:02:03:04:05:06',vlan='0')
def delete_vm(self, vm, **kwargs):
super(VcenterGatewayOrch, self).delete_vm(vm, **kwargs)
self.plug_api.delete_vmi_and_detach_vmi_to_lif(vm)
self.plug_api.delete_vmobj_in_api_server(vm)
class ContrailPlugApi(object):
def __init__(self, inputs, vnc, logger):
self._inputs = inputs
self._vnc = vnc
self.logger = logger
self._proj_obj = self._get_project_object()
self._ipam_obj = self._get_ipam_object()
self._gw = self._process_vcenter_gateway_info()
self.vnc_h = ContrailVncApi(self._vnc, self.logger)
def _get_project_object(self):
return self._vnc.project_read(fq_name = self._inputs.project_fq_name)
def _get_ipam_object(self):
return self._vnc.network_ipam_read(
fq_name=['default-domain', 'default-project', 'default-network-ipam'])
def create_network_in_contrail_cluster(self,name,subnet,**kwargs):
self.vn_uuid = self._create_vn(name,subnet)
pass
def delete_network_from_contrail_cluster(self,vn_name,**kwargs):
self._delete_vn(vn_name)
pass
def delete_vmi_and_detach_vmi_to_lif(self,vm):
self.delete_lif(vm)
self._delete_vmi(vm)
def delete_lif(self,vm):
self._delete_lif(vm)
def create_vmobj_in_api_server(self,vm_obj):
vm_uuid = vm_obj.id
try:
self.vnc_h.create_virtual_machine(vm_uuid=vm_uuid)
except Exception as e:
self.logger.error("VM object create in api failed for vm id %s"%(vm_uuid))
raise
vm_api_obj = self._vnc.virtual_machine_read(id=vm_obj.id)
for port in vm_obj.ports:
port_uuid = port.uuid
port_obj = self._vnc.virtual_machine_interface_read(id=port_uuid)
port_obj.set_virtual_machine(vm_api_obj)
self._vnc.virtual_machine_interface_update(port_obj)
def delete_vmobj_in_api_server(self,vm_obj):
vm_uuid = vm_obj.id
try:
self.vnc_h.delete_virtual_machine(vm_uuid=vm_uuid)
except Exception as e:
self.logger.error("VM object delete in api failed for vm id %s"%(vm_uuid))
def create_vmi_lif_and_attach_vmi_to_lif(self,vn_name,mac_address,vlan,vm=None):
vn_obj = self._read_vn(vn_name)
vn_id = vn_obj.uuid
#create vmi
port = self._create_vmi(vn_id=vn_id,mac_address=mac_address,
vm=vm )
#for each vrouter gateway port , create lif
for gw in self._gw:
for phy_port in gw.ports:
lif_name = phy_port + '.' + str(vlan)
pif_id = gw.get_port_uuid(phy_port)
self._create_lif(lif_name,vlan,pif_id,vm=vm,vmi_ids = [port.uuid])
def _create_vn(self, vn_name, vn_subnet):
vn_obj = VirtualNetwork(vn_name, parent_obj=self._proj_obj)
for pfx in vn_subnet:
px = pfx['cidr'].split('/')[0]
pfx_len = int(pfx['cidr'].split('/')[1])
subnet_vnc = IpamSubnetType(subnet=SubnetType(px, pfx_len))
vnsn_data = VnSubnetsType([subnet_vnc])
vn_obj.add_network_ipam(self._ipam_obj, vnsn_data)
try:
self._vnc.virtual_network_create(vn_obj)
except RefsExistError:
pass
def _delete_vn(self, vn_name):
vn_fq_name = VirtualNetwork(vn_name, self._proj_obj).get_fq_name()
try:
self._vnc.virtual_network_delete(fq_name=vn_fq_name)
except cfgm_common.exceptions.NoIdError:
pass
# end _delete_vn
def _read_vn(self,vn_name):
vn_fq_name = VirtualNetwork(vn_name, self._proj_obj).get_fq_name()
try:
vn_obj = self._vnc.virtual_network_read(fq_name=vn_fq_name)
except cfgm_common.exceptions.NoIdError:
pass
return vn_obj
def _create_lif(self,name,vlan,pif_id,vmi_ids=[],vm=None):
lif_obj = LogicalInterfaceFixture(
name, pif_id=pif_id, vlan_id=vlan,vmi_ids=vmi_ids)
lif_obj.setUp()
if vm:
vm.lifs.append(lif_obj)
def _delete_lif(self,vm):
for lif in vm.lifs:
lif.cleanUp()
def _create_vmi(self,vn_id,mac_address,
fixed_ips=[],security_groups=[],
extra_dhcp_opts=[],
project_obj=None,vm=None):
port = PortFixture(vn_id,
api_type='contrail',
mac_address=mac_address,
fixed_ips=fixed_ips,
extra_dhcp_opts=extra_dhcp_opts,
project_obj=self._proj_obj,
security_groups=security_groups)
port.setUp()
if vm:
vm.ports.append(port)
return port
def _delete_vmi(self,vm):
for port in vm.ports:
port.cleanUp()
def _process_vcenter_gateway_info(self):
return [VcenterGateway(gw) for gw in self._inputs.vcenter_gateway]
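
    # Each entry of self._inputs.vcenter_gateway is expected to be a dict of the form
    # {'name': <device name>, 'mgmt_ip': <management ip>, 'ports': [<port name>, ...]}
    # (inferred from the VcenterGateway properties below).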
class VcenterGateway:
"""Represents one vcenter gateway."""
def __init__(self,gateway):
self.gateway = gateway
@property
def name(self):
return self.gateway['name']
@property
def mgmt_ip(self):
return self.gateway['mgmt_ip']
@property
def ports(self):
return self.gateway['ports']
def get_port_uuid(self,port):
phy_device_fixture=PhysicalDeviceFixture(self.name,self.mgmt_ip)
phy_device_fixture.setUp()
phy_device_uuid = phy_device_fixture.phy_device.uuid
pif_fixture=PhysicalInterfaceFixture(port,device_id=phy_device_uuid)
pif_fixture.setUp()
return pif_fixture.uuid
| [
"[email protected]"
] | |
3496db296e088ab5b474d57d635d971b8e919291 | 923a14dd594191d77e30465027ece8371f28a7a6 | /web-serpng/code/serpng/jobs/services/search/user_data_tests.py | a41f50ac118c451b073c3ebd84206912b868bae7 | [] | no_license | alyago/django-web | 3af7b3389df59104eaf5e50ed9cc2c3e730fed7f | da3073eec6d676dfe0164502b80d2a1c75e89575 | refs/heads/master | 2021-01-10T19:33:45.425520 | 2013-11-21T09:43:37 | 2013-11-21T09:43:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,400 | py | """User Data Tests."""
from django.test import TestCase
import user_data
# JSON responses from the bridge to be used in the tests.
JSON_RESPONSE_WITH_NO_USER_DATA = {
'abc': 'I am not user data'
}
JSON_RESPONSE_WITH_GOOD_USER_DATA = {
'user_data': {
'recent_searches': ['rs1', 'rs2'],
'user_email': '[email protected]',
'saved_jobs': {
'job1': {'comment': 'abc'},
'job2': {'comment': 'def'}
}
}
}
JSON_RESPONSE_WITH_BAD_USER_DATA = {
'user_data': {}
}
JSON_RESPONSE_WITH_EMPTY_ARRAY_SAVED_JOBS = {
'user_data': {
'saved_jobs': []
}
}
JSON_RESPONSE_WITH_NULL_COMMENT_SAVED_JOB = {
'user_data': {
'saved_jobs': {
'job1': {'comment': 'abc'},
'job2': {'comment': None}
}
}
}
# Tests
class UserDataTestCase(TestCase):
"""User Data TestCase."""
# pylint: disable=R0904
def test_no_user_data_in_json_response(self):
"""Default values should be correct when there is no user data."""
test_user_data = user_data.UserData(JSON_RESPONSE_WITH_NO_USER_DATA)
self.assertIsNone(test_user_data.recent_searches)
self.assertIsNone(test_user_data.user_email)
self.assertEqual(test_user_data.saved_jobs, {})
def test_good_recent_searches(self):
"""Attribute 'recent_searches' should be correctly populated."""
test_user_data = user_data.UserData(JSON_RESPONSE_WITH_GOOD_USER_DATA)
self.assertEqual(test_user_data.recent_searches[1], 'rs2')
def test_good_user_email(self):
"""Attribute 'user_email' should be correctly populated."""
test_user_data = user_data.UserData(JSON_RESPONSE_WITH_GOOD_USER_DATA)
self.assertEqual(test_user_data.user_email, '[email protected]')
def test_good_saved_jobs(self):
"""Attribute 'saved_jobs' should be correctly populated."""
test_user_data = user_data.UserData(JSON_RESPONSE_WITH_GOOD_USER_DATA)
self.assertEqual(test_user_data.saved_jobs['job1'], 'abc')
def test_no_recent_searches(self):
"""Attribute 'recent_searches' should have good default value when user_data is empty."""
test_user_data = user_data.UserData(JSON_RESPONSE_WITH_BAD_USER_DATA)
self.assertIsNone(test_user_data.recent_searches)
def test_no_user_email(self):
"""Attribute 'user_email' should have good default value when user_data is empty."""
test_user_data = user_data.UserData(JSON_RESPONSE_WITH_BAD_USER_DATA)
self.assertIsNone(test_user_data.user_email)
def test_no_saved_jobs(self):
"""Attribute 'saved_jobs' should have good default value when user_data is empty."""
test_user_data = user_data.UserData(JSON_RESPONSE_WITH_BAD_USER_DATA)
self.assertEqual(test_user_data.saved_jobs, {})
def test_empty_array_saved_jobs(self):
"""Attribute 'saved_jobs' should have good default value when saved_jobs is empty."""
test_user_data = user_data.UserData(JSON_RESPONSE_WITH_EMPTY_ARRAY_SAVED_JOBS)
self.assertEqual(test_user_data.saved_jobs, {})
def test_null_comment_saved_job(self):
"""Attribute 'saved_jobs' should convert null comments to empty strings."""
test_user_data = user_data.UserData(JSON_RESPONSE_WITH_NULL_COMMENT_SAVED_JOB)
self.assertEqual(test_user_data.saved_jobs['job2'], '')
| [
"[email protected]"
] | |
ea0247a08d3dbfcc08f7339be1353955119ac626 | f5ef25c84e9b4846f98d520bc9a20d20b3d1b65c | /OOP/oop3.py | e7b6dedf4ec707a9c8abd83d79293c2b25573e9b | [] | no_license | amiraHag/python-basic-course2 | 45757ffdfa677c2accd553330cd2fd825208b0aa | 1fbfd08b34f3993299d869bd55c6267a61dc7810 | refs/heads/main | 2023-03-31T06:48:11.587127 | 2021-03-30T03:43:10 | 2021-03-30T03:43:10 | 327,271,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | # --------------------------------------------------------------------
# -- Object Oriented Programming => Instance Attributes and Methods --
# --------------------------------------------------------------------
# Self: Points To The Instance Created From The Class
# Instance Attributes: Attributes Defined Inside The Constructor For Each Instance
# -----------------------------------------------------------------------
# Instance Methods: Take A Self Parameter Which Points To The Instance Created From The Class
# Instance Methods Can Have More Than One Parameter Like Any Function
# Instance Methods Can Freely Access Attributes And Methods On The Same Object
# Instance Methods Can Access The Class Itself
# (An Added Illustration Of An Instance Method Appears After The Demo Below)
# -----------------------------------------------------------
class Member:
def __init__(self, first_name, middle_name, last_name):
self.fname = first_name
self.mname = middle_name
self.lname = last_name
member_one = Member("Amira", "Mustafa", "HM")
member_two = Member("Ahmed", "Hag", "Imam")
member_three = Member("Sara", "HI", "Mustafa")
# print(dir(member_one))
print(member_one.fname, member_one.mname, member_one.lname)
print(member_two.fname)
print(member_three.fname)
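# ------------------------------------------------------------------
# -- Added Illustration (Not Part Of The Original Lesson File) --
# The Header Comments Above Describe Instance Methods, But Member Defines None,
# So This Small Hypothetical Class Shows One In The Same Style.
# ------------------------------------------------------------------
class Greeter:
    def __init__(self, name):
        self.name = name  # Instance Attribute Defined Inside The Constructor
    def say_hello(self, politely=True):  # Instance Method: Self Plus One Extra Parameter
        if politely:
            return f"Hello {self.name}, Welcome Aboard"
        return f"Hi {self.name}"
greeter_one = Greeter("Amira")
print(greeter_one.say_hello())
print(greeter_one.say_hello(False))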
| [
"[email protected]"
] | |
8af41c09b124f2ec5b82fef8804ae4eefd794aa5 | 4759db9f7e74cec91edbb4c18c553b92913d1695 | /adafruit_atecc/adafruit_atecc_cert_util.py | 415c17ab0cb4833d4b867b6891196d9eb11ca90d | [
"MIT",
"LGPL-2.1-or-later",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | brentru/Adafruit_CircuitPython_ATECC | 9702e8e06123ab258fee39baf3462640401f9f28 | cceac6431ff28edcf410c53fc2db0c357533d774 | refs/heads/master | 2020-07-27T13:53:31.604065 | 2019-09-17T20:17:00 | 2019-09-17T20:17:00 | 209,113,921 | 1 | 0 | MIT | 2019-09-17T17:15:21 | 2019-09-17T17:15:21 | null | UTF-8 | Python | false | false | 6,488 | py | # Copyright (c) 2018 Arduino SA. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# The MIT License (MIT)
#
# Copyright (c) 2019 Brent Rubell for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_atecc_cert_util`
================================================================================
Certification Generation and Helper Utilities for the Adafruit_ATECC Module.
* Author(s): Brent Rubell
Implementation Notes
--------------------
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
from adafruit_binascii import b2a_base64
import adafruit_atecc.adafruit_atecc_asn1 as asn1
class CSR:
"""Certificate Signing Request Builder.
:param adafruit_atecc atecc: ATECC module.
:param slot_num: ATECC module slot (from 0 to 4).
:param bool private_key: Generate a new private key in selected slot?
:param str country: 2-letter country code.
:param str state_prov: State or Province name,
:param str city: City name.
:param str org: Organization name.
:param str org_unit: Organizational unit name.
"""
# pylint: disable=too-many-arguments, too-many-instance-attributes
def __init__(self, atecc, slot_num, private_key, country, state_prov,
city, org, org_unit):
self._atecc = atecc
self.private_key = private_key
self._slot = slot_num
self._country = country
self._state_province = state_prov
self._locality = city
self._org = org
self._org_unit = org_unit
self._common = self._atecc.serial_number
self._version_len = 3
self._cert = None
self._key = None
def generate_csr(self):
"""Generates and returns a certificate signing request."""
self._csr_begin()
csr = self._csr_end()
return csr
def _csr_begin(self):
"""Initializes CSR generation. """
assert 0 <= self._slot <= 4, "Provided slot must be between 0 and 4."
# Create a new key
self._key = bytearray(64)
if self.private_key:
self._atecc.gen_key(self._key, self._slot, self.private_key)
return
self._atecc.gen_key(self._key, self._slot, self.private_key)
def _csr_end(self):
"""Generates and returns
a certificate signing request as a base64 string."""
len_issuer_subject = asn1.issuer_or_subject_length(self._country, self._state_province,
self._locality, self._org,
self._org_unit, self._common)
len_sub_header = asn1.get_sequence_header_length(len_issuer_subject)
len_csr_info = self._version_len + len_issuer_subject
len_csr_info += len_sub_header + 91 + 2
len_csr_info_header = asn1.get_sequence_header_length(len_csr_info)
# CSR Info Packet
csr_info = bytearray()
# Append CSR Info --> [0:2]
asn1.get_sequence_header(len_csr_info, csr_info)
# Append Version --> [3:5]
asn1.get_version(csr_info)
# Append Subject --> [6:7]
asn1.get_sequence_header(len_issuer_subject, csr_info)
# Append Issuer or Subject
asn1.get_issuer_or_subject(csr_info, self._country, self._state_province,
self._locality, self._org, self._org_unit, self._common)
# Append Public Key
asn1.get_public_key(csr_info, self._key)
# Terminator
csr_info += b"\xa0\x00"
# Init. SHA-256 Calculation
csr_info_sha_256 = bytearray(64)
self._atecc.sha_start()
for i in range(0, len_csr_info + len_csr_info_header, 64):
chunk_len = (len_csr_info_header + len_csr_info) - i
if chunk_len > 64:
chunk_len = 64
if chunk_len == 64:
self._atecc.sha_update(csr_info[i:i+64])
else:
csr_info_sha_256 = self._atecc.sha_digest(csr_info[i:])
# Sign the SHA256 Digest
signature = bytearray(64)
signature = self._atecc.ecdsa_sign(self._slot, csr_info_sha_256)
# Calculations for signature and csr length
len_signature = asn1.get_signature_length(signature)
len_csr = len_csr_info_header + len_csr_info + len_signature
asn1.get_sequence_header_length(len_csr)
# append signature to csr
csr = bytearray()
asn1.get_sequence_header(len_csr, csr)
# append csr_info
csr += csr_info
asn1.get_signature(signature, csr)
# encode and return
csr = b2a_base64(csr)
return csr
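# ---------------------------------------------------------------------------
# Hedged usage sketch (added, not part of the upstream library): one way the
# CSR builder above is typically driven on CircuitPython hardware.  The I2C
# wiring, slot number and subject fields are illustrative assumptions only.
#
# import board
# import busio
# from adafruit_atecc.adafruit_atecc import ATECC
#
# i2c = busio.I2C(board.SCL, board.SDA)
# atecc = ATECC(i2c)
# csr_builder = CSR(atecc, 0, True, "US", "CA", "Oakland", "Example Org", "Engineering")
# print(csr_builder.generate_csr())
# ---------------------------------------------------------------------------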
| [
"[email protected]"
] | |
432c01b9bc0749e080f5030723946eea795b05b6 | eeade223e39130cac09fb4907da6410101af5935 | /setup.py | 3e74c7d16553cc22209fc859a2c55f4468a03da1 | [] | no_license | TrendingTechnology/jaxfg | 67cac95f7e37c2eac75574fa8473b89cc222137e | 7f19668b344944be196e6b61fdc36f1441bac819 | refs/heads/master | 2023-06-20T17:20:43.928788 | 2021-07-30T23:24:42 | 2021-07-31T00:33:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | from setuptools import find_packages, setup
setup(
name="jaxfg",
version="0.0",
description="Factor graphs in Jax",
url="http://github.com/brentyi/jaxfg",
author="brentyi",
author_email="[email protected]",
license="BSD",
packages=find_packages(),
package_data={"jaxfg": ["py.typed"]},
python_requires=">=3.7",
install_requires=[
"datargs",
"jax>=0.2.13",
"jaxlib",
"jaxlie>=1.0.0",
"jax_dataclasses>=1.0.0",
"overrides",
"scikit-sparse",
"termcolor",
"tqdm",
"typing_utils", # We can phase this out if we drop support for Python 3.7
"matplotlib",
],
extras_require={
"testing": [
"pytest",
# "pytest-cov",
# "hypothesis",
# "hypothesis[numpy]",
],
"type-checking": [
"mypy",
"types-termcolor",
],
},
)
| [
"[email protected]"
] | |
88fde4953ea93f45918c4891940b3b494d26ae2f | 7623d4ca5cacb259a1b2e7a98b1e8a3011592348 | /SICP/examples/ex2_83.py | b372d8db084f7f17d5cb1e2e2f63db57d0db0e8f | [] | no_license | nextdesusu/Learn-Python | 3b875ab5093844fe64cc13e717a3637bdfe62a9a | 3212059408eec27ee2ed359ac9d691b3d061372f | refs/heads/master | 2022-01-29T07:39:11.915177 | 2019-07-21T14:18:10 | 2019-07-21T14:18:10 | 198,063,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | def gcd(a, b):
while a != 0 and b != 0:
if a > b:
a = a % b
else:
b = b % a
return a + b
#print(gcd(50, 130))
class Complex:
def __init__(self, real, imag = 0):
self.real = real
self.imag = imag
def __str__(self):
return '{0} + {1}i'.format(self.real, self.imag)
class Rational:
def __init__(self, n, m):
self.n = n
if m == 0:
            raise ZeroDivisionError('denominator of a Rational cannot be zero')
self.m = m
@property
def equate(self):
return self.n / self.m
def __add__(self, other):
if isinstance(other, Rational):
            numerator = self.n * other.m + other.n * self.m
            denominator = self.m * other.m
            common = gcd(numerator, denominator)
            return Rational(numerator // common, denominator // common)
        return NotImplemented
def __str__(self):
return '{0} / {1}'.format(self.n, self.m)
def raise_(num):
if isinstance(num, int):
return Rational(num, 1)
if isinstance(num, Rational):
return float(num.equate)
if isinstance(num, float):
return Complex(num, 0)
a = 1
print(a)
a = raise_(a)
print(a)
a = raise_(a)
print(a)
a = raise_(a)
print(a) | [
"[email protected]"
] | |
ecadda233d55e5a381cea2a473aabeb40e553cf4 | f32e9b464a8c9fb7f5238935cfb5f83e840269e6 | /chat.py | 9bba623185a4235e003e9897cc735374256095c4 | [] | no_license | DavidArmendariz/python-chatbot | c192fc5f310d7c069c2a58b165ff8d90a1ceff2b | c7df66d4e0ae64c79ab75cc5cb58690efa677c23 | refs/heads/master | 2022-12-18T18:38:38.375681 | 2020-09-28T19:10:11 | 2020-09-28T19:10:11 | 258,566,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | from app import app, db
from app.models import User, Message, Chatroom
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'User': User, 'Message': Message, 'Chatroom': Chatroom} | [
"[email protected]"
] | |
32bc36980bd85af045910d5303f1b1c037b8938f | 3b60e6f4bbc011003ac4929f01eb7409918deb79 | /Analysis_v1/Simulation/Pythia/RSG/RSGfragments/RSGravitonToGammaGamma_kMpl01_M_5750_TuneCP5_13TeV_pythia8_cfi.py | dbf0343665487d8f89199e7c5e5a6aaec7a57103 | [] | no_license | uzzielperez/Analyses | d1a64a4e8730325c94e2bc8461544837be8a179d | 1d66fa94763d7847011ea551ee872936c4c401be | refs/heads/master | 2023-02-09T04:54:01.854209 | 2020-09-07T14:57:54 | 2020-09-07T14:57:54 | 120,850,137 | 0 | 0 | null | 2020-06-17T16:48:16 | 2018-02-09T03:14:04 | C++ | UTF-8 | Python | false | false | 1,324 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
from Configuration.Generator.Pythia8aMCatNLOSettings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(1.095e-3),
filterEfficiency = cms.untracked.double(1),
maxEventsToPrint = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP5SettingsBlock,
pythia8aMCatNLOSettingsBlock,
processParameters = cms.vstring(
'ExtraDimensionsG*:all = on',
'ExtraDimensionsG*:kappaMG = 0.541643794389',
'5100039:m0 = 5750.0',
'5100039:onMode = off',
'5100039:onIfAny = 22',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP5Settings',
'pythia8aMCatNLOSettings',
'processParameters',
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
] | |
47063c6f9762cc541be468fe4120e733110426e3 | d7a68c636e6128533b17975655bd6b46ed222916 | /adapter-transformers-adapters3.1.0/src/transformers/models/decision_transformer/modeling_decision_transformer.py | 959b9763d0bd48505d91336461376868845b5345 | [
"Apache-2.0"
] | permissive | cambridgeltl/autopeft | 69179f8faf2cc4d2164ff78e544dc3fe2d39c331 | d8ad6bea93aa413a54d0e09fe25bdd62b46cfcf5 | refs/heads/main | 2023-05-23T09:21:59.912941 | 2023-04-25T14:35:31 | 2023-04-25T14:35:31 | 594,316,585 | 26 | 4 | Apache-2.0 | 2023-04-25T14:35:32 | 2023-01-28T06:39:25 | Python | UTF-8 | Python | false | false | 43,844 | py | # coding=utf-8
# Copyright 2022 The HuggingFace Team The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch DecisionTransformer model."""
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from ...activations import ACT2FN
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
if version.parse(torch.__version__) >= version.parse("1.6"):
is_amp_available = True
from torch.cuda.amp import autocast
else:
is_amp_available = False
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
from .configuration_decision_transformer import DecisionTransformerConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "edbeeching/decision-transformer-gym-hopper-medium"
_CONFIG_FOR_DOC = "DecisionTransformerConfig"
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"edbeeching/decision-transformer-gym-hopper-medium",
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
]
# Copied from transformers.models.gpt2.modeling_gpt2.load_tf_weights_in_gpt2
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
"""Load tf checkpoints in a pytorch model"""
try:
import re
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(gpt2_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array.squeeze())
for name, array in zip(names, arrays):
name = name[6:] # skip "model/"
name = name.split("/")
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "w" or scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "wpe" or scope_names[0] == "wte":
pointer = getattr(pointer, scope_names[0])
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2Attention with GPT2->DecisionTransformerGPT2
class DecisionTransformerGPT2Attention(nn.Module):
def __init__(self, config, is_cross_attention=False, layer_idx=None):
super().__init__()
max_positions = config.max_position_embeddings
self.register_buffer(
"bias",
torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
1, 1, max_positions, max_positions
),
)
self.register_buffer("masked_bias", torch.tensor(-1e4))
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
self.split_size = self.embed_dim
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale_attn_weights = config.scale_attn_weights
self.is_cross_attention = is_cross_attention
# Layer-wise attention scaling, reordering, and upcasting
self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
self.layer_idx = layer_idx
self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
if self.is_cross_attention:
self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
else:
self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
self.num_heads = self.num_heads - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, query, key, value, attention_mask=None, head_mask=None):
attn_weights = torch.matmul(query, key.transpose(-1, -2))
if self.scale_attn_weights:
attn_weights = attn_weights / torch.tensor(
value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device
)
# Layer-wise attention scaling
if self.scale_attn_by_inverse_layer_idx:
attn_weights = attn_weights / float(self.layer_idx + 1)
if not self.is_cross_attention:
# if only "normal" attention layer implements causal mask
query_length, key_length = query.size(-2), key.size(-2)
causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].to(torch.bool)
mask_value = torch.finfo(attn_weights.dtype).min
# Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
# Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
attn_weights = torch.where(causal_mask, attn_weights, mask_value)
if attention_mask is not None:
# Apply the attention mask
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
# Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
attn_weights = attn_weights.type(value.dtype)
attn_weights = self.attn_dropout(attn_weights)
# Mask heads if we want to
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
return attn_output, attn_weights
def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
# Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
bsz, num_heads, q_seq_len, dk = query.size()
_, _, k_seq_len, _ = key.size()
# Preallocate attn_weights for `baddbmm`
attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
# Compute Scale Factor
scale_factor = 1.0
if self.scale_attn_weights:
scale_factor /= float(value.size(-1)) ** 0.5
if self.scale_attn_by_inverse_layer_idx:
scale_factor /= float(self.layer_idx + 1)
# Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
if is_amp_available:
with autocast(enabled=False):
q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
else:
q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
if not self.is_cross_attention:
# if only "normal" attention layer implements causal mask
query_length, key_length = query.size(-2), key.size(-2)
causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
mask_value = torch.finfo(attn_weights.dtype).min
# Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
# Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
attn_weights = torch.where(causal_mask, attn_weights, mask_value)
if attention_mask is not None:
# Apply the attention mask
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
# Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise
if attn_weights.dtype != torch.float32:
raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
attn_weights = attn_weights.type(value.dtype)
attn_weights = self.attn_dropout(attn_weights)
# Mask heads if we want to
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
return attn_output, attn_weights
def _split_heads(self, tensor, num_heads, attn_head_size):
"""
Splits hidden_size dim into attn_head_size and num_heads
"""
new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
tensor = tensor.view(new_shape)
return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def _merge_heads(self, tensor, num_heads, attn_head_size):
"""
Merges attn_head_size dim and num_attn_heads dim into hidden_size
"""
tensor = tensor.permute(0, 2, 1, 3).contiguous()
new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
return tensor.view(new_shape)
def forward(
self,
hidden_states: Optional[Tuple[torch.FloatTensor]],
layer_past: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = False,
output_attentions: Optional[bool] = False,
) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
if encoder_hidden_states is not None:
if not hasattr(self, "q_attn"):
raise ValueError(
"If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to"
" instantiate class with `DecisionTransformerGPT2Attention(..., is_cross_attention=True)`."
)
query = self.q_attn(hidden_states)
key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
attention_mask = encoder_attention_mask
else:
query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
query = self._split_heads(query, self.num_heads, self.head_dim)
key = self._split_heads(key, self.num_heads, self.head_dim)
value = self._split_heads(value, self.num_heads, self.head_dim)
if layer_past is not None:
past_key, past_value = layer_past
key = torch.cat((past_key, key), dim=-2)
value = torch.cat((past_value, value), dim=-2)
if use_cache is True:
present = (key, value)
else:
present = None
if self.reorder_and_upcast_attn:
attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
else:
attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
attn_output = self.c_proj(attn_output)
attn_output = self.resid_dropout(attn_output)
outputs = (attn_output, present)
if output_attentions:
outputs += (attn_weights,)
return outputs # a, present, (attentions)
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP with GPT2->DecisionTransformerGPT2
class DecisionTransformerGPT2MLP(nn.Module):
def __init__(self, intermediate_size, config):
super().__init__()
embed_dim = config.hidden_size
self.c_fc = Conv1D(intermediate_size, embed_dim)
self.c_proj = Conv1D(embed_dim, intermediate_size)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2Block with GPT2->DecisionTransformerGPT2
class DecisionTransformerGPT2Block(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
hidden_size = config.hidden_size
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = DecisionTransformerGPT2Attention(config, layer_idx=layer_idx)
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
if config.add_cross_attention:
self.crossattention = DecisionTransformerGPT2Attention(
config, is_cross_attention=True, layer_idx=layer_idx
)
self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = DecisionTransformerGPT2MLP(inner_dim, config)
def forward(
self,
hidden_states: Optional[Tuple[torch.FloatTensor]],
layer_past: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = False,
output_attentions: Optional[bool] = False,
) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
residual = hidden_states
hidden_states = self.ln_1(hidden_states)
attn_outputs = self.attn(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
outputs = attn_outputs[1:]
# residual connection
hidden_states = attn_output + residual
if encoder_hidden_states is not None:
# add one self-attention block for cross-attention
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
"cross-attention layers by setting `config.add_cross_attention=True`"
)
residual = hidden_states
hidden_states = self.ln_cross_attn(hidden_states)
cross_attn_outputs = self.crossattention(
hidden_states,
attention_mask=attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
)
attn_output = cross_attn_outputs[0]
# residual connection
hidden_states = residual + attn_output
outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
residual = hidden_states
hidden_states = self.ln_2(hidden_states)
feed_forward_hidden_states = self.mlp(hidden_states)
# residual connection
hidden_states = residual + feed_forward_hidden_states
if use_cache:
outputs = (hidden_states,) + outputs
else:
outputs = (hidden_states,) + outputs[1:]
return outputs # hidden_states, present, (attentions, cross_attentions)
class DecisionTransformerGPT2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DecisionTransformerConfig
load_tf_weights = load_tf_weights_in_gpt2
base_model_prefix = "transformer"
is_parallelizable = True
supports_gradient_checkpointing = True
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
#
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
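        # Added worked example: with n_layer = 12 residual blocks, the "c_proj" std below
        # becomes initializer_range / sqrt(2 * 12), i.e. the 1/sqrt(N) scaling with
        # N = 2 * n_layer = 24 residual paths (attention + MLP per block).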
for name, p in module.named_parameters():
if "c_proj" in name and "weight" in name:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)))
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, DecisionTransformerGPT2Model):
module.gradient_checkpointing = value
class DecisionTransformerGPT2Model(DecisionTransformerGPT2PreTrainedModel):
_keys_to_ignore_on_load_missing = ["attn.masked_bias"]
def __init__(self, config):
super().__init__(config)
self.embed_dim = config.hidden_size
self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList(
[DecisionTransformerGPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]
)
self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
# Model parallel
self.model_parallel = False
self.device_map = None
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2Model.forward
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past_key_values is None:
past_length = 0
past_key_values = tuple([None] * len(self.h))
else:
past_length = past_key_values[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# GPT2Attention mask.
if attention_mask is not None:
if batch_size <= 0:
raise ValueError("batch_size has to be defined and > 0")
attention_mask = attention_mask.view(batch_size, -1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
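            # Added note: e.g. a 2-D mask row [1, 1, 0] becomes [0.0, 0.0, torch.finfo(dtype).min]
            # here, then broadcasts over heads and query positions when added to the raw scores.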
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.add_cross_attention and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
all_hidden_states = () if output_hidden_states else None
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
# Model parallel
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
# Ensure layer_past is on same device as hidden_states (might not be correct)
if layer_past is not None:
layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
# Ensure that attention_mask is always on the same device as hidden_states
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if isinstance(head_mask, torch.Tensor):
head_mask = head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, use_cache, output_attentions)
return custom_forward
outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
None,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
)
else:
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask[i],
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = outputs[0]
if use_cache is True:
presents = presents + (outputs[1],)
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
# Model Parallel: If it's the last layer for that device, put things on the next device
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and "cuda:" + str(k) != self.last_device:
hidden_states = hidden_states.to("cuda:" + str(k + 1))
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
@dataclass
class DecisionTransformerOutput(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
state_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, state_dim)`):
Environment state predictions
action_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, action_dim)`):
Model action predictions
return_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, 1)`):
Predicted returns for each state
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
state_preds: torch.FloatTensor = None
action_preds: torch.FloatTensor = None
return_preds: torch.FloatTensor = None
hidden_states: torch.FloatTensor = None
attentions: torch.FloatTensor = None
last_hidden_state: torch.FloatTensor = None
class DecisionTransformerPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DecisionTransformerConfig
base_model_prefix = "decision_transformer"
main_input_name = "states"
supports_gradient_checkpointing = False
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
DECISION_TRANSFORMER_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`~DecisionTransformerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
DECISION_TRANSFORMER_INPUTS_DOCSTRING = r"""
Args:
states (`torch.FloatTensor` of shape `(batch_size, episode_length, state_dim)`):
The states for each step in the trajectory
actions (`torch.FloatTensor` of shape `(batch_size, episode_length, act_dim)`):
The actions taken by the "expert" policy for the current state, these are masked for auto regressive
prediction
rewards (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
The rewards for each state, action
returns_to_go (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
The returns for each state in the trajectory
timesteps (`torch.LongTensor` of shape `(batch_size, episode_length)`):
The timestep for each step in the trajectory
attention_mask (`torch.LongTensor` of shape `(batch_size, episode_length)`):
Masking, used to mask the actions when performing autoregressive prediction
"""
@add_start_docstrings("The Decision Transformer Model", DECISION_TRANSFORMER_START_DOCSTRING)
class DecisionTransformerModel(DecisionTransformerPreTrainedModel):
"""
The model builds upon the GPT2 architecture to perform autoregressive prediction of actions in an offline RL
setting. Refer to the paper for more details: https://arxiv.org/abs/2106.01345
"""
def __init__(self, config):
super().__init__(config)
self.config = config
self.hidden_size = config.hidden_size
# note: the only difference between this GPT2Model and the default Huggingface version
# is that the positional embeddings are removed (since we'll add those ourselves)
self.encoder = DecisionTransformerGPT2Model(config)
self.embed_timestep = nn.Embedding(config.max_ep_len, config.hidden_size)
self.embed_return = torch.nn.Linear(1, config.hidden_size)
self.embed_state = torch.nn.Linear(config.state_dim, config.hidden_size)
self.embed_action = torch.nn.Linear(config.act_dim, config.hidden_size)
self.embed_ln = nn.LayerNorm(config.hidden_size)
# note: we don't predict states or returns for the paper
self.predict_state = torch.nn.Linear(config.hidden_size, config.state_dim)
self.predict_action = nn.Sequential(
*([nn.Linear(config.hidden_size, config.act_dim)] + ([nn.Tanh()] if config.action_tanh else []))
)
self.predict_return = torch.nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(DECISION_TRANSFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=DecisionTransformerOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
states=None,
actions=None,
rewards=None,
returns_to_go=None,
timesteps=None,
attention_mask=None,
output_hidden_states=None,
output_attentions=None,
return_dict=None,
) -> Union[Tuple, DecisionTransformerOutput]:
r"""
Returns:
Examples:
```python
>>> from transformers import DecisionTransformerModel
>>> import torch
>>> model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-medium")
>>> # evaluation
>>> model = model.to(device)
>>> model.eval()
>>> env = gym.make("Hopper-v3")
>>> state_dim = env.observation_space.shape[0]
>>> act_dim = env.action_space.shape[0]
>>> state = env.reset()
>>> states = torch.from_numpy(state).reshape(1, 1, state_dim).to(device=device, dtype=torch.float32)
>>> actions = torch.zeros((1, 1, act_dim), device=device, dtype=torch.float32)
>>> rewards = torch.zeros(1, 1, device=device, dtype=torch.float32)
>>> target_return = torch.tensor(TARGET_RETURN, dtype=torch.float32).reshape(1, 1)
>>> timesteps = torch.tensor(0, device=device, dtype=torch.long).reshape(1, 1)
>>> attention_mask = torch.zeros(1, 1, device=device, dtype=torch.float32)
>>> # forward pass
>>> with torch.no_grad():
... state_preds, action_preds, return_preds = model(
... states=states,
... actions=actions,
... rewards=rewards,
... returns_to_go=target_return,
... timesteps=timesteps,
... attention_mask=attention_mask,
... return_dict=False,
... )
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
batch_size, seq_length = states.shape[0], states.shape[1]
if attention_mask is None:
# attention mask for GPT: 1 if can be attended to, 0 if not
attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long)
# embed each modality with a different head
state_embeddings = self.embed_state(states)
action_embeddings = self.embed_action(actions)
returns_embeddings = self.embed_return(returns_to_go)
time_embeddings = self.embed_timestep(timesteps)
# time embeddings are treated similar to positional embeddings
state_embeddings = state_embeddings + time_embeddings
action_embeddings = action_embeddings + time_embeddings
returns_embeddings = returns_embeddings + time_embeddings
# this makes the sequence look like (R_1, s_1, a_1, R_2, s_2, a_2, ...)
# which works nice in an autoregressive sense since states predict actions
stacked_inputs = (
torch.stack((returns_embeddings, state_embeddings, action_embeddings), dim=1)
.permute(0, 2, 1, 3)
.reshape(batch_size, 3 * seq_length, self.hidden_size)
)
stacked_inputs = self.embed_ln(stacked_inputs)
# to make the attention mask fit the stacked inputs, have to stack it as well
stacked_attention_mask = (
torch.stack((attention_mask, attention_mask, attention_mask), dim=1)
.permute(0, 2, 1)
.reshape(batch_size, 3 * seq_length)
)
device = stacked_inputs.device
# we feed in the input embeddings (not word indices as in NLP) to the model
encoder_outputs = self.encoder(
inputs_embeds=stacked_inputs,
attention_mask=stacked_attention_mask,
position_ids=torch.zeros(stacked_attention_mask.shape, device=device, dtype=torch.long),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
x = encoder_outputs[0]
# reshape x so that the second dimension corresponds to the original
# returns (0), states (1), or actions (2); i.e. x[:,1,t] is the token for s_t
x = x.reshape(batch_size, seq_length, 3, self.hidden_size).permute(0, 2, 1, 3)
# get predictions
return_preds = self.predict_return(x[:, 2]) # predict next return given state and action
state_preds = self.predict_state(x[:, 2]) # predict next state given state and action
action_preds = self.predict_action(x[:, 1]) # predict next action given state
if not return_dict:
return (state_preds, action_preds, return_preds)
return DecisionTransformerOutput(
last_hidden_state=encoder_outputs.last_hidden_state,
state_preds=state_preds,
action_preds=action_preds,
return_preds=return_preds,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
| [
"[email protected]"
] | |
8aeb1300f85a1aaafb71ce05a4910eda695d01de | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_461.py | 5bdb3315298efd0854676b72294e5e643b54f60a | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | # How to query filter in django without multiple occurrences
ParentModel.objects.filter(childmodel__in=ChildModel.objects.all()).distinct()
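# Hedged sketch (added, not in the original snippet): the query above assumes models
# roughly like the following, where the reverse lookup name "childmodel" is Django's
# default related query name for ChildModel's ForeignKey to ParentModel.
# class ParentModel(models.Model):
#     name = models.CharField(max_length=100)
# class ChildModel(models.Model):
#     parent = models.ForeignKey(ParentModel, on_delete=models.CASCADE)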
| [
"[email protected]"
] | |
25894a978235e5a7ba954ec8cdc0e0047e8254e1 | 2fd087fbc5faf43940153693823969df6c8ec665 | /pyc_decrypted/latest/dropbox/metadata/vorbis.py | e7e48da8552e55eb862035894baafb7a71cedce1 | [] | no_license | mickeystone/DropBoxLibrarySRC | ed132bbffda7f47df172056845e5f8f6c07fb5de | 2e4a151caa88b48653f31a22cb207fff851b75f8 | refs/heads/master | 2021-05-27T05:02:30.255399 | 2013-08-27T13:16:55 | 2013-08-27T13:16:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,159 | py | #Embedded file name: dropbox/metadata/vorbis.py
from collections import defaultdict
import struct
from .utils import safe_read
def readVorbisComment(file_obj):
toret = defaultdict(list)
try:
vendor_length = struct.unpack('<I', safe_read(file_obj, 4))[0]
safe_read(file_obj, vendor_length)
user_comment_list_length = struct.unpack('<I', safe_read(file_obj, 4))[0]
for i in range(user_comment_list_length):
length = struct.unpack('<I', safe_read(file_obj, 4))[0]
comment = ''.join(struct.unpack('<%dc' % length, safe_read(file_obj, length)))
k, v = comment.split('=')
toret[k.lower()].append(v)
return toret
except Exception:
return {}
def decodeBlockPicture(file_obj):
try:
pic_type, mime_length = struct.unpack('>II', safe_read(file_obj, 8))
mime = ''.join(struct.unpack('>%dc' % mime_length, safe_read(file_obj, mime_length)))
desc_length = struct.unpack('>I', safe_read(file_obj, 4))[0]
description = unicode(''.join(struct.unpack('>%dc' % desc_length, safe_read(file_obj, desc_length))), 'utf-8')
width, height, depth, colors, data_len = struct.unpack('>IIIII', safe_read(file_obj, 20))
data = safe_read(file_obj, data_len)
return {'type': pic_type,
'mime': mime,
'description': description,
'width': width,
'height': height,
'depth': depth,
'colors': colors,
'data': data}
except Exception:
return {}
def readBlockPicture(file_obj):
try:
buf = ''
buf += safe_read(file_obj, 8)
pic_type, mime_length = struct.unpack('>II', buf[-8:])
buf += safe_read(file_obj, mime_length)
buf += safe_read(file_obj, 4)
desc_length = struct.unpack('>I', buf[-4:])[0]
buf += safe_read(file_obj, desc_length)
buf += safe_read(file_obj, 20)
width, height, depth, colors, data_len = struct.unpack('>IIIII', buf[-20:])
buf += safe_read(file_obj, data_len)
return {'metadata_block_picture': [buf]}
except Exception:
return {}
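# ---------------------------------------------------------------------------
# Hedged usage sketch (added, not part of the decompiled module): these helpers
# expect a binary file object already positioned at the start of the relevant
# block, e.g. just past the header of a FLAC VORBIS_COMMENT metadata block.
# The offset variable below is an assumption for illustration only.
#
# with open('track.flac', 'rb') as f:
#     f.seek(vorbis_comment_block_offset)  # found by walking the FLAC metadata blocks
#     tags = readVorbisComment(f)          # e.g. {'artist': ['...'], 'title': ['...']}
# ---------------------------------------------------------------------------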
| [
"[email protected]"
] | |
71b22fc91381f282dc98817aa113784b5ba94563 | 714fadd433c05b126b7909e1de14ee00bd2b0096 | /tests/test_http_client/test_http_service.py | 1dcba7edf292aa326727edd97db710b4b948af70 | [
"MIT"
] | permissive | Zheaoli/huskar-python | 4b1a30fc6939d387e09aaa484e7afe3ca190d293 | f62a2d3636b2804a552bf59f76903cf2841d75c9 | refs/heads/master | 2020-08-03T03:59:19.279390 | 2019-09-27T06:34:09 | 2019-09-27T06:34:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,669 | py | # -*- coding: utf-8 -*-
from mock import Mock
import pytest
import gevent
from huskar_sdk_v2.http.components.service import Service
initial_service_data = {u'192.168.1.1_17400': {
u'ip': u'192.168.1.1',
u'meta': {
u'control_daemon_port': 5544,
u'protocol': u'thrift',
u'pushSequence': 4974,
u'soaVersion': u'0.14.5.3',
u'weight': 1},
u'name': u'arch.test',
u'port': {u'main': 17400},
u'state': u'up'},
}
added_service_data = {"192.168.1.1_23471": {
"ip": "192.168.1.1",
"state": "up",
"meta": {
"control_daemon_port": 5544,
"soaVersion": "0.14.5.3",
"protocol": "thrift", "weight": 1,
"pushSequence": 4975},
"name": "arch.test",
"port": {"main": 23471}}
}
@pytest.fixture
def service_component(request, requests_mock, started_client):
assert started_client.connected.wait(1)
return Service('arch.test', 'alpha-stable')
@pytest.fixture
def fake_service_component(started_file_cache_client,
fake_service_with_file_cache_client):
started_file_cache_client.watched_configs.add_watch(
"arch.test", 'overall')
started_file_cache_client.watched_switches.add_watch(
"arch.test", 'another-cluster')
started_file_cache_client.watched_services.add_watch(
"arch.test", 'alpha-stable')
return fake_service_with_file_cache_client('arch.test', 'alpha-stable')
def test_service_should_yield_the_same_format_as_old_huskar(
service_component, started_client,
fake_service_component):
assert started_client.connected.wait(1)
assert service_component.get_service_node_list(
'arch.test', 'alpha-stable') == initial_service_data
gevent.sleep(0.5)
assert fake_service_component.get_service_node_list(
'arch.test', 'alpha-stable') == initial_service_data
def test_service_changed_should_change_service_nodes(
requests_mock, service_component, started_client,
fake_service_component):
assert started_client.connected.wait(1)
requests_mock.set_result_file('test_data_changed.txt')
assert requests_mock.wait_processed()
new_service_data = dict(initial_service_data)
new_service_data.update(added_service_data)
assert service_component.get_service_node_list(
'arch.test', 'alpha-stable') == new_service_data
gevent.sleep(0.5)
assert fake_service_component.get_service_node_list(
'arch.test', 'alpha-stable') == new_service_data
def test_service_deleted_should_change_service_nodes(
requests_mock, service_component, started_client,
fake_service_component):
listener = Mock()
assert started_client.connected.wait(1)
service_component.register_hook_function(
'arch.test', 'alpha-stable', listener)
requests_mock.set_result_file('test_data_deleted.txt')
assert requests_mock.wait_processed()
assert listener.call_count == 2
listener.assert_any_call({})
assert service_component.get_service_node_list(
'arch.test', 'alpha-stable') == {}
gevent.sleep(0.5)
assert fake_service_component.get_service_node_list(
'arch.test', 'alpha-stable') == {}
def test_service_node_changed_should_notify_listeners(
requests_mock, service_component, started_client,
fake_service_component):
assert started_client.connected.wait(1)
listener = Mock()
fake_listener = Mock()
service_component.register_hook_function(
'arch.test', 'alpha-stable', listener)
fake_service_component.register_hook_function(
'arch.test', 'alpha-stable', fake_listener)
listener.assert_called_once_with(initial_service_data)
gevent.sleep(0.5)
fake_listener.assert_called_with(initial_service_data)
requests_mock.set_result_file('test_data_changed.txt')
assert requests_mock.wait_processed()
new_service_data = dict(initial_service_data)
new_service_data.update(added_service_data)
listener.assert_any_call(new_service_data)
gevent.sleep(0.5)
fake_listener.assert_any_call(new_service_data)
def test_file_client_add_watch_after_data_already_processed(
requests_mock, service_component, started_client,
fake_service_component):
fake_service_component.client.app_id_cluster_map.pop('arch.test', None)
assert started_client.connected.wait(1)
listener = Mock()
fake_listener = Mock()
service_component.register_hook_function(
'arch.test', 'alpha-stable', listener)
listener.assert_called_once_with(initial_service_data)
gevent.sleep(0.5)
assert ('alpha-stable' not in
fake_service_component.client.app_id_cluster_map['arch.test'])
fake_service_component.register_hook_function(
'arch.test', 'alpha-stable', fake_listener)
fake_listener.assert_called_with(initial_service_data)
assert ('alpha-stable' in
fake_service_component.client.app_id_cluster_map['arch.test'])
def test_service_batch_add_watch(requests_mock, service_component,
started_client, started_file_cache_client,
fake_service_component):
service_component.preprocess_service_mappings({})
fake_service_component.preprocess_service_mappings({})
assert service_component.preprocess_service_mappings({
'arch.test1': {'that-cluster'},
'arch.test2': {'this-cluster'},
}) is True
assert fake_service_component.preprocess_service_mappings({
'arch.test1': {'that-cluster'},
'arch.test2': {'this-cluster'},
}) is True
assert dict(started_client.watched_services.app_id_cluster_map) == {
'arch.test': {'alpha-stable'},
'arch.test1': {'that-cluster'},
'arch.test2': {'this-cluster'},
}
fake_services = started_file_cache_client.watched_services
assert dict(fake_services.app_id_cluster_map) == {
'arch.test': {'alpha-stable'},
'arch.test1': {'that-cluster'},
'arch.test2': {'this-cluster'},
}
def test_legacy_interface(requests_mock, service_component):
service_component.set_min_server_num(1)
def test_add_service_in_the_middle_of_runtime(
requests_mock, service_component,
started_client, fake_service_component):
assert started_client.connected.wait(1)
assert service_component.get_service_node_list(
'arch.test', 'alpha-stable') == initial_service_data
gevent.sleep(0.5)
assert fake_service_component.get_service_node_list(
'arch.test', 'alpha-stable') == initial_service_data
requests_mock.add_response(
r'{"body": {"service": {"arch.test": {"beta-stable": '
r'{"192.168.1.1_9999": {"value": "{\"ip\": \"192.168.1.1\"'
r', \"state\": \"up\", \"meta\": {\"control_daemon_port\": 5544,'
r' \"soaVersion\": \"0.14.5.3\", \"protocol\": \"thrift\",'
r' \"weight\": 1, \"pushSequence\": 4975}, \"name\":'
r' \"arch.test\", \"port\": {\"main\": 9999}}"}}}}},'
r' "message": "update"}')
assert requests_mock.wait_processed()
assert service_component.get_service_node_list(
'arch.test', 'beta-stable') == {}
gevent.sleep(0.5)
assert fake_service_component.get_service_node_list(
'arch.test', 'beta-stable') == {}
assert service_component.add_service('arch.test', 'beta-stable',
timeout=10)
assert fake_service_component.add_service('arch.test', 'beta-stable',
timeout=10)
requests_mock.add_response(
r'{"body": {"service": {"arch.test": {"beta-stable":'
r' {"192.168.1.1_9999": {"value": "{\"ip\":'
r' \"192.168.1.1\", \"state\": \"up\", \"meta\":'
r' {\"control_daemon_port\": 5544, \"soaVersion\": \"0.14.5.3\",'
r' \"protocol\": \"thrift\", \"weight\": 1, \"pushSequence\":'
r' 4975}, \"name\": \"arch.test\", \"port\": {\"main\": 9999'
r'}}"}}}}}, "message": "update"}')
assert requests_mock.wait_processed()
assert service_component.get_service_node_list(
'arch.test', 'beta-stable')
gevent.sleep(0.5)
assert fake_service_component.get_service_node_list(
'arch.test', 'beta-stable')
def test_service_should_not_update_if_watch_is_removed(
requests_mock, service_component,
started_client, fake_service_component):
assert started_client.connected.wait(1)
assert service_component.get_service_node_list(
'arch.test', 'alpha-stable') == initial_service_data
gevent.sleep(0.5)
assert fake_service_component.get_service_node_list(
'arch.test', 'alpha-stable') == initial_service_data
assert service_component.unwatch_service(
'arch.test', 'alpha-stable', timeout=2.0)
assert fake_service_component.unwatch_service(
'arch.test', 'alpha-stable', timeout=2.0)
requests_mock.add_response(
r'{"body": {"service": {"arch.test": {"alpha-stable": '
r'{"192.168.1.1_9999": {"value": "{\"ip\": \"192.168.1.1\",'
r' \"state\": \"up\", \"meta\": {\"control_daemon_port\": 5544,'
r' \"soaVersion\": \"0.14.5.3\", \"protocol\": \"thrift\", \"weight\":'
r' 1, \"pushSequence\": 4975}, \"name\": \"arch.test\", \"port\": '
r'{\"main\": 9999}}"}}}}}, "message": "update"}')
assert requests_mock.wait_processed()
assert service_component.get_service_node_list(
'arch.test', 'alpha-stable') == initial_service_data
assert fake_service_component.get_service_node_list(
'arch.test', 'alpha-stable') == initial_service_data
| [
"[email protected]"
] | |
0c4d74fc244e79ebb2b0c11a0c7f7fcf431d901f | 079c07c5d97eb60d36269e27309e84b25ea0aaeb | /guidehero-backend/app/managers/call_manager.py | 2df061c86f9dcff1932fc86ea2e7e2a95baf97e2 | [] | no_license | itdream-dev/python | 3aa44329673f05e2a86e1cba56cb88101c777233 | eda81b802b99f45933bdf0d22b508837cfa538f0 | refs/heads/master | 2023-03-05T12:27:42.776870 | 2020-05-11T15:54:45 | 2020-05-11T15:54:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | # -*- coding: utf-8 -*-
from config import Ivysaur
from lib.registry import get_registry
from lib.models.call import Call
from lib.push_notifications import PushNotifications
class CallManager(object):
def __init__(self):
registry = get_registry()
self.call_repo = registry['CALL_REPO']
self.user_repo = registry['USER_REPO']
self.device_repo = registry['DEVICE_REPO']
self.tokbox = registry['TOKBOX']
self.push_notifications = PushNotifications()
def start_session(self, user, user_id_2):
session = self.tokbox.create_session()
session_id = session.session_id
token = self.tokbox.generate_token(session_id)
recepient = self.user_repo.get_user(user_id_2)
self.call_repo.start_session(user, recepient, session_id)
device = self.device_repo.get_latest_device(user_id_2)
if device:
self.push_notifications.send_notification(
device.device_token,
'Incoming call from %s' % user.name,
sound='calling.caf'
)
return {
'api_key': Ivysaur.Config.TOKBOX_API_KEY,
'session_id': session_id,
'token': token
}
def get_pending_call(self, user):
pending_call = self.call_repo.get_pending_call(user)
if not pending_call:
return {}
session_id = pending_call.session_id
token = self.tokbox.generate_token(session_id)
return {
'api_key': Ivysaur.Config.TOKBOX_API_KEY,
'session_id': session_id,
'token': token,
'caller_name': pending_call.caller.name
}
def report_connected(self, session_id):
call = self.call_repo.get_call_from_session_id(session_id)
if not call or call.status != Call.INITIATED:
return
self.call_repo.report_connected(call)
def report_ended(self, session_id):
call = self.call_repo.get_call_from_session_id(session_id)
if not call or call.status == Call.ENDED:
return
self.call_repo.report_ended(call)
| [
"[email protected]"
] | |
aaac7828f0ebe58e41fab34c975790676ce05ef9 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_116/ch32_2020_04_08_11_44_06_529462.py | cc4a1b9f2ef0e4e497cbb913dca1ed7af116e79d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | def lista_primos(n):
    a = 2
    lista = []
    while len(lista) != n:
        if a == 2:
            lista.append(a)
            a += 1
        elif a % 2 == 0:
            a += 1
        else:
            # trial division by odd numbers to check primality
            contador = 3
            primo = True
            while contador * contador <= a:
                if a % contador == 0:
                    primo = False
                    break
                contador += 2
            if primo:
                lista.append(a)
            a += 1
    return lista
"[email protected]"
] | |
dcd05b317337bac479b22dcaea4f461603eaa11b | 02e23da0431623db86c8138bda350a1d526d4185 | /Archivos Python Documentos/Graficas/.history/matriz_20200222132010.py | 8534cb3045cb3b356fb2e42fe4a210b62a5a9f3b | [] | no_license | Jaamunozr/Archivos-python | d9996d3d10ff8429cd1b4c2b396016a3a5482889 | 1f0af9ba08f12ac27e111fcceed49bbcf3b39657 | refs/heads/master | 2022-08-05T14:49:45.178561 | 2022-07-13T13:44:39 | 2022-07-13T13:44:39 | 244,073,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | import numpy as np
import os
import pylab as pl
import matplotlib.pyplot as plt
os.system("clear")
g = [
    [12, 23],
    [34, 34],
]
print (g)
"""
raiz=np.sqrt
ln=np.log
X = np.arange(-2, 12, 0.1)
Y = np.arange(-2, 12, 0.1)
J=np.count_nonzero(Y)
print (J)
a = [0] * J
for i in range(J):
a[i] = Y[i]
X[25]=0.49
X[65]=4.49
X[105]=8.49
Y[25]=0.49
Y[65]=4.49
Y[105]=8.49
ax, ay = 0.5, 0.5
bx, by = 4.5, 0.4
cx, cy = 8.5, 0.5
dx, dy = 0.5, 4.5
ex, ey = 8.5, 4.5
fx, fy = 0.5, 8.5
gx, gy = 4.5, 8.5
hx, hy = 8.5, 8.5
l = 2
rho= 100
ik=25
ma=raiz((X-ax)**2+(Y-ay)**2)
mb=raiz((X-bx)**2+(Y-by)**2)
mc=raiz((X-cx)**2+(Y-cy)**2)
md=raiz((X-dx)**2+(Y-dy)**2)
me=raiz((X-ex)**2+(Y-ey)**2)
mf=raiz((X-fx)**2+(Y-fy)**2)
mg=raiz((X-gx)**2+(Y-gy)**2)
mh=raiz((X-hx)**2+(Y-hy)**2)
va=ln((l+raiz(ma**2+l**2))/ma)
vb=ln((l+raiz(mb**2+l**2))/mb)
vc=ln((l+raiz(mc**2+l**2))/mc)
vd=ln((l+raiz(md**2+l**2))/md)
ve=ln((l+raiz(me**2+l**2))/me)
vf=ln((l+raiz(mf**2+l**2))/mf)
vg=ln((l+raiz(mg**2+l**2))/mg)
vh=ln((l+raiz(mh**2+l**2))/mh)
Vt=((rho*ik)/(2*np.pi))*(va+vb+vc+vd+ve+vf+vg+vh)
print (Vt[::].max())
print(type(Vt))
print(Vt.shape)
plt.figure(figsize=(X,Y))
plt.imshow(Vt, cmap = "summer")
plt.colorbar(
plt.show()
)""" | [
"[email protected]"
] | |
c711158cdb65871fda79be945d0bae0d04d531a8 | 50afc0db7ccfc6c80e1d3877fc61fb67a2ba6eb7 | /challenge20(T-primes)/solutions/Coder45.py | 32ba0e7800485a552e5637112a3fad818e939995 | [
"MIT"
] | permissive | banana-galaxy/challenges | 792caa05e7b8aa10aad8e04369fc06aaf05ff398 | 8655c14828607535a677e2bb18689681ee6312fa | refs/heads/master | 2022-12-26T23:58:12.660152 | 2020-10-06T13:38:04 | 2020-10-06T13:38:04 | 268,851,516 | 11 | 8 | MIT | 2020-09-22T21:21:30 | 2020-06-02T16:24:41 | Python | UTF-8 | Python | false | false | 165 | py | def solution(n):
    # a T-prime has exactly three divisors, e.g. solution(4) -> True (1, 2, 4)
    y = len([k for k in range(1, n + 1) if not n % k])
    return y == 3
"[email protected]"
] | |
62f3762c1f4cc277c8f0b20c4777ee5c189eb345 | e593f5b34050eba13fbadeee3563346fa0f1c25b | /tests/plugins/test_speedrunslive.py | 81731cbdc212b4e31f3887da8a114f76df267300 | [
"BSD-2-Clause",
"CC-BY-SA-2.0",
"MIT",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license"
] | permissive | fuglede/streamlink | f9e56e434b01ae426edd83f13037384af294838a | 2661d40164986f979edc2e6867f8daeceba73a44 | refs/heads/master | 2020-03-25T08:36:43.175618 | 2018-08-05T15:10:15 | 2018-08-05T15:10:15 | 143,622,979 | 0 | 0 | BSD-2-Clause | 2018-08-05T15:07:12 | 2018-08-05T15:07:12 | null | UTF-8 | Python | false | false | 570 | py | import unittest
from streamlink.plugins.speedrunslive import SpeedRunsLive
class TestPluginSpeedRunsLive(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
'http://www.speedrunslive.com/#!/twitch',
]
for url in should_match:
self.assertTrue(SpeedRunsLive.can_handle_url(url))
def test_can_handle_url_negative(self):
should_not_match = [
'https://www.twitch.tv',
]
for url in should_not_match:
self.assertFalse(SpeedRunsLive.can_handle_url(url))
| [
"[email protected]"
] | |
5ec6d29eb18f4b5a615c47c002f54ce12402b6b1 | 611847354ec077c5bc65fdb08c9f45ff45b4bfcc | /code/docker/python/flask_2/app.py | 234a0768f1eb0f2a9ab328dd7b3a0fc9be9cf1a3 | [
"MIT"
] | permissive | afcarl/pythoh_machine_learning_excerise | 1a572e4c6db11ee28d5c245f20fc81b334d04995 | f2b6e93eb02345f9078642cff3066e3e65557e51 | refs/heads/master | 2020-03-21T06:00:08.623962 | 2017-06-08T23:03:35 | 2017-06-08T23:03:35 | 138,193,305 | 1 | 0 | null | 2018-06-21T16:05:53 | 2018-06-21T16:05:52 | null | UTF-8 | Python | false | false | 2,365 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from flask import Flask, render_template, request
from wtforms import Form, TextAreaField, validators
import pickle
import sqlite3
import os
import numpy as np
from vectorizer import vect
app = Flask(__name__)
cur_dir = os.path.dirname(__file__)
clf = pickle.load(open(os.path.join(cur_dir,
'pickle_objects',
'classifier.pkl'), 'rb'))
db = os.path.join(cur_dir, 'reviews.sqlite')
def classify(document):
label = {0: 'negative', 1: 'positive'}
X =vect.transform([document])
y = clf.predict(X)[0]
proba = np.max(clf.predict_proba(X))
return label[y], proba
def train(document, y):
X = vect.transform([document])
clf.partial_fit(X, [y])
def sqlite_entry(path, document, y):
conn = sqlite3.connect(path)
c = conn.cursor()
c.execute("INSERT INTO review_db (review, sentiment, date)"\
" VALUES (?, ?, DATETIME('now'))", (document, y))
conn.commit()
conn.close()
class ReviewForm(Form):
moviereview = TextAreaField('',
[validators.DataRequired(),
validators.length(min=15)])
@app.route('/')
def index():
form = ReviewForm(request.form)
return render_template('reviewform.html', form=form)
@app.route('/requests', methods=['POST'])
def results():
form = ReviewForm(request.form)
if request.method == 'POST' and form.validate():
        review = request.form['moviereview']
y, proba = classify(review)
return render_template('results.html',
content=review,
prediction=y,
probability=round(proba * 100, 2))
return render_template('reviewform.html', form=form)
@app.route('/thanks', methods=['POST'])
def feedback():
feedback = request.form['feedback_button']
review = request.form['review']
prediction = request.form['prediction']
inv_label = {'negative': 0, 'positive': 1}
y = inv_label[prediction]
if feedback == 'Incorrect':
y = int(not(y))
train(review, y)
sqlite_entry(db, review, y)
return render_template('thanks.html')
if __name__ == '__main__':
app.run(debug=True) | [
"[email protected]"
] | |
9bf502aa53de5ff285b04513b8db97f45b9147ae | 64d923ab490341af97c4e7f6d91bf0e6ccefdf4b | /tensorforce/core/policies/state_value.py | 3f2776d338b73a577f31700a2da5f1127a5c3642 | [
"Apache-2.0"
] | permissive | tensorforce/tensorforce | 38d458fedeeaa481adf083397829cea434d020cd | 1bf4c3abb471062fb66f9fe52852437756fd527b | refs/heads/master | 2023-08-17T17:35:34.578444 | 2023-08-14T20:14:08 | 2023-08-14T20:14:08 | 85,491,050 | 1,312 | 246 | Apache-2.0 | 2023-08-14T20:14:10 | 2017-03-19T16:24:22 | Python | UTF-8 | Python | false | false | 2,932 | py | # Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorforce.core import SignatureDict, TensorSpec, tf_function
from tensorforce.core.policies import BasePolicy
class StateValue(BasePolicy):
"""
Base class for state-value functions, here categorized as "degenerate" policy.
Args:
device (string): Device name
(<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
l2_regularization (float >= 0.0): Scalar controlling L2 regularization
(<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
name (string): <span style="color:#0000C0"><b>internal use</b></span>.
states_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
auxiliaries_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
actions_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
"""
def __init__(
self, *, device=None, l2_regularization=None, name=None, states_spec=None,
auxiliaries_spec=None, actions_spec=None
):
BasePolicy.__init__(
self=self, device=device, l2_regularization=l2_regularization, name=name,
states_spec=states_spec, auxiliaries_spec=auxiliaries_spec, actions_spec=actions_spec
)
def input_signature(self, *, function):
if function == 'state_value':
return SignatureDict(
states=self.states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True)
)
else:
return super().input_signature(function=function)
def output_signature(self, *, function):
if function == 'state_value':
return SignatureDict(
singleton=TensorSpec(type='float', shape=()).signature(batched=True)
)
else:
return super().output_signature(function=function)
@tf_function(num_args=4)
def state_value(self, *, states, horizons, internals, auxiliaries):
raise NotImplementedError
| [
"[email protected]"
] | |
98fb0dcf64f5486c42788855054e4d8f97762dd7 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-nlp/huaweicloudsdknlp/v2/model/post_sentence_embedding_req.py | 70377e135a6ed8d93d796857d996707216550b46 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,253 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class PostSentenceEmbeddingReq:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'sentences': 'list[str]',
'domain': 'str'
}
attribute_map = {
'sentences': 'sentences',
'domain': 'domain'
}
def __init__(self, sentences=None, domain=None):
"""PostSentenceEmbeddingReq
The model defined in huaweicloud sdk
        :param sentences: List of texts. Each text is 1 to 512 characters long, the list holds 1 to 1000 items, and the text encoding is UTF-8.
:type sentences: list[str]
        :param domain: Supported domain type. Currently only the general domain is supported; the default is general.
:type domain: str
"""
self._sentences = None
self._domain = None
self.discriminator = None
self.sentences = sentences
if domain is not None:
self.domain = domain
@property
def sentences(self):
"""Gets the sentences of this PostSentenceEmbeddingReq.
        List of texts. Each text is 1 to 512 characters long, the list holds 1 to 1000 items, and the text encoding is UTF-8.
:return: The sentences of this PostSentenceEmbeddingReq.
:rtype: list[str]
"""
return self._sentences
@sentences.setter
def sentences(self, sentences):
"""Sets the sentences of this PostSentenceEmbeddingReq.
        List of texts. Each text is 1 to 512 characters long, the list holds 1 to 1000 items, and the text encoding is UTF-8.
:param sentences: The sentences of this PostSentenceEmbeddingReq.
:type sentences: list[str]
"""
self._sentences = sentences
@property
def domain(self):
"""Gets the domain of this PostSentenceEmbeddingReq.
        Supported domain type. Currently only the general domain is supported; the default is general.
:return: The domain of this PostSentenceEmbeddingReq.
:rtype: str
"""
return self._domain
@domain.setter
def domain(self, domain):
"""Sets the domain of this PostSentenceEmbeddingReq.
        Supported domain type. Currently only the general domain is supported; the default is general.
:param domain: The domain of this PostSentenceEmbeddingReq.
:type domain: str
"""
self._domain = domain
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PostSentenceEmbeddingReq):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
3eed1b10050537ad9781069bb46ed2f3703cf569 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/intermediate-bite-14-generate-a-table-of-n-sequences.py | 55c17a68b9ae77cc101badbd72287e480fb740fa | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,471 | py | """
DATE: 05 Nov 2020
TASK DESCRIPTION:
Write a function that receives one or more sequences. The sequences are already defined for you.
The function should return a table (list of strings) where the columns are the sequences
(example below).
To keep it simple we work with equally sized sequences so you don't have to worry about
handling a missing value (you should end up with a grid of 6 rows x n columns).
There are some Pythonic idioms you can use here, hint: think of pants ;)
Example call (look at the tests for more detail):
>>> generate_table(names, aliases)
['Julian | Pythonista', 'Bob | Nerd', 'PyBites | Coder',
'Dante | Pythonista', 'Martin | Nerd', 'Rodolfo | Coder']
Bonus: use a generator to build up the table rows.
"""
import random
names = 'Julian Bob PyBites Dante Martin Rodolfo'.split()
aliases = 'Pythonista Nerd Coder'.split() * 2
points = random.sample(range(81, 101), 6)
awake = [True, False] * 3
SEPARATOR = ' | '
### ----------- My solution ---------------------------
def my_generate_table(*args):
l = []
result = zip(*args)
for i in result:
s = ""
for t in i:
if s == "":
s = str(t)
else:
s = s + " | " + str(t)
l.append(s)
return l
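# --- Added illustrative usage (not part of the original bite) ---------------
# A quick sanity check of the solution above, assuming the predefined `names`
# and `aliases` sequences defined earlier in this file: each printed row
# should look like 'Julian | Pythonista'.
if __name__ == '__main__':
    for row in my_generate_table(names, aliases):
        print(row)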
### ---------- PyBites original solution ---------------
def pyb_generate_table(*sequences):
for seq in zip(*sequences):
seq = [str(val) for val in seq]
yield SEPARATOR.join(seq) | [
"[email protected]"
] | |
43ef6671cbd2943a73a2201439c31fdfc5c0ad9c | 54a745510b16111f5e5f610a07be49ea1e79fccf | /py1810/hello_mysql_01.py | 086070e7afa84b55e4f9c256485a983058c32dcc | [] | no_license | SonDog0/bigdata | 84a5b7c58ad9680cdc0e49ac6088f482e09118a5 | e6cd1e3bbb0bfec0c89a31b3fb4ef66d50c272be | refs/heads/master | 2020-04-22T02:24:16.469718 | 2019-03-13T08:59:26 | 2019-03-13T08:59:26 | 170,047,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,590 | py | # 파이썬으로 MySQL, MariaDB 다루기
# To use a MySQL database from Python,
# you need a MySQL DB module written to the Python DB API spec.
# The pyMySQL module is the one most commonly used.
import pymysql
# # create mysql connection
# conn = pymysql.connect(host='13.209.88.188', user= 'son', password= '931027',db='SON_MARIADB', charset='utf8')
#
# curs = conn.cursor()
#
# curs.execute('DROP TABLE items')
# curs.execute('''create table items( item_id INTEGER PRIMARY KEY AUTO_INCREMENT, name TEXT, price INTEGER)''' )
# # execute sql query
# sql = 'select * from books'
# curs.execute(sql)
#
# # process the result set
# for rs in curs.fetchall():
#     print(rs[0], rs[1], rs[2], rs[3]) # array-based cursor
#
#
#
#
# # close mysql connection
# conn.close()
# # create mysql connection
# conn = pymysql.connect(host='13.209.88.188', user= 'son', password= '931027',db='SON_MARIADB', charset='utf8')
# # create a dict cursor from the connection
# curs = conn.cursor(pymysql.cursors.DictCursor)
#
# # execute sql query
# sql = 'select * from books'
# curs.execute(sql)
#
# # process the result set
# for rs in curs.fetchall():
#     print(rs['bno'], rs['bname'], rs['bpub'], rs['bprice']) # dict-based cursor
#
# # close mysql connection
# conn.close()
# Store the multiples of 2, 3 and 5 from 1 to 100.
# The table name is numbers.
# The fields are no, no2, no3, no5.
# create mysql connection
conn = pymysql.connect(host='13.209.88.188', user= 'son', password= '931027',db='SON_MARIADB', charset='utf8')
# create a cursor from the connection
curs = conn.cursor(pymysql.cursors.DictCursor)
# execute sql statements
create_sql = 'create table numbers( no2 int, no3 int, no5 int )'
drop_sql = 'drop table numbers'
sql = 'insert into numbers values(%s,%s,%s)'
# sql = 'select * from books'
curs.execute(drop_sql)
curs.execute(create_sql)
# multiples of 2, 3 and 5 from 1 to 100
num1 = 0
num2 = 0
num3 = 0
for i in range (1,101):
if i % 2 == 0:
num1 = i
else:
num1 = 0
if i % 3 == 0:
num2 = i
else:
num2 = 0
if i % 5 == 0:
num3 = i
else:
num3 = 0
curs.execute(sql, (num1, num2, num3))
# commit the changes to the server
conn.commit()
# process the result set
select_sql = 'select * from numbers'
curs.execute(select_sql)
for rs in curs.fetchall():
    print(rs['no2'], rs['no3'], rs['no5']) # dict-based cursor
# close mysql connection
conn.close()
| [
"[email protected]"
] | |
639b97de6a8ed56bc002ebcf1f711245cbb5584e | 2793721e5cbfccfedac75556e34dba22999530d7 | /Dynamic_Programming/venv/bin/easy_install-3.7 | 4b809b9ca4fadb6a24070714033b737b0d49bbb0 | [] | no_license | iafjayoza/Python | 135e613d1d23c493b05a009843b40cbca6a1d318 | aaa05b0d655c8f0a47ced0100a844d99f852b2a4 | refs/heads/master | 2022-12-07T07:21:46.494885 | 2020-09-06T09:03:27 | 2020-09-06T09:03:27 | 282,707,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | 7 | #!/Users/jo049566/Desktop/Jay/Jay_Data/Study_Repo/Python/Projects/Dynamic_Programming/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"[email protected]"
] | |
eb3d9991bac5d69b10d1a291e2e099785c5e1bdb | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/leetCode/Backtracking/90_SubsetsII.py | f0018bb559069fbfa983759b7fcba413f3f6cb4b | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,397 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
class Solution(object):
    def subsetsWithDup(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        if not nums:
            return []
        nums.sort()
        nums_len = len(nums)
        # Keep the subsets without duplicate subsets
        subsets = [[nums[0]]]
        # Keep the previous subsets which contains previous nums.
        pre_subset = [[nums[0]]]
        for i in range(1, nums_len):
            # Combine current num with the previous subsets,
            # Then update the previous subsets
            if nums[i] == nums[i - 1]:
                for j in range(len(pre_subset)):
                    one_set = pre_subset[j][:]
                    one_set.append(nums[i])
                    subsets.append(one_set)
                    pre_subset[j] = one_set
            # Combine current num with all the subsets before.
            # Then update the previous subsets
            else:
                pre_subset = []
                for j in range(len(subsets)):
                    one_set = subsets[j][:]
                    one_set.append(nums[i])
                    subsets.append(one_set)
                    pre_subset.append(one_set)
                pre_subset.append([nums[i]])
                subsets.append([nums[i]])
        subsets.append([])
        return subsets
"""
[]
[1,2]
[1,2,2]
[1,2,2,3,3,4,5]
"""
| [
"[email protected]"
] | |
6315e17d884b08aa11eab2a3d71e667e140f18bc | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc040/A/4812227.py | e74fe7e059ba85e08c3cc200758d2172a2a1467f | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | n,x=map(int,input().split());print(min(x-1,n-x)) | [
"[email protected]"
] | |
f24083eb0c7654f23ecf8369b85752a9772562e2 | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/enums/types/targeting_dimension.py | 76a70911a6f15e25b79765af970fefb4bcd708ba | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,273 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v4.enums',
marshal='google.ads.googleads.v4',
manifest={
'TargetingDimensionEnum',
},
)
class TargetingDimensionEnum(proto.Message):
r"""The dimensions that can be targeted. """
class TargetingDimension(proto.Enum):
r"""Enum describing possible targeting dimensions."""
UNSPECIFIED = 0
UNKNOWN = 1
KEYWORD = 2
AUDIENCE = 3
TOPIC = 4
GENDER = 5
AGE_RANGE = 6
PLACEMENT = 7
PARENTAL_STATUS = 8
INCOME_RANGE = 9
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
adb2babffe1e8af59930020f6c17f6d45db5f76f | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/request/KoubeiTradeOrderConsultRequest.py | 2defd325c725861c41724ed3832b3e090ad2407b | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 3,936 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiTradeOrderConsultModel import KoubeiTradeOrderConsultModel
class KoubeiTradeOrderConsultRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, KoubeiTradeOrderConsultModel):
self._biz_content = value
else:
self._biz_content = KoubeiTradeOrderConsultModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'koubei.trade.order.consult'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| [
"[email protected]"
] | |
3c9ce756eb733e12cf686c8c32401331d69dad3f | 5ee028ee2582a2d566c22a32097a1fcbed314fcc | /openwsn-fw-antiJamming/bsp/chips/si70x/SConscript | 8694311edc5303fbdb892109f6bbf7a70d61cfda | [] | permissive | ssciancalepore/BitTransfer | 70c5b271743ebe683d7a3a37d595dbab132f903e | b9d343b0219259f4870e9362b99c27f544014b89 | refs/heads/master | 2022-06-20T18:38:03.271254 | 2019-09-15T04:56:32 | 2019-09-15T04:56:32 | 199,583,953 | 1 | 1 | BSD-3-Clause | 2022-06-03T22:45:01 | 2019-07-30T05:53:29 | C | UTF-8 | Python | false | false | 116 | Import('env')
localEnv = env.Clone()
source = ['si70x.c']
si70x = localEnv.Object(source=source)
Return('si70x') | [
"[email protected]"
] | ||
d74fcb05f9f029d6ad582d79695f8433a2079244 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_test/_internal/import_analysis.py | 9cc5376feb59d3d3b5b855832763ded054f1feb9 | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 14,066 | py | """Analyze python import statements."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import os
import re
from . import types as t
from .io import (
read_binary_file,
)
from .util import (
display,
ApplicationError,
is_subdir,
)
from .data import (
data_context,
)
VIRTUAL_PACKAGES = set([
'ansible.module_utils.six',
])
def get_python_module_utils_imports(compile_targets):
"""Return a dictionary of module_utils names mapped to sets of python file paths.
:type compile_targets: list[TestTarget]
:rtype: dict[str, set[str]]
"""
module_utils = enumerate_module_utils()
virtual_utils = set(m for m in module_utils if any(m.startswith('%s.' % v) for v in VIRTUAL_PACKAGES))
module_utils -= virtual_utils
imports_by_target_path = {}
for target in compile_targets:
imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)
def recurse_import(import_name, depth=0, seen=None): # type: (str, int, t.Optional[t.Set[str]]) -> t.Set[str]
"""Recursively expand module_utils imports from module_utils files."""
display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)
if seen is None:
seen = set([import_name])
results = set([import_name])
# virtual packages depend on the modules they contain instead of the reverse
if import_name in VIRTUAL_PACKAGES:
for sub_import in sorted(virtual_utils):
if sub_import.startswith('%s.' % import_name):
if sub_import in seen:
continue
seen.add(sub_import)
matches = sorted(recurse_import(sub_import, depth + 1, seen))
for result in matches:
results.add(result)
import_path = get_import_path(import_name)
if import_path not in imports_by_target_path:
import_path = get_import_path(import_name, package=True)
if import_path not in imports_by_target_path:
raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)
# process imports in reverse so the deepest imports come first
for name in sorted(imports_by_target_path[import_path], reverse=True):
if name in virtual_utils:
continue
if name in seen:
continue
seen.add(name)
matches = sorted(recurse_import(name, depth + 1, seen))
for result in matches:
results.add(result)
return results
for module_util in module_utils:
# recurse over module_utils imports while excluding self
module_util_imports = recurse_import(module_util)
module_util_imports.remove(module_util)
# add recursive imports to all path entries which import this module_util
for target_path in imports_by_target_path:
if module_util in imports_by_target_path[target_path]:
for module_util_import in sorted(module_util_imports):
if module_util_import not in imports_by_target_path[target_path]:
display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
imports_by_target_path[target_path].add(module_util_import)
imports = dict([(module_util, set()) for module_util in module_utils | virtual_utils])
for target_path in imports_by_target_path:
for module_util in imports_by_target_path[target_path]:
imports[module_util].add(target_path)
# for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package
for virtual_util in virtual_utils:
parent_package = '.'.join(virtual_util.split('.')[:-1])
imports[virtual_util] = imports[parent_package]
display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6)
for module_util in sorted(imports):
if not imports[module_util]:
package_path = get_import_path(module_util, package=True)
if os.path.exists(package_path) and not os.path.getsize(package_path):
continue # ignore empty __init__.py files
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def get_python_module_utils_name(path): # type: (str) -> str
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils'
else:
prefix = 'ansible.module_utils'
if path.endswith('/__init__.py'):
path = os.path.dirname(path)
if path == base_path:
name = prefix
else:
name = prefix + '.' + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
return name
def enumerate_module_utils():
"""Return a list of available module_utils imports.
:rtype: set[str]
"""
module_utils = []
for path in data_context().content.walk_files(data_context().content.module_utils_path):
ext = os.path.splitext(path)[1]
if ext != '.py':
continue
module_utils.append(get_python_module_utils_name(path))
return set(module_utils)
def extract_python_module_utils_imports(path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
:rtype: set[str]
"""
# Python code must be read as bytes to avoid a SyntaxError when the source uses comments to declare the file encoding.
# See: https://www.python.org/dev/peps/pep-0263
# Specifically: If a Unicode string with a coding declaration is passed to compile(), a SyntaxError will be raised.
code = read_binary_file(path)
try:
tree = ast.parse(code)
except SyntaxError as ex:
# Treat this error as a warning so tests can be executed as best as possible.
# The compile test will detect and report this syntax error.
display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg))
return set()
finder = ModuleUtilFinder(path, module_utils)
finder.visit(tree)
return finder.imports
def get_import_path(name, package=False): # type: (str, bool) -> str
"""Return a path from an import name."""
if package:
filename = os.path.join(name.replace('.', '/'), '__init__.py')
else:
filename = '%s.py' % name.replace('.', '/')
if name.startswith('ansible.module_utils.') or name == 'ansible.module_utils':
path = os.path.join('lib', filename)
elif data_context().content.collection and (
name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name) or
name == 'ansible_collections.%s.plugins.module_utils' % data_context().content.collection.full_name):
path = '/'.join(filename.split('/')[3:])
else:
raise Exception('Unexpected import name: %s' % name)
return path
def path_to_module(path): # type: (str) -> str
"""Convert the given path to a module name."""
module = os.path.splitext(path)[0].replace(os.path.sep, '.')
if module.endswith('.__init__'):
module = module[:-9]
return module
def relative_to_absolute(name, level, module, path, lineno): # type: (str, int, str, str, int) -> str
"""Convert a relative import to an absolute import."""
if level <= 0:
absolute_name = name
elif not module:
display.warning('Cannot resolve relative import "%s%s" in unknown module at %s:%d' % ('.' * level, name, path, lineno))
absolute_name = 'relative.nomodule'
else:
parts = module.split('.')
if level >= len(parts):
display.warning('Cannot resolve relative import "%s%s" above module "%s" at %s:%d' % ('.' * level, name, module, path, lineno))
absolute_name = 'relative.abovelevel'
else:
absolute_name = '.'.join(parts[:-level] + [name])
return absolute_name
class ModuleUtilFinder(ast.NodeVisitor):
"""AST visitor to find valid module_utils imports."""
def __init__(self, path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
"""
self.path = path
self.module_utils = module_utils
self.imports = set()
# implicitly import parent package
if path.endswith('/__init__.py'):
path = os.path.split(path)[0]
if path.startswith('lib/ansible/module_utils/'):
package = os.path.split(path)[0].replace('/', '.')[4:]
if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
self.add_import(package, 0)
self.module = None
if data_context().content.is_ansible:
            # Various parts of the Ansible source tree execute within different modules.
# To support import analysis, each file which uses relative imports must reside under a path defined here.
# The mapping is a tuple consisting of a path pattern to match and a replacement path.
            # During analysis, any relative imports not covered here will result in warnings, which can be fixed by adding the appropriate entry.
path_map = (
('^hacking/build_library/build_ansible/', 'build_ansible/'),
('^lib/ansible/', 'ansible/'),
('^test/lib/ansible_test/_data/sanity/validate-modules/', 'validate_modules/'),
('^test/units/', 'test/units/'),
('^test/lib/ansible_test/_internal/', 'ansible_test/_internal/'),
('^test/integration/targets/.*/ansible_collections/(?P<ns>[^/]*)/(?P<col>[^/]*)/', r'ansible_collections/\g<ns>/\g<col>/'),
('^test/integration/targets/.*/library/', 'ansible/modules/'),
)
for pattern, replacement in path_map:
if re.search(pattern, self.path):
revised_path = re.sub(pattern, replacement, self.path)
self.module = path_to_module(revised_path)
break
else:
# This assumes that all files within the collection are executed by Ansible as part of the collection.
# While that will usually be true, there are exceptions which will result in this resolution being incorrect.
self.module = path_to_module(os.path.join(data_context().content.collection.directory, self.path))
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_Import(self, node):
"""
:type node: ast.Import
"""
self.generic_visit(node)
# import ansible.module_utils.MODULE[.MODULE]
# import ansible_collections.{ns}.{col}.plugins.module_utils.module_utils.MODULE[.MODULE]
self.add_imports([alias.name for alias in node.names], node.lineno)
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_ImportFrom(self, node):
"""
:type node: ast.ImportFrom
"""
self.generic_visit(node)
if not node.module:
return
module = relative_to_absolute(node.module, node.level, self.module, self.path, node.lineno)
if not module.startswith('ansible'):
return
# from ansible.module_utils import MODULE[, MODULE]
# from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
# from ansible_collections.{ns}.{col}.plugins.module_utils import MODULE[, MODULE]
# from ansible_collections.{ns}.{col}.plugins.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
self.add_imports(['%s.%s' % (module, alias.name) for alias in node.names], node.lineno)
def add_import(self, name, line_number):
"""
:type name: str
:type line_number: int
"""
import_name = name
while self.is_module_util_name(name):
if name in self.module_utils:
if name not in self.imports:
display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
self.imports.add(name)
return # duplicate imports are ignored
name = '.'.join(name.split('.')[:-1])
if is_subdir(self.path, data_context().content.test_path):
return # invalid imports in tests are ignored
# Treat this error as a warning so tests can be executed as best as possible.
# This error should be detected by unit or integration tests.
display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
def add_imports(self, names, line_no): # type: (t.List[str], int) -> None
"""Add the given import names if they are module_utils imports."""
for name in names:
if self.is_module_util_name(name):
self.add_import(name, line_no)
@staticmethod
def is_module_util_name(name): # type: (str) -> bool
"""Return True if the given name is a module_util name for the content under test. External module_utils are ignored."""
if data_context().content.is_ansible and name.startswith('ansible.module_utils.'):
return True
if data_context().content.collection and name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name):
return True
return False
| [
"[email protected]"
] | |
6e3f3b1486dc70ef90cb88af554179fd8f6dc4d5 | 59090da2fe4e848b986c704b1ecf06ebe2d730b1 | /conferences/items.py | 459b144b8d8631dcf42753824838e783ac88a031 | [
"MIT"
] | permissive | manuphatak/conferences | 75449d2b16d546d4c66e9363369331239c74c9bd | 67e8880fe7049c003650d83e090b95cc09b45da5 | refs/heads/master | 2021-06-05T04:03:18.071859 | 2015-12-25T01:13:18 | 2015-12-25T01:13:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ConferencesItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| [
"[email protected]"
] | |
cd7c58f94edaf9c24fad73a64af8e44d5887e94d | 90312ba1088363f12408b9869d89e31d6ad658e5 | /mifare_classic/src/python/mifare_classic.py | c96368023c6e19ad3e9a43933d96a426cec021f3 | [
"ISC"
] | permissive | Tosyk/formats-kaitai-io.github.io | c3e9d0df4deae557f5ac4d36290c7052be4c16bb | 1faec646734b93815d39bc638ead4bc9a37eca3e | refs/heads/master | 2023-07-29T07:22:10.818349 | 2021-09-12T11:41:45 | 2021-09-12T11:41:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,148 | py | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class MifareClassic(KaitaiStruct):
"""You can get a dump for testing by the link: https://github.com/zhovner/mfdread/raw/master/dump.mfd
.. seealso::
Source - https://github.com/nfc-tools/libnfc
https://www.nxp.com/docs/en/data-sheet/MF1S70YYX_V1.pdf
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self._raw_sectors = []
self.sectors = []
i = 0
while not self._io.is_eof():
self._raw_sectors.append(self._io.read_bytes((((4 if i >= 32 else 1) * 4) * 16)))
_io__raw_sectors = KaitaiStream(BytesIO(self._raw_sectors[-1]))
self.sectors.append(MifareClassic.Sector(i == 0, _io__raw_sectors, self, self._root))
i += 1
class Key(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.key = self._io.read_bytes(6)
class Sector(KaitaiStruct):
def __init__(self, has_manufacturer, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.has_manufacturer = has_manufacturer
self._read()
def _read(self):
if self.has_manufacturer:
self.manufacturer = MifareClassic.Manufacturer(self._io, self, self._root)
self._raw_data_filler = self._io.read_bytes(((self._io.size() - self._io.pos()) - 16))
_io__raw_data_filler = KaitaiStream(BytesIO(self._raw_data_filler))
self.data_filler = MifareClassic.Sector.Filler(_io__raw_data_filler, self, self._root)
self.trailer = MifareClassic.Trailer(self._io, self, self._root)
class Values(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.values = []
i = 0
while not self._io.is_eof():
self.values.append(MifareClassic.Sector.Values.ValueBlock(self._io, self, self._root))
i += 1
class ValueBlock(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.valuez = [None] * (3)
for i in range(3):
self.valuez[i] = self._io.read_u4le()
self.addrz = [None] * (4)
for i in range(4):
self.addrz[i] = self._io.read_u1()
@property
def addr(self):
if hasattr(self, '_m_addr'):
return self._m_addr if hasattr(self, '_m_addr') else None
if self.valid:
self._m_addr = self.addrz[0]
return self._m_addr if hasattr(self, '_m_addr') else None
@property
def addr_valid(self):
if hasattr(self, '_m_addr_valid'):
return self._m_addr_valid if hasattr(self, '_m_addr_valid') else None
self._m_addr_valid = ((self.addrz[0] == ~(self.addrz[1])) and (self.addrz[0] == self.addrz[2]) and (self.addrz[1] == self.addrz[3]))
return self._m_addr_valid if hasattr(self, '_m_addr_valid') else None
@property
def valid(self):
if hasattr(self, '_m_valid'):
return self._m_valid if hasattr(self, '_m_valid') else None
self._m_valid = ((self.value_valid) and (self.addr_valid))
return self._m_valid if hasattr(self, '_m_valid') else None
@property
def value_valid(self):
if hasattr(self, '_m_value_valid'):
return self._m_value_valid if hasattr(self, '_m_value_valid') else None
self._m_value_valid = ((self.valuez[0] == ~(self.valuez[1])) and (self.valuez[0] == self.valuez[2]))
return self._m_value_valid if hasattr(self, '_m_value_valid') else None
@property
def value(self):
if hasattr(self, '_m_value'):
return self._m_value if hasattr(self, '_m_value') else None
if self.valid:
self._m_value = self.valuez[0]
return self._m_value if hasattr(self, '_m_value') else None
class Filler(KaitaiStruct):
"""only to create _io."""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.data = self._io.read_bytes(self._io.size())
@property
def block_size(self):
if hasattr(self, '_m_block_size'):
return self._m_block_size if hasattr(self, '_m_block_size') else None
self._m_block_size = 16
return self._m_block_size if hasattr(self, '_m_block_size') else None
@property
def data(self):
if hasattr(self, '_m_data'):
return self._m_data if hasattr(self, '_m_data') else None
self._m_data = self.data_filler.data
return self._m_data if hasattr(self, '_m_data') else None
@property
def blocks(self):
if hasattr(self, '_m_blocks'):
return self._m_blocks if hasattr(self, '_m_blocks') else None
io = self.data_filler._io
_pos = io.pos()
io.seek(0)
self._m_blocks = []
i = 0
while not io.is_eof():
self._m_blocks.append(io.read_bytes(self.block_size))
i += 1
io.seek(_pos)
return self._m_blocks if hasattr(self, '_m_blocks') else None
@property
def values(self):
if hasattr(self, '_m_values'):
return self._m_values if hasattr(self, '_m_values') else None
io = self.data_filler._io
_pos = io.pos()
io.seek(0)
self._m_values = MifareClassic.Sector.Values(io, self, self._root)
io.seek(_pos)
return self._m_values if hasattr(self, '_m_values') else None
class Manufacturer(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.nuid = self._io.read_u4le()
self.bcc = self._io.read_u1()
self.sak = self._io.read_u1()
self.atqa = self._io.read_u2le()
self.manufacturer = self._io.read_bytes(8)
class Trailer(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.key_a = MifareClassic.Key(self._io, self, self._root)
self._raw_access_bits = self._io.read_bytes(3)
_io__raw_access_bits = KaitaiStream(BytesIO(self._raw_access_bits))
self.access_bits = MifareClassic.Trailer.AccessConditions(_io__raw_access_bits, self, self._root)
self.user_byte = self._io.read_u1()
self.key_b = MifareClassic.Key(self._io, self, self._root)
class AccessConditions(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.raw_chunks = [None] * (self._parent.ac_count_of_chunks)
for i in range(self._parent.ac_count_of_chunks):
self.raw_chunks[i] = self._io.read_bits_int_be(4)
class TrailerAc(KaitaiStruct):
def __init__(self, ac, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.ac = ac
self._read()
def _read(self):
pass
@property
def can_read_key_b(self):
"""key A is required."""
if hasattr(self, '_m_can_read_key_b'):
return self._m_can_read_key_b if hasattr(self, '_m_can_read_key_b') else None
self._m_can_read_key_b = self.ac.inv_shift_val <= 2
return self._m_can_read_key_b if hasattr(self, '_m_can_read_key_b') else None
@property
def can_write_keys(self):
if hasattr(self, '_m_can_write_keys'):
return self._m_can_write_keys if hasattr(self, '_m_can_write_keys') else None
self._m_can_write_keys = ((((self.ac.inv_shift_val + 1) % 3) != 0) and (self.ac.inv_shift_val < 6))
return self._m_can_write_keys if hasattr(self, '_m_can_write_keys') else None
@property
def can_write_access_bits(self):
if hasattr(self, '_m_can_write_access_bits'):
return self._m_can_write_access_bits if hasattr(self, '_m_can_write_access_bits') else None
self._m_can_write_access_bits = self.ac.bits[2].b
return self._m_can_write_access_bits if hasattr(self, '_m_can_write_access_bits') else None
@property
def key_b_controls_write(self):
if hasattr(self, '_m_key_b_controls_write'):
return self._m_key_b_controls_write if hasattr(self, '_m_key_b_controls_write') else None
self._m_key_b_controls_write = not (self.can_read_key_b)
return self._m_key_b_controls_write if hasattr(self, '_m_key_b_controls_write') else None
class ChunkBitRemap(KaitaiStruct):
def __init__(self, bit_no, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.bit_no = bit_no
self._read()
def _read(self):
pass
@property
def shift_value(self):
if hasattr(self, '_m_shift_value'):
return self._m_shift_value if hasattr(self, '_m_shift_value') else None
self._m_shift_value = (-1 if self.bit_no == 1 else 1)
return self._m_shift_value if hasattr(self, '_m_shift_value') else None
@property
def chunk_no(self):
if hasattr(self, '_m_chunk_no'):
return self._m_chunk_no if hasattr(self, '_m_chunk_no') else None
self._m_chunk_no = (((self.inv_chunk_no + self.shift_value) + self._parent._parent.ac_count_of_chunks) % self._parent._parent.ac_count_of_chunks)
return self._m_chunk_no if hasattr(self, '_m_chunk_no') else None
@property
def inv_chunk_no(self):
if hasattr(self, '_m_inv_chunk_no'):
return self._m_inv_chunk_no if hasattr(self, '_m_inv_chunk_no') else None
self._m_inv_chunk_no = (self.bit_no + self.shift_value)
return self._m_inv_chunk_no if hasattr(self, '_m_inv_chunk_no') else None
class DataAc(KaitaiStruct):
def __init__(self, ac, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.ac = ac
self._read()
def _read(self):
pass
@property
def read_key_a_required(self):
if hasattr(self, '_m_read_key_a_required'):
return self._m_read_key_a_required if hasattr(self, '_m_read_key_a_required') else None
self._m_read_key_a_required = self.ac.val <= 4
return self._m_read_key_a_required if hasattr(self, '_m_read_key_a_required') else None
@property
def write_key_b_required(self):
if hasattr(self, '_m_write_key_b_required'):
return self._m_write_key_b_required if hasattr(self, '_m_write_key_b_required') else None
self._m_write_key_b_required = (( ((not (self.read_key_a_required)) or (self.read_key_b_required)) ) and (not (self.ac.bits[0].b)))
return self._m_write_key_b_required if hasattr(self, '_m_write_key_b_required') else None
@property
def write_key_a_required(self):
if hasattr(self, '_m_write_key_a_required'):
return self._m_write_key_a_required if hasattr(self, '_m_write_key_a_required') else None
self._m_write_key_a_required = self.ac.val == 0
return self._m_write_key_a_required if hasattr(self, '_m_write_key_a_required') else None
@property
def read_key_b_required(self):
if hasattr(self, '_m_read_key_b_required'):
return self._m_read_key_b_required if hasattr(self, '_m_read_key_b_required') else None
self._m_read_key_b_required = self.ac.val <= 6
return self._m_read_key_b_required if hasattr(self, '_m_read_key_b_required') else None
@property
def decrement_available(self):
if hasattr(self, '_m_decrement_available'):
return self._m_decrement_available if hasattr(self, '_m_decrement_available') else None
self._m_decrement_available = (( ((self.ac.bits[1].b) or (not (self.ac.bits[0].b))) ) and (not (self.ac.bits[2].b)))
return self._m_decrement_available if hasattr(self, '_m_decrement_available') else None
@property
def increment_available(self):
if hasattr(self, '_m_increment_available'):
return self._m_increment_available if hasattr(self, '_m_increment_available') else None
self._m_increment_available = (( ((not (self.ac.bits[0].b)) and (not (self.read_key_a_required)) and (not (self.read_key_b_required))) ) or ( ((not (self.ac.bits[0].b)) and (self.read_key_a_required) and (self.read_key_b_required)) ))
return self._m_increment_available if hasattr(self, '_m_increment_available') else None
class Ac(KaitaiStruct):
def __init__(self, index, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.index = index
self._read()
def _read(self):
pass
class AcBit(KaitaiStruct):
def __init__(self, i, chunk, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.i = i
self.chunk = chunk
self._read()
def _read(self):
pass
@property
def n(self):
if hasattr(self, '_m_n'):
return self._m_n if hasattr(self, '_m_n') else None
self._m_n = ((self.chunk >> self.i) & 1)
return self._m_n if hasattr(self, '_m_n') else None
@property
def b(self):
if hasattr(self, '_m_b'):
return self._m_b if hasattr(self, '_m_b') else None
self._m_b = self.n == 1
return self._m_b if hasattr(self, '_m_b') else None
@property
def bits(self):
if hasattr(self, '_m_bits'):
return self._m_bits if hasattr(self, '_m_bits') else None
_pos = self._io.pos()
self._io.seek(0)
self._m_bits = [None] * (self._parent._parent.ac_bits)
for i in range(self._parent._parent.ac_bits):
self._m_bits[i] = MifareClassic.Trailer.AccessConditions.Ac.AcBit(self.index, self._parent.chunks[i].chunk, self._io, self, self._root)
self._io.seek(_pos)
return self._m_bits if hasattr(self, '_m_bits') else None
@property
def val(self):
"""c3 c2 c1."""
if hasattr(self, '_m_val'):
return self._m_val if hasattr(self, '_m_val') else None
self._m_val = (((self.bits[2].n << 2) | (self.bits[1].n << 1)) | self.bits[0].n)
return self._m_val if hasattr(self, '_m_val') else None
@property
def inv_shift_val(self):
if hasattr(self, '_m_inv_shift_val'):
return self._m_inv_shift_val if hasattr(self, '_m_inv_shift_val') else None
self._m_inv_shift_val = (((self.bits[0].n << 2) | (self.bits[1].n << 1)) | self.bits[2].n)
return self._m_inv_shift_val if hasattr(self, '_m_inv_shift_val') else None
class ValidChunk(KaitaiStruct):
def __init__(self, inv_chunk, chunk, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.inv_chunk = inv_chunk
self.chunk = chunk
self._read()
def _read(self):
pass
@property
def valid(self):
if hasattr(self, '_m_valid'):
return self._m_valid if hasattr(self, '_m_valid') else None
self._m_valid = (self.inv_chunk ^ self.chunk) == 15
return self._m_valid if hasattr(self, '_m_valid') else None
@property
def data_acs(self):
if hasattr(self, '_m_data_acs'):
return self._m_data_acs if hasattr(self, '_m_data_acs') else None
_pos = self._io.pos()
self._io.seek(0)
self._m_data_acs = [None] * ((self._parent.acs_in_sector - 1))
for i in range((self._parent.acs_in_sector - 1)):
self._m_data_acs[i] = MifareClassic.Trailer.AccessConditions.DataAc(self.acs_raw[i], self._io, self, self._root)
self._io.seek(_pos)
return self._m_data_acs if hasattr(self, '_m_data_acs') else None
@property
def remaps(self):
if hasattr(self, '_m_remaps'):
return self._m_remaps if hasattr(self, '_m_remaps') else None
_pos = self._io.pos()
self._io.seek(0)
self._m_remaps = [None] * (self._parent.ac_bits)
for i in range(self._parent.ac_bits):
self._m_remaps[i] = MifareClassic.Trailer.AccessConditions.ChunkBitRemap(i, self._io, self, self._root)
self._io.seek(_pos)
return self._m_remaps if hasattr(self, '_m_remaps') else None
@property
def acs_raw(self):
if hasattr(self, '_m_acs_raw'):
return self._m_acs_raw if hasattr(self, '_m_acs_raw') else None
_pos = self._io.pos()
self._io.seek(0)
self._m_acs_raw = [None] * (self._parent.acs_in_sector)
for i in range(self._parent.acs_in_sector):
self._m_acs_raw[i] = MifareClassic.Trailer.AccessConditions.Ac(i, self._io, self, self._root)
self._io.seek(_pos)
return self._m_acs_raw if hasattr(self, '_m_acs_raw') else None
@property
def trailer_ac(self):
if hasattr(self, '_m_trailer_ac'):
return self._m_trailer_ac if hasattr(self, '_m_trailer_ac') else None
_pos = self._io.pos()
self._io.seek(0)
self._m_trailer_ac = MifareClassic.Trailer.AccessConditions.TrailerAc(self.acs_raw[(self._parent.acs_in_sector - 1)], self._io, self, self._root)
self._io.seek(_pos)
return self._m_trailer_ac if hasattr(self, '_m_trailer_ac') else None
@property
def chunks(self):
if hasattr(self, '_m_chunks'):
return self._m_chunks if hasattr(self, '_m_chunks') else None
_pos = self._io.pos()
self._io.seek(0)
self._m_chunks = [None] * (self._parent.ac_bits)
for i in range(self._parent.ac_bits):
self._m_chunks[i] = MifareClassic.Trailer.AccessConditions.ValidChunk(self.raw_chunks[self.remaps[i].inv_chunk_no], self.raw_chunks[self.remaps[i].chunk_no], self._io, self, self._root)
self._io.seek(_pos)
return self._m_chunks if hasattr(self, '_m_chunks') else None
@property
def ac_bits(self):
if hasattr(self, '_m_ac_bits'):
return self._m_ac_bits if hasattr(self, '_m_ac_bits') else None
self._m_ac_bits = 3
return self._m_ac_bits if hasattr(self, '_m_ac_bits') else None
@property
def acs_in_sector(self):
if hasattr(self, '_m_acs_in_sector'):
return self._m_acs_in_sector if hasattr(self, '_m_acs_in_sector') else None
self._m_acs_in_sector = 4
return self._m_acs_in_sector if hasattr(self, '_m_acs_in_sector') else None
@property
def ac_count_of_chunks(self):
if hasattr(self, '_m_ac_count_of_chunks'):
return self._m_ac_count_of_chunks if hasattr(self, '_m_ac_count_of_chunks') else None
self._m_ac_count_of_chunks = (self.ac_bits * 2)
return self._m_ac_count_of_chunks if hasattr(self, '_m_ac_count_of_chunks') else None
| [
"[email protected]"
] | |
b2801badf5bd6284bd289b522b327b3edbb347b5 | 6131b2738a7c087dfa6907c624453576f6f0e393 | /银行转账pmysql版本/Bank_Transfer.py | 34c606ac76f8f664750def22983d82d05855ec09 | [] | no_license | heheddff/myPythonProcess | 60ef240130cd02906dc500eedb397a9662c02e5a | 885a25dd2a9cd43801306d9e70b9ce89daec4406 | refs/heads/master | 2020-04-08T19:09:18.192738 | 2019-08-06T02:52:54 | 2019-08-06T02:52:54 | 159,642,468 | 4 | 5 | null | null | null | null | GB18030 | Python | false | false | 2,049 | py | # coding=gbk
import pymysql
class Money():
def __init__(self,sid,tid,mon):
self.conn = pymysql.connect(
host="127.0.0.1",
port=3306,
user='root',
passwd='****',
db='test'
)
self.cursor = self.conn.cursor();
self.table = "money"
self.sid = sid
self.tid = tid
self.mon = mon
def checkuser(self,userid):
try:
sql = "select userid from "+self.table+" where userid=%s"
self.cursor.execute(sql,(userid,))
res = self.cursor.fetchone()
if res is None:
raise Exception("账号{}不存在".format(userid))
finally:
pass
#self.cursor.close()
#self.conn.close()
def reducemoney(self,userid,money):
try:
sql = "update "+self.table+" set money=money-%s where userid=%s"
self.cursor.execute(sql,(money,userid))
if self.cursor.rowcount != 1:
raise Exception("账号{}转账失败".format(userid))
finally:
pass
#self.cursor.close()
#self.conn.close()
def addmoney(self,userid,money):
try:
sql = "update "+self.table+" set money=money+%s where userid=%s"
self.cursor.execute(sql,(money,userid,))
if self.cursor.rowcount != 1:
raise Exception("账号{}收账失败".format(userid))
finally:
pass
#self.cursor.close()
#self.conn.close()
def checkmoney(self,userid,money):
try:
sql = "select userid from "+self.table+" where userid=%s and money>%s"
self.cursor.execute(sql,(userid,money))
res = self.cursor.fetchone()
if res is None:
raise Exception("账号{}余额小于{}".format(userid,money))
finally:
pass
#self.cursor.close()
#self.conn.close()
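    # One transfer: validate both accounts, make sure the sender can cover the
    # amount, debit the sender, credit the receiver, then commit. Any failure
    # raises and triggers the rollback below, keeping the two UPDATEs atomic.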
def run(self):
try:
self.checkuser(self.sid)
self.checkuser(self.tid)
self.checkmoney(self.sid,self.mon)
self.reducemoney(self.sid,self.mon)
self.addmoney(self.tid,self.mon)
self.conn.commit()
except Exception as e:
self.conn.rollback()
raise e
finally:
#pass
self.cursor.close()
self.conn.close()
try:
m = Money(11,13,100)
m.run()
except Exception as e:
#pass
print(e)
else:
print("转账成功")
| [
"[email protected]"
] | |
2e89a9bd74c09e3531412e11b310be4b94ae18d1 | 2a39fe8bd203531c9bcdb470d19b80beac665eae | /model_cluster.py | 288cf9409b0694d16f6334c5ea877ffeafd2e726 | [] | no_license | davidharvey1986/lenstoolTools | 7bf11af1a38700503a731c6fe7e83fdc92bf58c1 | 85bcf729603d34341f5f41c57c4e233b08055baa | refs/heads/master | 2021-09-08T14:29:52.695461 | 2018-03-10T13:54:50 | 2018-03-10T13:54:50 | 124,657,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,715 | py | '''
This script has one function:
1. model_cluster( ra, dec, cluster, \
halos=None, \
best_file=None)
This models the input cluster and returns a structure
from simulate_project with shear, chi, etc.
'''
import numpy as np
import ipdb as pdb
import astro_tools as at
import idlsave as idlsave
import lensing as l
import copy as copy
import glob as glob
import os
def model_cluster( ra, dec, cluster, \
halos=None, \
best_file=None):
'''
Model the NFW signal of the cluster using the
input from halos
'''
if best_file is None:
dataDir = '/Users/DavidHarvey/Documents/Work/Trails/data/rerun/'+cluster
best_file = dataDir+'/best.par'
runmode, potentials = l.lenstool.read_best( filename=best_file)
space = l.simulations.templates.space()
space.lens[0].del_profile('isothermal')
space.source[0].ell_disp = 0.
space.source[0].ra = ra
space.source[0].dec = dec
space.telescope.nGalaxies = len(dec)
space.lens[0].redshift = potentials[0]['z_lens']['float']
space.source[0].redshift = 1.0
space.lens[0].ra = potentials[0]['ra']['float']
space.lens[0].dec = potentials[0]['dec']['float']
if halos is not None:
space.lens[0].ra = halos['halos'][0]['gal']['ra'][0]
space.lens[0].dec = halos['halos'][0]['gal']['dec'][0]
space.lens[0].profiles['nfw'].args['mass'] = \
potentials[0]['m200']['str'].astype(np.double)
space.lens[0].profiles['nfw'].args['conc'] = \
potentials[0]['concentration']['float']
space.lens[0].profiles['nfw'].args['ellipticity'] = \
potentials[0]['ellipticite']['float']
space.lens[0].profiles['nfw'].args['potential_angle'] = \
potentials[0]['angle_pos']['float']
scale_radius = l.profiles.nfw.scale_radius(space.lens[0].profiles['nfw'].args['mass'], \
space.lens[0].profiles['nfw'].args['conc'],\
potentials[0]['z_lens']['float'])
space.lens[0].profiles['nfw'].args['scale_radius'] = scale_radius
for iHalo in range(1,len(potentials)):
space.add_lens()
space.lens[iHalo].redshift = potentials[0]['z_lens']['float']
space.source[iHalo].redshift = 1.0
space.lens[iHalo].ra = potentials[iHalo]['ra']['float']
space.lens[iHalo].dec = potentials[iHalo]['dec']['float']
if halos is not None:
space.lens[iHalo].ra = halos['halos'][iHalo]['gal']['ra'][0]
space.lens[iHalo].dec = halos['halos'][iHalo]['gal']['dec'][0]
space.lens[iHalo].profiles['nfw'].args['mass'] = \
potentials[iHalo]['m200']['str'].astype(np.double)
space.lens[iHalo].profiles['nfw'].args['conc'] = \
potentials[iHalo]['concentration']['float']
space.lens[iHalo].profiles['nfw'].args['ellipticity'] = \
potentials[iHalo]['ellipticite']['float']
space.lens[iHalo].profiles['nfw'].args['potential_angle'] = \
potentials[iHalo]['angle_pos']['float']
scale_radius = l.profiles.nfw.scale_radius(space.lens[iHalo].profiles['nfw'].args['mass'], \
space.lens[iHalo].profiles['nfw'].args['conc'],\
potentials[iHalo]['z_lens']['float'])
space.lens[iHalo].profiles['nfw'].args['scale_radius'] = scale_radius
space.reload(positions=False)
space.weak_lensing()
return space
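# Hedged usage sketch: the coordinates and the cluster name below are made-up
# placeholders, and best_file should point at a real lenstool best-fit file
# (otherwise the hard-coded data directory above is assumed to exist).
if __name__ == '__main__':
    example_ra = np.array([3.586, 3.592])
    example_dec = np.array([-30.39, -30.41])
    space = model_cluster(example_ra, example_dec, 'A2744', best_file='best.par')
    print(space.source[0].ra)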
| [
"[email protected]"
] | |
37dcfb2bd2200cc648ab737e317d319edfd9d269 | b87f66b13293782321e20c39aebc05defd8d4b48 | /maps/build/EnthoughtBase/enthought/logger/agent/attachments.py | 4d8f00f577f6d82f2c2ec0b1a7f4b5a14dd94aef | [] | no_license | m-elhussieny/code | 5eae020932d935e4d724c2f3d16126a0d42ebf04 | 5466f5858dbd2f1f082fa0d7417b57c8fb068fad | refs/heads/master | 2021-06-13T18:47:08.700053 | 2016-11-01T05:51:06 | 2016-11-01T05:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,835 | py | """ Attach relevant project files.
FIXME: there are no public project plugins for Envisage 3, yet. In any case,
this stuff should not be hard-coded, but extensible via extension points. The
code remains here because we can reuse the zip utility code in that extensible
rewrite.
"""
import logging
import os.path
from email import Encoders
from email.MIMEBase import MIMEBase
from enthought.traits.api import Any, HasTraits
logger = logging.getLogger(__name__)
class Attachments(HasTraits):
application = Any()
message = Any()
def __init__(self, message, **traits):
traits = traits.copy()
traits['message'] = message
super(Attachments, self).__init__(**traits)
# FIXME: all of the package_*() methods refer to deprecated project plugins.
def package_workspace(self):
if self.application is None:
            return
workspace = self.application.get_service('enthought.envisage.project.IWorkspace')
if workspace is not None:
dir = workspace.path
self._attach_directory(dir)
return
def package_single_project(self):
if self.application is None:
            return
single_project = self.application.get_service('enthought.envisage.single_project.ModelService')
if single_project is not None:
dir = single_project.location
self._attach_directory(dir)
def package_any_relevant_files(self):
self.package_workspace()
self.package_single_project()
return
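    # Zips the given directory in memory, wraps the bytes in a base64-encoded
    # application/octet-stream MIME part named 'project.zip', and attaches it
    # to self.message.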
def _attach_directory(self, dir):
relpath = os.path.basename(dir)
import zipfile
from cStringIO import StringIO
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
msg = MIMEBase(maintype, subtype)
file_object = StringIO()
zip = zipfile.ZipFile(file_object, 'w')
_append_to_zip_archive(zip, dir, relpath)
zip.close()
msg.set_payload(file_object.getvalue())
Encoders.encode_base64(msg) # Encode the payload using Base64
msg.add_header('Content-Disposition', 'attachment', filename='project.zip')
self.message.attach(msg)
file_object.close()
def _append_to_zip_archive(zip, dir, relpath):
""" Add all files in and below directory dir into zip archive"""
for filename in os.listdir(dir):
path = os.path.join(dir, filename)
if os.path.isfile(path):
name = os.path.join(relpath, filename)
zip.write(path, name)
logger.debug('adding %s to error report' % path)
else:
if filename != ".svn": # skip svn files if any
subdir = os.path.join(dir, filename)
_append_to_zip_archive(zip, subdir, os.path.join(relpath, filename))
return
| [
"[email protected]"
] | |
a4becb7bb74d1bc89c50607b9bb58cfd03ce77ee | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/littlecodersh_EasierLife/EasierLife-master/Programs/PCMusicViaWechat/run.py | 711a41774b068b3a371af0624be25cf996578762 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 785 | py | #coding=utf8
import os
import itchat
from NetEaseMusicApi import interact_select_song
HELP_MSG = u'''\
Welcome to the WeChat NetEase Cloud Music player
Help: show this help message
Stop: stop the current song
Any song name: follow the prompts to play that song\
'''
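# 'stop.mp3' is an empty file: close_music() simply asks the default media
# player to open it, which interrupts whatever song is currently playing
# (os.startfile is Windows-only).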
with open('stop.mp3', 'w') as f: pass
def close_music():
os.startfile('stop.mp3')
@itchat.msg_register(itchat.content.TEXT)
def music_player(msg):
if msg['ToUserName'] != 'filehelper': return
    if msg['Text'] == u'Stop':
        close_music()
        itchat.send(u'Music stopped', 'filehelper')
    elif msg['Text'] == u'Help':
itchat.send(HELP_MSG, 'filehelper')
else:
itchat.send(interact_select_song(msg['Text']), 'filehelper')
itchat.auto_login(True, enableCmdQR=True)
itchat.send(HELP_MSG, 'filehelper')
itchat.run()
| [
"[email protected]"
] | |
9b1fbe5600b7322f49bc1c05a6ab3c1035972d4d | e288dd6e3edbe2e77a970719ea943a98c629d2e2 | /stackdio/api/environments/tasks.py | e774089765937867e957f17e4651402aec083649 | [
"Apache-2.0"
] | permissive | Harrison-Miller/stackdio | b308db4cfd68e9900e08867726b779e334f2334a | b12975737a0962c748b3da375609f7db9c8842f3 | refs/heads/master | 2021-01-25T07:49:08.369918 | 2017-05-14T04:31:09 | 2017-05-14T04:31:09 | 93,666,368 | 0 | 0 | null | 2017-06-07T18:23:56 | 2017-06-07T18:23:56 | null | UTF-8 | Python | false | false | 11,435 | py | # -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import logging
from functools import wraps
import salt.client
import six
from celery import shared_task
from django.conf import settings
from stackdio.api.environments import utils
from stackdio.api.environments.exceptions import EnvironmentTaskException
from stackdio.api.environments.models import Environment
from stackdio.core.constants import Activity, ComponentStatus
from stackdio.core.utils import auto_retry
from stackdio.salt.utils.client import (
StackdioLocalClient,
StackdioRunnerClient,
StackdioSaltClientException,
)
logger = logging.getLogger(__name__)
def environment_task(*args, **kwargs):
"""
Create an environment celery task that performs some common functionality and handles errors
"""
final_task = kwargs.pop('final_task', False)
def wrapped(func):
# Pass the args from environment_task to shared_task
@shared_task(*args, **kwargs)
@wraps(func)
def task(environment_name, *task_args, **task_kwargs):
try:
environment = Environment.objects.get(name=environment_name)
except Environment.DoesNotExist:
raise ValueError('No environment found with name {}'.format(environment_name))
try:
# Call our actual task function and catch some common errors
func(environment, *task_args, **task_kwargs)
if not final_task:
# Everything went OK, set back to queued
environment.activity = Activity.QUEUED
environment.save()
except EnvironmentTaskException as e:
environment.activity = Activity.IDLE
environment.save()
logger.exception(e)
raise
except Exception as e:
environment.activity = Activity.IDLE
environment.save()
logger.exception(e)
raise
return task
return wrapped
@environment_task(name='environments.sync_all')
def sync_all(environment):
logger.info('Syncing all salt systems for environment: {0!r}'.format(environment))
client = salt.client.LocalClient(settings.STACKDIO_CONFIG.salt_master_config)
ret = client.cmd_iter('env:environments.{}'.format(environment.name),
'saltutil.sync_all',
expr_form='grain')
result = {}
for res in ret:
for host, data in res.items():
result[host] = data
for host, data in result.items():
if 'retcode' not in data:
logger.warning('Host {0} missing a retcode... assuming failure'.format(host))
if data.get('retcode', 1) != 0:
err_msg = six.text_type(data['ret'])
raise EnvironmentTaskException('Error syncing salt data: {0!r}'.format(err_msg))
@environment_task(name='environments.highstate')
def highstate(environment, max_attempts=3):
"""
Executes the state.highstate function on the environment using the default
    stackdio top file. That top file will only target the 'base'
environment and core states for the environment. These core states are
purposely separate from others to provision hosts with things that
stackdio needs.
"""
environment.activity = Activity.PROVISIONING
environment.save()
logger.info('Running core provisioning for environment: {0!r}'.format(environment))
# Set up logging for this task
root_dir = environment.get_root_directory()
log_dir = environment.get_log_directory()
# Build up our highstate function
@auto_retry('highstate', max_attempts, EnvironmentTaskException)
def do_highstate(attempt=None):
logger.info('Task {0} try #{1} for environment {2!r}'.format(
highstate.name,
attempt,
environment))
# Use our fancy context manager that handles logging for us
with StackdioLocalClient(run_type='provisioning',
root_dir=root_dir,
log_dir=log_dir) as client:
results = client.run('env:environments.{}'.format(environment.name),
'state.highstate',
expr_form='grain')
if results['failed']:
raise EnvironmentTaskException(
'Core provisioning errors on hosts: '
'{0}. Please see the provisioning errors API '
'or the log file for more details.'.format(', '.join(results['failed_hosts']))
)
# Call our highstate. Will raise the appropriate exception if it fails.
do_highstate()
@environment_task(name='environments.propagate_ssh')
def propagate_ssh(environment, max_attempts=3):
"""
Similar to environments.highstate, except we only run `core.stackdio_users`
instead of `core.*`. This is useful so that ssh keys can be added to
    hosts without having to completely re-run provisioning.
"""
environment.activity = Activity.PROVISIONING
environment.save()
logger.info('Propagating ssh keys on environment: {0!r}'.format(environment))
# Set up logging for this task
root_dir = environment.get_root_directory()
log_dir = environment.get_log_directory()
@auto_retry('propagate_ssh', max_attempts, EnvironmentTaskException)
def do_propagate_ssh(attempt=None):
logger.info('Task {0} try #{1} for environment {2!r}'.format(
propagate_ssh.name,
attempt,
environment))
# Use our fancy context manager that handles logging for us
with StackdioLocalClient(run_type='propagate-ssh',
root_dir=root_dir,
log_dir=log_dir) as client:
results = client.run('env:environments.{}'.format(environment.name),
'state.sls',
arg=['core.stackdio_users'],
expr_form='grain')
if results['failed']:
raise EnvironmentTaskException(
'SSH key propagation errors on hosts: '
'{0}. Please see the provisioning errors API '
'or the log file for more details.'.format(', '.join(results['failed_hosts']))
)
# Call our function
do_propagate_ssh()
@environment_task(name='environments.orchestrate')
def orchestrate(environment, max_attempts=3):
"""
Executes the runners.state.orchestrate function with the
orchestrate sls specified on the environment.
"""
environment.activity = Activity.ORCHESTRATING
environment.save()
logger.info('Executing orchestration for environment: {0!r}'.format(environment))
# Set up logging for this task
root_dir = environment.get_root_directory()
log_dir = environment.get_log_directory()
@auto_retry('orchestrate', max_attempts, EnvironmentTaskException)
def do_orchestrate(attempt=None):
logger.info('Task {0} try #{1} for environment {2!r}'.format(
orchestrate.name,
attempt,
environment))
with StackdioRunnerClient(run_type='orchestration',
root_dir=root_dir,
log_dir=log_dir) as client:
try:
result = client.orchestrate(arg=[
environment.orchestrate_sls_path,
'environments.{0}'.format(environment.name),
])
except StackdioSaltClientException as e:
raise EnvironmentTaskException('Orchestration failed: {}'.format(six.text_type(e)))
# Set the statuses
utils.set_component_statuses(environment, result)
if result['failed']:
err_msg = 'Orchestration errors on components: ' \
'{0}. Please see the orchestration errors ' \
'API or the orchestration log file for more ' \
'details.'.format(', '.join(result['failed_sls']))
raise EnvironmentTaskException(err_msg)
# Call our function
do_orchestrate()
@environment_task(name='environments.single_sls')
def single_sls(environment, component, host_target, max_attempts=3):
environment.activity = Activity.ORCHESTRATING
environment.save()
logger.info('Executing single sls {0} for environment: {1!r}'.format(component, environment))
# Set up logging for this task
root_dir = environment.get_root_directory()
log_dir = environment.get_log_directory()
if host_target:
target = '{0} and G@env:environments.{1}'.format(host_target, environment.name)
expr_form = 'compound'
else:
target = 'env:environments.{0}'.format(environment.name)
expr_form = 'grain'
@auto_retry('single_sls', max_attempts, EnvironmentTaskException)
def do_single_sls(attempt=None):
logger.info('Task {0} try #{1} for environment {2!r}'.format(
single_sls.name,
attempt,
environment,
))
with StackdioLocalClient(run_type='single-sls',
root_dir=root_dir,
log_dir=log_dir) as client:
results = client.run(
target,
'state.sls',
arg=[
component,
'environments.{0}'.format(environment.name),
],
expr_form=expr_form,
)
if results['failed']:
raise EnvironmentTaskException(
'Single SLS {} errors on hosts: '
'{}. Please see the provisioning errors API '
'or the log file for more details.'.format(
component,
', '.join(results['failed_hosts']),
)
)
if results['succeeded_hosts']:
environment.set_component_status(component, ComponentStatus.SUCCEEDED,
results['succeeded_hosts'])
if results['failed_hosts']:
environment.set_component_status(component, ComponentStatus.FAILED,
results['failed_hosts'])
# Call our function
do_single_sls()
@environment_task(name='environments.finish_environment', final_task=True)
def finish_environment(environment):
logger.info('Finishing environment: {0!r}'.format(environment))
# Update activity
environment.activity = Activity.IDLE
environment.save()
| [
"[email protected]"
] | |
31a22036f099c73ba1c28df51244f2704b311551 | 891902687207fb335b65dbb8d31d6e20301764f9 | /pe007.py | 3521b25495844f22a773b26856805b717f686ada | [] | no_license | maecchi/PE | 93bd050eaca2733aa37db6ca493b820fe3d7a351 | 3d9092635807f0036719b65adb16f1c0926c2321 | refs/heads/master | 2020-05-04T16:38:36.476355 | 2012-06-10T05:26:10 | 2012-06-10T05:26:10 | 1,746,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# pe007.py - Project Euler
#
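# Project Euler problem 7: find the 10001st prime by trial-dividing each
# candidate against every prime collected so far.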
prime_array = []
sum_of_array = len(prime_array)
number = 1
while (len(prime_array) < 10001) :
is_prime = True
number += 1
if sum_of_array == 0:
if number != 1:
prime_array.append(number)
else:
for i in prime_array :
if not number % i:
is_prime = False
break
if is_prime:
prime_array.append(number)
sum_of_array = len(prime_array)
print prime_array[-1]
| [
"[email protected]"
] | |
4adcfd595197df3f2ce242669e3c597ba48b4670 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_express_route_ports_operations.py | be712ee91b79cf9c38e4d15ed2135405fdc6f98d | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 27,188 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRoutePortsOperations:
"""ExpressRoutePortsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
express_route_port_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
express_route_port_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified ExpressRoutePort resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
express_route_port_name=express_route_port_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
async def get(
self,
resource_group_name: str,
express_route_port_name: str,
**kwargs
) -> "_models.ExpressRoutePort":
"""Retrieves the requested ExpressRoutePort resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of ExpressRoutePort.
:type express_route_port_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRoutePort, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.ExpressRoutePort
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePort"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
express_route_port_name: str,
parameters: "_models.ExpressRoutePort",
**kwargs
) -> "_models.ExpressRoutePort":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePort"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ExpressRoutePort')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
express_route_port_name: str,
parameters: "_models.ExpressRoutePort",
**kwargs
) -> AsyncLROPoller["_models.ExpressRoutePort"]:
"""Creates or updates the specified ExpressRoutePort resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:param parameters: Parameters supplied to the create ExpressRoutePort operation.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.ExpressRoutePort
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRoutePort or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.ExpressRoutePort]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePort"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
express_route_port_name=express_route_port_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
express_route_port_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> "_models.ExpressRoutePort":
"""Update ExpressRoutePort tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:param parameters: Parameters supplied to update ExpressRoutePort resource tags.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRoutePort, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.ExpressRoutePort
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePort"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.ExpressRoutePortListResult"]:
"""List all the ExpressRoutePort resources in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRoutePortListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.ExpressRoutePortListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRoutePortListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts'} # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["_models.ExpressRoutePortListResult"]:
"""List all the ExpressRoutePort resources in the specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRoutePortListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.ExpressRoutePortListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRoutePortListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePorts'} # type: ignore
| [
"[email protected]"
] | |
2229d4df4a6585402d2b9b02a44445d1e7e39d2e | 71f55955d7115763f9267704328f8c738aafaa15 | /euca2ools/commands/iam/addrolepolicy.py | bb9e1d6ddeabb20857861ae24bcc791d2dbbad40 | [
"BSD-2-Clause"
] | permissive | fr33jc/euca2ools | 66da4a866e9a0873ce225f9f931019b0bbd82fff | f4d8052000601e59e4e7d4dec4aa4094df4e39a0 | refs/heads/master | 2021-01-21T08:20:44.646393 | 2015-05-07T06:16:30 | 2015-05-07T06:26:57 | 35,200,788 | 0 | 0 | null | 2015-05-07T05:34:42 | 2015-05-07T05:34:42 | null | UTF-8 | Python | false | false | 3,335 | py | # Copyright 2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from requestbuilder import Arg
from euca2ools.commands.iam import IAMRequest, AS_ACCOUNT
from euca2ools.commands.iam.putrolepolicy import PutRolePolicy
from euca2ools.util import build_iam_policy
class AddRolePolicy(IAMRequest):
DESCRIPTION = ('Add a new policy to a role. To add more complex policies '
'than this tool supports, see euare-roleuploadpolicy.')
ARGS = [Arg('-r', '--role-name', metavar='ROLE', required=True,
help='role to attach the policy to (required)'),
Arg('-p', '--policy-name', metavar='POLICY', required=True,
help='name of the new policy (required)'),
Arg('-e', '--effect', choices=('Allow', 'Deny'), required=True,
help='whether the new policy should Allow or Deny (required)'),
Arg('-a', '--action', dest='actions', action='append',
required=True, help='''action(s) the policy should apply to
(at least one required)'''),
Arg('-c', '--resource', dest='resources', action='append',
required=True, help='''resource(s) the policy should apply to
(at least one required)'''),
Arg('-o', '--output', action='store_true',
help='also display the newly-created policy'),
AS_ACCOUNT]
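    # Example invocation (illustrative values; the console-script name
    # euare-roleaddpolicy is an assumption based on the euare-* naming used
    # elsewhere in euca2ools):
    #   euare-roleaddpolicy -r my-role -p s3-read -e Allow \
    #       -a 's3:Get*' -c 'arn:aws:s3:::my-bucket/*' -o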
def main(self):
policy = build_iam_policy(self.args['effect'], self.args['resources'],
self.args['actions'])
policy_doc = json.dumps(policy)
req = PutRolePolicy.from_other(
self, RoleName=self.args['role_name'],
PolicyName=self.args['policy_name'],
PolicyDocument=policy_doc,
DelegateAccount=self.params['DelegateAccount'])
response = req.main()
response['PolicyDocument'] = policy_doc
return response
def print_result(self, result):
if self.args['output']:
print result['PolicyDocument']
| [
"[email protected]"
] | |
a8fe19a5ebd13e3f499880b38d3a0c9b3e2e1a01 | 77c641fd0708b279dddbe01f6af32a8531b93185 | /marketsim/gen/_out/orderbook/_TwoWayLink.py | 32086a419dd50f0c39f7d88416660a33f3316484 | [] | no_license | abensrhir/marketsimulator | aea286afd2bb2e0c8a547bfa879601aef21c0cd5 | f9f55c72fb34cdbec42b96737ca20839f26c6299 | refs/heads/master | 2020-12-13T20:55:55.795344 | 2014-02-24T22:52:24 | 2014-02-24T22:52:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,564 | py | from marketsim import registry
from marketsim.gen._out._itwowaylink import ITwoWayLink
from marketsim.gen._intrinsic.orderbook.link import _TwoWayLink_Impl
from marketsim.gen._out._ilink import ILink
@registry.expose(["Asset", "TwoWayLink"])
class TwoWayLink_ILinkILink(ITwoWayLink,_TwoWayLink_Impl):
""" (normally between a trader and a market).
Ensures that sending packets via links preserves their order.
Holds two one-way links in opposite directions.
"""
def __init__(self, up = None, down = None):
from marketsim.gen._out.orderbook._link import Link_IObservableFloat as _orderbook_Link_IObservableFloat
from marketsim import rtti
self.up = up if up is not None else _orderbook_Link_IObservableFloat()
self.down = down if down is not None else _orderbook_Link_IObservableFloat()
rtti.check_fields(self)
_TwoWayLink_Impl.__init__(self)
@property
def label(self):
return repr(self)
_properties = {
'up' : ILink,
'down' : ILink
}
def __repr__(self):
return "TwoWayLink(%(up)s, %(down)s)" % self.__dict__
def TwoWayLink(up = None,down = None):
from marketsim.gen._out._ilink import ILink
from marketsim import rtti
if up is None or rtti.can_be_casted(up, ILink):
if down is None or rtti.can_be_casted(down, ILink):
return TwoWayLink_ILinkILink(up,down)
raise Exception('Cannot find suitable overload for TwoWayLink('+str(up) +':'+ str(type(up))+','+str(down) +':'+ str(type(down))+')')
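# Illustrative note (added for clarity, not part of the generated source): TwoWayLink() with
# no arguments builds both directions from default Link_IObservableFloat instances; passing
# ILink-compatible objects as `up`/`down` wires those in instead.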
| [
"[email protected]"
] | |
223d15aa59b73791af3a6b0a32075d3e44e5e0e1 | 12c41119156dd3783c3801e07f5f973289f26bb0 | /aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/ModifyInstanceCrossBackupPolicyRequest.py | 73f1bb1dc4142387214d25bfcc8845befb0ee4b4 | [
"Apache-2.0"
] | permissive | toywei/aliyun-openapi-python-sdk | bfe0893da38af9b222ce072fd7587d5b6cdce204 | ce8f683e3201fca8c473512267f50a34f71e31d3 | refs/heads/master | 2020-08-07T23:42:00.053692 | 2019-10-08T08:50:21 | 2019-10-08T08:50:21 | 213,626,962 | 1 | 0 | NOASSERTION | 2019-10-08T11:43:15 | 2019-10-08T11:43:15 | null | UTF-8 | Python | false | false | 3,625 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifyInstanceCrossBackupPolicyRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'ModifyInstanceCrossBackupPolicy','rds')
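    # Illustrative usage sketch (added for clarity, not part of the original SDK file);
    # the instance id and region are made up, and `client` is assumed to be an
    # initialized aliyunsdkcore AcsClient:
    #   request = ModifyInstanceCrossBackupPolicyRequest()
    #   request.set_DBInstanceId('rm-xxxxxxxx')
    #   request.set_CrossBackupRegion('cn-hangzhou')
    #   client.do_action_with_exception(request)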
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_RetentType(self):
return self.get_query_params().get('RetentType')
def set_RetentType(self,RetentType):
self.add_query_param('RetentType',RetentType)
def get_BackupEnabled(self):
return self.get_query_params().get('BackupEnabled')
def set_BackupEnabled(self,BackupEnabled):
self.add_query_param('BackupEnabled',BackupEnabled)
def get_RelService(self):
return self.get_query_params().get('RelService')
def set_RelService(self,RelService):
self.add_query_param('RelService',RelService)
def get_StorageType(self):
return self.get_query_params().get('StorageType')
def set_StorageType(self,StorageType):
self.add_query_param('StorageType',StorageType)
def get_Endpoint(self):
return self.get_query_params().get('Endpoint')
def set_Endpoint(self,Endpoint):
self.add_query_param('Endpoint',Endpoint)
def get_DBInstanceId(self):
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self,DBInstanceId):
self.add_query_param('DBInstanceId',DBInstanceId)
def get_Retention(self):
return self.get_query_params().get('Retention')
def set_Retention(self,Retention):
self.add_query_param('Retention',Retention)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_CrossBackupType(self):
return self.get_query_params().get('CrossBackupType')
def set_CrossBackupType(self,CrossBackupType):
self.add_query_param('CrossBackupType',CrossBackupType)
def get_LogBackupEnabled(self):
return self.get_query_params().get('LogBackupEnabled')
def set_LogBackupEnabled(self,LogBackupEnabled):
self.add_query_param('LogBackupEnabled',LogBackupEnabled)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_CrossBackupRegion(self):
return self.get_query_params().get('CrossBackupRegion')
def set_CrossBackupRegion(self,CrossBackupRegion):
self.add_query_param('CrossBackupRegion',CrossBackupRegion)
def get_StorageOwner(self):
return self.get_query_params().get('StorageOwner')
def set_StorageOwner(self,StorageOwner):
self.add_query_param('StorageOwner',StorageOwner) | [
"[email protected]"
] | |
2a3f06373320ecc765b4bb93855f011b6abd1874 | 8773e8c9b9a0a6e407f91b6f7c6321141d7e8356 | /P0028.py | b36e66bdef3c5322dae8da2cc24b78e41d00f479 | [] | no_license | westgate458/LeetCode | 1836bb21e8dd95386ccab390f5fd04567a429a02 | 36d7f9e967a62db77622e0888f61999d7f37579a | refs/heads/master | 2021-12-28T04:16:36.875737 | 2021-12-17T05:48:09 | 2021-12-17T05:48:09 | 152,928,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 7 21:30:22 2018
@author: Tianqi Guo
"""
class Solution(object):
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
# length of two strings
lh = len(haystack)
ln = len(needle)
# deal with trivial cases
if (ln == 0):
return 0
if (lh < ln):
return -1
# start searching from start
p = 0
# stop when remaining substring not long enough
while p <= lh-ln:
# find next position of the first character
# in the remaining substring
pp = haystack[p:].find(needle[0])
# if first character exists in remaining substring
# and remaining substring long enough
if (pp != -1) and (p+pp+ln <= lh):
# check if target is found
if haystack[p+pp:p+pp+ln] == needle:
# return current position
return p + pp
else:
# if not found update starting position
# as the one after current position of found first character
p = p + pp + 1
else:
# if first character does not exist in remaining substring
# return -1
return -1
# return default result (not found)
return -1
haystack = "a"
needle = "a"
test = Solution()
p = test.strStr(haystack, needle)
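# A few extra illustrative checks (added for clarity, not part of the original file);
# the expected values follow the usual strStr() contract.
assert test.strStr("hello", "ll") == 2
assert test.strStr("aaaaa", "bba") == -1
assert test.strStr("", "") == 0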
print(p) | [
"[email protected]"
] | |
531a90f48670b96708ad059976d2ba5bf25937fd | cbadf1c08435abc91bd221d2fd9d096717685cc0 | /cses/introductory/t1068/task.py | 4d13a4212760b1f74adb4ec357a1211f2f7534e6 | [] | no_license | x3mka/code-contests-python | 9b54738941187284e1f70aad850ae1016ca6cd39 | 57f473ca84735f9312913967e20a3ac0da32baa8 | refs/heads/master | 2022-09-01T20:39:05.329559 | 2022-08-04T13:05:22 | 2022-08-04T13:05:22 | 263,626,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | import sys
def rs(): return sys.stdin.readline().rstrip()
def ri(): return int(sys.stdin.readline())
def ria(): return list(map(int, sys.stdin.readline().split()))
def ws(s): sys.stdout.write(s); sys.stdout.write('\n')
def wi(n): sys.stdout.write(str(n)); sys.stdout.write('\n')
def wia(a, sep=' '): sys.stdout.write(sep.join([str(x) for x in a])); sys.stdout.write('\n')
def solve(n, k):
return 0
def main():
n = ri()
res = []
while n > 1:
res.append(n)
if n % 2 == 0:
n //= 2
else:
n = n * 3 + 1
res.append(1)
wia(res)
if __name__ == '__main__':
main()
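# Illustrative example (added for clarity, not part of the original submission):
# for stdin input "3" this prints the Collatz sequence "3 10 5 16 8 4 2 1".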
| [
"[email protected]"
] | |
c38f64648780fe24938819e7a021e775e5b9144a | 2aba3c043ce4ef934adce0f65bd589268ec443c5 | /codility/lessons/lesson15/abs_distinct.py | 20cee5b98361ce77f0c60a5866368cb270aedd84 | [] | no_license | kambehmw/algorithm_python | 4f66593b77039d90515d1fcbecacdab8c811b92f | 17222399dcc92fd8f908e5774a9883e2e89c486e | refs/heads/master | 2020-06-02T12:44:11.322356 | 2020-05-18T13:22:05 | 2020-05-18T13:22:05 | 191,157,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | def solution(A):
result = set()
for a in A:
if abs(a) not in result:
result.add(abs(a))
return len(result)
if __name__ == '__main__':
A = [-5, -3, -1, 0, 3, 6]
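    # Illustrative note (added for clarity, not in the original): the absolute values
    # 5, 3, 1, 0, 3, 6 collapse to 5 distinct values, so this prints 5.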
print(solution(A)) | [
"[email protected]"
] | |
de025b7b8d83683bd44b1e5b4b977f3007113196 | 2a48fb1c369a97ff82974030289613e9ccabdcd7 | /ml/rl/types.py | 0fca511ab279c28aa71bf2520a63f458fde9a3ca | [
"BSD-3-Clause"
] | permissive | Tsolmon17/Horizon | 9f5a192529f424fb8f1f47a4dddca97963c94aa2 | c13522660be6a63b793424db52a1824b0765b22d | refs/heads/master | 2020-06-23T18:40:44.078097 | 2019-07-24T22:23:39 | 2019-07-24T22:34:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,089 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import dataclasses
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Type, TypeVar, Union, cast
import numpy as np
import torch
@dataclass
class BaseDataClass:
def _replace(self, **kwargs):
return cast(type(self), dataclasses.replace(self, **kwargs))
def pin_memory(self):
pinned_memory = {}
for field in dataclasses.fields(self):
f = getattr(self, field.name)
if isinstance(f, (torch.Tensor, BaseDataClass)):
pinned_memory[field.name] = f.pin_memory()
return self._replace(**pinned_memory)
def cuda(self):
cuda_tensor = {}
for field in dataclasses.fields(self):
f = getattr(self, field.name)
if isinstance(f, torch.Tensor):
cuda_tensor[field.name] = f.cuda(non_blocking=True)
elif isinstance(f, BaseDataClass):
cuda_tensor[field.name] = f.cuda()
return self._replace(**cuda_tensor)
@dataclass
class ValuePresence(BaseDataClass):
value: torch.Tensor
presence: Optional[torch.ByteTensor]
@dataclass
class IdFeatureConfig(BaseDataClass):
"""
This describes how to map raw features to model features
"""
feature_id: int # integer feature ID
id_mapping_name: str # key to ModelPreprocessingConfig.id_mapping_config
@dataclass
class IdFeatureBase(BaseDataClass):
"""
User should subclass this class and define each ID feature as a field w/ torch.Tensor
as the type of the field.
"""
@classmethod
    # TODO: This should be marked as abstractmethod but mypy doesn't like it.
# See https://github.com/python/mypy/issues/5374
# @abc.abstractmethod
def get_feature_config(cls) -> Dict[str, IdFeatureConfig]:
"""
Returns mapping from feature name, which must be a field in this dataclass, to
feature config.
"""
raise NotImplementedError
T = TypeVar("T", bound="SequenceFeatureBase")
@dataclass
class FloatFeatureInfo(BaseDataClass):
name: str
feature_id: int
@dataclass
class SequenceFeatureBase(BaseDataClass):
id_features: Optional[IdFeatureBase]
float_features: Optional[ValuePresence]
@classmethod
    # TODO: This should be marked as abstractmethod but mypy doesn't like it.
# See https://github.com/python/mypy/issues/5374
# @abc.abstractmethod
def get_max_length(cls) -> int:
"""
Subclass should return the max-length of this sequence. If the raw data is
longer, feature extractor will truncate the front. If the raw data is shorter,
feature extractor will fill the front with zero.
"""
raise NotImplementedError
@classmethod
def get_float_feature_infos(cls) -> List[FloatFeatureInfo]:
"""
Override this if the sequence has float features associated to it.
Float features should be stored as ID-score-list, where the ID part corresponds
to primary entity ID of the sequence. E.g., if this is a sequence of previously
watched videos, then the key should be video ID.
"""
return []
@classmethod
def prototype(cls: Type[T]) -> T:
float_feature_infos = cls.get_float_feature_infos()
float_features = (
torch.rand(1, cls.get_max_length(), len(float_feature_infos))
if float_feature_infos
else None
)
fields = dataclasses.fields(cls)
id_features = None
for field in fields:
if field.name != "id_features" or not isinstance(field.type, type):
continue
id_feature_fields = dataclasses.fields(field.type)
id_features = field.type( # noqa
**{
f.name: torch.randint(1, (1, cls.get_max_length()))
for f in id_feature_fields
}
)
break
return cls(id_features=id_features, float_features=float_features)
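# --- Illustrative sketch (added for clarity, not part of the original Horizon source) ---
# The docstrings above ask users to subclass IdFeatureBase / SequenceFeatureBase; a minimal
# example of that contract follows. The class names, the feature id 1001 and the mapping
# name "video_id_mapping" are made up for illustration only.
@dataclass
class _ExampleWatchedVideoIds(IdFeatureBase):
    video_id: torch.Tensor  # shape (batch, max_length)
    @classmethod
    def get_feature_config(cls) -> Dict[str, IdFeatureConfig]:
        return {"video_id": IdFeatureConfig(feature_id=1001, id_mapping_name="video_id_mapping")}
@dataclass
class _ExampleWatchHistory(SequenceFeatureBase):
    id_features: Optional[_ExampleWatchedVideoIds]
    @classmethod
    def get_max_length(cls) -> int:
        return 10  # keep the 10 most recent items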
U = TypeVar("U", bound="SequenceFeatures")
@dataclass
class SequenceFeatures(BaseDataClass):
"""
A stub-class for sequence features in the model. All fileds should be subclass of
SequenceFeatureBase above.
"""
@classmethod
def prototype(cls: Type[U]) -> U:
fields = dataclasses.fields(cls)
return cls(**{f.name: f.type.prototype() for f in fields}) # type: ignore
@dataclass
class IdMapping(BaseDataClass):
ids: List[int]
@dataclass
class ModelFeatureConfig(BaseDataClass):
float_feature_infos: List[FloatFeatureInfo]
id_mapping_config: Dict[str, IdMapping]
sequence_features_type: Optional[Type[SequenceFeatures]]
@dataclass
class FeatureVector(BaseDataClass):
float_features: ValuePresence
# sequence_features should ideally be Mapping[str, IdListFeature]; however,
# that doesn't work well with ONNX.
# User is expected to dynamically define the type of id_list_features based
# on the actual features used in the model.
sequence_features: Optional[SequenceFeatureBase] = None
# Experimental: sticking this here instead of putting it in float_features
# because a lot of places derive the shape of float_features from
# normalization parameters.
time_since_first: Optional[torch.Tensor] = None
@dataclass
class ActorOutput(BaseDataClass):
action: torch.Tensor
log_prob: Optional[torch.Tensor] = None
@dataclass
class PreprocessedFeatureVector(BaseDataClass):
float_features: torch.Tensor
# Experimental: sticking this here instead of putting it in float_features
# because a lot of places derive the shape of float_features from
# normalization parameters.
time_since_first: Optional[torch.Tensor] = None
@dataclass
class PreprocessedState(BaseDataClass):
"""
This class makes it easier to plug modules into predictor
"""
state: PreprocessedFeatureVector
@classmethod
def from_tensor(cls, state: torch.Tensor):
assert isinstance(state, torch.Tensor)
return cls(state=PreprocessedFeatureVector(float_features=state))
def __init__(self, state):
super().__init__()
if isinstance(state, torch.Tensor):
raise ValueError("Use from_tensor()")
self.state = state
@dataclass
class PreprocessedStateAction(BaseDataClass):
state: PreprocessedFeatureVector
action: PreprocessedFeatureVector
@classmethod
def from_tensors(cls, state: torch.Tensor, action: torch.Tensor):
assert isinstance(state, torch.Tensor)
assert isinstance(action, torch.Tensor)
return cls(
state=PreprocessedFeatureVector(float_features=state),
action=PreprocessedFeatureVector(float_features=action),
)
def __init__(self, state, action):
super().__init__()
if isinstance(state, torch.Tensor) or isinstance(action, torch.Tensor):
raise ValueError(f"Use from_tensors() {type(state)} {type(action)}")
self.state = state
self.action = action
@dataclass
class RawStateAction(BaseDataClass):
state: FeatureVector
action: FeatureVector
@dataclass
class CommonInput(BaseDataClass):
"""
Base class for all inputs, both raw and preprocessed
"""
reward: torch.Tensor
time_diff: torch.Tensor
step: Optional[torch.Tensor]
not_terminal: torch.Tensor
@dataclass
class PreprocessedBaseInput(CommonInput):
state: PreprocessedFeatureVector
next_state: PreprocessedFeatureVector
@dataclass
class PreprocessedDiscreteDqnInput(PreprocessedBaseInput):
action: torch.Tensor
next_action: torch.Tensor
possible_actions_mask: torch.Tensor
possible_next_actions_mask: torch.Tensor
@dataclass
class PreprocessedParametricDqnInput(PreprocessedBaseInput):
action: PreprocessedFeatureVector
next_action: PreprocessedFeatureVector
possible_actions: PreprocessedFeatureVector
possible_actions_mask: torch.ByteTensor
possible_next_actions: PreprocessedFeatureVector
possible_next_actions_mask: torch.ByteTensor
tiled_next_state: PreprocessedFeatureVector
@dataclass
class PreprocessedPolicyNetworkInput(PreprocessedBaseInput):
action: PreprocessedFeatureVector
next_action: PreprocessedFeatureVector
@dataclass
class PreprocessedMemoryNetworkInput(PreprocessedBaseInput):
action: Union[torch.Tensor, torch.Tensor]
@dataclass
class RawBaseInput(CommonInput):
state: FeatureVector
next_state: FeatureVector
@dataclass
class RawDiscreteDqnInput(RawBaseInput):
action: torch.ByteTensor
next_action: torch.ByteTensor
possible_actions_mask: torch.ByteTensor
possible_next_actions_mask: torch.ByteTensor
def preprocess(
self, state: PreprocessedFeatureVector, next_state: PreprocessedFeatureVector
):
assert isinstance(state, PreprocessedFeatureVector)
assert isinstance(next_state, PreprocessedFeatureVector)
return PreprocessedDiscreteDqnInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal.float(),
state,
next_state,
self.action.float(),
self.next_action.float(),
self.possible_actions_mask.float(),
self.possible_next_actions_mask.float(),
)
def preprocess_tensors(self, state: torch.Tensor, next_state: torch.Tensor):
assert isinstance(state, torch.Tensor)
assert isinstance(next_state, torch.Tensor)
return PreprocessedDiscreteDqnInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal.float(),
PreprocessedFeatureVector(float_features=state),
PreprocessedFeatureVector(float_features=next_state),
self.action.float(),
self.next_action.float(),
self.possible_actions_mask.float(),
self.possible_next_actions_mask.float(),
)
@dataclass
class RawParametricDqnInput(RawBaseInput):
action: FeatureVector
next_action: FeatureVector
possible_actions: FeatureVector
possible_actions_mask: torch.ByteTensor
possible_next_actions: FeatureVector
possible_next_actions_mask: torch.ByteTensor
tiled_next_state: FeatureVector
def preprocess(
self,
state: PreprocessedFeatureVector,
next_state: PreprocessedFeatureVector,
action: PreprocessedFeatureVector,
next_action: PreprocessedFeatureVector,
possible_actions: PreprocessedFeatureVector,
possible_next_actions: PreprocessedFeatureVector,
tiled_next_state: PreprocessedFeatureVector,
):
assert isinstance(state, PreprocessedFeatureVector)
assert isinstance(next_state, PreprocessedFeatureVector)
assert isinstance(action, PreprocessedFeatureVector)
assert isinstance(next_action, PreprocessedFeatureVector)
assert isinstance(possible_actions, PreprocessedFeatureVector)
assert isinstance(possible_next_actions, PreprocessedFeatureVector)
assert isinstance(tiled_next_state, PreprocessedFeatureVector)
return PreprocessedParametricDqnInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
state,
next_state,
action,
next_action,
possible_actions,
self.possible_actions_mask,
possible_next_actions,
self.possible_next_actions_mask,
tiled_next_state,
)
def preprocess_tensors(
self,
state: torch.Tensor,
next_state: torch.Tensor,
action: torch.Tensor,
next_action: torch.Tensor,
possible_actions: torch.Tensor,
possible_next_actions: torch.Tensor,
tiled_next_state: torch.Tensor,
):
assert isinstance(state, torch.Tensor)
assert isinstance(next_state, torch.Tensor)
assert isinstance(action, torch.Tensor)
assert isinstance(next_action, torch.Tensor)
assert isinstance(possible_actions, torch.Tensor)
assert isinstance(possible_next_actions, torch.Tensor)
assert isinstance(tiled_next_state, torch.Tensor)
return PreprocessedParametricDqnInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
PreprocessedFeatureVector(float_features=state),
PreprocessedFeatureVector(float_features=next_state),
PreprocessedFeatureVector(float_features=action),
PreprocessedFeatureVector(float_features=next_action),
PreprocessedFeatureVector(float_features=possible_actions),
self.possible_actions_mask,
PreprocessedFeatureVector(float_features=possible_next_actions),
self.possible_next_actions_mask,
PreprocessedFeatureVector(float_features=tiled_next_state),
)
@dataclass
class RawPolicyNetworkInput(RawBaseInput):
action: FeatureVector
next_action: FeatureVector
def preprocess(
self,
state: PreprocessedFeatureVector,
next_state: PreprocessedFeatureVector,
action: PreprocessedFeatureVector,
next_action: PreprocessedFeatureVector,
):
assert isinstance(state, PreprocessedFeatureVector)
assert isinstance(next_state, PreprocessedFeatureVector)
assert isinstance(action, PreprocessedFeatureVector)
assert isinstance(next_action, PreprocessedFeatureVector)
return PreprocessedPolicyNetworkInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
state,
next_state,
action,
next_action,
)
def preprocess_tensors(
self,
state: torch.Tensor,
next_state: torch.Tensor,
action: torch.Tensor,
next_action: torch.Tensor,
):
assert isinstance(state, torch.Tensor)
assert isinstance(next_state, torch.Tensor)
assert isinstance(action, torch.Tensor)
assert isinstance(next_action, torch.Tensor)
return PreprocessedPolicyNetworkInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
PreprocessedFeatureVector(float_features=state),
PreprocessedFeatureVector(float_features=next_state),
PreprocessedFeatureVector(float_features=action),
PreprocessedFeatureVector(float_features=next_action),
)
@dataclass
class RawMemoryNetworkInput(RawBaseInput):
action: Union[FeatureVector, torch.ByteTensor]
def preprocess(
self,
state: PreprocessedFeatureVector,
next_state: PreprocessedFeatureVector,
action: Optional[torch.Tensor] = None,
):
assert isinstance(state, PreprocessedFeatureVector)
assert isinstance(next_state, PreprocessedFeatureVector)
if action is not None:
assert isinstance(action, torch.Tensor)
return PreprocessedMemoryNetworkInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
state,
next_state,
action,
)
else:
assert isinstance(self.action, torch.ByteTensor)
return PreprocessedMemoryNetworkInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
state,
next_state,
self.action.float(),
)
def preprocess_tensors(
self,
state: torch.Tensor,
next_state: torch.Tensor,
action: Optional[torch.Tensor] = None,
):
assert isinstance(state, torch.Tensor)
assert isinstance(next_state, torch.Tensor)
if action is not None:
assert isinstance(action, torch.Tensor)
return PreprocessedMemoryNetworkInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
PreprocessedFeatureVector(float_features=state),
PreprocessedFeatureVector(float_features=next_state),
action,
)
else:
assert isinstance(self.action, torch.ByteTensor)
return PreprocessedMemoryNetworkInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
PreprocessedFeatureVector(float_features=state),
PreprocessedFeatureVector(float_features=next_state),
self.action.float(),
)
@dataclass
class ExtraData(BaseDataClass):
mdp_id: Optional[
np.ndarray
] = None # Need to use a numpy array because torch doesn't support strings
sequence_number: Optional[torch.Tensor] = None
action_probability: Optional[torch.Tensor] = None
max_num_actions: Optional[int] = None
metrics: Optional[torch.Tensor] = None
@dataclass
class PreprocessedTrainingBatch(BaseDataClass):
training_input: Union[
PreprocessedBaseInput,
PreprocessedDiscreteDqnInput,
PreprocessedParametricDqnInput,
PreprocessedMemoryNetworkInput,
PreprocessedPolicyNetworkInput,
]
extras: Any
def batch_size(self):
return self.training_input.state.float_features.size()[0]
@dataclass
class RawTrainingBatch(BaseDataClass):
training_input: Union[
RawBaseInput, RawDiscreteDqnInput, RawParametricDqnInput, RawPolicyNetworkInput
]
extras: Any
def batch_size(self):
return self.training_input.state.float_features.value.size()[0]
def preprocess(
self,
training_input: Union[
PreprocessedBaseInput,
PreprocessedDiscreteDqnInput,
PreprocessedParametricDqnInput,
PreprocessedMemoryNetworkInput,
PreprocessedPolicyNetworkInput,
],
) -> PreprocessedTrainingBatch:
return PreprocessedTrainingBatch(
training_input=training_input, extras=self.extras
)
@dataclass
class SingleQValue(BaseDataClass):
q_value: torch.Tensor
@dataclass
class AllActionQValues(BaseDataClass):
q_values: torch.Tensor
@dataclass
class MemoryNetworkOutput(BaseDataClass):
mus: torch.Tensor
sigmas: torch.Tensor
logpi: torch.Tensor
reward: torch.Tensor
not_terminal: torch.Tensor
last_step_lstm_hidden: torch.Tensor
last_step_lstm_cell: torch.Tensor
all_steps_lstm_hidden: torch.Tensor
@dataclass
class DqnPolicyActionSet(BaseDataClass):
greedy: int
softmax: int
@dataclass
class SacPolicyActionSet:
greedy: torch.Tensor
greedy_propensity: float
| [
"[email protected]"
] | |
b186256f20c492cec5e909922ed7a5ab603e0044 | 3fd8a3e3f37f9db258df63d8565239b8b8be0f24 | /basic_python/try_range.py | 6b594db09eecc7eed9296be9b513002ca6b94dc2 | [] | no_license | raveena17/workout_problems | 713a3e1a6ec513c1ee8b878519171150c6858aa4 | 004812cb7abf096d6f5d20181a29c16f8daaac55 | refs/heads/master | 2021-03-12T19:27:08.013266 | 2017-09-08T16:11:32 | 2017-09-08T16:11:32 | 102,878,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | a=['apple','mango','orange','banana']
for k in range(len(a)):
    print(k, a[k])
| [
"[email protected]"
] | |
5194708e4cb011418ca453bde54265f86a22abd6 | fd48fba90bb227017ac2da9786d59f9b9130aaf0 | /digsby/src/gui/uberwidgets/formattedinput2/FormattedExpandoTextCtrl.py | eb6cbfe7288e350f9b8f5536a12e7a9dc75c2ac0 | [
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | niterain/digsby | bb05b959c66b957237be68cd8576e3a7c0f7c693 | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | refs/heads/master | 2021-01-18T10:07:10.244382 | 2013-11-03T02:48:25 | 2013-11-03T02:48:25 | 5,991,568 | 1 | 0 | null | 2013-11-03T02:48:26 | 2012-09-28T02:24:50 | Python | UTF-8 | Python | false | false | 8,255 | py | '''
Prepares FormattedExpandoTextCtrl and FormattedTextCtrl, dependent on platform.
On Windows the spellcheck mixin is included; on Mac it is not, because spellcheck is provided by the OS.
'''
from gui.uberwidgets.formattedinput2.fromattedinputevents import TextFormatChangedEvent
import wx
wxMSW = 'wxMSW' in wx.PlatformInfo
wxMac = 'wxMac' in wx.PlatformInfo
formattedstyle = (wx.TE_RICH2 | wx.TE_MULTILINE | wx.TE_CHARWRAP | wx.NO_BORDER | wx.WANTS_CHARS | wx.TE_NOHIDESEL)
from gui.textutil import default_font
from gui.uberwidgets.umenu import UMenu
from util.primitives.fmtstr import fmtstr, FormattingException
from cgui import InputBox
from cgui import ExpandoTextCtrl, EVT_ETC_LAYOUT_NEEDED
FONT_FLAGS = (wx.TEXT_ATTR_FONT_FACE | wx.TEXT_ATTR_FONT_SIZE | wx.TEXT_ATTR_FONT_WEIGHT | wx.TEXT_ATTR_FONT_ITALIC | wx.TEXT_ATTR_FONT_UNDERLINE)
class FormattingInterface(object):
'''
Interface to add text formatting related methods to a TextField object
'''
SetFormat = None
def default_attrs(self):
return wx.TextAttr(wx.BLACK, wx.WHITE, default_font())
def __init__(self, multiFormat = True, format = None):
self.MultiFormat(multiFormat)
self.BindEvents()
self.SetFormat(format if format is not None else self.default_attrs())
def GetFormat(self):
# FIXME: We need to get the style flags working right under OS X Cocoa
# Right now it seems you need to have at least
attrs = self.GetStyle(self.GetInsertionPoint())[1]
if attrs.IsDefault(): # this will return wx.NullFont, not a very useful thing to use
return self.default_attrs()
return attrs
def SetFormat_Single(self, textattr):
'''
Set format for the entire text content
'''
self.SetStyle(0, self.GetLastPosition(), textattr)
self.SetDefaultStyle(textattr)
def SetFormat_Multi(self, textattr):
'''
Set format for just the current selection
'''
start, end = self.GetSelection()
self.SetStyle(start, end, textattr)
def MultiFormat(self, multi):
'''
Turn MultiFormat support for a field on and off
'''
self.isMultiFormat = multi
if multi:
self.SetFormat = self.SetFormat_Multi
else:
self.SetFormat = self.SetFormat_Single
def ApplyStyle(self, **format):
'''
Set the font style using human readable key words and simple values
@param textcolor: wx.Color
@param bgcolor: wx.Color
@param facename: String
@param pointsize: int
@param bold: Bool
@param italic: Bool
@param underline: Bool
'''
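        # Illustrative call (added for clarity, not in the original), e.g. from a toolbar handler:
        #   input_ctrl.ApplyStyle(bold=True, pointsize=12, textcolor=wx.Colour(255, 0, 0))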
textattr = self.GetFormat()
font = textattr.GetFont()
flags = 0
if 'textcolor' in format:
flags |= wx.TEXT_ATTR_TEXT_COLOUR
textattr.SetTextColour(format['textcolor'])
if 'bgcolor' in format:
flags |= wx.TEXT_ATTR_BACKGROUND_COLOUR
textattr.SetBackgroundColour(format['bgcolor'])
if 'facename' in format:
flags |= wx.TEXT_ATTR_FONT_FACE
font.SetFaceName(format['facename'])
if 'pointsize' in format:
flags |= wx.TEXT_ATTR_FONT_SIZE
font.SetPointSize(format['pointsize'])
if 'bold' in format:
flags |= wx.TEXT_ATTR_FONT_WEIGHT
font.SetWeight(wx.FONTWEIGHT_BOLD if format['bold'] else wx.NORMAL,)
if 'italic' in format:
flags |= wx.TEXT_ATTR_FONT_ITALIC
font.SetStyle(wx.FONTSTYLE_ITALIC if format['italic'] else wx.FONTSTYLE_NORMAL)
if 'underline' in format:
flags |= wx.TEXT_ATTR_FONT_UNDERLINE
font.SetUnderlined(format['underline'])
if flags & FONT_FLAGS:
textattr.SetFont(font)
textattr.SetFlags(flags)
self.SetFormat(textattr)
self.SetFocus()
self.AddPendingEvent(TextFormatChangedEvent(self.Id, EventObject = self))
def GenMenu(self):
m = UMenu(self)
# spelling suggestions and options
if isinstance(self, SpellCheckTextCtrlMixin):
if self.AddSuggestionsToMenu(m):
m.AddSep()
m.AddItem(_('Cut'), id = wx.ID_CUT, callback = self.Cut)
m.AddItem(_('Copy'), id = wx.ID_COPY, callback = self.Copy)
m.AddItem(_('Paste'), id = wx.ID_PASTE, callback = self.Paste)
m.AddSep()
m.AddItem(_('Select All'), id = wx.ID_SELECTALL, callback = lambda: self.SetSelection(0, self.GetLastPosition()))
m.AddSep()
from gui.toolbox import add_rtl_checkbox
add_rtl_checkbox(self, m)
return m
def BindEvents(self):
def OnContextMenu(event):
pt = self.ScreenToClient(wx.GetMousePosition())
ht = self.HitTest(pt)
self.SetInsertionPoint(self.XYToPosition(ht[1], ht[2]))
menu = self.GenMenu()
menu.PopupMenu()
Bind = self.Bind
if not wxMac:
Bind(wx.EVT_CONTEXT_MENU, OnContextMenu)
def _expand_event(self):
if wx.IsDestroyed(self):
return
self.AddPendingEvent(wx.CommandEvent(EVT_ETC_LAYOUT_NEEDED, self.Id))
if wxMSW:
from gui.spellchecktextctrlmixin import SpellCheckTextCtrlMixin
class FormattedExpandoTextCtrl(ExpandoTextCtrl, SpellCheckTextCtrlMixin, FormattingInterface):
def __init__(self, parent, style = 0, value = '', multiFormat = True, format = None, validator = wx.DefaultValidator):
ExpandoTextCtrl.__init__(self, parent, wx.ID_ANY, value, wx.DefaultPosition, wx.DefaultSize, style | formattedstyle, validator, value)
FormattingInterface.__init__(self, multiFormat, format)
SpellCheckTextCtrlMixin.__init__(self)
def ForceExpandEvent(self):
_expand_event(self)
class FormattedTextCtrl(InputBox, SpellCheckTextCtrlMixin, FormattingInterface):
def __init__(self, parent, style = 0, value = '', multiFormat = True, format = None, validator = wx.DefaultValidator):
InputBox.__init__(self, parent, wx.ID_ANY, value, wx.DefaultPosition, wx.DefaultSize, style | formattedstyle, validator, value)
FormattingInterface.__init__(self, multiFormat, format)
SpellCheckTextCtrlMixin.__init__(self)
else:
class FormattedExpandoTextCtrl(ExpandoTextCtrl, FormattingInterface):
def __init__(self, parent, style = 0, value = '', multiFormat = True, format = None, validator = wx.DefaultValidator):
ExpandoTextCtrl.__init__(self, parent, wx.ID_ANY, value, wx.DefaultPosition, wx.DefaultSize, style | formattedstyle, validator, value)
FormattingInterface.__init__(self, multiFormat, format)
def HitTestSuggestions(self, *a, **k):
return -1, []
def GetWordAtPosition(self, *a, **k):
return None
def GetReqHeight(self):
return self.GetBestSize().y
def ForceExpandEvent(self):
_expand_event(self)
class FormattedTextCtrl(InputBox, FormattingInterface):
def __init__(self, parent, style = 0, value = '', multiFormat = True, format = None, validator = wx.DefaultValidator):
InputBox.__init__(self, parent, wx.ID_ANY, value, wx.DefaultPosition, wx.DefaultSize, style | formattedstyle, validator, value)
FormattingInterface.__init__(self, multiFormat, format)
def GetReqHeight(self):
return self.GetBestSize().y
def add_rtf_methods(cls):
def GetFormattedValue(self):
if wxMSW:
rtf, plaintext = cls.GetRTF(self), cls.GetValue(self)
return fmtstr(rtf=rtf, plaintext=plaintext)
else:
return fmtstr(plaintext=cls.GetValue(self))
cls.GetFormattedValue = GetFormattedValue
def SetFormattedValue(self, fmtstr):
try:
rtf = fmtstr.format_as('rtf')
except FormattingException:
cls.SetValue(self, fmtstr.format_as('plaintext'))
else:
cls.SetRTF(self, rtf)
cls.SetFormattedValue = SetFormattedValue
add_rtf_methods(FormattedExpandoTextCtrl)
add_rtf_methods(FormattedTextCtrl)
| [
"[email protected]"
] | |
a65f673d101d1225381df4757e6710a796a2a320 | 853d4cec42071b76a80be38c58ffe0fbf9b9dc34 | /venv/Lib/site-packages/networkx/algorithms/bipartite/projection.py | ffa5405958ff0e3d50185832e1b21f40ec34067e | [] | no_license | msainTesting/TwitterAnalysis | 5e1646dbf40badf887a86e125ef30a9edaa622a4 | b1204346508ba3e3922a52380ead5a8f7079726b | refs/heads/main | 2023-08-28T08:29:28.924620 | 2021-11-04T12:36:30 | 2021-11-04T12:36:30 | 424,242,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,002 | py | """One-mode (unipartite) projections of bipartite graphs."""
import networkx as nx
from networkx.utils import not_implemented_for
__all__ = [
"project",
"projected_graph",
"weighted_projected_graph",
"collaboration_weighted_projected_graph",
"overlap_weighted_projected_graph",
"generic_weighted_projected_graph",
]
def projected_graph(B, nodes, multigraph=False):
r"""Returns the projection of B onto one of its node sets.
Returns the graph G that is the projection of the bipartite graph B
onto the specified nodes. They retain their attributes and are connected
in G if they have a common neighbor in B.
Parameters
----------
B : NetworkX graph
The input graph should be bipartite.
nodes : list or iterable
Nodes to project onto (the "bottom" nodes).
multigraph: bool (default=False)
If True return a multigraph where the multiple edges represent multiple
shared neighbors. They edge key in the multigraph is assigned to the
label of the neighbor.
Returns
-------
Graph : NetworkX graph or multigraph
A graph that is the projection onto the given nodes.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> B = nx.path_graph(4)
>>> G = bipartite.projected_graph(B, [1, 3])
>>> list(G)
[1, 3]
>>> list(G.edges())
[(1, 3)]
If nodes `a`, and `b` are connected through both nodes 1 and 2 then
building a multigraph results in two edges in the projection onto
[`a`, `b`]:
>>> B = nx.Graph()
>>> B.add_edges_from([("a", 1), ("b", 1), ("a", 2), ("b", 2)])
>>> G = bipartite.projected_graph(B, ["a", "b"], multigraph=True)
>>> print([sorted((u, v)) for u, v in G.edges()])
[['a', 'b'], ['a', 'b']]
Notes
-----
No attempt is made to verify that the input graph B is bipartite.
Returns a simple graph that is the projection of the bipartite graph B
onto the set of nodes given in list nodes. If multigraph=True then
a multigraph is returned with an edge for every shared neighbor.
Directed graphs are allowed as input. The output will also then
be a directed graph with edges if there is a directed path between
the nodes.
The graph and node properties are (shallow) copied to the projected graph.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
See Also
--------
is_bipartite,
is_bipartite_node_set,
sets,
weighted_projected_graph,
collaboration_weighted_projected_graph,
overlap_weighted_projected_graph,
generic_weighted_projected_graph
"""
if B.is_multigraph():
raise nx.NetworkXError("not defined for multigraphs")
if B.is_directed():
directed = True
if multigraph:
G = nx.MultiDiGraph()
else:
G = nx.DiGraph()
else:
directed = False
if multigraph:
G = nx.MultiGraph()
else:
G = nx.Graph()
G.graph.update(B.graph)
G.add_nodes_from((n, B.nodes[n]) for n in nodes)
for u in nodes:
nbrs2 = {v for nbr in B[u] for v in B[nbr] if v != u}
if multigraph:
for n in nbrs2:
if directed:
links = set(B[u]) & set(B.pred[n])
else:
links = set(B[u]) & set(B[n])
for l in links:
if not G.has_edge(u, n, l):
G.add_edge(u, n, key=l)
else:
G.add_edges_from((u, n) for n in nbrs2)
return G
@not_implemented_for("multigraph")
def weighted_projected_graph(B, nodes, ratio=False):
r"""Returns a weighted projection of B onto one of its node sets.
The weighted projected graph is the projection of the bipartite
network B onto the specified nodes with weights representing the
number of shared neighbors or the ratio between actual shared
neighbors and possible shared neighbors if ``ratio is True`` [1]_.
The nodes retain their attributes and are connected in the resulting
graph if they have an edge to a common node in the original graph.
Parameters
----------
B : NetworkX graph
The input graph should be bipartite.
nodes : list or iterable
Nodes to project onto (the "bottom" nodes).
ratio: Bool (default=False)
If True, edge weight is the ratio between actual shared neighbors
and maximum possible shared neighbors (i.e., the size of the other
node set). If False, edges weight is the number of shared neighbors.
Returns
-------
Graph : NetworkX graph
A graph that is the projection onto the given nodes.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> B = nx.path_graph(4)
>>> G = bipartite.weighted_projected_graph(B, [1, 3])
>>> list(G)
[1, 3]
>>> list(G.edges(data=True))
[(1, 3, {'weight': 1})]
>>> G = bipartite.weighted_projected_graph(B, [1, 3], ratio=True)
>>> list(G.edges(data=True))
[(1, 3, {'weight': 0.5})]
Notes
-----
No attempt is made to verify that the input graph B is bipartite.
The graph and node properties are (shallow) copied to the projected graph.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
See Also
--------
is_bipartite,
is_bipartite_node_set,
sets,
collaboration_weighted_projected_graph,
overlap_weighted_projected_graph,
generic_weighted_projected_graph
projected_graph
References
----------
.. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
of Social Network Analysis. Sage Publications.
"""
if B.is_directed():
pred = B.pred
G = nx.DiGraph()
else:
pred = B.adj
G = nx.Graph()
G.graph.update(B.graph)
G.add_nodes_from((n, B.nodes[n]) for n in nodes)
n_top = float(len(B) - len(nodes))
for u in nodes:
unbrs = set(B[u])
nbrs2 = {n for nbr in unbrs for n in B[nbr]} - {u}
for v in nbrs2:
vnbrs = set(pred[v])
common = unbrs & vnbrs
if not ratio:
weight = len(common)
else:
weight = len(common) / n_top
G.add_edge(u, v, weight=weight)
return G
@not_implemented_for("multigraph")
def collaboration_weighted_projected_graph(B, nodes):
r"""Newman's weighted projection of B onto one of its node sets.
The collaboration weighted projection is the projection of the
bipartite network B onto the specified nodes with weights assigned
using Newman's collaboration model [1]_:
.. math::
w_{u, v} = \sum_k \frac{\delta_{u}^{k} \delta_{v}^{k}}{d_k - 1}
where `u` and `v` are nodes from the bottom bipartite node set,
and `k` is a node of the top node set.
The value `d_k` is the degree of node `k` in the bipartite
network and `\delta_{u}^{k}` is 1 if node `u` is
linked to node `k` in the original bipartite graph or 0 otherwise.
The nodes retain their attributes and are connected in the resulting
graph if have an edge to a common node in the original bipartite
graph.
Parameters
----------
B : NetworkX graph
The input graph should be bipartite.
nodes : list or iterable
Nodes to project onto (the "bottom" nodes).
Returns
-------
Graph : NetworkX graph
A graph that is the projection onto the given nodes.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> B = nx.path_graph(5)
>>> B.add_edge(1, 5)
>>> G = bipartite.collaboration_weighted_projected_graph(B, [0, 2, 4, 5])
>>> list(G)
[0, 2, 4, 5]
>>> for edge in sorted(G.edges(data=True)):
... print(edge)
...
(0, 2, {'weight': 0.5})
(0, 5, {'weight': 0.5})
(2, 4, {'weight': 1.0})
(2, 5, {'weight': 0.5})
Notes
-----
No attempt is made to verify that the input graph B is bipartite.
The graph and node properties are (shallow) copied to the projected graph.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
See Also
--------
is_bipartite,
is_bipartite_node_set,
sets,
weighted_projected_graph,
overlap_weighted_projected_graph,
generic_weighted_projected_graph,
projected_graph
References
----------
.. [1] Scientific collaboration networks: II.
Shortest paths, weighted networks, and centrality,
M. E. J. Newman, Phys. Rev. E 64, 016132 (2001).
"""
if B.is_directed():
pred = B.pred
G = nx.DiGraph()
else:
pred = B.adj
G = nx.Graph()
G.graph.update(B.graph)
G.add_nodes_from((n, B.nodes[n]) for n in nodes)
for u in nodes:
unbrs = set(B[u])
nbrs2 = {n for nbr in unbrs for n in B[nbr] if n != u}
for v in nbrs2:
vnbrs = set(pred[v])
common_degree = (len(B[n]) for n in unbrs & vnbrs)
weight = sum(1.0 / (deg - 1) for deg in common_degree if deg > 1)
G.add_edge(u, v, weight=weight)
return G
@not_implemented_for("multigraph")
def overlap_weighted_projected_graph(B, nodes, jaccard=True):
r"""Overlap weighted projection of B onto one of its node sets.
The overlap weighted projection is the projection of the bipartite
network B onto the specified nodes with weights representing
the Jaccard index between the neighborhoods of the two nodes in the
original bipartite network [1]_:
.. math::
w_{v, u} = \frac{|N(u) \cap N(v)|}{|N(u) \cup N(v)|}
or if the parameter 'jaccard' is False, the fraction of common
neighbors by minimum of both nodes degree in the original
bipartite graph [1]_:
.. math::
w_{v, u} = \frac{|N(u) \cap N(v)|}{min(|N(u)|, |N(v)|)}
The nodes retain their attributes and are connected in the resulting
graph if have an edge to a common node in the original bipartite graph.
Parameters
----------
B : NetworkX graph
The input graph should be bipartite.
nodes : list or iterable
Nodes to project onto (the "bottom" nodes).
jaccard: Bool (default=True)
Returns
-------
Graph : NetworkX graph
A graph that is the projection onto the given nodes.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> B = nx.path_graph(5)
>>> nodes = [0, 2, 4]
>>> G = bipartite.overlap_weighted_projected_graph(B, nodes)
>>> list(G)
[0, 2, 4]
>>> list(G.edges(data=True))
[(0, 2, {'weight': 0.5}), (2, 4, {'weight': 0.5})]
>>> G = bipartite.overlap_weighted_projected_graph(B, nodes, jaccard=False)
>>> list(G.edges(data=True))
[(0, 2, {'weight': 1.0}), (2, 4, {'weight': 1.0})]
Notes
-----
No attempt is made to verify that the input graph B is bipartite.
The graph and node properties are (shallow) copied to the projected graph.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
See Also
--------
is_bipartite,
is_bipartite_node_set,
sets,
weighted_projected_graph,
collaboration_weighted_projected_graph,
generic_weighted_projected_graph,
projected_graph
References
----------
.. [1] Borgatti, S.P. and Halgin, D. In press. Analyzing Affiliation
Networks. In Carrington, P. and Scott, J. (eds) The Sage Handbook
of Social Network Analysis. Sage Publications.
"""
if B.is_directed():
pred = B.pred
G = nx.DiGraph()
else:
pred = B.adj
G = nx.Graph()
G.graph.update(B.graph)
G.add_nodes_from((n, B.nodes[n]) for n in nodes)
for u in nodes:
unbrs = set(B[u])
nbrs2 = {n for nbr in unbrs for n in B[nbr]} - {u}
for v in nbrs2:
vnbrs = set(pred[v])
if jaccard:
wt = float(len(unbrs & vnbrs)) / len(unbrs | vnbrs)
else:
wt = float(len(unbrs & vnbrs)) / min(len(unbrs), len(vnbrs))
G.add_edge(u, v, weight=wt)
return G
@not_implemented_for("multigraph")
def generic_weighted_projected_graph(B, nodes, weight_function=None):
r"""Weighted projection of B with a user-specified weight function.
The bipartite network B is projected on to the specified nodes
with weights computed by a user-specified function. This function
must accept as a parameter the neighborhood sets of two nodes and
return an integer or a float.
The nodes retain their attributes and are connected in the resulting graph
if they have an edge to a common node in the original graph.
Parameters
----------
B : NetworkX graph
The input graph should be bipartite.
nodes : list or iterable
Nodes to project onto (the "bottom" nodes).
weight_function : function
This function must accept as parameters the same input graph
that this function, and two nodes; and return an integer or a float.
The default function computes the number of shared neighbors.
Returns
-------
Graph : NetworkX graph
A graph that is the projection onto the given nodes.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> # Define some custom weight functions
>>> def jaccard(G, u, v):
... unbrs = set(G[u])
... vnbrs = set(G[v])
... return float(len(unbrs & vnbrs)) / len(unbrs | vnbrs)
...
>>> def my_weight(G, u, v, weight="weight"):
... w = 0
... for nbr in set(G[u]) & set(G[v]):
... w += G[u][nbr].get(weight, 1) + G[v][nbr].get(weight, 1)
... return w
...
>>> # A complete bipartite graph with 4 nodes and 4 edges
>>> B = nx.complete_bipartite_graph(2, 2)
>>> # Add some arbitrary weight to the edges
>>> for i, (u, v) in enumerate(B.edges()):
... B.edges[u, v]["weight"] = i + 1
...
>>> for edge in B.edges(data=True):
... print(edge)
...
(0, 2, {'weight': 1})
(0, 3, {'weight': 2})
(1, 2, {'weight': 3})
(1, 3, {'weight': 4})
>>> # By default, the weight is the number of shared neighbors
>>> G = bipartite.generic_weighted_projected_graph(B, [0, 1])
>>> print(list(G.edges(data=True)))
[(0, 1, {'weight': 2})]
>>> # To specify a custom weight function use the weight_function parameter
>>> G = bipartite.generic_weighted_projected_graph(
... B, [0, 1], weight_function=jaccard
... )
>>> print(list(G.edges(data=True)))
[(0, 1, {'weight': 1.0})]
>>> G = bipartite.generic_weighted_projected_graph(
... B, [0, 1], weight_function=my_weight
... )
>>> print(list(G.edges(data=True)))
[(0, 1, {'weight': 10})]
Notes
-----
No attempt is made to verify that the input graph B is bipartite.
The graph and node properties are (shallow) copied to the projected graph.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
See Also
--------
is_bipartite,
is_bipartite_node_set,
sets,
weighted_projected_graph,
collaboration_weighted_projected_graph,
overlap_weighted_projected_graph,
projected_graph
"""
if B.is_directed():
pred = B.pred
G = nx.DiGraph()
else:
pred = B.adj
G = nx.Graph()
if weight_function is None:
def weight_function(G, u, v):
# Notice that we use set(pred[v]) for handling the directed case.
return len(set(G[u]) & set(pred[v]))
G.graph.update(B.graph)
G.add_nodes_from((n, B.nodes[n]) for n in nodes)
for u in nodes:
nbrs2 = {n for nbr in set(B[u]) for n in B[nbr]} - {u}
for v in nbrs2:
weight = weight_function(B, u, v)
G.add_edge(u, v, weight=weight)
return G
def project(B, nodes, create_using=None):
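    # Comment added for clarity (not in the original): thin wrapper that simply forwards
    # to projected_graph(); note that the `create_using` argument is ignored.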
return projected_graph(B, nodes)
| [
"[email protected]"
] | |
89d623c28a996e84828f0a45a67a973512b06bb1 | bc963c3c109c2d39c42f305ae555dc32625b2ba3 | /exp/030.py | d28143b48203c034c57b7b6c2bb676e65f627d7d | [] | no_license | osuossu8/BirdCLEF2021 | 0d03d68f0fdddd2859e8a323df99e56ec47000fd | 99a4f2121355f8bb2c6db330dad90a2fd7b9aaff | refs/heads/main | 2023-05-25T05:50:29.076941 | 2021-06-01T11:31:12 | 2021-06-01T11:31:12 | 359,748,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,075 | py | import ast
import gc
import os
import math
import random
import time
import warnings
import sys
sys.path.append("/root/workspace/BirdCLEF2021")
import albumentations as A
import cv2
import librosa
import numpy as np
import pandas as pd
import soundfile as sf
import colorednoise as cn
import timm
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as torchdata
import torchvision.models as models
from pathlib import Path
from typing import List
from albumentations.pytorch import ToTensorV2
from albumentations.core.transforms_interface import ImageOnlyTransform
from catalyst.core import Callback, CallbackOrder, IRunner
from catalyst.dl import Runner, SupervisedRunner
from sklearn import model_selection
from sklearn import metrics
from timm.models.layers import SelectAdaptivePool2d
from torch.optim.optimizer import Optimizer
from torchlibrosa.stft import LogmelFilterBank, Spectrogram
from torchlibrosa.augmentation import SpecAugmentation
from tqdm import tqdm
import albumentations as A
import audiomentations as AD
from apex import amp
class CFG:
EXP_ID = '030'
######################
# Globals #
######################
seed = 6718
epochs = 55
# cutmix_and_mixup_epochs = 75
train = True
folds = [0]
img_size = 224
main_metric = "epoch_f1_at_03"
minimize_metric = False
######################
# Data #
######################
train_datadir = Path("inputs/train_short_audio")
train_csv = "inputs/train_metadata.csv"
train_soundscape = "inputs/train_soundscape_labels.csv"
######################
# Dataset #
######################
transforms = {
"train": [{"name": "Normalize"}],
"valid": [{"name": "Normalize"}]
}
period = 5
n_mels = 128
fmin = 20
fmax = 16000
n_fft = 2048
hop_length = 512
sample_rate = 32000
melspectrogram_parameters = {
"n_mels": 224,
"fmin": 20,
"fmax": 16000
}
target_columns = [
'acafly', 'acowoo', 'aldfly', 'ameavo', 'amecro',
'amegfi', 'amekes', 'amepip', 'amered', 'amerob',
'amewig', 'amtspa', 'andsol1', 'annhum', 'astfly',
'azaspi1', 'babwar', 'baleag', 'balori', 'banana',
'banswa', 'banwre1', 'barant1', 'barswa', 'batpig1',
'bawswa1', 'bawwar', 'baywre1', 'bbwduc', 'bcnher',
'belkin1', 'belvir', 'bewwre', 'bkbmag1', 'bkbplo',
'bkbwar', 'bkcchi', 'bkhgro', 'bkmtou1', 'bknsti', 'blbgra1',
'blbthr1', 'blcjay1', 'blctan1', 'blhpar1', 'blkpho',
'blsspa1', 'blugrb1', 'blujay', 'bncfly', 'bnhcow', 'bobfly1',
'bongul', 'botgra', 'brbmot1', 'brbsol1', 'brcvir1', 'brebla',
'brncre', 'brnjay', 'brnthr', 'brratt1', 'brwhaw', 'brwpar1',
'btbwar', 'btnwar', 'btywar', 'bucmot2', 'buggna', 'bugtan',
'buhvir', 'bulori', 'burwar1', 'bushti', 'butsal1', 'buwtea',
'cacgoo1', 'cacwre', 'calqua', 'caltow', 'cangoo', 'canwar',
'carchi', 'carwre', 'casfin', 'caskin', 'caster1', 'casvir',
'categr', 'ccbfin', 'cedwax', 'chbant1', 'chbchi', 'chbwre1',
'chcant2', 'chispa', 'chswar', 'cinfly2', 'clanut', 'clcrob',
'cliswa', 'cobtan1', 'cocwoo1', 'cogdov', 'colcha1', 'coltro1',
'comgol', 'comgra', 'comloo', 'commer', 'compau', 'compot1',
'comrav', 'comyel', 'coohaw', 'cotfly1', 'cowscj1', 'cregua1',
'creoro1', 'crfpar', 'cubthr', 'daejun', 'dowwoo', 'ducfly', 'dusfly',
'easblu', 'easkin', 'easmea', 'easpho', 'eastow', 'eawpew', 'eletro',
'eucdov', 'eursta', 'fepowl', 'fiespa', 'flrtan1', 'foxspa', 'gadwal',
'gamqua', 'gartro1', 'gbbgul', 'gbwwre1', 'gcrwar', 'gilwoo',
'gnttow', 'gnwtea', 'gocfly1', 'gockin', 'gocspa', 'goftyr1',
'gohque1', 'goowoo1', 'grasal1', 'grbani', 'grbher3', 'grcfly',
'greegr', 'grekis', 'grepew', 'grethr1', 'gretin1', 'greyel',
'grhcha1', 'grhowl', 'grnher', 'grnjay', 'grtgra', 'grycat',
'gryhaw2', 'gwfgoo', 'haiwoo', 'heptan', 'hergul', 'herthr',
'herwar', 'higmot1', 'hofwoo1', 'houfin', 'houspa', 'houwre',
'hutvir', 'incdov', 'indbun', 'kebtou1', 'killde', 'labwoo', 'larspa',
'laufal1', 'laugul', 'lazbun', 'leafly', 'leasan', 'lesgol', 'lesgre1',
'lesvio1', 'linspa', 'linwoo1', 'littin1', 'lobdow', 'lobgna5', 'logshr',
'lotduc', 'lotman1', 'lucwar', 'macwar', 'magwar', 'mallar3', 'marwre',
'mastro1', 'meapar', 'melbla1', 'monoro1', 'mouchi', 'moudov', 'mouela1',
'mouqua', 'mouwar', 'mutswa', 'naswar', 'norcar', 'norfli', 'normoc', 'norpar',
'norsho', 'norwat', 'nrwswa', 'nutwoo', 'oaktit', 'obnthr1', 'ocbfly1',
'oliwoo1', 'olsfly', 'orbeup1', 'orbspa1', 'orcpar', 'orcwar', 'orfpar',
'osprey', 'ovenbi1', 'pabspi1', 'paltan1', 'palwar', 'pasfly', 'pavpig2',
'phivir', 'pibgre', 'pilwoo', 'pinsis', 'pirfly1', 'plawre1', 'plaxen1',
'plsvir', 'plupig2', 'prowar', 'purfin', 'purgal2', 'putfru1', 'pygnut',
'rawwre1', 'rcatan1', 'rebnut', 'rebsap', 'rebwoo', 'redcro', 'reevir1',
'rehbar1', 'relpar', 'reshaw', 'rethaw', 'rewbla', 'ribgul', 'rinkin1',
'roahaw', 'robgro', 'rocpig', 'rotbec', 'royter1', 'rthhum', 'rtlhum',
'ruboro1', 'rubpep1', 'rubrob', 'rubwre1', 'ruckin', 'rucspa1', 'rucwar',
'rucwar1', 'rudpig', 'rudtur', 'rufhum', 'rugdov', 'rumfly1', 'runwre1',
'rutjac1', 'saffin', 'sancra', 'sander', 'savspa', 'saypho', 'scamac1',
'scatan', 'scbwre1', 'scptyr1', 'scrtan1', 'semplo', 'shicow', 'sibtan2',
'sinwre1', 'sltred', 'smbani', 'snogoo', 'sobtyr1', 'socfly1', 'solsan',
'sonspa', 'soulap1', 'sposan', 'spotow', 'spvear1', 'squcuc1', 'stbori',
'stejay', 'sthant1', 'sthwoo1', 'strcuc1', 'strfly1', 'strsal1', 'stvhum2',
'subfly', 'sumtan', 'swaspa', 'swathr', 'tenwar', 'thbeup1', 'thbkin',
'thswar1', 'towsol', 'treswa', 'trogna1', 'trokin', 'tromoc', 'tropar',
'tropew1', 'tuftit', 'tunswa', 'veery', 'verdin', 'vigswa', 'warvir',
'wbwwre1', 'webwoo1', 'wegspa1', 'wesant1', 'wesblu', 'weskin', 'wesmea',
'westan', 'wewpew', 'whbman1', 'whbnut', 'whcpar', 'whcsee1', 'whcspa',
'whevir', 'whfpar1', 'whimbr', 'whiwre1', 'whtdov', 'whtspa', 'whwbec1',
'whwdov', 'wilfly', 'willet1', 'wilsni1', 'wiltur', 'wlswar', 'wooduc',
'woothr', 'wrenti', 'y00475', 'yebcha', 'yebela1', 'yebfly', 'yebori1',
'yebsap', 'yebsee1', 'yefgra1', 'yegvir', 'yehbla', 'yehcar1', 'yelgro',
'yelwar', 'yeofly1', 'yerwar', 'yeteup1', 'yetvir'] \
+ ['nocall']
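    # Comment added for clarity (not in the original): 397 bird species plus the extra
    # 'nocall' label, matching num_classes = 398 below.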
######################
# Loaders #
######################
loader_params = {
"train": {
"batch_size": 64,
"num_workers": 0,
"shuffle": True
},
"valid": {
"batch_size": 128,
"num_workers": 0,
"shuffle": False
}
}
######################
# Split #
######################
split = "StratifiedKFold"
split_params = {
"n_splits": 5,
"shuffle": True,
"random_state": 6718
}
######################
# Model #
######################
base_model_name = "tf_efficientnet_b0_ns"
pooling = "max"
pretrained = True
num_classes = 398
in_channels = 1
N_FOLDS = 5
LR = 1e-3
apex = True
T_max=10
min_lr=1e-6
def set_seed(seed=42):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def get_device() -> torch.device:
return torch.device("cuda" if torch.cuda.is_available() else "cpu")
def init_logger(log_file='train.log'):
from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler
logger = getLogger(__name__)
logger.setLevel(INFO)
handler1 = StreamHandler()
handler1.setFormatter(Formatter("%(message)s"))
handler2 = FileHandler(filename=log_file)
handler2.setFormatter(Formatter("%(message)s"))
logger.addHandler(handler1)
logger.addHandler(handler2)
return logger
class WaveformDataset(torchdata.Dataset):
def __init__(self,
df: pd.DataFrame,
mode='train'):
self.df = df
self.mode = mode
def __len__(self):
return len(self.df)
def __getitem__(self, idx: int):
sample = self.df.loc[idx, :]
wav_path = sample["filepath"]
start_sec = sample["start_seconds"]
end_sec = sample["end_seconds"]
len_label = sample["len_label"]
labels = sample["primary_label"]
secondary_labels = sample["secondary_labels"]
y, sr = sf.read(wav_path)
len_y = len(y)
effective_length = sr * 5
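        # Comment added for clarity (not in the original): every clip is forced to exactly
        # 5 seconds; shorter audio is zero-padded at a random offset (training) or at the
        # start (validation), and longer audio is randomly cropped (training) or taken from 0.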
if len_y < effective_length:
new_y = np.zeros(effective_length, dtype=y.dtype)
if self.mode == 'train':
start = np.random.randint(effective_length - len_y)
else:
start = 0
new_y[start:start + len_y] = y
y = new_y.astype(np.float32)
elif len_y > effective_length:
if self.mode == 'train':
start = np.random.randint(len_y - effective_length)
else:
start = 0
y = y[start:start + effective_length].astype(np.float32)
else:
y = y.astype(np.float32)
y = np.nan_to_num(y)
y = audio_augmenter(y)
y = np.nan_to_num(y)
all_targets = np.zeros(len(CFG.target_columns), dtype=float)
targets = np.zeros(len(CFG.target_columns), dtype=float)
for ebird_code in labels.split():
targets[CFG.target_columns.index(ebird_code)] = 1.0
all_targets[CFG.target_columns.index(ebird_code)] = 1.0
secondary_targets = np.zeros(len(CFG.target_columns), dtype=float)
if secondary_labels is not None:
for ebird_code in secondary_labels.split():
if ebird_code == 'rocpig1':
ebird_code = 'rocpig'
secondary_targets[CFG.target_columns.index(ebird_code)] = 1.0
all_targets[CFG.target_columns.index(ebird_code)] = 1.0
return {
"image": y.reshape(1, -1),
"all_targets": all_targets,
"primary_targets": targets,
"secondary_targets": secondary_targets
}
def get_transforms(phase: str):
transforms = CFG.transforms
if transforms is None:
return None
else:
if transforms[phase] is None:
return None
trns_list = []
for trns_conf in transforms[phase]:
trns_name = trns_conf["name"]
trns_params = {} if trns_conf.get("params") is None else \
trns_conf["params"]
if globals().get(trns_name) is not None:
trns_cls = globals()[trns_name]
trns_list.append(trns_cls(**trns_params))
if len(trns_list) > 0:
return Compose(trns_list)
else:
return None
class Normalize:
def __call__(self, y: np.ndarray):
max_vol = np.abs(y).max()
y_vol = y * 1 / max_vol
return np.asfortranarray(y_vol)
# Mostly taken from https://www.kaggle.com/hidehisaarai1213/rfcx-audio-data-augmentation-japanese-english
class AudioTransform:
def __init__(self, always_apply=False, p=0.5):
self.always_apply = always_apply
self.p = p
def __call__(self, y: np.ndarray):
if self.always_apply:
return self.apply(y)
else:
if np.random.rand() < self.p:
return self.apply(y)
else:
return y
def apply(self, y: np.ndarray):
raise NotImplementedError
class Compose:
def __init__(self, transforms: list):
self.transforms = transforms
def __call__(self, y: np.ndarray):
for trns in self.transforms:
y = trns(y)
return y
class OneOf:
def __init__(self, transforms: list):
self.transforms = transforms
def __call__(self, y: np.ndarray):
n_trns = len(self.transforms)
trns_idx = np.random.choice(n_trns)
trns = self.transforms[trns_idx]
return trns(y)
class GaussianNoiseSNR(AudioTransform):
def __init__(self, always_apply=False, p=0.5, min_snr=5.0, max_snr=20.0, **kwargs):
super().__init__(always_apply, p)
self.min_snr = min_snr
self.max_snr = max_snr
def apply(self, y: np.ndarray, **params):
snr = np.random.uniform(self.min_snr, self.max_snr)
a_signal = np.sqrt(y ** 2).max()
a_noise = a_signal / (10 ** (snr / 20))
white_noise = np.random.randn(len(y))
a_white = np.sqrt(white_noise ** 2).max()
augmented = (y + white_noise * 1 / a_white * a_noise).astype(y.dtype)
return augmented
class PinkNoiseSNR(AudioTransform):
def __init__(self, always_apply=False, p=0.5, min_snr=5.0, max_snr=20.0, **kwargs):
super().__init__(always_apply, p)
self.min_snr = min_snr
self.max_snr = max_snr
def apply(self, y: np.ndarray, **params):
snr = np.random.uniform(self.min_snr, self.max_snr)
a_signal = np.sqrt(y ** 2).max()
a_noise = a_signal / (10 ** (snr / 20))
pink_noise = cn.powerlaw_psd_gaussian(1, len(y))
a_pink = np.sqrt(pink_noise ** 2).max()
augmented = (y + pink_noise * 1 / a_pink * a_noise).astype(y.dtype)
return augmented
class TimeShift(AudioTransform):
def __init__(self, always_apply=False, p=0.5, max_shift_second=2, sr=32000, padding_mode="zero"):
super().__init__(always_apply, p)
assert padding_mode in [
"replace", "zero"], "`padding_mode` must be either 'replace' or 'zero'"
self.max_shift_second = max_shift_second
self.sr = sr
self.padding_mode = padding_mode
def apply(self, y: np.ndarray, **params):
shift = np.random.randint(-self.sr * self.max_shift_second,
self.sr * self.max_shift_second)
augmented = np.roll(y, shift)
return augmented
class VolumeControl(AudioTransform):
def __init__(self, always_apply=False, p=0.5, db_limit=10, mode="uniform"):
super().__init__(always_apply, p)
        assert mode in ["uniform", "fade", "cosine", "sine"], \
            "`mode` must be one of 'uniform', 'fade', 'cosine', 'sine'"
self.db_limit = db_limit
self.mode = mode
def apply(self, y: np.ndarray, **params):
db = np.random.uniform(-self.db_limit, self.db_limit)
if self.mode == "uniform":
db_translated = 10 ** (db / 20)
elif self.mode == "fade":
lin = np.arange(len(y))[::-1] / (len(y) - 1)
db_translated = 10 ** (db * lin / 20)
elif self.mode == "cosine":
cosine = np.cos(np.arange(len(y)) / len(y) * np.pi * 2)
db_translated = 10 ** (db * cosine / 20)
else:
sine = np.sin(np.arange(len(y)) / len(y) * np.pi * 2)
db_translated = 10 ** (db * sine / 20)
augmented = y * db_translated
return augmented
def init_layer(layer):
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, "bias"):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.0)
def init_weights(model):
classname = model.__class__.__name__
if classname.find("Conv2d") != -1:
nn.init.xavier_uniform_(model.weight, gain=np.sqrt(2))
model.bias.data.fill_(0)
elif classname.find("BatchNorm") != -1:
model.weight.data.normal_(1.0, 0.02)
model.bias.data.fill_(0)
elif classname.find("GRU") != -1:
for weight in model.parameters():
if len(weight.size()) > 1:
                nn.init.orthogonal_(weight.data)
elif classname.find("Linear") != -1:
model.weight.data.normal_(0, 0.01)
model.bias.data.zero_()
def interpolate(x: torch.Tensor, ratio: int):
"""Interpolate data in time domain. This is used to compensate the
resolution reduction in downsampling of a CNN.
Args:
x: (batch_size, time_steps, classes_num)
ratio: int, ratio to interpolate
Returns:
upsampled: (batch_size, time_steps * ratio, classes_num)
"""
(batch_size, time_steps, classes_num) = x.shape
upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
return upsampled
def pad_framewise_output(framewise_output: torch.Tensor, frames_num: int):
"""Pad framewise_output to the same length as input frames. The pad value
is the same as the value of the last frame.
Args:
framewise_output: (batch_size, frames_num, classes_num)
frames_num: int, number of frames to pad
Outputs:
output: (batch_size, frames_num, classes_num)
"""
output = F.interpolate(
framewise_output.unsqueeze(1),
size=(frames_num, framewise_output.size(2)),
align_corners=True,
mode="bilinear").squeeze(1)
return output
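# Shape sketch for the two helpers above (illustrative values only): a
# segmentwise output of shape (batch, 10, classes) repeated with ratio 32
# becomes (batch, 320, classes); pad_framewise_output then resizes it to the
# exact number of spectrogram frames.
#
#   seg = torch.zeros(2, 10, 398)
#   up = interpolate(seg, 32)                   # (2, 320, 398)
#   framewise = pad_framewise_output(up, 321)   # (2, 321, 398)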
class AttBlockV2(nn.Module):
def __init__(self,
in_features: int,
out_features: int,
activation="linear"):
super().__init__()
self.activation = activation
self.att = nn.Conv1d(
in_channels=in_features,
out_channels=out_features,
kernel_size=1,
stride=1,
padding=0,
bias=True)
self.cla = nn.Conv1d(
in_channels=in_features,
out_channels=out_features,
kernel_size=1,
stride=1,
padding=0,
bias=True)
self.init_weights()
def init_weights(self):
init_layer(self.att)
init_layer(self.cla)
def forward(self, x):
# x: (n_samples, n_in, n_time)
norm_att = torch.softmax(torch.tanh(self.att(x)), dim=-1)
cla = self.nonlinear_transform(self.cla(x))
x = torch.sum(norm_att * cla, dim=2)
return x, norm_att, cla
def nonlinear_transform(self, x):
if self.activation == 'linear':
return x
elif self.activation == 'sigmoid':
return torch.sigmoid(x)
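def _att_block_shape_check():
    # Illustrative only and never called by the training loop: attention
    # pooling maps (batch, 1024, time) features to clip-level class scores.
    # 398 matches model_config["classes_num"] defined below.
    att = AttBlockV2(1024, 398, activation='sigmoid')
    clip, norm_att, segment = att(torch.randn(2, 1024, 157))
    assert clip.shape == (2, 398)
    assert segment.shape == (2, 398, 157)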
class PANNsDense121Att(nn.Module):
def __init__(self, sample_rate: int, window_size: int, hop_size: int,
mel_bins: int, fmin: int, fmax: int, classes_num: int, apply_aug: bool, top_db=None):
super().__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
self.interpolate_ratio = 32 # Downsampled ratio
self.apply_aug = apply_aug
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(
n_fft=window_size,
hop_length=hop_size,
win_length=window_size,
window=window,
center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(
sr=sample_rate,
n_fft=window_size,
n_mels=mel_bins,
fmin=fmin,
fmax=fmax,
ref=ref,
amin=amin,
top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(
time_drop_width=64,
time_stripes_num=2,
freq_drop_width=8,
freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(mel_bins)
self.fc1 = nn.Linear(1024, 1024, bias=True)
self.att_block = AttBlockV2(1024, classes_num, activation='sigmoid')
self.densenet_features = models.densenet121(pretrained=True).features
self.init_weight()
def init_weight(self):
init_bn(self.bn0)
init_layer(self.fc1)
def cnn_feature_extractor(self, x):
x = self.densenet_features(x)
return x
def preprocess(self, input_x, mixup_lambda=None):
x = self.spectrogram_extractor(input_x) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
frames_num = x.shape[2]
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training and self.apply_aug:
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and self.apply_aug and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
return x, frames_num
def forward(self, input_data):
# input_x, mixup_lambda = input_data
input_x = input_data
mixup_lambda = None
"""
Input: (batch_size, data_length)"""
b, c, s = input_x.shape
input_x = input_x.reshape(b*c, s)
x, frames_num = self.preprocess(input_x, mixup_lambda=mixup_lambda)
if mixup_lambda is not None:
b = (b*c)//2
c = 1
# Output shape (batch size, channels, time, frequency)
x = x.expand(x.shape[0], 3, x.shape[2], x.shape[3])
x = self.cnn_feature_extractor(x)
# Aggregate in frequency axis
x = torch.mean(x, dim=3)
x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = x.transpose(1, 2)
x = F.relu_(self.fc1(x))
x = x.transpose(1, 2)
x = F.dropout(x, p=0.5, training=self.training)
(clipwise_output, norm_att, segmentwise_output) = self.att_block(x)
segmentwise_output = segmentwise_output.transpose(1, 2)
# Get framewise output
framewise_output = interpolate(segmentwise_output,
self.interpolate_ratio)
framewise_output = pad_framewise_output(framewise_output, frames_num)
frame_shape = framewise_output.shape
clip_shape = clipwise_output.shape
output_dict = {
'framewise_output': framewise_output.reshape(b, c, frame_shape[1],frame_shape[2]),
'clipwise_output': clipwise_output.reshape(b, c, clip_shape[1]),
}
return output_dict
EPSILON_FP16 = 1e-5
class SedScaledPosNegFocalLoss(nn.Module):
def __init__(self, gamma=0.0, alpha_1=1.0, alpha_0=1.0, secondary_factor=1.0):
super().__init__()
self.loss_fn = nn.BCELoss(reduction='none')
self.secondary_factor = secondary_factor
self.gamma = gamma
self.alpha_1 = alpha_1
self.alpha_0 = alpha_0
self.loss_keys = ["bce_loss", "F_loss", "FScaled_loss", "F_loss_0", "F_loss_1"]
def forward(self, y_pred, y_target):
y_true = y_target[0].float() # y_target["all_targets"]
y_sec_true = y_target[1].float() # y_target["secondary_targets"]
# bs, s, o = y_true.shape
# Sigmoid has already been applied in the model
y_pred = torch.clamp(y_pred, min=EPSILON_FP16, max=1.0-EPSILON_FP16)
y_pred = y_pred.float()
# y_pred = y_pred.reshape(bs*s,o)
# y_true = y_true.reshape(bs*s,o)
# y_sec_true = y_sec_true.reshape(bs*s,o)
with torch.no_grad():
y_all_ones_mask = torch.ones_like(y_true, requires_grad=False)
y_all_zeros_mask = torch.zeros_like(y_true, requires_grad=False)
y_all_mask = torch.where(y_true > 0.0, y_all_ones_mask, y_all_zeros_mask)
            # Down-weight secondary (weak) labels by `secondary_factor`; every
            # other position keeps a weight of 1.
            y_ones_mask = torch.ones_like(y_sec_true, requires_grad=False)
            y_secondary_factor_mask = torch.ones_like(y_sec_true, requires_grad=False) * self.secondary_factor
            y_secondary_mask = torch.where(y_sec_true > 0.0, y_secondary_factor_mask, y_ones_mask)
bce_loss = self.loss_fn(y_pred, y_true)
pt = torch.exp(-bce_loss)
F_loss_0 = (self.alpha_0*(1-y_all_mask)) * (1-pt)**self.gamma * bce_loss
F_loss_1 = (self.alpha_1*y_all_mask) * (1-pt)**self.gamma * bce_loss
F_loss = F_loss_0 + F_loss_1
FScaled_loss = y_secondary_mask*F_loss
FScaled_loss = FScaled_loss.mean()
# return FScaled_loss, {"bce_loss": bce_loss.mean(), "F_loss_1": F_loss_1.mean(), "F_loss_0": F_loss_0.mean(), "F_loss": F_loss.mean(), "FScaled_loss": FScaled_loss }
return FScaled_loss
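def _sed_loss_smoke_test():
    # Minimal sketch, not called during training: clip-wise probabilities in
    # (0, 1) scored against all-label and secondary-label multi-hot targets
    # for the 398-class setup used in this script.
    criterion = SedScaledPosNegFocalLoss(gamma=0.0, secondary_factor=0.5)
    y_pred = torch.rand(4, 398)
    y_all = torch.zeros(4, 398)
    y_all[:, 0] = 1.0
    y_sec = torch.zeros(4, 398)
    return criterion(y_pred, (y_all, y_sec))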
# ====================================================
# Training helper functions
# ====================================================
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class MetricMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.y_true = []
self.y_pred = []
def update(self, y_true, y_pred):
self.y_true.extend(y_true.cpu().detach().numpy().tolist())
# self.y_pred.extend(torch.sigmoid(y_pred).cpu().detach().numpy().tolist())
self.y_pred.extend(y_pred["clipwise_output"].max(axis=1)[0].cpu().detach().numpy().tolist())
@property
def avg(self):
self.f1_03 = metrics.f1_score(np.array(self.y_true), np.array(self.y_pred) > 0.3, average="micro")
self.f1_05 = metrics.f1_score(np.array(self.y_true), np.array(self.y_pred) > 0.5, average="micro")
return {
"f1_at_03" : self.f1_03,
"f1_at_05" : self.f1_05,
}
def loss_fn(y_pred, y_all, y_second):
loss_fct = SedScaledPosNegFocalLoss()
loss = loss_fct(y_pred["clipwise_output"], (y_all, y_second))
return loss
def train_fn(model, data_loader, device, optimizer, scheduler):
model.train()
losses = AverageMeter()
scores = MetricMeter()
tk0 = tqdm(data_loader, total=len(data_loader))
for data in tk0:
optimizer.zero_grad()
inputs = data['image'].to(device)
targets = data['all_targets'].to(device)
secondary_targets = data['secondary_targets'].to(device)
outputs = model(inputs)
loss = loss_fn(outputs, targets, secondary_targets)
if CFG.apex:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
scheduler.step()
losses.update(loss.item(), inputs.size(0))
scores.update(targets, outputs)
tk0.set_postfix(loss=losses.avg)
return scores.avg, losses.avg
def valid_fn(model, data_loader, device):
model.eval()
losses = AverageMeter()
scores = MetricMeter()
tk0 = tqdm(data_loader, total=len(data_loader))
valid_preds = []
with torch.no_grad():
for data in tk0:
inputs = data['image'].to(device)
targets = data['all_targets'].to(device)
secondary_targets = data['secondary_targets'].to(device)
outputs = model(inputs)
loss = loss_fn(outputs, targets, secondary_targets)
losses.update(loss.item(), inputs.size(0))
scores.update(targets, outputs)
tk0.set_postfix(loss=losses.avg)
return scores.avg, losses.avg
mean = (0.485, 0.456, 0.406) # RGB
std = (0.229, 0.224, 0.225) # RGB
albu_transforms = {
'train' : A.Compose([
A.HorizontalFlip(p=0.5),
A.Cutout(max_h_size=5*4, max_w_size=16*3, p=0.3),
A.Normalize(mean, std),
]),
'valid' : A.Compose([
A.Normalize(mean, std),
]),
}
audio_augmenter = Compose([
OneOf([
GaussianNoiseSNR(min_snr=10),
PinkNoiseSNR(min_snr=10)
]),
TimeShift(sr=32000),
VolumeControl(p=0.5),
Normalize()
])
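# Sanity-check sketch for the augmentation pipeline (illustrative only):
#
#   clip = np.random.randn(32000 * 5)
#   augmented = audio_augmenter(clip)   # same shape, peak-normalised to |y| <= 1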
model_config = {
"sample_rate": 32000,
"window_size": 1024,
"hop_size": 320,
"mel_bins": 64,
"fmin": 50,
"fmax": 14000,
"classes_num": 398,
"apply_aug": False, # True,
"top_db": None
}
OUTPUT_DIR = f'outputs/{CFG.EXP_ID}/'
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
warnings.filterwarnings("ignore")
logger = init_logger(log_file=Path("logs") / f"train_{CFG.EXP_ID}.log")
# environment
set_seed(CFG.seed)
device = get_device()
# data
# train = pd.read_csv('inputs/image_folds.csv')
# train['filepath'] = train['filepath'].map(lambda x: 'inputs/train_images/' + '/'.join(x.split('/')[4:]))
train = pd.read_csv('inputs/folds.csv')
train['filepath'] = train['filepath'].map(lambda x: 'inputs/' + '/'.join(x.split('/')[3:]))
short_audio = train.loc[:62873].copy()
long_audio = train.loc[62874:].copy()
meta = pd.read_csv('inputs/train_metadata.csv')
short_audio['secondary_labels'] = meta['secondary_labels'].copy()
short_audio['secondary_labels'] = short_audio['secondary_labels'].map(lambda x: ' '.join(ast.literal_eval(x)))
long_audio['secondary_labels'] = None
short_audio['rating'] = meta['rating'].copy()
long_audio['rating'] = 999 # -1
new_train = pd.concat([short_audio, long_audio]).reset_index(drop=True)
# main loop
for fold in range(5):
if fold not in CFG.folds:
continue
logger.info("=" * 120)
logger.info(f"Fold {fold} Training")
logger.info("=" * 120)
trn_df = new_train[new_train['kfold']!=fold].reset_index(drop=True)
val_df = new_train[new_train.kfold == fold].reset_index(drop=True)
loaders = {
phase: torchdata.DataLoader(
WaveformDataset(
df_,
mode=phase
),
**CFG.loader_params[phase]) # type: ignore
for phase, df_ in zip(["train", "valid"], [trn_df, val_df])
}
model = PANNsDense121Att(**model_config)
optimizer = torch.optim.Adam(model.parameters(), lr=CFG.LR)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=CFG.T_max, eta_min=CFG.min_lr, last_epoch=-1)
model = model.to(device)
model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)
p = 0
min_loss = 999
best_score = -np.inf
for epoch in range(CFG.epochs):
logger.info("Starting {} epoch...".format(epoch+1))
start_time = time.time()
train_avg, train_loss = train_fn(model, loaders['train'], device, optimizer, scheduler)
valid_avg, valid_loss = valid_fn(model, loaders['valid'], device)
scheduler.step()
elapsed = time.time() - start_time
logger.info(f'Epoch {epoch+1} - avg_train_loss: {train_loss:.5f} avg_val_loss: {valid_loss:.5f} time: {elapsed:.0f}s')
logger.info(f"Epoch {epoch+1} - train_f1_at_03:{train_avg['f1_at_03']:0.5f} valid_f1_at_03:{valid_avg['f1_at_03']:0.5f}")
logger.info(f"Epoch {epoch+1} - train_f1_at_05:{train_avg['f1_at_05']:0.5f} valid_f1_at_05:{valid_avg['f1_at_05']:0.5f}")
if valid_avg['f1_at_03'] > best_score:
logger.info(f">>>>>>>> Model Improved From {best_score} ----> {valid_avg['f1_at_03']}")
logger.info(f"other scores here... {valid_avg['f1_at_03']}, {valid_avg['f1_at_05']}")
torch.save(model.state_dict(), OUTPUT_DIR+f'fold-{fold}.bin')
best_score = valid_avg['f1_at_03']
| [
"[email protected]"
] | |
96195a397e80348016e9ddf846478112f9dadba0 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2018_02_01/models/connectivity_parameters_py3.py | 2f6d375168c517e8f45f0201c2a3c695caf2c4b8 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,034 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectivityParameters(Model):
"""Parameters that determine how the connectivity check will be performed.
All required parameters must be populated in order to send to Azure.
:param source: Required.
:type source: ~azure.mgmt.network.v2018_02_01.models.ConnectivitySource
:param destination: Required.
:type destination:
~azure.mgmt.network.v2018_02_01.models.ConnectivityDestination
:param protocol: Network protocol. Possible values include: 'Tcp', 'Http',
'Https', 'Icmp'
:type protocol: str or ~azure.mgmt.network.v2018_02_01.models.Protocol
:param protocol_configuration:
:type protocol_configuration:
~azure.mgmt.network.v2018_02_01.models.ProtocolConfiguration
"""
_validation = {
'source': {'required': True},
'destination': {'required': True},
}
_attribute_map = {
'source': {'key': 'source', 'type': 'ConnectivitySource'},
'destination': {'key': 'destination', 'type': 'ConnectivityDestination'},
'protocol': {'key': 'protocol', 'type': 'str'},
'protocol_configuration': {'key': 'protocolConfiguration', 'type': 'ProtocolConfiguration'},
}
def __init__(self, *, source, destination, protocol=None, protocol_configuration=None, **kwargs) -> None:
super(ConnectivityParameters, self).__init__(**kwargs)
self.source = source
self.destination = destination
self.protocol = protocol
self.protocol_configuration = protocol_configuration
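# Example construction (illustrative sketch; the ConnectivitySource and
# ConnectivityDestination attribute names below are assumed from this
# generated models package, and `vm_id` is a placeholder):
#
#   params = ConnectivityParameters(
#       source=ConnectivitySource(resource_id=vm_id),
#       destination=ConnectivityDestination(address='10.0.0.4', port=443),
#       protocol='Tcp')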
| [
"[email protected]"
] | |
079f73405b82e6ed8e06092b520bc341fcf6c5bb | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /neural_guided_symbolic_regression/mcts/tree_test.py | 3e108c8496714cd1df5f3dfda77470f2b6451863 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 12,674 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import mock
import numpy as np
import tensorflow.compat.v1 as tf
from neural_guided_symbolic_regression.mcts import tree
class UtilitiesTest(parameterized.TestCase, tf.test.TestCase):
def test_get_max_values_indices(self):
array = [0., 0.3, 0.1, 0.3, 0.3]
np.testing.assert_array_equal(tree._get_max_values_indices(array),
[1, 3, 4])
def test_random_argmax(self):
    # The maximum value occurs at indices [1, 3, 4].
array = [0., 0.3, 0.1, 0.3, 0.3]
random_state = np.random.RandomState(2)
    # Make sure the returned indices vary across calls.
    # The exact sequence is fixed for the given random_state.
self.assertEqual(tree.random_argmax(array, random_state), 1)
self.assertEqual(tree.random_argmax(array, random_state), 3)
self.assertEqual(tree.random_argmax(array, random_state), 1)
self.assertEqual(tree.random_argmax(array, random_state), 4)
self.assertEqual(tree.random_argmax(array, random_state), 4)
@parameterized.parameters([
# All states are terminal and there is one unique maximum.
([True, True, True], [1., 3., 2.], True, 3., 1),
([True, True, True], [1., 3., 2.], False, 3., 1),
# There are non-terminal states and ignore_nonterminal is False.
# In these cases, the expected max_state is always the one with largest
# reward_value and no matter whether it is terminal.
([False, True, True], [1., 3., 2.], False, 3., 1),
([True, False, True], [1., 3., 2.], False, 3., 1),
([True, True, False], [1., 3., 2.], False, 3., 1),
([True, False, False], [1., 3., 2.], False, 3., 1),
# There are non-terminal states and ignore_nonterminal is True.
([False, True, True], [1., 3., 2.], True, 3., 1),
([True, False, True], [1., 3., 2.], True, 2., 2),
([True, True, False], [1., 3., 2.], True, 3., 1),
([True, False, False], [1., 3., 2.], True, 1., 0),
])
def test_max_reward_and_state_unique_maximum(self,
states_terminal,
reward_values,
ignore_nonterminal,
expected_max_reward_value,
expected_max_state_index):
mock_state0 = mock.MagicMock()
mock_state0.is_terminal.return_value = states_terminal[0]
mock_state1 = mock.MagicMock()
mock_state1.is_terminal.return_value = states_terminal[1]
mock_state2 = mock.MagicMock()
mock_state2.is_terminal.return_value = states_terminal[2]
mock_states_list = [mock_state0, mock_state1, mock_state2]
max_reward_value, max_state = tree.max_reward_and_state(
reward_values=reward_values,
states_list=mock_states_list,
ignore_nonterminal=ignore_nonterminal)
self.assertAlmostEqual(max_reward_value, expected_max_reward_value)
self.assertEqual(max_state, mock_states_list[expected_max_state_index])
@parameterized.parameters([
# All states are terminal and there are two state with maximum reward
# value.
([True, True, True], [1., 3., 3.], True, 3., [1, 2, 2, 1, 1, 2]),
([True, True, True], [1., 3., 3.], False, 3., [1, 2, 2, 1, 1, 2]),
# There are non-terminal states and ignore_nonterminal is False.
# The returned results will not change.
([False, True, True], [1., 3., 3.], False, 3., [1, 2, 2, 1, 1, 2]),
([True, False, True], [1., 3., 3.], False, 3., [1, 2, 2, 1, 1, 2]),
([True, True, False], [1., 3., 3.], False, 3., [1, 2, 2, 1, 1, 2]),
# There are non-terminal states and ignore_nonterminal is True.
([False, True, True], [1., 3., 3.], True, 3., [1, 2, 2, 1, 1, 2]),
([True, False, True], [1., 3., 3.], True, 3., [2, 2, 2, 2, 2, 2]),
([True, True, False], [1., 3., 3.], True, 3., [1, 1, 1, 1, 1, 1]),
])
def test_max_reward_and_state_multiple_maximum(self,
states_terminal,
reward_values,
ignore_nonterminal,
expected_max_reward_value,
expected_max_state_indices):
    # To test the random selection, a fixed random seed is used;
    # expected_max_state_indices is the sequence of state indices expected to
    # be returned. This verifies that the states with maximum reward value
    # are selected randomly.
random_state = np.random.RandomState(2)
mock_state0 = mock.MagicMock()
mock_state0.is_terminal.return_value = states_terminal[0]
mock_state1 = mock.MagicMock()
mock_state1.is_terminal.return_value = states_terminal[1]
mock_state2 = mock.MagicMock()
mock_state2.is_terminal.return_value = states_terminal[2]
mock_states_list = [mock_state0, mock_state1, mock_state2]
for expected_max_state_index in expected_max_state_indices:
max_reward_value, max_state = tree.max_reward_and_state(
reward_values=reward_values,
states_list=mock_states_list,
ignore_nonterminal=ignore_nonterminal,
random_state=random_state)
self.assertAlmostEqual(max_reward_value, expected_max_reward_value)
self.assertEqual(max_state, mock_states_list[expected_max_state_index])
def test_max_reward_and_state_length_not_match(self):
with self.assertRaisesRegex(
ValueError,
r'The length of reward_values \(2\) does not match the length of '
r'states_list \(1\)'):
tree.max_reward_and_state(
reward_values=[42., 9.], states_list=[mock.MagicMock()])
def test_max_reward_and_state_allowed_states_list_empty(self):
with self.assertRaisesRegex(
ValueError, 'The number of allowed states to choose is 0'):
tree.max_reward_and_state(
reward_values=[], states_list=[], ignore_nonterminal=False)
mock_state = mock.MagicMock()
mock_state.is_terminal.return_value = False
with self.assertRaisesRegex(
ValueError, 'The number of allowed states to choose is 0'):
tree.max_reward_and_state(
reward_values=[42.],
states_list=[mock_state],
ignore_nonterminal=True)
class BackPropagationTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(BackPropagationTest, self).setUp()
    # Since back propagation does not affect state, the state of each node is
    # set to None for simplicity.
#
# root
# / \
# child1 child2
# / \
# grandchild1 grandchild2
root = tree.Node(None)
child1 = tree.Node(None)
root.add_child(child1)
child2 = tree.Node(None)
root.add_child(child2)
grandchild1 = tree.Node(None)
child1.add_child(grandchild1)
grandchild2 = tree.Node(None)
child1.add_child(grandchild2)
self.root = root
self.child1 = child1
self.child2 = child2
self.grandchild1 = grandchild1
self.grandchild2 = grandchild2
def test_back_propagation_add(self):
    # First back propagate the reward on grandchild1.
tree.back_propagation(self.grandchild1, 1., update_method='add')
# Only nodes on lineage:
# grandchild1 -- child1 -- root will be updated.
self.assertEqual(self.grandchild1.visits, 1)
self.assertAlmostEqual(self.grandchild1.quality, 1.)
self.assertEqual(self.child1.visits, 1)
self.assertAlmostEqual(self.child1.quality, 1.)
self.assertEqual(self.root.visits, 1)
self.assertAlmostEqual(self.root.quality, 1.)
# Other nodes will not be affected.
self.assertEqual(self.grandchild2.visits, 0)
self.assertAlmostEqual(self.grandchild2.quality, 0.)
self.assertEqual(self.child2.visits, 0)
self.assertAlmostEqual(self.child2.quality, 0.)
    # Then back propagate the reward on child2.
tree.back_propagation(self.child2, 9., update_method='add')
# Only nodes on lineage:
# child2 -- root will be updated.
self.assertEqual(self.child2.visits, 1)
self.assertAlmostEqual(self.child2.quality, 9.)
self.assertEqual(self.root.visits, 2)
self.assertAlmostEqual(self.root.quality, 10.)
# Other nodes will not be affected.
self.assertEqual(self.grandchild1.visits, 1)
self.assertAlmostEqual(self.grandchild1.quality, 1.)
self.assertEqual(self.grandchild2.visits, 0)
self.assertAlmostEqual(self.grandchild2.quality, 0.)
self.assertEqual(self.child1.visits, 1)
self.assertAlmostEqual(self.child1.quality, 1.)
def test_back_propagation_max(self):
    # First back propagate the reward on grandchild1.
tree.back_propagation(self.grandchild1, 1., update_method='max')
# Only nodes on lineage:
# grandchild1 -- child1 -- root will be updated.
self.assertEqual(self.grandchild1.visits, 1)
self.assertAlmostEqual(self.grandchild1.quality, 1.)
self.assertEqual(self.child1.visits, 1)
self.assertAlmostEqual(self.child1.quality, 1.)
self.assertEqual(self.root.visits, 1)
self.assertAlmostEqual(self.root.quality, 1.)
# Other nodes will not be affected.
self.assertEqual(self.grandchild2.visits, 0)
self.assertAlmostEqual(self.grandchild2.quality, 0.)
self.assertEqual(self.child2.visits, 0)
self.assertAlmostEqual(self.child2.quality, 0.)
    # Then back propagate the reward on child2.
tree.back_propagation(self.child2, 9., update_method='max')
# Only nodes on lineage:
# child2 -- root will be updated.
self.assertEqual(self.child2.visits, 1)
self.assertAlmostEqual(self.child2.quality, 9.)
self.assertEqual(self.root.visits, 2)
self.assertAlmostEqual(self.root.quality, 9.)
# Other nodes will not be affected.
self.assertEqual(self.grandchild1.visits, 1)
self.assertAlmostEqual(self.grandchild1.quality, 1.)
self.assertEqual(self.grandchild2.visits, 0)
self.assertAlmostEqual(self.grandchild2.quality, 0.)
self.assertEqual(self.child1.visits, 1)
self.assertAlmostEqual(self.child1.quality, 1.)
@parameterized.parameters([(np.nan, 'max'),
(np.inf, 'max'),
(-np.inf, 'max'),
(np.nan, 'add'),
(np.inf, 'add'),
(-np.inf, 'add')])
def test_back_propagation_reward_value_not_finite(
self, reward_value, update_method):
    # Back propagate the reward on grandchild1.
tree.back_propagation(
self.grandchild1, reward_value, update_method=update_method)
# Nodes on lineage
# grandchild1 -- child1 -- root
# will not be affected since the back propagation step is skipped:
self.assertEqual(self.grandchild1.visits, 0)
self.assertAlmostEqual(self.grandchild1.quality, 0.)
self.assertEqual(self.child1.visits, 0)
self.assertAlmostEqual(self.child1.quality, 0.)
self.assertEqual(self.root.visits, 0)
self.assertAlmostEqual(self.root.quality, 0.)
# Other nodes will not be affected.
self.assertEqual(self.grandchild2.visits, 0)
self.assertAlmostEqual(self.grandchild2.quality, 0.)
self.assertEqual(self.child2.visits, 0)
self.assertAlmostEqual(self.child2.quality, 0.)
class ProbsRemoveNaNTest(tf.test.TestCase):
def test_probs_remove_nan_all_nan(self):
with self.assertRaisesRegexp(ValueError,
'All the elements in probs are nan.'):
tree.probs_remove_nan(np.array([np.nan, np.nan]))
def test_probs_remove_nan_no_nan(self):
np.testing.assert_allclose(
tree.probs_remove_nan(np.array([0.1, 0.1])), [0.5, 0.5])
def test_probs_remove_nan(self):
np.testing.assert_allclose(
tree.probs_remove_nan(np.array([0.1, 0.1, np.nan])), [0.5, 0.5, 0.])
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
fb6e59194cd56c41ffbf2f949fdb863868fbed1e | 82f998aec53e7bc49eb5aad4fdb18cbe72976b89 | /transformers/configuration_albert.py | 144678774cdc1e5b1ea30145fdf9204c810d854a | [] | no_license | MatNLP/SMedBERT | 6ab8d2749a8a26005eef36dc347f779c9e6a217b | 8dd549f902ca59ad2b84bf3b951213565fde4dc0 | refs/heads/main | 2023-09-02T03:22:13.298661 | 2021-11-17T05:44:50 | 2021-11-17T05:44:50 | 372,204,217 | 75 | 13 | null | null | null | null | UTF-8 | Python | false | false | 5,303 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ALBERT model configuration """
from .configuration_utils import PretrainedConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'albert-base-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-config.json",
'albert-large-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-config.json",
'albert-xlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-config.json",
'albert-xxlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-config.json",
'albert-base-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-config.json",
'albert-large-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-config.json",
'albert-xlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-config.json",
'albert-xxlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-config.json",
}
class AlbertConfig(PretrainedConfig):
"""Configuration for `AlbertModel`.
The default settings match the configuration of model `albert_xxlarge`.
"""
pretrained_config_archive_map = ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
def __init__(self,
vocab_size_or_config_json_file=30000,
embedding_size=128,
hidden_size=4096,
num_hidden_layers=12,
num_hidden_groups=1,
num_attention_heads=64,
intermediate_size=16384,
inner_group_num=1,
hidden_act="gelu_new",
hidden_dropout_prob=0,
attention_probs_dropout_prob=0,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12, **kwargs):
"""Constructs AlbertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `AlbertModel`.
embedding_size: size of voc embeddings.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_hidden_groups: Number of group for the hidden layers, parameters in
the same group are shared.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
inner_group_num: int, number of inner repetition of attention and ffn.
down_scale_factor: float, the scale to apply
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`AlbertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
super(AlbertConfig, self).__init__(**kwargs)
self.vocab_size = vocab_size_or_config_json_file
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_hidden_groups = num_hidden_groups
self.num_attention_heads = num_attention_heads
self.inner_group_num = inner_group_num
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps | [
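# Example (illustrative): an albert-base-v2-style configuration built directly
# from keyword arguments instead of a pretrained archive entry.
#
#   config = AlbertConfig(
#       vocab_size_or_config_json_file=30000,
#       embedding_size=128,
#       hidden_size=768,
#       num_hidden_layers=12,
#       num_attention_heads=12,
#       intermediate_size=3072)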
"[email protected]"
] | |
d57c34be95b4a4e63226be4b67e05cb99573eb54 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03326/s618374476.py | ab457fda2c9fc24c21e8f4fbf5a51f82f641ff89 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | import sys, re, os
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import permutations, combinations, product, accumulate
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from heapq import heapify, heappop, heappush
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def S_MAP(): return map(str, input().split())
def LIST(): return list(map(int, input().split()))
def S_LIST(): return list(map(str, input().split()))
sys.setrecursionlimit(10 ** 9)
INF = float('inf')
mod = 10 ** 9 + 7
N, M = MAP()
L = [LIST() for i in range(N)]
# L = [-1, 1] # values used to generate sign patterns
num = 3 # number of sign bits to generate
bit_list = list(product([-1, 1], repeat=num))
# print(bit_list)
ans = 0
for a, b, c in bit_list:
tmp = [a * x + b * y + c * z for x, y, z in L]
tmp.sort(reverse=True)
selected = sum(tmp[0:M])
if ans < selected:
ans = selected
# ans.append(sum(tmp[0:M]))
print(ans)
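# Why enumerating signs works: the objective |sum x| + |sum y| + |sum z| over a
# chosen M-subset satisfies |t| = max(t, -t), so the optimum equals the maximum
# over the 8 sign patterns (a, b, c) in {-1, 1}^3 of the sum of the top-M
# linearised scores a*x + b*y + c*z.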
| [
"[email protected]"
] | |
5397c4ec035464d3f1523de7b3931fc7aed77c1d | 031d986fedef859c56d862fad71be339eec8365c | /saf/data_utils/prepare_datasets_from_tables.py | c847a0d7361c17c1bf042161a2107ac0bb1888aa | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | pc-seawind/google-research | bcdbf8bebf9a4a60a2407356a6ef3cdafe333070 | a7f09315705b650b75be08370cc4c70edb11e475 | refs/heads/master | 2022-11-13T20:10:47.006299 | 2022-11-04T23:29:14 | 2022-11-04T23:32:35 | 245,136,474 | 0 | 0 | Apache-2.0 | 2020-03-05T10:44:20 | 2020-03-05T10:44:19 | null | UTF-8 | Python | false | false | 10,346 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prepares training, validation and tests datasets from given tables."""
import gc
from typing import Optional
import numpy as np
import tensorflow as tf
def create_windowed_dataset(
dataset,
len_max_lookback,
forecast_horizon,
num_items = None,
num_parallel_calls=None,
):
"""Creates a dataset with lookback windows given a dataset of timesteps.
Args:
dataset: A tf.data.Dataset where each example is a tensor of shape
(num_timesteps, ...).
len_max_lookback: The length of each lookback window.
forecast_horizon: The length of the future forecast window.
num_items: The number of items in the data. If equal to the number of items
the data will cycle by item and then by time step (e.g. X1[0], X2[0], ...,
X1[1], x2[1], ...). If 0 or None the opposite will occur (e.g. X1[0],
X1[1], ..., x2[0], x2[1], ...).
num_parallel_calls: Number of threads to use for processing and interleaving
results. None is no parallelization, while tf.data.experimental.AUTOTUNE
sets it automatically based on the number of CPUs. If we have a large
number of items this should be set to None to avoid creating too many
threads.
Returns:
A tf.data.Dataset where each example is a tensor of shape (len_max_lookback
+ forecast_horizon, ...), and the dataset iterates over all lookback windows
for all examples (moving forward one step at a time within num_timesteps),
  with the windows from each example interleaved. If num_items equals the
  number of items, the same time step for all items will be returned first and
  then the time step will increment. If num_items is 0 or None, all the data
  for the first item will be returned first, followed by the data for the next
  item, etc.
"""
def create_windows(x):
# x is a tensor of shape (num_timesteps, ...). We create a dataset from this
# of length num_timesteps, and the window method then yields a dataset of
# sub datasets each of length len_max_lookback + forecast_horizon for all
# lookback windows. Those sub datasets are batched such that there is a
# single example of shape (len_max_lookback + forecast_horizon, ...) per sub
# dataset, and then the dataset of sub datasets is flat mapped to yield a
# single dataset with a length equal to the number of lookback windows.
len_window = len_max_lookback + forecast_horizon
dataset = tf.data.Dataset.from_tensor_slices(x)
dataset = dataset.window(len_window, shift=1)
dataset = dataset.flat_map(
lambda sub_ds: sub_ds.batch(len_window, drop_remainder=True))
return dataset
# Each example in the original dataset is mapped to a dataset of lookback
# windows. The order in which these are returned depends on the cycle length.
# If the cycle length is 1 all of the timesteps for each item will be returned
# followed by the next item. If the cycle length is equal to the number of
# items then each of the items for one time step will be returned followed by
# other timesteps.
cycle_length = num_items or 1
return dataset.interleave(
create_windows,
cycle_length=cycle_length,
num_parallel_calls=num_parallel_calls)
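# Shape sketch (illustrative): three items with 10 timesteps and 2 features,
# windowed with len_max_lookback=4 and forecast_horizon=2, yield examples of
# shape (6, 2).
#
#   ds = tf.data.Dataset.from_tensor_slices(tf.zeros([3, 10, 2]))
#   windowed = create_windowed_dataset(
#       ds, len_max_lookback=4, forecast_horizon=2, num_items=3)
#   next(iter(windowed)).shape == (6, 2)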
def return_datasets(
input_tables,
train_start,
train_end,
val_start,
val_end,
test_start,
test_end,
num_static,
forecast_horizon,
len_max_lookback,
target_index,
shuffle_train_items=False,
shuffle_after_windowing=None,
max_train_examples=None,
cycle_items_first=True,
**unused_kwargs,
):
"""Prepares the datasets for training, validation, and testing.
Args:
input_tables: A dictionary of NumPy arrays containing a `time_sequences`
array of shape (num_items, len_labeled_timesteps, num_features), and a
`static` array of shape (num_items, num_static).
train_start: The start index for the training data.
train_end: The end index for the training data.
val_start: The start index for the validation data.
val_end: The end index for the validation data.
test_start: The start index for the test data.
test_end: The end index for the test data.
num_static: The number of static features.
forecast_horizon: The number of time-steps that will be forecast.
    len_max_lookback: The maximum number of time-steps prior to the prediction
      window that are used as model input.
target_index: The index of the target feature in the input_tables.
shuffle_train_items: Whether or not to reproducibly shuffle the training
examples. This will apply to the initial dataset that is of length
num_items prior to the windowing operations.
shuffle_after_windowing: Whether or not to reproducibly shuffle the training
examples after windowing. If True the model will be presented data in a
time-randomized order.
max_train_examples: Maximum number of training examples to yield. By
default, all examples are kept.
cycle_items_first: If true the data will cycle by item and then by time step
(e.g. X1[0], X2[0], ..., X1[1], x2[1], ...). If false, the opposite will
occur (e.g. X1[0], X1[1], ..., x2[0], x2[1], ...).
unused_kwargs: Additional parameters should not be used but are included to
make calling the function with hyper-parameters easier.
Returns:
A tuple of (train tf.data.Dataset, val tf.data.Dataset, test
tf.data.Dataset). Each dataset yields a (time_series_input, static_input,
labels) tuple per example containing data for one item at one timestep,
where time_series_input is a tensor of shape (len_max_lookback,
num_features), static_input is a tensor of shape (num_static,), and labels
is a tensor of shape (forecast_horizon,). All shape values are represented
in the dataset_params dictionary.
"""
del unused_kwargs # Unused but makes passing in hyper-parameters easier.
if shuffle_after_windowing is None:
shuffle_after_windowing = shuffle_train_items
if max_train_examples is None:
max_train_examples = -1
# Data as numpy objects
time_sequences = input_tables["time_sequences"]
static = input_tables["static"]
num_items = time_sequences.shape[0]
if num_items != static.shape[0]:
raise ValueError(
"The first dimension of time_sequences and static data must match")
# Training dataset preparation
def split_tensors(x):
time_series_features = x[:-forecast_horizon, :-num_static]
static_features = x[0, -num_static:]
labels = x[-forecast_horizon:, target_index]
return (time_series_features, static_features, labels)
input_sequence_train = time_sequences[:, train_start:train_end + 1]
input_static_train = np.broadcast_to(
np.expand_dims(static, axis=1),
(static.shape[0], input_sequence_train.shape[1], static.shape[1]))
input_train = np.concatenate([input_sequence_train, input_static_train],
axis=-1)
train_dataset = tf.data.Dataset.from_tensor_slices(input_train)
if shuffle_train_items:
train_dataset = train_dataset.shuffle(
1000, seed=42, reshuffle_each_iteration=True)
windowed_dataset_num_items = num_items if cycle_items_first else 1
# TODO(nyoder): Explore different ways to structure the data and it's impact
# on performance.
train_dataset = create_windowed_dataset(
train_dataset,
len_max_lookback=len_max_lookback,
forecast_horizon=forecast_horizon,
num_items=1,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
train_dataset = train_dataset.map(split_tensors)
if shuffle_after_windowing:
train_dataset = train_dataset.shuffle(
1000, seed=42, reshuffle_each_iteration=True)
train_dataset = train_dataset.take(max_train_examples)
del input_sequence_train, input_static_train
gc.collect()
# Validation dataset preparation
# Note that val_start can be smaller than train_end.
# Indeed, choosing val_start = train_end - len_max_lookback - 1 would yield
# that the last prediction date of training is followed by the first
# prediction date of validation.
input_sequence_valid = time_sequences[:, val_start:val_end + 1]
input_static_valid = np.broadcast_to(
np.expand_dims(static, axis=1),
(static.shape[0], input_sequence_valid.shape[1], static.shape[1]))
input_valid = np.concatenate([input_sequence_valid, input_static_valid],
axis=-1)
valid_dataset = tf.data.Dataset.from_tensor_slices(input_valid)
valid_dataset = create_windowed_dataset(
valid_dataset,
len_max_lookback=len_max_lookback,
forecast_horizon=forecast_horizon,
num_items=windowed_dataset_num_items,
)
valid_dataset = valid_dataset.map(split_tensors)
del input_sequence_valid, input_static_valid
gc.collect()
# Testing dataset preparation
# Note that test_start can be smaller than val_end.
# Indeed, choosing test_start = val_end - len_max_lookback - 1 would yield
# that the last prediction date of validation is followed by the first
# prediction date of test.
input_sequence_test = time_sequences[:, test_start:test_end + 1]
input_static_test = np.broadcast_to(
np.expand_dims(static, axis=1),
(static.shape[0], input_sequence_test.shape[1], static.shape[1]))
input_test = np.concatenate([input_sequence_test, input_static_test], axis=-1)
test_dataset = tf.data.Dataset.from_tensor_slices(input_test)
test_dataset = create_windowed_dataset(
test_dataset,
len_max_lookback=len_max_lookback,
forecast_horizon=forecast_horizon,
num_items=windowed_dataset_num_items,
)
test_dataset = test_dataset.map(split_tensors)
del input_sequence_test, input_static_test
gc.collect()
return train_dataset, valid_dataset, test_dataset
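# Example call (hypothetical shapes and indices): given `seq` of shape
# (num_items, T, num_features) and `static` of shape (num_items, num_static),
#
#   train_ds, val_ds, test_ds = return_datasets(
#       input_tables={"time_sequences": seq, "static": static},
#       train_start=0, train_end=99,
#       val_start=71, val_end=134,
#       test_start=106, test_end=169,
#       num_static=static.shape[-1],
#       forecast_horizon=7, len_max_lookback=28, target_index=0)
#
# each yielded example is a (time_series_input, static_input, labels) tuple
# with shapes (28, num_features), (num_static,), and (7,).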
| [
"[email protected]"
] | |
5993102db04b63b021c0792c45e33184b33f0e7e | dc3d310934705034ab2f5bc4d3a96f07dab9b48b | /venv/Scripts/pip3.8-script.py | d6438c0257f98b46c7098f54e08d33868c8e9a97 | [] | no_license | createnewdemo/istudy_test | 82197488d9e9fa05e0c6cc91362645fc4555dc1d | 806693f2bee13e3c28571d0d75f6b6ea70acf7a0 | refs/heads/master | 2022-04-19T05:52:53.780973 | 2020-04-17T17:04:10 | 2020-04-17T17:04:10 | 256,507,355 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 400 | py | #!F:\pycharmÁ·Ď°\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
| [
"[email protected]"
] | |
10cabdec7e10d144746752e0c1d59045fc66cc76 | 9d1c769fb97c9287fc86cf582ac84bbf9cfdeec8 | /PythonFunctionalProgramming(Advanced)/7.Lambda Expression.py | 8b180d4009858955fcd193726d37957f15f09c82 | [] | no_license | rohan9769/Python-Coding-and-Practice | a0bb1b560e995b2f484b6e6a9cc42e4bac9e84cc | 27da1d4c3d0a1067fb8ce7f937d469bc4a2d2189 | refs/heads/master | 2021-02-10T09:15:17.999508 | 2020-03-22T13:12:44 | 2020-03-22T13:12:44 | 244,368,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | #Lambda Expression - one time anonymous functions
# lambda parameter : action to take on the pararmeter
from functools import reduce
my_list = [1,2,3]
# def multi_by2(i):
# return i*2
def check_odd(i):
return i%2 != 0
def accumulator(acc,i):
print(acc,i)
return acc + i
print(list(map(lambda i: i*2,my_list)))
print(list(filter(lambda i:i%2!=0,my_list)))
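# reduce was imported above but not yet used; the accumulator above can be
# collapsed into a one-off lambda the same way (this sums my_list to 6):
print(reduce(lambda acc,i: acc + i, my_list))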
| [
"[email protected]"
] |