id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses, 1 value)
---|---|---|
8022836
|
a00=( '''
######################################################################
# ______ _____ _ _ #
# | ___ \/ __ \ | | (_) #
# | |_/ /| / \/ _ __ _ __ ___ _ __ ___ _ __| |_ _ ___ ___ #
# | __/ | | | '_ \| '__/ _ \| '_ \ / _ \ '__| __| |/ _ \/ __| #
# | | | \__/\ | |_) | | | (_) | |_) | __/ | | |_| | __/\__ \ #
# \_| \____/ | .__/|_| \___/| .__/ \___|_| \__|_|\___||___/ #
# | | | | #
# |_| |_| #
# #
######################################################################
################{{{{coded by <NAME>}}}}##################
''')
import getpass
c=getpass.getuser()
#Before using this script you should install the third-party dependencies from cmd: 1:cd/ then press enter
#2:cd python27 then press enter 3:cd Scripts then press enter 4:pip install psutil then press enter
#now psutil is installed; repeat step 4 with "pip install pywin32" for win32com.client
#(platform, socket and subprocess ship with Python and need no installation)
def write_data(data):
    file_data = "C:/Users/%s/Desktop/pr.txt" % c
    # append to the report file and close it automatically
    with open(file_data, 'a') as m:
        m.write(data)
import platform
import win32com.client
import os
import sys
import psutil
import time
import socket
import subprocess
a0=('########################## system #########################')
a1=('system :', platform.system())
a2=('node :', platform.node())
a3=('release :', platform.release())
a4=('version :', platform.version())
a5=('machine :', platform.machine())
a6=('processor:', platform.processor())
a7=('\n')
a8=('########################## cpu #########################')
percentc=psutil.cpu_percent()
a9=('the percent cpu used :',percentc)
a10=('Number of cpu in the processor :',(psutil.cpu_count()))
freq=psutil.cpu_freq()
current=freq.current
minn=freq.min
maxx=freq.max
a11=('*cpu frequency* ')
a12=('1:current :',current)
a13=('2:min :',minn)
a14=('3:max :',maxx)
state=(psutil.cpu_stats())
ctx_switches=state.ctx_switches
interrupts=state.interrupts
soft_interrupts=state.soft_interrupts
syscalls=state.syscalls
a15=('*cpu state*')
a16=('Context Switch:',ctx_switches)
a17=('interrupts:',interrupts)
a18=('software interrupts :' ,soft_interrupts)
a19=('syscalls:',syscalls)
a20=('########################## memory #########################')
memory=(psutil.virtual_memory())
total=memory.total
available=memory.available
used=memory.used
free=memory.free
percentm=memory.percent
a21=('the total memory :', (total))
a22=('the used memory :', (used))
a23=('the available memory :', (available))
a24=('the percent of using memory :', (percentm),'%')
a26=('\n' )
a27=('########################disk##########################')
disk=psutil.disk_usage('/')
totald=disk.total
usedd=disk.used
freed=disk.free
percentd=disk.percent
a28=('total disk size:',totald)
a29=('used disk:',usedd)
a30=('free disk:',freed)
a31=('the percent disk used:',percentd,'%')
counters=psutil.disk_io_counters()
read_count=counters.read_count
write_count=counters.write_count
read_bytes=counters.read_bytes
write_bytes=counters.write_bytes
read_time=counters.read_time
write_time=counters.write_time
a32=('read counter:',read_count)
a33=('write counter:',write_count)
a34=('read bytes:',read_bytes)
a35=('write bytes:',write_bytes)
a36=('read time:',read_time)
a37=('write time:',write_time)
a38=('\n')
a39=('########################network##########################')
net=psutil.net_io_counters()
bytes_sent=net.bytes_sent
bytes_recv=net.bytes_recv
packets_sent=net.packets_sent
packets_recv=net.packets_recv
errin=net.errin
errout=net.errout
dropin=net.dropin
dropout=net.dropout
a40=('bytes sent',bytes_sent)
a41=('bytes recv',bytes_recv)
a42=('packets sent',packets_sent)
a43=('packets recv',packets_recv)
a44=('errin',errin)
a45=('errout',errout)
a46=('dropin',dropin)
a47=('dropout',dropout)
a48=('the local ip addr:',(socket.gethostbyname(socket.gethostname())))
a49=('\n')
a50=('########################battery##########################')
battery=psutil.sensors_battery()
percentb=battery.percent
secsleftb=battery.secsleft
power_plugged=battery.power_plugged
a51=('percent of charging :',percentb,'%')
a52=('time left:',secsleftb)
a53=('power plugged:',power_plugged)
a54=('\n')
a55=('########################services#########################')
a56=("*The service running now*")
processes= subprocess.Popen('tasklist', stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE).communicate()[0]
a57=(processes)
data=[a00,a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13,a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a26, a27, a28, a29,a30, a31, a32, a33, a34, a35, a36, a37, a38, a39, a40, a41, a42, a43, a44, a45, a46,a47, a48, a49, a50, a51, a52, a53, a54, a55, a56,a57]
for i in data:
write_data(str(i)+'\n')
|
StarcoderdataPython
|
5128193
|
<gh_stars>1-10
#!/usr/bin/env python3
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module for script: amoebae.
"""
# Import built-in modules.
#import argparse
import sys
import os
import subprocess
#import re
#from datapaths import DataPaths
#import shutil
#import glob
import time
import pandas as pd
# Import modules from installed libraries/packages.
from Bio import SeqIO
#from Bio import AlignIO
#from Bio.Alphabet import IUPAC, Gapped
import amoebae_m
from get_datatype import get_dbtype
# Define functions to be used in amoebae.
def get_query_file_type(query_file):
"""Takes an input query file and determines whether it is a FASTA file or
an HMM file. Returns either "FASTA" or "HMM".
"""
file_type = None
with open(query_file) as infh:
first_line = infh.readline().strip()
if first_line.startswith('>'):
file_type = 'FASTA'
elif first_line.startswith('HMM'):
file_type = 'HMM'
# Check that it worked.
assert file_type is not None, """Error: Input file type could not be
determined: %s""" % query_file
# Return file type.
return file_type
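# Illustrative note (not part of the original script): a FASTA query begins with
# a '>' header line, while an HMMER3 profile begins with a line such as
# "HMMER3/f [3.1b2 | February 2015]", which is why the 'HMM' prefix test above
# is enough to tell the two apart. Hypothetical filenames:
#   get_query_file_type('query.faa')  # -> 'FASTA'
#   get_query_file_type('query.hmm')  # -> 'HMM'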
def get_hmm_datatype(query_file):
"""Takes an HMM file (HMMer3 software package) and determines what data
type it has (i.e., generated from an amino acid or nucleic acid alignment).
Returns either "prot" or "nucl".
"""
datatype = None
with open(query_file) as infh:
for i in infh:
if i.startswith('ALPH'):
dname = i.strip().split(' ')[1]
if dname == 'amino':
datatype = 'prot'
elif dname == 'DNA':
datatype = 'nucl'
break
# Check that it worked.
assert datatype is not None, """Error: Data type could not be
determined for input file: %s""" % query_file
# Return the data type.
return datatype
def get_mod_query_path(query_file, filetype, datatype, query_dir):
"""Define a new query path.
"""
exten = None
if filetype == 'fa':
if datatype == 'prot':
exten = 'faa'
elif datatype == 'nucl':
exten = 'fna'
elif filetype == 'afa':
if datatype == 'prot':
exten = 'afaa'
elif datatype == 'nucl':
exten = 'afna'
# Check that it worked.
assert exten is not None, """Error: New extension could not be
determined for input file: %s""" % query_file
# Define basename with new extension.
query_file_new_exten = os.path.basename(query_file).rsplit('.', 1)[0] +\
'.' + exten
# Get new path.
new_query_file_path = os.path.join(query_dir, query_file_new_exten)
# Return the new path.
return new_query_file_path
def is_single_fasta(query_file):
"""Return True if the given fasta file contains only one sequence.
"""
single = False
num_seqs = 0
with open(query_file) as infh:
for seq in SeqIO.parse(query_file, 'fasta'):
num_seqs += 1
if num_seqs == 1:
single = True
return single
def fasta_seqs_all_same_len(query_file):
"""Returns True if the sequences in a given fasta file are all the same
length.
"""
seq_lengths = []
with open(query_file) as infh:
for seq in SeqIO.parse(infh, 'fasta'):
seq_lengths.append(len(seq))
# If the sequences are the same length, then a nonredundant list of
# sequence lengths will only contain one element.
if len(set(seq_lengths)) == 1:
return True
else:
return False
def update_query_csv(csv_file, mod_query_path, datatype, main_data_dir):
"""Appends a line to the given spreadsheet with info about the given fasta
file added to a directory.
"""
# Define column headers.
headers = ['Filename',
'Query title',
'Query source description',
'Query taxon (species if applicable)',
'Query database filename (if applicable)',
'File type',
'Data type',
'Date added',
'Citation'
]
# Make new csv file if necessary.
if not os.path.isfile(csv_file):
#df = pd.DataFrame(columns=headers)
#df.to_csv(csv_file) #, index_label='Filename')
with open(csv_file, 'w') as o:
o.write(','.join(headers)) # + '\n')
# Load dataframe from csv file.
df = pd.read_csv(csv_file, encoding='utf-8') #, index_col='Filename')
# Get current date.
cur_date = time.strftime("%Y/%m/%d")
# Get query filename and extension.
full = os.path.basename(mod_query_path).rsplit('.', 1)
filename = full[0]
exten = full[1]
# Get query basename.
query_basename = os.path.basename(mod_query_path)
# Extract info from query filename.
query_title = '?'
taxon = '?'
species = '?'
if len(query_basename.split('_')) > 2:
# Get query title name.
query_title = query_basename.split('_')[0]
# Get query taxon name.
# Assumes that there is an accession or something else after the "taxon" in
# the query file name.
taxon = amoebae_m.get_query_taxon_from_filename(query_basename)
# Get species based on taxon.
species = amoebae_m.get_species_from_db_csv(taxon, main_data_dir)
else:
## Print warning.
#print("""Warning: Could not identify query title or database/taxon name
#in input filename.""")
## Just use the whole filename minus the filename extension.
#query_title = query_basename.rsplit('.')[0]
query_title = query_basename.split('_')[0]
# Initiate dataframe for line to append.
new_row = pd.DataFrame(columns=headers)
# Get database filename.
db_filename = amoebae_m.get_db_filename_for_query_from_db_csv(taxon,
main_data_dir)
# Add info to new row.
new_row.loc[0] = ['???'] * len(headers)
new_row.loc[0]['Filename'] = query_basename
new_row.loc[0]['Query title'] = query_title
new_row.loc[0]['Query source description'] = taxon
new_row.loc[0]['Query taxon (species if applicable)'] = species
new_row.loc[0]['Data type'] = datatype
new_row.loc[0]['File type'] = exten
new_row.loc[0]['Date added'] = cur_date
new_row.loc[0]['Citation'] = '?'
new_row.loc[0]['Query database filename (if applicable)'] =\
db_filename
# Check that it worked.
assert not '???' in new_row.loc[0], """Could not add all the necessary info
to the query info spreadsheet for query file:\n\t%s""" % mod_query_path
## Get new row as text appropriate for a line in a CSV file.
#new_row_csv_text = get_csv_line_text_from_pd_row(new_row)
## Append new line/row to CSV file.
#subprocess.call(['echo', new_row_csv_text, '>>', csv_file])
# Append new row to dataframe.
df = df.append(new_row, ignore_index=True)
## Re-order columns in output dataframe (unnecessary?).
#df = df[new_row.columns]
## Reduce likelihood of writing to the same file at the same time from
## different processes (***this is not an optimal solution!).
#time.sleep(random.randint(1,30))
# Write updated dataframe to csv file.
#df[1:].to_csv(csv_file)
#df.to_csv(csv_file, index=False)
temp_file = os.path.join(os.path.dirname(csv_file), query_basename + '_temp_row.csv')
# Write updated dataframe to temporary CSV file.
df.to_csv(temp_file, index=False)
# Append the last line of the temporary CSV file to the main CSV file.
# (This should be more robust to writing from multiple processes).
with open(temp_file) as infh, open(csv_file, 'a') as o:
lines = infh.read().splitlines()
last_line = lines[-1]
o.write('\n' + last_line)
# Remove temporary CSV file.
os.remove(temp_file)
# Check that the CSV was actually updated with the information about this
# query file.
csv_text = None
with open(csv_file) as infh:
csv_text = infh.read()
assert query_basename in csv_text, """Error: Information about the
query with filename %s was not added to the CSV file at path %s.""" \
% (query_basename, csv_file)
# Report activity:
#print('Information added to spreadsheet %s:' % os.path.basename(csv_file))
#print('\tFilename: ' + query_basename)
#print('\tQuery title: ' + query_title)
#print('\tQuery source description: ' + taxon)
#print('\tQuery taxon (species if applicable): ' + species)
#print('\tData type: ' + datatype)
#print('\tFile type: ' + exten)
#print('\tDate added: ' + cur_date)
#print('\tCitation: ' + '?')
#print('\tQuery database filename (if applicable): ' + db_filename)
|
StarcoderdataPython
|
3398191
|
<filename>fooof/plts/error.py
"""Plots for visualizing model error."""
import numpy as np
from fooof.core.modutils import safe_import, check_dependency
from fooof.plts.spectra import plot_spectra
from fooof.plts.settings import PLT_FIGSIZES
from fooof.plts.style import style_spectrum_plot, style_plot
from fooof.plts.utils import check_ax, savefig
plt = safe_import('.pyplot', 'matplotlib')
###################################################################################################
###################################################################################################
@savefig
@style_plot
@check_dependency(plt, 'matplotlib')
def plot_spectral_error(freqs, error, shade=None, log_freqs=False, ax=None, **plot_kwargs):
"""Plot frequency by frequency error values.
Parameters
----------
freqs : 1d array
Frequency values, to be plotted on the x-axis.
error : 1d array
Calculated error values or mean error values across frequencies, to plot on the y-axis.
shade : 1d array, optional
Values to shade in around the plotted error.
This could be, for example, the standard deviation of the errors.
log_freqs : bool, optional, default: False
Whether to plot the frequency axis in log spacing.
ax : matplotlib.Axes, optional
Figure axes upon which to plot.
**plot_kwargs
Keyword arguments to pass into the ``style_plot``.
"""
ax = check_ax(ax, plot_kwargs.pop('figsize', PLT_FIGSIZES['spectral']))
plt_freqs = np.log10(freqs) if log_freqs else freqs
plot_spectra(plt_freqs, error, ax=ax, linewidth=3)
if np.any(shade):
ax.fill_between(plt_freqs, error-shade, error+shade, alpha=0.25)
ymin, ymax = ax.get_ylim()
if ymin < 0:
ax.set_ylim([0, ymax])
ax.set_xlim(plt_freqs.min(), plt_freqs.max())
style_spectrum_plot(ax, log_freqs, True)
ax.set_ylabel('Absolute Error')
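# Hypothetical usage sketch (not part of the original module): plot the
# frequency-wise model error with a +/- one-standard-deviation shade. The
# arrays below are made-up illustrative data.
#
#   import numpy as np
#   from fooof.plts.error import plot_spectral_error
#
#   freqs = np.arange(3, 40, 0.5)
#   errors = np.abs(np.random.normal(0.05, 0.01, len(freqs)))
#   shade = np.full(len(freqs), 0.01)
#   plot_spectral_error(freqs, errors, shade=shade, log_freqs=False)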
|
StarcoderdataPython
|
11295882
|
<filename>main.py
import torch
from torch.utils import data
import os
import numpy as np
from utils import trainer, visualizer, cagan_dataset, options, IS_score
from tqdm import tqdm
opt = options.GatherOptions().parse()
cagan_dataset = cagan_dataset.CAGAN_Dataset(opt)
if opt.mode == "train":
train_dataloader = data.DataLoader(cagan_dataset, opt.batchsize, shuffle=True,
num_workers=opt.num_workers, pin_memory=True)
# calculate the number of steps for decaying the learning rate to zero at the end of training
if (opt.lr_policy == 'lambda') and (opt.niter_decay == -1):
opt.niter_decay = opt.epoch*len(train_dataloader) - opt.niter
model = trainer.CAGAN_Trainer(opt)
vis_custom = visualizer.Visualizer(opt.mode)
vis_custom.reset_env()
loss_save_path = os.path.join(opt.save_dir, 'loss.dict')
step = 0
if opt.resume:
step = opt.step
model.load_networks(step=step, load_netD=True)
try:
vis_custom.recover_loss(loss_save_path)
except:
print("Loss dict can not be found in %s"%opt.save_dir)
for epoch in range(opt.epoch):
if opt.lr_policy not in ['lambda']:
model.update_learning_rate()
for real in train_dataloader:
step += 1
model.set_input(real)
model.optimize_parameters()
loss_dict =model.get_current_losses()
loss_str = '[step %d/epoch %d]'%(step, epoch+1)
for key, data in loss_dict.items():
if "sum" in key:
loss_str += key + ': ' + '%.3f'%(data) + ' '
print(loss_str[:-2])
vis_custom.plot_current_losses(step, loss_dict)
vis_custom.plot_current_images(model.get_current_visuals(), 'real-fake-rec-alpha',len(real))
if step % opt.save_image_freq == 0:
model.save_current_visuals(step, opt.batchsize)
if step % opt.save_model_freq == 0:
model.save_networks(step)
torch.save(vis_custom.plot_data, loss_save_path)
if opt.lr_policy in ['lambda']:
model.update_learning_rate()
model.save_current_visuals(step, opt.batchsize)
model.save_networks(step)
else:
test_dataloader = data.DataLoader(cagan_dataset, opt.batchsize, shuffle=False,
num_workers=opt.num_workers, pin_memory=True)
inception_model = IS_score.INCEPTION_V3()
inception_model.eval()
inception_model.to('cuda')
model = trainer.CAGAN_Trainer(opt)
step = None
if opt.step != 0:
step = opt.step
model.load_networks(step=step, load_dir=opt.model_dir)
vis_custom = visualizer.Visualizer('test')
vis_custom.reset_env()
predictions = []
step = 0
for real in tqdm(test_dataloader):
step += 1
model.set_input(real)
with torch.autograd.no_grad():
model.netG_forward()
pred = inception_model(model.output_dict['fake_outputs'][-1])
predictions.append(pred.data.cpu().numpy())
vis_custom.plot_current_images(model.get_current_visuals(),
'real-fake-rec-alpha', len(real))
predictions = np.concatenate(predictions, 0)
mean, std = IS_score.compute_inception_score(predictions, 10)
print('IS score --- mean: %.4f, std: %.4f'%(mean, std))
|
StarcoderdataPython
|
1609072
|
<reponame>relsqui/protocards
#!/usr/bin/python
import unittest
import random
from .. import base
class TestBase(unittest.TestCase):
def test_property_attrs(self):
prop = base.CardProperty("bar")
self.assertEqual(prop.name, "bar")
self.assertEqual(prop.plural, "bars")
self.assertEqual(prop.short, "b")
def test_shuffle(self):
data = [1, 2, 3, 4, 5]
hand = base.Hand(data)
self.assertEqual(hand.data, data)
random.seed(0)
hand.shuffle()
self.assertNotEqual(hand.data, data)
def test_deal(self):
hand = base.Hand([1, 2, 3, 4, 5])
book = hand.deal(3)
self.assertEqual(hand.data, [1, 2])
self.assertEqual(book.data, [3, 4, 5])
def test_deal_toomany(self):
hand = base.Hand([1, 2, 3, 4, 5])
self.assertRaises(IndexError, hand.deal, 10)
class TestEqualityMixin(unittest.TestCase):
def setUp(self):
class Foo(base.EqualityMixin):
pass
class Bar(base.EqualityMixin):
pass
self.Foo = Foo
self.Bar = Bar
def test_same_class_same_attrs(self):
a = self.Foo()
b = self.Foo()
a.use = "fruit"
b.use = "fruit"
self.assertEqual(a, b)
def test_different_class_same_attrs(self):
a = self.Foo()
b = self.Bar()
a.use = "fruit"
b.use = "fruit"
self.assertEqual(a, b)
def test_same_class_different_attrs(self):
a = self.Foo()
b = self.Foo()
a.name = "apple"
b.name = "banana"
self.assertNotEqual(a, b)
def test_different_class_different_attrs(self):
a = self.Foo()
b = self.Bar()
a.name = "apple"
b.name = "banana"
self.assertNotEqual(a, b)
|
StarcoderdataPython
|
1915523
|
import os
from io import StringIO
from django.core.management import call_command
from django.db import transaction
from django.db.utils import IntegrityError
from django.test import TransactionTestCase
from apps.author.models import Author
PATH_FILE = os.path.dirname(os.path.abspath(__file__))
class ImportAuthorsCommandsTest(TransactionTestCase):
def test_command_output_all_created(self):
out = StringIO()
try:
call_command(
"import_authors",
os.path.join(PATH_FILE, "authors.csv"),
stdout=out
)
expected = """<NAME> created\nOsvaldo Santana Neto created\n"""
self.assertEqual(expected, out.getvalue())
authors = Author.objects.all()
find_authors = [author.name for author in authors]
self.assertCountEqual(
find_authors,
["<NAME>", "<NAME>"]
)
self.assertEqual(2, Author.objects.count())
finally:
out.close()
def test_command_output_already_created(self):
author_model = Author(
name="<NAME>"
)
author_model.save()
out = StringIO()
try:
call_command(
"import_authors",
os.path.join(PATH_FILE, "authors.csv"),
stdout=out
)
expected = """<NAME> already created\nOsvaldo Santana Neto created\n"""
self.assertEqual(expected, out.getvalue())
authors = Author.objects.all()
find_authors = [author.name for author in authors]
self.assertCountEqual(
find_authors,
["<NAME>", "<NAME>"]
)
self.assertEqual(2, Author.objects.count())
finally:
out.close()
|
StarcoderdataPython
|
1863838
|
<reponame>davidbrownell/Common_cpp_boost_1.70.0
# ----------------------------------------------------------------------
# |
# | _custom_data.py
# |
# | <NAME> <<EMAIL>>
# | 2019-04-12 11:51:46
# |
# ----------------------------------------------------------------------
# |
# | Copyright <NAME> 2019-21
# | Distributed under the Boost Software License, Version 1.0. See
# | accompanying file LICENSE_1_0.txt or copy at
# | http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
"""Contains data used by both Setup_custom.py and Activate_custom.py"""
import os
import CommonEnvironment
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
_CUSTOM_DATA = [
(
"boost - 1.70.0",
"0bc6eeb01484831568bb6bd0fdf92beb9ad2ca9748de9cf2c825fa12865b2db0",
[
"Libraries",
"C++",
"boost",
"v1.70.0",
],
),
]
|
StarcoderdataPython
|
6575618
|
<gh_stars>0
from app import *
from app.vote.models import *
db.__init__(app)
def next_post(id):
    count = Question.query.count()
    while count > 0:
        id += 1
        # return the id of the next existing question, if any
        next_post = Question.query.get(id)
        if next_post is not None:
            return next_post.id
        if id > count:
            return 'finish'
|
StarcoderdataPython
|
6697465
|
<reponame>OSUmageed/pyHeatTransfer<filename>pyHeatTransfer/geometry.py
''' These are the global geometry parameters for the discretization.
It's ugly and it's hand written, but there's very little pattern in this madness.
Each coordinate will have a tag describing its position (i.e. center or top corner) in East, West, South, North, Up, Down (i.e. ESU would be a top corner at the
origin of a specified line).
'''
from collections import defaultdict
''' Each tag refers to a dictionary with a stencil template, a coefficient for the node volume, the cross sectional area for each stencil point, and the surface area for ambient (convection and radiation) heat transfer which will be split up into vertical and horizontal components to allow free convection in the future.
'''
#At this moment we are ignoring inside corners.
A = [1]
B = [-1]
AB = A+B
f8 = 1.0/8.0
f4 = 1.0/4.0
f2 = 1.0/2.0
f1 = f2+f2
full = [[1,0,0], [-1,0,0], [0,1,0], [0,-1,0], [0,0,1], [0,0,-1]]
tags = defaultdict(dict)
# CORNERS
# Outside top corners
loc = 'ESU'
tags[loc]['Stencil'] = [[1,0,0], [0,1,0], [0,0,-1]]
tags[loc]['Vc'] = f8
tags[loc]['Acond'] = [f4, f4, f4]
tags[loc]['Aconv'] = [f2 + f4]
loc = 'ENU'
tags[loc]['Stencil'] = [[1,0,0], [0,-1,0], [0,0,-1]]
tags[loc]['Vc'] = f8
tags[loc]['Acond'] = [f4, f4, f4]
tags[loc]['Aconv'] = [f2 + f4]
loc = 'WNU'
tags[loc]['Stencil'] = [[-1,0,0],[0,-1,0],[0,0,-1]]
tags[loc]['Vc'] = f8
tags[loc]['Acond'] = [f4, f4, f4]
tags[loc]['Aconv'] = [f2 + f4]
loc = 'WSU'
tags[loc]['Stencil'] = [[-1,0,0],[0,1,0],[0,0,-1]]
tags[loc]['Vc'] = f8
tags[loc]['Acond'] = [f4, f4, f4]
tags[loc]['Aconv'] = [f2 + f4]
#Outside bottom corners
loc = 'ESB'
tags[loc]['Stencil'] = [[1,0,0], [0,1,0], [0,0,1]]
tags[loc]['Vc'] = f8
tags[loc]['Acond'] = [f4, f4, f4]
tags[loc]['Aconv'] = [f2 + f4]
loc = 'ENB'
tags[loc]['Stencil'] = [[1,0,0], [0,-1,0], [0,0,1]]
tags[loc]['Vc'] = f8
tags[loc]['Acond'] = [f4, f4, f4]
tags[loc]['Aconv'] = [f2 + f4]
loc = 'WNB'
tags[loc]['Stencil'] = [[-1,0,0], [0,-1,0], [0,0,1]]
tags[loc]['Vc'] = f8
tags[loc]['Acond'] = [f4, f4, f4]
tags[loc]['Aconv'] = [f2 + f4]
loc = 'WSB'
tags[loc]['Stencil'] = [[-1,0,0], [0,1,0], [0,0,1]]
tags[loc]['Vc'] = f8
tags[loc]['Acond'] = [f4, f4, f4]
tags[loc]['Aconv'] = [f2 + f4]
#EDGES
#----------------------------------------------
# Outside vertical edges
loc = 'ES'
tags[loc]['Stencil'] = [[1,0,0], [0,1,0], [0,0,1], [0,0,-1]]
tags[loc]['Vc'] = f4
tags[loc]['Acond'] = [f2, f2, f4, f4]
tags[loc]['Aconv'] = [f1]
loc = 'EN'
tags[loc]['Stencil'] = [[1,0,0], [0,-1,0], [0,0,1], [0,0,-1]]
tags[loc]['Vc'] = f4
tags[loc]['Acond'] = [f2, f2, f4, f4]
tags[loc]['Aconv'] = [f1]
loc = 'WN'
tags[loc]['Stencil'] = [[-1,0,0], [0,-1,0], [0,0,1], [0,0,-1]]
tags[loc]['Vc'] = f4
tags[loc]['Acond'] = [f2, f2, f4, f4]
tags[loc]['Aconv'] = [f1]
loc = 'WS'
tags[loc]['Stencil'] = [[-1,0,0], [0,1,0], [0,0,1], [0,0,-1]]
tags[loc]['Vc'] = f4
tags[loc]['Acond'] = [f2, f2, f4, f4]
tags[loc]['Aconv'] = [f1]
# Outside horizontal edges top
loc = 'EU'
tags[loc]['Stencil'] = [[1,0,0], [0,1,0], [0,-1,0], [0,0,-1]]
tags[loc]['Vc'] = f4
tags[loc]['Acond'] = [f2, f4, f4, f2]
tags[loc]['Aconv'] = [f1]
loc = 'WU'
tags[loc]['Stencil'] = [[-1,0,0], [0,1,0], [0,-1,0], [0,0,-1]]
tags[loc]['Vc'] = f4
tags[loc]['Acond'] = [f2, f4, f4, f2]
tags[loc]['Aconv'] = [f1]
loc = 'NU'
tags[loc]['Stencil'] =[[1,0,0], [-1,0,0], [0,-1,0], [0,0,-1]]
tags[loc]['Vc'] = f4
tags[loc]['Acond'] = [f4, f4, f2, f2]
tags[loc]['Aconv'] = [f1]
loc = 'SU'
tags[loc]['Stencil'] = [[1,0,0], [-1,0,0], [0,1,0], [0,0,-1]]
tags[loc]['Vc'] = f4
tags[loc]['Acond'] = [f4, f4, f2, f2]
tags[loc]['Aconv'] = [f1]
# Outside horizontal edges bottom
loc = 'EB'
tags[loc]['Stencil'] = [[1,0,0], [0,1,0], [0,-1,0], [0,0,1]]
tags[loc]['Vc'] = f4
tags[loc]['Acond'] = [f2, f4, f4, f2]
tags[loc]['Aconv'] = [f1]
loc = 'WB'
tags[loc]['Stencil'] = [[-1,0,0], [0,1,0], [0,-1,0], [0,0,1]]
tags[loc]['Vc'] = f4
tags[loc]['Acond'] = [f2, f4, f4, f2]
tags[loc]['Aconv'] = [f1]
loc = 'NB'
tags[loc]['Stencil'] =[[1,0,0], [-1,0,0], [0,-1,0], [0,0,1]]
tags[loc]['Vc'] = f4
tags[loc]['Acond'] = [f4, f4, f2, f2]
tags[loc]['Aconv'] = [f1]
loc = 'SB'
tags[loc]['Stencil'] = [[1,0,0], [-1,0,0], [0,1,0], [0,0,1]]
tags[loc]['Vc'] = f4
tags[loc]['Acond'] = [f4, f4, f2, f2]
tags[loc]['Aconv'] = [f1]
#FACES
#---------------------------------------------
#Faces cannot be inside or outside
loc = 'E'
tags[loc]['Stencil'] = [[1,0,0], [0,1,0], [0,-1,0], [0,0,1], [0,0,-1]]
tags[loc]['Vc'] = f2
tags[loc]['Acond'] = [f1, f2, f2, f2, f2]
tags[loc]['Aconv'] = [f1]
loc = 'W'
tags[loc]['Stencil'] = [[-1,0,0], [0,1,0], [0,-1,0], [0,0,1], [0,0,-1]]
tags[loc]['Vc'] = f2
tags[loc]['Acond'] = [f1, f2, f2, f2, f2]
tags[loc]['Aconv'] = [f1]
loc = 'N'
tags[loc]['Stencil'] = [[1,0,0], [-1,0,0], [0,-1,0], [0,0,1], [0,0,-1]]
tags[loc]['Vc'] = f2
tags[loc]['Acond'] = [f2, f2, f1, f2, f2]
tags[loc]['Aconv'] = [f1]
loc = 'S'
tags[loc]['Stencil'] = [[1,0,0], [-1,0,0], [0,1,0], [0,0,1], [0,0,-1]]
tags[loc]['Vc'] = f2
tags[loc]['Acond'] = [f2, f2, f1, f2, f2]
tags[loc]['Aconv'] = [f1]
loc = 'U'
tags[loc]['Stencil'] = [[1,0,0], [-1,0,0], [0,1,0], [0,-1,0], [0,0,-1]]
tags[loc]['Vc'] = f2
tags[loc]['Acond'] = [f2, f2, f2, f2, f1]
tags[loc]['Aconv'] = [f1]
loc = 'B'
tags[loc]['Stencil'] = [[1,0,0], [-1,0,0], [0,1,0], [0,-1,0], [0,0,1]]
tags[loc]['Vc'] = f2
tags[loc]['Acond'] = [f2, f2, f2, f2, f1]
tags[loc]['Aconv'] = [f1]
#The main one: Center
loc = ""
tags[loc]['Stencil'] = full
tags[loc]['Vc'] = f1
tags[loc]['Acond'] = [f1]*6
tags[loc]['Aconv'] = [0.0]
#We alias full because the inside corners are also full stencils. The main problem is recognizing those corners and edges.
#INSIDE (4 top and bottom corners, 4 top and bottom edges, ignore vertical inside corners)
#INSIDE CORNERS
loc = "ESUI"
tags[loc]['Stencil'] = full #All inside items
tags[loc]['Vc'] = f2 + f8 #All inside corners 5 of 8 cubes in cube
tags[loc]['Acond'] = [f2+f4, f2, f2+f4, f2, f1, f4]
tags[loc]['Aconv'] = [f1+f4]
loc = "ESBI"
tags[loc]['Stencil'] = full
tags[loc]['Vc'] = f2 + f8
tags[loc]['Acond'] = [f2+f4, f2, f2+f4, f2, f4, f1]
tags[loc]['Aconv'] = [f1+f4]
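# Hypothetical usage sketch (not part of the original module): a conduction
# update for one node would look up its tag and pair each stencil offset with
# the matching cross-sectional area; T, k, ds and the node index (i, j, m) are
# illustrative assumptions only.
#
#   tag = tags['ESU']
#   conduction = sum(a * k * (T[i+di, j+dj, m+dm] - T[i, j, m]) / ds
#                    for (di, dj, dm), a in zip(tag['Stencil'], tag['Acond']))
#   # tag['Vc'] scales the node volume and tag['Aconv'] the area exposed to
#   # ambient (convection/radiation) exchange.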
|
StarcoderdataPython
|
3461178
|
<filename>tests/internal/commands/test_register.py
import os
import shutil
import tempfile
import unittest
from concurrent.futures.thread import ThreadPoolExecutor
import click
import yaml
from mock import MagicMock
from mock import call
from cli.internal.commands.register import RegisterApkCommand
from cli.internal.commands.register import RegisterConfigCommand
from cli.internal.commands.register import RegisterMediaCommand
from cli.internal.commands.register import RegisterProjectCommand
from cli.internal.models.apk import Apk
from cli.internal.models.media import Media
from cli.internal.models.os_config import OSConfig
from cli.internal.utils.remote import ApiError
from tests import __tests_root__
class RegisterCommandTest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.config = MagicMock()
self.config.push = True
self.config.no_https = False
self.config.executor = ThreadPoolExecutor()
def test_registration_exits_cleanly_on_failure(self):
config_file = os.path.join(__tests_root__, 'res', 'config.yml')
command = RegisterConfigCommand(self.config, [config_file])
self.config.api.upload_artifact = MagicMock(side_effect=ApiError())
with self.assertRaises(click.Abort):
command.run()
def test_registration_with_rewrite_exits_cleanly_on_failure(self):
config_file = os.path.join(__tests_root__, 'res', 'config3.yml')
command = RegisterConfigCommand(self.config, [config_file])
self.config.api.get_latest_artifact = MagicMock(side_effect=ApiError())
with self.assertRaises(click.Abort):
command.run()
def test_config_registers_successfully(self):
input_config_file = os.path.join(__tests_root__, 'res', 'config.yml')
working_dir = tempfile.mkdtemp()
config_file = os.path.join(working_dir, 'config.yml')
command = RegisterConfigCommand(self.config, [input_config_file], working_dir)
command.run()
self.config.api.upload_artifact.assert_called_with(
config_file, OSConfig.parse(self.config, config_file))
def test_config_registers_rewritten_config_successfully(self):
self.config.api.get_latest_artifact = MagicMock(return_value={'version': '41'})
self.config.api.get_highest_artifact = MagicMock(return_value={'version': '41'})
input_config_file = os.path.join(__tests_root__, 'res', 'config4.yml')
working_dir = tempfile.mkdtemp()
config_file = os.path.join(working_dir, 'config4.yml')
command = RegisterConfigCommand(self.config, [input_config_file], working_dir)
command.run()
with open(config_file) as f:
yml = yaml.safe_load(f)
self.assertDictEqual(yml, {
'os': {
'name': 'project-id4',
'version': 42,
'configurations': {'mason-management': {'disable_keyguard': True}}
},
'apps': [{
'name': '<NAME>',
'package_name': 'com.example.app1',
'version_code': 1
}, {
'name': '<NAME>',
'package_name': 'com.example.app2',
'version_code': 41
}],
'media': {
'bootanimation': {
'name': 'anim',
'version': 41
}
}
})
def test_config_registers_new_rewritten_config_successfully(self):
# noinspection PyUnusedLocal
def version_finder(name, type):
if type == 'apk' or type == 'media':
return {'version': '12'}
self.config.api.get_latest_artifact = MagicMock(side_effect=version_finder)
self.config.api.get_highest_artifact = MagicMock(side_effect=version_finder)
input_config_file = os.path.join(__tests_root__, 'res', 'config4.yml')
working_dir = tempfile.mkdtemp()
config_file = os.path.join(working_dir, 'config4.yml')
command = RegisterConfigCommand(self.config, [input_config_file], working_dir)
command.run()
with open(config_file) as f:
yml = yaml.safe_load(f)
self.assertDictEqual(yml, {
'os': {
'name': 'project-id4',
'version': 1,
'configurations': {'mason-management': {'disable_keyguard': True}}
},
'apps': [{
'name': '<NAME>',
'package_name': 'com.example.app1',
'version_code': 1
}, {
'name': '<NAME>',
'package_name': 'com.example.app2',
'version_code': 12
}],
'media': {
'bootanimation': {
'name': 'anim',
'version': 12
}
}
})
def test_apk_registers_successfully(self):
apk_file1 = os.path.join(__tests_root__, 'res', 'v1.apk')
apk_file2 = os.path.join(__tests_root__, 'res', 'v1and2.apk')
command = RegisterApkCommand(self.config, [apk_file1, apk_file2])
command.run()
self.config.api.upload_artifact.assert_has_calls([
call(apk_file1, Apk.parse(self.config, apk_file1)),
call(apk_file2, Apk.parse(self.config, apk_file2))
], any_order=True)
def test_media_registers_successfully(self):
media_file = os.path.join(__tests_root__, 'res', 'bootanimation.zip')
command = RegisterMediaCommand(self.config, 'Boot Anim', 'bootanimation', '1', media_file)
command.run()
self.config.api.upload_artifact.assert_called_with(
media_file, Media.parse(self.config, 'Boot anim', 'bootanimation', '1', media_file))
def test_latest_media_registers_successfully(self):
self.config.api.get_highest_artifact = MagicMock(return_value={'version': '41'})
media_file = os.path.join(__tests_root__, 'res', 'bootanimation.zip')
command = RegisterMediaCommand(
self.config, 'Boot Anim', 'bootanimation', 'latest', media_file)
command.run()
self.config.api.upload_artifact.assert_called_with(
media_file, Media.parse(self.config, 'Boot anim', 'bootanimation', '42', media_file))
def test_latest_non_existant_media_registers_successfully(self):
self.config.api.get_highest_artifact = MagicMock(return_value=None)
media_file = os.path.join(__tests_root__, 'res', 'bootanimation.zip')
command = RegisterMediaCommand(
self.config, 'Boot Anim', 'bootanimation', 'latest', media_file)
command.run()
self.config.api.upload_artifact.assert_called_with(
media_file, Media.parse(self.config, 'Boot anim', 'bootanimation', '1', media_file))
def test_project_registers_successfully(self):
self.config.endpoints_store.__getitem__ = MagicMock(return_value='https://google.com')
self.config.api.get_build = MagicMock(return_value={'data': {'status': 'COMPLETED'}})
simple_project = os.path.join(__tests_root__, 'res', 'simple-project')
apk_file = os.path.join(__tests_root__, 'res', 'simple-project', 'v1.apk')
working_dir = tempfile.mkdtemp()
config_file = os.path.join(working_dir, 'mason.yml')
command = RegisterProjectCommand(self.config, simple_project, working_dir)
command.run()
self.config.api.upload_artifact.assert_has_calls([
call(apk_file, Apk.parse(self.config, apk_file)),
call(config_file, OSConfig.parse(self.config, config_file))
])
self.config.api.start_build.assert_called_with('project-id2', '2', None)
def test_project_registers_updated_config(self):
self.config.endpoints_store.__getitem__ = MagicMock(return_value='https://google.com')
self.config.api.get_build = MagicMock(return_value={'data': {'status': 'COMPLETED'}})
simple_project = os.path.join(__tests_root__, 'res', 'simple-project')
working_dir = tempfile.mkdtemp()
config_file = os.path.join(working_dir, 'mason.yml')
command = RegisterProjectCommand(self.config, simple_project, working_dir)
command.run()
with open(config_file) as f:
yml = yaml.safe_load(f)
self.assertDictEqual(yml, {
'os': {
'name': 'project-id2',
'version': 2,
'configurations': {'mason-management': {'disable_keyguard': True}}
},
'apps': [{
'name': 'Dummy app',
'package_name': 'com.supercilex.test',
'version_code': 384866
}]
})
def test_project_registers_specific_app_version(self):
self.config.endpoints_store.__getitem__ = MagicMock(return_value='https://google.com')
self.config.api.get_build = MagicMock(return_value={'data': {'status': 'COMPLETED'}})
project_dir = tempfile.mkdtemp()
working_dir = tempfile.mkdtemp()
config_file = os.path.join(working_dir, 'mason.yml')
command = RegisterProjectCommand(self.config, project_dir, working_dir)
shutil.copytree(
os.path.join(__tests_root__, 'res', 'simple-project'),
project_dir,
dirs_exist_ok=True
)
with open(os.path.join(project_dir, 'mason.yml'), 'w') as f:
f.write("""
os:
name: test-project
version: 1
apps:
- name: Dummy app
package_name: com.supercilex.test
version_code: 123
""")
command.run()
with open(config_file) as f:
yml = yaml.safe_load(f)
self.assertDictEqual(yml, {
'os': {
'name': 'test-project',
'version': 1
},
'apps': [{
'name': 'Dummy app',
'package_name': 'com.supercilex.test',
'version_code': 123
}]
})
def test_project_registers_latest_app_version_from_local(self):
self.config.endpoints_store.__getitem__ = MagicMock(return_value='https://google.com')
self.config.api.get_build = MagicMock(return_value={'data': {'status': 'COMPLETED'}})
self.config.api.get_latest_artifact = MagicMock(return_value={'version': '1234'})
project_dir = tempfile.mkdtemp()
working_dir = tempfile.mkdtemp()
config_file = os.path.join(working_dir, 'mason.yml')
command = RegisterProjectCommand(self.config, project_dir, working_dir)
shutil.copytree(
os.path.join(__tests_root__, 'res', 'simple-project'),
project_dir,
dirs_exist_ok=True
)
with open(os.path.join(project_dir, 'mason.yml'), 'w') as f:
f.write("""
os:
name: test-project
version: 1
apps:
- name: Dummy app
package_name: com.supercilex.test
version_code: latest
""")
command.run()
with open(config_file) as f:
yml = yaml.safe_load(f)
self.assertDictEqual(yml, {
'os': {
'name': 'test-project',
'version': 1
},
'apps': [{
'name': 'Dummy app',
'package_name': 'com.supercilex.test',
'version_code': 384866
}]
})
def test_project_registers_latest_app_version_from_remote(self):
self.config.endpoints_store.__getitem__ = MagicMock(return_value='https://google.com')
self.config.api.get_build = MagicMock(return_value={'data': {'status': 'COMPLETED'}})
self.config.api.get_latest_artifact = MagicMock(return_value={'version': '999999'})
project_dir = tempfile.mkdtemp()
working_dir = tempfile.mkdtemp()
config_file = os.path.join(working_dir, 'mason.yml')
command = RegisterProjectCommand(self.config, project_dir, working_dir)
shutil.copytree(
os.path.join(__tests_root__, 'res', 'simple-project'),
project_dir,
dirs_exist_ok=True
)
with open(os.path.join(project_dir, 'mason.yml'), 'w') as f:
f.write("""
os:
name: test-project
version: 1
apps:
- name: Dummy app
package_name: com.supercilex.test
version_code: latest
""")
command.run()
with open(config_file) as f:
yml = yaml.safe_load(f)
self.assertDictEqual(yml, {
'os': {
'name': 'test-project',
'version': 1
},
'apps': [{
'name': 'Dummy app',
'package_name': 'com.supercilex.test',
'version_code': 999999
}]
})
def test_project_registers_latest_app_version_when_remote_nonexistent(self):
self.config.endpoints_store.__getitem__ = MagicMock(return_value='https://google.com')
self.config.api.get_build = MagicMock(return_value={'data': {'status': 'COMPLETED'}})
self.config.api.get_latest_artifact = MagicMock(return_value=None)
project_dir = tempfile.mkdtemp()
working_dir = tempfile.mkdtemp()
config_file = os.path.join(working_dir, 'mason.yml')
command = RegisterProjectCommand(self.config, project_dir, working_dir)
shutil.copytree(
os.path.join(__tests_root__, 'res', 'simple-project'),
project_dir,
dirs_exist_ok=True
)
with open(os.path.join(project_dir, 'mason.yml'), 'w') as f:
f.write("""
os:
name: test-project
version: 1
apps:
- name: Dummy app
package_name: com.supercilex.test
version_code: latest
""")
command.run()
with open(config_file) as f:
yml = yaml.safe_load(f)
self.assertDictEqual(yml, {
'os': {
'name': 'test-project',
'version': 1
},
'apps': [{
'name': 'Dummy app',
'package_name': 'com.supercilex.test',
'version_code': 384866
}]
})
def test_project_registers_updated_complex_config(self):
self.config.endpoints_store.__getitem__ = MagicMock(return_value='https://google.com')
self.config.api.get_build = MagicMock(return_value={'data': {'status': 'COMPLETED'}})
self.config.api.get_latest_artifact = MagicMock(return_value={'version': '41'})
self.config.api.get_highest_artifact = MagicMock(return_value={'version': '41'})
complex_project = os.path.join(__tests_root__, 'res', 'complex-project')
working_dir = tempfile.mkdtemp()
config_file = os.path.join(working_dir, 'config3.yml')
command = RegisterProjectCommand(self.config, complex_project, working_dir)
command.run()
with open(config_file) as f:
yml = yaml.safe_load(f)
self.assertDictEqual(yml, {
'os': {
'name': 'project-id3',
'version': 42,
'configurations': {'mason-management': {'disable_keyguard': True}}
},
'apps': [{
'name': '<NAME>',
'package_name': 'com.example.app1',
'version_code': 1
}, {
'name': 'Dummy app',
'package_name': 'com.supercilex.test',
'version_code': 384866
}, {
'name': '<NAME>',
'package_name': 'com.example.app2',
'version_code': 41
}],
'media': {
'bootanimation': {
'name': 'anim-1',
'version': 42
},
'splash': {
'name': 'splash-1',
'version': 42
}
}
})
|
StarcoderdataPython
|
3450463
|
<reponame>heart-your-health/valve
import socket
import os
import json
from aiohttp.web import Response, Application, json_response, HTTPForbidden
import aiohttp_cors
from .lib.database import db_init
from .lib.utils import get_config, get_file, parse_auth_header
from .lib.loggers import logger
from .middleware.validation import validation
from .middleware.transaction import transaction
from .middleware.validate_app import validate_app
from .constants.ignore import ignore_validation
def create_app():
file_name = get_file(path="config", extention="py")
config, config_error = get_config(file_name=file_name)
if config_error:
raise config_error
app = Application(middlewares=[
validate_app,
validation(ignore=ignore_validation),
transaction
])
app["config"] = config
db_init(app)
# apikey = config["HOSTEDGRAPHITE_APIKEY"]
# sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# sock.sendto("%s.request.time 1444\n" % apikey, ("carbon.hostedgraphite.com", 2003))
cors = aiohttp_cors.setup(app, defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_headers="*",
)
})
from .api.routes import (
handle_authenticate_profile,
handle_get_profile,
handle_token_validation,
handle_token_exchange,
handle_add_eats,
handle_search_eats,
handle_browse_eats,
handle_get_eats)
cors.add(app.router.add_post('/api/v1/profile', handle_authenticate_profile))
cors.add(app.router.add_get('/api/v1/profile', handle_get_profile))
cors.add(app.router.add_put('/api/v1/profile', handle_authenticate_profile))
cors.add(app.router.add_get('/api/v1/exchange', handle_token_exchange))
cors.add(app.router.add_get('/api/v1/validation', handle_token_validation))
#eats
cors.add(app.router.add_get('/api/v1/eats', handle_get_eats))
cors.add(app.router.add_post('/api/v1/eats', handle_add_eats))
cors.add(app.router.add_get('/api/v1/eats/search', handle_search_eats))
cors.add(app.router.add_get('/api/v1/eats/browse', handle_browse_eats))
return app
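# Hypothetical usage sketch (not part of the original module): the factory can
# be handed straight to aiohttp's runner; the port is an illustrative choice.
#
#   from aiohttp import web
#   web.run_app(create_app(), port=8080)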
|
StarcoderdataPython
|
4954702
|
<reponame>urbanenomad/clusterfuzz<gh_stars>1-10
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler used for adding new CC's to filed oss-fuzz bugs."""
import logging
from base import external_users
from base import memoize
from datastore import data_types
from datastore import ndb_utils
from handlers import base_handler
from libs import handler
from libs.issue_management import issue_filer
from libs.issue_management import issue_tracker_policy
from libs.issue_management import issue_tracker_utils
def get_open_testcases_with_bugs():
"""Return iterator to open testcases with bugs."""
return data_types.Testcase.query(
ndb_utils.is_true(data_types.Testcase.open),
data_types.Testcase.status == 'Processed',
data_types.Testcase.bug_information != '').order( # pylint: disable=g-explicit-bool-comparison
data_types.Testcase.bug_information, data_types.Testcase.key)
class Handler(base_handler.Handler):
"""Cron handler for adding new CC's to oss-fuzz bugs.."""
@handler.check_cron()
def get(self):
"""Handle a cron job."""
@memoize.wrap(memoize.FifoInMemory(256))
def cc_users_for_job(job_type, security_flag):
"""Return users to CC for a job."""
# Memoized per cron run.
return external_users.cc_users_for_job(job_type, security_flag)
for testcase in get_open_testcases_with_bugs():
issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
testcase)
if not issue_tracker:
logging.error('Failed to get issue tracker manager for %s',
testcase.key.id())
continue
policy = issue_tracker_policy.get(issue_tracker.project)
reported_label = policy.label('reported')
if not reported_label:
return
reported_pattern = issue_filer.get_label_pattern(reported_label)
try:
issue = issue_tracker.get_original_issue(testcase.bug_information)
except:
logging.error('Error occurred when fetching issue %s.',
testcase.bug_information)
continue
if not issue or not issue.is_open:
continue
ccs = cc_users_for_job(testcase.job_type, testcase.security_flag)
new_ccs = [cc for cc in ccs if cc not in issue.ccs]
if not new_ccs:
# Nothing to do.
continue
for cc in new_ccs:
logging.info('CCing %s on %s', cc, issue.id)
issue.ccs.add(cc)
comment = None
if not issue.labels.has_with_pattern(reported_pattern):
# Add reported label and deadline comment if necessary.
for result in issue_filer.apply_substitutions(policy, reported_label,
testcase):
issue.labels.add(result)
if policy.label('restrict_view') in issue.labels:
logging.info('Adding deadline comment on %s', issue.id)
comment = policy.deadline_policy_message
issue.save(new_comment=comment, notify=True)
|
StarcoderdataPython
|
4817290
|
class Surface_Brightness_Class():
def __init__(self, option=1):
import sys
if (option==1):
from simpleBetaProfile import Surface_Brightness_Model
elif (option==2):
from simpleDoubleBetaProfile import Surface_Brightness_Model
elif (option==3):
from simpleCCandNCCProfile import Surface_Brightness_Model
else:
print "ERROR: Input option does not exist!"
print "Please change XML file and try again."
raw_input("Press enter to exit! ")
sys.exit(2)
self.F2SB_Class = Surface_Brightness_Model()
|
StarcoderdataPython
|
6704812
|
<filename>backend/appengine/routes/produtos/home.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from categoria.categoria_model import Categoria, Produto
from config.template_middleware import TemplateResponse
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
from routes.produtos.new import salvar
from tekton.router import to_path
@login_not_required
@no_csrf
def index(categoria_selecionada=None):
ctx={'categorias':Categoria.query_ordenada_por_nome().fetch(),
'salvar_path':to_path(salvar)}
if categoria_selecionada is None:
ctx['produtos']=Produto.query_ordenada_por_nome().fetch()
ctx['categoria_selecionada'] = None
else:
ctx['categoria_selecionada'] = Categoria.get_by_id(int(categoria_selecionada))
ctx['produtos']=Produto.query_por_categoria_ordenada_por_nome(categoria_selecionada).fetch()
return TemplateResponse(ctx,'/produtos/home.html')
|
StarcoderdataPython
|
1655781
|
# Generated by Django 2.2.5 on 2020-10-22 03:58
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('health', '0007_auto_20201018_1832'),
]
operations = [
migrations.AlterField(
model_name='bodymassindex',
name='weighing_date',
field=models.DateTimeField(default=datetime.datetime(2020, 10, 22, 0, 58, 6, 600368), editable=False, verbose_name='Data Pesagem'),
),
]
|
StarcoderdataPython
|
1616425
|
<gh_stars>1-10
from typing import Union, List
from oolearning.model_processors.SingleUseObject import Cloneable
from oolearning.model_wrappers.HyperParamsBase import HyperParamsBase
from oolearning.model_wrappers.ModelWrapperBase import ModelWrapperBase
from oolearning.transformers.TransformerBase import TransformerBase
class CloneableFactory:
def __init__(self, cloneable: Union[Cloneable, List[Cloneable]]):
if isinstance(cloneable, list):
for x in cloneable:
assert x is None or isinstance(x, Cloneable)
else:
assert cloneable is None or isinstance(cloneable, Cloneable)
self._cloneable = cloneable
def get(self):
if self._cloneable is None:
return None
if isinstance(self._cloneable, list):
new_object = [x.clone() if x is not None else None for x in self._cloneable]
else:
new_object = self._cloneable.clone()
return new_object
class ModelFactory(CloneableFactory):
def __init__(self, model: ModelWrapperBase, hyper_params: HyperParamsBase = None):
super().__init__(cloneable=[model, hyper_params])
assert isinstance(model, ModelWrapperBase)
assert hyper_params is None or isinstance(hyper_params, HyperParamsBase)
def get_model(self):
return self.get()[0]
def get_hyper_params(self):
return self.get()[1]
class TransformerFactory(CloneableFactory):
def __init__(self, transformations: List[TransformerBase]):
transformations = [x for x in transformations if x is not None] if transformations is not None else []
super().__init__(cloneable=transformations)
if transformations is not None:
for x in transformations:
assert x is None or isinstance(x, TransformerBase)
def has_transformations(self):
return self._cloneable is not None and len(self._cloneable) > 0
def append_transformations(self, transformations: List[TransformerBase]):
if transformations is not None:
transformations = [x for x in transformations if x is not None]
self._cloneable = self._cloneable + transformations
|
StarcoderdataPython
|
1835939
|
<reponame>ardihikaru/learn-to-cluster<filename>baseline/__init__.py
from .sklearn_cluster import *
# from .approx_rank_order_cluster import *
|
StarcoderdataPython
|
8107350
|
<reponame>gt-ros-pkg/hrl-haptic-manip<filename>hrl_haptic_mpc/src/hrl_haptic_mpc/crona_sim_arms.py
#!/usr/bin/env python
# Copyright 2013 Georgia Tech Research Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# http://healthcare-robotics.com/
## @package hrl_haptic_mpc
# @author <NAME>
import numpy as np, math
from threading import RLock, Timer
import sys, copy
import roslib; roslib.load_manifest('sttr_behaviors')
import rospy
import actionlib
import tf
from sensor_msgs.msg import JointState
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from hrl_msgs.msg import FloatArrayBare
from equilibrium_point_control.hrl_arm import HRLArm
from pykdl_utils.kdl_kinematics import create_kdl_kin
class CronaArm(HRLArm):
def __init__(self, arm, tf_listener=None):
if arm != 'l' and arm != 'r':
raise Exception, 'Arm should only be "l" or "r"'
kinematics = create_kdl_kin('/torso_chest_link', arm + '_hand_link')
HRLArm.__init__(self, kinematics)
self.joint_names_list = kinematics.get_joint_names()
self.torso_position = None
self.arm_efforts = None
self.delta_jep = None
try:
self.kp = [rospy.get_param(arm+'_arm_controller/gains/'+nm+'/p') for nm in self.joint_names_list]
except:
print "kp is not on param server ... exiting"
assert(False)
# max_kp = np.max(self.kp)
#self.kp[-1] = 5. #This smells like a Hack.
#self.kp[-2] = 50.
#self.kp[-3] = 50.
try:
self.kd = [rospy.get_param(arm+'_arm_controller/gains/'+nm+'/d') for nm in self.joint_names_list]
except:
print "kd is not on param server ... exiting"
assert(False)
rospy.Subscriber('/joint_states', JointState, self.joint_states_cb)
# Set desired joint angle - either through a delta from the current position, or as an absolute value.
rospy.Subscriber ("/haptic_mpc/q_des", FloatArrayBare, self.set_ep_ros)
rospy.Subscriber ("/haptic_mpc/delta_q_des", FloatArrayBare, self.set_delta_ep_ros)
#rospy.Subscriber("/delta_jep_mpc_cvxgen", FloatArrayBare, self.set_ep_ros)
#self.marker_pub = rospy.Publisher(arm+'_arm/viz/markers', Marker)
#self.cep_marker_id = 1
try:
if tf_listener == None:
self.tf_lstnr = tf.TransformListener()
else:
self.tf_lstnr = tf_listener
except rospy.ServiceException, e:
rospy.loginfo("ServiceException caught while instantiating a TF listener. This seems to be normal.")
pass
self.joint_angles_pub = rospy.Publisher(arm+'_arm_controller/command',
JointTrajectory)
##
# Callback for /joint_states topic. Updates current joint
# angles and efforts for the arms constantly
# @param data JointState message recieved from the /joint_states topic
def joint_states_cb(self, data):
arm_angles = []
arm_efforts = []
arm_vel = []
jt_idx_list = [0]*len(self.joint_names_list)
for i, jt_nm in enumerate(self.joint_names_list):
jt_idx_list[i] = data.name.index(jt_nm)
for i, idx in enumerate(jt_idx_list):
if data.name[idx] != self.joint_names_list[i]:
raise RuntimeError('joint angle name does not match.')
arm_angles.append(data.position[idx])
arm_efforts.append(data.effort[idx])
arm_vel.append(data.velocity[idx])
with self.lock:
self.q = arm_angles
self.arm_efforts = arm_efforts
self.qdot = arm_vel
torso_idx = data.name.index('torso_chest_joint')
self.torso_position = data.position[torso_idx]
def set_ep(self, jep, duration=0.15):
jep = copy.copy(jep)
if jep is None or len(jep) != len(self.joint_names_list):
raise RuntimeError("set_jep value is " + str(jep))
with self.lock:
trajectory = JointTrajectory()
trajectory.joint_names = self.joint_names_list
jtp = JointTrajectoryPoint()
jtp.positions = jep
jtp.time_from_start = rospy.Duration(duration)
trajectory.points.append(jtp)
self.joint_angles_pub.publish(trajectory)
self.ep = jep
def set_delta_ep_ros(self, msg):
delta_jep = copy.copy(msg.data)
if delta_jep is None or len(delta_jep) != len(self.joint_names_list):
raise RuntimeError("set_jep value is " + str(delta_jep))
with self.lock:
if self.ep == None:
self.ep = self.get_joint_angles()
jep = (np.array(self.ep) + np.array(delta_jep)).tolist()
trajectory = JointTrajectory()
trajectory.joint_names = self.joint_names_list
jtp = JointTrajectoryPoint()
jtp.positions = jep
jtp.time_from_start = rospy.Duration(0.15)
trajectory.points.append(jtp)
self.joint_angles_pub.publish(trajectory)
self.ep = jep
def set_ep_ros(self, msg):
with self.lock:
des_jep = copy.copy(msg.data)
if des_jep is None or len(des_jep) != len(self.joint_names_list):
raise RuntimeError("set_jep value is " + str(des_jep))
# self.delta_jep = des_jep
jep = (np.array(des_jep)).tolist()
trajectory = JointTrajectory()
trajectory.joint_names = self.joint_names_list
jtp = JointTrajectoryPoint()
jtp.positions = jep
jtp.time_from_start = rospy.Duration(0.15)
trajectory.points.append(jtp)
self.joint_angles_pub.publish(trajectory)
self.ep = jep
def wrap_angles(self, q):
for ind in [4, 6]:
while q[ind] < -np.pi:
q[ind] += 2*np.pi
while q[ind] > np.pi:
q[ind] -= 2*np.pi
return q
def publish_rviz_markers(self):
# publish the CEP marker.
o = np.matrix([0.,0.,0.,1.]).T
jep = self.get_ep()
cep, r = self.kinematics.FK(jep)
cep_marker = hv.single_marker(cep, o, 'sphere',
'/torso_lift_link', color=(0., 0., 1., 1.),
scale = (0.02, 0.02, 0.02), duration=0.,
m_id=1)
cep_marker.header.stamp = rospy.Time.now()
self.marker_pub.publish(cep_marker)
q = self.get_joint_angles()
ee, r = self.kinematics.FK(q)
ee_marker = hv.single_marker(ee, o, 'sphere',
'/torso_lift_link', color=(0., 1., 0., 1.),
scale = (0.02, 0.02, 0.02), duration=0.,
m_id=2)
ee_marker.header.stamp = rospy.Time.now()
self.marker_pub.publish(ee_marker)
if __name__ == '__main__':
rospy.init_node('crona_arms_test')
robot = CronaArm('l')
|
StarcoderdataPython
|
9667516
|
import datetime
import pandas as pd
from django.core.management.base import BaseCommand
from library.log_utils import console_logger
from snpdb.models import Lab, LabProject, Organization
LEADER = 'Leader'
MEMBERS = 'Members'
NAME = 'Name'
INSTITUTION = 'Institution'
CITY = 'City'
COUNTRY = 'Country'
LAT = 'Lat'
LONG = 'Long'
FAMILIES = 'Families'
INVOLVED = 'Involved'
URL = 'URL'
class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument('LabNameLocations', help='csv file for Lab Details')

    def handle(self, *args, **options):
        filename = options["LabNameLocations"]
        logger = console_logger()
        df = pd.read_csv(filename, sep='\t', index_col=None)
        for col in [LEADER, MEMBERS, NAME, INSTITUTION, CITY, COUNTRY, LAT, LONG, FAMILIES, INVOLVED, URL]:
            if col not in df.columns:
                msg = f"Expected column '{col}' in tab separated file LabNameLocations"
                raise ValueError(msg)
        # Insert Lab Information and Project Data
        for _, row in df.iterrows():
            organization, _ = Organization.objects.get_or_create(name=row[INSTITUTION])
            lab = Lab.objects.create(name=row[NAME],
                                     organization=organization,
                                     city=row[CITY],
                                     country=row[COUNTRY],
                                     url=row[URL],
                                     lat=row[LAT],
                                     long=row[LONG])
            LabProject.objects.create(lab=lab,
                                      leader=row[LEADER],
                                      members=row[MEMBERS],
                                      families=row[FAMILIES],
                                      involved=row[INVOLVED] == "Y",
                                      date=datetime.date.today())
            print("saved lab '%s'" % row[NAME])
        logger.info("saved data")
|
StarcoderdataPython
|
1738303
|
#!/usr/bin/env python
#
# Copyright (c) 2020, Pycom Limited.
#
# This software is licensed under the GNU GPL version 3 or any
# later version, with permitted additional terms. For more information
# see the Pycom Licence v1.0 document supplied with this file, or
# available at https://www.pycom.io/opensource/licensing
#
from pycoproc import Pycoproc
__version__ = '1.4.1'
class Pytrack(Pycoproc):

    def __init__(self, i2c=None, sda='P22', scl='P21'):
        Pycoproc.__init__(self, Pycoproc.PYTRACK, i2c, sda, scl)
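# Minimal usage sketch (an assumption, not part of the original file): on a
# Pycom board the class is typically constructed with the default I2C pins and
# then driven through methods inherited from Pycoproc, e.g.
#
#     py = Pytrack()
#     print(py.read_fw_version())   # read_fw_version() is assumed to exist on Pycoproc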
|
StarcoderdataPython
|
9622672
|
<gh_stars>1-10
__version_tuple__ = (0, 2, 1)
__version__ = '0.2.1'
|
StarcoderdataPython
|
126731
|
"""
Handles sounds for Some Platformer Game
Created by sheepy0125
30/10/2021
"""
#############
### Setup ###
#############
# Import
from pygame_setup import pygame
from utils import Logger, ROOT_PATH
from time import time
# Variables
SOUND_PATH = ROOT_PATH / "assets" / "sfx"
#######################
### Sound dataclass ###
#######################
class Sounds:
    """Dataclass for storing sounds"""

    sound_dict = {
        "jump": {"filename": str(SOUND_PATH / "jump.wav"), "idx": 0},
        "grass_step": {"filename": str(SOUND_PATH / "grass_step.wav"), "idx": 1},
        "stone_step": {"filename": str(SOUND_PATH / "stone_step.wav"), "idx": 2},
        "damage": {"filename": str(SOUND_PATH / "damage.wav"), "idx": 3},
        "death": {"filename": str(SOUND_PATH / "death.wav"), "idx": 4},
    }

# Initialize sounds
pygame.mixer.init(frequency=44100, size=-16, channels=2, buffer=(2 ** 12))
channels = [pygame.mixer.Channel(i) for i in range(len(Sounds.sound_dict))]

##################
### Play sound ###
##################
def play_sound(sound_name: str, volume: float = 1.0):
    """
    Plays a sound with error handling
    Has timeouts as well
    Refer to Sounds.sound_dict for sound names
    """
    try:
        sound = pygame.mixer.Sound(Sounds.sound_dict[sound_name]["filename"])
        sound_idx = Sounds.sound_dict[sound_name]["idx"]

        # Ensure sound is not already playing
        if channels[sound_idx].get_busy():
            raise RuntimeError

        # Play sound
        channels[sound_idx].set_volume(volume)
        channels[sound_idx].play(sound)

    except KeyError:
        Logger.fatal(f"Sound {sound_name} not found, cannot play")
    except RuntimeError:
        # This occurs when the sound is already being played.
        # Logger.log("Attempted to play sound, but the sound is already playing")
        pass
    except Exception as error:
        Logger.fatal(
            "Error in playing sound: Sound was found, but another error occurred"
        )
        Logger.log_error(error)

##################
### Stop sound ###
##################
def stop_sound(sound_name: str):
    try:
        sound_idx = Sounds.sound_dict[sound_name]["idx"]
        channels[sound_idx].stop()
    except KeyError:
        Logger.fatal(f"Sound {sound_name} not found, cannot stop")
    except Exception as error:
        Logger.fatal(
            "Error in stopping sound: Sound was found, but another error occurred"
        )
        Logger.log_error(error)
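##########################
### Usage sketch (new) ###
##########################
# Minimal usage sketch (an addition, not part of the original module): the keys
# of Sounds.sound_dict are the valid sound names, so a jump effect at half
# volume could be triggered and later stopped like this:
#
#     play_sound("jump", volume=0.5)
#     stop_sound("jump")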
|
StarcoderdataPython
|
3437675
|
import re
text = 'purple <EMAIL>, blah monkey <EMAIL> blah dishwasher'
emails = re.findall(r'[\w\.-]+@[\w\.-]+', text)
for email in emails:
    print(email)
|
StarcoderdataPython
|
3500170
|
#!/usr/bin/python
"""
The MIT License (MIT)
Copyright (c) 2015 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# This script generates PNGs from RRD files for a given time range. The expected
# use case is by a cron job. Usage should be fairly straightforward.
outpath = "/opt/ghpi/www/graphs/"
rrdpath = "/opt/ghpi/rrd/"
import rrdtool, glob, os, sys, datetime
def graph(time, rrdfile):
if time not in ["6h", "1d", "2d", "7d", "14d", "1m", "3m", "1y"]:
print "invalid time specified"
sys.exit(1)
if os.path.isfile(rrdfile):
print "generating %s graph for %s" % (time, rrdfile)
rrdfile = os.path.basename(rrdfile)
outfile = outpath + time + '-' + rrdfile + ".png"
rrdtool.graph(outfile, '-a', 'PNG',
'-w', '370', '-h', '130',
'-u 110', '-l', '40',
'--start', '-%s' % time, '--end', 'now',
'--slope-mode',
'--font', 'DEFAULT:7:',
'--title', "last %s from %s" % (time, rrdfile),
'--watermark', 'Generated on %s' % datetime.datetime.now(),
#'--vertical-label', 'temp',
#'--right-axis', '1:0',
#'--x-grid', 'MINUTE:10:HOUR:1:MINUTE:120:0:%I%p',
'--alt-y-grid', '--rigid',
'DEF:tempa=' + rrdpath + rrdfile +':t:AVERAGE',
'AREA:tempa#CCCCCC:avg',
'DEF:temp=' + rrdpath + rrdfile +':t:MAX',
'LINE2:temp#0000FF:max',
'GPRINT:temp:LAST:Cur\: %5.2lf\g',
'GPRINT:temp:MAX:Max\: %5.2lf\g',
'GPRINT:temp:MIN:Min\: %5.2lf\t' )
if len(sys.argv) == 1:
print "Usage: ./graph.py <6h,1d,2d,7d,14d,1m,3m,1y> [rrd file path]"
sys.exit(1)
# graph all rrds in rrdpath
if len(sys.argv) == 2:
rrds = glob.glob(rrdpath + "*.rrd")
for rrdfile in rrds:
graph(sys.argv[1], rrdfile)
# graph one rrd
if len(sys.argv) == 3:
rrdfile = rrdpath + sys.argv[2]
if os.path.isfile(rrdfile):
graph(sys.argv[1], rrdfile)
else:
print "rrd file %s not found" % rrdfile
|
StarcoderdataPython
|
9616720
|
<reponame>codeif/WeChat-OAuth2
#!/usr/bin/env python
from setuptools import setup, find_packages
import re
with open('README.rst') as f:
    readme = f.read()

with open('wechat_oauth2/__about__.py', 'r') as fd:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)

if not version:
    raise RuntimeError('Cannot find version information')

setup(
    name='WeChat-OAuth2',
    version=version,
    description='wechat sdk',
    long_description=readme,
    author='codeif',
    author_email='<EMAIL>',
    url='https://github.com/codeif/WeChat-OAuth2',
    license='MIT',
    install_requires=['rauth', 'requests'],
    packages=find_packages(),
)
|
StarcoderdataPython
|
3577939
|
import asyncio
import logging
from typing import AsyncGenerator, Dict, Optional
from src.protocols.introducer_protocol import RespondPeers, RequestPeers
from src.server.connection import PeerConnections
from src.server.outbound_message import Delivery, Message, NodeType, OutboundMessage
from src.types.sized_bytes import bytes32
from src.server.server import ChiaServer
from src.util.api_decorators import api_request
log = logging.getLogger(__name__)
class Introducer:
def __init__(self, max_peers_to_send: int, recent_peer_threshold: int):
self.vetted: Dict[bytes32, bool] = {}
self.max_peers_to_send = max_peers_to_send
self.recent_peer_threshold = recent_peer_threshold
self._shut_down = False
self.server: Optional[ChiaServer] = None
async def _start(self):
self._vetting_task = asyncio.create_task(self._vetting_loop())
def _close(self):
self._shut_down = True
async def _await_closed(self):
await self._vetting_task
def _set_server(self, server: ChiaServer):
self.server = server
async def _vetting_loop(self):
while True:
if self._shut_down:
return
try:
log.info("Vetting random peers.")
rawpeers = self.global_connections.peers.get_peers(
100, True, self.recent_peer_threshold
)
for peer in rawpeers:
if self._shut_down:
return
if peer.get_hash() not in self.vetted:
try:
log.info(f"Vetting peer {peer.host} {peer.port}")
r, w = await asyncio.wait_for(
asyncio.open_connection(peer.host, int(peer.port)),
timeout=3,
)
w.close()
except Exception as e:
log.warning(f"Could not vet {peer}. {type(e)}{str(e)}")
self.vetted[peer.get_hash()] = False
continue
log.info(f"Have vetted {peer} successfully!")
self.vetted[peer.get_hash()] = True
except Exception as e:
log.error(e)
for i in range(30):
if self._shut_down:
return
await asyncio.sleep(1)
def _set_global_connections(self, global_connections: PeerConnections):
self.global_connections: PeerConnections = global_connections
@api_request
async def request_peers(
self, request: RequestPeers
) -> AsyncGenerator[OutboundMessage, None]:
max_peers = self.max_peers_to_send
rawpeers = self.global_connections.peers.get_peers(
max_peers * 5, True, self.recent_peer_threshold
)
peers = []
for peer in rawpeers:
if peer.get_hash() not in self.vetted:
continue
if self.vetted[peer.get_hash()]:
peers.append(peer)
if len(peers) >= max_peers:
break
log.info(f"Sending vetted {peers}")
msg = Message("respond_peers", RespondPeers(peers))
yield OutboundMessage(NodeType.FULL_NODE, msg, Delivery.RESPOND)
yield OutboundMessage(NodeType.WALLET, msg, Delivery.RESPOND)
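# Illustrative construction sketch (an assumption, not part of the original
# module): the introducer is created with its two tuning parameters and wired
# up by the surrounding server code via _set_server() and
# _set_global_connections() before _start() launches the vetting loop, e.g.
#
#     introducer = Introducer(max_peers_to_send=10, recent_peer_threshold=300)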
|
StarcoderdataPython
|
1664333
|
<filename>tests/end_to_end/target_snowflake/tap_mariadb/__init__.py
from tests.end_to_end.target_snowflake import TargetSnowflake
class TapMariaDB(TargetSnowflake):
    """
    Base class for E2E tests for tap mysql -> target snowflake
    """

    # pylint: disable=arguments-differ
    def setUp(self, tap_id: str, target_id: str):
        super().setUp(tap_id=tap_id, target_id=target_id, tap_type='TAP_MYSQL')
        self.e2e_env.setup_tap_mysql()
|
StarcoderdataPython
|
1686100
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Stinespring representation of a Quantum Channel.
"""
import copy
from numbers import Number
import numpy as np
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.circuit.instruction import Instruction
from qiskit.exceptions import QiskitError
from qiskit.quantum_info.operators.predicates import is_identity_matrix
from qiskit.quantum_info.operators.channel.quantum_channel import QuantumChannel
from qiskit.quantum_info.operators.channel.kraus import Kraus
from qiskit.quantum_info.operators.channel.choi import Choi
from qiskit.quantum_info.operators.channel.superop import SuperOp
from qiskit.quantum_info.operators.channel.transformations import _to_stinespring
class Stinespring(QuantumChannel):
r"""Stinespring representation of a quantum channel.
The Stinespring representation of a quantum channel :math:`\mathcal{E}`
is a rectangular matrix :math:`A` such that the evolution of a
:class:`~qiskit.quantum_info.DensityMatrix` :math:`\rho` is given by
.. math::
\mathcal{E}(ρ) = \mbox{Tr}_2\left[A ρ A^\dagger\right]
where :math:`\mbox{Tr}_2` is the :func:`partial_trace` over subsystem 2.
A general operator map :math:`\mathcal{G}` can also be written using the
generalized Stinespring representation which is given by two matrices
:math:`A`, :math:`B` such that
.. math::
\mathcal{G}(ρ) = \mbox{Tr}_2\left[A ρ B^\dagger\right]
See reference [1] for further details.
References:
1. <NAME>, <NAME>, <NAME>, *Tensor networks and graphical calculus
for open quantum systems*, Quant. Inf. Comp. 15, 0579-0811 (2015).
`arXiv:1111.6950 [quant-ph] <https://arxiv.org/abs/1111.6950>`_
"""
def __init__(self, data, input_dims=None, output_dims=None):
"""Initialize a quantum channel Stinespring operator.
Args:
data (QuantumCircuit or
Instruction or
BaseOperator or
matrix): data to initialize superoperator.
input_dims (tuple): the input subsystem dimensions.
[Default: None]
output_dims (tuple): the output subsystem dimensions.
[Default: None]
Raises:
QiskitError: if input data cannot be initialized as a
a list of Kraus matrices.
Additional Information:
If the input or output dimensions are None, they will be
automatically determined from the input data. This can fail for the
Stinespring operator if the output dimension cannot be automatically
determined.
"""
# If the input is a list or tuple we assume it is a pair of general
# Stinespring matrices. If it is a numpy array we assume that it is
# a single Stinespring matrix.
if isinstance(data, (list, tuple, np.ndarray)):
if not isinstance(data, tuple):
# Convert single Stinespring set to length 1 tuple
stine = (np.asarray(data, dtype=complex), None)
if isinstance(data, tuple) and len(data) == 2:
if data[1] is None:
stine = (np.asarray(data[0], dtype=complex), None)
else:
stine = (np.asarray(data[0], dtype=complex),
np.asarray(data[1], dtype=complex))
dim_left, dim_right = stine[0].shape
# If two Stinespring matrices check they are same shape
if stine[1] is not None:
if stine[1].shape != (dim_left, dim_right):
raise QiskitError("Invalid Stinespring input.")
input_dim = dim_right
if output_dims:
output_dim = np.product(output_dims)
else:
output_dim = input_dim
if dim_left % output_dim != 0:
raise QiskitError("Invalid output_dim")
else:
# Otherwise we initialize by conversion from another Qiskit
# object into the QuantumChannel.
if isinstance(data, (QuantumCircuit, Instruction)):
# If the input is a Terra QuantumCircuit or Instruction we
# convert it to a SuperOp
data = SuperOp._init_instruction(data)
else:
# We use the QuantumChannel init transform to initialize
# other objects into a QuantumChannel or Operator object.
data = self._init_transformer(data)
data = self._init_transformer(data)
input_dim, output_dim = data.dim
# Now that the input is an operator we convert it to a
# Stinespring operator
rep = getattr(data, '_channel_rep', 'Operator')
stine = _to_stinespring(rep, data._data, input_dim, output_dim)
if input_dims is None:
input_dims = data.input_dims()
if output_dims is None:
output_dims = data.output_dims()
# Check and format input and output dimensions
input_dims = self._automatic_dims(input_dims, input_dim)
output_dims = self._automatic_dims(output_dims, output_dim)
# Initialize either single or general Stinespring
if stine[1] is None or (stine[1] == stine[0]).all():
# Standard Stinespring map
super().__init__((stine[0], None),
input_dims=input_dims,
output_dims=output_dims,
channel_rep='Stinespring')
else:
# General (non-CPTP) Stinespring map
super().__init__(stine,
input_dims=input_dims,
output_dims=output_dims,
channel_rep='Stinespring')
@property
def data(self):
# Override to deal with data being either tuple or not
if self._data[1] is None:
return self._data[0]
else:
return self._data
def is_cptp(self, atol=None, rtol=None):
"""Return True if completely-positive trace-preserving."""
if atol is None:
atol = self.atol
if rtol is None:
rtol = self.rtol
if self._data[1] is not None:
return False
check = np.dot(np.transpose(np.conj(self._data[0])), self._data[0])
return is_identity_matrix(check, rtol=self.rtol, atol=self.atol)
def conjugate(self):
"""Return the conjugate of the QuantumChannel."""
# pylint: disable=assignment-from-no-return
stine_l = np.conjugate(self._data[0])
stine_r = None
if self._data[1] is not None:
stine_r = np.conjugate(self._data[1])
return Stinespring((stine_l, stine_r), self.input_dims(),
self.output_dims())
def transpose(self):
"""Return the transpose of the QuantumChannel."""
din, dout = self.dim
dtr = self._data[0].shape[0] // dout
stine = [None, None]
for i, mat in enumerate(self._data):
if mat is not None:
stine[i] = np.reshape(
np.transpose(np.reshape(mat, (dout, dtr, din)), (2, 1, 0)),
(din * dtr, dout))
return Stinespring(tuple(stine),
input_dims=self.output_dims(),
output_dims=self.input_dims())
def compose(self, other, qargs=None, front=False):
"""Return the composed quantum channel self @ other.
Args:
other (QuantumChannel): a quantum channel.
qargs (list or None): a list of subsystem positions to apply
other on. If None apply on all
subsystems [default: None].
front (bool): If True compose using right operator multiplication,
instead of left multiplication [default: False].
Returns:
Stinespring: The quantum channel self @ other.
Raises:
QiskitError: if other cannot be converted to a Stinespring or has
incompatible dimensions.
Additional Information:
Composition (``@``) is defined as `left` matrix multiplication for
:class:`SuperOp` matrices. That is that ``A @ B`` is equal to ``B * A``.
Setting ``front=True`` returns `right` matrix multiplication
``A * B`` and is equivalent to the :meth:`dot` method.
"""
if qargs is None:
qargs = getattr(other, 'qargs', None)
if qargs is not None:
return Stinespring(
SuperOp(self).compose(other, qargs=qargs, front=front))
# Otherwise we convert via Kraus representation rather than
# superoperator to avoid unnecessary representation conversions
return Stinespring(Kraus(self).compose(other, front=front))
def dot(self, other, qargs=None):
"""Return the right multiplied quantum channel self * other.
Args:
other (QuantumChannel): a quantum channel.
qargs (list or None): a list of subsystem positions to apply
other on. If None apply on all
subsystems [default: None].
Returns:
Stinespring: The quantum channel self * other.
Raises:
QiskitError: if other cannot be converted to a Stinespring or has
incompatible dimensions.
"""
return super().dot(other, qargs=qargs)
def power(self, n):
"""The matrix power of the channel.
Args:
n (int): compute the matrix power of the superoperator matrix.
Returns:
Stinespring: the matrix power of the SuperOp converted to a
Stinespring channel.
Raises:
QiskitError: if the input and output dimensions of the
QuantumChannel are not equal, or the power is not
an integer.
"""
if n > 0:
return super().power(n)
return Stinespring(SuperOp(self).power(n))
def tensor(self, other):
"""Return the tensor product channel self ⊗ other.
Args:
other (QuantumChannel): a quantum channel subclass.
Returns:
Stinespring: the tensor product channel self ⊗ other as a
Stinespring object.
Raises:
QiskitError: if other cannot be converted to a channel.
"""
return self._tensor_product(other, reverse=False)
def expand(self, other):
"""Return the tensor product channel other ⊗ self.
Args:
other (QuantumChannel): a quantum channel subclass.
Returns:
Stinespring: the tensor product channel other ⊗ self as a
Stinespring object.
Raises:
QiskitError: if other cannot be converted to a channel.
"""
return self._tensor_product(other, reverse=True)
def _add(self, other, qargs=None):
"""Return the QuantumChannel self + other.
If ``qargs`` are specified the other operator will be added
assuming it is identity on all other subsystems.
Args:
other (QuantumChannel): a quantum channel subclass.
qargs (None or list): optional subsystems to add on
(Default: None)
Returns:
Stinespring: the linear addition channel self + other.
Raises:
QiskitError: if other cannot be converted to a channel or
has incompatible dimensions.
"""
# Since we cannot directly add two channels in the Stinespring
# representation we convert to the Choi representation
return Stinespring(Choi(self)._add(other, qargs=qargs))
def _multiply(self, other):
"""Return the QuantumChannel other * self.
Args:
other (complex): a complex number.
Returns:
Stinespring: the scalar multiplication other * self.
Raises:
QiskitError: if other is not a valid scalar.
"""
if not isinstance(other, Number):
raise QiskitError("other is not a number")
ret = copy.copy(self)
# If the number is complex or negative we need to convert to
# general Stinespring representation so we first convert to
# the Choi representation
if isinstance(other, complex) or other < 0:
# Convert to Choi-matrix
ret._data = Stinespring(Choi(self)._multiply(other))._data
return ret
# If the number is real we can update the Kraus operators
# directly
num = np.sqrt(other)
stine_l, stine_r = self._data
stine_l = num * self._data[0]
stine_r = None
if self._data[1] is not None:
stine_r = num * self._data[1]
ret._data = (stine_l, stine_r)
return ret
def _evolve(self, state, qargs=None):
"""Evolve a quantum state by the quantum channel.
Args:
state (DensityMatrix or Statevector): The input state.
qargs (list): a list of quantum state subsystem positions to apply
the quantum channel on.
Returns:
DensityMatrix: the output quantum state as a density matrix.
Raises:
QiskitError: if the quantum channel dimension does not match the
specified quantum state subsystem dimensions.
"""
return SuperOp(self)._evolve(state, qargs)
def _tensor_product(self, other, reverse=False):
"""Return the tensor product channel.
Args:
other (QuantumChannel): a quantum channel subclass.
reverse (bool): If False return self ⊗ other, if True return
(other ⊗ self) [Default: False]
Returns:
Stinespring: the tensor product channel as a Stinespring object.
Raises:
QiskitError: if other cannot be converted to a channel.
"""
# Convert other to Stinespring
if not isinstance(other, Stinespring):
other = Stinespring(other)
# Tensor Stinespring ops
sa_l, sa_r = self._data
sb_l, sb_r = other._data
# Reshuffle tensor dimensions
din_a, dout_a = self.dim
din_b, dout_b = other.dim
dtr_a = sa_l.shape[0] // dout_a
dtr_b = sb_l.shape[0] // dout_b
if reverse:
shape_in = (dout_b, dtr_b, dout_a, dtr_a, din_b * din_a)
shape_out = (dout_b * dtr_b * dout_a * dtr_a, din_b * din_a)
else:
shape_in = (dout_a, dtr_a, dout_b, dtr_b, din_a * din_b)
shape_out = (dout_a * dtr_a * dout_b * dtr_b, din_a * din_b)
# Compute left Stinespring op
if reverse:
input_dims = self.input_dims() + other.input_dims()
output_dims = self.output_dims() + other.output_dims()
sab_l = np.kron(sb_l, sa_l)
else:
input_dims = other.input_dims() + self.input_dims()
output_dims = other.output_dims() + self.output_dims()
sab_l = np.kron(sa_l, sb_l)
# Reravel indices
sab_l = np.reshape(
np.transpose(np.reshape(sab_l, shape_in), (0, 2, 1, 3, 4)),
shape_out)
# Compute right Stinespring op
if sa_r is None and sb_r is None:
sab_r = None
else:
if sa_r is None:
sa_r = sa_l
elif sb_r is None:
sb_r = sb_l
if reverse:
sab_r = np.kron(sb_r, sa_r)
else:
sab_r = np.kron(sa_r, sb_r)
# Reravel indices
sab_r = np.reshape(
np.transpose(np.reshape(sab_r, shape_in), (0, 2, 1, 3, 4)),
shape_out)
return Stinespring((sab_l, sab_r), input_dims, output_dims)
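# Minimal usage sketch (an addition, not part of the original Qiskit module):
# a single 2x2 identity matrix is a valid Stinespring operator for the
# one-qubit identity channel, so construction and the CPTP check can be
# exercised directly when the file is run as a script.
if __name__ == '__main__':
    # Identity channel on one qubit: A = I, so Tr_2[A rho A^dagger] = rho.
    ident = Stinespring(np.eye(2))
    print(ident.is_cptp())   # expected: True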
|
StarcoderdataPython
|
8127210
|
<filename>fenalib/writer.py
import os
import logging
if __name__ == "__main__":
import sys
sys.path.append("..")
del sys
from fenalib.mcfunction import McFunction
from fenalib.assert_utils import assert_type, assert_list_types
PATH_TO_LOG_DIR = "log"
def write_after_pre_pyexpander(text):
current_dir_path = os.path.dirname(__file__)
after_pre_pyexpander_path = os.path.join(current_dir_path, PATH_TO_LOG_DIR, "after_pre_pyexpander.txt")
with open(after_pre_pyexpander_path, "w") as file:
file.write(text)
logging.debug("Successfully wriitten 'after_pre_pyexpander.txt'")
def write_after_pyexpander(text):
current_dir_path = os.path.dirname(__file__)
after_pyexpander_path = os.path.join(current_dir_path, PATH_TO_LOG_DIR, "after_pyexpander.txt")
with open(after_pyexpander_path, "w") as file:
file.write(text)
logging.debug("Successfully wriitten 'after_pyexpander.txt'")
class Writer:
"""
Literally writes out all the proper files
Attributes:
mcfunctions (list or tuple of McFunction objects)
clean (bool): Whether the mcfunctions should be deleted or not
debug (bool): Whether a debug command should be added to all commands or not
Writes:
- each mcfunction file based off of their full path
- fenalib/log/parsed_cmds.txt to summarize all commands for each mcfunction file
"""
def __init__(self, mcfunctions, clean=False, debug=False):
assert_list_types(mcfunctions, McFunction)
assert_type(clean, bool)
assert_type(debug, bool)
self.mcfunctions = tuple(mcfunctions)
self.clean = clean
self.debug = debug
def _get_mcfunction_paths(self):
"""
Returns:
set of str objects: All possible paths for the mcfunctions to clean
"""
return {os.path.dirname(mcfunction.full_path) for mcfunction in self.mcfunctions}
def clean_files(self):
"""
All files ending with .mcfunction for each folder the mcfunctions will be deleted
- all folder paths are based off the given existing mcfunctions
- there will be no folder walking to delete mcfunctions
eg. if there are two mcfunctions with paths /functions/test/main.mcfunction and /functions/test/src/something.mcfunction:
- all files ending with ".mcfunction" under /functions/test and /functions/test/src will be deleted
- if there is an mcfunction that wasn't parsed under /functions/ego/test/not_parsed/hello.mcfunction, all
mcfunctions inside /functions/ego/test/not_parsed will be kept
"""
for mfunc_dir in self._get_mcfunction_paths():
if os.path.isdir(mfunc_dir):
for path in os.listdir(mfunc_dir):
full_path = os.path.join(mfunc_dir, path)
if os.path.isfile(full_path) and path.endswith(".mcfunction"):
logging.debug(f"Deleted function file {full_path}")
os.remove(full_path)
def write(self):
"""
Writes all commands specified under the mcfunction
Note that new directories will be made if necessary
"""
if self.clean:
self.clean_files()
self.write_parsed_commands()
for mcfunction in self.mcfunctions:
assert mcfunction.finalized
directories = os.path.dirname(mcfunction.full_path)
if not os.path.exists(directories):
os.makedirs(directories)
with open(mcfunction.full_path, "w") as file:
if self.debug:
file.write(f"say debug mode: running {mcfunction.mfunc_name}\n")
file.write("\n".join(mcfunction.commands) + "\n")
logging.debug(f"Wrote function file {mcfunction.full_path}")
def write_parsed_commands(self):
# gets the directory path of this current file path
current_dir_path = os.path.dirname(__file__)
parsed_cmds_path = os.path.join(current_dir_path, PATH_TO_LOG_DIR, "parsed_cmds.txt")
with open(parsed_cmds_path, "w") as file:
for mcfunction in self.mcfunctions:
assert mcfunction.finalized
file.write(mcfunction.full_path + "\n ")
file.write("\n ".join(mcfunction.commands) + "\n\n")
if __name__ == "__main__":
command_template = (
"say test1 inside function {mfunc_name}",
"say test2 inside function {mfunc_name}",
"say test3 inside function {mfunc_name}",
)
def add_function(commands, function_name):
return tuple(f"{command} with function {function_name}" for command in commands)
def get_full_path(path):
dir_path = os.path.dirname(os.path.realpath(__file__))
return os.path.join(dir_path, path) + ".mcfunction"
def get_mfunc_name(full_path):
# wtf is this code even
# substring to the left of the last instance of functions
# removes the first character of the string
# replaces the first instance of '/' with ':'
return full_path.split("functions")[-1][1:].replace("/", ":", 1)
def create_mfunc(relative_path):
full_path = get_full_path(relative_path)
mfunc_name = get_mfunc_name(full_path)
mcfunction = McFunction(mfunc_name, full_path)
for command in command_template:
mcfunction.add_command(command.format(mfunc_name=mfunc_name))
mcfunction.finalize()
return mcfunction
mcfunc1 = create_mfunc("functions/ego/test/test1")
mcfunc2 = create_mfunc("functions/ego/test/test2")
mcfuncwithin1 = create_mfunc("functions/ego/test/within_test/test1")
mcfuncwithin2 = create_mfunc("functions/ego/test/within_test/test2")
mcfuncwithin22 = create_mfunc("functions/ego/test/within_test2/test2")
mcfunctions = [mcfunc1, mcfunc2, mcfuncwithin1, mcfuncwithin2, mcfuncwithin22]
# mcfunctions = [mcfunc1, mcfuncwithin1, mcfuncwithin2, mcfuncwithin22]
for mcfunction in mcfunctions:
print(repr(mcfunction))
writer = Writer(mcfunctions, clean=True, debug=True)
writer.write()
|
StarcoderdataPython
|
8026714
|
<reponame>paprikachan/biotool
# -*- coding: utf-8 -*-
"""
tenxtools.io
~~~~~~~~~~~~
@Copyright: (c) 2018-06 by <NAME> (<EMAIL>).
@License: LICENSE_NAME, see LICENSE for more details.
"""
import os
import gzip
import csv
import vcf
import yaml
import pysam
class Record(object):
fields = []
sep = '\t'
def __init__(self, stream_or_str=None, parent=None, args=None, sep='\t',
**kwargs):
self.parent = parent
self.sep = sep
args = args or {}
if parent:
self.from_parent()
elif stream_or_str:
self.parse(stream_or_str)
elif args:
self.set(**args)
else:
self.set(**kwargs)
def set(self, **kwargs):
for key in kwargs:
setattr(self, key, kwargs[key])
for k in kwargs.keys():
if k not in self.fields:
self.fields.append(k)
def from_parent(self, **kwargs):
pass
def parse(self, stream_or_str):
# parse a stream_or_string to record object
pass
def _validate(self, x, func=None):
if func in [None, int, float]:
if not x:
return x
if x == '.':
return None
if func is str:
if not x:
return ''
if func:
return func(x)
return x
def _parse_list(self, line, sep=','):
if isinstance(line, list):
return line
if not line or line == '.':
return []
return line.split(sep)
def _format_list(self, x, sep=','):
return sep.join(map(self._format_value, x))
def _format_value(self, x): # None, int, float, list
if x == 0:
return '0'
if not x:
return '.' # notice not 0 is True
if type(x) == list:
return self._format_list(x)
return str(x)
def _parse_value(self, x): # None, int, float, list
if x == '.':
return None
splits = x.split(',')
if splits:
return map(self._parse_value, splits)
return float(x)
def __repr__(self):
res_list = []
for field in self.fields:
try:
x = self._format_value(getattr(self, field))
res_list.append(x)
except Exception:
pass
return self.sep.join(res_list)
def __str__(self):
return self.__repr__()
def __eq__(self, other):
pass
def safe_open(fn, mode):
if not fn:
return None
if fn.endswith('gz'):
return gzip.open(fn, mode)
else:
return open(fn, mode)
class Writer(object):
def __init__(self, fn=None, record_cls=Record,
fields_prefix='#',
*args, **kwargs):
self.fn = fn
self.fields_prefix = fields_prefix
self.sep = record_cls.sep
self.fields = record_cls.fields
self.write('', 'w')
def write_headers(self, mode='a'):
pass
def write_fields(self, fields=None, mode='a'):
if fields is None:
fields = self.fields
line = self.fields_prefix + self.sep.join(fields) + '\n'
with safe_open(self.fn, mode) as f:
f.write(line)
return True
def write_chunk(self, chunk, mode='a', dynamic_fields=False):
with safe_open(self.fn, mode) as f:
for i, record in enumerate(chunk):
if i == 0 and dynamic_fields:
self.write_fields(fields=record.fields)
f.write(str(record) + '\n')
def write(self, content, mode='a'):
with safe_open(self.fn, mode) as f:
f.write(content)
class Reader(object): # currently just csv, tsv file
record_cls = Record
def __init__(self,
in_data=None, in_fn=None,
record_cls=Record,
file_type='csv', sep=',', has_header=False,
sample=None):
self.in_data = in_data
self.in_fn = in_fn
self.record_cls = record_cls
self.fields = record_cls.fields
self.file_type = file_type
self.sep = sep
self.has_header = has_header
self.sample = sample
if isinstance(self.in_fn, str) and self.in_fn.endswith('gz'):
self.tbx = pysam.TabixFile(self.in_fn)
if isinstance(self.in_fn, list) or isinstance(self.in_data, list):
self.in_iter = self._multiple_data(in_datas=in_data, in_fns=in_fn)
else:
self.in_iter = self._single_data(in_data=in_data, in_fn=in_fn)
def _read(self, in_fn):
for line in safe_open(in_fn, 'rt'):
if self.file_type != 'vcf' and line.startswith('#'):
continue
yield line
def _single_data(self, in_data=None, in_fn=None):
if in_data:
return in_data
if in_fn:
if self.file_type == 'csv':
in_iter = self._csv_fn(in_fn)
elif self.file_type == 'vcf':
in_iter = self._vcf_fn(in_fn)
elif self.file_type == 'yaml':
in_iter = self._yaml_fn(in_fn)
else:
in_iter = None
else:
in_iter = None
return in_iter
def _csv_fn(self, in_data):
if self.has_header:
in_iter = csv.DictReader(
self._read(in_data),
delimiter=self.sep, skipinitialspace=True)
else:
in_iter = csv.DictReader(
self._read(in_data),
fieldnames=self.fields,
delimiter=self.sep, skipinitialspace=True)
return in_iter
def _vcf_fn(self, in_data):
in_iter = vcf.Reader(self._read(in_data))
return in_iter
def _yaml_fn(self, in_fn):
docs = yaml.load_all(safe_open(in_fn, 'r'))
for doc in docs:
if not doc:
continue
yield doc
def _multiple_data(self, in_datas=None, in_fns=None):
if in_datas:
for in_data in in_datas:
for x in self._single_data(in_data=in_data):
yield x
elif in_fns:
for in_fn in in_fns:
for x in self._single_data(in_fn=in_fn):
yield os.path.split(in_fn)[1], x
def __iter__(self):
return self.read_record()
def conditional_read(self, condition_func=None):
for meta, record in self:
if condition_func and condition_func(record):
yield meta, record
def fetch(self, chrom, start, end, condition_func=None):
for line in self.tbx.fetch(chrom, start, end):
row = dict(zip(self.record_cls.fields, line.split(self.sep)))
record = self.record_cls(args=row)
if condition_func is None:
yield None, record
elif condition_func and condition_func(record):
yield None, record
def read_record(self):
for i, row_or_record in enumerate(self.in_iter):
if isinstance(row_or_record, dict):
row_or_record['sample'] = self.sample
record = self.record_cls(args=row_or_record)
meta = None
elif isinstance(row_or_record, tuple):
meta, record = row_or_record
if isinstance(record, dict):
record['sample'] = self.sample
record = self.record_cls(args=record)
elif not isinstance(record, self.record_cls):
record.sample = self.sample
record = self.record_cls(parent=record)
elif not isinstance(row_or_record, self.record_cls):
meta = None
row_or_record.sample = self.sample
record = self.record_cls(parent=row_or_record)
else:
meta = None
record.sample = self.sample
record = row_or_record
yield meta, record
def read_chunks(self, field):
chunks = []
prev_value = None
for i, record in enumerate(self.read_record()):
meta, record = record
value = getattr(record, field)
if prev_value is None or prev_value == value:
chunks.append(record)
else: # update
if i:
yield prev_value, chunks
chunks = [record]
prev_value = value
if chunks:
yield prev_value, chunks
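# Minimal usage sketch (an addition with assumed file names, not part of the
# original module): read a headered, comma-separated file into Record objects
# and write them back out as a tab-separated file.
#
#     reader = Reader(in_fn='calls.csv', file_type='csv', sep=',', has_header=True)
#     writer = Writer(fn='calls_copy.tsv', record_cls=Record)
#     writer.write_chunk((record for _, record in reader), dynamic_fields=True)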
|
StarcoderdataPython
|
4852081
|
import os
from pathlib import Path
config = """subscriptionID:
tenant_id:
app_id:
client_secret:
resource_group:
location: # english name of the location, for example centralus.
ssh_key_private_file:
ssh_key_public_file:
ansible_host_key_checking: true
name_for_logging:
my_public_ip: none # ports will be exposed to the internet
# password must fulfill Azure password requirements
user:
password:
# targeted azure version for terraform, do not change
azure_rm_version: =2.24.0
azure_ad_version: =1.0.0
# configuration options for azure virtual machines
storage_account_type: Standard_LRS
vm_size: Standard_DS1_V2
vm_os_version: latest
delete_os_disk_on_termination: true
data_storage_account_type: Standard_LRS """
spec = "# specify here. for example see specifications/repoExample"
def create_configuration_file():
    with open('./configuration.yml', 'w+') as outfile:
        outfile.write(config)
        outfile.close()

def create_specification_file():
    with open('./specification.yml', 'w+') as outfile:
        outfile.write(spec)
        outfile.close()

'''
creates configuration and specification files
'''
def setup():
    create_configuration_file()
    create_specification_file()
    Path("./ansible").mkdir(parents=True, exist_ok=True)
|
StarcoderdataPython
|
11235478
|
from setuptools import setup
setup(
    name='re_transliterate',
    version='1.3',
    url="https://github.com/MatthewDarling/re_transliterate/",
    py_modules=['re_transliterate'],
    include_package_data=True,
    # Metadata
    description='Functions for transliteration using regular expressions',
    long_description=(open('readme.rst').read() + '\n\n' +
                      open('CHANGELOG.rst').read()),
    license='http://opensource.org/licenses/MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering'],
)
|
StarcoderdataPython
|
300121
|
# Generated by Django 2.1 on 2018-12-26 00:52
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('journeylog', '0012_auto_20181223_0055'),
    ]

    operations = [
        migrations.AddField(
            model_name='journalpage',
            name='timezone_end',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='journalpage',
            name='timezone_start',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
|
StarcoderdataPython
|
1706498
|
from dateutil.relativedelta import relativedelta
from django.shortcuts import get_object_or_404
from django.core.exceptions import PermissionDenied
from django.utils import timezone
from rest_framework.viewsets import GenericViewSet
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from talentmap_api.user_profile.models import UserProfile
import talentmap_api.fsbid.services as services
import logging
logger = logging.getLogger(__name__)
class FSBidBidSeasonsListView(APIView):

    permission_classes = (IsAuthenticatedOrReadOnly,)

    @classmethod
    def get_extra_actions(cls):
        return []

    def get(self, request, *args, **kwargs):
        '''
        Gets all bid seasons
        '''
        return Response(services.get_bid_seasons(request.query_params.get('bsn_future_vacancy_ind', None)))
|
StarcoderdataPython
|
1885501
|
<filename>Utils/InferenceHelpers/NCNNHelper.py
from abc import ABC
from Utils.InferenceHelpers.BaseInferenceHelper import CustomInferenceHelper
class NCNNInferenceHelper(CustomInferenceHelper, ABC):
    def __init__(self, _algorithm_name):
        super().__init__()
        self.name = _algorithm_name
        self.type_name = 'ncnn'
        self.handler = None
|
StarcoderdataPython
|
12847114
|
<filename>django_frontend_presets/presets/__init__.py
from .Bootstrap import Bootstrap
from .Init import Init
from .React import React
from .Reset import Reset
from .Vue import Vue
|
StarcoderdataPython
|
1700403
|
<gh_stars>0
#!/usr/bin/python
# Classification (U)
"""Program: mysql_db_dump.py
Description: Runs the mysqldump program against a MySQL database and dumps
one or more databases to file(s).
Usage:
mysql_db_dump.py -c file -d path
{-B db_name [db_name ...] -o /path/name [-s] [-z] [-r] [-w]
[-e email {email2 email3 ...} {-t subject_line} [-u]]
[-p dir_path] [-l] |
-A -o /path/name [-s] [-z] [-r] [-w]
[-e email {email2 email3 ...} {-t subject_line} [-u]]
[-p dir_path] [-l] |
-D -o /path/name [-s] [-z] [-r] [-w]
[-e email {email2 email3 ...} {-t subject_line} [-u]]
[-p dir_path] [-l]}
[-y flavor_id]
[-v | -h]
Arguments:
-c file => Server configuration file. Required arg.
-d dir path => Directory path to config file (-c). Required arg.
-B databases [db_name ...] => Database names, space delimited.
-o dir path => Directory path to dump directory.
-s => Run dump as a single transaction.
-r => Remove GTID entries from dump file.
-z => Compress database dump files.
-p dir_path => Directory path to mysql programs. Only required
if the mysql binary programs do not run properly. (i.e. not
in the $PATH variable.)
-w => Redirect standard error out from the database dump command to
an error file that will be co-located with the database dump
file(s).
-e email_address(es) => Send output to one or more email addresses.
-t subject_line => Subject line of email.
-u => Override the default mail command and use mailx.
-l => Use SSL connection.
-A => Dump all databases to individual files.
-o dir path => Directory path to dump directory.
-s => Run dump as a single transaction.
-r => Remove GTID entries from dump file.
-z => Compress database dump files.
-p dir_path => Directory path to mysql programs. Only required
if the mysql binary programs do not run properly. (i.e. not
in the $PATH variable.)
-w => Redirect standard error out from the database dump command to
an error file that will be co-located with the database dump
file(s).
-e email_address(es) => Send output to one or more email addresses.
-t subject_line => Subject line of email.
-u => Override the default mail command and use mailx.
-l => Use SSL connection.
-D => Dump all databases to a single dump file.
-o dir path => Directory path to dump directory.
-s => Run dump as a single transaction.
-r => Remove GTID entries from dump file.
-z => Compress database dump files.
-p dir_path => Directory path to mysql programs. Only required
if the mysql binary programs do not run properly. (i.e. not
in the $PATH variable.)
-w => Redirect standard error out from the database dump command to
an error file that will be co-located with the database dump
file(s).
-e email_address(es) => Send output to one or more email addresses.
-t subject_line => Subject line of email.
-u => Override the default mail command and use mailx.
-l => Use SSL connection.
-y value => A flavor id for the program lock. To create unique lock.
-v => Display version of this program.
-h => Help and usage message.
NOTE 1: -v or -h overrides the other options.
NOTE 2: -A, -B, and -D are XOR arguments.
Notes:
Database configuration file format (config/mysql_cfg.py.TEMPLATE):
# Configuration file for Database
user = "USER"
japd = "<PASSWORD>"
host = "SERVER_IP"
name = "HOST_NAME"
sid = SERVER_ID
extra_def_file = "PYTHON_PROJECT/config/mysql.cfg"
serv_os = "Linux"
port = 3306
cfg_file = "MYSQL_DIRECTORY/mysqld.cnf"
# If SSL connections are being used, configure one or more of these
entries:
ssl_client_ca = "CAFilename"
ssl_ca_path = "CAPath"
ssl_client_key = "KeyFilename"
ssl_client_cert = "CertFilename"
ssl_mode = "PREFERRED"
# Only changes these if necessary and have knowledge in MySQL
SSL configuration setup:
ssl_client_flag = None
ssl_disabled = False
ssl_verify_id = False
ssl_verify_cert = False
# TLS versions: Set the TLS versions allowed in the connection
tls_versions = []
NOTE 1: Include the cfg_file even if running remotely as the file will
be used in future releases.
NOTE 2: In MySQL 5.6 - it now gives warning if password is passed on
the command line. To suppress this warning, will require the use
of the --defaults-extra-file option (i.e. extra_def_file) in the
database configuration file. See below for the defaults-extra-file
format.
NOTE 3: Ignore the entries for replication login as this template is
used for a variety of different MySQL programs.
NOTE 4: May have to set host to "localhost" to use sockets properly
when using SSL connections.
configuration modules -> name is runtime dependent as it can be
used to connect to different databases with different names.
Defaults Extra File format (config/mysql.cfg.TEMPLATE):
[client]
password="PASSWORD"
socket=DIRECTORY_PATH/mysql.sock"
NOTE 1: The socket information can be obtained from the my.cnf
file under ~/mysql directory.
NOTE 2: Socket use is only required to be set in certain conditions
when connecting using localhost
Example:
mysql_db_dump.py -c mysql_cfg -d config -A -o /path/dumps -z -s
"""
# Libraries and Global Variables
# Standard
import sys
import subprocess
import datetime
# Local
import lib.arg_parser as arg_parser
import lib.gen_libs as gen_libs
import lib.gen_class as gen_class
import mysql_lib.mysql_class as mysql_class
import mysql_lib.mysql_libs as mysql_libs
import version
__version__ = version.__version__
# Global
SSL_ARG_DICT = {
"ssl_client_ca": "--ssl-ca=", "ssl_ca_path": "--ssl-capath=",
"ssl_client_key": "--ssl-key=", "ssl_client_cert": "--ssl-cert=",
"ssl_mode": "--ssl-mode="}
def help_message():
"""Function: help_message
Description: Displays the program's docstring which is the help and usage
message when -h option is selected.
Arguments:
"""
print(__doc__)
def crt_dump_cmd(server, args_array, opt_arg_list, opt_dump_list):
"""Function: crt_dump_cmd
Description: Create the database dump command line.
Arguments:
(input) server -> Database server instance.
(input) args_array -> Array of command line options and values.
(input) opt_arg_list -> List of commands to add to cmd line.
(input) opt_dump_list -> Dictionary of additional options.
(output) -> Database dump command line.
"""
args_array = dict(args_array)
opt_dump_list = dict(opt_dump_list)
opt_arg_list = list(opt_arg_list)
dump_args = mysql_libs.crt_cmd(
server, arg_parser.arg_set_path(args_array, "-p") + "mysqldump")
# Add arguments to dump command.
for arg in opt_arg_list:
dump_args = gen_libs.add_cmd(dump_args, arg=arg)
# Append additional options to command.
return gen_libs.is_add_cmd(args_array, dump_args, opt_dump_list)
def dump_run(dump_cmd, dmp_file, compress, **kwargs):
"""Function: dump_run
Description: Run the database dump command, save to file, and compress.
Arguments:
(input) dump_cmd -> Database dump command line.
(input) compress -> Compression flag.
(input) dmp_file -> Dump file and path name.
(input) **kwargs:
errfile -> File handler for error file.
"""
subp = gen_libs.get_inst(subprocess)
dump_cmd = list(dump_cmd)
e_file = kwargs.get("errfile", None)
with open(dmp_file, "wb") as f_name:
proc1 = subp.Popen(dump_cmd, stdout=f_name, stderr=e_file)
proc1.wait()
if compress:
gen_libs.compress(dmp_file)
def dump_db(dump_cmd, db_list, compress, dmp_path, **kwargs):
"""Function: dump_db
Description: Runs the database dump command against one or more databases
in the database list. Will create a dump file for each database.
Arguments:
(input) dump_cmd -> Database dump command line.
(input) db_list -> Array of database names.
(input) compress -> Compression flag.
(input) dmp_path -> Database dump output directory path.
(input) **kwargs:
err_sup -> Suppression of standard error to standard out.
mail -> Email class instance.
use_mailx -> True|False - Override postfix and use mailx.
"""
dump_cmd = list(dump_cmd)
db_list = list(db_list)
errfile = None
if kwargs.get("err_sup", False):
efile = gen_libs.crt_file_time("ErrOut", dmp_path, ".log")
errfile = open(efile, "a")
if db_list:
for item in db_list:
dump_cmd = gen_libs.add_cmd(dump_cmd, arg=item)
dmp_file = gen_libs.crt_file_time(item, dmp_path, ".sql")
dump_run(dump_cmd, dmp_file, compress, errfile=errfile)
# Remove database name from command.
dump_cmd.pop(len(dump_cmd) - 1)
elif "--all-databases" in dump_cmd:
dmp_file = gen_libs.crt_file_time("All_Databases", dmp_path, ".sql")
dump_run(dump_cmd, dmp_file, compress, errfile=errfile)
else:
print("WARNING: No databases to dump or missing -D option.")
if errfile:
errfile.close()
mail = kwargs.get("mail", None)
if mail and not gen_libs.is_empty_file(efile):
for line in gen_libs.file_2_list(efile):
mail.add_2_msg(line)
mail.send_mail(use_mailx=kwargs.get("use_mailx", False))
def set_db_list(server, args_array):
"""Function: set_db_list
Description: Get the database list and check if all databases or a single
database is being selected.
Arguments:
(input) server -> Database server instance.
(input) args_array -> Array of command line options and values.
(output) -> Database list.
"""
args_array = dict(args_array)
dump_list = []
db_list = gen_libs.dict_2_list(
mysql_libs.fetch_db_dict(server), "Database")
# Specified databases.
if "-B" in args_array:
# Difference of -B databases to database list.
for item in set(args_array["-B"]) - set(db_list):
print("Warning: Database(%s) does not exist." % (item))
# Intersect of -B databases to database list.
dump_list = list(set(args_array["-B"]) & set(db_list))
# All databases.
elif "-A" in args_array:
dump_list = list(db_list)
return dump_list
def add_ssl(cfg, dump_cmd):
"""Function: add_ssl
Description: Add SSL options to the dump command line.
Arguments:
(input) cfg -> Configuration file module instance.
(input) dump_cmd -> Database dump command line.
(output) dump_cmd -> Database dump command line.
(output) status -> Status of SSL options.
(output) err_msg -> Error message for SSL options.
"""
global SSL_ARG_DICT
dump_cmd = list(dump_cmd)
status = True
err_msg = None
if hasattr(cfg, "ssl_client_ca") and hasattr(cfg, "ssl_client_key") \
and hasattr(cfg, "ssl_client_cert"):
if getattr(cfg, "ssl_client_ca") or (getattr(cfg, "ssl_client_key") and
getattr(cfg, "ssl_client_cert")):
data = [SSL_ARG_DICT[opt] + getattr(cfg, opt)
for opt in SSL_ARG_DICT.keys() if getattr(cfg, opt)]
dump_cmd.extend(data)
else:
status = False
err_msg = "One or more values missing for required SSL settings."
else:
status = False
err_msg = "Configuration file is missing SSL entries."
return dump_cmd, status, err_msg
def add_tls(cfg, dump_cmd):
"""Function: add_tls
Description: Add TLS option to the dump command line, if available.
Arguments:
(input) cfg -> Configuration file module instance.
(input) dump_cmd -> Database dump command line.
(output) dump_cmd -> Database dump command line.
"""
dump_cmd = list(dump_cmd)
if hasattr(cfg, "tls_versions") and getattr(cfg, "tls_versions"):
dump_cmd.append("--tls-version=" + str(getattr(cfg, "tls_versions")))
return dump_cmd
def run_program(args_array, opt_arg_list, opt_dump_list, **kwargs):
"""Function: run_program
Description: Creates class instance(s) and controls flow of the program.
Arguments:
(input) args_array -> Array of command line options and values.
(input) opt_arg_list -> List of commands to add to cmd line.
(input) opt_dump_list -> Dictionary of additional options.
"""
status = True
err_msg = None
args_array = dict(args_array)
opt_dump_list = dict(opt_dump_list)
opt_arg_list = list(opt_arg_list)
mail = None
server = mysql_libs.create_instance(
args_array["-c"], args_array["-d"], mysql_class.Server)
server.connect(silent=True)
if server.conn_msg:
print("run_program: Error encountered on server(%s): %s" %
(server.name, server.conn_msg))
else:
server.set_srv_gtid()
dump_cmd = crt_dump_cmd(
server, args_array, opt_arg_list, opt_dump_list)
db_list = set_db_list(server, args_array)
# Remove the -r option if database is not GTID enabled.
if "-r" in args_array and not server.gtid_mode \
and opt_dump_list["-r"] in dump_cmd:
dump_cmd.remove(opt_dump_list["-r"])
compress = args_array.get("-z", False)
dmp_path = None
if "-o" in args_array:
dmp_path = args_array["-o"] + "/"
if args_array.get("-e", False):
dtg = datetime.datetime.strftime(
datetime.datetime.now(), "%Y%m%d_%H%M%S")
subj = args_array.get(
"-t", [server.name, ": mysql_db_dump: ", dtg])
mail = gen_class.setup_mail(args_array.get("-e"), subj=subj)
err_sup = args_array.get("-w", False)
if "-l" in args_array:
cfg = gen_libs.load_module(args_array["-c"], args_array["-d"])
dump_cmd, status, err_msg = add_ssl(cfg, dump_cmd)
dump_cmd = add_tls(cfg, dump_cmd)
if status:
dump_db(dump_cmd, db_list, compress, dmp_path, err_sup=err_sup,
mail=mail, use_mailx=args_array.get("-u", False))
else:
print("run_program: Error encountered with SSL setup: %s" %
(err_msg))
mysql_libs.disconnect(server)
def main():
"""Function: main
Description: Initializes program-wide used variables and processes command
line arguments and values.
Variables:
dir_chk_list -> contains options which will be directories.
dir_crt_list -> contain options that require directory to be created.
opt_arg_list -> contains arguments to add to command line by default.
opt_con_req_dict -> contains options requiring other options.
opt_dump_list -> contains optional arguments to mysqldump.
opt_multi_list -> contains the options that will have multiple values.
opt_req_list -> contains the options that are required for the program.
opt_val_list -> contains options which require values.
opt_xor_dict -> contains options which are XOR with its values.
Arguments:
(input) argv -> Arguments from the command line.
"""
cmdline = gen_libs.get_inst(sys)
dir_chk_list = ["-o", "-d", "-p"]
dir_crt_list = ["-o"]
# --ignore-table=mysql.event -> Skips dumping the event table.
opt_arg_list = ["--ignore-table=mysql.event"]
opt_con_req_dict = {
"-t": ["-e"], "-A": ["-o"], "-B": ["-o"], "-D": ["-o"], "-u": ["-e"]}
opt_dump_list = {
"-s": "--single-transaction",
"-D": ["--all-databases", "--triggers", "--routines", "--events"],
"-r": "--set-gtid-purged=OFF"}
opt_multi_list = ["-B", "-e", "-t"]
opt_req_list = ["-c", "-d"]
opt_val_list = ["-B", "-c", "-d", "-o", "-p", "-y", "-e", "-t"]
opt_xor_dict = {"-A": ["-B", "-D"], "-B": ["-A", "-D"], "-D": ["-A", "-B"]}
# Process argument list from command line.
args_array = arg_parser.arg_parse2(
cmdline.argv, opt_val_list, multi_val=opt_multi_list)
if not gen_libs.help_func(args_array, __version__, help_message) \
and not arg_parser.arg_require(args_array, opt_req_list) \
and arg_parser.arg_xor_dict(args_array, opt_xor_dict) \
and not arg_parser.arg_dir_chk_crt(args_array, dir_chk_list,
dir_crt_list) \
and arg_parser.arg_cond_req_or(args_array, opt_con_req_dict):
try:
prog_lock = gen_class.ProgramLock(cmdline.argv,
args_array.get("-y", ""))
run_program(args_array, opt_arg_list, opt_dump_list)
del prog_lock
except gen_class.SingleInstanceException:
print("WARNING: Lock in place for mysql_db_dump with id: %s"
% (args_array.get("-y", "")))
if __name__ == "__main__":
sys.exit(main())
|
StarcoderdataPython
|
164311
|
#!/usr/bin/env python
# Runs "mean absolute deviation" QC metrics on two long-RNA-seq gene quantifications
import os, subprocess, json
import dxpy
def divide_on_common(str_a,str_b):
'''Divides each string into [common_prefix,variable_middle,common_ending] and returns as set (parts_a,parts_b).'''
parts_a = ['','','']
parts_b = ['','','']
# The common parts at the start of the 2 strings
while len(str_a) > 0 and len(str_b) > 0 and str_a[0] == str_b[0]:
parts_a[0] += str_a[0]
parts_b[0] += str_b[0]
str_a = str_a[1:]
str_b = str_b[1:]
# The common parts at the end of the 2 strings
while len(str_a) > 0 and len(str_b) > 0 and str_a[-1] == str_b[-1]:
parts_a[2] = str_a[-1] + parts_a[2]
parts_b[2] = str_b[-1] + parts_b[2]
str_a = str_a[:-1]
str_b = str_b[:-1]
# These are the different parts in the middle of the 2 strings
parts_a[1] = str_a
parts_b[1] = str_b
return (parts_a,parts_b)
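# Illustrative behaviour sketch (an addition, not part of the original script):
# divide_on_common('rep1_quants.tsv', 'rep2_quants.tsv') would return
# (['rep', '1', '_quants.tsv'], ['rep', '2', '_quants.tsv']).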
def root_name_from_pair(filename_a,filename_b):
'''Returns a root name based upon the common and uncommon parts of a pair of file names.'''
(a_parts, b_parts) = divide_on_common(filename_a,filename_b)
out_root = ''
if len(a_parts[0]) > 0:
out_root = a_parts[0]
out_root += a_parts[1] + '-' + b_parts[1]
if len(a_parts[2]) > 0:
out_root += a_parts[2]
return out_root # exp1_rep1_1_quants.tsv and exp1_rep2_1_quants.tsv yield exp1_rep1-2_1_quants.tsv
@dxpy.entry_point("main")
def main(quants_a, quants_b):
# tool_versions.py --applet $script_name --appver $script_ver
sw_versions = subprocess.check_output(['tool_versions.py', '--dxjson', 'dnanexus-executable.json'])
dxfile_a = dxpy.DXFile(quants_a)
dxfile_b = dxpy.DXFile(quants_b)
print "* Downloading files..."
dxpy.download_dxfile(dxfile_a.get_id(), "quants_a.tsv")
dxpy.download_dxfile(dxfile_b.get_id(), "quants_b.tsv")
# Create and appropriate name for output files
out_root = root_name_from_pair(dxfile_a.name.split('.')[0],dxfile_b.name.split('.')[0])
out_root += '_mad'
mad_plot_file = out_root + '_plot.png'
# DX/ENCODE independent script is found in resources/usr/bin
print "* Runnning MAD.R..."
subprocess.check_call(["ls","-l"])
#mad_output = subprocess.check_output(['Rscript', '/usr/bin/MAD.R', 'quants_a.tsv', 'quants_b.tsv'])
#subprocess.check_call(['mv', "MAplot.png", mad_plot_file ])
subprocess.check_call(['rampage_mad_qc.sh', 'quants_a.tsv', 'quants_b.tsv', out_root ])
mad_json_file = out_root + '.json'
print "* package properties..."
qc_metrics = {}
#qc_metrics["MAD.R"] = json.loads(mad_output)
fileH = open(mad_json_file, 'r')
qc_metrics["MAD.R"] = json.load(fileH)
fileH.close()
meta_string = json.dumps(qc_metrics)
print json.dumps(qc_metrics,indent=4)
props = {}
props["SW"] = sw_versions
print "* Upload Plot..."
plot_dxfile = dxpy.upload_local_file(mad_plot_file,properties=props,details=qc_metrics)
return { "metadata": meta_string, "mad_plot": plot_dxfile }
dxpy.run()
|
StarcoderdataPython
|
151086
|
from django.contrib import admin
import pytz
from .models import Event
from .forms import EventAdminForm
@admin.register(Event)
class EventAdmin(admin.ModelAdmin):
raw_id_fields = ('attendees', 'facilitators', 'projects',)
form = EventAdminForm
def save_model(self, request, obj, form, change):
tz = pytz.timezone(obj.timezone)
# First converting to naive timezone, otherwise localize wont work.
start = obj.starts_at.replace(tzinfo=None)
end = obj.ends_at.replace(tzinfo=None)
# Using the user selected timezone
obj.starts_at = tz.localize(start)
obj.ends_at = tz.localize(end)
super().save_model(request, obj, form, change)
def get_object(self, request, object_id, from_field):
obj = super().get_object(request, object_id, from_field)
if obj is not None:
tz = pytz.timezone(obj.timezone)
# Converting the datetimes to user selected timezones
start = obj.starts_at.astimezone(tz)
end = obj.ends_at.astimezone(tz)
# Then converting to naive timezone, otherwise django-templates
# will change it back to UTC
obj.starts_at = start.replace(tzinfo=None)
obj.ends_at = end.replace(tzinfo=None)
return obj
class Media:
css = {
'all': ('admin/custom-event-admin.css',)
}
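# Illustrative sketch (not part of the original admin code; assumes `from datetime import datetime`):
# the save_model/get_object pair above amounts to this round trip for a user-selected timezone.
#   tz = pytz.timezone('Europe/Berlin')
#   aware = tz.localize(datetime(2022, 6, 1, 10, 0))   # naive -> aware on save
#   naive = aware.astimezone(tz).replace(tzinfo=None)  # aware -> naive for the form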
|
StarcoderdataPython
|
3308183
|
import requests
from .Models import ResponseTx
import binascii
class WhatsOnChainLib(object):
def __init__(self, txid):
self.txid = txid
    @classmethod
    def get_textdata(cls, txid):
try:
#print("txid")
#print(txid)
#time.sleep(0.1)
if txid != "":
url = "https://api.whatsonchain.com/v1/bsv/test/tx/hash/" + txid
headers = {"content-type": "application/json"}
r = requests.get(url, headers=headers)
data = r.json()
op_return = data['vout'][0]['scriptPubKey']['opReturn']
if op_return is None:
return None
hex_upload_data = data['vout'][0]['scriptPubKey']['asm'].split()[3] ##uploaddata (charactor)
parts = op_return['parts']
if parts is None:
return None
upload_mimetype = parts[1] ##MEDIA_Type: image/png, image/jpeg, text/plain, text/html, text/css, text/javascript, application/pdf, audio/mp3
upload_charset = parts[2] ##ENCODING: binary, utf-8 (Definition polyglot/upload.py)
upload_filename = parts[3] ##filename
# print("upload_mimetype: " + upload_mimetype)
# print("upload_charset: " + upload_charset)
# print("upload_filename: " + upload_filename)
# print("hex_upload_data: " + hex_upload_data)
# response = make_response()
if upload_charset == 'binary': #47f0706cdef805761a975d4af2a418c45580d21d4d653e8410537a3de1b1aa4b
#print(binascii.hexlify(upload_data))
upload_data = binascii.unhexlify(hex_upload_data)
elif upload_charset == 'utf-8': #cc80675a9a64db116c004b79d22756d824b16d485990a7dfdf46d4a183b752b2
upload_data = parts[0]
else:
#print('upload_charset' + upload_charset)
upload_data = ''
# downloadFilename = upload_filename
# response.headers["Content-Disposition"] = 'attachment; filename=' + downloadFilename
# response.mimetype = upload_mimetype
#print(upload_data)
# return response]
return ResponseTx(txid, upload_data, upload_mimetype, upload_charset, upload_filename)
except Exception as e:
# TODO: To Logger
print(e)
return None
|
StarcoderdataPython
|
1848172
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from .models import Question, Choice
from django.http import Http404
from django.urls import reverse
from django.shortcuts import get_object_or_404
from django.http import JsonResponse
import urllib.parse as urlparse
from urllib.parse import unquote
import urllib
from bs4 import BeautifulSoup
import json
import os
import subprocess
from django.views.decorators.csrf import csrf_exempt
import logging
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {
'latest_question_list': latest_question_list,
}
return render(request, 'polls/question_answer.html', context)
def detail(request, question_id):
try:
question = Question.objects.get(pk=question_id)
except Question.DoesNotExist:
raise Http404("Question does not exist.")
return render(request, 'polls/detail.html', {'question': question})
def results(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/results.html', {'question': question})
def vote(request, question_id):
question = Question.objects.get(pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        return render(request, 'polls/detail.html', {'question': question,
                                                     'error_message': "You did not select a choice"
                                                     })
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
def qaserver(request, query):
url = request.get_full_path()
url = unquote(url)
url = url.replace("/query", "/?query", 1)
parsed = urlparse.urlparse(url)
query = urlparse.parse_qs(parsed.query)['query']
base = "/home/petrichor/Projects/GSoC/working-gsoc-anand/"
subprocess.call("cd "+base+r" && ./ask.sh data/new_test_frequency_fixed_attention_drop07 " +
"\"" + query[0][:-1].strip() + "\"" + " deactivate && cat nmt/output_decoded.txt > example", shell=True,)
print("cd "+base+r"&& ./ask.sh data/new_test_frequency_fixed_attention_drop07 " +
"\"" + query[0][:-1].strip() + "\"" + " && cat nmt/output_decoded.txt > example")
answer_query = open(base+"example").readline()
answer_query = answer_query.replace("limit\n", "limit 1\n")
query = urllib.parse.quote(answer_query)
url2 = "https://dbpedia.org/sparql?default-graph-uri=http%3A%2F%2Fdbpedia.org&query="+query + \
"&format=text%2Fhtml&CXML_redir_for_subjs=121&CXML_redir_for_hrefs=&timeout=30000&debug=on&run=+Run+Query+"
page = urllib.request.urlopen(url2)
soup = BeautifulSoup(page, "html.parser")
answer = []
for rows in (soup.find_all("tr")):
for td in rows.find_all("a"):
for a in td:
answer.append(a)
return HttpResponse(';'.join(answer))
@csrf_exempt
def qaserver_json(request):
# Create a logger object
logger = logging.getLogger()
# Configure logger
logging.basicConfig(filename="logfile.log", format='%(filename)s: %(message)s', filemode='a')
# Setting threshold level
logger.setLevel(logging.DEBUG)
# Use the logging methods
#logger.debug("This is a debug message")
query = (request.POST.get("query"))
""" url = request.get_full_path()
url = unquote(url)
url = url.replace("/query","/?query",1)
parsed = urlparse.urlparse(url)
query = urlparse.parse_qs(parsed.query)['query'] """
original_query = query
base = "/home/petrichor/Projects/GSoC/working-gsoc-anand/"
subprocess.call("cd "+base+r" && ./ask.sh data/new_test_frequency_fixed_attention_drop07 " +
"\"" + query + "\"" + " deactivate && cat nmt/output_decoded.txt > example", shell=True,)
print("cd "+base+r"&& ./ask.sh data/new_test_frequency_fixed_attention_drop07 " +
"\"" + query + "\"" + " && cat nmt/output_decoded.txt > example")
answer_query = open(base+"example").readline()
answer_query = answer_query.replace("limit\n", "limit 1\n")
query = urllib.parse.quote(answer_query)
url2 = "https://dbpedia.org/sparql?default-graph-uri=http%3A%2F%2Fdbpedia.org&query="+query+"&format=application%2Fsparql-results%2Bjson&CXML_redir_for_subjs=121&CXML_redir_for_hrefs=&timeout=30000&debug=on&run=+Run+Query+"
page = urllib.request.urlopen(url2)
soup = BeautifulSoup(page, "html.parser")
#print(str(soup))
logger.info(request.POST.get("query"))
dic_answer = json.loads(str(soup))
logger.info( dic_answer )
val = {"questions": [{"id": "1", "question": [{"language": "en", "string": original_query}], "query": {"sparql": answer_query}, "answers":[{"head":dic_answer["head"], "results" : {"bindings":dic_answer["results"]["bindings"]}}]}]}
#print(type(val))
j = json.dumps(val)
resp = JsonResponse(val, safe=False)
logger.info(j)
logger.info("**********************")
resp['Access-Control-Allow-Origin'] = '*'
return resp
|
StarcoderdataPython
|
3427068
|
<reponame>kiraacorsac/wonderwordsmodule<filename>wonderwords/cmdline.py
from rich.console import Console
from rich.panel import Panel
from rich.text import Text
from rich.emoji import Emoji
from rich.padding import Padding
from rich.markdown import Markdown
from . import __version__
console = Console()
AVAILABLE_COMMANDS = """## Available Commands
* `wonderwords -w` - generate a random word
* `wonderwords -f` - get all words matching a certain criteria
* `wonderwords -l AMOUNT` - get a list of `AMOUNT` random words
* `wonderwords -s SENT_TYPE` - generate a random sentence of a certain type
For a list of all options, type `wonderwords -h`. To see a detailed and
comprehensive explanation of the commands, visit
[the documentation](https://wonderwords.readthedocs.io)
"""
class WonderwordsCommandLine:
def print_title(self):
title = Panel(
Text(f"WONDERWORDS {__version__}", justify="center"),
padding=1,
style="bold navy_blue on white",
)
console.print(title)
def print_commands(self):
commands = Markdown(AVAILABLE_COMMANDS)
console.print(commands)
def version(self):
console.print(
f"Running wonderwords version {__version__}", style="navy_blue on"
"white"
)
def intro(self):
self.print_title()
info_text = Text(
f"No commands given {Emoji('disappointed_face')}",
justify="center",
style="bold",
)
console.print(Padding(info_text, pad=1))
self.print_commands()
def word(self, word):
word_text = Text(word, style="bold white on navy_blue")
console.print(word_text)
def words(self, words, delimiter):
word_text = Text(
delimiter.join(words), style="bold white on navy_blue"
)
console.print(word_text)
def sentence(self, sent):
sent_text = Text(sent, style="bold white on dark_green")
console.print(sent_text)
def no_word(self):
console.print(
"A word with the parameters specified does not exist! :anguished:",
style="white on red",
)
def no_words(self):
console.print(
(
"There weren't enough words that matched your request. All"
" words available are listed below :anguished: "
),
style="white on red",
)
|
StarcoderdataPython
|
4906405
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Common sampler utilities."""
import random
from typing import Tuple, Union
from torchgeo.datasets.utils import BoundingBox
def _to_tuple(value: Union[Tuple[float, float], float]) -> Tuple[float, float]:
"""Convert value to a tuple if it is not already a tuple.
Args:
value: input value
Returns:
value if value is a tuple, else (value, value)
"""
if isinstance(value, (float, int)):
return (value, value)
else:
return value
def get_random_bounding_box(
bounds: BoundingBox, size: Union[Tuple[float, float], float]
) -> BoundingBox:
"""Returns a random bounding box within a given bounding box.
The ``size`` argument can either be:
* a single ``float`` - in which case the same value is used for the height and
width dimension
* a ``tuple`` of two floats - in which case, the first *float* is used for the
height dimension, and the second *float* for the width dimension
Args:
bounds: the larger bounding box to sample from
size: the size of the bounding box to sample
Returns:
randomly sampled bounding box from the extent of the input
"""
t_size: Tuple[float, float] = _to_tuple(size)
minx = random.uniform(bounds.minx, bounds.maxx - t_size[1])
maxx = minx + t_size[1]
miny = random.uniform(bounds.miny, bounds.maxy - t_size[0])
maxy = miny + t_size[0]
mint = bounds.mint
maxt = bounds.maxt
return BoundingBox(minx, maxx, miny, maxy, mint, maxt)
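# Illustrative usage sketch (not part of the original module; the bounds values are arbitrary):
if __name__ == "__main__":
    demo_bounds = BoundingBox(0.0, 100.0, 0.0, 100.0, 0.0, 1.0)
    print(get_random_bounding_box(demo_bounds, 10.0))         # square 10 x 10 patch
    print(get_random_bounding_box(demo_bounds, (5.0, 20.0)))  # height 5, width 20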
|
StarcoderdataPython
|
269104
|
<gh_stars>0
from lxml import etree
html = etree.parse('./test.html', etree.HTMLParser())
result = html.xpath('//*')
print(result)
|
StarcoderdataPython
|
4996048
|
from functions_files import get_plaintext_from_container_file
def get_container_content(container_file_name, container_password):
container_data = {}
container_content = ""
if container_file_name == "":
container_data = {'error': True, 'status': 'ERROR: Container name was not specified!'}
elif container_password == "":
container_data = {'error': True, 'status': 'ERROR: Container password is empty!'}
else:
try:
container_content = get_plaintext_from_container_file(container_file_name, container_password)
except:
container_data = {'error': True, 'status': 'ERROR: Container ' + container_file_name + ' is not found!'}
if container_content == "":
container_data = {'error': True, 'status': 'ERROR: Container ' + container_file_name + ' is empty!'}
else:
container_data = {'error': False, 'container_content': container_content,
'status': 'OK: Container ' + container_file_name + ' was loaded successfully'}
print(container_data['status'])
    return container_data
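# Illustrative sketch (file name and password below are hypothetical) of the expected call pattern:
#   result = get_container_content("secrets.container", "my-password")
#   if not result['error']:
#       print(result['container_content'])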
|
StarcoderdataPython
|
4946113
|
<filename>siwe_auth/admin.py
from django import forms
from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.core.exceptions import ValidationError
from .models import Wallet
class WalletCreationForm(forms.ModelForm):
class Meta:
model = Wallet
fields = ("ethereum_address",)
def save(self, commit=True):
# Save the provided password in hashed format
user = super().save(commit=False)
if commit:
user.save()
return user
class WalletChangeForm(forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
disabled password hash display field.
"""
class Meta:
model = Wallet
fields = ("ethereum_address", "ens_name", "ens_avatar", "is_active", "is_admin")
class WalletAdmin(BaseUserAdmin):
# The forms to add and change user instances
form = WalletChangeForm
add_form = WalletCreationForm
list_display = (
"ethereum_address",
"ens_name",
"ens_avatar",
"is_active",
"is_admin",
)
list_filter = (
"is_active",
"is_admin",
)
fieldsets = (
(None, {"fields": ("ethereum_address",)}),
("Permissions", {"fields": ("is_admin",)}),
)
add_fieldsets = (
(
None,
{
"classes": ("wide",),
"fields": ("ethereum_address",),
},
),
)
search_fields = (
"ethereum_address",
"ens_name",
)
ordering = (
"ethereum_address",
"ens_name",
)
filter_horizontal = ()
# Now register the new UserAdmin...
admin.site.register(Wallet, WalletAdmin)
|
StarcoderdataPython
|
4947051
|
<gh_stars>1-10
import pytest
from pubnub.pubnub import PubNub
from pubnub.pnconfiguration import PNConfiguration
from pubnub.endpoints.space.get_space import GetSpace
from pubnub.exceptions import PubNubException
SUB_KEY = 'sub'
AUTH = 'auth'
def test_get_space():
config = PNConfiguration()
config.subscribe_key = SUB_KEY
config.auth_key = AUTH
space = PubNub(config).get_space()
space.include(['a', 'b'])
with pytest.raises(PubNubException):
space.build_path()
space.space_id('foo')
assert space.build_path() == GetSpace.GET_SPACE_PATH % (SUB_KEY, 'foo')
params = space.custom_params()
assert params['include'] == ['a', 'b']
assert AUTH == space.build_params_callback()({})['auth']
|
StarcoderdataPython
|
11398744
|
"""Subclass of settings_dialog, which is generated by wxFormBuilder."""
import os
import re
import wx
from . import dialog_base
def pop_error(msg):
wx.MessageBox(msg, 'Error', wx.OK | wx.ICON_ERROR)
class SettingsDialog(dialog_base.SettingsDialogBase):
def __init__(self, config_save_func,
file_name_format_hint, version):
dialog_base.SettingsDialogBase.__init__(self, None)
self.panel = SettingsDialogPanel(
self, config_save_func, file_name_format_hint)
best_size = self.panel.BestSize
# hack for some gtk themes that incorrectly calculate best size
best_size.IncBy(dx=0, dy=30)
self.SetClientSize(best_size)
self.SetTitle('KiZip %s' % version)
# hack for new wxFormBuilder generating code incompatible with old wxPython
# noinspection PyMethodOverriding
def SetSizeHints(self, sz1, sz2):
try:
# wxPython 3
self.SetSizeHintsSz(sz1, sz2)
except TypeError:
# wxPython 4
super(SettingsDialog, self).SetSizeHints(sz1, sz2)
# Implementing settings_dialog
class SettingsDialogPanel(dialog_base.SettingsDialogPanel):
def __init__(self, parent, config_save_func,
file_name_format_hint):
self.config_save_func = config_save_func
dialog_base.SettingsDialogPanel.__init__(self, parent)
self.general = GeneralSettingsPanel(self.notebook,file_name_format_hint)
self.notebook.AddPage(self.general, "General")
self.layers = LayerSettingsPanel(self.notebook)
self.notebook.AddPage(self.layers, "Layers")
def OnExit(self, event):
self.GetParent().EndModal(wx.ID_CANCEL)
def OnSaveSettings(self, event):
self.config_save_func(self)
def OnGenerateGerbers(self, event):
self.GetParent().EndModal(wx.ID_OK)
def finish_init(self):
self.html.OnBoardRotationSlider(None)
# Implementing GeneralSettingsPanelBase
class GeneralSettingsPanel(dialog_base.GeneralSettingsPanelBase):
def __init__(self, parent, file_name_format_hint):
dialog_base.GeneralSettingsPanelBase.__init__(self, parent)
self.file_name_format_hint = file_name_format_hint
def OnNameFormatHintClick(self, event):
wx.MessageBox(self.file_name_format_hint, 'File name format help',
style=wx.ICON_NONE | wx.OK)
# Implementing LayerSettingsPanelBase
class LayerSettingsPanel(dialog_base.LayerSettingsPanelBase):
def __init__(self, parent):
dialog_base.LayerSettingsPanelBase.__init__(self, parent)
self.layers = []
self.bSizer = wx.BoxSizer( wx.VERTICAL )
self.LayerPanelArea.SetSizer(self.bSizer)
def AddLayer(self, l):
array = [p for p,lyr in list(self.layers) if lyr == l]
        pnl = array[0] if len(array) > 0 else None
if pnl is None:
# Create wx element and add to scroll box
pnl = LayerItemPanelBase(self.LayerPanelArea, l.enabled, l.name, l.ext)
self.bSizer.Add(pnl, 0, wx.EXPAND, 5 )
self.layers += [(pnl, l)]
# Implementing LayerSettingsPanelBase
class LayerItemPanelBase(dialog_base.LayerItemPanelBase):
def __init__(self, parent, enabled, name, ext):
dialog_base.LayerItemPanelBase.__init__(self, parent)
self.LayerEnabledCheckbox.SetLabel(name)
self.LayerEnabledCheckbox.SetValue(enabled)
self.Extension.SetValue(ext)
self.name = name
def IsEnabled(self):
return self.LayerEnabledCheckbox.IsChecked()
def GetExtension(self):
return self.Extension.GetValue()
|
StarcoderdataPython
|
6494231
|
<reponame>datamut/date-calculator<filename>scidate/scidate/exceptions.py
"""
Customised Exceptions
"""
class InvalidDateException(Exception):
"""
Invalid Date Exception
"""
pass
class InvalidDateFormatException(Exception):
"""
Invalid Date Format Exception
"""
pass
|
StarcoderdataPython
|
302940
|
import sys, os
ApplicationDirectory = 'warehouse'
ApplicationName = 'warehouse'
VirtualEnvDirectory = 'python-app-venv'
VirtualEnv = os.path.join(os.getcwd(), VirtualEnvDirectory, 'bin', 'python')
if sys.executable != VirtualEnv: os.execl(VirtualEnv, VirtualEnv, *sys.argv)
sys.path.insert(0, os.path.join(os.getcwd(), ApplicationDirectory))
sys.path.insert(0, os.path.join(os.getcwd(), ApplicationDirectory, ApplicationName))
sys.path.insert(0, os.path.join(os.getcwd(), VirtualEnvDirectory, 'bin'))
os.chdir(os.path.join(os.getcwd(), ApplicationDirectory))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', ApplicationName + '.settings')
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
StarcoderdataPython
|
1670652
|
<filename>shb-vgg/exp/12-05_13-19_SHHB_VGG_1e-05_[norm+flip]/code/old-cca/loaders.py
import csv
import math
import os
from glob import glob
import cv2
import numpy as np
from scipy.io import loadmat
def get_density_map_gaussian(im, points):
"""
Create a Gaussian density map from the points.
Credits: https://github.com/ZhengPeng7/Multi_column_CNN_in_Keras/blob/master/data_preparation/get_density_map_gaussian.py
:param im: Original image, used only for getting needed shape of the density map.
:param points: List of (X, Y) tuples that point at where human heads are located in a picture.
:return: Density map constructed from the points.
"""
im_density = np.zeros_like(im[:, :, 0], dtype=np.float64)
h, w = im_density.shape
if points is None:
return im_density
if points.shape[0] == 1:
x1 = max(0, min(w-1, round(points[0, 0])))
y1 = max(0, min(h-1, round(points[0, 1])))
im_density[y1, x1] = 255
return im_density
for j in range(points.shape[0]):
f_sz = 15
sigma = 4.0
H = np.multiply(cv2.getGaussianKernel(f_sz, sigma), (cv2.getGaussianKernel(f_sz, sigma)).T)
x = min(w-1, max(0, abs(int(math.floor(points[j, 0])))))
y = min(h-1, max(0, abs(int(math.floor(points[j, 1])))))
if x >= w or y >= h:
continue
x1 = x - f_sz//2 + 0
y1 = y - f_sz//2 + 0
x2 = x + f_sz//2 + 1
y2 = y + f_sz//2 + 1
dfx1, dfy1, dfx2, dfy2 = 0, 0, 0, 0
change_H = False
if x1 < 0:
dfx1 = abs(x1) + 0
x1 = 0
change_H = True
if y1 < 0:
dfy1 = abs(y1) + 0
y1 = 0
change_H = True
if x2 > w:
dfx2 = x2 - w
x2 = w
change_H = True
if y2 > h:
dfy2 = y2 - h
y2 = h
change_H = True
x1h, y1h, x2h, y2h = 1 + dfx1, 1 + dfy1, f_sz - dfx2, f_sz - dfy2
if change_H is True:
H = np.multiply(cv2.getGaussianKernel(y2h-y1h+1, sigma), (cv2.getGaussianKernel(x2h-x1h+1, sigma)).T)
im_density[y1:y2, x1:x2] += H
return im_density
class Loader:
"""
Abstract base loader that should return an iterable of samples, either images, lists of points or density maps.
"""
def load(self):
""" Method that must be implemented in the subclasses, returning an iterable of samples """
raise NotImplementedError("load not implemented in the child class")
@staticmethod
def _prepare_args(local_vars):
""" Simple method that removes unwanted 'self' variable from the set that will be stored for loading and saving pipelines"""
return {k: v for k, v in local_vars.items() if k != 'self'}
def get_number_of_loadable_samples(self):
"""
Return number of samples from the dataset that can and will be loaded by the loader, or None if it's unknown.
:return: Number of samples that can be loaded, including the already loaded ones.
"""
return None
class BasicImageFileLoader(Loader):
"""
Loader for images stored in image files. Allows reading any files that opencv-python can handle - e.g. JPG, PNG.
"""
def __init__(self, img_paths):
"""
Create a new image loader that reads all image files from paths.
:param img_paths: Paths to all images that are to be loaded.
"""
Loader.__init__(self)
self.args = self._prepare_args(locals())
self.img_paths = img_paths
def get_number_of_loadable_samples(self):
"""
Get number of images to load, according to number of specified paths.
:return: Number of images.
"""
return len(self.img_paths)
def load(self):
"""
Load all images based on provided paths to files.
:return: Generator of images in BGR format.
"""
for path in self.img_paths:
yield cv2.imread(path, cv2.IMREAD_COLOR)
class ImageFileLoader(BasicImageFileLoader):
"""
Loader for all images of some type in a given directory.
"""
def __init__(self, img_dir, file_extension="jpg"):
"""
Create a new image loader that reads all the images with specified file extension in a given directory.
:param img_dir: Directory to be searched.
:param file_extension: Desired extension of files to be loaded.
"""
local = locals().copy()
paths = sorted(glob(os.path.join(img_dir, f"*.{file_extension}")))
BasicImageFileLoader.__init__(self, paths)
self.args = self._prepare_args(local)
class BasicGTPointsMatFileLoader(Loader):
"""
Loader for ground truth data stored as lists of head positions in Matlab files.
"""
def __init__(self, gt_paths, getter):
"""
Create a loader that loads all data from the provided file paths using a given getter.
:param gt_paths: Paths of files that are to be read.
:param getter: Lambda that takes Matlab file content and returns list of head positions in form of (X, Y) tuples.
"""
Loader.__init__(self)
self.args = self._prepare_args(locals())
self.gt_paths = gt_paths
self.getter = getter
def get_number_of_loadable_samples(self):
"""
Get number of GTs to load, according to number of specified paths.
:return: Number of GTs.
"""
return len(self.gt_paths)
def load(self):
"""
Load all Matlab files from paths.
:return: Generator of lists of head positions - (X, Y) tuples.
"""
for path in self.gt_paths:
yield self.getter(loadmat(path))
class GTPointsMatFileLoader(BasicGTPointsMatFileLoader):
"""
Loader for head positions in all Matlab files in a given directory.
"""
def __init__(self, gt_dir, getter, file_extension="mat"):
"""
Create a loader that searches for files with specified extension in a given directory and loads them.
:param gt_dir: Directory to be searched.
:param file_extension: Desired file extension of Matlab files.
"""
local = locals().copy()
paths = sorted(glob(os.path.join(gt_dir, f"*.{file_extension}")))
BasicGTPointsMatFileLoader.__init__(self, paths, getter)
self.args = self._prepare_args(local)
class BasicDensityMapCSVFileLoader(Loader):
"""
Loader for density maps stored in separate CSV files.
"""
def __init__(self, dm_paths):
"""
Create a loader that loads density maps at specified paths.
:param dm_paths: Paths to CSV files with density maps.
"""
Loader.__init__(self)
self.args = self._prepare_args(locals())
self.dm_paths = dm_paths
def get_number_of_loadable_samples(self):
"""
Get number of density maps to load, according to number of specified paths.
:return: Number of density maps.
"""
return len(self.dm_paths)
def load(self):
"""
Load all density maps from all specified paths.
:return: Generator of density maps.
"""
for path in self.dm_paths:
den_map = []
with open(path, 'r', newline='') as f:
for row in csv.reader(f):
den_row = []
for cell in row:
den_row.append(float(cell))
den_map.append(den_row)
yield np.array(den_map)
class DensityMapCSVFileLoader(BasicDensityMapCSVFileLoader):
"""
Loader for density maps stored in all CSV files in a given directory.
"""
def __init__(self, den_map_dir, file_extension="csv"):
"""
Create a loader that searches for files with the given extension in the given directory and loads them.
:param den_map_dir: Directory to be searched.
:param file_extension: Desired extension of files to be loaded.
"""
local = locals().copy()
paths = sorted(glob(os.path.join(den_map_dir, f"*.{file_extension}")))
BasicDensityMapCSVFileLoader.__init__(self, paths)
self.args = self._prepare_args(local)
class VariableLoader(Loader):
"""
Loader that loads from a variable (list or array) instead of file. May be useful when connecting pipelines.
"""
def __init__(self, data):
"""
Create a loader that reads from a variable (list or array most probably) and yields the results.
:param data: Iterable that has len() with either images or density maps.
"""
self.args = None # saving dataset variables, possibly consisting of thousands of samples, to a json file would be dangerous
self.data = data
def get_number_of_loadable_samples(self):
"""
Return length of the dataset in the variable.
:return: Number of samples.
"""
return len(self.data)
def load(self):
"""
Read the variable and yield samples one by one.
:return: Generator of either images or density maps.
"""
for sample in self.data:
yield sample
class ConcatenatingLoader(Loader):
"""
Loader that doesn't perform any loading on its own but rather concatenates samples from a few sources.
"""
def __init__(self, loaders):
"""
Create a loader that concatenates loading results from a few loaders.
:param loaders: Loaders whose results will be concatenated.
"""
Loader.__init__(self)
self.args = [{'name': loader.__class__.__name__, 'args': loader.args} for loader in loaders]
self.loaders = loaders
def get_number_of_loadable_samples(self):
"""
Get number of samples to load throughout loaders.
:return: Cumulative number of samples.
"""
return sum([loader.get_number_of_loadable_samples() for loader in self.loaders])
def load(self):
"""
Load all samples from all connected loaders.
:return: Generator of samples, be it images, GT point lists or density maps.
"""
for loader in self.loaders:
            for sample in loader.load():
yield sample
class CombinedLoader(Loader):
"""
Loader that should be primarily used with a pipeline - zips or combines an iterable of images with an iterable of
density maps (be it straight from a loader or from transformed on-the-fly GT points).
"""
def __init__(self, img_loader, gt_loader, den_map_loader=None):
"""
Create a combined loader. Either `gt_loader` or `den_map_loader` must be specified (but not both) in order to
provide density maps related to the images loaded using `img_loader`.
:param img_loader: Loader that provides an iterable of images.
:param gt_loader: Loader that provides an iterable of lists of points.
:param den_map_loader: Loader that provides an iterable of density maps.
"""
if (gt_loader is None) == (den_map_loader is None):
raise ValueError("One and only one loader for target must be selected")
Loader.__init__(self)
self.args = {
'img_loader': {'name': img_loader.__class__.__name__, 'args': img_loader.args},
            'gt_loader': None if gt_loader is None else {'name': gt_loader.__class__.__name__, 'args': gt_loader.args},
'den_map_loader': None if den_map_loader is None else {'name': den_map_loader.__class__.__name__, 'args': den_map_loader.args}
}
self.img_loader = img_loader
self.gt_loader = gt_loader
self.den_map_loader = den_map_loader
def get_number_of_loadable_samples(self):
"""
Get number of full samples (img+DM pairs).
:return: Number of samples.
"""
if self.den_map_loader is None:
return min(self.img_loader.get_number_of_loadable_samples(), self.gt_loader.get_number_of_loadable_samples())
else:
return min(self.img_loader.get_number_of_loadable_samples(), self.den_map_loader.get_number_of_loadable_samples())
def load(self):
"""
Load and return all img+DM pairs, one by one. If a GT loader is used instead of a DM loader, first transform
GT points to a density map.
:return: Generator of img+DM pairs.
"""
cnt = 0
img_gen = self.img_loader.load()
if self.den_map_loader is None:
gt_gen = self.gt_loader.load()
try:
while True:
img = next(img_gen)
try:
gt = next(gt_gen)
except StopIteration:
raise ValueError(f"Missing ground truth for image {str(cnt)}")
den_map = get_density_map_gaussian(img, gt)
yield img, den_map
cnt += 1
except StopIteration:
pass
else:
dm_gen = self.den_map_loader.load()
try:
while True:
img = next(img_gen)
try:
den_map = next(dm_gen)
except StopIteration:
raise ValueError(f"Missing density map for image {str(cnt)}")
yield img, den_map
cnt += 1
except StopIteration:
pass
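# Illustrative sketch (paths and the .mat getter below are hypothetical assumptions), wiring the
# loaders above into one pipeline that yields image/density-map pairs:
#   imgs = ImageFileLoader("data/images", file_extension="jpg")
#   gts = GTPointsMatFileLoader("data/ground_truth",
#                               getter=lambda mat: mat["image_info"][0][0][0][0][0])
#   combined = CombinedLoader(imgs, gt_loader=gts)
#   for image, density_map in combined.load():
#       print(image.shape, density_map.sum())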
|
StarcoderdataPython
|
1957975
|
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def _get_transform():
return transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
def get_train_data_loader():
transform = _get_transform()
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
return torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=2)
def get_test_data_loader():
transform = _get_transform()
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
return torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=2)
# function to show an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
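# Illustrative usage sketch (not part of the original module; downloads CIFAR-10 on first use):
if __name__ == '__main__':
    from torchvision.utils import make_grid  # explicit import for the demo only
    loader = get_train_data_loader()
    images, labels = next(iter(loader))
    imshow(make_grid(images))
    plt.show()
    print(' '.join(classes[label] for label in labels))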
|
StarcoderdataPython
|
9697111
|
import pickle
from collections import Counter
import json
import jieba
import nltk
from tqdm import tqdm
import numpy as np
from config import train_filename,valid_filename,maxlen_in,\
vocab_file, maxlen_out, data_file, sos_id, eos_id, n_src_vocab, \
unk_id
from utils import normalizeString, encode_text
def build_vocab(token, word2idx, idx2char):
if token not in word2idx:
next_index = len(word2idx)
word2idx[token] = next_index
idx2char[next_index] = token
def getCQpair(filename):
with open(filename, 'r') as fh:
train_txt = json.load(fh)
questions = []
contexts = []
for i in range(len(train_txt)):
questions.append(train_txt[i]["question"])
contexts_per = ""
for j in range(len(train_txt[i]["context"])):
contexts_per += train_txt[i]["context"][j][1][0]
contexts.append(contexts_per)
# train_data = []
# for i in range(len(contexts)):
# train_data.append(["ask_question", contexts[i], questions[i]])
#
# train_df = pd.DataFrame(train_data)
# train_df.columns = ["prefix", "input_text", "target_text"]
return contexts,questions
def process(file):
print('processing {}...'.format(file))
contexts, questions = getCQpair(file)
print('contexts length',len(contexts))
print('questions length',len(questions))
word_freq = Counter()
lengths = []
for line in tqdm(contexts):
sentence = line.strip()
# sentence_en = sentence.lower()
tokens = [s for s in nltk.word_tokenize(sentence)]
word_freq.update(list(tokens))
vocab_size = n_src_vocab
lengths.append(len(tokens))
words = word_freq.most_common(vocab_size - 4)
word_map = {k[0]: v + 4 for v, k in enumerate(words)}
word_map['<pad>'] = 0
word_map['<sos>'] = 1
word_map['<eos>'] = 2
word_map['<unk>'] = 3
print(len(word_map))
print(words[:100])
#
# n, bins, patches = plt.hist(lengths, 50, density=True, facecolor='g', alpha=0.75)
#
# plt.xlabel('Lengths')
# plt.ylabel('Probability')
# plt.title('Histogram of Lengths')
# plt.grid(True)
# plt.show()
word2idx = word_map
idx2char = {v: k for k, v in word2idx.items()}
return word2idx, idx2char,word_freq
def get_data(in_file):
contexts, questions = getCQpair(in_file)
samples = []
for i in tqdm(range(len(contexts))):
tokens = [s.strip() for s in nltk.word_tokenize(contexts[i])]
in_data = encode_text(word2idx_dict, tokens)
q_tokens = [s.strip() for s in nltk.word_tokenize(questions[i])]
out_data = [sos_id] + encode_text(word2idx_dict, q_tokens) + [eos_id]
if len(in_data) < maxlen_in and len(out_data) < maxlen_out:
samples.append({'in': in_data, 'out': out_data})
return samples
def get_embedding(counter, data_type, limit=-1, emb_file=None, size=None, vec_size=None, token2idx_dict=None):
print("Generating {} embedding...".format(data_type))
embedding_dict = {}
# filtered_elements = [k for (k, v) in counter if v > limit]
filtered_elements = [k for k,v in counter.items() if v > limit]
if emb_file is not None:
assert size is not None
assert vec_size is not None
with open(emb_file, "r", encoding="utf-8") as fh:
for line in tqdm(fh, total=size):
array = line.split()
word = "".join(array[0:-vec_size])
vector = list(map(float, array[-vec_size:]))
if word in counter and counter[word] > limit:
embedding_dict[word] = vector
print("{} / {} tokens have corresponding {} embedding vector".format(
len(embedding_dict), len(filtered_elements), data_type))
token2idx_dict = {token: idx+2 for idx, token in enumerate(
embedding_dict.keys(), 2)}
token2idx_dict['<pad>'] = 0
token2idx_dict['<sos>'] = 1
token2idx_dict['<eos>'] = 2
token2idx_dict['<unk>'] = 3
embedding_dict['<pad>'] = [0. for _ in range(vec_size)]
embedding_dict['<sos>'] = [0. for _ in range(vec_size)]
embedding_dict['<eos>'] = [0. for _ in range(vec_size)]
embedding_dict['<unk>'] = [0. for _ in range(vec_size)]
idx2emb_dict = {idx: embedding_dict[token]
for token, idx in token2idx_dict.items()}
emb_mat = [idx2emb_dict[idx] for idx in range(len(idx2emb_dict))]
idx2token_dict = {idx: token for token, idx in token2idx_dict.items()}
return emb_mat, token2idx_dict, idx2token_dict
if __name__ == '__main__':
word2idx, idx2word,word_counter = process(train_filename)
# vocab = pickle.load(open( "vocab.pkl", "rb" ))
# print(vocab)
# word2idx = vocab['dict']['word2idx']
# idx2word = vocab['dict']['idx2word']
word_emb_mat, word2idx_dict, idx2word_dict = get_embedding(word_counter, "word", emb_file= "glove.840B.300d.txt",
size=int(2.2e6), vec_size=300,
token2idx_dict=word2idx)
print(len(word_emb_mat))
print(len(word2idx_dict))
print(len(idx2word_dict))
data = {
'dict': {
'word2idx': word2idx_dict,
'idx2word': idx2word_dict,
}
}
with open('word_emb.p', 'wb') as file:
pickle.dump(word_emb_mat, file)
with open(vocab_file, 'wb') as file:
pickle.dump(data, file)
train = get_data(train_filename)
valid = get_data(valid_filename)
data = {
'train': train,
'valid': valid
}
print('num_train: ' + str(len(train)))
print('num_valid: ' + str(len(valid)))
with open(data_file, 'wb') as file:
pickle.dump(data, file)
|
StarcoderdataPython
|
6701107
|
import argparse
import sys
from itertools import izip
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import numpy as np
import trace_parser
import trace as trace_utils
import string
import pdb
import search
dot = lambda x,y: sum(a*b for a,b in izip(x,y))
def produce_gnuplot_file(costs, times, names):
k_stats(costs, times, names)
with open("whole_program.dat", "w") as f:
for cost, time, name in izip(costs, times, names):
f.write(str(cost) + " " + str(time) + " " + name + " " + str(time / cost) + "\n")
def k_stats(costs, times, names):
ks = np.array([time/cost for cost, time in izip(costs, times)])
print "Mean: " + str(np.mean(ks)) + " STD DEV: " + str(np.std(ks))
def graph_residual(costs0, costsc, costsw, times, names):
width = 0.2333
ind = np.arange(len(names))
fn0 = np.poly1d(np.polyfit(costs0,times, 1))
fnc = np.poly1d(np.polyfit(costsc,times, 1))
fnw = np.poly1d(np.polyfit(costsw,times, 1))
res0 = np.subtract(times, fn0(costs0))
resc = np.subtract(times, fnc(costsc))
resw = np.subtract(times, fnw(costsw))
rects0 = plt.bar(ind, res0, width, color='r', hatch='/')
rectsc = plt.bar(ind+width, resc, width, color='g', hatch='-')
rectsw = plt.bar(ind+ 2*width, resw, width, color='b', hatch='\\')
plt.ylabel("Residual")
plt.xlabel("Benchmark")
plt.title("Residuals for each benchmark")
tick_names = [string.replace(name, "generic", "gen") for name in names]
plt.xticks(ind + 1.5*width, tick_names, rotation=20, ha = 'right')
outliers = ["fibfp", "heapsort", "ack", "divrec", "fib", "lattice", "trav2", "tak"]
plt.legend((rects0[0], rectsc[0], rectsw[0]), ("CM0", "CMC", "CMW"), title="Cost Model")
for rect0,rectc,rectw, name in izip(rects0,rectsc,rectsw, names):
if name in outliers:
rect0.set(hatch='*', alpha=0.50)
rectc.set(hatch='*', alpha=0.50)
rectw.set(hatch='*', alpha=0.50)
plt.show()
def residual_graph(filenames):
cm0 = [0,0,0,0,0]
cmc = [1,1,1,1,1]
cmw = [15.07, 2.43, 42.14, 709.79,1]
average_times = trace_parser.calculate_average_times()
programs = trace_parser.parse_files(filenames)
counts = {program.name: program.class_counts() for program in programs}
trace_utils.Fragment.model = cm0
costsc = [dot(counts[program.name], cmc) for program in programs]
costsw = [dot(counts[program.name], cmw) for program in programs]
costs0 = [program.cost() for program in programs]
times = [average_times[program.name] for program in programs]
names = [program.name for program in programs]
graph_residual(costs0, costsc,costsw, times, names)
sys.exit(0)
def rsquared(coeffs, x,y ):
# Polynomial Coefficients
results = {}
results['polynomial'] = coeffs.tolist()
# r-squared
p = np.poly1d(coeffs)
# fit values, and mean
yhat = p(x) # or [p(z) for z in x]
ybar = np.sum(y)/len(y) # or sum(y)/len(y)
ssreg = np.sum((yhat-ybar)**2) # or sum([ (yihat - ybar)**2 for yihat in yhat])
sstot = np.sum((y - ybar)**2) # or sum([ (yi - ybar)**2 for yi in y])
results['determination'] = ssreg / sstot
return results
def graph(costs, times, names, model):
outliers = ["fibfp", "heapsort", "ack", "divrec", "fib", "lattice", "trav2", "tak"]
filtered_names = [name for name in names if name not in outliers]
filtered_costs = [ cost for cost,name in izip(costs, names) if name in filtered_names]
filtered_times = [time for time, name in izip(times, names) if name in filtered_names]
outlier_costs = [cost for cost, name in izip(costs, names) if name in outliers]
outlier_times = [time for time, name in izip(times, names) if name in outliers]
coeffs = np.polyfit(filtered_costs, filtered_times, 1)
fit_fn = np.poly1d(coeffs)
k_stats(costs, times, names)
print fit_fn
print "rsquared"
print rsquared(coeffs, filtered_costs, filtered_times)
plt.ylabel("Execution time ($\mu s$)")
plt.xlabel("Cost")
plt.title("Whole program plot for " + model.upper())
plt.plot( filtered_costs, filtered_times, 'xg', label="Points included in fit" )
plt.plot(filtered_costs, fit_fn(filtered_costs), '-b')
plt.plot( outlier_costs, outlier_times, 'or', label="Points excluded by subsampling")
plt.legend()
plt.show()
def superimpose(costs1, costs2, times,names):
axes = [plt, plt.twiny()]
colors = ('g', 'b')
offsets = (20,-20)
for ax, color, costs, offset in izip(axes, colors, [costs1,costs2], offsets):
#parameter, covariance_matrix = curve_fit(line_func, times, costs)
m, b = np.polyfit(costs, times, 1)
fit_fn = np.poly1d((m,b))
ax.plot( costs[:10], times[:10], 'o' + color, costs, fit_fn(costs), '-' + color)
print fit_fn
for name, x,y in izip(names[:10], costs[:10], times[:10]):
plt.annotate(
name,
xy =(x,y),
xytext =(20,offset),
textcoords = 'offset points', ha = 'left', va = 'bottom',
arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
#ax.plot(x, line_func(x, *parameter), color=color)
plt.show()
def line_func(x, a, b):
return a*x + b
def super_graph(filenames):
cm0 = [0,0,0,0,0]
cmc = [1,1,1,1,1]
cmw = [211,34,590,9937,14]
average_times = trace_parser.calculate_average_times()
programs = trace_parser.parse_files(filenames)
counts = {program.name: program.class_counts() for program in programs}
trace_utils.Fragment.model = cm0
costsc = [dot(counts[program.name], cmc) for program in programs]
costsw = [dot(counts[program.name], cmw) for program in programs]
costs0 = [program.cost() for program in programs]
times = [average_times[program.name] for program in programs]
names = [program.name for program in programs]
superimpose(costsc,costsw, times, names)
sys.exit(0)
def unfiltered_graph(costs, times):
coeffs = np.polyfit(costs, times,1)
fit_fn = np.poly1d(coeffs)
print fit_fn
print "rsquared", search.fit(costs, times)
plt.ylabel("Execution time ($\\mu s$)")
plt.title("Plot using parameters found through linear regression")
plt.xlabel("Cost")
plt.plot(costs, times, 'xg')
plt.plot(costs, fit_fn(costs), '-b')
#plt.show()
plt.savefig("model_scatter.png")
def main():
parser = argparse.ArgumentParser(description="Run cost analysis")
parser.add_argument("filenames", metavar="<file>", nargs = '+')
parser.add_argument("--model", "-m", default="cmw")
parser.add_argument( "-k", action='store_true')
parser.add_argument( "-s", action='store_true')
parser.add_argument( "-n", action='store_true')
args = parser.parse_args()
if args.k:
residual_graph(args.filenames)
if args.s:
super_graph(args.filenames)
model = []
if args.model == "cm0":
model = [0,0,0,0,0]
elif args.model == "cmc":
model = [1,1,1,1,1]
elif args.model == "cmw":
model = [211,34,590,9937,14]
else:
model = [float(num) for num in args.model.split(",")]
programs = trace_parser.parse_files(args.filenames)
counts = {program.name: program.class_counts() for program in programs}
average_times = []
for program in programs:
if program.net_time() > 20000000:
print "foo", program.name
if args.n:
times = [program.net_time() for program in programs]
else:
average_times = trace_parser.calculate_average_times()
trace_utils.Fragment.model = model
costs = [dot(counts[program.name], model) for program in programs]
if model == [0,0,0,0,0]:
print "FOOOOOO"
trace_utils.Fragment.model = [0,0,0,0,0,0,0]
costs = [program.cost() for program in programs]
if args.n:
unfiltered_graph(costs, times)
times = [average_times[program.name] for program in programs]
names = [program.name for program in programs]
graph(costs, times, names, args.model)
#produce_gnuplot_file(costs, times,names)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
147502
|
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_hdinsightcluster
version_added: "2.8"
short_description: Manage Azure HDInsight Cluster instance
description:
- Create, update and delete instance of Azure HDInsight Cluster.
options:
resource_group:
description:
- The name of the resource group.
required: True
name:
description:
- The name of the cluster.
required: True
location:
description:
- Resource location. If not set, location from the resource group will be used as default.
cluster_version:
description:
- The version of the cluster. For example C(3.6).
os_type:
description:
- The type of operating system.
choices:
- 'linux'
tier:
description:
- The cluster tier.
choices:
- 'standard'
- 'premium'
cluster_definition:
description:
- The cluster definition.
suboptions:
kind:
description:
- The type of cluster.
choices:
- hadoop
- spark
- hbase
- storm
gateway_rest_username:
description:
- Gateway REST user name.
gateway_rest_password:
description:
- Gateway REST password.
compute_profile_roles:
description:
- The list of roles in the cluster.
type: list
suboptions:
name:
description:
- The name of the role.
choices:
- 'headnode'
- 'workernode'
- 'zookepernode'
min_instance_count:
description:
- The minimum instance count of the cluster.
target_instance_count:
description:
- The instance count of the cluster.
vm_size:
description:
- The size of the VM.
linux_profile:
description:
- The Linux OS profile.
suboptions:
username:
description:
- SSH user name.
password:
description:
- SSH password.
storage_accounts:
description:
- The list of storage accounts in the cluster.
type: list
suboptions:
name:
description:
- Blob storage endpoint. For example storage_account_name.blob.core.windows.net.
is_default:
description:
- Whether or not the storage account is the default storage account.
container:
description:
- The container in the storage account.
key:
description:
- The storage account access key.
state:
description:
- Assert the state of the cluster.
- Use C(present) to create or update a cluster and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- <NAME> (@zikalino)
'''
EXAMPLES = '''
- name: Create instance of HDInsight Cluster
azure_rm_hdinsightcluster:
resource_group: myResourceGroup
name: myCluster
location: eastus2
cluster_version: 3.6
os_type: linux
tier: standard
cluster_definition:
kind: spark
gateway_rest_username: http-user
gateway_rest_password: <PASSWORD>
storage_accounts:
- name: myStorageAccount.blob.core.windows.net
is_default: yes
container: myContainer
key: <KEY>
compute_profile_roles:
- name: headnode
target_instance_count: 2
hardware_profile:
vm_size: Standard_D3
linux_profile:
username: sshuser
password: <PASSWORD>
- name: workernode
target_instance_count: 2
vm_size: Standard_D3
linux_profile:
username: sshuser
password: <PASSWORD>
'''
RETURN = '''
id:
description:
- Fully qualified resource id of the cluster.
returned: always
type: str
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.HDInsight/clusters/myCluster
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.hdinsight import HDInsightManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMClusters(AzureRMModuleBase):
"""Configuration class for an Azure RM Cluster resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
location=dict(
type='str'
),
cluster_version=dict(
type='str'
),
os_type=dict(
type='str',
choices=['linux']
),
tier=dict(
type='str',
choices=['standard',
'premium']
),
cluster_definition=dict(
type='dict'
),
compute_profile_roles=dict(
type='list'
),
storage_accounts=dict(
type='list'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.name = None
self.parameters = dict()
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
self.tags_changed = False
self.new_instance_count = None
super(AzureRMClusters, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.parameters[key] = kwargs[key]
dict_expand(self.parameters, ['cluster_version'], 'properties')
dict_camelize(self.parameters, ['os_type'], True)
dict_expand(self.parameters, ['os_type'], 'properties')
dict_camelize(self.parameters, ['tier'], True)
dict_expand(self.parameters, ['tier'], 'properties')
dict_rename(self.parameters, ['cluster_definition', 'gateway_rest_username'], 'restAuthCredential.username')
dict_rename(self.parameters, ['cluster_definition', 'gateway_rest_password'], 'restAuthCredential.password')
dict_expand(self.parameters, ['cluster_definition', 'restAuthCredential.username'], 'gateway')
dict_expand(self.parameters, ['cluster_definition', 'restAuthCredential.password'], 'gateway')
dict_expand(self.parameters, ['cluster_definition', 'gateway'], 'configurations')
dict_expand(self.parameters, ['cluster_definition'], 'properties')
dict_expand(self.parameters, ['compute_profile_roles', 'vm_size'], 'hardware_profile')
dict_rename(self.parameters, ['compute_profile_roles', 'linux_profile'], 'linux_operating_system_profile')
dict_expand(self.parameters, ['compute_profile_roles', 'linux_operating_system_profile'], 'os_profile')
dict_rename(self.parameters, ['compute_profile_roles'], 'roles')
dict_expand(self.parameters, ['roles'], 'compute_profile')
dict_expand(self.parameters, ['compute_profile'], 'properties')
dict_rename(self.parameters, ['storage_accounts'], 'storageaccounts')
dict_expand(self.parameters, ['storageaccounts'], 'storage_profile')
dict_expand(self.parameters, ['storage_profile'], 'properties')
response = None
self.mgmt_client = self.get_mgmt_svc_client(HDInsightManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
if "location" not in self.parameters:
self.parameters["location"] = resource_group.location
old_response = self.get_cluster()
if not old_response:
self.log("Cluster instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("Cluster instance already exists")
if self.state == 'absent':
self.to_do = Actions.Delete
elif self.state == 'present':
compare_result = {}
if (not default_compare(self.parameters, old_response, '', compare_result)):
if compare_result.pop('/properties/compute_profile/roles/*/target_instance_count', False):
# check if it's workernode
new_count = 0
old_count = 0
for role in self.parameters['properties']['compute_profile']['roles']:
if role['name'] == 'workernode':
new_count = role['target_instance_count']
for role in old_response['properties']['compute_profile']['roles']:
if role['name'] == 'workernode':
old_count = role['target_instance_count']
if old_count != new_count:
self.new_instance_count = new_count
self.to_do = Actions.Update
if compare_result.pop('/tags', False):
self.to_do = Actions.Update
self.tags_changed = True
if compare_result:
for k in compare_result.keys():
self.module.warn("property '" + k + "' cannot be updated (" + compare_result[k] + ")")
self.module.warn("only tags and target_instance_count can be updated")
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the Cluster instance")
self.results['changed'] = True
if self.check_mode:
return self.results
response = self.create_update_cluster()
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("Cluster instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_cluster()
else:
self.log("Cluster instance unchanged")
self.results['changed'] = False
response = old_response
if self.state == 'present':
self.results.update(self.format_item(response))
return self.results
def create_update_cluster(self):
'''
Creates or updates Cluster with the specified configuration.
:return: deserialized Cluster instance state dictionary
'''
self.log("Creating / Updating the Cluster instance {0}".format(self.name))
try:
if self.to_do == Actions.Create:
response = self.mgmt_client.clusters.create(resource_group_name=self.resource_group,
cluster_name=self.name,
parameters=self.parameters)
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
else:
if self.tags_changed:
response = self.mgmt_client.clusters.update(resource_group_name=self.resource_group,
cluster_name=self.name,
tags=self.parameters.get('tags'))
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
if self.new_instance_count:
response = self.mgmt_client.clusters.resize(resource_group_name=self.resource_group,
cluster_name=self.name,
target_instance_count=self.new_instance_count)
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.fail("Error creating or updating Cluster instance: {0}".format(str(exc)))
return response.as_dict() if response else {}
def delete_cluster(self):
'''
Deletes specified Cluster instance in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the Cluster instance {0}".format(self.name))
try:
response = self.mgmt_client.clusters.delete(resource_group_name=self.resource_group,
cluster_name=self.name)
except CloudError as e:
self.fail("Error deleting the Cluster instance: {0}".format(str(e)))
return True
def get_cluster(self):
'''
Gets the properties of the specified Cluster.
:return: deserialized Cluster instance state dictionary
'''
self.log("Checking if the Cluster instance {0} is present".format(self.name))
found = False
try:
response = self.mgmt_client.clusters.get(resource_group_name=self.resource_group,
cluster_name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("Cluster instance : {0} found".format(response.name))
except Exception as e:
self.log('Did not find the Cluster instance.')
if found is True:
return response.as_dict()
return False
def format_item(self, d):
d = {
'id': d.get('id', None)
}
return d
def default_compare(new, old, path, result):
if new is None:
match = True
elif isinstance(new, dict):
match = True
if not isinstance(old, dict):
result[path] = 'old dict is null'
match = False
else:
for k in new.keys():
if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
match = False
elif isinstance(new, list):
if not isinstance(old, list) or len(new) != len(old):
result[path] = 'length is different or null'
match = False
elif len(old) == 0:
match = True
else:
match = True
if isinstance(old[0], dict):
key = None
if 'id' in old[0] and 'id' in new[0]:
key = 'id'
elif 'name' in old[0] and 'name' in new[0]:
key = 'name'
else:
key = list(old[0])[0]
new = sorted(new, key=lambda x: x.get(key, ''))
old = sorted(old, key=lambda x: x.get(key, ''))
else:
new = sorted(new)
old = sorted(old)
for i in range(len(new)):
if not default_compare(new[i], old[i], path + '/*', result):
match = False
return match
else:
if path.endswith('password'):
match = True
else:
if path == '/location' or path.endswith('location_name'):
new = new.replace(' ', '').lower()
                old = old.replace(' ', '').lower()
if new == old:
match = True
else:
result[path] = str(new) + ' != ' + str(old)
match = False
return match
def dict_camelize(d, path, camelize_first):
if isinstance(d, list):
for i in range(len(d)):
dict_camelize(d[i], path, camelize_first)
elif isinstance(d, dict):
if len(path) == 1:
old_value = d.get(path[0], None)
if old_value is not None:
d[path[0]] = _snake_to_camel(old_value, camelize_first)
else:
sd = d.get(path[0], None)
if sd is not None:
dict_camelize(sd, path[1:], camelize_first)
def dict_upper(d, path):
if isinstance(d, list):
for i in range(len(d)):
dict_upper(d[i], path)
elif isinstance(d, dict):
if len(path) == 1:
old_value = d.get(path[0], None)
if old_value is not None:
d[path[0]] = old_value.upper()
else:
sd = d.get(path[0], None)
if sd is not None:
dict_upper(sd, path[1:])
def dict_rename(d, path, new_name):
if isinstance(d, list):
for i in range(len(d)):
dict_rename(d[i], path, new_name)
elif isinstance(d, dict):
if len(path) == 1:
old_value = d.pop(path[0], None)
if old_value is not None:
d[new_name] = old_value
else:
sd = d.get(path[0], None)
if sd is not None:
dict_rename(sd, path[1:], new_name)
def dict_expand(d, path, outer_dict_name):
if isinstance(d, list):
for i in range(len(d)):
dict_expand(d[i], path, outer_dict_name)
elif isinstance(d, dict):
if len(path) == 1:
old_value = d.pop(path[0], None)
if old_value is not None:
d[outer_dict_name] = d.get(outer_dict_name, {})
d[outer_dict_name][path[0]] = old_value
else:
sd = d.get(path[0], None)
if sd is not None:
dict_expand(sd, path[1:], outer_dict_name)
def _snake_to_camel(snake, capitalize_first=False):
if capitalize_first:
return ''.join(x.capitalize() or '_' for x in snake.split('_'))
else:
return snake.split('_')[0] + ''.join(x.capitalize() or '_' for x in snake.split('_')[1:])
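# Illustrative examples for the helper above (shown as comments only):
#   _snake_to_camel('target_instance_count')        -> 'targetInstanceCount'
#   _snake_to_camel('target_instance_count', True)  -> 'TargetInstanceCount'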
def main():
"""Main execution"""
AzureRMClusters()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
9765317
|
<gh_stars>0
import mock
import os
import pytest
import yaml
from mlflow.entities.run_status import RunStatus
from mlflow.projects import Project
TEST_DIR = "tests"
TEST_PROJECT_DIR = os.path.join(TEST_DIR, "resources", "example_project")
GIT_PROJECT_URI = "https://github.com/mlflow/mlflow-example"
def load_project():
""" Loads an example project for use in tests, returning an in-memory `Project` object. """
with open(os.path.join(TEST_PROJECT_DIR, "MLproject")) as mlproject_file:
project_yaml = yaml.safe_load(mlproject_file.read())
return Project(yaml_obj=project_yaml)
def validate_exit_status(status_str, expected):
assert RunStatus.from_string(status_str) == expected
@pytest.fixture()
def tracking_uri_mock(tmpdir):
with mock.patch("mlflow.tracking.get_tracking_uri") as get_tracking_uri_mock:
get_tracking_uri_mock.return_value = str(tmpdir)
yield get_tracking_uri_mock
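# --- Illustrative sketch (not part of the original test helpers) ---
# A hypothetical test showing how the helpers above fit together: load_project()
# parses the example MLproject file, validate_exit_status() checks a run's final
# state, and the tracking_uri_mock fixture redirects tracking data into a pytest
# temporary directory. It assumes RunStatus.from_string accepts the literal
# status name "FINISHED".
def test_example_usage(tracking_uri_mock):
    project = load_project()
    assert project is not None
    validate_exit_status("FINISHED", RunStatus.FINISHED)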
|
StarcoderdataPython
|
8155987
|
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
def show_color_swatches():
colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
# Sort colors by hue, saturation, value and name.
by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgba(color)[:3])), name)
for name, color in colors.items())
sorted_names = [name for hsv, name in by_hsv]
n = len(sorted_names)
ncols = 4
nrows = n // ncols + 1
fig, ax = plt.subplots(figsize=(12, 8))
# Get height and width
X, Y = fig.get_dpi() * fig.get_size_inches()
h = Y / (nrows + 1)
w = X / ncols
for i, name in enumerate(sorted_names):
col = i % ncols
row = i // ncols
y = Y - (row * h) - h
xi_line = w * (col + 0.05)
xf_line = w * (col + 0.25)
xi_text = w * (col + 0.3)
ax.text(xi_text, y, name, fontsize=(h * 0.8),
horizontalalignment='left',
verticalalignment='center')
ax.hlines(y + h * 0.1, xi_line, xf_line,
color=colors[name], linewidth=(h * 0.6))
ax.set_xlim(0, X)
ax.set_ylim(0, Y)
ax.set_axis_off()
fig.subplots_adjust(left=0, right=1,
top=1, bottom=0,
hspace=0, wspace=0)
plt.show()
|
StarcoderdataPython
|
3365396
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Sortmerna(CMakePackage):
"""SortMeRNA is a program tool for filtering, mapping and OTU-picking NGS
reads in metatranscriptomic and metagenomic data"""
homepage = "https://github.com/biocore/sortmerna"
git = "https://github.com/biocore/sortmerna.git"
version('2017-07-13', commit='<PASSWORD>')
depends_on('zlib')
def install(self, spec, prefix):
mkdirp(prefix.bin)
with working_dir(join_path('spack-build', 'src', 'indexdb')):
install('indexdb', prefix.bin)
with working_dir(join_path('spack-build', 'src', 'sortmerna')):
install('sortmerna', prefix.bin)
|
StarcoderdataPython
|
3323948
|
import random
def get_enemy(player):
if player == 'X':
return 'O'
return 'X'
def determine(board, player):
a = -2
choices = []
if len(board.available_moves()) == 9:
return 4
for move in board.available_moves():
board.make_move(move, player)
val = board.alphabeta(board, get_enemy(player), -2, 2)
board.make_move(move, None)
print "move:", move + 1, "causes:", board.winners[val + 1]
if val > a:
a = val
choices = [move]
elif val == a:
choices.append(move)
return random.choice(choices)
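# --- Illustrative sketch (not part of the original snippet) ---
# determine() relies on a board object with roughly this (hypothetical) contract:
# available_moves() returns the open cell indices, make_move(move, player) places
# or clears a mark, alphabeta(board, player, alpha, beta) returns a score in
# [-1, 1], and `winners` maps score + 1 to a human-readable label. The stub below
# only documents that contract; it is not a playable implementation.
class _BoardContractSketch(object):
    winners = ('loss', 'draw', 'win')  # hypothetical labels
    def available_moves(self):
        return []
    def make_move(self, move, player):
        pass
    def alphabeta(self, board, player, alpha, beta):
        return 0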
|
StarcoderdataPython
|
105803
|
# @time: 2021/12/10 4:00 PM
# Author: pan
# @File: ATM.py
# @Software: PyCharm
# Optional exercise: write an ATM program with the features below; the data lives in the file db.txt
# 1. Deposit: the user enters an amount, and that account's balance in db.txt is increased
# 2. Transfer: user A transfers e.g. 1000 to user B; db.txt is updated by debiting A's account and crediting B's
# 3. Withdrawal: the user enters an amount, and that account's balance in db.txt is reduced
# 4. Balance inquiry: enter an account name to look up its balance
db_data = {}
def update_db_data():
"""更新数据"""
with open("db.txt", mode="rt") as f:
for line in f:
res = line.strip()
name, money = res.split(":")
# print(name, money)
db_data[name] = int(money)
print("当前账户的信息:", db_data)
def update_local_db_data():
"""更新文件中的数据"""
with open("db.txt", mode="wt") as f:
for key, value in db_data.items():
f.write("{}:{}\n".format(key, value))
def is_avalide_user(name):
"""判断用户是否存在"""
if db_data.get(name):
return True
else:
return False
def re_charge():
"""充值功能"""
while True:
inp_name = input("请输入充值账户:").strip()
if not is_avalide_user(inp_name):
print("充值用户不存在,请重新输入充值用户!")
continue
inp_money = input("请输入充值金额:").strip()
if not inp_money.isdigit():
print("请输入数字金额:")
continue
# 可以进行充值
db_data[inp_name] += int(inp_money)
print(db_data)
update_local_db_data()
update_db_data()
break
def tranfer_money():
"""转账功能:用户A向用户B转账1000元,db.txt中完成用户A账号减钱,用户B账号加钱"""
while True:
inp_name_a = input("请输入用户A:").strip()
if not is_avalide_user(inp_name_a):
print("用户A不存在!")
continue
inp_name_b = input("请输入用户B:").strip()
if not is_avalide_user(inp_name_a):
print("用户B不存在!")
continue
if inp_name_a == inp_name_b:
print("不能给自己转账!")
continue
inp_money = input("请输入转账金额:").strip()
if not inp_money.isdigit():
print("请输入数字金额:")
continue
if db_data[inp_name_a] < int(inp_money):
print("转账金额不能大于A账户的余额!")
continue
# 可以进行转账
db_data[inp_name_a] -= int(inp_money)
db_data[inp_name_b] += int(inp_money)
print(db_data)
update_local_db_data()
update_db_data()
break
def hand_out_money():
"""提现功能:用户输入提现金额,db.txt中该账号钱数减少"""
while True:
inp_name = input("请输入用户名:").strip()
if not is_avalide_user(inp_name):
print("该用户不存在!")
continue
inp_money = input("请输入提现金额:").strip()
if not inp_money.isdigit():
print("请输入提现数字金额:")
continue
if db_data[inp_name] < int(inp_money):
print("提现金额过大!")
continue
# 可以提现
db_data[inp_name] -= int(inp_money)
print(db_data)
update_local_db_data()
update_db_data()
break
def check_user_money():
"""查询余额功能:输入账号查询余额"""
while True:
inp_name = input("请输入用户名:").strip()
if not is_avalide_user(inp_name):
print("该用户不存在!")
continue
# 可以查询
user_money = db_data[inp_name]
print("用户当前账户余额为:", user_money)
break
update_db_data()
# re_charge()
# tranfer_money()
# hand_out_money()
check_user_money()
# Bonus (the optional part of this optional exercise): a login feature
# After a successful login the state is kept in memory and the features above act on the logged-in account, so logging in is required before any operation
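# --- Illustrative sketch (not part of the original exercise) ---
# update_db_data() expects db.txt to hold one "name:money" pair per line (the
# same format update_local_db_data() writes back). The helper below creates a
# small sample file with made-up accounts; run it once before update_db_data()
# if db.txt does not exist yet.
def _write_sample_db():
    with open("db.txt", mode="wt") as f:
        f.write("egon:3000\n")
        f.write("alice:5000\n")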
|
StarcoderdataPython
|
3463880
|
# Generated by Django 2.2.4 on 2019-08-14 10:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('evento', '0002_auto_20190814_1001'),
]
operations = [
migrations.RemoveField(
model_name='palestrante',
name='slug',
),
]
|
StarcoderdataPython
|
6543272
|
<filename>pytupi/datasets/loader.py<gh_stars>0
import os
import urllib
import gzip
import cPickle as pickle
class Loader:
def __init__(self, datasets, cache_folder):
self._datasets = datasets
self._cache_folder = cache_folder
def get(self, dataset_name):
dataset = self._datasets[dataset_name]
file_name = dataset['name']
file_path = os.path.join(self._cache_folder, file_name)
uri = dataset['uri']
if not os.path.exists(file_path):
print("Downloading {0} into cache folder ...".format(file_name))
urllib.urlretrieve(uri, file_path)
with gzip.open(file_path, 'rb') as f:
data = pickle.load(f)
return data
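# --- Illustrative usage sketch (not part of the original module) ---
# Loader.get() looks up a dataset by key, downloads dataset['uri'] into the
# cache folder under dataset['name'] on first use, then unpickles the gzipped
# file. The registry and URL below are hypothetical placeholders.
if __name__ == '__main__':
    datasets = {
        'mnist': {
            'name': 'mnist.pkl.gz',
            'uri': 'http://example.com/mnist.pkl.gz',  # placeholder URL
        },
    }
    loader = Loader(datasets, cache_folder='.')
    data = loader.get('mnist')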
|
StarcoderdataPython
|
11200963
|
<filename>xgboost-0.6-py3.6.egg/xgboost/rabit/guide/broadcast.py<gh_stars>0
#!/usr/bin/python
"""
demo python script of rabit
"""
import os
import sys
# add path to wrapper
# for normal run without tracker script, add following line
# sys.path.append(os.path.dirname(__file__) + '/../wrapper')
import rabit
rabit.init()
n = 3
rank = rabit.get_rank()
s = None
if rank == 0:
s = {'hello world':100, 2:3}
print('@node[%d] before-broadcast: s=\"%s\"' % (rank, str(s)))
s = rabit.broadcast(s, 0)
print('@node[%d] after-broadcast: s=\"%s\"' % (rank, str(s)))
rabit.finalize()
|
StarcoderdataPython
|
8183874
|
expected_output = {
"clock_state": {
"system_status": {
"associations_address": "172.16.229.65",
"associations_local_mode": "active",
"clock_offset": 73.819,
"clock_refid": ".GNSS.",
"clock_state": "synchronized",
"clock_stratum": 1,
"root_delay": 1.436,
}
},
"peer": {
"10.2.2.2": {
"local_mode": {
"active": {
"delay": 1.47,
"jitter": 52.506,
"mode": "falseticker",
"offset": -46.76,
"poll": 128,
"reach": 271,
"receive_time": 84,
"refid": "172.16.229.65",
"remote": "10.2.2.2",
"stratum": 2,
"type": "active",
}
}
},
"172.16.229.65": {
"local_mode": {
"active": {
"delay": 1.436,
"jitter": 10.905,
"mode": "synchronized",
"offset": 73.819,
"poll": 64,
"reach": 377,
"receive_time": 59,
"refid": ".GNSS.",
"remote": "172.16.229.65",
"stratum": 1,
"type": "active",
}
}
},
"172.16.229.66": {
"local_mode": {
"active": {
"delay": 0.969,
"jitter": 8.964,
"mode": "final selection set",
"offset": 59.428,
"poll": 64,
"reach": 377,
"receive_time": 63,
"refid": ".GNSS.",
"remote": "172.16.229.66",
"stratum": 1,
"type": "active",
}
}
},
"10.145.32.44": {
"local_mode": {
"active": {
"delay": 42.72,
"jitter": 6.228,
"mode": "final selection set",
"offset": 64.267,
"poll": 64,
"reach": 377,
"receive_time": 61,
"refid": ".GNSS.",
"remote": "10.145.32.44",
"stratum": 1,
"type": "active",
}
}
},
},
}
|
StarcoderdataPython
|
3254134
|
<reponame>jeisch/bokeh
import numpy as np
from bokeh.io import show
from bokeh.plotting import Figure
from bokeh.models import ColumnDataSource, CustomJS, Spinner
from bokeh.layouts import row, column
data = np.random.rand(10, 2)
cds = ColumnDataSource(data=dict(x=data[:, 0], y=data[:, 1]))
p = Figure(x_range=(0, 1), y_range=(0, 1))
points = p.scatter(x='x', y='y', source=cds)
w = Spinner(title="Glyph size", low=1, high=20, step=0.1, value=4, width=100)
cb = CustomJS(args={'points': points}, code="""
points.glyph.size = cb_obj.value
""")
points.glyph.size = w.value
w.js_on_change('value', cb)
show(row(column(w, width=100), p))
|
StarcoderdataPython
|
6591613
|
<reponame>nizovn/luna-sysmgr<filename>hooks/webkitpy/tool/bot/irc_command.py
# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import itertools
import random
import re
from webkitpy.common.config import irc as config_irc
from webkitpy.common.config import urls
from webkitpy.common.config.committers import CommitterList
from webkitpy.common.checkout.changelog import parse_bug_id
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.bot.queueengine import TerminateQueue
from webkitpy.tool.grammar import join_with_separators
def _post_error_and_check_for_bug_url(tool, nicks_string, exception):
tool.irc().post("%s" % exception)
bug_id = parse_bug_id(exception.output)
if bug_id:
bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
tool.irc().post("%s: Ugg... Might have created %s" % (nicks_string, bug_url))
# FIXME: Merge with Command?
class IRCCommand(object):
def execute(self, nick, args, tool, sheriff):
raise NotImplementedError, "subclasses must implement"
class LastGreenRevision(IRCCommand):
def execute(self, nick, args, tool, sheriff):
return "%s: %s" % (nick,
urls.view_revision_url(tool.buildbot.last_green_revision()))
class Restart(IRCCommand):
def execute(self, nick, args, tool, sheriff):
tool.irc().post("Restarting...")
raise TerminateQueue()
class Rollout(IRCCommand):
def _extract_revisions(self, arg):
revision_list = []
possible_revisions = arg.split(",")
for revision in possible_revisions:
revision = revision.strip()
if not revision:
continue
revision = revision.lstrip("r")
# If one part of the arg isn't in the correct format,
# then none of the arg should be considered a revision.
if not revision.isdigit():
return None
revision_list.append(int(revision))
return revision_list
def _parse_args(self, args):
if not args:
return (None, None)
svn_revision_list = []
remaining_args = args[:]
# First process all revisions.
while remaining_args:
new_revisions = self._extract_revisions(remaining_args[0])
if not new_revisions:
break
svn_revision_list += new_revisions
remaining_args = remaining_args[1:]
# Was there a revision number?
if not len(svn_revision_list):
return (None, None)
# Everything left is the reason.
rollout_reason = " ".join(remaining_args)
return svn_revision_list, rollout_reason
def _responsible_nicknames_from_revisions(self, tool, sheriff, svn_revision_list):
commit_infos = map(tool.checkout().commit_info_for_revision, svn_revision_list)
nickname_lists = map(sheriff.responsible_nicknames_from_commit_info, commit_infos)
return sorted(set(itertools.chain(*nickname_lists)))
def _nicks_string(self, tool, sheriff, requester_nick, svn_revision_list):
        # FIXME: _parse_args guarantees that our svn_revision_list is all numbers.
# However, it's possible our checkout will not include one of the revisions,
# so we may need to catch exceptions from commit_info_for_revision here.
target_nicks = [requester_nick] + self._responsible_nicknames_from_revisions(tool, sheriff, svn_revision_list)
return ", ".join(target_nicks)
def _update_working_copy(self, tool):
tool.scm().ensure_clean_working_directory(force_clean=True)
tool.executive.run_and_throw_if_fail(tool.port().update_webkit_command(), quiet=True, cwd=tool.scm().checkout_root)
def execute(self, nick, args, tool, sheriff):
svn_revision_list, rollout_reason = self._parse_args(args)
if (not svn_revision_list or not rollout_reason):
# return is equivalent to an irc().post(), but makes for easier unit testing.
return "%s: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON" % nick
self._update_working_copy(tool)
# FIXME: IRCCommand should bind to a tool and have a self._tool like Command objects do.
# Likewise we should probably have a self._sheriff.
nicks_string = self._nicks_string(tool, sheriff, nick, svn_revision_list)
revision_urls_string = join_with_separators([urls.view_revision_url(revision) for revision in svn_revision_list])
tool.irc().post("%s: Preparing rollout for %s..." % (nicks_string, revision_urls_string))
try:
complete_reason = "%s (Requested by %s on %s)." % (
rollout_reason, nick, config_irc.channel)
bug_id = sheriff.post_rollout_patch(svn_revision_list, complete_reason)
bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
tool.irc().post("%s: Created rollout: %s" % (nicks_string, bug_url))
except ScriptError, e:
tool.irc().post("%s: Failed to create rollout patch:" % nicks_string)
_post_error_and_check_for_bug_url(tool, nicks_string, e)
class RollChromiumDEPS(IRCCommand):
def _parse_args(self, args):
if not args:
return
revision = args[0].lstrip("r")
if not revision.isdigit():
return
return revision
def execute(self, nick, args, tool, sheriff):
revision = self._parse_args(args)
roll_target = "r%s" % revision if revision else "last-known good revision"
tool.irc().post("%s: Rolling Chromium DEPS to %s" % (nick, roll_target))
try:
bug_id = sheriff.post_chromium_deps_roll(revision)
bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
tool.irc().post("%s: Created DEPS roll: %s" % (nick, bug_url))
except ScriptError, e:
match = re.search(r"Current Chromium DEPS revision \d+ is newer than \d+\.", e.output)
if match:
tool.irc().post("%s: %s" % (nick, match.group(0)))
return
tool.irc().post("%s: Failed to create DEPS roll:" % nick)
_post_error_and_check_for_bug_url(tool, nick, e)
class Help(IRCCommand):
def execute(self, nick, args, tool, sheriff):
return "%s: Available commands: %s" % (nick, ", ".join(sorted(visible_commands.keys())))
class Hi(IRCCommand):
def execute(self, nick, args, tool, sheriff):
quips = tool.bugs.quips()
quips.append('"Only you can prevent forest fires." -- Smokey the Bear')
return random.choice(quips)
class Whois(IRCCommand):
def _nick_or_full_record(self, contributor):
if contributor.irc_nicknames:
return ', '.join(contributor.irc_nicknames)
return unicode(contributor)
def execute(self, nick, args, tool, sheriff):
if len(args) != 1:
return "%s: Usage: whois SEARCH_STRING" % nick
search_string = args[0]
# FIXME: We should get the ContributorList off the tool somewhere.
contributors = CommitterList().contributors_by_search_string(search_string)
if not contributors:
return "%s: Sorry, I don't know any contributors matching '%s'." % (nick, search_string)
if len(contributors) > 5:
return "%s: More than 5 contributors match '%s', could you be more specific?" % (nick, search_string)
if len(contributors) == 1:
contributor = contributors[0]
if not contributor.irc_nicknames:
return "%s: %s hasn't told me their nick. Boo hoo :-(" % (nick, contributor)
if contributor.emails and search_string.lower() not in map(lambda email: email.lower(), contributor.emails):
formattedEmails = ', '.join(contributor.emails)
return "%s: %s is %s (%s). Why do you ask?" % (nick, search_string, self._nick_or_full_record(contributor), formattedEmails)
else:
return "%s: %s is %s. Why do you ask?" % (nick, search_string, self._nick_or_full_record(contributor))
contributor_nicks = map(self._nick_or_full_record, contributors)
contributors_string = join_with_separators(contributor_nicks, only_two_separator=" or ", last_separator=', or ')
return "%s: I'm not sure who you mean? %s could be '%s'." % (nick, contributors_string, search_string)
class Eliza(IRCCommand):
therapist = None
def __init__(self):
if not self.therapist:
import webkitpy.thirdparty.autoinstalled.eliza as eliza
Eliza.therapist = eliza.eliza()
def execute(self, nick, args, tool, sheriff):
return "%s: %s" % (nick, self.therapist.respond(" ".join(args)))
class CreateBug(IRCCommand):
def execute(self, nick, args, tool, sheriff):
if not args:
return "%s: Usage: create-bug BUG_TITLE" % nick
bug_title = " ".join(args)
bug_description = "%s\nRequested by %s on %s." % (bug_title, nick, config_irc.channel)
# There happens to be a committers list hung off of Bugzilla, so
# re-using that one makes things easiest for now.
requester = tool.bugs.committers.contributor_by_irc_nickname(nick)
requester_email = requester.bugzilla_email() if requester else None
try:
bug_id = tool.bugs.create_bug(bug_title, bug_description, cc=requester_email, assignee=requester_email)
bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
return "%s: Created bug: %s" % (nick, bug_url)
except Exception, e:
return "%s: Failed to create bug:\n%s" % (nick, e)
# FIXME: Lame. We should have an auto-registering CommandCenter.
visible_commands = {
"help": Help,
"hi": Hi,
"last-green-revision": LastGreenRevision,
"restart": Restart,
"rollout": Rollout,
"whois": Whois,
"create-bug": CreateBug,
"roll-chromium-deps": RollChromiumDEPS,
}
# Add revert as an "easter egg" command. Why?
# revert is the same as rollout and it would be confusing to list both when
# they do the same thing. However, this command is a very natural thing for
# people to use and it seems silly to have them hunt around for "rollout" instead.
commands = visible_commands.copy()
commands["revert"] = Rollout
|
StarcoderdataPython
|
6629439
|
from fastapi import FastAPI
import uvicorn
app = FastAPI()
@app.get("/")
def home():
return "Hello World"
|
StarcoderdataPython
|
4800955
|
<reponame>FrancescoConforte/ICT-in-Transport-System
#%%
import pymongo as pm
from datetime import datetime
client = pm.MongoClient('bigdatadb.polito.it',
ssl=True,
authSource = 'carsharing',
tlsAllowInvalidCertificates=True)
db = client['carsharing'] #Choose the DB to use
db.authenticate('ictts', 'Ictts16!')# mechanism='MONGODB-CR') #authentication
# Bookings_collection = db['PermanentBookings'] # Collection for Car2go to use
#%% Step1: How many documents are present in each collection?
collectionName = ['ActiveBookings','ActiveParkings','PermanentBookings',
'PermanentParkings','enjoy_ActiveBookings',
'enjoy_ActiveParkings','enjoy_PermanentBookings',
'enjoy_PermanentParkings']
for i in collectionName:
collection=db.get_collection(i)
print(i + ": " + str(collection.estimated_document_count()))
#%% Step1: For which cities is the system collecting data?
print('Cities Car2Go: ' +
str(db.get_collection('PermanentBookings').distinct("city")))
print('Cities Enjoy: ' +
str(db.get_collection('enjoy_PermanentBookings').distinct("city")))
#%% Step1: When did the collection start? When did it end?
timestamps_sorted_car2go = db.get_collection('PermanentBookings').find().sort(
'init_time', pm.DESCENDING).distinct('init_time')
start_car2go=timestamps_sorted_car2go[-1]
end_car2go=timestamps_sorted_car2go[0]
timestamps_sorted_enjoy = db.get_collection('enjoy_PermanentBookings').find().sort(
'init_time', pm.DESCENDING).distinct('init_time')
start_enjoy=timestamps_sorted_enjoy[-1]
end_enjoy=timestamps_sorted_enjoy[0]
ts_car2go = int(start_car2go) # initial time
te_car2go = int(end_car2go) #ending time
ts_enjoy = int(start_enjoy) # initial time
te_enjoy = int(end_enjoy) #ending time
print("UTC time of the first car collected in car2go")
print(datetime.utcfromtimestamp(ts_car2go).strftime('%Y-%m-%d %H:%M:%S'))
print("UTC time of the last car collected in car2go")
print(datetime.utcfromtimestamp(te_car2go).strftime('%Y-%m-%d %H:%M:%S'))
less = db.get_collection('PermanentBookings').find_one({'init_time':start_car2go})
date = less.get('init_date')
city_car2go = less.get('city')
print("Local time of the first car collected for car2go in " + str(city_car2go) + ": ")
print(date)
print("UTC time of the first car collected in enjoy")
print(datetime.utcfromtimestamp(ts_enjoy).strftime('%Y-%m-%d %H:%M:%S'))
print("UTC time of the last car collected in enjoy")
print(datetime.utcfromtimestamp(te_enjoy).strftime('%Y-%m-%d %H:%M:%S'))
less_enjoy = db.get_collection('enjoy_PermanentBookings').find_one({'init_time':start_enjoy})
city_enjoy = less_enjoy.get('city')
date_enjoy = less_enjoy.get('init_date')
print("Local time of the first car collected for enjoy in " + str(city_enjoy) + ": ")
print(date_enjoy)
#%% Step1: How many cars are available in each city?
cities = ['Milano','Calgary','Amsterdam']
start = datetime(2017, 11, 1, 0, 0, 0)  # 1 November 2017 (unixtime: 1509494400)
end = datetime(2017, 11, 30, 23, 59, 59)  # 30 November 2017 (unixtime: 1512086399)
for c in cities:
car=db.PermanentBookings.distinct("plate", {"city": c})
print("Cars in " + c + ": " + str(len(car)))
#%% Step1: How many bookings were recorded in November 2017 in each city?
bookings = db.PermanentBookings.count_documents(
{"$and": [
{"city":c},
{"init_date": {"$gte":start} },
{"final_date": {"$lte":end} }
]
}
)
print("Booked cars in november 2017 in " + c + " are: " + str(bookings))
#%% Step1: How many bookings also have the alternative transportation modes recorded in each city?
alternative = db.PermanentBookings.count_documents(
{"$and": [ {"city":c},
{"$or": [ {"walking.distance":{"$ne":-1} },
{"driving.distance":{"$ne":-1} },
{"public_transport.distance":{"$ne":-1} }
]
}
]
}
)
print("Alternative transportation mode for " + c + ": " + str(alternative))
#%% Test: alternative aggregation approach (kept commented out)
# h=[]
# for c in cities:
# s = db.get_collection('PermanentBookings').aggregate(
# [
# { "$match" : {"$and": [ { "city": c },
# { "init_date": { "$gte":start } },
# { "final_date": { "$lte":end } },
# ]
# }
# },
# { "$project": {
# "_id": 1,
# "city": 1,
# }
# },
# { "$group": {
# "_id": "$city",
# "count": { "$sum": 1 }
# }
# },
# ])
# h.append(list(s))
|
StarcoderdataPython
|
202719
|
<filename>core/platform/auth/firebase_auth_services_test.py
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Firebase Authentication platform services."""
from __future__ import absolute_import
from __future__ import unicode_literals
import contextlib
import datetime
import itertools
import json
import logging
from unittest import mock
from core import feconf
from core import python_utils
from core import utils
from core.constants import constants
from core.domain import auth_domain
from core.domain import user_services
from core.platform import models
from core.platform.auth import firebase_auth_services
from core.tests import test_utils
import firebase_admin
from firebase_admin import auth as firebase_auth
from firebase_admin import exceptions as firebase_exceptions
from typing import ContextManager, Dict, List, Optional, Tuple, Union, cast
import webapp2
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import auth_models
auth_models, user_models = (
models.Registry.import_models([models.NAMES.auth, models.NAMES.user]))
UidsPartitionTupleType = Tuple[
List[Tuple[int, str]],
List[Tuple[int, str]]
]
UidsZipPartitionTupleType = Tuple[
List[Tuple[int, Tuple[str, str]]],
List[Tuple[int, Tuple[str, str]]]
]
RecordsPartitionTupleType = Tuple[
List[Tuple[int, Tuple[firebase_auth.ImportUserRecord, str]]],
List[Tuple[int, Tuple[firebase_auth.ImportUserRecord, str]]]
]
class FirebaseAdminSdkStub:
"""Helper class for swapping the Firebase Admin SDK with a stateful stub.
NOT INTENDED TO BE USED DIRECTLY. Just install it and then interact with the
Firebase Admin SDK as if it were real.
FRAGILE! This class returns users as firebase_admin.auth.UserRecord objects
for API parity, but the Firebase Admin SDK doesn't expose a constructor for
it as part of the public API. To compensate, we depend on implementation
details (isolated to the _set_user_fragile method) that may stop working in
newer versions of the SDK. We're OK with taking that risk, because this is a
test-only class.
Example:
class Test(test_utils.TestBase):
def setUp(self):
super(Test, self).setUp()
self.firebase_sdk_stub = FirebaseAdminSdkStub(self)
self.firebase_sdk_stub.install()
self.firebase_sdk_stub.create_user('foo')
def tearDown(self):
self.firebase_sdk_stub.uninstall()
super(Test, self).tearDown()
def test_sdk(self):
user_record = firebase_admin.get_user('uid')
self.assertEqual(user_record.uid, 'uid')
"""
_IMPLEMENTED_SDK_FUNCTION_NAMES = [
'create_session_cookie',
'create_user',
'delete_user',
'delete_users',
'get_user',
'get_users',
'get_user_by_email',
'import_users',
'list_users',
'revoke_refresh_tokens',
'set_custom_user_claims',
'update_user',
'verify_id_token',
'verify_session_cookie',
]
_UNIMPLEMENTED_SDK_FUNCTION_NAMES = [
'create_custom_token',
'generate_email_verification_link',
'generate_password_reset_link',
'generate_sign_in_with_email_link',
'get_user_by_phone_number',
]
def __init__(self) -> None:
self._users_by_uid: Dict[str, firebase_auth.UserRecord] = {}
self._uid_by_session_cookie: Dict[str, str] = {}
self._swap_stack: Optional[contextlib.ExitStack] = None
self._test: Optional[test_utils.TestBase] = None
def install(self, test: test_utils.TestBase) -> None:
"""Installs the stub on the given test instance. Idempotent.
Args:
test: test_utils.TestBase. The test to install the stub on.
"""
self.uninstall()
self._test = test
with contextlib.ExitStack() as swap_stack:
for name in self._IMPLEMENTED_SDK_FUNCTION_NAMES:
swap_stack.enter_context(
test.swap(firebase_auth, name, getattr(self, name)))
for name in self._UNIMPLEMENTED_SDK_FUNCTION_NAMES:
swap_stack.enter_context(test.swap_to_always_raise(
firebase_auth, name, NotImplementedError))
# Allows us to exit the current context manager without closing the
# entered contexts. They will be exited later by the uninstall()
# method.
self._swap_stack = swap_stack.pop_all()
def uninstall(self) -> None:
"""Uninstalls the stub. Idempotent."""
if self._swap_stack:
self._swap_stack.close()
self._swap_stack = None
def create_session_cookie(
self, id_token: str, unused_max_age: datetime.timedelta
) -> str:
"""Creates a new session cookie which expires after given duration.
Args:
id_token: str. The ID Token to generate the cookie from.
unused_max_age: datetime.timedelta. The duration the cookie remains
valid. Unused by our stub implementation.
Returns:
str. A session cookie that can validate the user.
"""
if not id_token:
raise firebase_auth.InvalidIdTokenError('missing id_token')
# NOTE: Session cookies are fundamentally different, in terms of
# encoding and security, from ID Tokens. Regardless, for the purposes of
# this stub, we use the same values for both.
session_cookie = id_token
# NOTE: `uid` (Firebase account ID) is the 'sub' claim of the ID token.
claims = self._decode_user_claims(id_token)
# Letting mypy know that 'claims' is not None and
# claims['sub'] is of type str.
assert claims and isinstance(claims['sub'], str)
self._uid_by_session_cookie[session_cookie] = claims['sub']
return session_cookie
def create_user(
self, uid: str, email: Optional[str] = None, disabled: bool = False
) -> str:
"""Adds user to storage if new, otherwise raises an error.
Args:
uid: str. The unique Firebase account ID for the user.
email: str|None. The email address for the user, or None.
disabled: bool. Whether the user account is to be disabled.
Returns:
str. An ID token that represents the given user's authorization.
Raises:
ValueError. The uid argument was not provided.
UidAlreadyExistsError. The uid has already been assigned to a user.
"""
if uid in self._users_by_uid:
raise firebase_auth.UidAlreadyExistsError(
'uid=%r already exists' % uid, None, None)
self._set_user_fragile(uid, email, disabled, None)
return self._encode_user_claims(uid)
def delete_user(self, uid: str) -> None:
"""Removes user from storage if found, otherwise raises an error.
Args:
uid: str. The Firebase account ID of the user.
Raises:
UserNotFoundError. The Firebase account has not been created yet.
"""
if uid not in self._users_by_uid:
raise firebase_auth.UserNotFoundError('%s not found' % uid)
del self._users_by_uid[uid]
def delete_users(
self, uids: List[str], force_delete: bool = False
) -> 'firebase_auth.BatchDeleteAccountsResponse':
"""Deletes the users identified by the specified user ids.
Deleting a non-existing user does not generate an error (the method is
idempotent). Non-existing users are considered to be successfully
deleted and are therefore not reported as errors.
A maximum of 1000 identifiers may be supplied. If more than 1000
identifiers are supplied, this method raises a `ValueError`.
Args:
uids: A list of strings indicating the uids of the users to be
deleted. Must have <= 1000 entries.
force_delete: Optional parameter that indicates if users should be
deleted, even if they're not disabled. Defaults to False.
Returns:
BatchDeleteAccountsResponse. Holds the errors encountered, if any.
Raises:
ValueError. If any of the identifiers are invalid or if more than
1000 identifiers are specified.
"""
if len(uids) > 1000:
            raise ValueError('`uids` parameter must have <= 1000 entries.')
if force_delete:
uids_to_delete = set(uids)
errors = []
else:
disabled_uids, enabled_uids = cast(
UidsPartitionTupleType,
utils.partition(
uids,
predicate=lambda uid: self._users_by_uid[uid].disabled,
enumerated=True))
uids_to_delete = {uid for _, uid in disabled_uids}
errors = [(i, 'uid=%r must be disabled first' % uid)
for i, uid in enabled_uids]
for uid in uids_to_delete.intersection(self._users_by_uid):
del self._users_by_uid[uid]
return self._create_delete_users_result_fragile(errors)
def get_user(self, uid: str) -> firebase_auth.UserRecord:
"""Returns user with given ID if found, otherwise raises an error.
Args:
uid: str. The Firebase account ID of the user.
Returns:
firebase_auth.UserRecord. The UserRecord object of the user.
Raises:
UserNotFoundError. The Firebase account has not been created yet.
"""
users = self.get_users([firebase_auth.UidIdentifier(uid)]).users
if len(users) == 0:
raise firebase_auth.UserNotFoundError('%s not found' % uid)
return users[0]
def get_users(
self, identifiers: List[firebase_auth.UidIdentifier]
) -> firebase_auth.GetUsersResult:
"""Returns user with given ID if found, otherwise raises an error.
Args:
identifiers: list(firebase_auth.UserIdentifier). The Firebase
account IDs of the user.
Returns:
firebase_auth.GetUsersResult. The UserRecord object of the user.
Raises:
UserNotFoundError. The Firebase account has not been created yet.
"""
found_users = [
self._users_by_uid[identifier.uid] for identifier in identifiers
if identifier.uid in self._users_by_uid
]
not_found_identifiers = [
identifier for identifier in identifiers
if identifier.uid not in self._users_by_uid
]
return firebase_auth.GetUsersResult(found_users, not_found_identifiers)
def get_user_by_email(self, email: str) -> firebase_auth.UserRecord:
"""Returns user with given email if found, otherwise raises an error.
Args:
email: str. The email address of the user.
Returns:
UserRecord. The UserRecord object of the user.
Raises:
UserNotFoundError. The Firebase account has not been created yet.
"""
matches = (u for u in self._users_by_uid.values() if u.email == email)
user = next(matches, None)
if user is None:
raise firebase_auth.UserNotFoundError('%s not found' % email)
return user
def import_users(
self, records: List[firebase_admin.auth.ImportUserRecord]
) -> firebase_admin.auth.UserImportResult:
"""Adds the given user records to the stub's storage.
Args:
records: list(firebase_admin.auth.ImportUserRecord). The users to
add.
Returns:
firebase_admin.auth.UserImportResult. Object with details about the
operation.
"""
for record in records:
self._set_user_fragile(
record.uid, record.email, record.disabled,
json.dumps(record.custom_claims))
return self._create_user_import_result_fragile(len(records), [])
def list_users(
self, page_token: Optional[str] = None, max_results: int = 1000
) -> firebase_admin.auth.ListUsersPage:
"""Retrieves a page of user accounts from a Firebase project.
The `page_token` argument governs the starting point of the page. The
`max_results` argument governs the maximum number of user accounts that
may be included in the returned page. This function never returns None.
If there are no user accounts in the Firebase project, this returns an
empty page.
Args:
page_token: str|None. A non-empty page token string, which indicates
the starting point of the page (optional). Defaults to `None`,
which will retrieve the first page of users.
max_results: int. A positive integer indicating the maximum
number of users to include in the returned page (optional).
Defaults to 1000, which is also the maximum number allowed.
Returns:
ListUsersPage. A ListUsersPage instance.
Raises:
ValueError. If max_results or page_token are invalid.
FirebaseError. If an error occurs while retrieving the user
accounts.
"""
if max_results > 1000:
raise ValueError('max_results=%r must be <= 1000' % max_results)
# NOTE: This is only sorted to make unit testing easier.
all_users = sorted(self._users_by_uid.values(), key=lambda u: u.uid)
page_list = [
[user for user in user_group if user is not None]
for user_group in utils.grouper(all_users, max_results)
]
if not page_list:
return self._create_list_users_page_fragile([], 0)
try:
page_index = int(page_token) if page_token is not None else 0
except (ValueError, TypeError):
raise ValueError('page_token=%r is invalid' % page_token)
if 0 <= page_index < len(page_list):
return self._create_list_users_page_fragile(page_list, page_index)
else:
raise ValueError('page_token=%r is invalid' % page_token)
def revoke_refresh_tokens(self, uid: str) -> None:
"""Revokes all refresh tokens for an existing user.
Args:
uid: str. The uid (Firebase account ID) of the user.
Raises:
UserNotFoundError. The Firebase account has not been created yet.
"""
if uid not in self._users_by_uid:
raise firebase_auth.UserNotFoundError('%s not found' % uid)
self._uid_by_session_cookie = {
k: v for k, v in self._uid_by_session_cookie.items() if v != uid
}
def set_custom_user_claims(
self, uid: str, custom_claims: Optional[str]
) -> str:
"""Updates the custom claims of the given user.
Args:
uid: str. The Firebase account ID of the user.
custom_claims: str|None. A string-encoded JSON with string keys and
values, e.g. '{"role":"admin"}', or None.
Returns:
str. The uid of the user.
Raises:
UserNotFoundError. The Firebase account has not been created yet.
"""
return self.update_user(uid, custom_claims=custom_claims)
def update_user(
self,
uid: str,
email: Optional[str] = None,
disabled: bool = False,
custom_claims: Optional[str] = None
) -> str:
"""Updates the user in storage if found, otherwise raises an error.
Args:
uid: str. The Firebase account ID of the user.
email: str|None. The email address for the user, or None.
disabled: bool. Whether the user account is to be disabled.
custom_claims: str|None. A string-encoded JSON with string keys and
values, e.g. '{"role":"admin"}', or None.
Returns:
str. The uid of the user.
Raises:
UserNotFoundError. The Firebase account has not been created yet.
"""
if uid not in self._users_by_uid:
raise firebase_auth.UserNotFoundError('%s not found' % uid)
self._set_user_fragile(uid, email, disabled, custom_claims)
return uid
def verify_id_token(
self, token: str
) -> Dict[str, Optional[Union[str, bool]]]:
"""Returns claims for the corresponding user if the ID token is valid.
Args:
token: str. The ID token.
Returns:
dict(str: *). Claims for the user corresponding to the ID token.
"""
claims = self._decode_user_claims(token)
assert claims is not None
uid = claims['sub']
if uid not in self._users_by_uid:
raise firebase_auth.UserNotFoundError('%s not found' % uid)
return claims
def verify_session_cookie(
self, session_cookie: str, check_revoked: bool = False
) -> Dict[str, Optional[Union[str, bool]]]:
"""Returns claims for the corresponding user if the cookie is valid.
Args:
session_cookie: str. The session cookie.
check_revoked: bool. When true, checks whether the cookie has been
revoked.
Returns:
dict(str: *). Claims for the user corresponding to the session
cookie.
"""
if check_revoked and session_cookie not in self._uid_by_session_cookie:
raise firebase_auth.RevokedSessionCookieError(
'The provided Firebase session cookie is invalid')
claims = self._decode_user_claims(session_cookie)
assert claims is not None
uid = claims['sub']
if uid not in self._users_by_uid:
raise firebase_auth.UserNotFoundError('%s not found' % uid)
return claims
def assert_is_user(self, uid: str) -> None:
"""Asserts that an account with the given id exists.
NOTE: This method can only be called after the stub has been installed
to a test case!
Args:
uid: str. The ID of the user to confirm.
"""
assert self._test is not None
self._test.assertIn(
uid, self._users_by_uid,
msg='Firebase account not found: uid=%r' % uid)
def assert_is_not_user(self, uid: str) -> None:
"""Asserts that an account with the given id does not exist.
NOTE: This method can only be called after the stub has been installed
to a test case!
Args:
uid: str. The ID of the user to confirm.
"""
assert self._test is not None
self._test.assertNotIn(
uid, self._users_by_uid,
msg='Unexpected Firebase account exists: uid=%r' % uid)
def assert_is_super_admin(self, uid: str) -> None:
"""Asserts that the given ID has super admin privileges.
NOTE: This method can only be called after the stub has been installed
to a test case!
Args:
uid: str. The ID of the user to confirm.
"""
self.assert_is_user(uid)
custom_claims = self.get_user(uid).custom_claims or {}
assert self._test is not None
self._test.assertEqual(
custom_claims.get('role', None), feconf.FIREBASE_ROLE_SUPER_ADMIN)
def assert_is_not_super_admin(self, uid: str) -> None:
"""Asserts that the given ID does not have super admin privileges.
NOTE: This method can only be called after the stub has been installed
to a test case!
Args:
uid: str. The ID of the user to confirm.
"""
self.assert_is_user(uid)
custom_claims = self.get_user(uid).custom_claims or {}
assert self._test is not None
self._test.assertNotEqual(
custom_claims.get('role', None), feconf.FIREBASE_ROLE_SUPER_ADMIN)
def assert_is_disabled(self, uid: str) -> None:
"""Asserts that the given ID is a disabled account.
NOTE: This method can only be called after the stub has been installed
to a test case!
Args:
uid: str. The ID of the user to confirm.
"""
self.assert_is_user(uid)
assert self._test is not None
self._test.assertTrue(self.get_user(uid).disabled)
def assert_is_not_disabled(self, uid: str) -> None:
"""Asserts that the given ID is not a disabled account.
NOTE: This method can only be called after the stub has been installed
to a test case!
Args:
uid: str. The ID of the user to confirm.
"""
self.assert_is_user(uid)
assert self._test is not None
self._test.assertFalse(self.get_user(uid).disabled)
def assert_is_user_multi(self, uids: List[str]) -> None:
"""Asserts that every account with the given ids exist.
NOTE: This method can only be called after the stub has been installed
to a test case!
Args:
uids: list(str). The IDs of the users to confirm.
"""
not_found = [uid for uid in uids if uid not in self._users_by_uid]
assert self._test is not None
self._test.assertEqual(
not_found, [],
msg='Firebase accounts not found: uids=%r' % (not_found,))
def assert_is_not_user_multi(self, uids: List[str]) -> None:
"""Asserts that every account with the given ids do not exist.
NOTE: This method can only be called after the stub has been installed
to a test case!
Args:
uids: list(str). The IDs of the users to confirm.
"""
found = [uid for uid in uids if uid in self._users_by_uid]
assert self._test is not None
self._test.assertEqual(
found, [],
msg='Unexpected Firebase accounts exists: uids=%r' % (found,))
def mock_delete_users_error(
self,
batch_error_pattern: Tuple[Optional[Exception]] = (None,),
individual_error_pattern: Tuple[Optional[bool]] = (None,)
) -> ContextManager[None]:
"""Returns a context in which `delete_users` fails according to the
given patterns.
Example:
with mock_delete_users_error(batch_error_pattern=(None, Exception)):
delete_users(...) # OK.
delete_users(...) # Raises Exception.
delete_users(...) # OK.
delete_users(...) # Raises Exception.
delete_users(...) # OK.
Args:
batch_error_pattern: tuple(Exception|None). Enumerates which
successive calls will raise an exception. For values of None, no
exception is raised. The pattern is cycled. By default, an error
will never be raised.
individual_error_pattern: tuple(bool). Enumerates which individual
users will cause an error. The pattern is cycled. By default, an
error will never be raised.
Returns:
Context manager. The context manager with the mocked implementation.
"""
updated_batch_error_pattern = itertools.cycle(batch_error_pattern)
updated_individual_error_pattern = (
itertools.cycle(individual_error_pattern))
def mock_delete_users(
uids: List[str], force_delete: bool = False
) -> 'firebase_auth.BatchDeleteAccountsResponse':
"""Mock function that fails according to the input patterns."""
error_to_raise = next(updated_batch_error_pattern)
if error_to_raise is not None:
raise error_to_raise
uids_to_delete, uids_to_fail = cast(
UidsZipPartitionTupleType,
utils.partition(
python_utils.ZIP(uids, updated_individual_error_pattern),
predicate=lambda uid_and_error: uid_and_error[1] is None,
enumerated=True))
updated_uids_to_delete = [uid for _, (uid, _) in uids_to_delete]
errors = [(i, error) for i, (_, error) in uids_to_fail]
self.delete_users(updated_uids_to_delete, force_delete=force_delete)
return self._create_delete_users_result_fragile(errors)
assert self._test is not None
return self._test.swap(firebase_auth, 'delete_users', mock_delete_users)
def mock_import_users_error(
self,
batch_error_pattern: Tuple[Optional[Exception]] = (None,),
individual_error_pattern: Tuple[Optional[str]] = (None,)
) -> ContextManager[None]:
"""Returns a context in which `import_users` fails according to the
given patterns.
Example:
with mock_import_users_error(batch_error_pattern=(False, True)):
import_users(...) # OK
import_users(...) # Raises!
import_users(...) # OK
import_users(...) # Raises!
import_users(...) # OK
Args:
batch_error_pattern: tuple(Exception|None). Enumerates which
successive calls will raise an exception. For values of None, no
exception is raised. The pattern is cycled. By default, an error
will never be raised.
individual_error_pattern: tuple(str|None). Enumerates which
individual users will cause an error. Each value is either the
error reason (a string), or None. The pattern is cycled. By
default, an error will never be raised.
Returns:
Context manager. The context manager with the mocked implementation.
"""
updated_batch_error_pattern = itertools.cycle(batch_error_pattern)
updated_individual_error_pattern = (
itertools.cycle(individual_error_pattern))
def mock_import_users(
records: List[firebase_admin.auth.ImportUserRecord]
) -> firebase_auth.UserImportResult:
"""Mock function that fails according to the input patterns."""
error_to_raise = next(updated_batch_error_pattern)
if error_to_raise is not None:
raise error_to_raise
records_to_import, records_to_fail = cast(
RecordsPartitionTupleType,
utils.partition(
python_utils.ZIP(records, updated_individual_error_pattern),
predicate=(
lambda record_and_error: record_and_error[1] is None),
enumerated=True))
self.import_users([record for _, (record, _) in records_to_import])
return self._create_user_import_result_fragile(
len(records), cast(
List[Tuple[int, str]],
[(i, error) for i, (_, error) in records_to_fail]))
assert self._test is not None
return self._test.swap(firebase_auth, 'import_users', mock_import_users)
def _encode_user_claims(self, uid: str) -> str:
"""Returns encoded claims for the given user.
Args:
uid: str. The ID of the target user.
Returns:
str. An encoded representation of the user's claims.
"""
user = self.get_user(uid)
claims = {'sub': user.uid}
if user.email:
claims['email'] = user.email
if user.custom_claims:
claims.update(user.custom_claims)
return json.dumps(claims)
def _decode_user_claims(
self, encoded_claims: str
) -> Optional[Dict[str, Optional[Union[str, bool]]]]:
"""Returns the given decoded claims.
Args:
encoded_claims: str. The encoded claims.
Returns:
dict(str: *)|None. The decoded claims or None.
"""
try:
# Casting the result here because the type stubs for library 'json'
# returns 'Any' from json.loads.
# https://github.com/python/typeshed/blob/30ad9e945f42cca1190cdba58c65bdcfc313480f/stdlib/json/__init__.pyi#L36
return cast(
Dict[str, Optional[Union[str, bool]]],
json.loads(encoded_claims))
except ValueError:
return None
def _set_user_fragile(
self,
uid: str,
email: Optional[str],
disabled: bool,
custom_claims: Optional[str]
) -> None:
"""Sets the given properties for the corresponding user.
FRAGILE! The dict keys used by the UserRecord constructor are an
implementation detail that may break in future versions of the SDK.
Args:
uid: str. The Firebase account ID of the user.
email: str. The email address for the user.
disabled: bool. Whether the user account is to be disabled.
custom_claims: str. A string-encoded JSON with string keys and
values, e.g. '{"role":"admin"}'.
"""
self._users_by_uid[uid] = firebase_auth.UserRecord({
'localId': uid, 'email': email, 'disabled': disabled,
'customAttributes': custom_claims,
})
def _create_list_users_page_fragile(
self,
page_list: List[List[firebase_auth.UserRecord]],
page_index: int
) -> mock.Mock:
"""Creates a new ListUsersPage mock.
FRAGILE! The mock is not from the real SDK, so it's vulnerable to
becoming out-of-sync with the interface of the real ListUsersPage.
Args:
page_list: list(list(UserRecord)). The pages of users.
page_index: int. The starting index of the page.
Returns:
Mock. A mock implementation of ListUsersPage.
"""
page = mock.Mock()
if page_index < len(page_list):
page.users = page_list[page_index]
page.has_next_page = (page_index + 1) < len(page_list)
page.next_page_token = (
'' if not page.has_next_page else
python_utils.UNICODE(page_index + 1))
page.get_next_page = lambda: (
None if not page.has_next_page else
self._create_list_users_page_fragile(page_list, page_index + 1))
page.iterate_all = lambda: (
itertools.chain.from_iterable(page_list[page_index:]))
else:
page.users = []
page.has_next_page = False
page.next_page_token = ''
page.get_next_page = lambda: None
page.iterate_all = lambda: iter([])
return page
def _create_delete_users_result_fragile(
self, errors: List[Tuple[int, str]]
) -> 'firebase_auth.BatchDeleteAccountsResponse':
"""Creates a new BatchDeleteAccountsResponse instance with the given
values.
FRAGILE! The dict keys used by the BatchDeleteAccountsResponse
constructor are an implementation detail that may break in future
versions of the SDK.
Args:
errors: list(tuple(int, str)). A list of (index, error) pairs.
Returns:
firebase_admin.auth.BatchDeleteAccountsResponse. The response.
"""
return firebase_auth.BatchDeleteAccountsResponse(
errors=[{'index': i, 'message': error} for i, error in errors])
def _create_user_import_result_fragile(
self, total: int, errors: List[Tuple[int, str]]
) -> firebase_auth.UserImportResult:
"""Creates a new UserImportResult instance with the given values.
FRAGILE! The dict keys used by the UserImportResult constructor are an
implementation detail that may break in future versions of the SDK.
Args:
total: int. The total number of records initially requested.
errors: list(tuple(int, str)). A list of (index, error) pairs.
Returns:
firebase_admin.auth.UserImportResult. The response.
"""
return firebase_auth.UserImportResult({
'error': [{'index': i, 'message': error} for i, error in errors],
}, total)
class EstablishFirebaseConnectionTests(test_utils.TestBase):
APP = object()
def test_initializes_when_connection_does_not_exist(self) -> None:
get_app_swap = self.swap_with_call_counter(
firebase_admin, 'get_app', raises=ValueError('initialize_app'))
init_app_swap = self.swap_with_call_counter(
firebase_admin, 'initialize_app', returns=self.APP)
with get_app_swap as get_app_counter, init_app_swap as init_app_counter:
firebase_auth_services.establish_firebase_connection()
self.assertEqual(get_app_counter.times_called, 1)
self.assertEqual(init_app_counter.times_called, 1)
def test_returns_existing_connection(self) -> None:
get_app_swap = self.swap_with_call_counter(
firebase_admin, 'get_app', returns=self.APP)
init_app_swap = self.swap_with_call_counter(
firebase_admin, 'initialize_app',
raises=Exception('unexpected call'))
with get_app_swap as get_app_counter, init_app_swap as init_app_counter:
firebase_auth_services.establish_firebase_connection()
self.assertEqual(get_app_counter.times_called, 1)
self.assertEqual(init_app_counter.times_called, 0)
def test_raises_authentic_get_app_error(self) -> None:
get_app_swap = self.swap_with_call_counter(
firebase_admin, 'get_app', raises=ValueError('uh-oh!'))
init_app_swap = self.swap_with_call_counter(
firebase_admin, 'initialize_app',
raises=Exception('unexpected call'))
with get_app_swap as get_app_counter, init_app_swap as init_app_counter:
with self.assertRaisesRegexp(ValueError, 'uh-oh!'): # type: ignore[no-untyped-call]
firebase_auth_services.establish_firebase_connection()
self.assertEqual(get_app_counter.times_called, 1)
self.assertEqual(init_app_counter.times_called, 0)
def test_raises_authentic_initialize_app_error(self) -> None:
get_app_swap = self.swap_with_call_counter(
firebase_admin, 'get_app', raises=ValueError('initialize_app'))
init_app_swap = self.swap_with_call_counter(
firebase_admin, 'initialize_app', raises=ValueError('uh-oh!'))
with get_app_swap as get_app_counter, init_app_swap as init_app_counter:
with self.assertRaisesRegexp(ValueError, 'uh-oh!'): # type: ignore[no-untyped-call]
firebase_auth_services.establish_firebase_connection()
self.assertEqual(get_app_counter.times_called, 1)
self.assertEqual(init_app_counter.times_called, 1)
class FirebaseAuthServicesTestBase(test_utils.AppEngineTestBase):
"""Test base for Firebase-specific tests with helpful default behavior."""
AUTH_ID = 'aid'
EMAIL = '<EMAIL>'
def setUp(self) -> None:
super(FirebaseAuthServicesTestBase, self).setUp()
self.firebase_sdk_stub = FirebaseAdminSdkStub()
self.firebase_sdk_stub.install(self)
def tearDown(self) -> None:
self.firebase_sdk_stub.uninstall()
super(FirebaseAuthServicesTestBase, self).tearDown()
def capture_logging(
self, min_level: int = logging.INFO
) -> 'contextlib._GeneratorContextManager[List[str]]':
"""Context manager that captures logs into a list.
Overridden to set the minimum logging level as INFO.
Args:
min_level: int. The minimum logging level captured by the context
manager. By default, all logging levels are captured. Values
should be one of the following values from the logging module:
NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL.
Returns:
Context manager. The context manager for capturing logging messages.
"""
return super(FirebaseAuthServicesTestBase, self).capture_logging(
min_level=min_level)
def create_request(
self,
id_token: Optional[str] = None,
session_cookie: Optional[str] = None
) -> webapp2.Request:
"""Returns a new request with the given auth values.
Args:
id_token: str|None. The ID token to be placed into the Authorization
header.
session_cookie: str|None. The value to be placed into the request's
cookies.
Returns:
webapp2.Request. A new request with the given auth values set.
"""
req = webapp2.Request.blank('/')
if id_token:
req.headers['Authorization'] = 'Bearer %s' % id_token
if session_cookie:
req.cookies[constants.FIREBASE_AUTH_SESSION_COOKIE_NAME] = (
session_cookie)
return req
def create_response(
self, session_cookie: Optional[str] = None
) -> webapp2.Response:
"""Returns a new response with the given session cookie.
Args:
session_cookie: str|None. The value to be placed into the response's
cookies.
Returns:
webapp2.Response. A new response with the given cookie.
"""
res = webapp2.Response()
if session_cookie:
res.set_cookie(
constants.FIREBASE_AUTH_SESSION_COOKIE_NAME,
value=session_cookie)
return res
class SuperAdminPrivilegesTests(FirebaseAuthServicesTestBase):
def test_updates_user_successfully(self) -> None:
auth_models.UserAuthDetailsModel(id='uid', firebase_auth_id='aid').put()
self.firebase_sdk_stub.create_user('aid')
self.firebase_sdk_stub.assert_is_not_super_admin('aid')
firebase_auth_services.grant_super_admin_privileges('uid')
self.firebase_sdk_stub.assert_is_super_admin('aid')
firebase_auth_services.revoke_super_admin_privileges('uid')
self.firebase_sdk_stub.assert_is_not_super_admin('aid')
def test_raises_error_when_user_does_not_exist(self) -> None:
auth_models.UserAuthDetailsModel(id='uid', firebase_auth_id=None).put()
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
ValueError, 'user_id=uid has no Firebase account'):
firebase_auth_services.grant_super_admin_privileges('uid')
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
ValueError, 'user_id=uid has no Firebase account'):
firebase_auth_services.revoke_super_admin_privileges('uid')
def test_grant_super_admin_privileges_revokes_session_cookies(self) -> None:
id_token = self.firebase_sdk_stub.create_user('aid')
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid', 'uid'))
cookie = firebase_auth.create_session_cookie(
id_token, feconf.FIREBASE_SESSION_COOKIE_MAX_AGE)
# Should not raise.
firebase_auth.verify_session_cookie(cookie, check_revoked=True)
firebase_auth_services.grant_super_admin_privileges('uid')
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
firebase_auth.RevokedSessionCookieError, 'invalid'):
firebase_auth.verify_session_cookie(cookie, check_revoked=True)
def test_revoke_super_admin_privileges_revokes_session_cookies(
self
) -> None:
id_token = self.firebase_sdk_stub.create_user('aid')
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid', 'uid'))
cookie = firebase_auth.create_session_cookie(
id_token, feconf.FIREBASE_SESSION_COOKIE_MAX_AGE)
# Should not raise.
firebase_auth.verify_session_cookie(cookie, check_revoked=True)
firebase_auth_services.revoke_super_admin_privileges('uid')
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
firebase_auth.RevokedSessionCookieError, 'invalid'):
firebase_auth.verify_session_cookie(cookie, check_revoked=True)
class EstablishAuthSessionTests(FirebaseAuthServicesTestBase):
def setUp(self) -> None:
super(EstablishAuthSessionTests, self).setUp()
self.id_token = (
self.firebase_sdk_stub.create_user(self.AUTH_ID, email=self.EMAIL))
def test_adds_cookie_to_response_from_id_token_in_request(self) -> None:
req = self.create_request(id_token=self.id_token)
res = self.create_response()
firebase_auth_services.establish_auth_session(req, res)
self.assert_matches_regexps( # type: ignore[no-untyped-call]
res.headers.get_all('Set-Cookie'), ['session=.*;'])
def test_does_nothing_when_request_has_cookie(self) -> None:
cookie = firebase_auth.create_session_cookie(
self.id_token, feconf.FIREBASE_SESSION_COOKIE_MAX_AGE)
req = self.create_request(session_cookie=cookie)
res = self.create_response()
firebase_auth_services.establish_auth_session(req, res)
self.assertEqual(res.headers.get_all('Set-Cookie'), [])
def test_reports_error_when_request_missing_both_cookie_and_id_token(
self
) -> None:
req = self.create_request()
res = self.create_response()
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
firebase_auth.InvalidIdTokenError, 'missing id_token'):
firebase_auth_services.establish_auth_session(req, res)
self.assertEqual(res.headers.get_all('Set-Cookie'), [])
class DestroyAuthSessionTests(FirebaseAuthServicesTestBase):
def test_deletes_cookie_from_response(self) -> None:
res = self.create_response(session_cookie='abc')
self.assert_matches_regexps( # type: ignore[no-untyped-call]
res.headers.get_all('Set-Cookie'),
['session=abc;'])
firebase_auth_services.destroy_auth_session(res)
self.assert_matches_regexps( # type: ignore[no-untyped-call]
res.headers.get_all('Set-Cookie'),
['session=abc;', 'session=; Max-Age=0;'])
class GetAuthClaimsFromRequestTests(FirebaseAuthServicesTestBase):
def test_returns_none_when_cookie_is_missing(self) -> None:
id_token = self.firebase_sdk_stub.create_user(self.AUTH_ID)
self.assertIsNone(firebase_auth_services.get_auth_claims_from_request(
self.create_request()))
self.assertIsNone(firebase_auth_services.get_auth_claims_from_request(
self.create_request(id_token=id_token)))
def test_returns_claims_when_cookie_is_present(self) -> None:
cookie = firebase_auth.create_session_cookie(
self.firebase_sdk_stub.create_user(self.AUTH_ID, email=self.EMAIL),
feconf.FIREBASE_SESSION_COOKIE_MAX_AGE)
self.assertEqual(
firebase_auth_services.get_auth_claims_from_request(
self.create_request(session_cookie=cookie)),
auth_domain.AuthClaims(self.AUTH_ID, self.EMAIL, False)) # type: ignore[no-untyped-call]
def test_feconf_admin_email_address_is_super_admin(self) -> None:
cookie = firebase_auth.create_session_cookie(
self.firebase_sdk_stub.create_user(
self.AUTH_ID, email=feconf.ADMIN_EMAIL_ADDRESS),
feconf.FIREBASE_SESSION_COOKIE_MAX_AGE)
self.assertEqual(
firebase_auth_services.get_auth_claims_from_request(
self.create_request(session_cookie=cookie)),
auth_domain.AuthClaims( # type: ignore[no-untyped-call]
self.AUTH_ID, feconf.ADMIN_EMAIL_ADDRESS, True))
def test_raises_stale_auth_session_error_when_cookie_is_expired(
self
) -> None:
cookie = firebase_auth.create_session_cookie(
self.firebase_sdk_stub.create_user(self.AUTH_ID, email=self.EMAIL),
feconf.FIREBASE_SESSION_COOKIE_MAX_AGE)
always_raise_expired_session_cookie_error = self.swap_to_always_raise(
firebase_auth, 'verify_session_cookie',
error=firebase_auth.ExpiredSessionCookieError('uh-oh', None))
with always_raise_expired_session_cookie_error, self.assertRaisesRegexp( # type: ignore[no-untyped-call]
auth_domain.StaleAuthSessionError, 'expired'
):
firebase_auth_services.get_auth_claims_from_request(
self.create_request(session_cookie=cookie))
def test_raises_stale_auth_session_error_when_cookie_is_revoked(
self
) -> None:
cookie = firebase_auth.create_session_cookie(
self.firebase_sdk_stub.create_user(self.AUTH_ID, email=self.EMAIL),
feconf.FIREBASE_SESSION_COOKIE_MAX_AGE)
always_raise_revoked_session_cookie_error = self.swap_to_always_raise(
firebase_auth, 'verify_session_cookie',
error=firebase_auth.RevokedSessionCookieError('uh-oh'))
with always_raise_revoked_session_cookie_error:
self.assertRaisesRegexp( # type: ignore[no-untyped-call]
auth_domain.StaleAuthSessionError, 'revoked',
lambda: firebase_auth_services.get_auth_claims_from_request(
self.create_request(session_cookie=cookie)))
def test_raises_auth_session_error_when_cookie_is_invalid(self) -> None:
cookie = firebase_auth.create_session_cookie(
self.firebase_sdk_stub.create_user(self.AUTH_ID, email=self.EMAIL),
feconf.FIREBASE_SESSION_COOKIE_MAX_AGE)
always_raise_unknown_error = self.swap_to_always_raise(
firebase_auth, 'verify_session_cookie',
error=firebase_exceptions.UnknownError('uh-oh'))
with always_raise_unknown_error:
self.assertRaisesRegexp( # type: ignore[no-untyped-call]
auth_domain.InvalidAuthSessionError, 'uh-oh',
lambda: firebase_auth_services.get_auth_claims_from_request(
self.create_request(session_cookie=cookie)))
class GenericAssociationTests(FirebaseAuthServicesTestBase):
def test_get_association_that_is_present(self) -> None:
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid', 'uid'))
self.assertEqual(
firebase_auth_services.get_user_id_from_auth_id('aid'), 'uid')
self.assertEqual(
firebase_auth_services.get_auth_id_from_user_id('uid'), 'aid')
def test_get_association_that_is_missing(self) -> None:
self.assertIsNone(
firebase_auth_services.get_user_id_from_auth_id('does_not_exist'))
self.assertIsNone(
firebase_auth_services.get_auth_id_from_user_id('does_not_exist'))
def test_get_multi_associations_with_all_present(self) -> None:
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid1', 'uid1'))
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid2', 'uid2'))
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid3', 'uid3'))
self.assertEqual(
firebase_auth_services.get_multi_user_ids_from_auth_ids(
['aid1', 'aid2', 'aid3']),
['uid1', 'uid2', 'uid3'])
self.assertEqual(
firebase_auth_services.get_multi_auth_ids_from_user_ids(
['uid1', 'uid2', 'uid3']),
['aid1', 'aid2', 'aid3'])
def test_get_multi_associations_with_one_missing(self) -> None:
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid1', 'uid1'))
# The aid2 <-> uid2 association is missing.
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid3', 'uid3'))
self.assertEqual(
firebase_auth_services.get_multi_user_ids_from_auth_ids(
['aid1', 'aid2', 'aid3']),
['uid1', None, 'uid3'])
self.assertEqual(
firebase_auth_services.get_multi_auth_ids_from_user_ids(
['uid1', 'uid2', 'uid3']),
['aid1', None, 'aid3'])
def test_associate_without_collision(self) -> None:
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid', 'uid'))
self.assertEqual(
firebase_auth_services.get_user_id_from_auth_id('aid'), 'uid')
self.assertEqual(
firebase_auth_services.get_auth_id_from_user_id('uid'), 'aid')
def test_associate_with_user_id_collision_raises(self) -> None:
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid', 'uid'))
with self.assertRaisesRegexp(Exception, 'already associated'): # type: ignore[no-untyped-call]
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid', 'uid'))
def test_associate_with_auth_id_collision_raises(self) -> None:
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid', 'uid'))
# Erase the user_id collision, but leave the auth_id collision.
auth_models.UserIdByFirebaseAuthIdModel.delete_by_id('aid')
with self.assertRaisesRegexp(Exception, 'already associated'): # type: ignore[no-untyped-call]
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid', 'uid'))
def test_associate_multi_without_collisions(self) -> None:
firebase_auth_services.associate_multi_auth_ids_with_user_ids(
[auth_domain.AuthIdUserIdPair('aid1', 'uid1'),
auth_domain.AuthIdUserIdPair('aid2', 'uid2'),
auth_domain.AuthIdUserIdPair('aid3', 'uid3')])
self.assertEqual(
[firebase_auth_services.get_user_id_from_auth_id('aid1'),
firebase_auth_services.get_user_id_from_auth_id('aid2'),
firebase_auth_services.get_user_id_from_auth_id('aid3')],
['uid1', 'uid2', 'uid3'])
def test_associate_multi_with_user_id_collision_raises(self) -> None:
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid1', 'uid1'))
with self.assertRaisesRegexp(Exception, 'already associated'): # type: ignore[no-untyped-call]
firebase_auth_services.associate_multi_auth_ids_with_user_ids(
[auth_domain.AuthIdUserIdPair('aid1', 'uid1'),
auth_domain.AuthIdUserIdPair('aid2', 'uid2'),
auth_domain.AuthIdUserIdPair('aid3', 'uid3')])
def test_associate_multi_with_auth_id_collision_raises(self) -> None:
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid1', 'uid1'))
# Erase the user_id collision, but leave the auth_id collision.
auth_models.UserIdByFirebaseAuthIdModel.delete_by_id('aid1')
with self.assertRaisesRegexp(Exception, 'already associated'): # type: ignore[no-untyped-call]
firebase_auth_services.associate_multi_auth_ids_with_user_ids(
[auth_domain.AuthIdUserIdPair('aid1', 'uid1'),
auth_domain.AuthIdUserIdPair('aid2', 'uid2'),
auth_domain.AuthIdUserIdPair('aid3', 'uid3')])
def test_present_association_is_not_considered_to_be_deleted(self) -> None:
self.firebase_sdk_stub.create_user('aid')
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid', 'uid'))
self.assertFalse(
firebase_auth_services
.verify_external_auth_associations_are_deleted('uid'))
def test_missing_association_is_considered_to_be_deleted(self) -> None:
self.assertTrue(
firebase_auth_services
.verify_external_auth_associations_are_deleted('does_not_exist'))
def test_delete_association_when_it_is_present(self) -> None:
self.firebase_sdk_stub.create_user('aid')
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid', 'uid'))
self.assertFalse(
firebase_auth_services
.verify_external_auth_associations_are_deleted('uid'))
firebase_auth_services.delete_external_auth_associations('uid')
self.assertTrue(
firebase_auth_services
.verify_external_auth_associations_are_deleted('uid'))
def test_delete_association_when_it_is_missing_does_not_raise(self) -> None:
# Should not raise.
firebase_auth_services.delete_external_auth_associations(
'does_not_exist')
def test_disable_association_marks_user_for_deletion(self) -> None:
self.firebase_sdk_stub.create_user('aid')
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid', 'uid'))
self.assertEqual(
firebase_auth_services.get_user_id_from_auth_id('aid'), 'uid')
self.firebase_sdk_stub.assert_is_not_disabled('aid')
firebase_auth_services.mark_user_for_deletion('uid')
self.assertIsNone(
firebase_auth_services.get_user_id_from_auth_id('aid'))
self.assertEqual(
firebase_auth_services.get_user_id_from_auth_id(
'aid', include_deleted=True),
'uid')
self.firebase_sdk_stub.assert_is_disabled('aid')
def test_disable_association_warns_when_firebase_fails_to_update_user(
self
) -> None:
self.firebase_sdk_stub.create_user('aid')
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid', 'uid'))
update_user_swap = self.swap_to_always_raise(
firebase_auth, 'update_user',
error=firebase_exceptions.UnknownError('could not update'))
log_capturing_context = self.capture_logging()
self.assertEqual(
firebase_auth_services.get_user_id_from_auth_id('aid'), 'uid')
self.firebase_sdk_stub.assert_is_not_disabled('aid')
with update_user_swap, log_capturing_context as logs:
firebase_auth_services.mark_user_for_deletion('uid')
self.assert_matches_regexps(logs, ['could not update']) # type: ignore[no-untyped-call]
self.assertIsNone(
firebase_auth_services.get_user_id_from_auth_id('aid'))
self.assertEqual(
firebase_auth_services.get_user_id_from_auth_id(
'aid', include_deleted=True),
'uid')
self.firebase_sdk_stub.assert_is_not_disabled('aid')
def test_disable_association_gives_up_when_auth_assocs_do_not_exist(
self
) -> None:
with self.capture_logging() as logs:
firebase_auth_services.mark_user_for_deletion('uid')
self.assert_matches_regexps( # type: ignore[no-untyped-call]
logs, [
r'\[WIPEOUT\] User with user_id=uid has no Firebase account'
])
class FirebaseSpecificAssociationTests(FirebaseAuthServicesTestBase):
USER_ID = 'uid'
AUTH_ID = 'sub'
def setUp(self) -> None:
super(FirebaseSpecificAssociationTests, self).setUp()
self.firebase_sdk_stub.create_user(self.AUTH_ID)
firebase_auth_services.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair(self.AUTH_ID, self.USER_ID))
def test_delete_user_when_firebase_raises_an_error(self) -> None:
delete_swap = self.swap_to_always_raise(
firebase_auth, 'delete_user',
error=firebase_exceptions.InternalError('could not connect'))
with delete_swap, self.capture_logging() as logs:
firebase_auth_services.delete_external_auth_associations(
self.USER_ID)
self.assertFalse(
firebase_auth_services
.verify_external_auth_associations_are_deleted(self.USER_ID))
self.assert_matches_regexps(logs, ['could not connect']) # type: ignore[no-untyped-call]
def test_delete_user_when_firebase_succeeds(self) -> None:
with self.capture_logging() as logs:
firebase_auth_services.delete_external_auth_associations(
self.USER_ID)
self.assertTrue(
firebase_auth_services
.verify_external_auth_associations_are_deleted(self.USER_ID))
self.assertEqual(logs, [])
class DeleteAuthAssociationsTests(FirebaseAuthServicesTestBase):
EMAIL = '<EMAIL>'
USERNAME = 'username'
AUTH_ID = 'authid'
UNKNOWN_ERROR = firebase_exceptions.UnknownError('error')
def setUp(self) -> None:
super(DeleteAuthAssociationsTests, self).setUp()
self.firebase_sdk_stub.create_user(self.AUTH_ID)
user_settings = user_services.create_new_user(self.AUTH_ID, self.EMAIL) # type: ignore[no-untyped-call]
self.user_id = user_settings.user_id
firebase_auth_services.mark_user_for_deletion(self.user_id)
def swap_get_users_to_return_non_empty_users_result(
self
) -> ContextManager[None]:
"""Swaps the get_user function so that it always fails."""
return self.swap_to_always_return(
firebase_auth,
'get_users',
firebase_auth.GetUsersResult(
[firebase_auth.UserRecord({'localId': 'id'})], []
)
)
def swap_get_users_to_raise_error(self) -> ContextManager[None]:
"""Swaps the get_user function so that it always fails."""
return self.swap_to_always_raise(
firebase_auth,
'get_users',
firebase_exceptions.FirebaseError(message='error', code='E111')
)
def swap_delete_user_to_always_fail(self) -> ContextManager[None]:
"""Swaps the delete_user function so that it always fails."""
return self.swap_to_always_raise(
firebase_auth, 'delete_user', error=self.UNKNOWN_ERROR)
def test_delete_external_auth_associations_happy_path(self) -> None:
firebase_auth_services.delete_external_auth_associations(self.user_id)
self.firebase_sdk_stub.assert_is_not_user(self.AUTH_ID)
self.assertTrue(
firebase_auth_services
.verify_external_auth_associations_are_deleted(self.user_id))
def test_delete_external_auth_associations_when_user_not_found(
self
) -> None:
firebase_auth.delete_user(self.AUTH_ID)
with self.capture_logging() as logs:
firebase_auth_services.delete_external_auth_associations(
self.user_id)
self.assert_matches_regexps( # type: ignore[no-untyped-call]
logs, [
r'\[WIPEOUT\] Firebase account already deleted',
])
def test_delete_external_auth_associations_when_delete_user_fails(
self
) -> None:
with self.swap_delete_user_to_always_fail():
firebase_auth_services.delete_external_auth_associations(
self.user_id)
self.firebase_sdk_stub.assert_is_user(self.AUTH_ID)
self.assertFalse(
firebase_auth_services
.verify_external_auth_associations_are_deleted(self.user_id))
def test_delete_external_auth_associations_when_get_users_fails(
self
) -> None:
firebase_auth_services.delete_external_auth_associations(self.user_id)
self.firebase_sdk_stub.assert_is_not_user(self.AUTH_ID)
with self.swap_get_users_to_return_non_empty_users_result():
self.assertFalse(
firebase_auth_services
.verify_external_auth_associations_are_deleted(self.user_id))
self.assertTrue(
firebase_auth_services
.verify_external_auth_associations_are_deleted(self.user_id))
def test_delete_external_auth_associations_when_get_users_raise_error(
self
) -> None:
firebase_auth_services.delete_external_auth_associations(self.user_id)
self.firebase_sdk_stub.assert_is_not_user(self.AUTH_ID)
with self.swap_get_users_to_raise_error():
with self.capture_logging() as logs:
self.assertFalse(
firebase_auth_services
.verify_external_auth_associations_are_deleted(
self.user_id))
self.assertEqual(len(logs), 1)
self.assertEqual(
logs[0].split('\n')[0],
'[WIPEOUT] Firebase Admin SDK failed! Stack trace:'
)
self.assertTrue(
firebase_auth_services
.verify_external_auth_associations_are_deleted(self.user_id))
|
StarcoderdataPython
|
1899155
|
<filename>docs/_static/demos/ml/SecondaryStructureWord2VecEncoder.ipynb.py
# coding: utf-8
# # Secondary Structure Word2Vec Encoder
#
# This demo creates a dataset of sequence segments derived from a non-redundant set. The dataset contains the sequence segment, the DSSP Q8 and DSSP Q3 codes of the center residue in the sequence segment, and a Word2Vec encoding of the sequence segment.
#
# ## Imports
# In[1]:
from pyspark import SparkConf, SparkContext, SQLContext
from mmtfPyspark.ml import ProteinSequenceEncoder
from mmtfPyspark.mappers import StructureToPolymerChains
from mmtfPyspark.filters import ContainsLProteinChain
from mmtfPyspark.datasets import secondaryStructureSegmentExtractor
from mmtfPyspark.webfilters import Pisces
from mmtfPyspark.io import mmtfReader
import time
# ## Configure Spark Context
# In[2]:
conf = SparkConf().setMaster("local[*]").setAppName("secondaryStructureWord2VecEncodeDemo")
sc = SparkContext(conf = conf)
# ## Read in, filter and sample Hadoop Sequence Files
# In[3]:
path = "../../resources/mmtf_reduced_sample/"
sequenceIdentity = 20
resolution = 2.0
fraction = 0.95
seed = 123
pdb = mmtfReader.read_sequence_file(path, sc) \
                .flatMap(StructureToPolymerChains()) \
                .filter(Pisces(sequenceIdentity, resolution)) \
                .filter(ContainsLProteinChain()) \
                .sample(False, fraction, seed)
# ## Extract Secondary Structure Segments
# In[4]:
segmentLength = 11
data = secondaryStructureSegmentExtractor.get_dataset(pdb, segmentLength).cache()
# ## Add Word2Vec encoded feature vector
# In[6]:
encoder = ProteinSequenceEncoder(data)
n = 2
windowSize = (segmentLength -1) // 2
vectorSize = 50
# overlapping_ngram_word2vec_encode uses keyword attributes
data = encoder.overlapping_ngram_word2vec_encode(n=n, windowSize=windowSize, vectorSize=vectorSize)
# ## Show dataset schema and few rows of data
# In[7]:
data.printSchema()
data.show(10, False)
# ## Terminate Spark Context
# In[8]:
sc.stop()
|
StarcoderdataPython
|
3211546
|
<gh_stars>0
from os import write
import sympy
n = 13
def prime_test(number, witness):
if witness >= number:
raise ValueError("witness must be smaller than the number")
elif number % 2 == 0:
return False
    factor = (number - 1) // 2
    d = 1
    while factor % 2 == 0:
        d += 1
        factor //= 2
    # At this point number - 1 == factor * 2**d with factor odd.
    result = []
    for i in range(d):
        # Raise the witness to factor * 2**i for each successive power of two.
        result.append(pow(witness, factor * (2 ** i), number))
val = not (1 not in result and (number - 1) not in result)
return val
def determine_liars(pow_of_two):
liars = {}
for index in range(1, pow_of_two + 1):
hi_bound = 2 ** index
lo_bound = hi_bound >> 1
for number in range(lo_bound, hi_bound):
if sympy.isprime(number):
continue
for witness in range(2, number, 2):
test_result = prime_test(number, witness)
if test_result == True:
key = "_" + str(witness)
if key not in liars:
liars.update({key: 1})
else:
liars.update({key: liars[key] + 1})
with open("data/liars-" + str(index) + ".csv", "w") as f:
f.write("witness,lie_count\n")
for liar in liars.keys():
f.write(liar[1:] + "," + str(liars.get(liar)) + "\n")
determine_liars(n)
# print(prime_test(9, 2))
|
StarcoderdataPython
|
100006
|
<reponame>paulveillard/cybersecurity-http-encrypted
#!/usr/bin/python3
import ssl, socket
import sys
import time, datetime
import yaml
import smtplib
exitcode = 0
messages = []
'''
Read yaml file and return dictionary
'''
def parse_yaml(filepath):
with open(filepath) as f:
dataMap = yaml.safe_load(f)
return dataMap
def getCertExpiry(domain):
try:
ctx = ssl.create_default_context()
s = ctx.wrap_socket(socket.socket(), server_hostname=domain)
s.connect((domain, 443))
cert = s.getpeercert()
return int(time.mktime(datetime.datetime.strptime(cert['notAfter'], "%b %d %H:%M:%S %Y %Z").timetuple()))
except:
return 0
def check_domain_group(domains,notify_before):
messages = []
for domain in domains:
expiry_date = getCertExpiry(domain)
current_timestamp = int(time.time())
days_till_expiry = (expiry_date - current_timestamp)//(60*60*24)
if days_till_expiry <= notify_before:
messages.append("Certificate for "+domain+" will expire in "+str(days_till_expiry)+" days")
return messages
def send_mail(mail_meta,message):
sender = mail_meta['sender']
receivers = mail_meta['receivers']
subject = mail_meta['subject']
body = """\
From: %s
To: %s
Subject: %s
%s
""" % (sender,', '.join(receivers),subject,'\n'.join(message))
try:
if 'server' in mail_meta and 'username' in mail_meta and 'password' in mail_meta:
server = mail_meta['server']
username = mail_meta['username']
password = mail_meta['password']
mail_client = smtplib.SMTP_SSL(server)
mail_client.login(username,password)
mail_client.sendmail(sender, receivers, body)
print("Successfully sent email")
except:
print("Error: unable to send email:",sys.exc_info()[0])
def handle_notification(notification_group,message):
if notification_group['type'] == 'mail':
send_mail(notification_group['mail_meta'],message)
def main(argv):
yaml_dic = parse_yaml(argv[0])
for key, domain_group in yaml_dic['domain_groups'].items():
messages =check_domain_group(domain_group['domains'],domain_group['notify_before'])
if len(messages) and 'notification_groups' in domain_group:
for notification_group in domain_group['notification_groups']:
handle_notification(yaml_dic['notification_groups'][notification_group],messages)
if __name__ == "__main__":
main(sys.argv[1:])
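# --- Hedged example (not part of the original script) ---
# A sketch of the YAML layout that main() appears to expect, inferred from the
# keys accessed above (domain_groups, domains, notify_before, notification_groups,
# mail_meta). All hostnames and addresses are placeholders.
#
# domain_groups:
#   production:
#     domains:
#       - example.com
#       - example.org
#     notify_before: 30              # days before expiry at which to alert
#     notification_groups:
#       - ops_mail
# notification_groups:
#   ops_mail:
#     type: mail
#     mail_meta:
#       sender: alerts@example.com
#       receivers: [ops@example.com]
#       subject: Certificate expiry warning
#       server: smtp.example.com
#       username: alerts@example.com
#       password: change-me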
|
StarcoderdataPython
|
6529311
|
<filename>api/calendars/tests.py
import json
from django.contrib.auth import authenticate
from django.contrib.auth import get_user_model
from rest_framework.test import APITestCase
from rest_framework_jwt.settings import api_settings
from rest_framework.authtoken.models import Token
from companies.models import Company, Contact
from .models import Schedule
from users.models import User
class CalendarsListViewTest(APITestCase):
calendarslist_url = "/calendars"
def setUp(self):
self.company = Company.objects.create(
id = 1,
name = "testcompany",
contact = "0211112222",
email = "<EMAIL>",
address = "testaddress",
priority = "m"
)
self.contact = Contact.objects.create(
id = 1,
company_id = 1,
name = "contactname",
department = "sale",
job_title = "manager",
email = "<EMAIL>",
phone_number = "010-1111-2222"
)
self.user = User.objects.create_user(
employee_number = "testuser",
phone_number = "000000000000",
password = "<PASSWORD>",
name = "testuser",
department = 4,
job_title = 5
)
self.schedule = Schedule.objects.create(
id = 1,
title = "testschedule",
content = "testcontent",
schedule_date = "2021-01-01 06:23:53",
company_id = 1,
contact_id = 1,
)
self.schedule.user.add(self.user)
JWT_PAYLOAD_HANDLER = api_settings.JWT_PAYLOAD_HANDLER
JWT_ENCODE_HANDLER = api_settings.JWT_ENCODE_HANDLER
payload = JWT_PAYLOAD_HANDLER(self.user)
self.access_token = JWT_ENCODE_HANDLER(payload)
def tearDown(self):
self.company.delete()
self.contact.delete()
self.user.delete()
self.schedule.delete()
def test_calendarslistview_get_success(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Bearer ' + self.access_token)
response = self.client.get(self.calendarslist_url)
self.assertEqual(200, response.status_code)
def test_calendarslistview_get_date_success(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Bearer ' + self.access_token)
response = self.client.get(self.calendarslist_url, {"year" : 2021, "month" : 11})
self.assertEqual(200, response.status_code)
class ScheduleViewTest(APITestCase):
def setUp(self):
self.company = Company.objects.create(
id = 1,
name = "testcompany",
contact = "0211112222",
email = "<EMAIL>",
address = "testaddress",
priority = "m"
)
self.contact = Contact.objects.create(
id = 1,
company_id = 1,
name = "contactname",
department = "sale",
job_title = "manager",
email = "<EMAIL>",
phone_number = "010-1111-2222"
)
self.user = User.objects.create_user(
employee_number = "testuser",
phone_number = "000000000000",
password = "<PASSWORD>",
name = "testuser",
department = 4,
job_title = 5
)
self.schedule = Schedule.objects.create(
id = 1,
title = "testschedule",
content = "testcontent",
schedule_date = "2021-01-01 06:23:53",
company_id = 1,
contact_id = 1,
)
self.schedule.user.add(self.user)
self.schedule_url = "/calendars/" + str(self.schedule.id)
JWT_PAYLOAD_HANDLER = api_settings.JWT_PAYLOAD_HANDLER
JWT_ENCODE_HANDLER = api_settings.JWT_ENCODE_HANDLER
payload = JWT_PAYLOAD_HANDLER(self.user)
self.access_token = JWT_ENCODE_HANDLER(payload)
def tearDown(self):
self.company.delete()
self.contact.delete()
self.user.delete()
self.schedule.delete()
def test_scheduledetail_get_success(self):
self.client.credentials(HTTP_AUTHORIZATION = "Bearer " + self.access_token)
response = self.client.get(self.schedule_url)
self.assertEqual(200, response.status_code)
def test_scheduledetail_delete(self):
self.client.credentials(HTTP_AUTHORIZATION = "Bearer " + self.access_token)
response = self.client.delete(self.schedule_url)
self.assertEqual(204, response.status_code)
def test_scheduledetail_title_update(self):
self.client.credentials(HTTP_AUTHORIZATION = "Bearer " + self.access_token)
response = self.client.patch(self.schedule_url, {"title": "meeting!"})
self.assertEqual(200, response.status_code)
def test_scheduledetail_content_update(self):
self.client.credentials(HTTP_AUTHORIZATION = "Bearer " + self.access_token)
response = self.client.patch(self.schedule_url, {"content": "update content!"})
self.assertEqual(200, response.status_code)
class ScheduleCreateViewTest(APITestCase):
schedule_url = "/calendars/schedule"
def setUp(self):
self.company = Company.objects.create(
id = 1,
name = "testcompany",
contact = "0211112222",
email = "<EMAIL>",
address = "testaddress",
priority = "m"
)
self.contact = Contact.objects.create(
id = 1,
company_id = 1,
name = "contactname",
department = "sale",
job_title = "manager",
email = "<EMAIL>",
phone_number = "010-1111-2<PASSWORD>"
)
self.user = User.objects.create_user(
employee_number = "testuser",
phone_number = "000000000000",
password = "<PASSWORD>",
name = "testuser",
department = 4,
job_title = 5
)
JWT_PAYLOAD_HANDLER = api_settings.JWT_PAYLOAD_HANDLER
JWT_ENCODE_HANDLER = api_settings.JWT_ENCODE_HANDLER
payload = JWT_PAYLOAD_HANDLER(self.user)
self.access_token = JWT_ENCODE_HANDLER(payload)
def tearDown(self):
self.company.delete()
self.contact.delete()
self.user.delete()
def test_schedule_post(self):
self.client.credentials(HTTP_AUTHORIZATION = "Bearer " + self.access_token)
response = self.client.post(self.schedule_url,{
"company": "(주)Adra",
"schedule_date": "2032-12-16 06:23:53",
"title": "계약진행",
"content": "계약 진행 여부 검토하여 회신완료",
"employee": [{"user": 1},{"user": 2}]
})
self.assertEqual(201, response.status_code)
|
StarcoderdataPython
|
350168
|
<gh_stars>0
# Given 2 ints, a and b, return their sum. However, sums in the range 10..19 inclusive, are forbidden, so in
# that case just return 20.
#
# sorta_sum(3, 4) → 7
# sorta_sum(9, 4) → 20
# sorta_sum(10, 11) → 21
def sorta_sum(a, b):
if (a + b >= 10 and a + b <= 19):
return 20
return a + b
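# Sanity checks (not part of the original snippet), mirroring the examples
# given in the problem statement above.
assert sorta_sum(3, 4) == 7
assert sorta_sum(9, 4) == 20
assert sorta_sum(10, 11) == 21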
|
StarcoderdataPython
|
269804
|
from onnx_tf.handlers.frontend_handler import FrontendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_op
@onnx_op("Relu")
@tf_op("Relu")
class Relu(FrontendHandler):
@classmethod
def version_1(cls, node, **kwargs):
return cls.make_node_from_tf_node(node)
@classmethod
def version_6(cls, node, **kwargs):
return cls.make_node_from_tf_node(node)
|
StarcoderdataPython
|
1633159
|
<filename>spanning/__init__.py
"""Python Span Library
Written by Gorea (https://github.com/Gorea235).
"""
__author__ = "Gorea (https://github.com/Gorea235)"
__all__ = ["Span", "ReadOnlySpan"]
import math
class __SpanIter__:
def __init__(self, span):
self.__span = span
self.__i = 0
def __iter__(self):
return self
def __next__(self):
if self.__i < len(self.__span):
item = self.__span[self.__i]
self.__i += 1
return item
else:
raise StopIteration()
next = __next__ # Python 2 support
class ReadOnlySpan:
"""Provides access over a list via reference rather than re-allocation.
This object will act like a standard list that has been spliced (meaning functions
like 'len' will work correctly), however it will not cause any re-allocations beyond
the Span object itself (the list doesn't actually change, just the referencing of
it does).
Read-only version. This means that this object does not provide an index setter, making
    it safe for situations where modification should not be, or is not, allowed (e.g. strings).
Params:
over: the list-like object to span over (can be another Span object).
start: the starting point of the span (if None, defaults to 0) (inclusive).
end: the ending point of the span (if None, defaults to the end of over) (exclusive).
step: the step to span over the items with (if None, defaults to 1).
Used to create spans over list-like objects that index in different ways
(still no reallocation, just index adjustment). Negative steps can be used to
create spans that go in reverse.
"""
def __init__(self, over, start=None, end=None, step=None):
offset = 0 # the offset of the start
prev_end = None
self._over = over
if isinstance(over, ReadOnlySpan):
# if we are spanning of a Span, get the reference directly and just
# adjust the start & end points
self._over = over._over
offset = over._indices[0]
prev_end = over._indices[1]
over_ln = len(self._over)
start = (0 if start is None else start) + offset
if prev_end is not None and end is None:
# if we are re-spanning a span without changing the end,
# just use that
end = prev_end
else:
end = over_ln if end is None else end + offset
step = 1 if step is None else step
# use slice to simplify start/end point calculation
current_slice = slice(start, end, step)
if step < 0:
over_ln += 1 # fix to get upper bound index to match
self._indices = current_slice.indices(
over_ln) # stores the start & end points
def _key_to_actual(self, key):
lself = len(self)
# forces the key to be within the span limits
if key >= lself:
raise IndexError("span index out of range")
if key < 0: # apply wrap-around with negative indexes
key %= lself
if self._indices[2] < 0: # if reverse span, get reverse index
key = (lself - 1) - key
key *= abs(self._indices[2]) # scale key last to get actual index
return self._indices[0] + key
def __getitem__(self, key):
return self._over[self._key_to_actual(key)]
def __cmp__(self, other, op=None):
# 1 : greater than
# 0 : equal
# -1: less than
try:
lself = len(self)
lother = len(other)
# compare items
for i in range(min(lself, lother)):
if self[i] < other[i]:
return -1
elif self[i] > other[i]:
return 1
# compare length if all items are equal to the end of the
# smallest list-like object
if lself < lother:
return -1
elif lself > lother:
return 1
# equal items and size
return 0
except TypeError:
raise TypeError("{} is not supported against '{}' and '{}'".format(
"comparison" if op is None else ("'" + op + "'"),
self.__class__.__name__, other.__class__.__name__))
def __eq__(self, other):
try:
return self.__cmp__(other, "==") == 0
except TypeError:
return False
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self.__cmp__(other, "<") == -1
def __le__(self, other):
c = self.__cmp__(other, "<=")
if c == 0 or c == -1:
return True
return False
def __gt__(self, other):
return self.__cmp__(other, "<") == 1
def __ge__(self, other):
c = self.__cmp__(other, "<=")
if c == 0 or c == 1:
return True
return False
def __len__(self):
ln = int(math.ceil(
(self._indices[1] - self._indices[0]) / float(abs(self._indices[2]))))
if ln < 0:
return 0
return ln
def __iter__(self):
return __SpanIter__(self)
def __reversed__(self):
# returns the iterator of a reversed version of the current span
        # object (this means that the iterator is actually going over a
# different object, however this should be a non-issue since the
# same list-like object is still being referenced)
return iter(Span(self, step=-self._indices[2]))
def __contains__(self, item):
# since we do not know what the underlying data structure is
# we need to search through the whole list
for i in self:
if i == item:
return True
return False
def __repr__(self):
return self.__class__.__name__ + "(" + str(self) + ")"
def __str__(self):
# written as to not reallocate any part of the list
sb = "["
ln = len(self)
for i in range(ln):
sb += str(self[i])
if i < ln - 1:
sb += ", "
return sb + "]"
class Span(ReadOnlySpan):
"""Provides access over a list via reference rather than re-allocation.
This object will act like a standard list that has been spliced (meaning functions
like 'len' will work correctly), however it will not cause any re-allocations beyond
the Span object itself (the list doesn't actually change, just the referencing of
it does).
This is a the same as ReadOnlySpan, but also provides an index setter, allowing
modification of the spanned object.
Do take into account that this is still referencing the stored spanned object, so
any changes applied to this object will be applied to the underlying object (and
thus will effect all other code referencing the same object).
Params:
over: the list-like object to span over (can be another Span object).
start: the starting point of the span (if None, defaults to 0) (inclusive).
end: the ending point of the span (if None, defaults to the end of over) (exclusive).
step: the step to span over the items with (if None, defaults to 1).
Used to create spans over list-like objects that index in different ways
(still no reallocation, just index adjustment). Negative steps can be used to
create spans that go in reverse.
"""
def __setitem__(self, key, value):
self._over[self._key_to_actual(key)] = value
|
StarcoderdataPython
|
11284336
|
<reponame>jakzy/Simple-Automatas
from distutils.core import setup
setup(name="statemap",
version="0.03",
description="SM runtime",
author="<NAME>",
author_email="<EMAIL>",
url="http://smc.sourceforge.net",
license="MPL 1.1",
py_modules=['statemap'],
)
|
StarcoderdataPython
|
3575562
|
# Generated by Django 2.2 on 2019-04-26 09:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mysite', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BaseInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.CharField(max_length=255)),
('telephone', models.CharField(max_length=255)),
('email', models.EmailField(max_length=254)),
('linkedin', models.URLField()),
('github', models.URLField()),
('website', models.URLField()),
],
options={
'verbose_name': 'BaseInfo',
},
),
migrations.AlterModelOptions(
name='achievements',
options={'verbose_name': 'Achievements'},
),
migrations.AlterModelOptions(
name='edu',
options={'verbose_name': 'Educations'},
),
migrations.AlterModelOptions(
name='jobs',
options={'verbose_name': 'Jobs'},
),
migrations.AlterModelOptions(
name='lang',
options={'verbose_name': 'Languages'},
),
migrations.AlterModelOptions(
name='showblocks',
options={'verbose_name': 'Blocks'},
),
migrations.AlterModelOptions(
name='skills',
options={'verbose_name': 'Skills'},
),
migrations.AlterField(
model_name='achievements',
name='title',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='edu',
name='description',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='edu',
name='duration',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='edu',
name='title',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='jobs',
name='duration',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='jobs',
name='title',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='lang',
name='quality',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='lang',
name='title',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='showblocks',
name='title',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='skills',
name='title',
field=models.CharField(max_length=255),
),
]
|
StarcoderdataPython
|
8073279
|
<reponame>naradta/MedrecSampleTestOnCluster
from java.io import FileInputStream
import java.lang
import os
import string
import sys
import java.sql.SQLException
from oracle.jdbc.pool import OracleDataSource
propdir=sys.argv[6]
propfile= propdir+"/resource_config.properties"
propInputStream = FileInputStream(propfile)
configProps = Properties()
configProps.load(propInputStream)
#Read Properties
##############################
### - Connecting details - read from system arguments
username=sys.argv[3]
password=sys.argv[4]
adminHost = sys.argv[1]
adminPort = sys.argv[2]
serverUrl="t3://"+adminHost+":"+adminPort
clusterName=sys.argv[5]
#migratableTargetName = configProps.get("migratabletarget.name")
## changes this value dynamically
migratableTargetName= 'MS-2'+' (migratable)'
#machineName = configProps.get("machine.name")
### - JMSServer details
jmsServerName = configProps.get("jms.server.name")
jdbcstoreName = configProps.get("jdbcstore.name")
storeName = configProps.get("store.name")
#storePath = configProps.get("store.path")
### - SystemModule Details
systemModuleName = configProps.get("system.module.name")
### - ConnectionFactory Details
connectionFactoryName = configProps.get("connection.factory.name")
ConnectionFactoryJNDIName = configProps.get("connection.factory.jndi.name")
### - SubDeployment, Queue
SubDeploymentName = configProps.get("sub.deployment.name")
queueName = configProps.get("queue.name")
queueJNDIName = configProps.get("queue.jndi.name")
domainname = sys.argv[7]
storePath = sys.argv[8]
### -Admin User details for medrec
medrec_admin_userid=configProps.get("medrec.admin.username")
medrec_admin_password=configProps.get("medrec.admin.password")
medrec_admin_userdesc=configProps.get("medrec.admin.userdesc")
#Connection to the Server
print 'connection to Weblogic Admin Server'
connect(username,password,serverUrl)
runningServer = ''
candidateServerList = []
################New changes###################
# Stop the cluster
def shutdownCluster(clusterName):
try:
shutdown(clusterName,'Cluster')
except Exception, e:
print 'Error while shutting down cluster' ,e
dumpStack()
# Start the cluster
def startCluster(clusterName):
try:
start(clusterName,'Cluster',block='true')
except Exception, e:
print 'Error while starting cluster' ,e
dumpStack()
def startClusterServers(clusterName):
try:
serverConfig()
clusters = cmo.getClusters()
if clusters !=None:
for cluster in clusters:
if cluster.getName() == clusterName:
servers = cluster.getServers()
if servers !=None:
print('Number of servers in '+clusterName+' cluster is '+str(len(servers)))
for server in servers:
statevalMap = state(server.getName(),'Server',returnMap="true")
#print('stateval of server'+server.getName() +'is'+str(statevalMap))
stateval = str(statevalMap.get(server.getName()))
if stateval != 'RUNNING':
start(server.getName(),'Server',block='true')
except Exception, e:
print 'Error while starting servers ',e
dumpStack()
def setRequiredParamsOfCluster(clusterName):
global runningServer
global migratableTargetName
global candidateServerList
try:
serverConfig()
clusters = cmo.getClusters()
if clusters !=None:
for cluster in clusters:
if cluster.getName() == clusterName:
servers = cluster.getServers()
if servers !=None:
print('Number of servers in '+clusterName+' cluster are: '+str(len(servers)))
print('Checking Cluster '+clusterName+' Managed Servers State')
for server in servers:
statevalMap = state(server.getName(),'Server',returnMap="true")
#print('stateval of server'+server.getName() +'is'+str(statevalMap))
stateval = str(statevalMap.get(server.getName()))
if stateval == 'RUNNING':
runningServer = server.getName()
migratableTargetName= runningServer+' (migratable)'
break;
for server in servers:
statevalMap = state(server.getName(),'Server',returnMap="true")
                            print('State of server '+server.getName()+' is '+str(statevalMap))
                            stateval = str(statevalMap.get(server.getName()))
if stateval == 'RUNNING' and runningServer != server.getName() :
migratableTargetName = server.getName()+' (migratable)'
break;
for server in servers:
statevalMap = state(server.getName(),'Server',returnMap="true")
#print('stateval of server'+server.getName() +'is'+str(statevalMap))
stateval = str(statevalMap.get(server.getName()))
if stateval == 'RUNNING':
candidateServerList.append(ObjectName('com.bea:Name='+server.getName()+',Type=Server'))
except Exception, e:
print 'Error while checking server states ',e
dumpStack()
startClusterServers(clusterName)
setRequiredParamsOfCluster(clusterName)
domainConfig()
edit()
startEdit()
print 'Setting Consensus Leasing Migration Basis for Cluster '+clusterName
cd('/Clusters/'+clusterName)
cmo.setMigrationBasis('consensus')
save()
activate()
print '###### Completed configuration of Consensus Leasing Migration Basis for Cluster ##############'
print 'Restarting Cluster to ensure changes are reflected....'
shutdown(clusterName,'Cluster')
start(clusterName,'Cluster')
print 'Cluster restarted successfully.'
print 'RunningServer name is '+runningServer
print 'MigratableTargetName name is '+migratableTargetName
cd('/')
startEdit()
ref = getMBean('/MigratableTargets/' + migratableTargetName)
if(ref != None):
print '########## Migratable Target already exists with name '+ migratableTargetName
else:
cmo.createMigratableTarget(migratableTargetName)
cd('/MigratableTargets/'+migratableTargetName)
cmo.setCluster(getMBean('/Clusters/'+clusterName))
cmo.setUserPreferredServer(getMBean('/Servers/'+runningServer))
cmo.setMigrationPolicy('exactly-once')
set('ConstrainedCandidateServers',jarray.array(candidateServerList, ObjectName))
cmo.setNumberOfRestartAttempts(6)
cmo.setNonLocalPostAllowed(false)
cmo.setRestartOnFailure(false)
cmo.setPostScriptFailureFatal(true)
cmo.setSecondsBetweenRestarts(30)
save()
activate()
print '###### Completed configuration of Migratable targets##############'
##############################################
#Creating Authentication user
###########################
serverConfig()
print 'Creating Admin User....'
cd('/SecurityConfiguration/'+domainname+'/Realms/myrealm/AuthenticationProviders/DefaultAuthenticator')
if (cmo.userExists(medrec_admin_userid)):
print '########## User already exists with username '+medrec_admin_userid
else:
cmo.createUser(medrec_admin_userid,medrec_admin_password,medrec_admin_userdesc)
domainConfig()
#creating FileStore
############################
print 'Creating JMS FileStore....'
#domainConfig()
edit()
startEdit()
cd('/')
ref = getMBean('/FileStores/' + storeName)
if(ref != None):
print '########## File Store already exists with name '+ storeName
else:
cmo.createFileStore(storeName)
print '===> Created FileStore - ' + storeName
Thread.sleep(10)
cd('/FileStores/'+storeName)
cmo.setDirectory(storePath)
print 'Running Server '+runningServer
#set('Targets',jarray.array([ObjectName('com.bea:Name='+runningServer+' (migratable),Type=MigratableTarget')], ObjectName))
set('Targets',jarray.array([ObjectName('com.bea:Name='+migratableTargetName+',Type=MigratableTarget')], ObjectName))
save()
activate()
#Creating JMS Server
############################
print 'Creating JMS Server....'
startEdit()
cd('/')
ref = getMBean('/JMSServers/' + jmsServerName)
if(ref != None):
print '########## JMS Server already exists with name '+ jmsServerName
else:
cmo.createJMSServer(jmsServerName)
print '===> Created JMS Server - ' + jmsServerName
Thread.sleep(10)
cd('/JMSServers/'+jmsServerName)
#cmo.setPersistentStore(getMBean('/JDBCStores/'+jdbcstoreName))
cmo.setPersistentStore(getMBean('/FileStores/'+storeName))
set('Targets',jarray.array([ObjectName('com.bea:Name='+migratableTargetName+',Type=MigratableTarget')], ObjectName))
save()
activate()
#Creating JMS Module
#########################
print 'Creating JMS Module....in cluster: '+clusterName
startEdit()
cd('/')
ref = getMBean('/JMSSystemResources/' + systemModuleName)
if(ref != None):
print '########## JMS System Module Already exists with name '+ systemModuleName
else:
cmo.createJMSSystemResource(systemModuleName)
print '===> Created JMS System Module - ' + systemModuleName
cd('/JMSSystemResources/'+systemModuleName)
set('Targets',jarray.array([ObjectName('com.bea:Name='+clusterName+',Type=Cluster')], ObjectName))
save()
activate()
#Creating JMS SubDeployment
############################
print 'Creating JMS SubDeployment....'
startEdit()
ref = getMBean('/JMSSystemResources/'+systemModuleName+'/SubDeployments/'+SubDeploymentName)
if(ref != None):
print '########## JMS SubDeployment Already exists with name '+ SubDeploymentName + 'in module '+systemModuleName
else:
cmo.createSubDeployment(SubDeploymentName)
print '===> Created JMS SubDeployment - ' + systemModuleName
cd('/JMSSystemResources/'+systemModuleName+'/SubDeployments/'+SubDeploymentName)
set('Targets',jarray.array([ObjectName('com.bea:Name='+jmsServerName+',Type=JMSServer')], ObjectName))
save()
activate()
#Creating JMS Connection Factory
###############################
print 'Creating JMS Connection Factory....'
startEdit()
ref = getMBean('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/ConnectionFactories/'+connectionFactoryName)
if(ref != None):
print '########## JMS Connection Factory Already exists with name '+ connectionFactoryName + 'in module '+systemModuleName
else:
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName)
cmo.createConnectionFactory(connectionFactoryName)
print '===> Created Connection Factory - ' + connectionFactoryName
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/ConnectionFactories/'+connectionFactoryName)
cmo.setJNDIName(ConnectionFactoryJNDIName)
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/ConnectionFactories/'+connectionFactoryName+'/SecurityParams/'+connectionFactoryName)
cmo.setAttachJMSXUserId(false)
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/ConnectionFactories/'+connectionFactoryName+'/ClientParams/'+connectionFactoryName)
cmo.setClientIdPolicy('Restricted')
cmo.setSubscriptionSharingPolicy('Exclusive')
cmo.setMessagesMaximum(10)
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/ConnectionFactories/'+connectionFactoryName+'/TransactionParams/'+connectionFactoryName)
cmo.setXAConnectionFactoryEnabled(true)
cd('/JMSSystemResources/'+systemModuleName+'/SubDeployments/'+SubDeploymentName)
set('Targets',jarray.array([ObjectName('com.bea:Name='+jmsServerName+',Type=JMSServer')], ObjectName))
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/ConnectionFactories/'+connectionFactoryName)
cmo.setSubDeploymentName(''+SubDeploymentName)
save()
activate()
#Creating JMS Queue
##################################
print 'Creating JMS Queue....'
startEdit()
ref = getMBean('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/Queues/'+queueName)
if(ref != None):
print '########## JMS Queue Already exists with name '+ queueName + 'in module '+systemModuleName
else:
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName)
cmo.createQueue(queueName)
print '===> Created Queue - ' + queueName
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/Queues/'+queueName)
cmo.setJNDIName(queueJNDIName)
cd('/JMSSystemResources/'+systemModuleName+'/SubDeployments/'+SubDeploymentName)
set('Targets',jarray.array([ObjectName('com.bea:Name='+jmsServerName+',Type=JMSServer')], ObjectName))
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/Queues/'+queueName)
cmo.setSubDeploymentName(''+SubDeploymentName)
save()
activate()
print '###### Completed configuration of all required JMS Objects ##############'
#Creating Mail Session
##################################
print 'Creating Mail Session....'
#8 -Mail Session Details
ms_name=configProps.get("mailsession.name")
ms_username=configProps.get("mailsession.username")
ms_password=configProps.get("mailsession.password")
ms_mail_user=configProps.get("mailsession.mail.user")
ms_mail_host=configProps.get("mailsession.mail.host")
ms_jndiname=configProps.get("mailsession.jndiname")
startEdit()
ref = getMBean('/MailSessions/mail/'+ms_name)
if(ref != None):
print '########## MailSession Already exists with name '+ms_name
else:
cd('/')
cmo.createMailSession('mail/'+ms_name)
cd('/MailSessions/mail/'+ms_name)
cmo.setSessionUsername(ms_username)
cmo.setSessionPassword(<PASSWORD>)
#setEncrypted('SessionPassword', '<PASSWORD>', '/scratch/app/mw/wls_domains/wls_12_2_1_3_d2/Script1580205057775Config', '/scratch/app/mw/wls_domains/wls_12_2_1_3_d2/Script1580205057775Secret')
prop = Properties()
prop.setProperty('mail.user', ms_mail_user)
prop.setProperty('mail.host', ms_mail_host)
cmo.setProperties(prop)
cmo.setJNDIName(ms_jndiname)
set('Targets',jarray.array([ObjectName('com.bea:Name='+clusterName+',Type=Cluster')], ObjectName))
save()
activate()
#Creating WLDF Resources
##################################
print 'Creating WLDF....'
startEdit()
ref = getMBean('/WLDFSystemResources/MedRecWLDF')
if(ref != None):
print '########## WLDF Already exists with name MedRecWLDF'
else:
cd('/WLDFSystemResources')
cmo.createWLDFSystemResource('MedRecWLDF')
cd('/WLDFSystemResources/MedRecWLDF')
cmo.setDescription('')
set('Targets',jarray.array([ObjectName('com.bea:Name='+clusterName+',Type=Cluster')], ObjectName))
cd('/WLDFSystemResources/MedRecWLDF/WLDFResource/MedRecWLDF/Harvester/MedRecWLDF')
cmo.createHarvestedType('com.oracle.medrec.admin.AdminReport')
cd('/WLDFSystemResources/MedRecWLDF/WLDFResource/MedRecWLDF/Harvester/MedRecWLDF/HarvestedTypes/com.oracle.medrec.admin.AdminReport')
set('HarvestedAttributes',jarray.array([String('NewUserCount')], String))
cmo.setHarvestedInstances(None)
cmo.setNamespace('ServerRuntime')
cd('/WLDFSystemResources/MedRecWLDF/WLDFResource/MedRecWLDF/Instrumentation/MedRecWLDF')
cmo.setEnabled(true)
cmo.createWLDFInstrumentationMonitor('DyeInjection')
cd('/WLDFSystemResources/MedRecWLDF/WLDFResource/MedRecWLDF/Instrumentation/MedRecWLDF/WLDFInstrumentationMonitors/DyeInjection')
cmo.setDescription('Dye Injection monitor')
cmo.setProperties('ADDR1=127.0.0.1 USER1=<EMAIL>')
set('Actions',jarray.array([], String))
cmo.setDyeMask(None)
save()
activate()
disconnect()
exit()
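# --- Hedged usage note (not part of the original script) ---
# Based on the sys.argv indices read at the top of this script, a WLST
# invocation would look roughly like the following (the script name and all
# bracketed values are illustrative only):
#
#   java weblogic.WLST <this_script>.py <adminHost> <adminPort> \
#       <username> <password> <clusterName> <propertiesDir> <domainName> <storePath>
#
# where <propertiesDir> must contain resource_config.properties defining the
# keys referenced above (jms.server.name, system.module.name, queue.name, ...).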
|
StarcoderdataPython
|
170444
|
<gh_stars>0
"""Methods related to jwt, including encode, decode"""
import jwt
from base64 import b64decode
from django.conf import settings
import time
from random import randint
def decode_jwt(token):
"""Decodes the jwt token with ecrypted key from settings
Returns the dict of data
"""
# Removes some extra text
token = token.replace('Bearer ', '')
token = token.replace('Token ', '')
try:
result = jwt.decode(token, b64decode(settings.JWT_ENCRYPT_KEY))
except jwt.exceptions.DecodeError:
result = None
return result
def encode_jwt(user_id, algorithm='HS256', exp_in_seconds=864000):
"""Create a jwt token with user_id. What is the jti?
"""
iat = int(time.time())
exp = iat + exp_in_seconds
jti = iat + randint(9999, 999999)
data = dict(jti=jti, sub=user_id, iat=iat, exp=exp)
return jwt.encode(data, b64decode(settings.JWT_ENCRYPT_KEY), algorithm=algorithm)
def detect_lang(lang_header):
lang = 'en' # or vi
if 'vi' in lang_header:
lang = 'vi'
return lang
def jwt_decode(token):
return jwt.decode(token, b64decode(settings.JWT_ENCRYPT_KEY))
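

# --- Hedged usage sketch (not part of the original module) ---
# A minimal round trip through encode_jwt/decode_jwt. It assumes Django
# settings are already configured with a base64-encoded JWT_ENCRYPT_KEY and a
# pre-2.0 PyJWT (where decode() does not require an explicit `algorithms`
# argument, matching decode_jwt above). The user id is an illustrative value.
def _demo_round_trip():
    token = encode_jwt(user_id=42, exp_in_seconds=3600)
    # PyJWT < 2.0 returns bytes, >= 2.0 returns str; normalise for the header form.
    if isinstance(token, bytes):
        token = token.decode('utf-8')
    payload = decode_jwt('Bearer ' + token)
    # Expected shape: {'jti': ..., 'sub': 42, 'iat': ..., 'exp': ...}
    return payload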
|
StarcoderdataPython
|
11388965
|
from collections import defaultdict
from mesa.visualization.ModularVisualization import ModularServer
from mesa.visualization.UserParam import UserSettableParameter
from mesa.visualization.modules import CanvasGrid
from src.core.agents import SlidingWindowVoronoiAgent, VoronoiAgent, MultiMinimaxAgent
from src.core.model import SpeedAgent, AgentTrace, AgentTraceCollision
from src.core.model import SpeedModel
class CustomCanvasGrid(CanvasGrid):
"""
CanvasGrid with top left origin.
"""
def render(self, model):
grid_state = defaultdict(list)
for x in range(model.grid.width):
for y in range(model.grid.height):
cell_objects = model.grid.get_cell_list_contents([(x, y)])
for obj in cell_objects:
portrayal = self.portrayal_method(obj)
if portrayal:
portrayal["x"] = x
portrayal["y"] = model.grid.height - 1 - y
grid_state[portrayal["Layer"]].append(portrayal)
return grid_state
def agent_portrayal(agent):
portrayal = {
"Filled": "true",
"Layer": 0
}
if isinstance(agent, SpeedAgent):
portrayal = dict(portrayal, **{
"Shape": "circle",
"Color": COLOR_PALETTE[agent.unique_id - 1],
"r": 0.95
})
elif type(agent) is AgentTrace:
portrayal = dict(portrayal, **{
"Shape": "rect",
"Color": COLOR_PALETTE[agent.origin.unique_id - 1],
"w": 0.95,
"h": 0.95
})
if type(agent) is AgentTraceCollision:
portrayal = dict(portrayal, **{
"Shape": "circle",
"Color": " brown",
"r": 0.95
})
return portrayal
if __name__ == "__main__":
# Parameters
WIDTH = 35
HEIGHT = 35
COLOR_PALETTE = [
'green',
'blue',
'red',
'black',
'pink',
'orange'
]
model_params = {
"width": WIDTH,
"height": HEIGHT,
"agent_classes": [SlidingWindowVoronoiAgent, VoronoiAgent, MultiMinimaxAgent],
"nb_agents": UserSettableParameter('slider', 'Amount of Agents', value=3, min_value=1, max_value=6, step=1),
}
grid = CustomCanvasGrid(agent_portrayal, WIDTH, HEIGHT, 700, 700)
# create and launch server instance
server = ModularServer(SpeedModel,
[grid],
"Speed",
model_params)
server.port = 8521
server.launch()
|
StarcoderdataPython
|
8026267
|
'''8. Count how many times a given digit occurs in an entered sequence of numbers.
The quantity of numbers entered and the digit to count
are supplied from the keyboard.'''
user_range = input('Enter a sequence: ')
user_pattern = input('Enter the digit to search for: ')
count = 0
for i in user_range:
    if i == user_pattern:
        count += 1
print(
    f'The digit {user_pattern} occurs in the sequence {user_range}: \
{count} time(s)'
)
|
StarcoderdataPython
|
1863182
|
from locust import HttpUser
class AbstractUser(HttpUser):
abstract = True
def __init__(self, parent):
super(AbstractUser, self).__init__(parent)
self.user_attr = {}
def set_email(self, email):
self.user_attr['email'] = email
def get_email(self):
if 'email' in self.user_attr.keys():
return self.user_attr['email']
else:
return None
def set_cookie(self, cookie):
self.user_attr['cookie'] = cookie
def get_cookie(self):
return self.user_attr['cookie']
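

# --- Hedged usage sketch (not part of the original file) ---
# A minimal illustration of how AbstractUser might be subclassed in a
# locustfile. The host, endpoint paths and credentials below are assumptions
# for demonstration only, not part of the original code.
from locust import task, between


class WebsiteUser(AbstractUser):
    host = "http://localhost:8000"  # placeholder target
    wait_time = between(1, 3)       # seconds to wait between tasks

    def on_start(self):
        # Store per-user state with the helpers defined on AbstractUser.
        self.set_email("user@example.com")
        response = self.client.post("/login", json={"email": self.get_email()})
        self.set_cookie(response.cookies.get_dict())

    @task
    def view_profile(self):
        self.client.get("/profile", cookies=self.get_cookie())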
|
StarcoderdataPython
|
3511397
|
# encoding: utf-8
import io
'''
String API:
    ord(str)      code point of a character
    len(str)      string length
    input()       read a string from the console
    str1 + str2 / str * n    string concatenation / repetition
    str1[n]       extract a character with []
    replace()     string replacement
    str[start offset : end offset : step]    string slicing
    [:] extract the whole string                       "abcdef"[:]     => "abcdef"
    [start:] from index start to the end               "abcdef"[2:]    => "cdef"
    [:end] from the beginning up to end-1              "abcdef"[:2]    => "ab"
    [start:end:step] from start to end-1 with step     "abcdef"[1:5:2] => "bd"
    str.split()     split
    char.join(str)  join
Common search methods, e.g. a = " 我爱编程,我喜欢写代码,学习python很快乐,高兴,666,哈哈哈!!"
    len(a)               string length
    a.startswith("我爱")  starts with the given substring
    a.endswith('过我')    ends with the given substring
    a.find('我')          index of the first occurrence of the given substring
    a.rfind('高')         index of the last occurrence of the given substring
    a.count("编程")       how many times the given substring occurs
    a.isalnum()          whether every character is a letter or a digit
Stripping leading/trailing characters:
    a.strip(" ")   remove the given characters from both ends of the string
    a.lstrip(" ")  remove the given characters from the left end
    a.rstrip(" ")  remove the given characters from the right end
Case conversion: a = "liuzhen love programming, love SXT"
    a.capitalize()  new string with the first letter capitalized
    a.title()       new string with the first letter of every word capitalized
    a.upper()       new string with every character upper-cased
    a.lower()       new string with every character lower-cased
    a.swapcase()    new string with the case of every letter swapped
Layout: center(), ljust() and rjust() lay out strings with padding and alignment
Other methods, str.api():
    1. isalnum()  every character is a letter or digit
    2. isalpha()  the string consists only of letters (CJK characters included)
    3. isdigit()  the string consists only of digits
    4. isspace()  the string is whitespace
    5. isupper()  the letters are all upper case
    6. islower()  the letters are all lower case
String formatting with str.format(): the basic syntax uses {} and : in place of the old %.
format() accepts any number of arguments, and the positions need not be in order; examples below.
Padding and alignment: padding is usually combined with alignment
    ^, <, > are centre, left and right alignment respectively, followed by the width
    after the : comes the fill character (a single character; a space is used by default)
Number formatting: floats are formatted with f, integers with d.
Mutable strings: in Python, strings are immutable objects and do not support in-place
modification; changing a value can only create a new string object. When in-place
modification is really needed, the io.StringIO object or the array module can be used.
'''
binaryA = ord('A')
print(binaryA)
binaryLiu = ord("刘")
print(binaryLiu)
# myName = input("请输入你的名字:")
# print(myName)
# print(len(myName))
str1 = "a" + "b"
str2 = str1 * 3
print(str1)
print(str2)
print(str2[1])
str1 = "abadaa"
str1 = str1.replace("a", "振")
print(str1)
# string slicing
print("abcdefghijklmnopqrstuvwxyz"[-3:])  # last three characters: xyz
print("abcdefghijklmnopqrstuvwxyz"[-8:-3])  # from the 8th-from-last up to (but not including) the 3rd-from-last: stuvw
print("abcdefghijklmnopqrstuvwxyz"[::-1])  # negative step extracts right to left: zyxwvutsrqponmlkjihgfedcba
# split() to divide and join() to merge
a = "你好 哈哈 刘振 name"
b = a.split(" ") # ['你好', '哈哈', '刘振', 'name']
print(b)
c = "*".join(b) # 你好*哈哈*刘振*name
print(c)
# Layout with center(), ljust(), rjust()
a = "LIU"
b = a.center(13, "&")
print(b) # "&&&&&LIU&&&&&"
b = a.center(13)
print(b) # " LIU "
b = a.ljust(13, "*")
print(b) # "LIU**********"
# String formatting with str.format()
a = "我叫{0},今年{1}岁,{0}好棒"
a = a.format("刘振", 26)
print(a) # 我叫刘振,今年26岁,刘振好棒
b = "我的名字是{name},年龄是{age}"
b = b.format(name="刘振", age=33)
print(b) # 我的名字是刘振,年龄是33
# Fill and alignment
a = "{:>8}".format("256") # " 256"
print(a)
# Number formatting
a = "我是{0},我的村快有{1:.2f}"
print(a.format("刘振", 10000.9999))  # 我是刘振,我的村快有10001.00
# Mutable strings: Python strings are immutable and cannot be modified in place; changing a value can only create a new string object. When in-place modification really is needed, use an io.StringIO object or the array module.
s = "hello,sxt"
sio = io.StringIO(s)
value = sio.getvalue()
print("value: " + value) # hello,sxt
seek = sio.seek(7)
print("seek: " + str(seek))  # 7 (the next write will overwrite 'x')
sio.write('g')  # turn the 'x' into a 'g'
value = sio.getvalue()
print("value2: " + value) # hello,sgt
'''
Sequences
'''
|
StarcoderdataPython
|
5002174
|
<gh_stars>1000+
# -*- coding: utf-8 -*-
# @Time : 2021/6/28 23:15
# @File : aastock_new_stock.py
# @Author : Rocky <EMAIL>
'''
http://www.aastocks.com/sc/stocks/market/ipo/listedipo.aspx?s=3&o=0&page=20
'''
import time
from parsel import Selector
from selenium import webdriver
import sys
sys.path.append('..')
import datetime
from common.BaseService import BaseService
from configure.settings import DBSelector
path = r'C:\OneDrive\Python\selenium\chromedriver.exe'
class AAStockNewStock(BaseService):
def __init__(self):
super(AAStockNewStock, self).__init__('../log/aastock.log')
self.conn = DBSelector().get_mysql_conn('db_stock')
self.cursor = self.conn.cursor()
def create_table(self):
sql = '''CREATE TABLE IF NOT EXISTS `tb_hk_new_stock` (
`id` int(11) NOT NULL AUTO_INCREMENT PRIMARY KEY ,
`name` varchar(50) DEFAULT NULL,
`code` varchar(10) NOT NULL,
`issue_date` date DEFAULT NULL,
`each_hand_stock` varchar(50) DEFAULT NULL,
`share_value_Yi` varchar(50) DEFAULT NULL,
`margin_price` varchar(50) DEFAULT NULL,
`price` float(255,4) DEFAULT NULL,
`over_price_part` varchar(50) DEFAULT NULL,
`hit_least_num` int(255) DEFAULT NULL,
`hit_ratio` float(255,4) DEFAULT NULL,
`current_price` float(255,4) DEFAULT NULL,
`first_day_raise` float(255,4) DEFAULT NULL,
`accumulate_raise` float(255,4) DEFAULT NULL,
`crawltime` DATETIME DEFAULT NULL,
UNIQUE INDEX code_ix(`code` ASC)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4'''
try:
self.cursor.execute(sql)
except Exception as e:
print(e)
self.conn.rollback()
else:
self.conn.commit()
def fetch(self, page):
options = webdriver.ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-automation'])
prefs = {'profile.managed_default_content_settings.images': 2}
options.add_experimental_option('prefs', prefs)
options.add_argument(
'--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36')
driver = webdriver.Chrome(executable_path=path,
chrome_options=options)
driver.implicitly_wait(40)
url = 'http://www.aastocks.com/sc/stocks/market/ipo/listedipo.aspx?s=3&o=0&page={}'
for p in range(1, page + 1):
driver.get(url.format(p))
time.sleep(5)
yield driver.page_source
def convert_float(self, data):
if data is None:
            print('data is empty')
return None
data = data.strip().replace('%', '').replace(',', '')
try:
            print('after parsing:')
print(data)
data = float(data)
except Exception as e:
if data != 'N/A':
                print('failed to parse:')
print(data)
data = None
return data
def convert_date(self, data_str):
try:
date = datetime.datetime.strptime(data_str, '%Y/%m/%d')
except Exception as e:
print(e)
date = None
return date
def convert_hand_int(self, data):
try:
data = int(data.strip().replace('手', ''))
except:
data = None
return data
def parse(self, content):
response = Selector(text=content)
ipo_list = response.xpath('//div[@id="IPOListed"]/table/tbody/tr')
insert_sql = '''insert into `tb_hk_new_stock` (`name`,`code`,`issue_date`,`each_hand_stock`,`share_value_Yi`,`margin_price`,`price`,`over_price_part`,`hit_least_num`,`hit_ratio`,`current_price`,`first_day_raise`,`accumulate_raise`,`crawltime`)
VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) ON DUPLICATE KEY UPDATE `crawltime`=%s'''
for ipo_item in ipo_list:
item_list = ipo_item.xpath('.//td')
if len(item_list) < 2:
continue
name = item_list[1].xpath('.//a[1]/text()').extract_first()
code = item_list[1].xpath('.//a[2]/text()').extract_first()
issue_date = self.convert_date(item_list[2].xpath('.//text()').extract_first())
each_hand_stock = item_list[3].xpath('.//text()').extract_first()
share_value_Yi = item_list[4].xpath('.//text()').extract_first()
margin_price = item_list[5].xpath('.//text()').extract_first()
price = self.convert_float(item_list[6].xpath('.//text()').extract_first())
over_price_part = item_list[7].xpath('.//text()').extract_first()
hit_least_num = self.convert_hand_int(item_list[8].xpath('.//text()').extract_first())
hit_ratio = self.convert_float(item_list[9].xpath('.//text()').extract_first())
current_price = self.convert_float(item_list[10].xpath('.//text()').extract_first())
first_day_raise = self.convert_float(item_list[11].xpath('.//text()').extract_first())
accumulate_raise = self.convert_float(item_list[12].xpath('.//text()').extract_first())
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if margin_price == 'N/A':
                # the listing failed; skip it
continue
try:
self.cursor.execute(insert_sql, (
name, code, issue_date, each_hand_stock, share_value_Yi, margin_price, price, over_price_part,
hit_least_num, hit_ratio, current_price, first_day_raise, accumulate_raise, now, now))
except Exception as e:
print(e)
self.conn.rollback()
else:
self.conn.commit()
def run(self):
total_page = 25
self.create_table()
gen = self.fetch(total_page)
page = 0
for content in gen:
print('page ', page)
self.parse(content)
page += 1
self.conn.close()
    def clear_data(self):
        # TODO: remove duplicate rows, e.g.
        # select code from tb_hk_new_stock group by code having count(*) > 1
        pass
def main():
app = AAStockNewStock()
app.run()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6546412
|
from django.apps import AppConfig
default_app_config = 'leonardo_celery_email.Config'
LEONARDO_APPS = ['leonardo_celery_email', 'djcelery_email']
LEONARDO_CONFIG = {
"CELERY_MAIL_FAIL_SILENTLY": (True, "Fail silently in sending emails")
}
class Config(AppConfig):
name = 'leonardo_celery_email'
verbose_name = "leonardo-celery-email"
|
StarcoderdataPython
|
11761
|
<gh_stars>0
# -*- coding:utf8 -*-
import random
import time
from lib.navigation.PathFinding import Pathfinding
from lib.control.Control import Control
from lib.unit.Player import Player
from lib.struct.CoordiPoint import CoordiPoint
# Fight monsters within an area
class AreaFighting(Pathfinding):
    # area_pos: coordinates of the area's four corners, in the order: top-left, top-right, bottom-left, bottom-right
def __init__(self, control: Control, player: Player, area_pos, move_type=0):
Pathfinding.__init__(self, control=control, player=player)
self.area_pos = area_pos
self.hander_area = open("tmp/logs/" + self.getFormatTime(False) + "_areafighting.log", 'a+')
self.start_pos = self.getNowPos()
self.move_type = move_type
    # Walk back into the area
def goto_area(self):
nowPos = self.getNowPos()
if not AreaFighting.pos_in_area(nowPos,self.area_pos) and self.player.getStatus()['combat'] == 0:
print(nowPos.toString())
print("not in area #################################################################################")
print("not in area #################################################################################", file=self.hander_area)
from lib.navigation.EnemyFinder import EnemyFinder
            EnemyFinder(self.player, self.control).clear_target()  # if a monster was targeted outside the area, clear the target first, so that after walking back we do not run out again to fight it
            # walk straight to the centre of the area, with no extra waypoints
self.walk(
self.__get_center_of_area(),
move_type=self.move_type,
sleep=0.3,
precision=0.3,
last=3,
combat_exit=True
)
#self.player.not_combat_recover()
self.player.combat_recover()
return True
    # Get the coordinates of the area's center point
    # Method: apply the midpoint formula to one of the diagonals
def __get_center_of_area(self):
left_top = self.area_pos["leftTop"]
right_bottom = self.area_pos["rightBottom"]
center = [(left_top[0] + right_bottom[0]) / 2, (left_top[1] + right_bottom[1]) / 2]
print("center:")
print(center)
return CoordiPoint(center[0], center[1])
    # Check whether the given coordinate lies inside the area.
    # Vector cross products (clockwise order): a point inside the quadrilateral lies on
    # the same side of every clockwise (or counter-clockwise) edge vector, i.e. all
    # four cross products have the same sign.
    # a = (B.x - A.x)*(y - A.y) - (B.y - A.y)*(x - A.x);
    # b = (C.x - B.x)*(y - B.y) - (C.y - B.y)*(x - B.x);
    # c = (D.x - C.x)*(y - C.y) - (D.y - C.y)*(x - C.x);
    # d = (A.x - D.x)*(y - D.y) - (A.y - D.y)*(x - D.x);
@staticmethod
def pos_in_area(pos: CoordiPoint,area):
A = CoordiPoint(area["leftTop"][0], area["leftTop"][1])
B = CoordiPoint(area["rightTop"][0], area["rightTop"][1])
C = CoordiPoint(area["rightBottom"][0], area["rightBottom"][1])
D = CoordiPoint(area["leftBottom"][0], area["leftBottom"][1])
a = (B.x - A.x) * (pos.y - A.y) - (B.y - A.y) * (pos.x - A.x)
b = (C.x - B.x) * (pos.y - B.y) - (C.y - B.y) * (pos.x - B.x)
c = (D.x - C.x) * (pos.y - C.y) - (D.y - C.y) * (pos.x - C.x)
d = (A.x - D.x) * (pos.y - D.y) - (A.y - D.y) * (pos.x - D.x)
if (a > 0 and b > 0 and c > 0 and d > 0) or (a < 0 and b < 0 and c < 0 and d < 0):
return True
return False
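# A small standalone check of pos_in_area (not part of the original script). The
# corner coordinates below are made up for illustration; only the CoordiPoint class
# imported above is assumed.
if __name__ == '__main__':
    demo_area = {
        "leftTop": [0, 0],
        "rightTop": [10, 0],
        "rightBottom": [10, 10],
        "leftBottom": [0, 10],
    }
    print(AreaFighting.pos_in_area(CoordiPoint(5, 5), demo_area))   # True: inside the square
    print(AreaFighting.pos_in_area(CoordiPoint(15, 5), demo_area))  # False: outside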
|
StarcoderdataPython
|
5064520
|
import discord
from discord.ext import commands
import chickensmoothie as cs
class Pet:
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.guild_only()
async def pet(self, ctx, link: str = ''): # Pet command
pet = await cs.pet(link) # Get pet data
if pet is None:
embed = discord.Embed(title='Pet', description='An error has occurred while processing pet image.', colour=0xff5252) # Create embed
else:
embed = discord.Embed(title=pet['owner'] + '\'s Pet', colour=0x4ba139) # Create embed
embed.set_image(url=pet['image']) # Set image
initial = True
for key, value in pet.items():
if (key == 'owner' or key == 'pps') and initial:
if key == 'pps':
if not value:
continue
else:
embed.add_field(name='PPS', value='[This pet has "PPS". What\'s that?](http://www.chickensmoothie.com/help/pets#pps)', inline=False)
elif key == 'owner':
value = f'[{pet["owner"]}]({pet["owner_link"]})'
embed.add_field(name=key.capitalize(), value=value, inline=False)
else:
if key == 'image' or key == 'owner_link' or key == 'given_link':
pass
else:
if key == 'id':
key = 'Pet ID'
elif key == 'name':
if value == '':
continue
else:
key = 'Pet\'s name'
elif key == 'age':
key = 'Age'
value = f'{value} days'
elif key == 'given':
if value == '':
continue
else:
key = f'Given to {pet["owner"]} by'
value = f'[{pet["given"]}]({pet["given_link"]})'
else:
key = key.capitalize()
embed.add_field(name=key, value=value, inline=True)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Pet(bot))
|
StarcoderdataPython
|
6701713
|
class ParsedRule():
    def __init__(self, predicate, params=None):
        # use None instead of a mutable default argument
        self._predicate = predicate
        self._params = params if params is not None else []
    def __str__(self):
        return self.__repr__()
def __repr__(self):
return str({
'predicate': self._predicate,
'params': self._params
})
def __eq__(self, other):
if isinstance(other, str) or callable(other): # when using: 'rule' in [...]
return self._predicate == other
if isinstance(other, ParsedRule):
return self._predicate == other._predicate and self._params == other._params
return False
def get_predicate(self):
return self._predicate
def get_params(self):
return self._params
def has_unregistered_handler(self):
return callable(self._predicate)
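# A short usage sketch (not from the original module) showing how __eq__ lets a
# predicate name or another rule be matched directly against parsed rules:
if __name__ == '__main__':
    rules = [ParsedRule('is_admin'), ParsedRule('has_role', ['editor'])]
    print('is_admin' in rules)                           # True, matched by predicate name
    print(ParsedRule('has_role', ['editor']) in rules)   # True, matched by predicate and params
    print(rules[1].get_params())                         # ['editor']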
|
StarcoderdataPython
|
9712256
|
# Generated by Django 2.2.12 on 2020-06-11 11:22
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth', '0011_update_proxy_permissions'),
('rbac', '0003_auto_20200603_1440'),
]
operations = [
migrations.CreateModel(
name='PermissionProfile',
fields=[
('permission_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='auth.Permission')),
],
options={
'verbose_name': '权限',
'verbose_name_plural': '权限',
},
bases=('auth.permission',),
managers=[
('objects', django.contrib.auth.models.PermissionManager()),
],
),
]
|
StarcoderdataPython
|
6446727
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""\
Subsetsum by splitting
<NAME> et <NAME> - 2014-2018
"""
# snip{
def part_sum(x_table, i=0):
"""All subsetsums from x_table[i:]
:param x_table: table of values
:param int i: index_table defining suffix_table of x_table to be considered
:iterates: over all values, in arbitrary order
:complexity: :math:`O(2^{len(x_table)-i})`
"""
if i == len(x_table):
yield 0
else:
for s_idx in part_sum(x_table, i + 1):
yield s_idx
yield s_idx + x_table[i]
def subset_sum(x_table, r_target):
"""Subsetsum by splitting
:param x_table: table of values
:param r_target: target value
:returns bool: if there is a subsequence of x_table with total sum r_target
    :complexity: :math:`O(n \\cdot 2^{n/2})`
"""
k = len(x_table) // 2 # divide input
y_value = [v for v in part_sum(x_table[:k])]
z_value = [r_target - v for v in part_sum(x_table[k:])]
y_value.sort() # test of intersection between y_value and z_value
z_value.sort()
i = 0
j = 0
while i < len(y_value) and j < len(z_value):
if y_value[i] == z_value[j]:
return True
if y_value[i] < z_value[j]: # increment index_table of smallest element
i += 1
else:
j += 1
return False
# snip}
# snip{ subset_sum2
def part_sum2(x_table):
"""All subsetsums from a list x
:param x_table: list of values
:complexity: :math:`O(2^{len(x)})`
"""
answer = set([0]) # 0 = value of empty set
for xi in x_table:
answer |= set(value + xi for value in answer)
return answer
def subset_sum2(x_table, r_target):
"""Subsetsum by splitting
:param x_table: table of values
:param r_target: target value
:returns bool: if there is a subsequence of x_table with total sum r_target
    :complexity: :math:`O(n \\cdot 2^{n/2})`
"""
k = len(x_table) // 2 # divide input
y_set = part_sum2(x_table[:k])
z_set = set(r_target - value for value in part_sum2(x_table[k:]))
return len(y_set & z_set)>0 # test intersection
# snip}
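# A quick check of both variants (not part of the book snippets delimited above);
# the sample values are arbitrary.
if __name__ == '__main__':
    values = [3, 34, 4, 12, 5, 2]
    print(subset_sum(values, 9))     # True  (4 + 5)
    print(subset_sum2(values, 9))    # True
    print(subset_sum(values, 30))    # False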
|
StarcoderdataPython
|
366883
|
from os import PathLike
from typing import Union
AnyStr = Union[bytes, str]
FSPath = Union[AnyStr, PathLike]
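# A minimal sketch (not part of the original module) showing how these aliases can be
# used in annotations; to_str_path below is a hypothetical helper.
import os
def to_str_path(path: FSPath) -> str:
    # os.fsdecode accepts str, bytes and os.PathLike values and returns str
    return os.fsdecode(path)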
|
StarcoderdataPython
|
64681
|
<reponame>aeroaks/PySyft<filename>src/syft/__init__.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Welcome to the syft package! This package is the primary package for PySyft.
This package has two kinds of attributes: submodules and convenience functions.
Submodules are configured in the standard way, but the convenience
functions exist to allow for a convenient `import syft as sy` to then expose
the most-used functionalities directly on syft. Note that this way of importing
PySyft is the strict convention in this codebase. (Do not simply call
`import syft` and then directly use `syft.<method>`.)
The syft module is split into two distinct groups of functionality which we casually refer to
as syft "core" and syft "python". "core" functionality is functionality which is designed
to be universal across all Syft languages (javascript, kotlin, swift, etc.).
Syft "python" includes all functionality which by its very nature cannot be
truly polyglot. Syft "core" functionality includes the following modules:
* :py:mod:`syft.core.node` - APIs for interacting with remote machines you do not directly control.
* :py:mod:`syft.core.message` - APIs for serializing messages sent between Client and Node classes.
* :py:mod:`syft.core.pointer` - Client side API for referring to objects on a Node
* :py:mod:`syft.core.store` - Server side API for referring to object storage on a node (things pointers point to)
Syft "python" functionality includes the following modules:
* :py:mod:`syft.ast` - code generates external library common syntax tree using an allowlist list of methods
* :py:mod:`syft.typecheck` - automatically checks and enforces Python type hints and the exclusive use of kwargs.
* :py:mod:`syft.lib` - uses the ast library to dynamically create remote execution APIs for supported Python libs.
IMPORTANT: syft.core should be very careful when importing functionality from outside of syft
core!!! Since we plan to drop syft core down to a language (such as C++ or Rust)
this can create future complications with lower level languages calling
higher level ones.
To begin your education in Syft, continue to the :py:mod:`syft.core.node.vm.vm` module...
"""
# stdlib
from pathlib import Path
import sys
# third party
from pkg_resources import DistributionNotFound # noqa: F401
from pkg_resources import get_distribution # noqa: F401
# syft absolute
# ABSTRACT OBJECT IMPORTS
from syft.core import common # noqa: F401
from syft.core.common import event_loop # noqa: F401
# Convenience Methods
from syft.core.common.serde.deserialize import _deserialize as deserialize # noqa: F401
from syft.core.common.serde.serialize import _serialize as serialize # noqa: F401
from syft.core.node.common.service.repr_service import ReprMessage # noqa: F401
from syft.core.node.device.device import Device # noqa: F401
from syft.core.node.device.device import DeviceClient # noqa: F401
from syft.core.node.domain.domain import Domain # noqa: F401
from syft.core.node.domain.domain import DomainClient # noqa: F401
from syft.core.node.network.network import Network # noqa: F401
from syft.core.node.network.network import NetworkClient # noqa: F401
# Convenience Constructors
from syft.core.node.vm.vm import VirtualMachine # noqa: F401
from syft.core.node.vm.vm import VirtualMachineClient # noqa: F401
# Convenience Functions
from syft.decorators import type_hints # noqa: F401
from syft.grid.duet import bcolors # noqa: F401
from syft.grid.duet import duet # noqa: F401
from syft.grid.duet import join_duet # noqa: F401
from syft.grid.duet import launch_duet # noqa: F401
# Convenience Objects
from syft.lib import lib_ast # noqa: F401
from syft.lib import load_lib # noqa: F401
from syft.lib.torch.module import Module # noqa: F401
# syft relative
# Package Imports
from . import lib # noqa: F401
from . import logger # noqa: F401
# VERSIONING
try:
# Change here if project is renamed and does not equal the package name
dist_name = __name__
__version__ = get_distribution(dist_name).version
except DistributionNotFound:
__version__ = "unknown"
finally:
del get_distribution, DistributionNotFound
sys.path.append(str(Path(__file__)))
logger.add(sink=sys.stderr, level="CRITICAL")
|
StarcoderdataPython
|
3282152
|
<filename>vortex/VortexServerConnection.py
"""
* Created by Synerty Pty Ltd
*
* This software is open source, the MIT license applies.
*
* Website : http://www.synerty.com
* Support : <EMAIL>
"""
import logging
from datetime import datetime
import pytz
from twisted.internet import task
from .PayloadPriority import DEFAULT_PRIORITY
from .VortexConnectionABC import VortexConnectionABC
from .VortexServer import VortexServer, HEART_BEAT_PERIOD, HEART_BEAT_TIMEOUT
from .VortexWritePushProducer import VortexWritePushProducer
logger = logging.getLogger(name=__name__)
class VortexServerConnection(VortexConnectionABC):
def __init__(self, vortexServer: VortexServer,
remoteVortexUuid: str,
remoteVortexName: str,
httpSession, transport,
addr) -> None:
VortexConnectionABC.__init__(self,
logger,
vortexServer,
remoteVortexUuid=remoteVortexUuid,
remoteVortexName=remoteVortexName,
httpSessionUuid=httpSession)
self._lastHeartBeatTime = datetime.now(pytz.utc)
self._lastHeartBeatCheckTime = datetime.now(pytz.utc)
self._transport = transport
self._addr = addr
# Start our heart beat
self._beatLoopingCall = task.LoopingCall(self._beat)
d = self._beatLoopingCall.start(HEART_BEAT_PERIOD, now=False)
d.addErrback(lambda f: logger.exception(f.value))
self._producer = None
# Register the producer if there isn't one already.
# The websocket server already has one.
if not self._transport.producer:
self._producer = VortexWritePushProducer(transport,
lambda: self.close(),
remoteVortexName)
transport.registerProducer(self._producer, True)
def beatReceived(self):
self._lastHeartBeatTime = datetime.now(pytz.utc)
def _beat(self):
# If we're closed, do nothing
if self._closed:
if self._beatLoopingCall.running:
self._beatLoopingCall.stop()
return
beatTimeout = (datetime.now(pytz.utc) - self._lastHeartBeatTime) \
.seconds > HEART_BEAT_TIMEOUT
# If we've been asleep, then make note of that (VM suspended)
        checkTimeout = (datetime.now(pytz.utc) - self._lastHeartBeatCheckTime) \
                           .seconds > HEART_BEAT_TIMEOUT
        # Mark that we've just checked it
        self._lastHeartBeatCheckTime = datetime.now(pytz.utc)
        if checkTimeout:
self._lastHeartBeatTime = datetime.now(pytz.utc)
return
        # If we haven't heard from the client, then close the connection
if beatTimeout:
self._beatLoopingCall.stop()
self.close()
return
self._write(b'.', DEFAULT_PRIORITY)
@property
def ip(self):
return self._addr.host
@property
def port(self):
return self._addr.port
def write(self, payloadVortexStr: bytes, priority: int = DEFAULT_PRIORITY):
assert not self._closed
self._write(payloadVortexStr, priority)
def _write(self, payloadVortexStr: bytes, priority: int):
if self._producer:
self._producer.write(payloadVortexStr, priority)
else:
self._transport.write(payloadVortexStr)
def close(self):
if self._beatLoopingCall.running:
self._beatLoopingCall.stop()
self._transport.loseConnection()
    def transportClosed(self):
        # a producer is only present when we registered one ourselves
        if self._producer:
            self._producer.close()
        VortexConnectionABC.close(self)
|
StarcoderdataPython
|
1957222
|
# (C) Copyright 1996- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
from .. import logger
from ..authentication.auth import Auth
from ..custom_exceptions import EngineException
from ..user_config import EngineConfig
from . import EngineType
class EngineFactory:
"""
Factory class of the Engine objects. It uses the server specific implementation depending on the value of
ServerType.ETCD3
"""
def __init__(self, engine_conf: EngineConfig, auth: Auth):
"""
It uses the EngineConfig object to instantiate one instance of the server object. This instance contains the
user's authentication details and the server URL. It is unique across the various Engine objects
:param engine_conf:
:param auth
"""
assert engine_conf is not None, "Engine configuration required"
assert engine_conf.host != "", "Server host is required"
assert engine_conf.port != "", "Server port is required"
assert engine_conf.type != "", "Server type is required"
self._conf = engine_conf
self._auth = auth
def create_engine(self):
"""
:return: an instance of the specific implementation of the Engine class
"""
if self._conf.type == EngineType.ETCD_GRPC:
# connect to the server by using pythonEtcd3
logger.debug(f"Setting up gRPC stub to connect to the etcd server {self._conf.host}:{self._conf.port}")
elif self._conf.type == EngineType.ETCD_REST:
# connect to the server by using the REST API
logger.debug(
f"Setting up REST interface to connect to the etcd server " f"{self._conf.host}:{self._conf.port}"
)
elif self._conf.type == EngineType.FILE_BASED:
# connect to the test file based server
logger.debug("Setting up file-based test engine")
else:
raise EngineException(f"Configuration error - Engine: {self._conf.type} is not recognised")
# instantiate the engine and return it
engine_class = self._conf.type.get_class()
try:
# instantiate the engine and return
return engine_class(config=self._conf, auth=self._auth)
except Exception as e:
raise EngineException(f"Error in creating the engine {engine_class.__name__}: {e}")
|
StarcoderdataPython
|
333666
|
from operator import itemgetter
# - an individual node contains the word associated with the node along with
# pointers to its kids and parents.
class node:
def __init__(self, word):
if word != None:
self.word = word
self.kids = []
self.parent = []
self.finished = 0
self.is_word = 1
# the "ind" variable stores the look-up index of the word in the
# word embedding matrix We. set this value when the vocabulary is finalized
self.ind = -1
else:
self.is_word = 0
# - a dtree consists of a list of nodes
# - if you want to use a different dataset, check out the preprocessing scripts
# that convert stanford dependency parses to dtrees
class dtree:
def __init__(self, word_list):
self.nodes = []
for word in word_list:
self.nodes.append(node(word))
# stores the look-up index of the sentence's answer in We
self.ans_ind = -1
# stores the question ID if present
self.qid = -1
def add_edge(self, par, child, rel):
self.nodes[par].kids.append( (child, rel ) )
self.nodes[child].parent.append( (par, rel) )
# return all non-None nodes
def get_nodes(self):
return [node for node in self.nodes if node.is_word]
def get_node_inds(self):
return [(ind, node) for ind, node in enumerate(self.nodes) if node.is_word]
# get a node from the raw node list
def get(self, ind):
return self.nodes[ind]
# return the raw text of the sentence
def get_words(self):
return ' '.join([node.word for node in self.get_nodes()[1:]])
# return raw text of phrase associated with the given node
def get_phrase(self, ind):
node = self.get(ind)
words = [(ind, node.word), ]
to_do = []
for ind, rel in node.kids:
to_do.append(self.get(ind))
words.append((ind, self.get(ind).word))
while to_do:
curr = to_do.pop()
# add this kid's kids to to_do
if len(curr.kids) > 0:
for ind, rel in curr.kids:
words.append((ind, self.get(ind).word))
to_do.insert(0, self.get(ind))
return ' '.join([word for ind, word in sorted(words, key=itemgetter(0) ) ]).strip()
def reset_finished(self):
for node in self.get_nodes():
node.finished = 0
# one tree's error is the sum of the error at all nodes of the tree
def error(self):
sum = 0.0
for node in self.get_nodes():
sum += node.ans_error
return sum
# - this function is not used in the paper, but it enables corrective weighting ala
# Richard's 2014 TACL paper to be implemented
# - it counts the size of the subtree rooted at every node in the tree
def count_kids(self):
for node in self.get_nodes():
# all nodes include themselves
node.count = 1.0
# no kids, count is just 1
if len(node.kids) == 0:
continue
to_do = [self.get(ind) for ind, rel in node.kids]
# otherwise, traverse subtrees
while to_do:
curr = to_do.pop()
node.count += 1
# add this kid's kids to to_do
if len(curr.kids) > 0:
for ind, rel in curr.kids:
to_do.append(self.get(ind))
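# A tiny standalone example (not from the original file) of building a dtree by hand.
# The sentence and relation labels are made up; index 0 plays the role of the root
# token, as in the preprocessing scripts mentioned above.
if __name__ == '__main__':
    t = dtree(['ROOT', 'dogs', 'bark'])
    t.add_edge(0, 2, 'root')    # ROOT -> bark
    t.add_edge(2, 1, 'nsubj')   # bark -> dogs
    print(t.get_words())        # 'dogs bark'
    print(t.get_phrase(2))      # 'dogs bark'
    t.count_kids()
    print(t.get(2).count)       # 2.0 (bark plus its one dependent)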
|
StarcoderdataPython
|
1786647
|
def least_rotation(s):
a, n = 0, len(s)
s = s + s
for b in range(n):
for i in range(n):
if (a + i == b) or (s[a + i] < s[b + i]):
b += max(0, i - 1)
break
if s[a + i] > s[b + i]:
a = b
break
return s[a:a + n]
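# Quick sanity checks (not part of the original snippet): the function returns the
# lexicographically smallest rotation of its input.
if __name__ == '__main__':
    print(least_rotation('cab'))    # 'abc'
    print(least_rotation('baca'))   # 'abac'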
|
StarcoderdataPython
|
8193641
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import time
from uuid import uuid4
from marquez_client.clients import Clients
from marquez_client.models import JobType
import airflow.models
from marquez_airflow import log
from marquez_airflow.extractors import (Dataset, Source, StepMetadata,
get_extractors)
from marquez_airflow.utils import JobIdMapping, get_location
from pendulum import Pendulum
_NOMINAL_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
class DAG(airflow.models.DAG):
DEFAULT_NAMESPACE = 'default'
_job_id_mapping = None
_marquez_client = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._marquez_dataset_cache = {}
self._marquez_source_cache = {}
self.marquez_namespace = os.getenv('MARQUEZ_NAMESPACE',
DAG.DEFAULT_NAMESPACE)
self._job_id_mapping = JobIdMapping()
def create_dagrun(self, *args, **kwargs):
# run Airflow's create_dagrun() first
dagrun = super(DAG, self).create_dagrun(*args, **kwargs)
create_dag_start_ms = self._now_ms()
execution_date = kwargs.get('execution_date')
run_args = {
'external_trigger': kwargs.get('external_trigger', False)
}
extractors = {}
try:
extractors = get_extractors()
except Exception as e:
            log.warn(f'Failed to retrieve extractors: {e}',
airflow_dag_id=self.dag_id,
marquez_namespace=self.marquez_namespace)
# Marquez metadata collection
try:
marquez_client = self.get_marquez_client()
# Create the Namespace
# TODO: Use 'anonymous' owner for now, but we may want to use
# the 'owner' attribute defined via default_args for a DAG
marquez_client.create_namespace(self.marquez_namespace,
"anonymous")
# Register each task in the DAG
for task_id, task in self.task_dict.items():
t = self._now_ms()
try:
self.report_task(
dagrun.run_id,
execution_date,
run_args,
task,
extractors.get(task.__class__.__name__))
except Exception as e:
log.error(f'Failed to record task: {e}',
airflow_dag_id=self.dag_id,
task_id=task_id,
marquez_namespace=self.marquez_namespace,
duration_ms=(self._now_ms() - t))
log.info('Successfully recorded metadata',
airflow_dag_id=self.dag_id,
marquez_namespace=self.marquez_namespace,
duration_ms=(self._now_ms() - create_dag_start_ms))
except Exception as e:
log.error(f'Failed to record metadata: {e}',
airflow_dag_id=self.dag_id,
marquez_namespace=self.marquez_namespace,
duration_ms=(self._now_ms() - create_dag_start_ms))
return dagrun
def handle_callback(self, *args, **kwargs):
try:
dagrun = args[0]
task_instances = dagrun.get_task_instances()
for ti in task_instances:
try:
job_name = f'{ti.dag_id}.{ti.task_id}'
self.report_jobrun_change(
job_name, dagrun.run_id, **kwargs)
except Exception as e:
log.error(
f'Failed to record task run state change: {e}',
dag_id=self.dag_id)
except Exception as e:
log.error(
f'Failed to record dagrun state change: {e}',
dag_id=self.dag_id)
return super().handle_callback(*args, **kwargs)
def report_task(self,
dag_run_id,
execution_date,
run_args,
task,
extractor):
report_job_start_ms = self._now_ms()
marquez_client = self.get_marquez_client()
if execution_date:
start_time = self._to_iso_8601(execution_date)
end_time = self.compute_endtime(execution_date)
else:
start_time = None
end_time = None
if end_time:
end_time = self._to_iso_8601(end_time)
task_location = None
try:
if hasattr(task, 'file_path') and task.file_path:
task_location = get_location(task.file_path)
else:
task_location = get_location(task.dag.fileloc)
except Exception:
log.warn('Unable to fetch the location')
steps_metadata = []
if extractor:
try:
log.info(f'Using extractor {extractor.__name__}',
task_type=task.__class__.__name__,
airflow_dag_id=self.dag_id,
task_id=task.task_id,
airflow_run_id=dag_run_id,
marquez_namespace=self.marquez_namespace)
steps_metadata = extractor(task).extract()
except Exception as e:
log.error(f'Failed to extract metadata {e}',
airflow_dag_id=self.dag_id,
task_id=task.task_id,
airflow_run_id=dag_run_id,
marquez_namespace=self.marquez_namespace)
else:
log.warn('Unable to find an extractor.',
task_type=task.__class__.__name__,
airflow_dag_id=self.dag_id,
task_id=task.task_id,
airflow_run_id=dag_run_id,
marquez_namespace=self.marquez_namespace)
task_name = f'{self.dag_id}.{task.task_id}'
# If no extractor found or failed to extract metadata,
# report the task metadata
if not steps_metadata:
steps_metadata = [StepMetadata(
name=task_name,
context={
'airflow.operator': task.__class__.__name__,
'airflow.task_info': str(task.__dict__)
})]
# store all the JobRuns associated with a task
marquez_jobrun_ids = []
for step in steps_metadata:
input_datasets = []
output_datasets = []
try:
input_datasets = self.register_datasets(step.inputs)
except Exception as e:
log.error(f'Failed to register inputs: {e}',
inputs=str(step.inputs),
airflow_dag_id=self.dag_id,
task_id=task.task_id,
step=step.name,
airflow_run_id=dag_run_id,
marquez_namespace=self.marquez_namespace)
try:
output_datasets = self.register_datasets(step.outputs)
except Exception as e:
log.error(f'Failed to register outputs: {e}',
outputs=str(step.outputs),
airflow_dag_id=self.dag_id,
task_id=task.task_id,
step=step.name,
airflow_run_id=dag_run_id,
marquez_namespace=self.marquez_namespace)
marquez_client.create_job(job_name=step.name,
job_type=JobType.BATCH, # job type
location=(step.location or
task_location),
input_dataset=input_datasets,
output_dataset=output_datasets,
context=step.context,
description=self.description,
namespace_name=self.marquez_namespace)
log.info(f'Successfully recorded job: {step.name}',
airflow_dag_id=self.dag_id,
marquez_namespace=self.marquez_namespace)
# TODO: Look into generating a uuid based on the DAG run_id
external_run_id = str(uuid4())
marquez_client.create_job_run(
namespace_name=self.marquez_namespace,
job_name=step.name,
run_id=external_run_id,
run_args=run_args,
nominal_start_time=start_time,
nominal_end_time=end_time)
if external_run_id:
marquez_jobrun_ids.append(external_run_id)
marquez_client.mark_job_run_as_started(external_run_id)
else:
log.error(f'Failed to get run id: {step.name}',
airflow_dag_id=self.dag_id,
airflow_run_id=dag_run_id,
marquez_namespace=self.marquez_namespace)
log.info(f'Successfully recorded job run: {step.name}',
airflow_dag_id=self.dag_id,
airflow_dag_execution_time=start_time,
marquez_run_id=external_run_id,
marquez_namespace=self.marquez_namespace,
duration_ms=(self._now_ms() - report_job_start_ms))
# Store the mapping for all the steps associated with a task
try:
self._job_id_mapping.set(
JobIdMapping.make_key(task_name, dag_run_id),
json.dumps(marquez_jobrun_ids))
except Exception as e:
log.error(f'Failed to set id mapping : {e}',
airflow_dag_id=self.dag_id,
task_id=task.task_id,
airflow_run_id=dag_run_id,
marquez_run_id=marquez_jobrun_ids,
marquez_namespace=self.marquez_namespace)
def compute_endtime(self, execution_date):
return self.following_schedule(execution_date)
def report_jobrun_change(self, job_name, run_id, **kwargs):
session = kwargs.get('session')
marquez_job_run_ids = self._job_id_mapping.pop(
JobIdMapping.make_key(job_name, run_id), session)
if marquez_job_run_ids:
log.info('Found job runs.',
airflow_dag_id=self.dag_id,
airflow_job_id=job_name,
airflow_run_id=run_id,
marquez_run_ids=marquez_job_run_ids,
marquez_namespace=self.marquez_namespace)
ids = json.loads(marquez_job_run_ids)
if kwargs.get('success'):
for marquez_job_run_id in ids:
self.get_marquez_client().mark_job_run_as_completed(
marquez_job_run_id)
else:
for marquez_job_run_id in ids:
self.get_marquez_client().mark_job_run_as_failed(
marquez_job_run_id)
state = 'COMPLETED' if kwargs.get('success') else 'FAILED'
log.info(f'Marked job run(s) as {state}.',
airflow_dag_id=self.dag_id,
airflow_job_id=job_name,
airflow_run_id=run_id,
marquez_run_id=marquez_job_run_ids,
marquez_namespace=self.marquez_namespace)
def get_marquez_client(self):
if not self._marquez_client:
self._marquez_client = Clients.new_write_only_client()
return self._marquez_client
@staticmethod
def _now_ms():
return int(round(time.time() * 1000))
def register_datasets(self, datasets):
dataset_names = []
if not datasets:
return dataset_names
client = self.get_marquez_client()
for dataset in datasets:
if isinstance(dataset, Dataset):
_key = str(dataset)
if _key not in self._marquez_dataset_cache:
source_name = self.register_source(
dataset.source)
if source_name:
dataset = client.create_dataset(
dataset.name,
dataset.type,
dataset.name, # physical_name the same for now
source_name,
namespace_name=self.marquez_namespace)
dataset_name = dataset.get('name')
if dataset_name:
self._marquez_dataset_cache[_key] = dataset_name
dataset_names.append(dataset_name)
else:
dataset_names.append(self._marquez_dataset_cache[_key])
return dataset_names
def register_source(self, source):
if isinstance(source, Source):
_key = str(source)
if _key in self._marquez_source_cache:
return self._marquez_source_cache[_key]
client = self.get_marquez_client()
ds = client.create_source(source.name,
source.type,
source.connection_url)
source_name = ds.get('name')
self._marquez_source_cache[_key] = source_name
return source_name
@staticmethod
def _to_iso_8601(dt):
if isinstance(dt, Pendulum):
return dt.format(_NOMINAL_TIME_FORMAT)
else:
return dt.strftime(_NOMINAL_TIME_FORMAT)
|
StarcoderdataPython
|
8028970
|
<gh_stars>0
import torch
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
class EncoderCNN(nn.Module):
def __init__(self, embed_size):
super(EncoderCNN, self).__init__()
resnet = models.resnet50(pretrained=True)
for param in resnet.parameters():
param.requires_grad_(False)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.embed = nn.Linear(resnet.fc.in_features, embed_size)
def forward(self, images):
features = self.resnet(images)
features = features.view(features.size(0), -1)
features = self.embed(features)
return features
###
# Added for comparison of results.
# Taken from the tutorial at:
# https://medium.com/analytics-vidhya/image-captioning-with-attention-part-1-e8a5f783f6d3
###
class EncoderCNNv1(nn.Module):
def __init__(self, embed_size):
super(EncoderCNNv1, self).__init__()
resnet = models.resnet50(pretrained=True)
for param in resnet.parameters():
param.requires_grad_(False)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.embed = nn.Linear(resnet.fc.in_features, embed_size)
def forward(self, images):
features = self.resnet(images)
# first, we need to resize the tensor to be
# (batch, size*size, feature_maps)
batch, feature_maps, size_1, size_2 = features.size()
features = features.permute(0, 2, 3, 1)
features = features.view(batch, size_1*size_2, feature_maps)
return features
# Simplest Decoder
class DecoderRNNv101(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
super().__init__()
self.hidden_size = hidden_size
self.embed_size = embed_size
self.vocab_size = vocab_size
self.num_layers = num_layers
# Embedding Layer: transform captions into embeded_size
self.embedding = nn.Embedding(vocab_size, embed_size)
# LSTM Layer: Do the magic of finding the next word
self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first = True)
# convert output from LSTM into predictions for each word in vocab
self.linear = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
embed = self.embedding(captions)
embed = torch.cat((features.unsqueeze(1), embed), dim = 1)
# Initialize the hidden state
batch_size = features.shape[0]
lstm_outputs, _ = self.lstm(embed, self.init_hidden(batch_size))
out = self.linear(lstm_outputs)
return out
def sample(self, features, states=None, end_word = 1, max_len=20):
output_ids = []
inputs = features.unsqueeze(1)
for i in range(max_len):
# pass data through recurrent network
hiddens, states = self.lstm(inputs, states)
outputs = self.linear(hiddens.squeeze(1))
# find maximal predictions
predicted = outputs.max(1)[1]
# append results from given step to global results
output_ids.append(predicted.cpu().numpy()[0].item())
# prepare chosen words for next decoding step
inputs = self.embedding(predicted)
inputs = inputs.unsqueeze(1)
# arrived to the end of the sentence
if predicted == end_word : break
return output_ids
# taken from the previous lesson
def init_hidden(self, batch_size):
""" At the start of training, we need to initialize a hidden state;
there will be none because the hidden state is formed based on previously seen data.
So, this function defines a hidden state with all zeroes
The axes semantics are (num_layers, batch_size, hidden_dim)
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return (torch.zeros((1, batch_size, self.hidden_size), device=device), \
torch.zeros((1, batch_size, self.hidden_size), device=device))
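# A minimal end-to-end sketch (not part of the original training code) that wires
# EncoderCNN into DecoderRNNv101 with random tensors. The sizes are arbitrary and
# resnet50(pretrained=True) downloads weights on first use; call the helper manually
# to try it.
def _demo_encoder_decoder_v101():
    embed_size, hidden_size, vocab_size = 256, 512, 1000
    encoder = EncoderCNN(embed_size)
    decoder = DecoderRNNv101(embed_size, hidden_size, vocab_size)
    images = torch.randn(4, 3, 224, 224)               # dummy image batch
    captions = torch.randint(0, vocab_size, (4, 12))    # dummy token ids
    features = encoder(images)                          # shape (4, embed_size)
    scores = decoder(features, captions)                # shape (4, 13, vocab_size)
    return features.shape, scores.shape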
# Slightly more complex than v101: Linear layers are used to initialize the LSTM states,
# which proved to perform better
class DecoderRNNv102(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
super().__init__()
self.hidden_size = hidden_size
self.embed_size = embed_size
self.vocab_size = vocab_size
self.num_layers = num_layers
# Embedding Layer: transform captions into embeded_size
self.embedding = nn.Embedding(vocab_size, embed_size)
# LSTM Layer: Do the magic of finding the next word
self.lstm_hc = (nn.Linear(self.embed_size, self.hidden_size), nn.Linear(self.embed_size, self.hidden_size))
self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first = True)
# convert output from LSTM into predictions for each word in vocab
self.linear = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
embed = self.embedding(captions)
embed = torch.cat((features.unsqueeze(1), embed), dim = 1)
# Initialize the hidden state
self.lstm_hc = self.init_hd_hidden(self.lstm_hc[0], self.lstm_hc[1], features)
lstm_outputs, self.lstm_hc = self.lstm(embed, self.lstm_hc)
out = self.linear(lstm_outputs)
return out
def sample(self, inputs, hidden=None, end_word = 1, max_len=20):
" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) "
output = []
# Either get hidden states already from pretrained or initialize
if hidden is None:
batch_size = inputs.shape[0]
hidden = self.init_hidden(batch_size)
while True:
lstm_out, hidden = self.lstm(inputs, hidden)
outputs = self.linear(lstm_out)
outputs = outputs.squeeze(1)
# get the word with the best ranking
_, found_word = torch.max(outputs, dim=1)
# save new word
output.append(found_word.cpu().numpy()[0].item()) # storing the word predicted
# In case new word is the end of the sentence... end the sampling
if found_word == end_word or len(output) > max_len: break
# embed the last predicted word to predict next
inputs = self.embedding(found_word)
inputs = inputs.unsqueeze(1)
return output
def init_hd_hidden(self, h, c, features):
if torch.cuda.is_available(): h = h.cuda(); c = c.cuda()
h = h(features).unsqueeze(0)
c = c(features).unsqueeze(0)
return h, c
def init_hidden(self, batch_size):
""" At the start of training, we need to initialize a hidden state;
there will be none because the hidden state is formed based on previously seen data.
So, this function defines a hidden state with all zeroes
The axes semantics are (num_layers, batch_size, hidden_dim)
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return (torch.zeros((1, batch_size, self.hidden_size), device=device), \
torch.zeros((1, batch_size, self.hidden_size), device=device))
# Adds a MultiheadAttention layer after the LSTM, trying to focus the attention on the LSTM output.
# The operations are performed as EMB -> LSTM -> Attention -> Linear
class DecoderRNNv120(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1, num_heads=8):
super().__init__()
self.hidden_size = hidden_size
self.embed_size = embed_size
self.vocab_size = vocab_size
self.num_layers = num_layers
self.num_heads = num_heads
# Embedding Layer: transform captions into embeded_size
self.embedding = nn.Embedding(vocab_size, embed_size)
# Get the focus from features where it should
self.attention = nn.MultiheadAttention(hidden_size, num_heads)
# LSTM Layer: Do the magic of finding the next word
self.lstm_hc = (nn.Linear(self.embed_size, self.hidden_size), nn.Linear(self.embed_size, self.hidden_size))
self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first = True)
# convert output from LSTM into predictions for each word in vocab
self.linear = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
embed = self.embedding(captions)
embed = torch.cat((features.unsqueeze(1), embed), dim = 1)
# Initialize the hidden state
self.lstm_hc = self.init_hd_hidden(self.lstm_hc[0], self.lstm_hc[1], features)
lstm_outputs, self.lstm_hc = self.lstm(embed, self.lstm_hc)
att_out, _ = self.attention(lstm_outputs, lstm_outputs, lstm_outputs)
out = self.linear(att_out)
return out
def sample(self, inputs, hidden=None, end_word = 1, max_len=20):
" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) "
output = []
if hidden is None:
batch_size = inputs.shape[0]
hidden = self.init_hidden(batch_size)
while True:
lstm_out, hidden = self.lstm(inputs, hidden)
outputs, _ = self.attention(lstm_out, lstm_out, lstm_out)
outputs = self.linear(outputs)
outputs = outputs.squeeze(1)
# get the word with the best ranking
_, max_indice = torch.max(outputs, dim=1)
# save new word
output.append(max_indice.cpu().numpy()[0].item()) # storing the word predicted
# In case new word is the end of the sentence... end the sampling
if max_indice == end_word or len(output) > max_len: break
# embed the last predicted word to predict next
inputs = self.embedding(max_indice)
inputs = inputs.unsqueeze(1)
return output
def init_hd_hidden(self, h, c, features):
if torch.cuda.is_available(): h = h.cuda(); c = c.cuda()
h = h(features).unsqueeze(0)
c = c(features).unsqueeze(0)
return h, c
def init_hidden(self, batch_size):
""" At the start of training, we need to initialize a hidden state;
there will be none because the hidden state is formed based on previously seen data.
So, this function defines a hidden state with all zeroes
The axes semantics are (num_layers, batch_size, hidden_dim)
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return (torch.zeros((1, batch_size, self.hidden_size), device=device), \
torch.zeros((1, batch_size, self.hidden_size), device=device))
# Adds a MultiheadAttention layer before the LSTM, trying to focus the attention on the features first.
# It actually performs better than v120.
# The operations are performed as EMB -> Attention -> LSTM -> Linear
class DecoderRNNv121(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1, num_heads=8):
super().__init__()
self.hidden_size = hidden_size
self.embed_size = embed_size
self.vocab_size = vocab_size
self.num_layers = num_layers
self.num_heads = num_heads
# Embedding Layer: transform captions into embeded_size
self.embedding = nn.Embedding(vocab_size, embed_size)
# Get the focus from features where it should
self.attention = nn.MultiheadAttention(embed_size, num_heads)
# LSTM Layer: Do the magic of finding the next word
self.lstm_hc = (nn.Linear(self.embed_size, self.hidden_size), nn.Linear(self.embed_size, self.hidden_size))
self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first = True)
# convert output from LSTM into predictions for each word in vocab
self.linear = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
embed = self.embedding(captions)
embed = torch.cat((features.unsqueeze(1), embed), dim = 1)
# Initialize the hidden state
self.lstm_hc = self.init_hd_hidden(self.lstm_hc[0], self.lstm_hc[1], features)
lstm_outputs, self.lstm_hc = self.lstm(embed, self.lstm_hc)
att_out, _ = self.attention(lstm_outputs, lstm_outputs, lstm_outputs)
out = self.linear(att_out)
return out
def sample(self, inputs, hidden=None, end_word = 1, max_len=20):
" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) "
output = []
if hidden is None:
batch_size = inputs.shape[0]
hidden = self.init_hidden(batch_size)
while True:
outputs, _ = self.attention(inputs, inputs, inputs)
outputs, hidden = self.lstm(outputs, hidden)
outputs = self.linear(outputs)
outputs = outputs.squeeze(1)
_, found_word = torch.max(outputs, dim=1) # predict the most likely next word, found_word shape : (1)
# save new word
output.append(found_word.cpu().numpy()[0].item())
# In case new word is the end of the sentence... end the sampling
if found_word == end_word or len(output) > max_len: break
# embed the last predicted word to predict next
inputs = self.embedding(found_word)
inputs = inputs.unsqueeze(1)
return output
def init_hd_hidden(self, h, c, features):
if torch.cuda.is_available(): h = h.cuda(); c = c.cuda()
h = h(features).unsqueeze(0)
c = c(features).unsqueeze(0)
return h, c
def init_hidden(self, batch_size):
""" At the start of training, we need to initialize a hidden state;
there will be none because the hidden state is formed based on previously seen data.
So, this function defines a hidden state with all zeroes
The axes semantics are (num_layers, batch_size, hidden_dim)
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return (torch.zeros((1, batch_size, self.hidden_size), device=device), \
torch.zeros((1, batch_size, self.hidden_size), device=device))
###
# Added for comparison of results.
# Taken from the tutorial at:
# https://medium.com/analytics-vidhya/image-captioning-with-attention-part-1-e8a5f783f6d3
###
class BahdanauAttention(nn.Module):
""" Class performs Additive Bahdanau Attention.
Source: https://arxiv.org/pdf/1409.0473.pdf
"""
def __init__(self, num_features, hidden_dim, output_dim = 1):
super(BahdanauAttention, self).__init__()
self.num_features = num_features
self.hidden_dim = hidden_dim
self.output_dim = output_dim
# fully-connected layer to learn first weight matrix Wa
self.W_a = nn.Linear(self.num_features, self.hidden_dim)
# fully-connected layer to learn the second weight matrix Ua
self.U_a = nn.Linear(self.hidden_dim, self.hidden_dim)
# fully-connected layer to produce score (output), learning weight matrix va
self.v_a = nn.Linear(self.hidden_dim, self.output_dim)
def forward(self, features, decoder_hidden):
"""
Arguments:
----------
- features - features returned from Encoder
- decoder_hidden - hidden state output from Decoder
Returns:
---------
- context - context vector with a size of (1,2048)
- atten_weight - probabilities, express the feature relevance
"""
# add additional dimension to a hidden (required for summation)
decoder_hidden = decoder_hidden.unsqueeze(1)
atten_1 = self.W_a(features)
atten_2 = self.U_a(decoder_hidden)
# apply tangent to combine result from 2 fc layers
atten_tan = torch.tanh(atten_1+atten_2)
atten_score = self.v_a(atten_tan)
atten_weight = F.softmax(atten_score, dim = 1)
# first, we will multiply each vector by its softmax score
# next, we will sum up this vectors, producing the attention context vector
# the size of context equals to a number of feature maps
context = torch.sum(atten_weight * features, dim = 1)
atten_weight = atten_weight.squeeze(dim=2)
return context, atten_weight
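# A small shape check for the attention block above (not part of the original
# tutorial); the feature-map and hidden sizes below are arbitrary.
def _demo_bahdanau_attention():
    attention = BahdanauAttention(num_features=2048, hidden_dim=512)
    features = torch.randn(2, 49, 2048)     # (batch, spatial locations, feature maps)
    decoder_hidden = torch.randn(2, 512)    # (batch, hidden_dim)
    context, atten_weight = attention(features, decoder_hidden)
    return context.shape, atten_weight.shape  # torch.Size([2, 2048]) and torch.Size([2, 49])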
###
# Added for comparison of results.
# Taken from the tutorial at:
# https://medium.com/analytics-vidhya/image-captioning-with-attention-part-1-e8a5f783f6d3
###
class DecoderRNNv200(nn.Module):
"""Attributes:
- embedding_dim - specified size of embeddings;
- hidden_dim - the size of RNN layer (number of hidden states)
- vocab_size - size of vocabulary
- p - dropout probability
"""
def __init__(self, num_features, embedding_dim, hidden_dim, vocab_size, p =0.5):
super(DecoderRNNv200, self).__init__()
self.num_features = num_features
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
# scale the inputs to softmax
self.sample_temp = 0.5
# embedding layer that turns words into a vector of a specified size
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
# LSTM will have a single layer of size 512 (512 hidden units)
# it will input concatinated context vector (produced by attention)
# and corresponding hidden state of Decoder
self.lstm = nn.LSTMCell(embedding_dim + num_features, hidden_dim)
# produce the final output
self.fc = nn.Linear(hidden_dim, vocab_size)
# add attention layer
self.attention = BahdanauAttention(num_features, hidden_dim)
# dropout layer
self.drop = nn.Dropout(p=p)
# add initialization fully-connected layers
# initialize hidden state and cell memory using average feature vector
# Source: https://arxiv.org/pdf/1502.03044.pdf
self.init_h = nn.Linear(num_features, hidden_dim)
self.init_c = nn.Linear(num_features, hidden_dim)
def forward(self, captions, features, sample_prob = 0.0):
import numpy as np
embed = self.embeddings(captions)
h, c = self.init_hidden(features)
seq_len = captions.size(1)
feature_size = features.size(1)
batch_size = features.size(0)
# these tensors will store the outputs from lstm cell and attention weights
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
outputs = torch.zeros(batch_size, seq_len, self.vocab_size).to(device)
atten_weights = torch.zeros(batch_size, seq_len, feature_size).to(device)
# scheduled sampling for training
# we do not use it at the first timestep (<start> word)
# but later we check if the probability is bigger than random
for t in range(seq_len):
sample_prob = 0.0 if t == 0 else 0.5
use_sampling = np.random.random() < sample_prob
if use_sampling == False:
word_embed = embed[:,t,:]
context, atten_weight = self.attention(features, h)
# input_concat shape at time step t = (batch, embedding_dim + hidden_dim)
input_concat = torch.cat([word_embed, context], 1)
h, c = self.lstm(input_concat, (h,c))
h = self.drop(h)
output = self.fc(h)
if use_sampling == True:
# use sampling temperature to amplify the values before applying softmax
scaled_output = output / self.sample_temp
scoring = F.log_softmax(scaled_output, dim=1)
top_idx = scoring.topk(1)[1]
word_embed = self.embeddings(top_idx).squeeze(1)
outputs[:, t, :] = output
#atten_weights[:, t, :] = atten_weights
return outputs, atten_weights
def init_hidden(self, features):
"""Initializes hidden state and cell memory using average feature vector.
Arguments:
----------
- features - features returned from Encoder
Retruns:
----------
- h0 - initial hidden state (short-term memory)
- c0 - initial cell state (long-term memory)
"""
mean_annotations = torch.mean(features, dim = 1)
h0 = self.init_h(mean_annotations)
c0 = self.init_c(mean_annotations)
return h0, c0
def sample(self, features, max_sentence = 20):
"""Greedy search to sample top candidate from distribution.
Arguments
----------
- features - features returned from Encoder
- max_sentence - max number of token per caption (default=20)
Returns:
----------
- sentence - list of tokens
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
sentence = []
weights = []
input_word = torch.tensor(0).unsqueeze(0).to(device)
h, c = self.init_hidden(features)
while True:
embedded_word = self.embeddings(input_word)
context, atten_weight = self.attention(features, h)
# input_concat shape at time step t = (batch, embedding_dim + context size)
input_concat = torch.cat([embedded_word, context], dim = 1)
h, c = self.lstm(input_concat, (h,c))
h = self.drop(h)
output = self.fc(h)
scoring = F.log_softmax(output, dim=1)
top_idx = scoring[0].topk(1)[1]
sentence.append(top_idx.item())
weights.append(atten_weight)
input_word = top_idx
if (len(sentence) >= max_sentence or top_idx == 1):
break
return sentence, weights
|
StarcoderdataPython
|
9614601
|
# -*- coding: utf-8 -*-
import openerp
from openerp.http import request
from openerp.osv import osv
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
from datetime import datetime
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
import werkzeug.urls
import urllib2
import simplejson
import logging
_logger = logging.getLogger(__name__)
TIMEOUT = 20
class google_service(osv.osv_memory):
_name = 'google.service'
def generate_refresh_token(self, cr, uid, service, authorization_code, context=None):
ir_config = self.pool['ir.config_parameter']
client_id = ir_config.get_param(cr, SUPERUSER_ID, 'google_%s_client_id' % service)
client_secret = ir_config.get_param(cr, SUPERUSER_ID, 'google_%s_client_secret' % service)
redirect_uri = ir_config.get_param(cr, SUPERUSER_ID, 'google_redirect_uri')
#Get the Refresh Token From Google And store it in ir.config_parameter
headers = {"Content-type": "application/x-www-form-urlencoded"}
data = dict(code=authorization_code, client_id=client_id, client_secret=client_secret, redirect_uri=redirect_uri, grant_type="authorization_code")
data = werkzeug.url_encode(data)
try:
req = urllib2.Request("https://accounts.google.com/o/oauth2/token", data, headers)
content = urllib2.urlopen(req, timeout=TIMEOUT).read()
except urllib2.HTTPError:
error_msg = "Something went wrong during your token generation. Maybe your Authorization Code is invalid or already expired"
raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
content = simplejson.loads(content)
return content.get('refresh_token')
def _get_google_token_uri(self, cr, uid, service, scope, context=None):
ir_config = self.pool['ir.config_parameter']
        params = {
            'scope': scope,
            'redirect_uri': ir_config.get_param(cr, SUPERUSER_ID, 'google_redirect_uri'),
            'client_id': ir_config.get_param(cr, SUPERUSER_ID, 'google_%s_client_id' % service),
            'response_type': 'code',
        }
uri = 'https://accounts.google.com/o/oauth2/auth?%s' % werkzeug.url_encode(params)
return uri
# If no scope is passed, a default scope is derived from the service name
def _get_authorize_uri(self, cr, uid, from_url, service, scope=False, context=None):
""" This method return the url needed to allow this instance of OpenErp to access to the scope of gmail specified as parameters """
state_obj = dict(d=cr.dbname, s=service, f=from_url)
base_url = self.get_base_url(cr, uid, context)
client_id = self.get_client_id(cr, uid, service, context)
params = {
'response_type': 'code',
'client_id': client_id,
'state': simplejson.dumps(state_obj),
'scope': scope or 'https://www.googleapis.com/auth/%s' % (service,),
'redirect_uri': base_url + '/google_account/authentication',
'approval_prompt': 'force',
'access_type': 'offline'
}
uri = self.get_uri_oauth(a='auth') + "?%s" % werkzeug.url_encode(params)
return uri
def _get_google_token_json(self, cr, uid, authorize_code, service, context=None):
res = False
base_url = self.get_base_url(cr, uid, context)
client_id = self.get_client_id(cr, uid, service, context)
client_secret = self.get_client_secret(cr, uid, service, context)
params = {
'code': authorize_code,
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'authorization_code',
'redirect_uri': base_url + '/google_account/authentication'
}
headers = {"content-type": "application/x-www-form-urlencoded"}
try:
uri = self.get_uri_oauth(a='token')
data = werkzeug.url_encode(params)
st, res, ask_time = self._do_request(cr, uid, uri, params=data, headers=headers, type='POST', preuri='', context=context)
except urllib2.HTTPError:
error_msg = "Something went wrong during your token generation. Maybe your Authorization Code is invalid"
raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
return res
def _refresh_google_token_json(self, cr, uid, refresh_token, service, context=None):  # exchange a stored refresh token for a new access token (e.g. service = 'calendar')
res = False
client_id = self.get_client_id(cr, uid, service, context)
client_secret = self.get_client_secret(cr, uid, service, context)
params = {
'refresh_token': refresh_token,
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'refresh_token',
}
headers = {"content-type": "application/x-www-form-urlencoded"}
try:
uri = self.get_uri_oauth(a='token')
data = werkzeug.url_encode(params)
st, res, ask_time = self._do_request(cr, uid, uri, params=data, headers=headers, type='POST', preuri='', context=context)
except urllib2.HTTPError, e:
if e.code == 400: # invalid grant
registry = openerp.modules.registry.RegistryManager.get(request.session.db)
with registry.cursor() as cur:
self.pool['res.users'].write(cur, uid, [uid], {'google_%s_rtoken' % service: False}, context=context)
error_key = simplejson.loads(e.read()).get("error", "nc")
_logger.exception("Bad Google request: %s!" % error_key)
error_msg = "Something went wrong during your token generation. Maybe your Authorization Code is invalid or already expired [%s]" % error_key
raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
return res
def _do_request(self, cr, uid, uri, params={}, headers={}, type='POST', preuri="https://www.googleapis.com", context=None):
""" Execute the HTTP request against Google's API and return a tuple (status, response, ask_time). """
if context is None:
context = {}
_logger.debug("Uri: %s - Type: %s - Headers: %s - Params: %s" % (uri, type, headers, werkzeug.url_encode(params) if type == 'GET' else params))
status = 418
response = ""
ask_time = datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
try:
if type.upper() in ('GET', 'DELETE'):
data = werkzeug.url_encode(params)
req = urllib2.Request(preuri + uri + "?" + data)
elif type.upper() in ('POST', 'PATCH', 'PUT'):
req = urllib2.Request(preuri + uri, params, headers)
else:
raise Exception('Method not supported [%s] not in [GET, POST, PUT, PATCH or DELETE]!' % (type,))
req.get_method = lambda: type.upper()
request = urllib2.urlopen(req, timeout=TIMEOUT)
status = request.getcode()
if int(status) in (204, 404):  # 204 No Content / 404 Not Found: nothing to parse
response = False
else:
content = request.read()
response = simplejson.loads(content)
try:
ask_time = datetime.strptime(request.headers.get('date'), "%a, %d %b %Y %H:%M:%S %Z")
except (TypeError, ValueError):
pass
except urllib2.HTTPError, e:
if e.code in (204, 404):
status = e.code
response = ""
else:
_logger.exception("Bad Google request: %s!" % e.read())
if e.code in (400, 401, 410):
raise e
raise self.pool.get('res.config.settings').get_config_warning(cr, _("Something went wrong with your request to Google"), context=context)
return (status, response, ask_time)
def get_base_url(self, cr, uid, context=None):
return self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='http://www.openerp.com?NoBaseUrl', context=context)
def get_client_id(self, cr, uid, service, context=None):
return self.pool.get('ir.config_parameter').get_param(cr, SUPERUSER_ID, 'google_%s_client_id' % (service,), default=False, context=context)
def get_client_secret(self, cr, uid, service, context=None):
return self.pool.get('ir.config_parameter').get_param(cr, SUPERUSER_ID, 'google_%s_client_secret' % (service,), default=False, context=context)
def get_uri_oauth(self, a=''): # a = optional action
return "https://accounts.google.com/o/oauth2/%s" % (a,)
def get_uri_api(self):
return 'https://www.googleapis.com'
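# Hedged usage sketch (not part of the original module): one way the OAuth helpers above
# might be chained from other server-side code. `registry`, `cr` and `uid` are assumed to
# come from the surrounding OpenERP environment, and 'calendar' is only an example service
# name; in a real flow the user interaction (visiting the consent URL, pasting back the
# authorization code) happens between the steps.
def example_google_oauth_flow(registry, cr, uid, authorization_code, context=None):
    """Illustrative only: build the consent URL, exchange the code, then refresh the token."""
    gs = registry['google.service']
    # 1. URL the user must visit to grant this instance access to the 'calendar' scope.
    authorize_url = gs._get_authorize_uri(cr, uid, from_url='/web', service='calendar', context=context)
    # 2. Exchange the authorization code pasted back by the user for a refresh token.
    refresh_token = gs.generate_refresh_token(cr, uid, 'calendar', authorization_code, context=context)
    # 3. Later, trade the stored refresh token for a fresh access token when needed.
    token_json = gs._refresh_google_token_json(cr, uid, refresh_token, 'calendar', context=context)
    return authorize_url, token_json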