id (string, 1-8 chars) | text (string, 6-1.05M chars) | dataset_id (string, 1 class)
---|---|---|
4980848
|
<filename>SRC/Chapter_13-Advanced-Iteration/01_read_csv.py
with open(
"/Users/arunab/myWork/myTutorials/myPython/HeadFirstPython/SRC/Chapter_13-Advanced-Iteration/buzzdata.csv"
) as raw_data:
print(raw_data.read())
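# Follow-on sketch (an illustration, not part of the original snippet): the
# standard-library csv module parses each line of the same file into a list of
# fields instead of dumping the raw text.
import csv

with open(
    "/Users/arunab/myWork/myTutorials/myPython/HeadFirstPython/SRC/Chapter_13-Advanced-Iteration/buzzdata.csv"
) as raw_data:
    for row in csv.reader(raw_data):
        print(row)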
|
StarcoderdataPython
|
6422079
|
# -*- coding: utf-8 -*-
from typing import Union
from ink.sys.database.connector import BaseConnector
class NullConnector(BaseConnector):
def connect(self, connect_config: dict):
print('== NullConnector : connect ==')
def close(self):
print('== NullConnector : close ==')
def execute(self, statements: Union[str, list] = str):
if isinstance(statements, str):
statements = [statements]
for stmt in statements:
print('execute> ' + stmt)
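# Minimal usage sketch (an illustration, not part of the original module). It
# assumes NullConnector can be constructed without arguments, which may not hold
# if BaseConnector defines a required __init__.
if __name__ == '__main__':
    connector = NullConnector()
    connector.connect({'host': 'localhost'})       # prints the connect marker
    connector.execute('SELECT 1')                  # a single statement is wrapped in a list
    connector.execute(['SELECT 1', 'SELECT 2'])    # each statement is echoed in turn
    connector.close()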
|
StarcoderdataPython
|
277557
|
from django.core.management.base import BaseCommand, CommandError
import time
from atlas.prodtask.hashtag import hashtag_request_to_tasks
class Command(BaseCommand):
args = '<request_id, request_id>'
help = 'Save hashtags from request to tasks'
def handle(self, *args, **options):
self.stdout.write('Start hashtag from request to tasks at %s'%time.ctime())
if not args:
try:
hashtag_request_to_tasks()
except Exception as e:
raise CommandError('Some problem during hashtag assign: %s'%str(e))
self.stdout.write('Successfully finished request hashtag to tasks: %s'%time.ctime())
|
StarcoderdataPython
|
9692746
|
<reponame>sphuber/aiida-fleur
# -*- coding: utf-8 -*-
"""Tests for the `FleurinputgenCalculation` class."""
from __future__ import absolute_import
from __future__ import print_function
import os
import pytest
from aiida import orm
from aiida.common import datastructures
from aiida.engine import run_get_node
from aiida.plugins import CalculationFactory, DataFactory
from aiida_fleur.calculation.fleur import FleurCalculation
from ..conftest import run_regression_tests
def test_fleurinpgen_default_calcinfo(aiida_profile, fixture_sandbox, generate_calc_job, fixture_code,
generate_structure): # file_regression
"""Test a default `FleurinputgenCalculation`."""
entry_point_name = 'fleur.inpgen'
parameters = {}
inputs = {
'code': fixture_code(entry_point_name),
'structure': generate_structure(),
# 'parameters': orm.Dict(dict=parameters),
'metadata': {
'options': {
'resources': {
'num_machines': 1
},
'max_wallclock_seconds': int(100),
'withmpi': False
}
}
}
calc_info = generate_calc_job(fixture_sandbox, entry_point_name, inputs)
codes_info = calc_info.codes_info
cmdline_params = ['-explicit'] # for inpgen2 ['+all', '-explicit', 'aiida.in']
local_copy_list = []
retrieve_list = ['inp.xml', 'out', 'shell.out', 'out.error', 'struct.xsf', 'aiida.in']
retrieve_temporary_list = []
# Check the attributes of the returned `CalcInfo`
assert isinstance(calc_info, datastructures.CalcInfo)
#assert sorted(codes_info[0].cmdline_params) == sorted(cmdline_params)
assert sorted(calc_info.local_copy_list) == sorted(local_copy_list)
assert sorted(calc_info.retrieve_list) == sorted(retrieve_list)
# assert sorted(calc_info.retrieve_temporary_list) == sorted(retrieve_temporary_list)
assert sorted(calc_info.remote_symlink_list) == sorted([])
with fixture_sandbox.open('aiida.in') as handle:
input_written = handle.read()
aiida_in_text = """A Fleur input generator calculation with aiida\n&input cartesian=F /
0.000000000 5.130606429 5.130606429
5.130606429 0.000000000 5.130606429
5.130606429 5.130606429 0.000000000
1.0000000000
1.000000000 1.000000000 1.000000000
2\n 14 0.0000000000 0.0000000000 0.0000000000
14 0.2500000000 0.2500000000 0.2500000000\n"""
# Checks on the files written to the sandbox folder as raw input
assert sorted(fixture_sandbox.get_content_list()) == sorted(['aiida.in'])
assert input_written == aiida_in_text
# file_regression.check(input_written, encoding='utf-8', extension='.in')
def test_fleurinpgen_with_parameters(aiida_profile, fixture_sandbox, generate_calc_job, fixture_code,
generate_structure): # file_regression
"""Test a default `FleurinputgenCalculation`."""
# Todo add (more) tests with full parameter possibilities, i.e econfig, los, ....
entry_point_name = 'fleur.inpgen'
parameters = {
'atom': {
'element': 'Si',
'rmt': 2.1,
'jri': 981,
'lmax': 12,
'lnonsph': 6
}, #'econfig': '[He] 2s2 2p6 | 3s2 3p2', 'lo': ''},
'comp': {
'kmax': 5.0,
'gmaxxc': 12.5,
'gmax': 15.0
},
'kpt': {
'div1': 17,
'div2': 17,
'div3': 17,
'tkb': 0.0005
}
}
inputs = {
'code': fixture_code(entry_point_name),
'structure': generate_structure(),
'parameters': orm.Dict(dict=parameters),
'metadata': {
'options': {
'resources': {
'num_machines': 1
},
'max_wallclock_seconds': int(100),
'withmpi': False
}
}
}
calc_info = generate_calc_job(fixture_sandbox, entry_point_name, inputs)
with fixture_sandbox.open('aiida.in') as handle:
input_written = handle.read()
aiida_in_text = """A Fleur input generator calculation with aiida\n&input cartesian=F /
0.000000000 5.130606429 5.130606429
5.130606429 0.000000000 5.130606429
5.130606429 5.130606429 0.000000000
1.0000000000
1.000000000 1.000000000 1.000000000
2\n 14 0.0000000000 0.0000000000 0.0000000000
14 0.2500000000 0.2500000000 0.2500000000
&atom
element="Si" jri=981 lmax=12 lnonsph=6 rmt=2.1 /
&comp
gmax=15.0 gmaxxc=12.5 kmax=5.0 /
&kpt
div1=17 div2=17 div3=17 tkb=0.0005 /
"""
# Checks on the files written to the sandbox folder as raw input
assert sorted(fixture_sandbox.get_content_list()) == sorted(['aiida.in'])
assert input_written == aiida_in_text
# file_regression.check(input_written, encoding='utf-8', extension='.in')
@pytest.mark.skipif(not run_regression_tests, reason='Aiida-testing not there or not wanted.')
def test_FleurinpgenJobCalc_full_mock(aiida_profile, mock_code_factory, generate_structure_W): # pylint: disable=redefined-outer-name
"""
    Tests the fleur input generator with a mock executable if the data files are there,
    otherwise runs inpgen itself if an executable was specified.
"""
CALC_ENTRY_POINT = 'fleur.inpgen'
parameters = {
'atom': {
'element': 'W',
'rmt': 2.1,
'jri': 981,
'lmax': 12,
'lnonsph': 6,
'econfig': '[Kr] 4d10 4f14 | 5s2 5p6 6s2 5d4',
'lo': '5s 5p'
},
'comp': {
'kmax': 5.0,
'gmaxxc': 12.5,
'gmax': 15.0
},
'kpt': {
'div1': 3,
'div2': 3,
'div3': 3,
'tkb': 0.0005
}
}
mock_code = mock_code_factory(label='inpgen',
data_dir_abspath=os.path.join(os.path.dirname(os.path.abspath(__file__)),
'data_dir/'),
entry_point=CALC_ENTRY_POINT,
ignore_files=['_aiidasubmit.sh'])
print(mock_code)
inputs = {
'structure': generate_structure_W(),
'parameters': orm.Dict(dict=parameters),
'metadata': {
'options': {
'resources': {
'num_machines': 1,
'tot_num_mpiprocs': 1
},
'max_wallclock_seconds': int(100),
'withmpi': False
}
}
}
calc = CalculationFactory(CALC_ENTRY_POINT) # (code=mock_code, **inputs)
print(calc)
res, node = run_get_node(CalculationFactory(CALC_ENTRY_POINT), code=mock_code, **inputs)
print(node)
print((res['remote_folder'].list_objects()))
print((res['retrieved'].list_objects()))
assert bool(node.is_finished_ok)
|
StarcoderdataPython
|
6445056
|
<filename>bareml/machinelearning/utils/misc.py
"""
Utility functions
Author: <NAME> <<EMAIL>>
References:
"""
import operator as op
from functools import reduce
import math
import numpy as np
def ncr(n, r):
"""
    Calculates nCr (the binomial coefficient) in an efficient manner.
    This function is not my original code; it is copied from the URL below.
https://stackoverflow.com/questions/4941753/is-there-a-math-ncr-function-in-python
Parameters
----------
n, r: int
"""
r = min(r, n-r)
numer = reduce(op.mul, range(n, n-r, -1), 1)
denom = reduce(op.mul, range(1, r+1), 1)
    return numer // denom  # integer division: nCr is always an integer
def flatten(l):
"""
flatten a nested list.
https://stackoverflow.com/questions/952914/how-to-make-a-flat-list-out-of-list-of-lists
Parameters
----------
l: array-like
"""
return [item for sublist in l for item in sublist]
def split_array(a, n):
"""
Split an array into n chunks.
https://stackoverflow.com/questions/2130016/splitting-a-list-into-n-parts-of-approximately-equal-length
Parameters
----------
a: array-like
n: int
number of chunks
"""
k, m = divmod(len(a), n)
return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))
def supremum_eigen(X):
"""
    Estimates an approximate supremum of the eigenvalues of a square matrix
    by the Gershgorin circle theorem.
Ref(in JP): https://qiita.com/fujiisoup/items/e7f703fc57e2dfc441ad
Parameters
----------
X: np.ndarray (d,d)
"""
return np.max(np.sum(np.abs(X), axis=0))
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def softmax(x):
if x.ndim == 1:
x = np.array([x])
# Avoid overflow
x = x - np.array([x.max(axis=1)]).T
exp_x = np.exp(x)
sum_exp_x = np.sum(exp_x,axis=1)[np.newaxis].T
return exp_x / sum_exp_x
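# Worked examples for the helpers above (verified by hand; shown as comments so
# importing this module stays side-effect free):
#   ncr(5, 2)                              -> 10
#   flatten([[1, 2], [3], [4, 5]])         -> [1, 2, 3, 4, 5]
#   list(split_array(list(range(10)), 3))  -> [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
#   sigmoid(0)                             -> 0.5
#   softmax(np.array([1.0, 2.0, 3.0]))     -> rows sum to 1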
|
StarcoderdataPython
|
6404639
|
<gh_stars>0
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import PunktSentenceTokenizer
def tokenize_sentence(sentences):
sent_tokens = sent_tokenize(sentences)
return sent_tokens
def tokenize_words(text):
word_tokens = word_tokenize(text)
return word_tokens
def tokenize_words_without_stopwords(text):
word_tokens = word_tokenize(text)
    stop_words_list = set(stopwords.words("english"))  # NLTK corpus names are lowercase
    filtered_sentence = [w for w in word_tokens if w not in stop_words_list]
stop_words = [w for w in word_tokens if w in stop_words_list]
return filtered_sentence, stop_words
def stem_tokens(text):
word_tokens = tokenize_words(text)
ps = PorterStemmer()
words_tokens_after_stemming = [ps.stem(w) for w in word_tokens]
return words_tokens_after_stemming
def tag_parts_of_speech(train_text, test_text):
punkt_sent_tokenizer = PunktSentenceTokenizer(train_text=train_text)
    tokenized_words = punkt_sent_tokenizer.tokenize(test_text)  # note: this returns sentence tokens, not POS tags
return tokenized_words
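# Usage sketch (assumes the NLTK 'punkt' and 'stopwords' corpora are installed,
# e.g. via nltk.download('punkt') and nltk.download('stopwords')):
#   tokenize_sentence("NLTK is handy. It splits text.")
#       -> ['NLTK is handy.', 'It splits text.']
#   tokenize_words("NLTK is handy")
#       -> ['NLTK', 'is', 'handy']
#   stem_tokens("running runner runs")
#       -> ['run', 'runner', 'run']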
|
StarcoderdataPython
|
1873917
|
<filename>FEBDAQMULTx2/data_analysis/7_preamp_gain_analysis_and_charge_injection/injection_and_pedestal_peak_adc.py
#!/usr/bin/env python
'''
This script is the OOP version that finds the peak ADC position for a single channel.
The file names follow the convention such as:
"ch0.root" for charge injected to channel 0.
"ch0_ped.root" for pedestal ADC measurement for channel 0.
'''
import argparse
import os
from collections import OrderedDict  # OrderedDict is instantiated below, so import the concrete class
import pandas as pd
import statistics
import uproot
class peak_adc:
def __init__(self, infpn):
'''
Load an input data and fill some properties.
'''
# store the path name
self.infp = os.path.dirname(infpn)
# store the file name
self.infn = os.path.basename(infpn)
# store the measurement type
self.meas_type = None
# store the injection channel
self.inj_ch = None
# open the file and store the tree to a dataframe
tr_mppc = uproot.open(infpn)['mppc']
self.df_mppc = tr_mppc.arrays(library='pd')
# create an output dataframe
self.peak_positions = OrderedDict()
# initialize member variables
# initialize meas_type
self.fill_measurement_type()
self.fill_injection_channel()
self.fill_peak_positions()
def fill_measurement_type(self):
'''
Two types: 'pedestal' or 'injection'
'''
file_atts = self.infn.rstrip('.root').split('_')
if 'ped' in file_atts:
self.meas_type = 'pedestal'
else:
self.meas_type = 'injection'
def fill_injection_channel(self):
'''
Find the injection channel from the file name.
'''
file_atts = self.infn.rstrip('.root').split('_')
for att in file_atts:
if 'ch' in att:
try:
self.inj_ch = int(att.lstrip('ch'))
                except ValueError:  # the attribute does not encode a channel number
continue
return
self.inj_ch = -1
def fill_peak_positions(self):
'''
Fill peak positions for all channels.
Note that charge injection measurement is conducted with only one FEB.
'''
for i in range(32):
self.peak_positions[i] = int(self.df_mppc['chg[{}]'.format(i)].value_counts().idxmax())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_filenames', type=str, nargs='*')
    parser.add_argument('-sn', '--feb_serial_number', type=int, default=428)
args = parser.parse_args()
infpns = args.input_filenames
# make two containers, for the injection and the pedestal measurement
peak_adcs_ped = OrderedDict()
peak_adcs_inj = OrderedDict()
for infpn in infpns:
my_peak_adc = peak_adc(infpn)
if my_peak_adc.meas_type == 'injection':
peak_adcs_inj[my_peak_adc.inj_ch] = my_peak_adc.peak_positions[my_peak_adc.inj_ch]
# store the peak ADC of non-injection channels as the pedestal
for ch in range(32):
if ch != my_peak_adc.inj_ch:
if not ch in peak_adcs_ped.keys():
peak_adcs_ped[ch] = []
peak_adcs_ped[ch].append(my_peak_adc.peak_positions[ch])
# replace the lists by the averages
for ch in peak_adcs_ped.keys():
peak_adcs_ped[ch] = statistics.mean(peak_adcs_ped[ch])
# output a csv file with peak adc, ped adc, and calibration factor as columns
df_calib = pd.DataFrame()
df_calib['peak_adc'] = peak_adcs_inj.values()
df_calib['ped_adc'] = peak_adcs_ped.values()
# df_calib['calib_factor'] = [21.7096875e-3*100e-12/1.6e-19/(peak_adcs_inj[ch]-peak_adcs_ped[ch]) for ch in peak_adcs_inj.keys()]
df_calib['calib_factor'] = [28e-3*100e-12/1.6e-19/(peak_adcs_inj[ch]-peak_adcs_ped[ch]) for ch in peak_adcs_inj.keys()]
# save results to file
out_dir = 'processed_data'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
df_calib.to_csv(os.path.join(out_dir, 'calib_factors_feb{}.csv'.format(args.feb_serial_number)), index=False)
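# Example invocation (hedged; file names follow the convention described in the
# module docstring, one pair of files per injected channel):
#   python injection_and_pedestal_peak_adc.py -i ch0.root ch0_ped.root ch1.root ch1_ped.root -sn 428
# The script then writes processed_data/calib_factors_feb428.csv with the
# peak_adc, ped_adc and calib_factor columns computed above.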
|
StarcoderdataPython
|
6425080
|
"""
Initialise the two_qubit_simulator module.
Add import statements from auxiliary modules here.
"""
# testing soz
|
StarcoderdataPython
|
6420074
|
<reponame>project-origin/account-service<filename>src/origin/services/datahub/service.py
import json
import requests
import marshmallow
import marshmallow_dataclass as md
from origin.settings import (
PROJECT_URL,
DATAHUB_SERVICE_URL,
TOKEN_HEADER,
DEBUG,
WEBHOOK_SECRET,
)
from .models import (
GetGgoListRequest,
GetGgoListResponse,
GetMeasurementRequest,
GetMeasurementResponse,
GetMeasurementListRequest,
GetMeasurementListResponse,
GetMeteringPointsResponse,
SetKeyRequest,
SetKeyResponse,
WebhookSubscribeRequest,
WebhookSubscribeResponse,
GetTechnologiesResponse,
)
class DataHubServiceConnectionError(Exception):
"""
Raised when invoking DataHubService results in a connection error
"""
pass
class DataHubServiceError(Exception):
"""
Raised when invoking DataHubService results in a status code != 200
"""
def __init__(self, message, status_code, response_body):
super(DataHubServiceError, self).__init__(message)
self.status_code = status_code
self.response_body = response_body
class DataHubService(object):
"""
An interface to the Project Origin DataHub Service API.
"""
def invoke(self, path, response_schema, token=None, request=None, request_schema=None):
"""
:param str path:
:param obj request:
:param str token:
:param Schema request_schema:
:param Schema response_schema:
:rtype obj:
"""
url = '%s%s' % (DATAHUB_SERVICE_URL, path)
headers = {}
body = None
if token:
headers = {TOKEN_HEADER: f'Bearer {token}'}
if request and request_schema:
body = request_schema().dump(request)
try:
response = requests.post(
url=url,
json=body,
headers=headers,
verify=not DEBUG,
)
        except requests.exceptions.RequestException:
raise DataHubServiceConnectionError(
'Failed to POST request to DataHubService')
if response.status_code != 200:
raise DataHubServiceError(
(
f'Invoking webhook resulted in status code {response.status_code}: '
f'{url}\n\n{response.content}'
),
status_code=response.status_code,
response_body=str(response.content),
)
try:
response_json = response.json()
response_model = response_schema().load(response_json)
except json.decoder.JSONDecodeError:
raise DataHubServiceError(
f'Failed to parse response JSON: {url}\n\n{response.content}',
status_code=response.status_code,
response_body=str(response.content),
)
except marshmallow.ValidationError:
raise DataHubServiceError(
f'Failed to validate response JSON: {url}\n\n{response.content}',
status_code=response.status_code,
response_body=str(response.content),
)
return response_model
def set_key(self, token, gsrn, key):
"""
:param str token:
:param str gsrn:
:param str key:
:rtype: SetKeyResponse
"""
return self.invoke(
token=token,
path='/meteringpoints/set-key',
request=SetKeyRequest(gsrn=gsrn, key=key),
request_schema=md.class_schema(SetKeyRequest),
response_schema=md.class_schema(SetKeyResponse),
)
def get_meteringpoints(self, token):
"""
:param str token:
:rtype: GetMeteringPointsResponse
"""
return self.invoke(
token=token,
path='/meteringpoints',
response_schema=md.class_schema(GetMeteringPointsResponse),
)
def get_measurements(self, token, request):
"""
:param GetMeasurementListRequest request:
:param str token:
:rtype: GetMeasurementListResponse
"""
return self.invoke(
token=token,
path='/measurements',
request=request,
request_schema=md.class_schema(GetMeasurementListRequest),
response_schema=md.class_schema(GetMeasurementListResponse),
)
def get_ggo_list(self, token, request):
"""
:param str token:
:param GetGgoListRequest request:
:rtype: GetGgoListResponse
"""
return self.invoke(
token=token,
path='/ggo',
request=request,
request_schema=md.class_schema(GetGgoListRequest),
response_schema=md.class_schema(GetGgoListResponse),
)
def get_consumption(self, token, request):
"""
:param str token:
:param GetMeasurementRequest request:
:rtype: GetMeasurementResponse
"""
return self.invoke(
token=token,
path='/measurements/consumed',
request=request,
request_schema=md.class_schema(GetMeasurementRequest),
response_schema=md.class_schema(GetMeasurementResponse),
)
def webhook_on_meteringpoint_available_subscribe(self, token):
"""
:param str token:
:rtype: WebhookSubscribeResponse
"""
callback_url = f'{PROJECT_URL}/webhook/on-meteringpoint-available'
return self.invoke(
token=token,
path='/webhook/on-meteringpoint-available/subscribe',
request=WebhookSubscribeRequest(url=callback_url, secret=WEBHOOK_SECRET),
request_schema=md.class_schema(WebhookSubscribeRequest),
response_schema=md.class_schema(WebhookSubscribeResponse),
)
def webhook_on_ggo_issued_subscribe(self, token):
"""
:param str token:
:rtype: WebhookSubscribeResponse
"""
callback_url = f'{PROJECT_URL}/webhook/on-ggo-issued'
return self.invoke(
token=token,
path='/webhook/on-ggo-issued/subscribe',
request=WebhookSubscribeRequest(url=callback_url, secret=WEBHOOK_SECRET),
request_schema=md.class_schema(WebhookSubscribeRequest),
response_schema=md.class_schema(WebhookSubscribeResponse),
)
def get_technologies(self):
"""
:rtype: GetTechnologiesResponse
"""
return self.invoke(
path='/technologies',
response_schema=md.class_schema(GetTechnologiesResponse),
)
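# Minimal usage sketch (comments only; it assumes a valid access token and a
# reachable DATAHUB_SERVICE_URL, neither of which is provided here):
#   service = DataHubService()
#   meteringpoints = service.get_meteringpoints(token=access_token)
#   technologies = service.get_technologies()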
|
StarcoderdataPython
|
5129277
|
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# Copyright: Stateoftheart AI PBC 2021.
'''Alchemy's library wrapper.
Dataset information taken from: https://alchemy.cs.washington.edu/data/
'''
SOURCE_METADATA = {
'name': 'alchemy',
'original_name': 'Alchemy: Open Source AI',
'url': 'https://alchemy.cs.washington.edu/'
}
DATASETS = {
'Unknown': [
'Animals', 'Citeseer', 'Cora', 'Epinions', 'IMDB', 'Kinships',
'Nations', 'ProteinInteraction', 'RadishRobotMapping', 'Tutorial',
'UMLS', 'UW-CSE', 'WebKB'
]
}
def load_dataset(name: str) -> dict:
return {'name': name, 'source': 'alchemy'}
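# Usage sketch: the wrapper simply tags the requested dataset with its source, e.g.
#   load_dataset('Cora') -> {'name': 'Cora', 'source': 'alchemy'}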
|
StarcoderdataPython
|
1859347
|
<gh_stars>1-10
#link https://practice.geeksforgeeks.org/problems/common-elements1132/1#
class Solution:
    def commonElements(self, A, B, C, n1, n2, n3):
        # Intersect the three arrays as sets and return the common elements in sorted order.
        a = set(A)
        b = set(B)
        c = set(C)
        lis = sorted(a & b & c)
        return lis
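# Illustrative example (not part of the original submission): for
#   A = [1, 5, 10, 20, 40, 80], B = [6, 7, 20, 80, 100], C = [3, 4, 15, 20, 30, 70, 80, 120]
# commonElements returns [20, 80].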
#{
# Driver Code Starts
#Initial Template for Python 3
t = int (input ())
for tc in range (t):
n1, n2, n3 = list(map(int,input().split()))
A = list(map(int,input().split()))
B = list(map(int,input().split()))
C = list(map(int,input().split()))
ob = Solution()
res = ob.commonElements (A, B, C, n1, n2, n3)
if len (res) == 0:
print (-1)
else:
for i in range (len (res)):
print (res[i], end=" ")
print ()
# } Driver Code Ends
|
StarcoderdataPython
|
9783058
|
<reponame>OxfordHED/sunbear<filename>sunbear/math/__init__.py
"""
This module contains functions to calculate first and second derivatives of a
numpy.ndarray. The first derivatives are calculated with a central difference
scheme while the second derivatives are calculated using the central difference
as well.
The input size is (n0+2, n1+2, ..., n(d-1)+2) and the output size is
(n0, n1, ..., n(d-1)), unless specified otherwise.
"""
|
StarcoderdataPython
|
1620945
|
<gh_stars>0
from pyglet import image
import os, sys
base = os.getcwd() + "/Assets/"
icon = image.load(base + 'icon.png')
mario_img = image.load(base + 'mario.png')
luigi_img = image.load(base + 'luigi.png')
|
StarcoderdataPython
|
3232943
|
###################################################################################
#
# Copyright (c) 2017-2019 MuK IT GmbH.
#
# This file is part of MuK Security
# (see https://mukit.at).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
import os
import base64
import logging
from odoo import exceptions
from odoo.tests import common
_path = os.path.dirname(os.path.dirname(__file__))
_logger = logging.getLogger(__name__)
class AccessGroupsTestCase(common.TransactionCase):
def setUp(self):
super(AccessGroupsTestCase, self).setUp()
self.user_id = self.ref('base.user_demo')
self.group_id = self.ref('base.group_system')
self.groups = self.env['muk_security.access_groups']
self.group01 = self.groups.create({
'name': 'Group 01',
'explicit_users': [(6, 0, [self.user_id])]})
self.group02 = self.groups.create({
'name': 'Group 02',
'groups': [(6, 0, [self.group_id])]})
self.user = self.env['res.users'].browse(self.user_id)
self.group = self.env['res.groups'].browse(self.group_id)
def tearDown(self):
super(AccessGroupsTestCase, self).tearDown()
def test_access_groups_users(self):
count = len(self.group02.users)
self.group02.write({'explicit_users': [(6, 0, [self.user_id])]})
self.assertTrue(len(self.group02.users) > count)
def test_access_groups_groups(self):
count = len(self.group01.users)
self.group01.write({'groups': [(6, 0, [self.group_id])]})
self.assertTrue(len(self.group01.users) > count)
def test_access_groups_groups_group(self):
count = len(self.group02.users)
self.group.write({'users': [(4, self.user_id)]})
self.assertTrue(len(self.group02.users) > count)
def test_access_groups_groups_user(self):
count = len(self.group02.users)
self.user.write({'groups_id':[(4, self.group_id)]})
self.assertTrue(len(self.group02.users) > count)
def test_access_groups_parent(self):
count = len(self.group02.users)
self.group02.write({'parent_group': self.group01.id})
self.assertTrue(len(self.group02.users) > count)
def test_access_groups_parent_multi(self):
group01 = self.groups.create({'name': 'MGroup 01'})
group02 = self.groups.create({'name': 'MGroup 02', 'parent_group': group01.id})
group03 = self.groups.create({'name': 'MGroup 03', 'parent_group': group02.id})
init_count = len(group03.users)
group02.write({'explicit_users': [(6, 0, [self.user_id])]})
self.assertTrue(len(group03.users) > init_count)
updated_count = len(group03.users)
group01.write({'groups': [(6, 0, [self.group_id])]})
self.assertTrue(len(group03.users) > updated_count)
|
StarcoderdataPython
|
4919110
|
<reponame>majeformation/JD-micro-services<filename>Chapter10/microservices/thoughts_backend/ThoughtsBackend/load_test_data.py
from thoughts_backend.app import create_app
from thoughts_backend.models import ThoughtModel
if __name__ == '__main__':
application = create_app(script=True)
application.app_context().push()
# Create some test data
test_data = [
# username, timestamp, text
('bruce', "1962-05-11 09:53:41Z",
"A few seconds more and we'll know whether we have "
"succeeded or not!"),
('bruce', "1962-05-11 09:58:23Z",
"And now, if you'll excuse me, it's time for the final countdown"),
('bruce', "1962-05-11 10:07:13Z",
"In a few seconds we will finally learn what happens when the "
"powerful gamma rays are released"),
('stephen', "1963-06-11 19:53:41Z",
"Naturally! All who come to me are! Speak..."),
('stephen', "1963-06-11 19:58:23Z",
"Tonight I shall visit you! I shall find the answer to your dream! "
"Now go"),
]
for username, timestamp, text in test_data:
thought = ThoughtModel(username=username, text=text,
timestamp=timestamp)
application.db.session.add(thought)
application.db.session.commit()
|
StarcoderdataPython
|
9741983
|
import numpy as np
import pandas as pd
import indicators
import config
import util
import sys
from pathlib import Path
def get_algo_dataset(choose_set_num: int):
"""run_set = ['goldman', 'index', '^BVSP', '^TWII', '^IXIC', 'index_sampled']
Returns df_list, date_range, trend_list, stocks
"""
# Do not change run_set order. The order is hardcoded into below code
run_set = ['goldman', 'index', '^BVSP', '^TWII', '^IXIC', 'index_sampled']
choose_set = run_set[choose_set_num]
df_list = []
date_range = []
trend_list = []
stocks = []
### For GS stocks: 'GGSIX', 'GOIIX', 'GIPIX'
if choose_set == run_set[0]:
# Must be same order
stocks = ['GGSIX', 'GOIIX', 'GIPIX']
folder = ['growth', 'growth_income', 'balanced']
for i, stock in enumerate(stocks):
df=pd.read_csv('data/goldman/portfolio/{}/{}.csv'.format(folder[i],stock), usecols=config.column_names, parse_dates=['Date'])
df = df[df['Close'] > 0].reset_index(drop=True)
df['returns'] = indicators.day_gain(df, 'Close').dropna()
df_list.append(df)
start = '1/1/2016'
end = '31/12/2018'
# date_range = df_list[0][(df_list[0]['Date'] >= df_list[1].iloc[0]['Date']) & (df_list[0]['Date'] >= df_list[2].iloc[0]['Date'])]['Date'].tolist()
date_range = remove_uncommon_dates(df_list)
trend_list = util.get_trend_list(stocks, df_list, start=start, end=end)
### For Index stocks: '^BVSP', '^TWII', '^IXIC'
elif choose_set == run_set[1]:
stocks = ['^BVSP', '^TWII', '^IXIC']
high_risk_df = pd.read_csv('data/indexes/{}.csv'.format('^BVSP'), usecols=config.column_names, parse_dates=['Date'])
high_risk_df = high_risk_df[high_risk_df['Close'] > 0].reset_index(drop=True)
high_risk_df['returns'] = indicators.day_gain(high_risk_df, 'Close').dropna()
med_risk_df = pd.read_csv('data/indexes/{}.csv'.format('^TWII'), usecols=config.column_names, parse_dates=['Date'])
med_risk_df = med_risk_df[med_risk_df['Close'] > 0].reset_index(drop=True)
med_risk_df['returns'] = indicators.day_gain(med_risk_df, 'Close').dropna()
low_risk_df = pd.read_csv('data/indexes/{}.csv'.format('^IXIC'), parse_dates=['Date'])
# IXIC dates are reversed
low_risk_df = low_risk_df.reindex(index=low_risk_df.index[::-1])
low_risk_df = low_risk_df[low_risk_df['Close'] > 0].reset_index(drop=True)
low_risk_df['returns'] = indicators.day_gain(low_risk_df, 'Close').dropna()
df_list = [high_risk_df, med_risk_df, low_risk_df]
start = '1/1/2014'
end = '31/12/2018'
# date_range = high_risk_df[(high_risk_df['Date'] >= med_risk_df.iloc[0]['Date']) & (high_risk_df['Date'] >= low_risk_df.iloc[0]['Date'])]['Date'].tolist()
date_range = remove_uncommon_dates(df_list)
trend_list = util.get_trend_list(stocks, df_list, start=start, end=end)
elif choose_set == run_set[2] or choose_set == run_set[3] or choose_set == run_set[4]:
stocks =[]
folder =''
if choose_set == run_set[2]:
stocks = ['EQTL3.SA', 'ITSA4.SA', 'PETR3.SA']
folder = '^BVSP'
elif choose_set==run_set[3]:
stocks = ['1326.TW', '2882.TW', '3008.TW']
folder = '^TWII'
elif choose_set==run_set[4]:
stocks = ['TSLA', 'IBKC', 'FEYE']
folder = '^IXIC'
else:
            print('An error occurred while fetching the data for the algo stocks.')
for stock in stocks:
df=pd.read_csv('data/algo/{}/{}.csv'.format(folder,stock), usecols=config.column_names, parse_dates=['Date'])
df = df[df['Close'] > 0].reset_index(drop=True)
df['returns'] = indicators.day_gain(df, 'Close').dropna()
df_list.append(df)
start = '1/1/2014'
end = '31/12/2018'
date_range = remove_uncommon_dates(df_list)
trend_list = util.get_trend_list(stocks, df_list, start=start, end=end)
elif choose_set == run_set[5]:
stocks =['^BVSP', '^TWII', '^IXIC']
for stock in stocks:
df=pd.read_csv('data/algo/{}/daily_price.csv'.format(stock), parse_dates=['Date'])
df = df[df['Close'] > 0].reset_index(drop=True)
df['returns'] = indicators.day_gain(df, 'Close').dropna()
df_list.append(df)
start = '1/1/2014'
end = '31/12/2018'
date_range = remove_uncommon_dates(df_list)
trend_list = util.get_trend_list(stocks, df_list, start=start, end=end)
return df_list, date_range, trend_list, stocks
def get_algo_results(choose_set_num: int, asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list, cal_avg_nav=False):
"""Returns the change list and final asset value
"""
change_list = []
average_asset = 0
if cal_avg_nav:
if choose_set_num == 0:
average_asset, asset_list, portfolio_comp = util.cal_portfolio_changed_nav(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
[8.0, 6.0, 12.0], [9.0, 5.0, 9.0], [6.0, 12.0, 6.0], [0.9712034471256101, -1.6709072749507035, -1.0777099909032646], [-3.4145406491989023, -0.18272123074956848, -0.7245604433339186], 0.0816132948369838)
# [8.0, 8.0, 4.0], [5.0, 6.0, 5.0], [5.0, 2.0, 3.0], [0.22948733470032123, 0.8909251765940478, -0.20656673058505381], [-1.7417846430478365, -0.4628863373977188, 1.5419043896500977], 0.14266550931364091)
# [21.0, 6.0, 5.0], [2.0, 2.0, 6.0], [27.0, 12.0, 3.0], [3.125115822639779, -2.561089882241202, -1.4940972093691949], [1.2063367792987396, 1.4663555035726752, -0.2846560129041551], 0.1614246940280476)
elif choose_set_num == 1:
average_asset, asset_list, portfolio_comp = util.cal_portfolio_changed_nav(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [8.0, 6.0, 12.0], [9.0, 5.0, 9.0], [6.0, 12.0, 6.0], [0.9712034471256101, -1.6709072749507035, -1.0777099909032646], [-3.4145406491989023, -0.18272123074956848, -0.7245604433339186], 0.0816132948369838)
# [5.0, 5.0, 6.0], [5.0, 6.0, 6.0], [19.0, 5.0, 8.0], [1.8954915289833882, -1.450482294216655, 1.125418440357023], [-2.3676311336976132, -1.8970317071693157, 0.23699516374694385], 0.046795990258734835)
[8.0, 14.0, 11.0], [11.0, 11.0, 2.0], [15.0, 10.0, 2.0], [1.363647435463774, 2.716953337278016, -4.324164482875698], [-1.7062595953617727, 2.5105760118208957, -4.060094673509836], 0.07240419552333409)
elif choose_set_num == 2:
average_asset, asset_list, portfolio_comp = util.cal_portfolio_changed_nav(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [4.0, 2.0, 4.0], [4.0, 8.0, 9.0], [6.0, 4.0, 7.0], [0.6078976284270344, 1.2577097768694967, 2.0213163271738006], [-2.566918900257593, 2.90468608230902, -1.7097040021899894], 0.07797085783765784)
[3.0, 3.0, 13.0], [11.0, 5.0, 9.0], [8.0, 4.0, 18.0], [0.06083023158629253, 0.5601483772918827, 1.9569019466459423], [-1.3881334364246258, 2.8163651325079524, 0.9492765355184316], 0.15511606897450375)
elif choose_set_num == 3:
average_asset, asset_list, portfolio_comp = util.cal_portfolio_changed_nav(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
[3.0, 9.0, 4.0], [4.0, 14.0, 3.0], [2.0, 2.0, 16.0], [0.30059198706758106, 1.0952845039110184, 1.8392867588452613], [2.771352403174757, -1.3669589385046343, -2.3406274217770866], 0.17345428438145236)
elif choose_set_num == 4:
average_asset, asset_list, portfolio_comp = util.cal_portfolio_changed_nav(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [9.0, 7.0, 9.0], [4.0, 3.0, 7.0], [6.0, 5.0, 15.0], [0.9351583394555885, 1.3754760765507819, 2.348134831028588], [-2.471478593919233, 1.379869639191209, 4.95188889034387], 0.1444277817979811)
# [8.0, 11.0, 2.0], [6.0, 8.0, 6.0], [7.0, 8.0, 12.0], [1.1255518400058317, -0.36346414388153225, -1.0247284676654485], [-0.6274220138552453, -1.1083765565671055, 0.00449200835519481], 0.13718457807344167)
[2.0, 5.0, 11.0], [4.0, 2.0, 2.0], [7.0, 5.0, 5.0], [0.2774502065258735, 0.16677941009065034, -0.45385907412444926], [-0.2098008442952385, 1.289022800463935, 2.003346238448586], 0.15779763053682244)
elif choose_set_num == 5:
average_asset, asset_list, portfolio_comp = util.cal_portfolio_changed_nav(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [7.0, 12.0, 3.0], [2.0, 7.0, 2.0], [13.0, 3.0, 8.0], [2.522702769828708, -0.5707216899389504, 0.8348229423350395], [-1.7493395408023145, 1.0817636863501934, 0.8232680695157204], 0.1963583867900387)
[4.0, 6.0, 3.0], [2.0, 4.0, 7.0], [14.0, 2.0, 5.0], [1.3929077534652725, 0.18393055682065484, 2.6440755858307075], [-1.601189152927202, 1.3377505947800103, -1.9787536808104849], 0.13726920065461523)
else:
print('ERROR! Wrong choose_set_num')
return average_asset, asset_list, portfolio_comp
else:
if choose_set_num == 0:
change_list, asset_list, portfolio_comp = util.cal_portfolio_comp_fitness(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
[8.0, 6.0, 12.0], [9.0, 5.0, 9.0], [6.0, 12.0, 6.0], [0.9712034471256101, -1.6709072749507035, -1.0777099909032646], [-3.4145406491989023, -0.18272123074956848, -0.7245604433339186], 0.0816132948369838)
# [8.0, 8.0, 4.0], [5.0, 6.0, 5.0], [5.0, 2.0, 3.0], [0.22948733470032123, 0.8909251765940478, -0.20656673058505381], [-1.7417846430478365, -0.4628863373977188, 1.5419043896500977], 0.14266550931364091)
# [21.0, 6.0, 5.0], [2.0, 2.0, 6.0], [27.0, 12.0, 3.0], [3.125115822639779, -2.561089882241202, -1.4940972093691949], [1.2063367792987396, 1.4663555035726752, -0.2846560129041551], 0.1614246940280476)
elif choose_set_num == 1:
change_list, asset_list, portfolio_comp = util.cal_portfolio_comp_fitness(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [8.0, 6.0, 12.0], [9.0, 5.0, 9.0], [6.0, 12.0, 6.0], [0.9712034471256101, -1.6709072749507035, -1.0777099909032646], [-3.4145406491989023, -0.18272123074956848, -0.7245604433339186], 0.0816132948369838)
# [5.0, 5.0, 6.0], [5.0, 6.0, 6.0], [19.0, 5.0, 8.0], [1.8954915289833882, -1.450482294216655, 1.125418440357023], [-2.3676311336976132, -1.8970317071693157, 0.23699516374694385], 0.046795990258734835)
[8.0, 14.0, 11.0], [11.0, 11.0, 2.0], [15.0, 10.0, 2.0], [1.363647435463774, 2.716953337278016, -4.324164482875698], [-1.7062595953617727, 2.5105760118208957, -4.060094673509836], 0.07240419552333409)
elif choose_set_num == 2:
change_list, asset_list, portfolio_comp = util.cal_portfolio_comp_fitness(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [4.0, 2.0, 4.0], [4.0, 8.0, 9.0], [6.0, 4.0, 7.0], [0.6078976284270344, 1.2577097768694967, 2.0213163271738006], [-2.566918900257593, 2.90468608230902, -1.7097040021899894], 0.07797085783765784)
[3.0, 3.0, 13.0], [11.0, 5.0, 9.0], [8.0, 4.0, 18.0], [0.06083023158629253, 0.5601483772918827, 1.9569019466459423], [-1.3881334364246258, 2.8163651325079524, 0.9492765355184316], 0.15511606897450375)
elif choose_set_num == 3:
change_list, asset_list, portfolio_comp = util.cal_portfolio_comp_fitness(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
[3.0, 9.0, 4.0], [4.0, 14.0, 3.0], [2.0, 2.0, 16.0], [0.30059198706758106, 1.0952845039110184, 1.8392867588452613], [2.771352403174757, -1.3669589385046343, -2.3406274217770866], 0.17345428438145236)
elif choose_set_num == 4:
change_list, asset_list, portfolio_comp = util.cal_portfolio_comp_fitness(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [9.0, 7.0, 9.0], [4.0, 3.0, 7.0], [6.0, 5.0, 15.0], [0.9351583394555885, 1.3754760765507819, 2.348134831028588], [-2.471478593919233, 1.379869639191209, 4.95188889034387], 0.1444277817979811)
# [8.0, 11.0, 2.0], [6.0, 8.0, 6.0], [7.0, 8.0, 12.0], [1.1255518400058317, -0.36346414388153225, -1.0247284676654485], [-0.6274220138552453, -1.1083765565671055, 0.00449200835519481], 0.13718457807344167)
[2.0, 5.0, 11.0], [4.0, 2.0, 2.0], [7.0, 5.0, 5.0], [0.2774502065258735, 0.16677941009065034, -0.45385907412444926], [-0.2098008442952385, 1.289022800463935, 2.003346238448586], 0.15779763053682244)
elif choose_set_num == 5:
change_list, asset_list, portfolio_comp = util.cal_portfolio_comp_fitness(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [7.0, 12.0, 3.0], [2.0, 7.0, 2.0], [13.0, 3.0, 8.0], [2.522702769828708, -0.5707216899389504, 0.8348229423350395], [-1.7493395408023145, 1.0817636863501934, 0.8232680695157204], 0.1963583867900387)
[4.0, 6.0, 3.0], [2.0, 4.0, 7.0], [14.0, 2.0, 5.0], [1.3929077534652725, 0.18393055682065484, 2.6440755858307075], [-1.601189152927202, 1.3377505947800103, -1.9787536808104849], 0.13726920065461523)
else:
print('ERROR! Wrong choose_set_num')
return change_list, asset_list, portfolio_comp
def gen_algo_data(run_set: list, choose_set_num: int, save_algo_data=False, save_passive=False, save_sub_folder='', is_rl_data=False, base_rates=[], portfolio_comp=[]):
df_list, date_range, trend_list, stocks = util.get_algo_dataset(choose_set_num)
# this is an afterthought
if base_rates == []:
base_rates = [0.2, 0.2, 0.2]
if portfolio_comp == []:
portfolio_comp = [base_rates[i] + [0.4/3, 0.4/3, 0.4/3][i] for i in range(len(base_rates))]
asset_list = [100000, 100000, 100000]
change_list = []
# print('Initial portfolio composition: {}'.format(portfolio_comp))
change_list,_,_ = util.get_algo_results(choose_set_num, asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list)
print('Reallocated {} times'.format(len([i for i in change_list if i[0]])))
# print([i[1] for i in change_list if i[0]])
nav_daily_dates_list = []
nav_daily_composition_list = [[], [], []]
nav_daily_net_list = []
daily_price_list = []
asset_list = [100000, 100000, 100000]
nav_daily_adjust_list = [i[0] for i in change_list]
j = 0
last_trade_date = date_range[0]
for date in date_range:
# Generate daily NAV value for visualisation
high_risk_date = df_list[0][df_list[0]['Date'] == date]
med_risk_date = df_list[1][df_list[1]['Date'] == date]
low_risk_date = df_list[2][df_list[2]['Date'] == date]
if not (high_risk_date.empty or med_risk_date.empty or low_risk_date.empty):
current_nav_list = []
if not change_list[j][0]:
for i in range(len(portfolio_comp)):
previous_close_price = df_list[i][df_list[i]['Date'] == last_trade_date]['Close'].values[0]
current_close_price = df_list[i][df_list[i]['Date'] == date]['Close'].values[0]
current_nav_list.append(asset_list[i] * current_close_price / previous_close_price)
else:
for i in range(len(portfolio_comp)):
asset_list[i] = change_list[j][1][i]
current_nav_list.append(asset_list[i])
last_trade_date = change_list[j][2]
nav_daily_dates_list.append(date)
for i in range(len(portfolio_comp)):
nav_daily_composition_list[i].append(current_nav_list[i])
daily_price_list.append(sum(current_nav_list)/300000 *100)
nav_daily_net_list.append(sum(current_nav_list))
j+=1
# Note that we are using the Laspeyres Price Index for calculation
daily_price_df = pd.DataFrame({'Date': nav_daily_dates_list, 'Close': daily_price_list})
daily_df = pd.DataFrame({'Date': nav_daily_dates_list,\
stocks[0]: nav_daily_composition_list[0],\
stocks[1]: nav_daily_composition_list[1],\
stocks[2]: nav_daily_composition_list[2],\
'Net': nav_daily_net_list,\
'Adjusted': nav_daily_adjust_list})
# Generate quarterly NAV returns for visualisation
quarterly_df = util.cal_fitness_with_quarterly_returns(daily_df, [], price_col='Net')
# Generate passive NAV returns for comparison (buy and hold)
# assets are all 300000 to be able to compare to algo
asset_list = [300000, 300000, 300000]
last_date = nav_daily_dates_list[0]
passive_nav_daily_composition_list = [[],[],[]]
for date in nav_daily_dates_list:
for i in range(len(stocks)):
previous_close_price = df_list[i][df_list[i]['Date'] == last_date]['Close'].values[0]
current_close_price = df_list[i][df_list[i]['Date'] == date]['Close'].values[0]
asset_list[i] = asset_list[i] * current_close_price / previous_close_price
passive_nav_daily_composition_list[i].append(asset_list[i])
last_date = date
passive_daily_df = pd.DataFrame({'Date': nav_daily_dates_list,\
stocks[0]: passive_nav_daily_composition_list[0],\
stocks[1]: passive_nav_daily_composition_list[1],\
stocks[2]: passive_nav_daily_composition_list[2]})
passive_quarterly_df = pd.DataFrame()
for i in range(len(stocks)):
if i == 0:
passive_quarterly_df = util.cal_fitness_with_quarterly_returns(passive_daily_df, [], price_col=stocks[i])
passive_quarterly_df = passive_quarterly_df.rename(columns={"quarterly_return": stocks[i]})
else:
passive_quarterly_df[stocks[i]] = util.cal_fitness_with_quarterly_returns(passive_daily_df, [], price_col=stocks[i])['quarterly_return']
# print(passive_quarterly_df)
# Print some quarterly difference statistics
for symbol in stocks:
difference = quarterly_df['quarterly_return'].values - passive_quarterly_df[symbol].values
# print('Stock {}: {}'.format(symbol, difference))
print('Stock {} total return difference = {}'.format(symbol,sum(difference)))
# cvar = 0
# for symbol in stocks:
# composition = daily_df[symbol].iloc[-1]/daily_df['Net'].iloc[-1]
# cvar_value = util.cvar_percent(daily_df, len(daily_df)-1, len(daily_df)-1, price_col=symbol) * composition
# print(cvar_value)
# cvar += abs(cvar_value)
# print('Portfolio cvar = {}'.format(cvar))
for symbol in stocks:
symbol_cvar = abs(util.cvar_percent(passive_daily_df, len(passive_daily_df)-1, len(passive_daily_df)-1, price_col=symbol))
print('Stock cvar {}: {}'.format(symbol, symbol_cvar))
# print('Stock {} cvar difference = {}'.format(symbol, cvar - symbol_cvar))
path_str = ''
if is_rl_data:
path_str = 'data/rl/{}'.format(run_set[choose_set_num])
path = Path(path_str)
path.mkdir(parents=True, exist_ok=True)
else:
path_str = 'data/algo/{}'.format(run_set[choose_set_num])
path = Path(path_str)
path.mkdir(parents=True, exist_ok=True)
path = Path(f'{path_str}/{save_sub_folder}')
path.mkdir(parents=True, exist_ok=True)
if save_passive:
passive_daily_df.to_csv(f'{path_str}/{save_sub_folder}passive_daily_nav.csv')
passive_quarterly_df.to_csv(f'{path_str}/{save_sub_folder}passive_quarterly_nav_return.csv')
print('Passive data saved for {}'.format(run_set[choose_set_num]))
if save_algo_data:
daily_df.to_csv(f'{path_str}/{save_sub_folder}daily_nav.csv')
quarterly_df.to_csv(f'{path_str}/{save_sub_folder}quarterly_nav_return.csv')
daily_price_df.to_csv(f'{path_str}/{save_sub_folder}daily_price.csv')
print('Data saved for {}'.format(run_set[choose_set_num]))
def remove_uncommon_dates(df_list):
date_range = []
# temp_date_range = df_list[0][(df_list[0]['Date'] >= df_list[1].iloc[0]['Date']) & (df_list[0]['Date'] >= df_list[2].iloc[0]['Date'])]['Date'].tolist()
for date in df_list[0]['Date']:
empty = 0
for df in df_list:
temp_df = df[df['Date'] == date]
if temp_df.empty:
empty +=1
if empty == 0:
date_range.append(date)
return date_range
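# Usage sketch (comments only; running it requires the local data/, config, util
# and indicators modules referenced above):
#   run_set = ['goldman', 'index', '^BVSP', '^TWII', '^IXIC', 'index_sampled']
#   gen_algo_data(run_set, choose_set_num=1, save_algo_data=True, save_passive=True)
# This regenerates the daily/quarterly NAV files and the passive buy-and-hold
# benchmark for the index portfolio under data/algo/index/.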
|
StarcoderdataPython
|
8155247
|
<gh_stars>1-10
"""
# SORT COLORS
Given an array nums with n objects colored red, white, or blue, sort them in-place so that objects of the same color are adjacent, with the colors in the order red, white, and blue.
Here, we will use the integers 0, 1, and 2 to represent the color red, white, and blue respectively.
Follow up:
Could you solve this problem without using the library's sort function?
Could you come up with a one-pass algorithm using only O(1) constant space?
Example 1:
Input: nums = [2,0,2,1,1,0]
Output: [0,0,1,1,2,2]
Example 2:
Input: nums = [2,0,1]
Output: [0,1,2]
Example 3:
Input: nums = [0]
Output: [0]
Example 4:
Input: nums = [1]
Output: [1]
Constraints:
n == nums.length
1 <= n <= 300
nums[i] is 0, 1, or 2.
"""
def sortColors(nums) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
dic = {0: 0, 1: 0, 2: 0}
for x in nums:
if x == 0:
dic[0] += 1
elif x == 1:
dic[1] += 1
else:
dic[2] += 1
nums.clear()
    nums.extend([0] * dic[0])
    nums.extend([1] * dic[1])
    nums.extend([2] * dic[2])
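# The follow-up in the docstring asks for a one-pass, O(1)-space solution; the
# standard answer is the Dutch National Flag partition sketched below (added
# here for illustration, it is not part of the original snippet).
def sortColorsOnePass(nums) -> None:
    """Sort 0/1/2 values in-place in a single pass with three pointers."""
    low, mid, high = 0, 0, len(nums) - 1
    while mid <= high:
        if nums[mid] == 0:
            nums[low], nums[mid] = nums[mid], nums[low]
            low += 1
            mid += 1
        elif nums[mid] == 1:
            mid += 1
        else:  # nums[mid] == 2
            nums[mid], nums[high] = nums[high], nums[mid]
            high -= 1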
|
StarcoderdataPython
|
12824328
|
<gh_stars>1-10
r = 6
pi = 3.14
volume = (4 * pi * r ** 3) / 3  # use the radius variable instead of a hard-coded 6
print("Volume of sphere=", volume)
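# With r = 6 and pi = 3.14 the formula gives 4 * 3.14 * 216 / 3 = 904.32.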
|
StarcoderdataPython
|
1737255
|
import unittest
import tempfile
import textwrap
import six
import conan.tools.qbs.qbstoolchain as qbs
from conans import tools
from conans.errors import ConanException
from conans.test.utils.mocks import MockConanfile, MockSettings, MockOptions
class RunnerMock(object):
class Expectation(object):
def __init__(self, return_ok=True, output=None):
self.return_ok = return_ok
if six.PY2 and output:
output = output.decode("utf-8")
self.output = output
def __init__(self, expectations=None):
self.command_called = []
self.expectations = expectations or [RunnerMock.Expectation()]
def __call__(self, command, output, win_bash=False, subsystem=None):
self.command_called.append(command)
self.win_bash = win_bash
self.subsystem = subsystem
if not self.expectations:
return 1
expectation = self.expectations.pop(0)
if expectation.output and output and hasattr(output, 'write'):
output.write(expectation.output)
return 0 if expectation.return_ok else 1
class MockConanfileWithFolders(MockConanfile):
install_folder = tempfile.mkdtemp()
def __del__(self):
tools.rmdir(self.install_folder)
def run(self, *args, **kwargs):
if self.runner:
if 'output' not in kwargs:
kwargs['output'] = None
self.runner(*args, **kwargs)
class QbsGenericTest(unittest.TestCase):
def test_convert_bool(self):
self.assertEqual(qbs._bool(True), 'true')
self.assertEqual(qbs._bool(False), 'false')
def test_convert_build_variant(self):
conanfile = MockConanfileWithFolders(MockSettings({
'os': 'Linux',
'compiler': 'gcc'}))
qbs_toolchain = qbs.QbsToolchain(conanfile)
self.assertEqual(qbs_toolchain._build_variant, None)
for build_type, build_variant in qbs._build_variant.items():
conanfile = MockConanfileWithFolders(MockSettings({
'os': 'Linux',
'compiler': 'gcc',
'build_type': build_type}))
qbs_toolchain = qbs.QbsToolchain(conanfile)
self.assertEqual(qbs_toolchain._build_variant, build_variant)
def test_convert_architecture(self):
conanfile = MockConanfileWithFolders(MockSettings({
'os': 'Linux',
'compiler': 'gcc'}))
qbs_toolchain = qbs.QbsToolchain(conanfile)
self.assertEqual(qbs_toolchain._architecture, None)
for arch, architecture in qbs._architecture.items():
conanfile = MockConanfileWithFolders(MockSettings({
'os': 'Linux',
'compiler': 'gcc',
'arch': arch}))
qbs_toolchain = qbs.QbsToolchain(conanfile)
self.assertEqual(qbs_toolchain._architecture, architecture)
def test_convert_optimization(self):
conanfile = MockConanfileWithFolders(MockSettings({
'os': 'Linux',
'compiler': 'gcc'}))
qbs_toolchain = qbs.QbsToolchain(conanfile)
self.assertEqual(qbs_toolchain._optimization, None)
for build_type, optimization in qbs._optimization.items():
conanfile = MockConanfileWithFolders(MockSettings({
'os': 'Linux',
'compiler': 'gcc',
'build_type': build_type}))
qbs_toolchain = qbs.QbsToolchain(conanfile)
self.assertEqual(qbs_toolchain._optimization, optimization)
def test_use_sysroot_from_env(self):
conanfile = MockConanfileWithFolders(MockSettings({
'os': 'Linux',
'compiler': 'gcc'}))
sysroot = '/path/to/sysroot/foo/bar'
with tools.environment_append({'SYSROOT': sysroot}):
qbs_toolchain = qbs.QbsToolchain(conanfile)
self.assertEqual(qbs_toolchain._sysroot, sysroot)
def test_detect_fpic_from_options(self):
f_pic = {
True: 'true',
False: 'false',
None: None
}
for option, value in f_pic.items():
conanfile = MockConanfileWithFolders(MockSettings({
'os': 'Linux',
'compiler': 'gcc'
}),
MockOptions({
'fPIC': option
}))
qbs_toolchain = qbs.QbsToolchain(conanfile)
self.assertEqual(qbs_toolchain._position_independent_code, value)
def test_convert_cxx_language_version(self):
conanfile = MockConanfileWithFolders(MockSettings({
'os': 'Linux',
'compiler': 'gcc'}))
qbs_toolchain = qbs.QbsToolchain(conanfile)
self.assertEqual(qbs_toolchain._cxx_language_version, None)
conanfile = MockConanfileWithFolders(MockSettings({
'os': 'Linux',
'compiler': 'gcc',
'compiler.cppstd': 17}))
qbs_toolchain = qbs.QbsToolchain(conanfile)
self.assertEqual(qbs_toolchain._cxx_language_version, 'c++17')
for cppstd, cxx_language_version in qbs._cxx_language_version.items():
conanfile = MockConanfileWithFolders(MockSettings({
'os': 'Linux',
'compiler': 'gcc',
'compiler.cppstd': cppstd}))
qbs_toolchain = qbs.QbsToolchain(conanfile)
self.assertEqual(qbs_toolchain._cxx_language_version,
cxx_language_version)
def test_split_env_var_into_list(self):
env_var_list = ['-p1', '-p2', '-p3_with_value=13',
'-p_with_space1="hello world"',
'"-p_with_space2=Hello World"']
expected_list = ['-p1', '-p2', '-p3_with_value=13',
'-p_with_space1=hello world',
'-p_with_space2=Hello World']
env_var = ' '.join(env_var_list)
self.assertEqual(qbs._env_var_to_list(env_var), expected_list)
def test_compiler_not_in_settings(self):
conanfile = MockConanfile(MockSettings({}))
with self.assertRaises(ConanException):
qbs._check_for_compiler(conanfile)
def test_compiler_in_settings_not_supported(self):
conanfile = MockConanfile(
            MockSettings({'compiler': 'not really a compiler name'}))
with self.assertRaises(ConanException):
qbs._check_for_compiler(conanfile)
def test_valid_compiler(self):
supported_compilers = ['Visual Studio', 'gcc', 'clang']
for compiler in supported_compilers:
conanfile = MockConanfile(MockSettings({'compiler': compiler}))
qbs._check_for_compiler(conanfile)
@staticmethod
def _settings_to_test_against():
return [
{'os': 'Windows', 'compiler': 'gcc', 'compiler.version': '6', 'qbs_compiler': 'mingw'},
{'os': 'Windows', 'compiler': 'clang', 'compiler.version': '3.9',
'qbs_compiler': 'clang-cl'},
{'os': 'Windows', 'compiler': 'Visual Studio', 'compiler.version': '15',
'qbs_compiler': 'cl'},
{'os': 'Windows', 'compiler': 'Visual Studio', 'compiler.version': '15',
'compiler.toolset': 'ClangCL', 'qbs_compiler': 'clang-cl'},
{'os': 'Linux', 'compiler': 'gcc', 'compiler.version': '6', 'qbs_compiler': 'gcc'},
{'os': 'Linux', 'compiler': 'clang', 'compiler.version': '3.9', 'qbs_compiler': 'clang'}
]
def test_convert_compiler_name_to_qbs_compiler_name(self):
for settings in self._settings_to_test_against():
def expected():
return settings['qbs_compiler']
conanfile = MockConanfile(MockSettings(settings))
self.assertEqual(qbs._default_compiler_name(conanfile), expected())
def test_settings_dir_location(self):
conanfile = MockConanfileWithFolders(MockSettings({}))
self.assertEqual(
qbs._settings_dir(conanfile),
'%s/conan_qbs_toolchain_settings_dir' % conanfile.install_folder)
def test_setup_toolchain_without_any_env_values(self):
for settings in self._settings_to_test_against():
conanfile = MockConanfileWithFolders(MockSettings(settings), runner=RunnerMock())
qbs._setup_toolchains(conanfile)
self.assertEqual(len(conanfile.runner.command_called), 1)
self.assertEqual(
conanfile.runner.command_called[0],
'qbs-setup-toolchains --settings-dir "%s" %s %s' % (
qbs._settings_dir(conanfile), settings['qbs_compiler'],
qbs._profile_name))
def test_setup_toolchain_with_compiler_from_env(self):
compiler = 'compiler_from_env'
for settings in self._settings_to_test_against():
conanfile = MockConanfileWithFolders(MockSettings(settings), runner=RunnerMock())
with tools.environment_append({'CC': compiler}):
qbs._setup_toolchains(conanfile)
self.assertEqual(len(conanfile.runner.command_called), 1)
self.assertEqual(
conanfile.runner.command_called[0],
'qbs-setup-toolchains --settings-dir "%s" %s %s' % (
qbs._settings_dir(conanfile), compiler,
qbs._profile_name))
def test_linker_flags_parser(self):
test_data_ld_flags = {
'-Wl,flag1': ([], ['flag1']),
'-Wl,flag1,flag2': ([], ['flag1', 'flag2']),
'-Wl,flag1 -Wl,flag2': ([], ['flag1', 'flag2']),
'-dFlag1': (['-dFlag1'], []),
'-dFlag1 -dFlag2': (['-dFlag1', '-dFlag2'], []),
'-Wl,flag1 -dFlag1': (['-dFlag1'], ['flag1']),
'-Wl,flag1,flag2 -dFlag1': (['-dFlag1'], ['flag1', 'flag2']),
'-Wl,flag1,flag2 -dFlag1 -Wl,flag3 -dFlag2 -dFlag3 -Wl,flag4,flag5':
(['-dFlag1', '-dFlag2', '-dFlag3'],
['flag1', 'flag2', 'flag3', 'flag4', 'flag5']),
}
for ld_flags, expected in test_data_ld_flags.items():
driver_linker_flags, linker_flags = expected
parser = qbs.LinkerFlagsParser(qbs._env_var_to_list(ld_flags))
self.assertEqual(parser.driver_linker_flags,
driver_linker_flags)
self.assertEqual(parser.linker_flags,
linker_flags)
@staticmethod
def _generate_flags(flag, qbs_key):
return {'env': ('-{0}1 -{0}2 -{0}3_with_value=13 '
'-{0}_with_space="hello world"').format(flag),
'qbs_value': ("['-{0}1', '-{0}2', '-{0}3_with_value=13', "
"'-{0}_with_space=hello world']").format(flag),
'qbs_key': qbs_key}
def test_flags_from_env(self):
asm = self._generate_flags('asm', 'assemblerFlags')
c = self._generate_flags('c', 'cFlags')
cpp = self._generate_flags('cpp', 'cppFlags')
cxx = self._generate_flags('cxx', 'cxxFlags')
wl = self._generate_flags('Wl,', 'linkerFlags')
ld = self._generate_flags('ld', 'driverLinkerFlags')
env = {
'ASFLAGS': asm['env'],
'CFLAGS': c['env'],
'CPPFLAGS': cpp['env'],
'CXXFLAGS': cxx['env'],
'LDFLAGS': '%s %s' % (wl['env'], ld['env'])
}
print(env)
with tools.environment_append(env):
flags_from_env = qbs._flags_from_env()
expected_flags = {
'cpp.'+asm['qbs_key']: asm['qbs_value'],
'cpp.'+c['qbs_key']: c['qbs_value'],
'cpp.'+cpp['qbs_key']: cpp['qbs_value'],
'cpp.'+cxx['qbs_key']: cxx['qbs_value'],
'cpp.'+wl['qbs_key']: wl['qbs_value'].replace('-Wl,', ''),
'cpp.'+ld['qbs_key']: ld['qbs_value']
}
self.assertEqual(flags_from_env, expected_flags)
@staticmethod
def _generate_qbs_config_output():
return textwrap.dedent('''\
profiles.conan.cpp.cCompilerName: "gcc"
profiles.conan.cpp.compilerName: "g++"
profiles.conan.cpp.cxxCompilerName: "g++"
profiles.conan.cpp.driverFlags: \
["-march=armv7e-m", "-mtune=cortex-m4", "--specs=nosys.specs"]
profiles.conan.cpp.platformCommonCompilerFlags: undefined
profiles.conan.cpp.platformLinkerFlags: undefined
profiles.conan.cpp.toolchainInstallPath: "/usr/bin"
profiles.conan.cpp.toolchainPrefix: "arm-none-eabi-"
profiles.conan.qbs.targetPlatform: ""
profiles.conan.qbs.someBoolProp: "true"
profiles.conan.qbs.someIntProp: "13"
profiles.conan.qbs.toolchain: ["gcc"]
''')
def test_read_qbs_toolchain_from_qbs_config_output(self):
expected_config = {
'cpp.cCompilerName': '"gcc"',
'cpp.compilerName': '"g++"',
'cpp.cxxCompilerName': '"g++"',
'cpp.driverFlags': '["-march=armv7e-m", "-mtune=cortex-m4", "--specs=nosys.specs"]',
'cpp.platformCommonCompilerFlags': 'undefined',
'cpp.platformLinkerFlags': 'undefined',
'cpp.toolchainInstallPath': '"/usr/bin"',
'cpp.toolchainPrefix': '"arm-none-eabi-"',
'qbs.targetPlatform': '""',
'qbs.someBoolProp': 'true',
'qbs.someIntProp': '13',
'qbs.toolchain': '["gcc"]'
}
conanfile = MockConanfileWithFolders(
MockSettings({}), runner=RunnerMock(
expectations=[RunnerMock.Expectation(
output=self._generate_qbs_config_output())]))
config = qbs._read_qbs_toolchain_from_config(conanfile)
self.assertEqual(len(conanfile.runner.command_called), 1)
self.assertEqual(conanfile.runner.command_called[0],
'qbs-config --settings-dir "%s" --list' % (
qbs._settings_dir(conanfile)))
self.assertEqual(config, expected_config)
@unittest.skipIf(six.PY2, "Order of qbs output is defined only for PY3")
def test_toolchain_content(self):
expected_content = textwrap.dedent('''\
import qbs
Project {
Profile {
name: "conan_toolchain_profile"
/* detected via qbs-setup-toolchains */
cpp.cCompilerName: "gcc"
cpp.compilerName: "g++"
cpp.cxxCompilerName: "g++"
cpp.driverFlags: ["-march=armv7e-m", "-mtune=cortex-m4", "--specs=nosys.specs"]
cpp.platformCommonCompilerFlags: undefined
cpp.platformLinkerFlags: undefined
cpp.toolchainInstallPath: "/usr/bin"
cpp.toolchainPrefix: "arm-none-eabi-"
qbs.targetPlatform: ""
qbs.someBoolProp: true
qbs.someIntProp: 13
qbs.toolchain: ["gcc"]
/* deduced from environment */
qbs.sysroot: "/foo/bar/path"
/* conan settings */
qbs.buildVariant: "release"
qbs.architecture: "x86_64"
qbs.optimization: "small"
cpp.cxxLanguageVersion: "c++17"
/* package options */
cpp.positionIndependentCode: true
}
}''')
conanfile = MockConanfileWithFolders(
MockSettings({
'compiler': 'gcc',
'compiler.cppstd': 17,
'os': 'Linux',
'build_type': 'MinSizeRel',
'arch': 'x86_64'
}),
options=MockOptions({
'fPIC': True
}),
runner=RunnerMock(
expectations=[
RunnerMock.Expectation(),
RunnerMock.Expectation(
output=self._generate_qbs_config_output()),
]))
with tools.environment_append({'SYSROOT': '/foo/bar/path'}):
qbs_toolchain = qbs.QbsToolchain(conanfile)
self.assertEqual(qbs_toolchain.content, expected_content)
|
StarcoderdataPython
|
5128027
|
<reponame>jtauber/online-reader
from pysblgnt import morphgnt_rows
from . import ref
def rows_for_verse(verse):
book_num, chapter_num, verse_num = verse.tup
rows = []
for row in morphgnt_rows(book_num):
c = int(row["bcv"][2:4])
v = int(row["bcv"][4:6])
if (c, v) == (chapter_num, verse_num):
rows.append(row)
return rows
def rows_by_verses_for_chapter(chapter):
book_num, chapter_num = chapter.tup
last_verse = 0
verses = []
rows = None
for row in morphgnt_rows(book_num):
c = int(row["bcv"][2:4])
v = int(row["bcv"][4:6])
if c == chapter_num:
if v != last_verse:
if rows:
verses.append(rows)
rows = (chapter.verse(v), [])
last_verse = v
rows[1].append(row)
verses.append(rows)
return verses
def rows_by_verses_by_chapters_for_book(book_num):
last_chapter = 0
chapters = []
verses = None
rows = None
for row in morphgnt_rows(book_num):
c = int(row["bcv"][2:4])
v = int(row["bcv"][4:6])
if c != last_chapter:
if verses:
verses[1].append(rows)
chapters.append(verses)
chapter = ref.Chapter(book_num, c)
verses = (chapter, [])
rows = None
last_chapter = c
last_verse = 0
if v != last_verse:
if rows:
verses[1].append(rows)
rows = (chapter.verse(v), [])
last_verse = v
rows[1].append(row)
verses[1].append(rows)
chapters.append(verses)
return chapters
def before(row):
word = row["word"]
text = row["text"]
return text[:text.index(word)]
def after(row):
word = row["word"]
text = row["text"]
return text[text.index(word) + len(word):]
def pos(row):
return row["ccat-pos"].strip("-")
def parse(row):
if row["ccat-parse"][3] == "-":
return row["ccat-parse"][4:].strip("-")
elif row["ccat-parse"][3] == "N":
return row["ccat-parse"][1:4]
elif row["ccat-parse"][3] == "P":
return row["ccat-parse"][1:4] + "." + row["ccat-parse"][4:7]
elif row["ccat-parse"][3] in "DISO":
return row["ccat-parse"][1:4] + "." + row["ccat-parse"][0] + row["ccat-parse"][5]
def render_pos(row):
return {
"RA": "article",
"A-": "adjective",
"N-": "noun",
"C-": "conjunction",
"RP": "personal pronoun",
"RR": "relative pronoun",
"V-": "verb",
"P-": "preposition",
"D-": "adverb",
"RD": "demonstrative",
"RI": "interogative/indefinite pronoun",
"X-": "particle",
"I-": "interjection",
}[row["ccat-pos"]]
def render_person(row):
return {
"1": "1st person",
"2": "2nd person",
"3": "3rd person",
}[row["ccat-parse"][0]]
def render_tense(row):
return {
"P": "present",
"F": "future",
"A": "aorist",
"X": "perfect",
"Y": "pluperfect",
"I": "imperfect",
}[row["ccat-parse"][1]]
def render_voice(row):
return {
"A": "active",
"M": "middle",
"P": "passive",
}[row["ccat-parse"][2]]
def render_mood(row):
return {
"I": "indicative",
"S": "subjunctive",
"D": "imperative",
"O": "optative",
}[row["ccat-parse"][3]]
def render_case(row):
return {
"N": "nominative",
"A": "accusative",
"G": "genitive",
"D": "dative",
"V": "vocative",
"-": "",
}[row["ccat-parse"][4]]
def render_number(row):
return {
"S": "singular",
"P": "plural",
"-": "",
}[row["ccat-parse"][5]]
def render_gender(row):
return {
"M": "masculine",
"F": "feminine",
"N": "neuter",
"-": "",
}[row["ccat-parse"][6]]
def render_degree(row):
return {
"C": "comparative",
"S": "superlative",
"-": "",
}[row["ccat-parse"][7]]
def render_parse(row):
parse = []
if row["ccat-pos"] == "V-":
if row["ccat-parse"][3] == "P":
parse.append(render_tense(row))
parse.append(render_voice(row))
parse.append("participle")
parse.append(render_case(row))
parse.append(render_number(row))
parse.append(render_gender(row))
elif row["ccat-parse"][3] == "N":
parse.append(render_tense(row))
parse.append(render_voice(row))
parse.append("infinitive")
else:
parse.append(render_tense(row))
parse.append(render_voice(row))
parse.append(render_mood(row))
parse.append(render_person(row))
parse.append(render_number(row))
else:
parse.append(render_case(row))
parse.append(render_number(row))
parse.append(render_gender(row))
parse.append(render_degree(row))
return " ".join(parse).strip()
|
StarcoderdataPython
|
9786613
|
x = input()
s = input()
print(s.replace(x, ''))
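# Worked example: with x = "ab" and s = "aabab", every occurrence of "ab" is
# removed and the program prints "a".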
|
StarcoderdataPython
|
6665025
|
import base64
import io
import mimetypes
from datetime import datetime
import pytz
from PIL import Image
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from django.conf import settings
from django.contrib.gis.geos import Point
from django.forms.models import model_to_dict
from django.shortcuts import get_object_or_404
from django.utils import timezone
from rest_framework import status
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.parsers import MultiPartParser
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView
from taiwan.models import Substitute
from .models import Source
tw_tz = pytz.timezone('Asia/Taipei')
class CsrfExemptSessionAuthentication(SessionAuthentication):
def enforce_csrf(self, request):
return
def photo_to_s3(user_uuid, source_uuid, photo_obj):
photo_name = photo_obj._name
photo_content = photo_obj.read()
photo_mime = mimetypes.guess_type(photo_name)[0]
    if photo_mime is None or 'image' not in photo_mime:
        return None, ""
# Upload to S3
photo_type = photo_name.split('.')[-1]
conn = S3Connection(settings.AWS_ACCESS_KEY, settings.AWS_SECRET_KEY)
bucket = conn.get_bucket('dengue-backend')
k = Key(bucket)
k.key = 'breeding_source/' + user_uuid + '_' + source_uuid + '.' + photo_type
k.set_metadata("Content-Type", photo_mime)
k.set_contents_from_string(photo_content)
k.set_acl("public-read")
# Compress Photo
photo_bytesio = io.BytesIO(photo_content)
photo_image_obj = Image.open(photo_bytesio)
photo_size_x, photo_size_y = photo_image_obj.size
photo_image_obj = photo_image_obj.resize(
(int(photo_size_x/3*2), int(photo_size_y/3*2)), Image.ANTIALIAS)
compress_bytesio = io.BytesIO()
photo_image_obj.save(compress_bytesio,
format=photo_mime.split('/')[1],
optimize=True,
quality=95)
# photo_image_obj.save('test.jpg',
# format=photo_mime.split('/')[1],
# optimize=True,
# quality=95)
compress_bytesio = compress_bytesio.getvalue()
return compress_bytesio, k.generate_url(expires_in=0, query_auth=False)
class SourceCollection(APIView):
parser_classes = (MultiPartParser,)
def post(self, request):
userprofile = request.user.userprofile
photo_obj = request.data.get('photo', '')
source_type = request.data.get('source_type', '')
lng = request.data.get('lng', 0)
lat = request.data.get('lat', 0)
address = request.data.get('address', '')
modified_address = request.data.get('modified_address', '')
description = request.data.get('description', '')
if photo_obj == '' or source_type == '' or lng == 0 or lat == 0:
return Response({"detail": "請填寫完整孳生源資料"}, status=status.HTTP_406_NOT_ACCEPTABLE)
try:
source_point = Point(float(lng), float(lat), srid=4326)
substitute = Substitute.objects.filter(mpoly__intersects=source_point)[0]
except Exception:
return Response({"detail": "經緯度錯誤"}, status=status.HTTP_406_NOT_ACCEPTABLE)
breeding_source = Source(userprofile=userprofile)
photo_content, photo_url = photo_to_s3(userprofile.user_uuid, \
str(breeding_source.source_uuid), \
photo_obj)
if photo_url == "":
return Response({"detail": "照片上傳錯誤"}, status=status.HTTP_406_NOT_ACCEPTABLE)
photo_base64 = base64.b64encode(photo_content)
breeding_source.photo_url = photo_url
breeding_source.photo_base64 = photo_base64
breeding_source.source_type = source_type
breeding_source.lng = lng
breeding_source.lat = lat
breeding_source.address = address
breeding_source.modified_address = modified_address
breeding_source.village_name = substitute.v_name
breeding_source.description = description
breeding_source.save()
return Response(status=status.HTTP_201_CREATED)
def get(self, request):
qualified_status = request.GET.get('qualified_status', '')
before_timestamp = request.GET.get('before_timestamp', '')
limit = request.GET.get('limit', 10)
if qualified_status == '':
qualified_status = ['待審核', '已通過', '未通過']
else:
qualified_status = qualified_status.split(',')
try:
before_timestamp = datetime.fromtimestamp(float(before_timestamp))
except Exception:
before_timestamp = timezone.now()
try:
limit = int(limit)
except Exception:
limit = 10
source_filter = Source.objects.filter(
userprofile=request.user.userprofile,
qualified_status__in=qualified_status,
created_at__lt=before_timestamp).order_by('-updated_at')[:limit]
res_data = list()
for source in source_filter:
source_dict = model_to_dict(source, exclude=\
['userprofile', 'created_at', 'updated_at', 'location'])
source_dict['created_at'] = str(source.created_at.astimezone(tw_tz))
source_dict['timestamp'] = str(source.created_at.astimezone(tw_tz).timestamp())
res_data.append(source_dict)
return Response(res_data, status=status.HTTP_200_OK)
class SourceTotal(APIView):
def get(self, request):
qualified_status = request.GET.get('qualified_status', '')
if qualified_status == '':
qualified_status = ['待審核', '已通過', '未通過']
else:
qualified_status = qualified_status.split(',')
total = Source.objects.filter(
userprofile=request.user.userprofile,
qualified_status__in=qualified_status).count()
res_data = {'total': total}
return Response(res_data, status=status.HTTP_200_OK)
class AdminSourceCollection(APIView):
if settings.DEBUG == "False":
authentication_classes = (SessionAuthentication, BasicAuthentication)
else:
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
permission_classes = (IsAdminUser,)
def get(self, request):
phone = request.GET.get('phone', '')
if phone == '':
source = Source.objects.filter(qualified_status="待審核").order_by('?')[0]
else:
try:
source = Source.objects.filter(
userprofile__phone=phone,
qualified_status="待審核"
).order_by('?')[0]
except Exception:
return Response([], status=status.HTTP_200_OK)
source_filter = Source.objects.filter(userprofile=source.userprofile,
qualified_status="待審核")
res_data = list()
for source in source_filter:
source_dict = model_to_dict(source, fields=['photo_url'])
source_dict['source_uuid'] = str(source.source_uuid)
res_data.append(source_dict)
return Response(res_data, status=status.HTTP_200_OK)
def put(self, request):
breeding_source_list = request.data.get('breeding_source_list', list())
for source_dict in breeding_source_list:
source = get_object_or_404(Source, source_uuid=source_dict['source_uuid'])
source.qualified_status = source_dict['qualified_status']
source.save()
return Response(status=status.HTTP_200_OK)
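# Routing sketch (hypothetical urls.py, names assumed): these APIViews would
# typically be wired up with Django URL patterns along these lines.
#
#   from django.urls import path
#   from .views import SourceCollection, SourceTotal, AdminSourceCollection
#
#   urlpatterns = [
#       path('sources/', SourceCollection.as_view()),
#       path('sources/total/', SourceTotal.as_view()),
#       path('admin/sources/', AdminSourceCollection.as_view()),
#   ]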
|
StarcoderdataPython
|
6656627
|
<reponame>yakomaxa/micanpymol
import subprocess
import tempfile
import os
import pymol
from pymol import cmd, CmdException
def mican(mobile, target, option=""):
#make temporary dir and do everything there
with tempfile.TemporaryDirectory() as dname:
# print tmp dir name
print("Temporary directory =" + dname)
# make sure you have mican in PATH
# directly giving 'execute' full path below is good alternative
# For example : execute = "/usr/bin/mican"
execute = "mican"
tmptarget = dname + "/target.pdb"
tmpmobile = dname + "/mobile.pdb"
tmpout = dname + "/aligned.pdb"
# save pdb for mican
pymol.cmd.save(tmptarget, target)
pymol.cmd.save(tmpmobile, mobile)
modeoption = "-" + option
option2 = "-o"
outfile = tmpout
mican = [execute, tmpmobile, tmptarget, option2, outfile]
for op in option.split():
if(op == "-o"):
print("option -o is reserved")
raise CmdException
mican.append(op)
proc=subprocess.run(mican,stdout = subprocess.PIPE)
print(proc.stdout.decode("utf8")) # print result to pymol console
pymol.cmd.load(outfile, "aligned")
pymol.cmd.split_states("aligned")
pymol.cmd.select("mobileback",mobile + " and backbone")
pymol.cmd.align("mobileback", "aligned_0001 and backbone")
# use cmd pair_fit if you think align is not good
# print("Using cmd.align instead of cmd.pair_fit")
# pymol.cmd.pair_fit("mobileback", "aligned_0001 and backbone")
pymol.cmd.delete("mobileback")
pymol.cmd.delete("aligned")
pymol.cmd.delete("aligned_0001")
pymol.cmd.delete("aligned_0002")
# pymol.cmd.quit()
pymol.cmd.extend("mican", mican)
cmd.auto_arg[0]['mican'] = cmd.auto_arg[0]['align']
cmd.auto_arg[1]['mican'] = cmd.auto_arg[1]['align']
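# Usage sketch, assuming this file is loaded inside a PyMOL session (e.g. via
# `run micanpymol.py`) and that the `mican` executable is on PATH, or that
# `execute` above is changed to its full path:
#   PyMOL> mican mobile_object, target_object
#   PyMOL> mican mobile_object, target_object, option="-s"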
|
StarcoderdataPython
|
6508147
|
#!/usr/bin/python
from __future__ import print_function
import magic
import sys
import argparse
def detect_dicom(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to check')
args = parser.parse_args(argv)
dicom_files = []
for filename in args.filenames:
if magic.from_file(filename,mime=True) == 'application/dicom':
dicom_files.append(filename)
if dicom_files:
for dicom_file in dicom_files:
print('DICOM file found: {}'.format(dicom_file))
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
detect_dicom()
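# Example invocation sketch (file names are hypothetical): the script exits with
# status 1 if any of the given files is detected as DICOM, after listing them.
#   $ ./detect_dicom.py scan_001.dcm notes.txt
#   DICOM file found: scan_001.dcm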
|
StarcoderdataPython
|
1920603
|
<gh_stars>0
#!/usr/bin/env python
from __future__ import print_function
import six
from werkzeug.security import check_password_hash, generate_password_hash
if __name__ == '__main__':
password = six.moves.input("password: ")
password_hash = generate_password_hash(password)
password = six.moves.input("verify: ")
if check_password_hash(password_hash, password):
print(password_hash)
else:
print("Failure!")
|
StarcoderdataPython
|
1887221
|
#!/usr/bin/env python
# #############################
# # GO stone camera detection #
# #############################
#
# Licensed under MIT License (MIT)
#
# Copyright (c) 2018 <NAME> | <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os, sys
from multiprocessing import Process, Manager, Value, Array
from multiprocessing import Pool, Array, Process
from threading import Thread
my_file = os.path.abspath(__file__)
my_path ='/'.join(my_file.split('/')[0:-1])
sys.path.insert(0,my_path + "/libs" )
sys.path.insert(0,my_path + "/libs/opencv" )
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
class CameraStoneDetection():
_showImage = True
_camera = None
_rawCapture = None
_stream = None
_cascadeBlack = None
_cascadeWhite = None
_useGrayscale = True;
__cameraResolutionX = 640*2
__cameraResolutionY = 480*2
_windowName = "iGoBot camera";
RectsBlack = [];
RectsWhite = [];
_counter = 0;
_process = None
_released = False
# define settings of brightness and contrast
_settings = [[50,50],[50,30],[50,80],[60,30],[60,50],[60,80],[70,50]];
def __init__(self):
print("camera init")
self.posXFace = -1
self.posYFace = -1
self.InitCamera()
#thread = Thread(target=self._update, args=())
#thread.nice = -20 # -20 high prio, 20 low prio
#thread.start()
#thread.nice = -20
def SetCameraSettings(self, settingsNo):
if (settingsNo >= len(self._settings)):
return False;
self._camera.brightness = self._settings[settingsNo][0];
        self._camera.contrast = self._settings[settingsNo][1];
return True;
def detect(self, img, cascade):
rects = cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=3, minSize=(int(self.__cameraResolutionX / 30), int( self.__cameraResolutionY / 30)), flags=cv2.CASCADE_SCALE_IMAGE)
if len(rects) == 0:
return []
#rects[:,2:] += rects[:,:2] # convert from [[x,y,h,b]] to [[x1,y1,x2,y2]]
return rects
def draw_rects(self, img, rects, color):
for x, y, b, h in rects:
cv2.rectangle(img, (x, y), (x+b, y+h), color, 4)
def InitCamera(self):
print("camera start")
cv2.destroyAllWindows()
cv2.namedWindow(self._windowName, cv2.WINDOW_NORMAL)
cv2.resizeWindow(self._windowName, 400,300)
# initialize the camera and grab a reference to the raw camera capture
self._camera = PiCamera()
self._camera.resolution = (self.__cameraResolutionX, self.__cameraResolutionY)
self.SetCameraSettings(settingsNo=0);
#self._camera.contrast = 50;
#self._camera.brightness = 50;
self._camera.framerate = 12
self._rawCapture = PiRGBArray(self._camera, size=(self.__cameraResolutionX, self.__cameraResolutionY))
#self._stream = self._camera.capture_continuous(self._rawCapture, format="bgr", use_video_port=True)
# allow the camera to warmup
time.sleep(0.2)
if (self._useGrayscale):
cascade_black_fn = "stoneDetection/black-cascade-grayscale.xml"
cascade_white_fn = "stoneDetection/white-cascade-grayscale.xml"
else:
cascade_black_fn = "stoneDetection/black-cascade.xml"
cascade_white_fn = "stoneDetection/white-cascade.xml"
self._cascadeBlack = cv2.CascadeClassifier(cascade_black_fn)
self._cascadeWhite = cv2.CascadeClassifier(cascade_white_fn)
print("camera start done")
def Update(self):
#global ftimestamp, getFPS
# keep looping infinitely until the thread is stopped
#print ("<<<" , self._stream);
# for f in self._stream:
if (True):
self._camera.capture(self._rawCapture, format="bgr")
#image = rawCapture.array
self._counter = self._counter+1;
#print (self._counter);
if (self._counter > 100):
self._counter = 0;
# grab the frame from the stream and clear the stream in
# preparation for the next frame
#image = f.array
image = self._rawCapture.array
rawImage = image
if (self._useGrayscale):
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#self_actualFrame = image
self._rawCapture.truncate(0)
self.RectsBlack = self.detect(image, self._cascadeBlack)
self.RectsWhite = self.detect(image, self._cascadeWhite)
if (self._showImage==True):
key = cv2.waitKey(1) & 0xFF
self.draw_rects(rawImage, self.RectsBlack, (0, 0, 0))
self.draw_rects(rawImage, self.RectsWhite, (255, 255, 255))
cv2.putText(rawImage, str(self._counter), (10,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
cv2.imshow(self._windowName, rawImage)
# if the thread indicator variable is set, stop the thread
# and resource camera resources
#if (self._released == True):
#self._stream.close()
#self._rawCapture.close()
#self._camera.close()
#return
#time.sleep(0.01)
#return;
def Release(self):
if (self._released == False):
self._released = True
time.sleep(0.5)
print ("shutting down camera")
#self._stream.close()
self._rawCapture.close()
self._camera.close()
def __del__(self):
self.Release()
import atexit
def exit_handler():
testCamera.Release()
if __name__ == '__main__':
from hardware.Light import Light;
light = Light();
light.On();
testCamera = CameraStoneDetection();
setting = 0;
for c in range(0,1000):
testCamera.Update();
setting = setting+1;
if (testCamera.SetCameraSettings(setting)==False):
setting = 0;
testCamera.SetCameraSettings(setting);
time.sleep(1)
testCamera.Release()
|
StarcoderdataPython
|
1600270
|
import math
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
# from model.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
from ..utils import build_norm_layer
'''
class IBN(nn.Module):
def __init__(self, planes):
super(IBN, self).__init__()
half1 = int(planes/2)
self.half = half1
half2 = planes - half1
self.IN = nn.InstanceNorm2d(half1, affine=True)
self.BN = nn.BatchNorm2d(half2)
def forward(self, x):
split = torch.split(x, self.half, 1)
out1 = self.IN(split[0].contiguous())
out2 = self.BN(split[1].contiguous())
out = torch.cat((out1, out2), 1)
return out
'''
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
downsample=None,
norm_cfg=dict(type='BN'),
sw_cfg=None):
super(BasicBlock, self).__init__()
self.norm1_name, norm1 = build_norm_layer(
sw_cfg if sw_cfg is not None else norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.conv1 = conv3x3(inplanes, planes, stride)
self.add_module(self.norm1_name, norm1)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.add_module(self.norm2_name, norm2)
self.downsample = downsample
self.stride = stride
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
downsample=None,
norm_cfg=dict(type='BN'),
sw_cfg=None):
super(Bottleneck, self).__init__()
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
sw_cfg if sw_cfg is not None else norm_cfg, planes, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
norm_cfg, planes * 4, postfix=3)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.add_module(self.norm3_name, norm3)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
@property
def norm3(self):
return getattr(self, self.norm3_name)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__( self,
block,
layers,
output_stride,
num_classes = 1000,
norm_cfg=dict(type='BN', requires_grad=True),
sw_cfg=None,
stage_with_sw=(True, True, True, False)):
self.inplanes = 64
super(ResNet, self).__init__()
blocks = [1, 2, 4]
if output_stride == 16:
strides = [1, 2, 2, 1]
dilations = [1, 1, 1, 2]
elif output_stride == 8:
strides = [1, 2, 1, 1]
dilations = [1, 1, 2, 4]
else:
raise NotImplementedError
self.norm_cfg = norm_cfg
self.sw_cfg = sw_cfg
self.stage_with_sw = stage_with_sw
self.norm1_name, norm1 = build_norm_layer(
sw_cfg if sw_cfg is not None else norm_cfg, 64, postfix=1)
# Modules
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.add_module(self.norm1_name, norm1)
#self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0], with_sw=stage_with_sw[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1], with_sw=stage_with_sw[1])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2], with_sw=stage_with_sw[2])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3], with_sw=stage_with_sw[3])
# self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3])
self.avgpool = nn.AvgPool2d(7)
self._init_weight()
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
@property
def norm1(self):
return getattr(self, self.norm1_name)
def _make_layer(self, block, planes, blocks, with_sw, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
build_norm_layer(self.norm_cfg, planes * block.expansion)[1],
)
layers = []
layers.append(
block(self.inplanes,
planes,
stride,
downsample,
norm_cfg=self.norm_cfg,
sw_cfg=None))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(self.inplanes,
planes,
norm_cfg=self.norm_cfg,
sw_cfg=self.sw_cfg if
(with_sw and i % 2 == 1) else None))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
low_level_feat = x
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x, low_level_feat
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _load_pretrained_model(model, url=None, path=None):
if url is not None:
pretrain_dict = model_zoo.load_url(url)
else:
pretrain_dict = torch.load(path)
model_dict = {}
state_dict = model.state_dict()
for k, v in pretrain_dict.items():
# print(k)
if k in state_dict:
model_dict[k] = v
else:
print(k)
state_dict.update(model_dict)
model.load_state_dict(state_dict)
def ResNet50(output_stride, ibn_mode='none', pretrained=True, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], output_stride, **kwargs)
if pretrained:
_load_pretrained_model(model, path='pretrained/resnet50_sw.pth')
return model
def ResNet101(output_stride, ibn_mode='none', pretrained=True, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
assert ibn_mode in ['none', 'a']
model = ResNet(Bottleneck, [3, 4, 23, 3], output_stride, **kwargs)
if pretrained:
_load_pretrained_model(model, path='pretrained/resnet101_sw.pth')
return model
if __name__ == "__main__":
model = ResNet50(output_stride=16, ibn_mode='a', pretrained=True)
input = torch.rand(1, 3, 512, 512)
output, low_level_feat = model(input)
print(output.size())
print(low_level_feat.size())
|
StarcoderdataPython
|
4870900
|
import os
import unittest
import pandas as pd
from set_up_grasp_models.set_up_models.manipulate_model import remove_spaces, reorder_reactions, rename_columns
class TestManipulateModel(unittest.TestCase):
def setUp(self):
this_dir, this_filename = os.path.split(__file__)
self.test_folder = os.path.join(this_dir, 'test_files', 'test_set_up_models', 'manipulate_model')
def test_reorder_reactions(self):
true_res = pd.read_excel(os.path.join(self.test_folder, 'true_res_HMP2360_r0_t0_mech_order_fixed.xlsx'),
sheet_name=None, index_col=0)
rxn_list = ['TPH', 'DDC', 'AANAT', 'ASMT', 'DDC_tryptm', 'AANAT_tryptm', 'IN_trp',
'EX_trp', 'EX_fivehtp', 'EX_nactsertn', 'EX_meltn', 'EX_nactryptm', 'EX_srtn']
data_dict = pd.read_excel(os.path.join(self.test_folder, 'HMP2360_r0_t0_mech_order.xlsx'),
sheet_name=None, index_col=0)
file_out = os.path.join(self.test_folder, 'HMP2360_r0_t0_mech_order_fixed.xlsx')
reorder_reactions(data_dict, rxn_list, file_out)
res = pd.read_excel(file_out, sheet_name=None, index_col=0)
self.assertListEqual(list(true_res.keys()), list(res.keys()))
for sheet in true_res.keys():
self.assertTrue(true_res[sheet].equals(res[sheet]))
def test_remove_spaces(self):
true_res = pd.read_excel(os.path.join(self.test_folder, 'true_res_HMP2360_r0_t0_mech_spaces_fixed.xlsx'),
sheet_name=None, index_col=0)
data_dict = pd.read_excel(os.path.join(self.test_folder, 'HMP2360_r0_t0_mech_spaces.xlsx'),
sheet_name=None, index_col=0)
file_out = os.path.join(self.test_folder, 'HMP2360_r0_t0_mech_spaces_fixed.xlsx')
remove_spaces(data_dict, file_out)
res = pd.read_excel(file_out, sheet_name=None, index_col=0)
self.assertListEqual(list(true_res.keys()), list(res.keys()))
for sheet in true_res.keys():
print(sheet)
self.assertTrue(true_res[sheet].equals(res[sheet]))
def test_rename_columns(self):
true_res = pd.read_excel(os.path.join(self.test_folder, 'true_res_HMP2360_r0_t0_cols.xlsx'),
sheet_name=None, index_col=0)
data_dict = pd.read_excel(os.path.join(self.test_folder, 'HMP2360_r0_t0_cols.xlsx'),
sheet_name=None, index_col=0)
file_out = os.path.join(self.test_folder, 'HMP2360_r0_t0_cols_fixed.xlsx')
rename_columns(data_dict, file_out)
res = pd.read_excel(file_out, sheet_name=None, index_col=0)
self.assertListEqual(list(true_res.keys()), list(res.keys()))
for sheet in true_res.keys():
print(sheet)
self.assertTrue(true_res[sheet].equals(res[sheet]))
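# Convenience entry point (an addition, not required when using a test runner)
# so the module can also be executed directly with the Python interpreter.
if __name__ == '__main__':
    unittest.main()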
|
StarcoderdataPython
|
4872146
|
for t in range(int(input())):
arr = list(map(int, input().split()))
n = len(arr)
maximum = -999999999999999999
newMax = maximum
maxsubsequence = 0
minimum = maximum
for i in range(n-1, -1, -1):
# print("Array: ",arr[i])
# print("SUm: ",arr[i]+maximum)
if arr[i] > 0:
maxsubsequence += arr[i]
elif arr[i] > minimum:
minimum = arr[i]
maximum = max(arr[i], arr[i]+maximum)
if maximum > newMax:
newMax = maximum
if maxsubsequence == 0 :
maxsubsequence = minimum
print(newMax, maxsubsequence)
|
StarcoderdataPython
|
9604078
|
<reponame>android-risc-v/external_webrtc
#!/usr/bin/env python
import json
import sys
# Set this to True to generate a default entry with all the flags and defines
# common to all the modules that needs to be curated by hand after it's
# generated. When set to False it prints the last curated version of the
# default, which could be incomplete.
GENERATE_FULL_DEFAULT = False
PRINT_ORIGINALS = False
GENERATE_ALL_FLAGS = False
if len(sys.argv) != 2:
print('wrong number of arguments')
exit()
def FormatList(l):
return json.dumps(l)
def FilterHeaders(l):
return [x for x in l if not x.endswith('.h')]
def PrintOriginCommentedOut(target):
if PRINT_ORIGINALS:
print('/* From target:')
print(json.dumps(target, sort_keys = True, indent = 4))
print('*/')
def MakeRelatives(l):
return [x.split('//').pop() for x in l]
def FormatName(name):
return 'webrtc_' + name.split('/').pop().replace(':', '__')# .replace('/', '_').replace(':', '_')
def FilterFlags(flags, to_skip = set([])):
if GENERATE_ALL_FLAGS:
skip = set([
'-L',
'-isystem',
'-Xclang',
'-B',
'--sysroot',
'-fcrash-diagnostics-dir',
'.',
'-fdebug-compilation-dir',
'-instcombine-lower-dbg-declare=0',
'-Wno-non-c-typedef-for-linkage',
'-Werror',
'-fcomplete-member-pointers',
'-m64',
'-march=x86-64'
]).union(to_skip)
return [x for x in flags if not any([x.startswith(y) for y in skip])]
else:
return [x for x in flags if x == '-msse2']
def GenerateDefault(targets):
in_default = {
'cflags' : [],
'cflags_c' : [],
'cflags_cc' : [],
'ldflags' : [],
'asmflags' : [],
'defines' : []
}
first = True
for target in targets:
typ = target['type']
if typ == 'static_library':
if first:
first = False
# Add all the flags to the default, we'll remove some later
for flag_type in in_default.keys():
in_default[flag_type] = []
for flag in target[flag_type]:
in_default[flag_type].append(flag)
else:
for flag_type in in_default.keys():
flags_to_remove = []
for flag in in_default[flag_type]:
if flag not in target[flag_type]:
                            flags_to_remove.append(flag)
for flag in flags_to_remove:
in_default[flag_type].remove(flag)
defines = in_default['defines']
in_default.pop('defines')
in_default['cflags'].extend(['-D{0}'.format(x) for x in defines])
if GENERATE_FULL_DEFAULT:
print('cc_defaults {')
print(' name: "webrtc_defaults",')
print(' local_include_dirs: ["."],')
for typ in in_default.keys():
print(' {0}: ['.format(typ.replace('asmflags', 'asflags')
.replace('cflags_cc', 'cppflags')
.replace('cflags_c', 'conlyflags')))
for flag in FilterFlags(in_default[typ]):
print(' "{0}",'.format(flag.replace('"', '\\"')))
print(' ],')
print('}')
else:
print('cc_defaults {')
print(' name: "webrtc_defaults",')
print(' local_include_dirs: [')
print(' ".",')
print(' ],')
print(' cflags: [')
print(' "-Wno-unused-parameter",')
print(' "-Wno-missing-field-initializers",')
print(' "-DUSE_UDEV",')
print(' "-DUSE_AURA=1",')
print(' "-DUSE_GLIB=1",')
print(' "-DUSE_NSS_CERTS=1",')
print(' "-DUSE_X11=1",')
print(' "-D_FILE_OFFSET_BITS=64",')
print(' "-D_LARGEFILE_SOURCE",')
print(' "-D_LARGEFILE64_SOURCE",')
print(' "-D_GNU_SOURCE",')
print(' "-DWEBRTC_ENABLE_PROTOBUF=0",')
print(' "-DWEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE",')
print(' "-DRTC_ENABLE_VP9",')
print(' "-DHAVE_SCTP",')
print(' "-DWEBRTC_LIBRARY_IMPL",')
print(' "-DWEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS=1",')
print(' "-DWEBRTC_POSIX",')
print(' "-DWEBRTC_LINUX",')
print(' ],')
print(' header_libs: [')
print(' "libwebrtc_absl_headers",')
print(' ],')
print(' static_libs: [')
print(' "libaom",')
print(' "libevent",')
print(' "libopus",')
print(' "libsrtp2",')
print(' "libvpx",')
print(' "libyuv",')
print(' "libpffft",')
print(' "rnnoise_rnn_vad",')
print(' "usrsctplib",')
print(' ],')
print(' shared_libs: [')
print(' "libcrypto",')
print(' "libssl",')
print(' ],')
print(' host_supported: true,')
print(' device_supported: false,')
print(' arch: {')
print(' arm: {')
print(' enabled: false,')
print(' },')
print(' },')
print(' target: {')
print(' darwin: {')
print(' enabled: false,')
print(' },')
print(' },')
print('}')
in_default['cflags'].extend([
"-Wno-unused-parameter",
"-Wno-missing-field-initializers",
"-DUSE_UDEV",
"-DUSE_AURA=1",
"-DUSE_GLIB=1",
"-DUSE_NSS_CERTS=1",
"-DUSE_X11=1",
"-D_FILE_OFFSET_BITS=64",
"-D_LARGEFILE_SOURCE",
"-D_LARGEFILE64_SOURCE",
"-D_GNU_SOURCE",
"-DWEBRTC_ENABLE_PROTOBUF=0",
"-DWEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE",
"-DRTC_ENABLE_VP9",
"-DHAVE_SCTP",
"-DWEBRTC_LIBRARY_IMPL",
"-DWEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS=1",
"-DWEBRTC_POSIX",
"-DWEBRTC_LINUX"
])
return in_default
def GenerateGroup(target):
PrintOriginCommentedOut(target)
def GenerateSourceSet(target):
PrintOriginCommentedOut(target)
if target.has_key('sources'):
name = FormatName(target['name'])
print('filegroup {')
print(' name: "{0}",'.format(name))
print(' srcs: {0},'.format(FormatList(MakeRelatives(FilterHeaders(target['sources'])))))
print('}')
return name
return ""
def GenerateStaticLib(target, targets, flags_in_default):
PrintOriginCommentedOut(target)
name = FormatName(target['name'])
print('cc_library_static {')
print(' name: "{0}",'.format(name))
print(' defaults: ["webrtc_defaults"],')
sources = []
sources.extend(MakeRelatives(FilterHeaders(target['sources'])))
for trg in targets:
if trg['type'] == 'source_set' and trg['name'] in target['deps']:
sources.append(':' + FormatName(trg['name']))
print(' srcs: {0},'.format(FormatList(sources)))
print(' host_supported: true,')
if target.has_key('asmflags'):
asmflags = FilterFlags(target['asmflags'], set(flags_in_default['asmflags']))
if len(asmflags) > 0:
print(' asflags: {0},'.format(FormatList(asmflags)))
cflags = []
if target.has_key('cflags'):
cflags.extend(target['cflags'])
cflags = FilterFlags(cflags, set(flags_in_default['cflags']))
if target.has_key('defines'):
cflags.extend(['-D' + x for x in target['defines']])
cflags = [x for x in cflags if x not in flags_in_default['cflags']]
if len(cflags) > 0:
print(' cflags: {0},'.format(FormatList(cflags)))
if target.has_key('cflags_c'):
cflags_c = FilterFlags(target['cflags_c'], set(flags_in_default['cflags_c']))
if len(cflags_c) > 0:
print(' conlyflags: {0},'.format(FormatList(cflags_c)))
if target.has_key('cflags_cc'):
cflags_cc = FilterFlags(target['cflags_cc'], set(flags_in_default['cflags_cc']))
if len(cflags_cc) > 0:
print(' cppflags: {0},'.format(FormatList(cflags_cc)))
if target.has_key('ldflags'):
ldflags = FilterFlags(target['ldflags'], set(flags_in_default['ldflags']))
if len(ldflags) > 0:
print(' ldflags: {0},'.format(FormatList(ldflags)))
static_libs = []
for trg in targets:
if trg['type'] == 'static_library' and target['deps'].count(trg['name']) > 0:
static_libs.append(FormatName(trg['name']))
if len(static_libs) > 0:
print(' static_libs: {0},'.format(FormatList(static_libs)))
for dep in target['deps']:
if FormatName(dep) not in static_libs:
#print(' // omitted dep: {0}'.format(dep))
pass
print('}')
return name
json_file = open(sys.argv[1], "r")
targets = json.load(json_file)
flags_in_default = GenerateDefault(targets)
print("\n\n")
static_libs = []
file_groups = []
for target in targets:
typ = target['type']
if typ == 'static_library':
lib_name = GenerateStaticLib(target, targets, flags_in_default)
static_libs.append(lib_name)
elif typ == 'source_set':
group_name = GenerateSourceSet(target)
if len(group_name) > 0:
file_groups.append(group_name)
elif typ == 'group':
GenerateGroup(target)
else:
print('Unknown type: {0}'.format(typ))
print("\n\n")
print('cc_library_static {')
print(' name: "libwebrtc",')
print(' defaults: ["webrtc_defaults"],')
print(' export_include_dirs: ["."],')
print(' whole_static_libs: {0},'.format(FormatList(static_libs + ['libpffft', 'rnnoise_rnn_vad', 'usrsctplib'])))
print(' srcs: {0},'.format(FormatList([':{0}'.format(x) for x in file_groups])))
print('}')
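# Input format sketch (values are hypothetical), inferred from the fields read
# above: sys.argv[1] points to a JSON list of GN target descriptions, e.g.
# [
#   {"name": "//api:array_view", "type": "source_set",
#    "sources": ["//api/array_view.h"], "deps": []},
#   {"name": "//rtc_base:checks", "type": "static_library",
#    "sources": ["//rtc_base/checks.cc"], "deps": [],
#    "cflags": [], "cflags_c": [], "cflags_cc": [],
#    "ldflags": [], "asmflags": [], "defines": []}
# ]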
|
StarcoderdataPython
|
3564332
|
# -*- encoding: utf-8 -*-
'''
-------------------------
@File : data_explore.ipynb
@Time : 2021/12/28 17:58:18
@Author : <NAME>
@Contact : <EMAIL>
@Desc    : This script crops the raw data around the exported peak coordinates and writes the slices as JSON for later labelling and training
-------------------------
'''
import json
import numpy as np
import matplotlib.pyplot as plt
import random
from tqdm import tqdm
import os, sys, shutil
sys.path.append("..")
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
os.chdir(sys.path[-1])
def data_split(data:np.ndarray, loc_list:list, save_path:str, split_len=150, plot=False):
"""根据loc的位置去前后截取raw_data的数据
"""
label = {
"code": "",
"label": 0,
"number of peaks": 0,
"peaks' labels": [],
"borders": [],
"description": "",
"rt":[],
"scan": [],
"intensity": [],
"mz": [],
}
for i, loc in tqdm(enumerate(loc_list[1000:1500])):
        # slice the data around the peak
        # randomly shift loc back/forward so the peak is not centred in the slice
loc += random.randint(int(-1 * 1/3 * split_len), int(1/3 * split_len))
data_slice = data[loc - split_len: loc + split_len].tolist()
        # build the json content
json_save_name = save_path + "peak_sample_" + str(i).zfill(4)
json_file = json_save_name + ".json"
label["code"] = "data slice NO_" + str(i).zfill(4)
label["intensity"] = data_slice
label["rt"] = [loc - split_len, loc + split_len]
label["mz"] = data_slice
with open(json_file, mode="w", encoding="utf-8") as jf:
json.dump(label, jf)
# plot
if plot:
plt.figure()
plt.plot(data_slice)
fig_save_path = save_path + "/fig/"
if not os.path.exists(fig_save_path):
os.makedirs(fig_save_path)
plt.savefig(fig_save_path + "peak_sample_" + str(i).zfill(4) + ".jpg")
plt.close("all")
if __name__ == "__main__":
raw_data_file = "./rawData.csv" # 原始数据
raw_peak_loc_file = "./raw_data_loc.txt" # 原始数据的峰值点坐标
save_path = "./peak_data/"
split_len = 50
raw_data = np.genfromtxt(raw_data_file, delimiter=",")
with open(raw_peak_loc_file, mode='r', encoding='utf-8') as f:
lines = f.readlines()
loc = []
for line in lines:
loc.append(int(line))
try:
shutil.rmtree(save_path)
except:
pass
if not os.path.exists(save_path):
os.makedirs(save_path)
data_split(data=raw_data, loc_list=loc, save_path=save_path, split_len=split_len, plot=True)
|
StarcoderdataPython
|
3463166
|
<filename>unified/models/vms.py
from sqlalchemy import PrimaryKeyConstraint
from . import db
class Instance(db.Model):
"""Monthly report of virtual machine instances produced by VRB vms report"""
server_id = db.Column(db.String(), nullable=False)
server = db.Column(db.String(), nullable=False)
core = db.Column(db.SmallInteger, nullable=False)
ram = db.Column(db.SmallInteger, nullable=False)
storage = db.Column(db.Float)
os = db.Column(db.String(), nullable=False)
business_unit = db.Column(db.String(), nullable=False)
span = db.Column(db.Integer)
# Monthly Up Time (%)
uptime_percent = db.Column(db.Float)
# timestamp of the start of reporting month
month = db.Column(db.Integer)
__table_args__ = (PrimaryKeyConstraint('server_id', 'month', name='pk_instance'),)
def json(self):
"""Jsonify"""
return {
"id": self.server_id,
"server": self.server,
"core": self.core,
"ram": self.ram,
"storage": self.storage,
"os": self.os,
"businessUnit": self.business_unit,
"span": self.span,
"uptimePercent": self.uptime_percent,
"month": self.month
}
@classmethod
def list(cls, start_ts=0, end_ts=0):
""""Gets vms run between start_ts and end_ts."""
query = cls.query
if start_ts > 0:
query = query.filter(Instance.month >= start_ts)
if end_ts > 0:
query = query.filter(Instance.month < end_ts)
fields = ('id', 'server', 'core', 'ram', 'storage', 'os', 'businessUnit', 'span', 'uptimePercent')
return [dict(zip(fields, q)) for q in query.
with_entities(Instance.server_id,
Instance.server,
Instance.core,
Instance.ram,
Instance.storage,
Instance.os,
Instance.business_unit,
Instance.span,
Instance.uptime_percent)]
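# Usage sketch (assumes a Flask application context with the db session bound;
# timestamps are example epoch seconds for Jan/Feb 2021):
#
#   january = Instance.list(start_ts=1609459200, end_ts=1612137600)
#   for vm in january:
#       print(vm['server'], vm['core'], vm['uptimePercent'])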
|
StarcoderdataPython
|
4825001
|
<filename>tests/integration/test_s3.py
import io
import os
import ssl
import boto3
import gzip
import json
import time
import uuid
import unittest
import datetime
import requests
from io import BytesIO
from pytz import timezone
from urllib.parse import parse_qs, quote
from botocore.exceptions import ClientError
from six.moves.urllib import parse as urlparse
from six.moves.urllib.request import Request, urlopen
from localstack import config, constants
from botocore.client import Config
from localstack.utils import testutil
from localstack.constants import TEST_AWS_ACCESS_KEY_ID, TEST_AWS_SECRET_ACCESS_KEY, S3_VIRTUAL_HOSTNAME
from localstack.utils.aws import aws_stack
from localstack.services.s3 import s3_listener, s3_utils
from localstack.utils.common import (
short_uid, retry, get_service_protocol, to_bytes, safe_requests, to_str, new_tmp_file, rm_rf, load_file)
from localstack.services.awslambda.lambda_utils import LAMBDA_RUNTIME_PYTHON36
TEST_BUCKET_NAME_WITH_POLICY = 'test-bucket-policy-1'
TEST_QUEUE_FOR_BUCKET_WITH_NOTIFICATION = 'test_queue_for_bucket_notification_1'
TEST_BUCKET_WITH_VERSIONING = 'test-bucket-versioning-1'
TEST_BUCKET_NAME_2 = 'test-bucket-2'
TEST_KEY_2 = 'test-key-2'
TEST_GET_OBJECT_RANGE = 17
THIS_FOLDER = os.path.dirname(os.path.realpath(__file__))
TEST_LAMBDA_PYTHON_ECHO = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_triggered_by_s3.py')
TEST_LAMBDA_PYTHON_DOWNLOAD_FROM_S3 = os.path.join(THIS_FOLDER, 'lambdas',
'lambda_triggered_by_sqs_download_s3_file.py')
BATCH_DELETE_BODY = """
<Delete xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Object>
<Key>%s</Key>
</Object>
<Object>
<Key>%s</Key>
</Object>
</Delete>
"""
class PutRequest(Request):
""" Class to handle putting with urllib """
def __init__(self, *args, **kwargs):
return Request.__init__(self, *args, **kwargs)
def get_method(self, *args, **kwargs):
return 'PUT'
# def test_host_and_path_addressing(wrapped):
# """ Decorator that runs a test method with both - path and host style addressing. """
# # TODO - needs to be fixed below!
# def wrapper(self):
# try:
# # test via path based addressing
# TestS3.OVERWRITTEN_CLIENT = aws_stack.connect_to_service('s3', config={'addressing_style': 'virtual'})
# wrapped()
# # test via host based addressing
# TestS3.OVERWRITTEN_CLIENT = aws_stack.connect_to_service('s3', config={'addressing_style': 'path'})
# wrapped()
# finally:
# # reset client
# TestS3.OVERWRITTEN_CLIENT = None
# return
class TestS3(unittest.TestCase):
OVERWRITTEN_CLIENT = None
def setUp(self):
self._s3_client = aws_stack.connect_to_service('s3')
self.sqs_client = aws_stack.connect_to_service('sqs')
@property
def s3_client(self):
return TestS3.OVERWRITTEN_CLIENT or self._s3_client
def test_create_bucket_via_host_name(self):
body = """<?xml version="1.0" encoding="UTF-8"?>
<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LocationConstraint>eu-central-1</LocationConstraint>
</CreateBucketConfiguration>"""
headers = aws_stack.mock_aws_request_headers('s3')
bucket_name = 'test-%s' % short_uid()
headers['Host'] = s3_utils.get_bucket_hostname(bucket_name)
response = requests.put(config.TEST_S3_URL, data=body, headers=headers, verify=False)
self.assertEquals(response.status_code, 200)
response = self.s3_client.get_bucket_location(Bucket=bucket_name)
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
self.assertIn('LocationConstraint', response)
# @test_host_and_path_addressing
def test_bucket_policy(self):
# create test bucket
self.s3_client.create_bucket(Bucket=TEST_BUCKET_NAME_WITH_POLICY)
# put bucket policy
policy = {
'Version': '2012-10-17',
'Statement': {
'Action': ['s3:GetObject'],
'Effect': 'Allow',
'Resource': 'arn:aws:s3:::bucketName/*',
'Principal': {
'AWS': ['*']
}
}
}
response = self.s3_client.put_bucket_policy(
Bucket=TEST_BUCKET_NAME_WITH_POLICY,
Policy=json.dumps(policy)
)
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 204)
# retrieve and check policy config
saved_policy = self.s3_client.get_bucket_policy(Bucket=TEST_BUCKET_NAME_WITH_POLICY)['Policy']
self.assertEqual(json.loads(saved_policy), policy)
def test_s3_put_object_notification(self):
bucket_name = 'notif-%s' % short_uid()
key_by_path = 'key-by-hostname'
key_by_host = 'key-by-host'
queue_url, queue_attributes = self._create_test_queue()
self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name)
self.s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 'Enabled'})
# put an object where the bucket_name is in the path
obj = self.s3_client.put_object(Bucket=bucket_name, Key=key_by_path, Body='something')
# put an object where the bucket_name is in the host
headers = aws_stack.mock_aws_request_headers('s3')
headers['Host'] = s3_utils.get_bucket_hostname(bucket_name)
url = '{}/{}'.format(config.TEST_S3_URL, key_by_host)
# verify=False must be set as this test fails on travis because of an SSL error non-existent locally
response = requests.put(url, data='something else', headers=headers, verify=False)
self.assertTrue(response.ok)
self.assertEqual(self._get_test_queue_message_count(queue_url), '2')
response = self.sqs_client.receive_message(QueueUrl=queue_url)
messages = [json.loads(to_str(m['Body'])) for m in response['Messages']]
record = messages[0]['Records'][0]
self.assertIsNotNone(record['s3']['object']['versionId'])
self.assertEquals(record['s3']['object']['versionId'], obj['VersionId'])
# clean up
self.s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 'Disabled'})
self.sqs_client.delete_queue(QueueUrl=queue_url)
self._delete_bucket(bucket_name, [key_by_path, key_by_host])
def test_s3_upload_fileobj_with_large_file_notification(self):
bucket_name = 'notif-large-%s' % short_uid()
queue_url, queue_attributes = self._create_test_queue()
self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name)
# has to be larger than 64MB to be broken up into a multipart upload
file_size = 75000000
large_file = self.generate_large_file(file_size)
download_file = new_tmp_file()
try:
self.s3_client.upload_file(Bucket=bucket_name, Key=large_file.name, Filename=large_file.name)
self.assertEqual(self._get_test_queue_message_count(queue_url), '1')
# ensure that the first message's eventName is ObjectCreated:CompleteMultipartUpload
messages = self.sqs_client.receive_message(QueueUrl=queue_url, AttributeNames=['All'])
message = json.loads(messages['Messages'][0]['Body'])
self.assertEqual(message['Records'][0]['eventName'], 'ObjectCreated:CompleteMultipartUpload')
# download the file, check file size
self.s3_client.download_file(Bucket=bucket_name, Key=large_file.name, Filename=download_file)
self.assertEqual(os.path.getsize(download_file), file_size)
# clean up
self.sqs_client.delete_queue(QueueUrl=queue_url)
self._delete_bucket(bucket_name, large_file.name)
finally:
# clean up large files
large_file.close()
rm_rf(large_file.name)
rm_rf(download_file)
def test_s3_multipart_upload_with_small_single_part(self):
# In a multipart upload "Each part must be at least 5 MB in size, except the last part."
# https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
bucket_name = 'notif-large-%s' % short_uid()
key_by_path = 'key-by-hostname'
queue_url, queue_attributes = self._create_test_queue()
self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name)
# perform upload
self._perform_multipart_upload(bucket=bucket_name, key=key_by_path, zip=True)
self.assertEqual(self._get_test_queue_message_count(queue_url), '1')
# clean up
self.sqs_client.delete_queue(QueueUrl=queue_url)
self._delete_bucket(bucket_name, [key_by_path])
def test_invalid_range_error(self):
bucket_name = 'range-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
self.s3_client.create_bucket(Bucket=bucket_name)
self.s3_client.put_object(Bucket=bucket_name, Key='steve', Body=b'is awesome')
try:
self.s3_client.get_object(Bucket=bucket_name, Key='steve', Range='bytes=1024-4096')
except ClientError as e:
self.assertEqual(e.response['Error']['Code'], 'InvalidRange')
# clean up
self._delete_bucket(bucket_name, ['steve'])
def test_range_key_not_exists(self):
bucket_name = 'range-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
self.s3_client.create_bucket(Bucket=bucket_name)
with self.assertRaises(ClientError) as ctx:
self.s3_client.get_object(Bucket=bucket_name, Key='key', Range='bytes=1024-4096')
self.assertIn('NoSuchKey', str(ctx.exception))
# clean up
self._delete_bucket(bucket_name)
def test_upload_key_with_hash_prefix(self):
bucket_name = 'hash-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
key_name = '#key-with-hash-prefix'
content = b'test 123'
self.s3_client.put_object(Bucket=bucket_name, Key=key_name, Body=content)
downloaded_object = self.s3_client.get_object(Bucket=bucket_name, Key=key_name)
downloaded_content = to_str(downloaded_object['Body'].read())
self.assertEqual(to_str(downloaded_content), to_str(content))
# clean up
self._delete_bucket(bucket_name, [key_name])
with self.assertRaises(Exception):
self.s3_client.head_object(Bucket=bucket_name, Key=key_name)
def test_s3_multipart_upload_acls(self):
bucket_name = 'test-bucket-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name, ACL='public-read')
def check_permissions(key, expected_perms):
grants = self.s3_client.get_object_acl(Bucket=bucket_name, Key=key)['Grants']
grants = [g for g in grants if 'AllUsers' in g.get('Grantee', {}).get('URI', '')]
self.assertEquals(len(grants), 1)
permissions = grants[0]['Permission']
permissions = permissions if isinstance(permissions, list) else [permissions]
self.assertEquals(len(permissions), expected_perms)
# perform uploads (multipart and regular) and check ACLs
self.s3_client.put_object(Bucket=bucket_name, Key='acl-key0', Body='something')
check_permissions('acl-key0', 1)
self._perform_multipart_upload(bucket=bucket_name, key='acl-key1')
check_permissions('acl-key1', 1)
self._perform_multipart_upload(bucket=bucket_name, key='acl-key2', acl='public-read-write')
check_permissions('acl-key2', 2)
def test_s3_presigned_url_upload(self):
key_by_path = 'key-by-hostname'
bucket_name = 'notif-large-%s' % short_uid()
queue_url, queue_attributes = self._create_test_queue()
self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name)
self._perform_presigned_url_upload(bucket=bucket_name, key=key_by_path)
self.assertEqual(self._get_test_queue_message_count(queue_url), '1')
# clean up
self.sqs_client.delete_queue(QueueUrl=queue_url)
self._delete_bucket(bucket_name, [key_by_path])
def test_s3_get_response_default_content_type(self):
# When no content type is provided by a PUT request
# 'binary/octet-stream' should be used
# src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
bucket_name = 'test-bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
# put object
object_key = 'key-by-hostname'
client.put_object(Bucket=bucket_name, Key=object_key, Body='something')
url = client.generate_presigned_url(
'get_object', Params={'Bucket': bucket_name, 'Key': object_key})
# get object and assert headers
response = requests.get(url, verify=False)
self.assertEqual(response.headers['content-type'], 'binary/octet-stream')
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_s3_put_presigned_url_metadata(self):
# Object metadata should be passed as query params via presigned URL
# https://github.com/localstack/localstack/issues/544
bucket_name = 'test-bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
metadata = {
'foo': 'bar'
}
# put object
object_key = 'key-by-hostname'
url = client.generate_presigned_url(
'put_object', Params={'Bucket': bucket_name, 'Key': object_key, 'Metadata': metadata})
# append metadata manually to URL (this is not easily possible with boto3, as "Metadata" cannot
# be passed to generate_presigned_url, and generate_presigned_post works differently)
# get object and assert metadata is present
response = requests.put(url, data='content 123', verify=False)
self.assertLess(response.status_code, 400)
# response body should be empty, see https://github.com/localstack/localstack/issues/1317
self.assertEqual('', to_str(response.content))
response = client.head_object(Bucket=bucket_name, Key=object_key)
self.assertEquals('bar', response.get('Metadata', {}).get('foo'))
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_s3_put_metadata_underscores(self):
# Object metadata keys should accept keys with underscores
# https://github.com/localstack/localstack/issues/1790
bucket_name = 'test-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
# put object
object_key = 'key-with-metadata'
metadata = {'test_meta_1': 'foo', '__meta_2': 'bar'}
self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Metadata=metadata, Body='foo')
metadata_saved = self.s3_client.head_object(Bucket=bucket_name, Key=object_key)['Metadata']
self.assertEqual(metadata, metadata_saved)
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_s3_object_expiry(self):
# handle s3 object expiry
# https://github.com/localstack/localstack/issues/1685
bucket_name = 'test-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
# put object
object_key = 'key-with-metadata'
metadata = {'test_meta_1': 'foo', '__meta_2': 'bar'}
self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Metadata=metadata, Body='foo',
Expires=datetime.datetime.now(timezone('GMT')) - datetime.timedelta(hours=1))
# try to fetch an object which is already expired
self.assertRaises(Exception, self.s3_client.get_object, Bucket=bucket_name, Key=object_key.lower())
self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Metadata=metadata, Body='foo',
Expires=datetime.datetime.now(timezone('GMT')) + datetime.timedelta(hours=1))
# try to fetch has not been expired yet.
resp = self.s3_client.get_object(Bucket=bucket_name, Key=object_key)
self.assertIn('Expires', resp)
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_s3_presigned_url_expired(self):
bucket_name = 'test-bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
# put object and CORS configuration
object_key = 'key-by-hostname'
client.put_object(Bucket=bucket_name, Key=object_key, Body='something')
# get object and assert headers
url = client.generate_presigned_url(
'get_object', Params={'Bucket': bucket_name, 'Key': object_key}, ExpiresIn=2
)
# retrieving it before expiry
resp = requests.get(url, verify=False)
self.assertEqual(resp.status_code, 200)
self.assertEqual(to_str(resp.content), 'something')
# waiting for the url to expire
time.sleep(3)
resp = requests.get(url, verify=False)
self.assertEqual(resp.status_code, 403)
url = client.generate_presigned_url(
'get_object', Params={'Bucket': bucket_name, 'Key': object_key}, ExpiresIn=120
)
resp = requests.get(url, verify=False)
self.assertEqual(resp.status_code, 200)
self.assertEqual(to_str(resp.content), 'something')
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_bucket_availability(self):
bucket_name = 'test-bucket-lifecycle'
returned_empty_lifecycle = s3_listener.get_lifecycle(bucket_name)
self.assertRegexpMatches(returned_empty_lifecycle._content, r'The bucket does not exist')
response = s3_listener.get_replication(bucket_name)
self.assertRegexpMatches(response._content, r'The bucket does not exist')
response = s3_listener.get_object_lock(bucket_name)
self.assertRegexpMatches(response._content, r'The bucket does not exist')
def test_delete_bucket_lifecycle_configuration(self):
bucket_name = 'test-bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
lfc = {
'Rules': [
{
'Expiration': {'Days': 7},
'ID': 'wholebucket',
'Filter': {'Prefix': ''},
'Status': 'Enabled',
}
]
}
client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lfc
)
result = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
self.assertIn('Rules', result)
client.delete_bucket_lifecycle(Bucket=bucket_name)
try:
client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
except ClientError as e:
self.assertEqual(e.response['Error']['Code'], 'NoSuchLifecycleConfiguration')
# clean up
client.delete_bucket(Bucket=bucket_name)
def test_delete_lifecycle_configuration_on_bucket_deletion(self):
bucket_name = 'test-bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
lfc = {
'Rules': [
{
'Expiration': {'Days': 7},
'ID': 'wholebucket',
'Filter': {'Prefix': ''},
'Status': 'Enabled',
}
]
}
client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lfc
)
result = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
self.assertIn('Rules', result)
client.delete_bucket(Bucket=bucket_name)
client.create_bucket(Bucket=bucket_name)
try:
client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
except ClientError as e:
self.assertEqual(e.response['Error']['Code'], 'NoSuchLifecycleConfiguration')
# clean up
client.delete_bucket(Bucket=bucket_name)
def test_range_header_body_length(self):
# Test for https://github.com/localstack/localstack/issues/1952
object_key = 'sample.bin'
bucket_name = 'test-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
chunk_size = 1024
with io.BytesIO() as data:
data.write(os.urandom(chunk_size * 2))
data.seek(0)
self.s3_client.upload_fileobj(data, bucket_name, object_key)
range_header = 'bytes=0-%s' % (chunk_size - 1)
resp = self.s3_client.get_object(Bucket=bucket_name, Key=object_key, Range=range_header)
content = resp['Body'].read()
        self.assertEqual(len(content), chunk_size)
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_s3_get_response_content_type_same_as_upload_and_range(self):
bucket_name = 'test-bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
# put object
object_key = 'foo/bar/key-by-hostname'
content_type = 'foo/bar; charset=utf-8'
client.put_object(Bucket=bucket_name,
Key=object_key,
Body='something ' * 20,
ContentType=content_type)
url = client.generate_presigned_url(
'get_object', Params={'Bucket': bucket_name, 'Key': object_key}
)
# get object and assert headers
response = requests.get(url, verify=False)
self.assertEqual(response.headers['content-type'], content_type)
# get object using range query and assert headers
response = requests.get(url, headers={'Range': 'bytes=0-18'}, verify=False)
self.assertEqual(response.headers['content-type'], content_type)
self.assertEqual(to_str(response.content), 'something something')
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_s3_get_get_object_headers(self):
object_key = 'sample.bin'
bucket_name = 'test-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
chunk_size = 1024
with io.BytesIO() as data:
data.write(os.urandom(chunk_size * 2))
data.seek(0)
self.s3_client.upload_fileobj(data, bucket_name, object_key)
range_header = 'bytes=0-%s' % (chunk_size - 1)
resp = self.s3_client.get_object(Bucket=bucket_name, Key=object_key, Range=range_header)
self.assertEqual(resp.get('AcceptRanges'), 'bytes')
self.assertIn('x-amz-request-id', resp['ResponseMetadata']['HTTPHeaders'])
self.assertIn('x-amz-id-2', resp['ResponseMetadata']['HTTPHeaders'])
self.assertIn('content-language', resp['ResponseMetadata']['HTTPHeaders'])
# We used to return `cache-control: no-cache` if the header wasn't set
# by the client, but this was a bug because s3 doesn't do that. It simply
# omits it.
self.assertNotIn('cache-control', resp['ResponseMetadata']['HTTPHeaders'])
# Do not send a content-encoding header as discussed in Issue #3608
self.assertNotIn('content-encoding', resp['ResponseMetadata']['HTTPHeaders'])
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_s3_head_response_content_length_same_as_upload(self):
bucket_name = 'test-bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
body = 'something body \n \n\r'
# put object
object_key = 'key-by-hostname'
client.put_object(Bucket=bucket_name, Key=object_key, Body=body, ContentType='text/html; charset=utf-8')
url = client.generate_presigned_url(
'head_object', Params={'Bucket': bucket_name, 'Key': object_key}
)
# get object and assert headers
response = requests.head(url, verify=False)
self.assertEqual(response.headers['content-length'], str(len(body)))
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_s3_put_object_chunked_newlines(self):
# Test for https://github.com/localstack/localstack/issues/1571
bucket_name = 'test-bucket-%s' % short_uid()
object_key = 'data'
self.s3_client.create_bucket(Bucket=bucket_name)
body = 'Hello\r\n\r\n\r\n\r\n'
headers = """
Authorization: %s
Content-Type: audio/mpeg
X-Amz-Content-Sha256: STREAMING-AWS4-HMAC-SHA256-PAYLOAD
X-Amz-Date: 20190918T051509Z
X-Amz-Decoded-Content-Length: %s
""" % (aws_stack.mock_aws_request_headers('s3')['Authorization'], len(body))
headers = dict([[field.strip() for field in pair.strip().split(':', 1)]
for pair in headers.strip().split('\n')])
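        # The body below uses the aws-chunked streaming format declared by the
        # STREAMING-AWS4-HMAC-SHA256-PAYLOAD header: each chunk is
        # '<size-hex>;chunk-signature=<sig>\r\n<data>\r\n', terminated by a zero-length chunk.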
data = ('d;chunk-signature=af5e6c0a698b0192e9aa5d9083553d4d241d81f69ec62b184d05c509ad5166af\r\n' +
'%s\r\n0;chunk-signature=f2a50a8c0ad4d212b579c2489c6d122db88d8a0d0b987ea1f3e9d081074a5937\r\n') % body
# put object
url = '%s/%s/%s' % (config.TEST_S3_URL, bucket_name, object_key)
req = PutRequest(url, to_bytes(data), headers)
urlopen(req, context=ssl.SSLContext()).read()
# get object and assert content length
downloaded_object = self.s3_client.get_object(Bucket=bucket_name, Key=object_key)
download_file_object = to_str(downloaded_object['Body'].read())
self.assertEqual(len(str(download_file_object)), len(body))
self.assertEqual(str(download_file_object), body)
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_s3_put_object_on_presigned_url(self):
bucket_name = 'test-bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
body = 'something body'
# get presigned URL
object_key = 'test-presigned-key'
url = client.generate_presigned_url(
'put_object', Params={'Bucket': bucket_name, 'Key': object_key}
)
# put object
response = requests.put(url, data=body, verify=False)
self.assertEqual(response.status_code, 200)
# get object and compare results
downloaded_object = client.get_object(Bucket=bucket_name, Key=object_key)
download_object = downloaded_object['Body'].read()
self.assertEqual(to_str(body), to_str(download_object))
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_s3_post_object_on_presigned_post(self):
bucket_name = 'test-presigned-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
body = 'something body'
# get presigned URL
object_key = 'test-presigned-post-key'
presigned_request = client.generate_presigned_post(
Bucket=bucket_name, Key=object_key, ExpiresIn=60)
# put object
files = {'file': body}
response = requests.post(presigned_request['url'], data=presigned_request['fields'], files=files, verify=False)
self.assertIn(response.status_code, [200, 204])
# get object and compare results
downloaded_object = client.get_object(Bucket=bucket_name, Key=object_key)
download_object = downloaded_object['Body'].read()
self.assertEqual(to_str(body), to_str(download_object))
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_s3_presigned_post_success_action_status_201_response(self):
bucket_name = 'test-presigned-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
body = 'something body'
# get presigned URL
object_key = 'key-${filename}'
presigned_request = client.generate_presigned_post(
Bucket=bucket_name,
Key=object_key,
Fields={'success_action_status': 201},
ExpiresIn=60
)
files = {'file': ('my-file', body)}
response = requests.post(presigned_request['url'], data=presigned_request['fields'], files=files, verify=False)
# test
expected_response_content = """
<PostResponse>
<Location>{location}</Location>
<Bucket>{bucket}</Bucket>
<Key>{key}</Key>
<ETag>{etag}</ETag>
</PostResponse>
""".format(
location='http://localhost/key-my-file',
bucket=bucket_name,
key='key-my-file',
etag='d41d8cd98f00b204e9800998ecf8427f'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.text, expected_response_content)
# clean up
self._delete_bucket(bucket_name, ['key-my-file'])
def test_s3_presigned_post_expires(self):
bucket_name = 'test-bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
# presign a post with a short expiry time
object_key = 'test-presigned-post-key'
presigned_request = client.generate_presigned_post(
Bucket=bucket_name,
Key=object_key,
ExpiresIn=2
)
# sleep so it expires
time.sleep(3)
# attempt to use the presigned request
files = {'file': 'file content'}
response = requests.post(presigned_request['url'], data=presigned_request['fields'], files=files, verify=False)
self.assertEqual(response.status_code, 400)
self.assertTrue('ExpiredToken' in response.text)
# clean up
self._delete_bucket(bucket_name)
def test_s3_delete_response_content_length_zero(self):
bucket_name = 'test-bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
for encoding in None, 'gzip':
# put object
object_key = 'key-by-hostname'
client.put_object(Bucket=bucket_name,
Key=object_key,
Body='something',
ContentType='text/html; charset=utf-8')
url = client.generate_presigned_url(
'delete_object',
Params={'Bucket': bucket_name, 'Key': object_key}
)
# get object and assert headers
headers = {}
if encoding:
headers['Accept-Encoding'] = encoding
response = requests.delete(url, headers=headers, verify=False)
self.assertEqual(response.headers['content-length'],
'0',
f'Unexpected response Content-Length for encoding {encoding}')
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_delete_object_tagging(self):
bucket_name = 'test-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name, ACL='public-read')
object_key = 'test-key-tagging'
self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Body='something')
# get object and assert response
url = '%s/%s/%s' % (config.TEST_S3_URL, bucket_name, object_key)
response = requests.get(url, verify=False)
self.assertEqual(response.status_code, 200)
# delete object tagging
self.s3_client.delete_object_tagging(Bucket=bucket_name, Key=object_key)
# assert that the object still exists
response = requests.get(url, verify=False)
self.assertEqual(response.status_code, 200)
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_delete_non_existing_keys(self):
bucket_name = 'test-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
object_key = 'test-key-nonexistent'
self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Body='something')
response = self.s3_client.delete_objects(Bucket=bucket_name,
Delete={'Objects': [{'Key': object_key}, {'Key': 'dummy1'}, {'Key': 'dummy2'}]})
self.assertEqual(len(response['Deleted']), 3)
self.assertNotIn('Errors', response)
# clean up
self._delete_bucket(bucket_name)
def test_bucket_exists(self):
# Test setup
bucket = 'test-bucket-%s' % short_uid()
s3_client = aws_stack.connect_to_service('s3')
s3_client.create_bucket(Bucket=bucket)
s3_client.put_bucket_cors(
Bucket=bucket,
CORSConfiguration={
'CORSRules': [{'AllowedMethods': ['GET', 'POST', 'PUT', 'DELETE'],
'AllowedOrigins': ['localhost']}]
}
)
response = s3_client.get_bucket_cors(Bucket=bucket)
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
# Cleanup
s3_client.delete_bucket(Bucket=bucket)
def test_s3_uppercase_key_names(self):
        # bucket names must be lowercase; only the key below uses mixed case
bucket_name = 'testuppercase-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
# key name should be case-sensitive
object_key = 'camelCaseKey'
self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Body='something')
self.s3_client.get_object(Bucket=bucket_name, Key=object_key)
res = self.s3_client.get_object(Bucket=bucket_name, Key=object_key)
self.assertEqual(res['ResponseMetadata']['HTTPStatusCode'], 200)
def test_s3_get_response_headers(self):
bucket_name = 'test-bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
# put object and CORS configuration
object_key = 'key-by-hostname'
client.put_object(Bucket=bucket_name, Key=object_key, Body='something')
client.put_bucket_cors(Bucket=bucket_name,
CORSConfiguration={
'CORSRules': [{
'AllowedMethods': ['GET', 'PUT', 'POST'],
'AllowedOrigins': ['*'],
'ExposeHeaders': [
'ETag', 'x-amz-version-id'
]
}]
},
)
# get object and assert headers
url = client.generate_presigned_url(
'get_object', Params={'Bucket': bucket_name, 'Key': object_key}
)
response = requests.get(url, verify=False)
        self.assertEqual(response.headers['Access-Control-Expose-Headers'], 'ETag,x-amz-version-id')
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_s3_get_response_header_overrides(self):
# Signed requests may include certain header overrides in the querystring
# https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
bucket_name = 'test-bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
# put object
object_key = 'key-by-hostname'
client.put_object(Bucket=bucket_name, Key=object_key, Body='something')
# get object and assert headers
expiry_date = 'Wed, 21 Oct 2015 07:28:00 GMT'
url = client.generate_presigned_url(
'get_object', Params={
'Bucket': bucket_name,
'Key': object_key,
'ResponseCacheControl': 'max-age=74',
'ResponseContentDisposition': 'attachment; filename="foo.jpg"',
'ResponseContentEncoding': 'identity',
'ResponseContentLanguage': 'de-DE',
'ResponseContentType': 'image/jpeg',
'ResponseExpires': expiry_date}
)
response = requests.get(url, verify=False)
self.assertEqual(response.headers['cache-control'], 'max-age=74')
self.assertEqual(response.headers['content-disposition'], 'attachment; filename="foo.jpg"')
self.assertEqual(response.headers['content-encoding'], 'identity')
self.assertEqual(response.headers['content-language'], 'de-DE')
self.assertEqual(response.headers['content-type'], 'image/jpeg')
# Note: looks like depending on the environment/libraries, we can get different date formats...
possible_date_formats = ['2015-10-21T07:28:00Z', expiry_date]
self.assertIn(response.headers['expires'], possible_date_formats)
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_s3_copy_md5(self):
bucket_name = 'test-bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
# put object
src_key = 'src'
client.put_object(Bucket=bucket_name, Key=src_key, Body='something')
# copy object
dest_key = 'dest'
response = client.copy_object(
Bucket=bucket_name,
CopySource={
'Bucket': bucket_name,
'Key': src_key
},
Key=dest_key
)
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
# Create copy object to try to match s3a setting Content-MD5
dest_key2 = 'dest'
url = client.generate_presigned_url(
'copy_object', Params={'Bucket': bucket_name, 'CopySource': {'Bucket': bucket_name, 'Key': src_key},
'Key': dest_key2}
)
request_response = requests.put(url, verify=False)
self.assertEqual(request_response.status_code, 200)
# Cleanup
self._delete_bucket(bucket_name, [src_key, dest_key, dest_key2])
def test_s3_invalid_content_md5(self):
bucket_name = 'test-bucket-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
# put object with invalid content MD5
hashes = {
'__invalid__': 'InvalidDigest',
'000': 'InvalidDigest',
'not base64 encoded checksum': 'InvalidDigest', # InvalidDigest
'MTIz': 'BadDigest' # "123" base64 encoded
}
for hash, error in hashes.items():
with self.assertRaises(Exception) as ctx:
self.s3_client.put_object(Bucket=bucket_name, Key='test-key',
Body='something', ContentMD5=hash)
self.assertIn(error, str(ctx.exception))
# Cleanup
self.s3_client.delete_bucket(Bucket=bucket_name)
def test_s3_upload_download_gzip(self):
bucket_name = 'test-bucket-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
data = '1234567890 ' * 100
# Write contents to memory rather than a file.
upload_file_object = BytesIO()
with gzip.GzipFile(fileobj=upload_file_object, mode='w') as filestream:
filestream.write(data.encode('utf-8'))
# Upload gzip
self.s3_client.put_object(Bucket=bucket_name, Key='test.gz',
ContentEncoding='gzip', Body=upload_file_object.getvalue())
# Download gzip
downloaded_object = self.s3_client.get_object(Bucket=bucket_name, Key='test.gz')
download_file_object = BytesIO(downloaded_object['Body'].read())
with gzip.GzipFile(fileobj=download_file_object, mode='rb') as filestream:
downloaded_data = filestream.read().decode('utf-8')
self.assertEqual(downloaded_data, data)
def test_set_external_hostname(self):
bucket_name = 'test-bucket-%s' % short_uid()
key = 'test.file'
hostname_before = config.HOSTNAME_EXTERNAL
config.HOSTNAME_EXTERNAL = 'foobar'
try:
content = 'test content 123'
acl = 'public-read'
self.s3_client.create_bucket(Bucket=bucket_name)
# upload file
response = self._perform_multipart_upload(bucket=bucket_name, key=key, data=content, acl=acl)
expected_url = '%s://%s:%s/%s/%s' % (get_service_protocol(), config.HOSTNAME_EXTERNAL,
config.PORT_S3, bucket_name, key)
self.assertEqual(expected_url, response['Location'])
# fix object ACL - currently not directly support for multipart uploads
self.s3_client.put_object_acl(Bucket=bucket_name, Key=key, ACL=acl)
# download object via API
downloaded_object = self.s3_client.get_object(Bucket=bucket_name, Key=key)
self.assertEqual(to_str(downloaded_object['Body'].read()), content)
# download object directly from download link
download_url = response['Location'].replace('%s:' % config.HOSTNAME_EXTERNAL, 'localhost:')
response = safe_requests.get(download_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(to_str(response.content), content)
finally:
config.HOSTNAME_EXTERNAL = hostname_before
def test_s3_static_website_hosting(self):
bucket_name = 'test-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
self.s3_client.put_object(Bucket=bucket_name, Key='test/index.html', Body='index')
self.s3_client.put_object(Bucket=bucket_name, Key='test/error.html', Body='error')
self.s3_client.put_object(Bucket=bucket_name, Key='actual/key.html', Body='key')
self.s3_client.put_bucket_website(
Bucket=bucket_name,
WebsiteConfiguration={'IndexDocument': {'Suffix': 'index.html'},
'ErrorDocument': {'Key': 'test/error.html'}}
)
headers = aws_stack.mock_aws_request_headers('s3')
headers['Host'] = s3_utils.get_bucket_website_hostname(bucket_name)
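        # Setting the Host header to the bucket website hostname makes the request
        # be handled as a static website request rather than a plain S3 API call.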
# actual key
url = 'https://{}.{}:{}/actual/key.html'.format(bucket_name, constants.S3_STATIC_WEBSITE_HOSTNAME,
config.EDGE_PORT)
response = requests.get(url, headers=headers, verify=False)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.text, 'key')
# index document
url = 'https://{}.{}:{}/test'.format(bucket_name, constants.S3_STATIC_WEBSITE_HOSTNAME, config.EDGE_PORT)
response = requests.get(url, headers=headers, verify=False)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.text, 'index')
# root path test
url = 'https://{}.{}:{}/'.format(bucket_name, constants.S3_STATIC_WEBSITE_HOSTNAME, config.EDGE_PORT)
response = requests.get(url, headers=headers, verify=False)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.text, 'error')
# error document
url = 'https://{}.{}:{}/something'.format(bucket_name, constants.S3_STATIC_WEBSITE_HOSTNAME, config.EDGE_PORT)
response = requests.get(url, headers=headers, verify=False)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.text, 'error')
def test_s3_event_notification_with_sqs(self):
key_by_path = 'aws/bucket=2020/test1.txt'
bucket_name = 'notif-sqs-%s' % short_uid()
queue_url, queue_attributes = self._create_test_queue()
self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name)
self.s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 'Enabled'})
body = 'Lorem ipsum dolor sit amet, ... ' * 30
# put an object
self.s3_client.put_object(Bucket=bucket_name, Key=key_by_path, Body=body)
self.assertEqual(self._get_test_queue_message_count(queue_url), '1')
rs = self.sqs_client.receive_message(QueueUrl=queue_url)
record = [json.loads(to_str(m['Body'])) for m in rs['Messages']][0]['Records'][0]
download_file = new_tmp_file()
self.s3_client.download_file(Bucket=bucket_name, Key=key_by_path, Filename=download_file)
self.assertEqual(record['s3']['object']['size'], os.path.getsize(download_file))
# clean up
self.s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 'Disabled'})
self.sqs_client.delete_queue(QueueUrl=queue_url)
self._delete_bucket(bucket_name, [key_by_path])
def test_s3_delete_object_with_version_id(self):
test_1st_key = 'aws/s3/testkey1.txt'
test_2nd_key = 'aws/s3/testkey2.txt'
body = 'Lorem ipsum dolor sit amet, ... ' * 30
self.s3_client.create_bucket(Bucket=TEST_BUCKET_WITH_VERSIONING)
self.s3_client.put_bucket_versioning(Bucket=TEST_BUCKET_WITH_VERSIONING,
VersioningConfiguration={'Status': 'Enabled'})
# put 2 objects
rs = self.s3_client.put_object(Bucket=TEST_BUCKET_WITH_VERSIONING, Key=test_1st_key, Body=body)
self.s3_client.put_object(Bucket=TEST_BUCKET_WITH_VERSIONING, Key=test_2nd_key, Body=body)
version_id = rs['VersionId']
# delete 1st object with version
rs = self.s3_client.delete_objects(Bucket=TEST_BUCKET_WITH_VERSIONING,
Delete={'Objects': [{'Key': test_1st_key, 'VersionId': version_id}]})
deleted = rs['Deleted'][0]
self.assertEqual(deleted['Key'], test_1st_key)
self.assertEqual(deleted['VersionId'], version_id)
rs = self.s3_client.list_object_versions(Bucket=TEST_BUCKET_WITH_VERSIONING)
object_versions = [object['VersionId'] for object in rs['Versions']]
self.assertNotIn(version_id, object_versions)
# clean up
self.s3_client.put_bucket_versioning(Bucket=TEST_BUCKET_WITH_VERSIONING,
VersioningConfiguration={'Status': 'Disabled'})
self._delete_bucket(TEST_BUCKET_WITH_VERSIONING, [test_1st_key, test_2nd_key])
def test_etag_on_get_object_call(self):
self.s3_client.create_bucket(Bucket=TEST_BUCKET_NAME_2)
body = 'Lorem ipsum dolor sit amet, ... ' * 30
rs = self.s3_client.put_object(Bucket=TEST_BUCKET_NAME_2, Key=TEST_KEY_2, Body=body)
etag = rs['ETag']
rs = self.s3_client.get_object(
Bucket=TEST_BUCKET_NAME_2,
Key=TEST_KEY_2
)
self.assertIn('ETag', rs)
self.assertEqual(etag, rs['ETag'])
self.assertEqual(rs['ContentLength'], len(body))
rs = self.s3_client.get_object(
Bucket=TEST_BUCKET_NAME_2,
Key=TEST_KEY_2,
Range='bytes=0-{}'.format(TEST_GET_OBJECT_RANGE - 1)
)
self.assertIn('ETag', rs)
self.assertEqual(etag, rs['ETag'])
self.assertEqual(rs['ContentLength'], TEST_GET_OBJECT_RANGE)
# clean up
self._delete_bucket(TEST_BUCKET_NAME_2, [TEST_KEY_2])
def test_get_object_versioning(self):
bucket_name = 'bucket-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
rs = self.s3_client.list_object_versions(
Bucket=bucket_name,
EncodingType='url'
)
self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
self.assertEqual(rs['Name'], bucket_name)
# clean up
self._delete_bucket(bucket_name, [])
def test_bucket_versioning(self):
self.s3_client.create_bucket(Bucket=TEST_BUCKET_WITH_VERSIONING)
self.s3_client.put_bucket_versioning(Bucket=TEST_BUCKET_WITH_VERSIONING,
VersioningConfiguration={'Status': 'Enabled'})
result = self.s3_client.get_bucket_versioning(Bucket=TEST_BUCKET_WITH_VERSIONING)
self.assertEqual(result['Status'], 'Enabled')
def test_get_bucket_versioning_order(self):
bucket_name = 'version-order-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
self.s3_client.put_bucket_versioning(Bucket=bucket_name,
VersioningConfiguration={'Status': 'Enabled'})
self.s3_client.put_object(Bucket=bucket_name, Key='test', Body='body')
self.s3_client.put_object(Bucket=bucket_name, Key='test', Body='body')
self.s3_client.put_object(Bucket=bucket_name, Key='test2', Body='body')
rs = self.s3_client.list_object_versions(
Bucket=bucket_name,
)
self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
self.assertEqual(rs['Name'], bucket_name)
self.assertEqual(rs['Versions'][0]['IsLatest'], True)
self.assertEqual(rs['Versions'][2]['IsLatest'], True)
def test_upload_big_file(self):
bucket_name = 'bucket-big-file-%s' % short_uid()
key1 = 'test_key1'
        key2 = 'test_key2'
self.s3_client.create_bucket(Bucket=bucket_name)
body1 = '\x01' * 10000000
rs = self.s3_client.put_object(Bucket=bucket_name, Key=key1, Body=body1)
self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
body2 = 'a' * 10000000
rs = self.s3_client.put_object(Bucket=bucket_name, Key=key2, Body=body2)
self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
rs = self.s3_client.head_object(Bucket=bucket_name, Key=key1)
self.assertEqual(rs['ContentLength'], len(body1))
rs = self.s3_client.head_object(Bucket=bucket_name, Key=key2)
self.assertEqual(rs['ContentLength'], len(body2))
# clean up
self._delete_bucket(bucket_name, [key1, key2])
def test_s3_put_more_than_1000_items(self):
self.s3_client.create_bucket(Bucket=TEST_BUCKET_NAME_2)
        for i in range(1010):
body = 'test-' + str(i)
key = 'test-key-' + str(i)
self.s3_client.put_object(Bucket=TEST_BUCKET_NAME_2, Key=key, Body=body)
# trying to get the last item of 1010 items added.
resp = self.s3_client.get_object(Bucket=TEST_BUCKET_NAME_2, Key='test-key-1009')
self.assertEqual(to_str(resp['Body'].read()), 'test-1009')
# trying to get the first item of 1010 items added.
resp = self.s3_client.get_object(Bucket=TEST_BUCKET_NAME_2, Key='test-key-0')
self.assertEqual(to_str(resp['Body'].read()), 'test-0')
resp = self.s3_client.list_objects(Bucket=TEST_BUCKET_NAME_2, MaxKeys=1010)
self.assertEqual(len(resp['Contents']), 1010)
resp = self.s3_client.list_objects(Bucket=TEST_BUCKET_NAME_2)
self.assertEqual(len(resp['Contents']), 1000)
next_marker = resp['NextMarker']
# Second list
resp = self.s3_client.list_objects(Bucket=TEST_BUCKET_NAME_2, Marker=next_marker)
self.assertEqual(len(resp['Contents']), 10)
def test_s3_list_objects_empty_marker(self):
bucket_name = 'test' + short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
resp = self.s3_client.list_objects(Bucket=bucket_name, Marker='')
self.assertEqual(resp['Marker'], '')
def test_s3_multipart_upload_file(self):
def upload(size_in_mb, bucket):
file_name = '{}.tmp'.format(short_uid())
path = '{}'.format(file_name)
with open(path, 'wb') as f:
f.seek(int(size_in_mb * 1e6))
f.write(b'\0')
f.flush()
self.s3_client.upload_file(
path,
bucket,
f'{file_name}',
ExtraArgs={'StorageClass': 'DEEP_ARCHIVE'}
)
os.remove(path)
bucket_name = 'bucket-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
upload(1, bucket_name)
upload(9, bucket_name)
upload(15, bucket_name)
s3_resource = aws_stack.connect_to_resource('s3')
objects = s3_resource.Bucket(bucket_name).objects.all()
keys = []
for obj in objects:
keys.append(obj.key)
self.assertEqual(obj.storage_class, 'DEEP_ARCHIVE')
self._delete_bucket(bucket_name, keys)
def test_cors_with_single_origin_error(self):
client = self._get_test_client()
BUCKET_CORS_CONFIG = {
'CORSRules': [{
'AllowedOrigins': ['https://localhost:4200'],
'AllowedMethods': ['GET', 'PUT'],
'MaxAgeSeconds': 3000,
'AllowedHeaders': ['*'],
}]
}
client.create_bucket(Bucket='my-s3-bucket')
client.put_bucket_cors(Bucket='my-s3-bucket', CORSConfiguration=BUCKET_CORS_CONFIG)
# create signed url
url = client.generate_presigned_url(
ClientMethod='put_object',
Params={
'Bucket': 'my-s3-bucket',
'Key': '424f6bae-c48f-42d8-9e25-52046aecc64d/document.pdf',
'ContentType': 'application/pdf',
'ACL': 'bucket-owner-full-control'
},
ExpiresIn=3600
)
result = requests.put(url, data='something', verify=False,
headers={'Origin': 'https://localhost:4200', 'Content-Type': 'application/pdf'})
self.assertEqual(result.status_code, 200)
BUCKET_CORS_CONFIG = {
'CORSRules': [{
'AllowedOrigins': ['https://localhost:4200', 'https://localhost:4201'],
'AllowedMethods': ['GET', 'PUT'],
'MaxAgeSeconds': 3000,
'AllowedHeaders': ['*'],
}]
}
client.put_bucket_cors(Bucket='my-s3-bucket', CORSConfiguration=BUCKET_CORS_CONFIG)
# create signed url
url = client.generate_presigned_url(
ClientMethod='put_object',
Params={
'Bucket': 'my-s3-bucket',
'Key': '424f6bae-c48f-42d8-9e25-52046aecc64d/document.pdf',
'ContentType': 'application/pdf',
'ACL': 'bucket-owner-full-control'
},
ExpiresIn=3600
)
result = requests.put(url, data='something', verify=False,
headers={'Origin': 'https://localhost:4200', 'Content-Type': 'application/pdf'})
self.assertEqual(result.status_code, 200)
result = requests.put(url, data='something', verify=False,
headers={'Origin': 'https://localhost:4201', 'Content-Type': 'application/pdf'})
self.assertEqual(result.status_code, 200)
def test_s3_put_object_notification_with_lambda(self):
bucket_name = 'bucket-%s' % short_uid()
function_name = 'func-%s' % short_uid()
table_name = 'table-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
testutil.create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON36
)
aws_stack.create_dynamodb_table(
table_name=table_name,
partition_key='uuid'
)
self.s3_client.put_bucket_notification_configuration(
Bucket=bucket_name,
NotificationConfiguration={
'LambdaFunctionConfigurations': [
{
'LambdaFunctionArn': aws_stack.lambda_function_arn(function_name),
'Events': ['s3:ObjectCreated:*']
}
]
}
)
# put an object
obj = self.s3_client.put_object(Bucket=bucket_name, Key=table_name, Body='something..')
etag = obj['ETag']
time.sleep(2)
table = aws_stack.connect_to_resource('dynamodb').Table(table_name)
def check_table():
rs = table.scan()
self.assertEqual(len(rs['Items']), 1)
return rs
rs = retry(check_table, retries=4, sleep=3)
record = rs['Items'][0]
self.assertEqual(record['data']['s3']['bucket']['name'], bucket_name)
self.assertEqual(record['data']['s3']['object']['eTag'], etag)
# clean up
self._delete_bucket(bucket_name, [table_name])
lambda_client = aws_stack.connect_to_service('lambda')
lambda_client.delete_function(FunctionName=function_name)
dynamodb_client = aws_stack.connect_to_service('dynamodb')
dynamodb_client.delete_table(TableName=table_name)
def test_s3_put_object_notification_with_sns_topic(self):
bucket_name = 'bucket-%s' % short_uid()
topic_name = 'topic-%s' % short_uid()
queue_name = 'queue-%s' % short_uid()
key_name = 'bucket-key-%s' % short_uid()
sns_client = aws_stack.connect_to_service('sns')
self.s3_client.create_bucket(Bucket=bucket_name)
queue_url = self.sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
topic_arn = sns_client.create_topic(Name=topic_name)['TopicArn']
sns_client.subscribe(TopicArn=topic_arn, Protocol='sqs', Endpoint=aws_stack.sqs_queue_arn(queue_name))
self.s3_client.put_bucket_notification_configuration(
Bucket=bucket_name,
NotificationConfiguration={
'TopicConfigurations': [
{
'TopicArn': topic_arn,
'Events': ['s3:ObjectCreated:*']
}
]
}
)
# Put an object
# This will trigger an event to sns topic, sqs queue will get a message since it's a subscriber of topic
self.s3_client.put_object(Bucket=bucket_name, Key=key_name, Body='body content...')
time.sleep(2)
def get_message(q_url):
resp = self.sqs_client.receive_message(QueueUrl=q_url)
m = resp['Messages'][0]
self.sqs_client.delete_message(
QueueUrl=q_url,
ReceiptHandle=m['ReceiptHandle']
)
return json.loads(m['Body'])
message = retry(get_message, retries=3, sleep=2, q_url=queue_url)
# We got a notification message in sqs queue (from s3 source)
self.assertEqual(message['Type'], 'Notification')
self.assertEqual(message['TopicArn'], topic_arn)
self.assertEqual(message['Subject'], 'Amazon S3 Notification')
r = json.loads(message['Message'])['Records'][0]
self.assertEqual(r['eventSource'], 'aws:s3')
self.assertEqual(r['s3']['bucket']['name'], bucket_name)
self.assertEqual(r['s3']['object']['key'], key_name)
# clean up
self._delete_bucket(bucket_name, [key_name])
self.sqs_client.delete_queue(QueueUrl=queue_url)
sns_client.delete_topic(TopicArn=topic_arn)
def test_s3_get_deep_archive_object(self):
bucket_name = 'bucket-%s' % short_uid()
object_key = 'key-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
# put DEEP_ARCHIVE object
self.s3_client.put_object(
Bucket=bucket_name,
Key=object_key,
Body='body data',
StorageClass='DEEP_ARCHIVE'
)
with self.assertRaises(ClientError) as ctx:
self.s3_client.get_object(
Bucket=bucket_name,
Key=object_key
)
self.assertIn('InvalidObjectState', str(ctx.exception))
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_s3_get_deep_archive_object_restore(self):
bucket_name = 'bucket-%s' % short_uid()
object_key = 'key-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
# put DEEP_ARCHIVE object
self.s3_client.put_object(
Bucket=bucket_name,
Key=object_key,
Body='body data',
StorageClass='DEEP_ARCHIVE'
)
with self.assertRaises(ClientError) as ctx:
self.s3_client.get_object(
Bucket=bucket_name,
Key=object_key
)
self.assertIn('InvalidObjectState', str(ctx.exception))
# put DEEP_ARCHIVE object
self.s3_client.restore_object(
Bucket=bucket_name,
Key=object_key,
RestoreRequest={
'Days': 30,
'GlacierJobParameters': {
'Tier': 'Bulk'
},
'Tier': 'Bulk',
},
)
response = self.s3_client.get_object(
Bucket=bucket_name,
Key=object_key
)
self.assertIn('etag', response.get('ResponseMetadata').get('HTTPHeaders'))
# clean up
self._delete_bucket(bucket_name, [object_key])
def test_encoding_notification_messages(self):
key = 'a@b'
bucket_name = 'notif-enc-%s' % short_uid()
queue_url = self.sqs_client.create_queue(QueueName='testQueue')['QueueUrl']
queue_attributes = self.sqs_client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['QueueArn'])
self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name)
# put an object where the bucket_name is in the path
self.s3_client.put_object(Bucket=bucket_name, Key=key, Body='something')
response = self.sqs_client.receive_message(QueueUrl=queue_url)
self.assertEqual(json.loads(response['Messages'][0]['Body'])['Records'][0]['s3']['object']['key'], 'a%40b')
# clean up
self.s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': key}]})
def test_s3_batch_delete_objects_using_requests(self):
bucket_name = 'bucket-%s' % short_uid()
object_key_1 = 'key-%s' % short_uid()
object_key_2 = 'key-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
self.s3_client.put_object(Bucket=bucket_name, Key=object_key_1, Body='This body document')
self.s3_client.put_object(Bucket=bucket_name, Key=object_key_2, Body='This body document')
base_url = '{}://{}:{}'.format(get_service_protocol(), config.LOCALSTACK_HOSTNAME, config.PORT_S3)
url = '{}/{}?delete='.format(base_url, bucket_name)
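        # POST the raw multi-object delete XML payload (BATCH_DELETE_BODY, presumably defined
        # at module level) against the ?delete endpoint, bypassing the boto3 client.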
r = requests.post(url=url, data=BATCH_DELETE_BODY % (object_key_1, object_key_2))
self.assertEqual(r.status_code, 200)
s3_resource = aws_stack.connect_to_resource('s3')
bucket = s3_resource.Bucket(bucket_name)
total_keys = sum(1 for _ in bucket.objects.all())
self.assertEqual(total_keys, 0)
# clean up
self._delete_bucket(bucket_name, [])
def test_presigned_url_signature_authentication(self):
client = boto3.client('s3', endpoint_url=config.get_edge_url(),
config=Config(signature_version='s3'), aws_access_key_id=TEST_AWS_ACCESS_KEY_ID,
aws_secret_access_key=TEST_AWS_SECRET_ACCESS_KEY)
client_v4 = boto3.client('s3', endpoint_url=config.get_edge_url(),
config=Config(signature_version='s3v4'), aws_access_key_id=TEST_AWS_ACCESS_KEY_ID,
aws_secret_access_key=TEST_AWS_SECRET_ACCESS_KEY)
OBJECT_KEY = 'temp 1.txt'
        OBJECT_DATA = 'this should be found when you download {}.'.format(OBJECT_KEY)
BUCKET = 'test'
EXPIRES = 4
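        # The helpers below rebuild each presigned URL with tampered/incomplete signing
        # parameters so that the signature check should fail and the request should get 403.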
def make_v2_url_invalid(url):
parsed = urlparse.urlparse(url)
query_params = parse_qs(parsed.query)
url = '{}/{}/{}?AWSAccessKeyId={}&Signature={}&Expires={}'.format(
config.get_edge_url(), BUCKET, OBJECT_KEY,
'test', query_params['Signature'][0], query_params['Expires'][0]
)
return url
def make_v4_url_invalid(url):
parsed = urlparse.urlparse(url)
query_params = parse_qs(parsed.query)
url = ('{}/{}/{}?X-Amz-Algorithm=AWS4-HMAC-SHA256&' +
'X-Amz-Credential={}&X-Amz-Date={}&' +
'X-Amz-Expires={}&X-Amz-SignedHeaders=host&' +
'X-Amz-Signature={}').format(
config.get_edge_url(), BUCKET, OBJECT_KEY,
quote(query_params['X-Amz-Credential'][0]).replace('/', '%2F'),
query_params['X-Amz-Date'][0], query_params['X-Amz-Expires'][0], query_params['X-Amz-Signature'][0]
)
return url
client.create_bucket(Bucket=BUCKET)
client.put_object(Key=OBJECT_KEY, Bucket=BUCKET, Body='123')
# GET requests
presign_get_url = client.generate_presigned_url(
'get_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY},
ExpiresIn=EXPIRES
)
presign_get_url_v4 = client_v4.generate_presigned_url(
'get_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY},
ExpiresIn=EXPIRES
)
# Valid request
response = requests.get(presign_get_url)
self.assertEqual(response.status_code, 200)
response = requests.get(presign_get_url_v4)
self.assertEqual(response.status_code, 200)
presign_get_url = client.generate_presigned_url(
'get_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY, 'ResponseContentType': 'text/plain',
'ResponseContentDisposition': 'attachment; filename=test.txt'},
ExpiresIn=EXPIRES
)
presign_get_url_v4 = client_v4.generate_presigned_url(
'get_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY, 'ResponseContentType': 'text/plain'},
ExpiresIn=EXPIRES
)
# Valid request
response = requests.get(presign_get_url)
self.assertEqual(response.status_code, 200)
response = requests.get(presign_get_url_v4)
self.assertEqual(response.status_code, 200)
# Invalid request
url = make_v2_url_invalid(presign_get_url)
response = requests.get(url, data=OBJECT_DATA, headers={'Content-Type': 'my-fake-content/type'})
self.assertEqual(response.status_code, 403)
url = make_v4_url_invalid(presign_get_url_v4)
response = requests.get(url, headers={'Content-Type': 'my-fake-content/type'})
self.assertEqual(response.status_code, 403)
# PUT Requests
presign_put_url = client.generate_presigned_url(
'put_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY},
ExpiresIn=EXPIRES
)
presign_put_url_v4 = client_v4.generate_presigned_url(
'put_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY},
ExpiresIn=EXPIRES
)
# Valid request
response = requests.put(presign_put_url, data=OBJECT_DATA)
self.assertEqual(response.status_code, 200)
response = requests.put(presign_put_url_v4, data=OBJECT_DATA)
self.assertEqual(response.status_code, 200)
presign_put_url = client.generate_presigned_url(
'put_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY, 'ContentType': 'text/plain'},
ExpiresIn=EXPIRES
)
presign_put_url_v4 = client_v4.generate_presigned_url(
'put_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY, 'ContentType': 'text/plain'},
ExpiresIn=EXPIRES
)
# Valid request
response = requests.put(presign_put_url, data=OBJECT_DATA, headers={'Content-Type': 'text/plain'})
self.assertEqual(response.status_code, 200)
response = requests.put(presign_put_url_v4, data=OBJECT_DATA, headers={'Content-Type': 'text/plain'})
self.assertEqual(response.status_code, 200)
# Invalid request
url = make_v2_url_invalid(presign_put_url)
response = requests.put(url, data=OBJECT_DATA, headers={'Content-Type': 'my-fake-content/type'})
self.assertEqual(response.status_code, 403)
url = make_v4_url_invalid(presign_put_url_v4)
response = requests.put(url, data=OBJECT_DATA, headers={'Content-Type': 'my-fake-content/type'})
self.assertEqual(response.status_code, 403)
# DELETE Requests
presign_delete_url = client.generate_presigned_url(
'delete_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY},
ExpiresIn=EXPIRES
)
presign_delete_url_v4 = client_v4.generate_presigned_url(
'delete_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY},
ExpiresIn=EXPIRES
)
# Valid request
response = requests.delete(presign_delete_url)
self.assertEqual(response.status_code, 204)
response = requests.delete(presign_delete_url_v4)
self.assertEqual(response.status_code, 204)
presign_delete_url = client.generate_presigned_url(
'delete_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY, 'VersionId': '1'},
ExpiresIn=EXPIRES
)
presign_delete_url_v4 = client_v4.generate_presigned_url(
'delete_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY, 'VersionId': '1'},
ExpiresIn=EXPIRES
)
# Valid request
response = requests.delete(presign_delete_url)
self.assertEqual(response.status_code, 204)
response = requests.delete(presign_delete_url_v4)
self.assertEqual(response.status_code, 204)
# Invalid request
url = make_v2_url_invalid(presign_delete_url)
response = requests.delete(url)
self.assertEqual(response.status_code, 403)
url = make_v4_url_invalid(presign_delete_url_v4)
response = requests.delete(url)
self.assertEqual(response.status_code, 403)
# Expired requests
time.sleep(4)
# GET
response = requests.get(presign_get_url)
self.assertEqual(response.status_code, 403)
response = requests.get(presign_get_url_v4)
self.assertEqual(response.status_code, 403)
# PUT
response = requests.put(presign_put_url, data=OBJECT_DATA, headers={'Content-Type': 'text/plain'})
self.assertEqual(response.status_code, 403)
response = requests.put(presign_put_url_v4, data=OBJECT_DATA, headers={'Content-Type': 'text/plain'})
self.assertEqual(response.status_code, 403)
# DELETE
response = requests.delete(presign_delete_url)
self.assertEqual(response.status_code, 403)
response = requests.delete(presign_delete_url_v4)
self.assertEqual(response.status_code, 403)
# Multipart uploading
response = self._perform_multipart_upload_with_presign(BUCKET, OBJECT_KEY, client)
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = self._perform_multipart_upload_with_presign(BUCKET, OBJECT_KEY, client_v4)
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
client.delete_object(Bucket=BUCKET, Key=OBJECT_KEY)
client.delete_bucket(Bucket=BUCKET)
def test_presigned_url_signature_authentication_virtual_host_addressing(self):
virtual_endpoint = '{}://{}:{}'.format(
config.get_protocol(), S3_VIRTUAL_HOSTNAME, config.EDGE_PORT)
client = boto3.client('s3', endpoint_url=virtual_endpoint,
config=Config(s3={'addressing_style': 'virtual'}),
aws_access_key_id=TEST_AWS_ACCESS_KEY_ID,
aws_secret_access_key=TEST_AWS_SECRET_ACCESS_KEY)
client_v4 = boto3.client('s3', endpoint_url=virtual_endpoint,
config=Config(signature_version='s3v4', s3={'addressing_style': 'virtual'}),
aws_access_key_id=TEST_AWS_ACCESS_KEY_ID,
aws_secret_access_key=TEST_AWS_SECRET_ACCESS_KEY)
OBJECT_KEY = 'temp.txt'
        OBJECT_DATA = 'this should be found when you download {}.'.format(OBJECT_KEY)
BUCKET = 'test'
EXPIRES = 4
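        # Same kind of signature-tampering helpers as in the path-style test above, but
        # building virtual-host style URLs (bucket name as part of the hostname).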
def make_v2_url_invalid(url):
parsed = urlparse.urlparse(url)
query_params = parse_qs(parsed.query)
url = '{}://{}.{}:{}/{}?AWSAccessKeyId={}&Signature={}&Expires={}'.format(
config.get_protocol(), BUCKET, S3_VIRTUAL_HOSTNAME, config.EDGE_PORT, OBJECT_KEY,
'test', query_params['Signature'][0], query_params['Expires'][0]
)
return url
def make_v4_url_invalid(url):
parsed = urlparse.urlparse(url)
query_params = parse_qs(parsed.query)
url = ('{}://{}.{}:{}/{}?X-Amz-Algorithm=AWS4-HMAC-SHA256&' +
'X-Amz-Credential={}&X-Amz-Date={}&' +
'X-Amz-Expires={}&X-Amz-SignedHeaders=host&' +
'X-Amz-Signature={}').format(
config.get_protocol(), BUCKET, S3_VIRTUAL_HOSTNAME, config.EDGE_PORT, OBJECT_KEY,
quote(query_params['X-Amz-Credential'][0]).replace('/', '%2F'),
query_params['X-Amz-Date'][0], query_params['X-Amz-Expires'][0], query_params['X-Amz-Signature'][0]
)
return url
self.s3_client.create_bucket(Bucket=BUCKET)
self.s3_client.put_object(
Key=OBJECT_KEY,
Bucket=BUCKET,
Body='123'
)
# GET requests
presign_get_url = client.generate_presigned_url(
'get_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY},
ExpiresIn=EXPIRES
)
presign_get_url_v4 = client_v4.generate_presigned_url(
'get_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY},
ExpiresIn=EXPIRES
)
# Valid request
response = requests.get(presign_get_url)
self.assertEqual(response.status_code, 200)
response = requests.get(presign_get_url_v4)
self.assertEqual(response.status_code, 200)
presign_get_url = client.generate_presigned_url(
'get_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY, 'ResponseContentType': 'text/plain'},
ExpiresIn=EXPIRES
)
presign_get_url_v4 = client_v4.generate_presigned_url(
'get_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY, 'ResponseContentType': 'text/plain'},
ExpiresIn=EXPIRES
)
# Valid request
response = requests.get(presign_get_url)
self.assertEqual(response.status_code, 200)
response = requests.get(presign_get_url_v4)
self.assertEqual(response.status_code, 200)
# Invalid request
url = make_v2_url_invalid(presign_get_url)
response = requests.get(url, data=OBJECT_DATA, headers={'Content-Type': 'my-fake-content/type'})
self.assertEqual(response.status_code, 403)
url = make_v4_url_invalid(presign_get_url_v4)
response = requests.get(url, headers={'Content-Type': 'my-fake-content/type'})
self.assertEqual(response.status_code, 403)
# PUT Requests
presign_put_url = client.generate_presigned_url(
'put_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY},
ExpiresIn=EXPIRES
)
presign_put_url_v4 = client_v4.generate_presigned_url(
'put_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY},
ExpiresIn=EXPIRES
)
# Valid request
response = requests.put(presign_put_url, data=OBJECT_DATA)
self.assertEqual(response.status_code, 200)
response = requests.put(presign_put_url_v4, data=OBJECT_DATA)
self.assertEqual(response.status_code, 200)
presign_put_url = client.generate_presigned_url(
'put_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY, 'ContentType': 'text/plain'},
ExpiresIn=EXPIRES
)
presign_put_url_v4 = client_v4.generate_presigned_url(
'put_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY, 'ContentType': 'text/plain'},
ExpiresIn=EXPIRES
)
# Valid request
response = requests.put(presign_put_url, data=OBJECT_DATA, headers={'Content-Type': 'text/plain'})
self.assertEqual(response.status_code, 200)
response = requests.put(presign_put_url_v4, data=OBJECT_DATA, headers={'Content-Type': 'text/plain'})
self.assertEqual(response.status_code, 200)
# Invalid request
url = make_v2_url_invalid(presign_put_url)
response = requests.put(url, data=OBJECT_DATA, headers={'Content-Type': 'my-fake-content/type'})
self.assertEqual(response.status_code, 403)
url = make_v4_url_invalid(presign_put_url_v4)
response = requests.put(url, data=OBJECT_DATA, headers={'Content-Type': 'my-fake-content/type'})
self.assertEqual(response.status_code, 403)
# DELETE Requests
presign_delete_url = client.generate_presigned_url(
'delete_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY},
ExpiresIn=EXPIRES
)
presign_delete_url_v4 = client_v4.generate_presigned_url(
'delete_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY},
ExpiresIn=EXPIRES
)
# Valid request
response = requests.delete(presign_delete_url)
self.assertEqual(response.status_code, 204)
response = requests.delete(presign_delete_url_v4)
self.assertEqual(response.status_code, 204)
presign_delete_url = client.generate_presigned_url(
'delete_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY, 'VersionId': '1'},
ExpiresIn=EXPIRES
)
presign_delete_url_v4 = client_v4.generate_presigned_url(
'delete_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY, 'VersionId': '1'},
ExpiresIn=EXPIRES
)
# Valid request
response = requests.delete(presign_delete_url)
self.assertEqual(response.status_code, 204)
response = requests.delete(presign_delete_url_v4)
self.assertEqual(response.status_code, 204)
# Invalid request
url = make_v2_url_invalid(presign_delete_url)
response = requests.delete(url)
self.assertEqual(response.status_code, 403)
url = make_v4_url_invalid(presign_delete_url_v4)
response = requests.delete(url)
self.assertEqual(response.status_code, 403)
# Expired requests
time.sleep(4)
# GET
response = requests.get(presign_get_url)
self.assertEqual(response.status_code, 403)
response = requests.get(presign_get_url_v4)
self.assertEqual(response.status_code, 403)
# PUT
response = requests.put(presign_put_url, data=OBJECT_DATA, headers={'Content-Type': 'text/plain'})
self.assertEqual(response.status_code, 403)
response = requests.put(presign_put_url_v4, data=OBJECT_DATA, headers={'Content-Type': 'text/plain'})
self.assertEqual(response.status_code, 403)
# DELETE
response = requests.delete(presign_delete_url)
self.assertEqual(response.status_code, 403)
response = requests.delete(presign_delete_url_v4)
self.assertEqual(response.status_code, 403)
# Multipart uploading
response = self._perform_multipart_upload_with_presign(BUCKET, OBJECT_KEY, client)
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = self._perform_multipart_upload_with_presign(BUCKET, OBJECT_KEY, client_v4)
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
client.delete_object(Bucket=BUCKET, Key=OBJECT_KEY)
client.delete_bucket(Bucket=BUCKET)
def test_precondition_failed_error(self):
bucket = 'bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket)
client.put_object(Bucket=bucket, Key='foo', Body=b'{"foo": "bar"}')
# this line makes localstack crash:
try:
client.get_object(Bucket=bucket, Key='foo', IfMatch='"not good etag"')
except ClientError as e:
self.assertEqual(e.response['Error']['Code'], 'PreconditionFailed')
self.assertEqual(e.response['Error']['Message'], 'At least one of the pre-conditions you '
'specified did not hold')
client.delete_object(Bucket=bucket, Key='foo')
client.delete_bucket(Bucket=bucket)
    def test_cors_configurations(self):
client = self._get_test_client()
bucket = 'test-cors'
object_key = 'index.html'
url = '{}/{}/{}'.format(config.get_edge_url(), bucket, object_key)
BUCKET_CORS_CONFIG = {
'CORSRules': [{
'AllowedOrigins': [config.get_edge_url()],
'AllowedMethods': ['GET', 'PUT'],
'MaxAgeSeconds': 3000,
'AllowedHeaders': ['x-amz-tagging'],
}]
}
client.create_bucket(Bucket=bucket)
client.put_bucket_cors(Bucket=bucket, CORSConfiguration=BUCKET_CORS_CONFIG)
client.put_object(Bucket=bucket, Key=object_key, Body='<h1>Index</html>')
response = requests.get(url,
headers={'Origin': config.get_edge_url(), 'Content-Type': 'text/html'})
self.assertEqual(response.status_code, 200)
self.assertIn('Access-Control-Allow-Origin'.lower(), response.headers)
self.assertEqual(response.headers['Access-Control-Allow-Origin'], config.get_edge_url())
self.assertIn('Access-Control-Allow-Methods'.lower(), response.headers)
self.assertIn('GET', response.headers['Access-Control-Allow-Methods'])
self.assertIn('Access-Control-Allow-Headers', response.headers)
self.assertEqual(response.headers['Access-Control-Allow-Headers'], 'x-amz-tagging')
self.assertIn('Access-Control-Max-Age'.lower(), response.headers)
self.assertEqual(response.headers['Access-Control-Max-Age'], '3000')
self.assertIn('Access-Control-Allow-Credentials'.lower(), response.headers)
self.assertEqual(response.headers['Access-Control-Allow-Credentials'].lower(), 'true')
BUCKET_CORS_CONFIG = {
'CORSRules': [{
'AllowedOrigins': ['https://anydomain.com'],
'AllowedMethods': ['GET', 'PUT'],
'MaxAgeSeconds': 3000,
'AllowedHeaders': ['x-amz-tagging'],
}]
}
client.put_bucket_cors(Bucket=bucket, CORSConfiguration=BUCKET_CORS_CONFIG)
response = requests.get(url,
headers={'Origin': config.get_edge_url(), 'Content-Type': 'text/html'})
self.assertEqual(response.status_code, 200)
self.assertNotIn('Access-Control-Allow-Origin'.lower(), response.headers)
self.assertNotIn('Access-Control-Allow-Methods'.lower(), response.headers)
self.assertNotIn('Access-Control-Allow-Headers', response.headers)
self.assertNotIn('Access-Control-MaxAge', response.headers)
# cleaning
client.delete_object(Bucket=bucket, Key=object_key)
client.delete_bucket(Bucket=bucket)
def test_s3_download_object_with_lambda(self):
bucket_name = 'bucket-%s' % short_uid()
function_name = 'func-%s' % short_uid()
key = 'key-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
self.s3_client.put_object(Bucket=bucket_name, Key=key, Body='something..')
testutil.create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_DOWNLOAD_FROM_S3,
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
envvars=dict({
'BUCKET_NAME': bucket_name,
'OBJECT_NAME': key,
'LOCAL_FILE_NAME': '/tmp/' + key,
})
)
lambda_client = aws_stack.connect_to_service('lambda')
lambda_client.invoke(FunctionName=function_name, InvocationType='Event')
retry(testutil.check_expected_lambda_log_events_length, retries=10,
sleep=3, function_name=function_name, expected_length=1)
# clean up
self._delete_bucket(bucket_name, [key])
lambda_client.delete_function(FunctionName=function_name)
def test_putobject_with_multiple_keys(self):
client = self._get_test_client()
bucket = 'bucket-%s' % short_uid()
key_by_path = 'aws/key1/key2/key3'
client.create_bucket(Bucket=bucket)
client.put_object(
Body=b'test',
Bucket=bucket,
Key=key_by_path
)
# Cleanup
self._delete_bucket(bucket, key_by_path)
def test_terraform_request_sequence(self):
reqs = load_file(os.path.join(os.path.dirname(__file__), 'files', 's3.requests.txt'))
reqs = reqs.split('---')
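        # Each recorded request consists of a 'METHOD PATH VERSION' line, header lines, a blank
        # line, and the body; replay them against the edge endpoint and expect non-error responses.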
for req in reqs:
header, _, body = req.strip().partition('\n\n')
req, _, headers = header.strip().partition('\n')
headers = {h.split(':')[0]: h.partition(':')[2].strip() for h in headers.split('\n')}
method, path, _ = req.split(' ')
url = '%s%s' % (config.get_edge_url(), path)
result = getattr(requests, method.lower())(url, data=body, headers=headers)
self.assertLess(result.status_code, 400)
# ---------------
# HELPER METHODS
# ---------------
@staticmethod
def generate_large_file(size):
# https://stackoverflow.com/questions/8816059/create-file-of-particular-size-in-python
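        # Seeking to (size - 1) and writing a single byte produces a file of the requested
        # size without holding the content in memory (sparse on most filesystems).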
filename = 'large_file_%s' % uuid.uuid4()
        with open(filename, 'wb') as f:
            f.seek(size - 1)
            f.write(b'\0')
        return open(filename, 'r')
def _create_test_queue(self):
queue_url = self.sqs_client.create_queue(QueueName=TEST_QUEUE_FOR_BUCKET_WITH_NOTIFICATION)['QueueUrl']
queue_attributes = self.sqs_client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['QueueArn'])
return queue_url, queue_attributes
def _create_test_notification_bucket(self, queue_attributes, bucket_name):
self.s3_client.create_bucket(Bucket=bucket_name)
self.s3_client.put_bucket_notification_configuration(
Bucket=bucket_name,
NotificationConfiguration={
'QueueConfigurations': [
{
'QueueArn': queue_attributes['Attributes']['QueueArn'],
'Events': ['s3:ObjectCreated:*']
}
]
}
)
def _get_test_queue_message_count(self, queue_url):
queue_attributes = self.sqs_client.get_queue_attributes(
QueueUrl=queue_url, AttributeNames=['ApproximateNumberOfMessages']
)
return queue_attributes['Attributes']['ApproximateNumberOfMessages']
def _delete_bucket(self, bucket_name, keys=[]):
keys = keys if isinstance(keys, list) else [keys]
objects = [{'Key': k} for k in keys]
if objects:
self.s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': objects})
self.s3_client.delete_bucket(Bucket=bucket_name)
def _perform_multipart_upload(self, bucket, key, data=None, zip=False, acl=None):
kwargs = {'ACL': acl} if acl else {}
multipart_upload_dict = self.s3_client.create_multipart_upload(Bucket=bucket, Key=key, **kwargs)
upload_id = multipart_upload_dict['UploadId']
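        # Upload the payload as a single part and complete the multipart upload with its ETag.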
# Write contents to memory rather than a file.
data = data or (5 * short_uid())
data = to_bytes(data)
upload_file_object = BytesIO(data)
if zip:
upload_file_object = BytesIO()
with gzip.GzipFile(fileobj=upload_file_object, mode='w') as filestream:
filestream.write(data)
response = self.s3_client.upload_part(Bucket=bucket, Key=key,
Body=upload_file_object, PartNumber=1, UploadId=upload_id)
multipart_upload_parts = [{'ETag': response['ETag'], 'PartNumber': 1}]
return self.s3_client.complete_multipart_upload(
Bucket=bucket, Key=key, MultipartUpload={'Parts': multipart_upload_parts}, UploadId=upload_id
)
def _perform_multipart_upload_with_presign(self, bucket, key, s3_client=None, data=None, zip=False, acl=None):
if not s3_client:
s3_client = self.s3_client
kwargs = {'ACL': acl} if acl else {}
multipart_upload_dict = self.s3_client.create_multipart_upload(Bucket=bucket, Key=key, **kwargs)
upload_id = multipart_upload_dict['UploadId']
# Write contents to memory rather than a file.
data = data or (5 * short_uid())
data = to_bytes(data)
upload_file_object = BytesIO(data)
if zip:
upload_file_object = BytesIO()
with gzip.GzipFile(fileobj=upload_file_object, mode='w') as filestream:
filestream.write(data)
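        # Presign the upload_part call and PUT the raw part bytes directly against the signed URL.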
signed_url = s3_client.generate_presigned_url(ClientMethod='upload_part',
Params={'Bucket': bucket, 'Key': key, 'UploadId': upload_id, 'PartNumber': 1})
response = requests.put(signed_url, data=upload_file_object)
multipart_upload_parts = [{'ETag': response.headers['ETag'], 'PartNumber': 1}]
return s3_client.complete_multipart_upload(
Bucket=bucket, Key=key, MultipartUpload={'Parts': multipart_upload_parts}, UploadId=upload_id
)
def _perform_presigned_url_upload(self, bucket, key):
client = self._get_test_client()
url = client.generate_presigned_url(
'put_object', Params={'Bucket': bucket, 'Key': key}
)
url = url + '&X-Amz-Credential=x&X-Amz-Signature=y'
requests.put(url, data='something', verify=False)
def _get_test_client(self):
return boto3.client(
's3',
endpoint_url=config.get_edge_url(),
aws_access_key_id=TEST_AWS_ACCESS_KEY_ID,
aws_secret_access_key=TEST_AWS_SECRET_ACCESS_KEY
)
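    def _example_multipart_roundtrip(self):
        # Illustrative sketch only (not a test from the original suite): shows
        # how the helper methods above are typically combined; the bucket and
        # key names here are made up for the example.
        bucket_name = 'test-bucket-%s' % short_uid()
        key = 'data/example.txt'
        self.s3_client.create_bucket(Bucket=bucket_name)
        self._perform_multipart_upload(bucket_name, key, data='hello world')
        self._delete_bucket(bucket_name, [key])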
|
StarcoderdataPython
|
1827759
|
<reponame>AlexanderBerx/NoiceUi
from setuptools import setup
setup(
name='NoiceUi',
version='0.1.0',
packages=['noiceui', 'bin'],
url='',
license='BSD 3',
author='<NAME>',
author_email='<EMAIL>',
description='noiceui, ui for Solidangles Arnold Noice tool',
requires=['qt.py']
)
|
StarcoderdataPython
|
277575
|
import config
import gc
import json
import utime
import neopixel
from machine import Pin, I2C
from ina219 import INA219
from steppers import Stepper, Axis
from logging import ERROR
from letters import characters
from microWebSrv import MicroWebSrv
# lock
lock1 = False
# ina initialization
i2c = I2C(-1, Pin(config.device['ina_scl']), Pin(config.device['ina_sda']))
ina = INA219(config.device['shunt_ohms'], i2c, log_level=ERROR)
ina.configure()
# leds
np = neopixel.NeoPixel(Pin(27), 25)
# steppers initialization
m1 = Stepper(0, Pin(config.device['m1_dir']),
Pin(config.device['m1_step']),
Pin(config.device['m1_enable']),
config.device['pwm_freq'])
m0 = Stepper(1, Pin(config.device['m2_dir']),
Pin(config.device['m2_step']),
Pin(config.device['m2_enable']),
config.device['pwm_freq'])
# axis initialization
aperture = Axis(m0, ina,
config.device['max_ma_aperture'], config.device['margin'])
focus = Axis(m1, ina,
config.device['max_ma_focus'], config.device['margin'])
def write_2leds(letter, color):
rgb = color
char_matrix = characters.get(letter)
led_counter = 0
for row in char_matrix:
for led in row:
if(led):
np[led_counter] = rgb
else:
np[led_counter] = (0, 0, 0)
led_counter += 1
np.write()
# axis calibration
write_2leds(".", (3, 0, 0))
current = ina.current()
write_2leds(".", (0, 0, 5))
aperture.calibration()
utime.sleep_ms(1000)
write_2leds(".", (0, 3, 0))
focus.calibration()
write_2leds(" ", (0, 0, 0))
# webserver functions
def _httpHandlerMemory(httpClient, httpResponse, routeArgs):
print("In Memory HTTP variable route :")
query = str(routeArgs['query'])
if 'gc' in query or 'collect' in query:
gc.collect()
content = """\
{}
""".format(gc.mem_free())
httpResponse.WriteResponseOk(headers=None,
contentType="text/html",
contentCharset="UTF-8",
content=content)
def _httpHandlerGetStatus(httpClient, httpResponse, routeArgs):
global focus, aperture
mtype = routeArgs['mtype']
if 'focus' in mtype:
max_steps = focus.max_steps
calibrated = focus.calibrated
actual_position = focus.actual_position
elif 'aperture' in mtype:
max_steps = aperture.max_steps
calibrated = aperture.calibrated
actual_position = aperture.actual_position
data = {
'mtype': mtype,
'max_steps': max_steps,
'calibrated': calibrated,
'position': actual_position
}
httpResponse.WriteResponseOk(headers=None,
contentType="text/html",
contentCharset="UTF-8",
content=json.dumps(data))
gc.collect()
def _httpHandlerSetCalibration(httpClient, httpResponse, routeArgs):
global focus, aperture
mtype = routeArgs['mtype']
if 'focus' in mtype:
max_steps = focus.calibration()
position = focus.actual_position
calibrated = focus.calibrated
elif 'aperture' in mtype:
max_steps = aperture.calibration()
position = aperture.actual_position
calibrated = aperture.calibrated
data = {
'mtype': mtype,
'max_steps': max_steps,
'calibrated': calibrated,
'position': position
}
httpResponse.WriteResponseOk(headers=None,
contentType="text/html",
contentCharset="UTF-8",
content=json.dumps(data))
gc.collect()
def _httpHandlerSetMove(httpClient, httpResponse, routeArgs):
global focus, aperture, lock1
mtype = routeArgs['mtype']
steps = int(routeArgs['steps'])
clockwise = -1 if int(routeArgs['clockwise']) == 0 else 1
status = 0
position = 0
if 'focus' in mtype:
write_2leds(".", (0, 3, 0))
status = focus.move(clockwise * steps, 1)
position = focus.actual_position
elif 'aperture' in mtype:
write_2leds(".", (0, 0, 5))
status = aperture.move(clockwise * steps, 1)
position = aperture.actual_position
write_2leds(" ", (0, 0, 0))
data = {
'mtype': mtype,
'steps': steps,
'status': status,
'clockwise': clockwise,
'position': position
}
httpResponse.WriteResponseOk(headers=None,
contentType="text/html",
contentCharset="UTF-8",
content=json.dumps(data))
gc.collect()
routeHandlers = [
("/memory/<query>", "GET", _httpHandlerMemory),
("/status/<mtype>", "GET", _httpHandlerGetStatus),
("/calibration/<mtype>", "GET", _httpHandlerSetCalibration),
("/move/<mtype>/<steps>/<clockwise>", "GET", _httpHandlerSetMove)
]
mws = MicroWebSrv(routeHandlers=routeHandlers, webPath="www/")
mws.Start(threaded=True)
gc.collect()
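# Example requests against the routes registered above (illustrative only; the
# host below is an assumption -- substitute the IP address your board reports):
#   GET http://192.168.4.1/status/focus           -> current focus axis state
#   GET http://192.168.4.1/calibration/aperture   -> re-run aperture calibration
#   GET http://192.168.4.1/move/focus/100/1       -> move focus 100 steps clockwise
#   GET http://192.168.4.1/memory/gc              -> run gc and report free memory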
|
StarcoderdataPython
|
6403980
|
<gh_stars>0
from random import *
from words import wordlist
DEBUG = False
class HangMan(object):
def __init__(self, difficulty="easy", words = wordlist, game_id = 0):
self.number = game_id
self.word = words[randint(0,len(words) - 1)]
self.difficulty = difficulty
self.welcome_message = "[+] Welcome To HangMan. \n[+] Game Is Set To " + str(self.difficulty).upper() + " Difficulty. \n"
self.word_state = [ "_" for i in range(len(self.word)) ]
self.turn = None
#self.player_score = {}
self.character_guesses = []
self.players = []
#self.connections = []
self.limit_lookup = { "easy" : 3, "medium" : 2, "hard" : 1 }
self.limit = self.limit_lookup[self.difficulty] * len(self.word)
def welcome(self):
return self.welcome_message
def add_player(self, player):
self.players.append(player)
#self.connections.append(connection)
#self.player_score[player] = 0
def character_guess(self, character):
if len(character) > 1:
if character == self.word:
self.word_state = [ c for c in self.word ]
return len(self.word)
else:
return -1
if character not in self.character_guesses:
self.character_guesses.append(character)
indices = [i for i,word_character in enumerate(self.word) if word_character == character ]
for index in indices:
self.word_state[index] = character
if "".join(self.word_state).count("_") == 0:
return len(self.word)
return self.word.count(character)
return 0
def update_player_state(self, player, occurences):
player.score += occurences
    def is_over(self, occurences):
        if occurences == len(self.word):
            return True
        if len(self.character_guesses) == self.limit:
            return True
        if "".join(self.word_state).count("_") == 0:
            return True
        return False
def get_players_states(self):
player_states = ""
for player in self.players:
player_states += player.name + " " + str(player.score)
#i = self.players.index(name)
if str(self.turn) == player.name:
player_states += " * "
player_states+="\n"
return player_states
def remove_player(self, player):
self.players.remove(player)
def update_turn(self):
if self.turn == None:
self.turn = self.players[0]
return self.turn
i = self.players.index(self.turn)
self.turn = self.players[(i+1)%len(self.players)]
return self.turn
def get_state(self):
return " ".join(self.word_state) + "\n\n" + self.get_players_states() +"\n[!] Guessed Characters: " + ",".join(self.character_guesses) + "\n"
if __name__ == "__main__":
    class Player(object):
        # Minimal player object with the attributes the game expects (name, score).
        def __init__(self, name):
            self.name = name
            self.score = 0
        def __str__(self):
            return self.name
    game = HangMan()
    game.add_player(Player("TomRiddler"))
    game_over = False
    while not game_over:
        current_turn = game.update_turn()
        print game.get_state()
        c = raw_input("Enter Guess: ")
        occurences = game.character_guess(c)
        game.update_player_state(current_turn, occurences)
        game_over = game.is_over(occurences)
    print "[!] Game Over."
|
StarcoderdataPython
|
12804102
|
<reponame>levilucio/SyVOLT<gh_stars>1-10
"""
__MapDistributable_MDL.py_____________________________________________________
Automatically generated AToM3 Model File (Do not modify directly)
Author: levi
Modified: Fri Aug 23 15:40:27 2013
______________________________________________________________________________
"""
from stickylink import *
from widthXfillXdecoration import *
from MatchModel import *
from ApplyModel import *
from ECU import *
from VirtualDevice import *
from Distributable import *
from ComponentPrototype import *
from SwCompToEcuMapping_component import *
from paired_with import *
from match_contains import *
from directLink_S import *
from directLink_T import *
from apply_contains import *
from graph_ECU import *
from graph_match_contains import *
from graph_Distributable import *
from graph_SwCompToEcuMapping_component import *
from graph_directLink_T import *
from graph_directLink_S import *
from graph_MatchModel import *
from graph_apply_contains import *
from graph_paired_with import *
from graph_ApplyModel import *
from graph_VirtualDevice import *
from graph_ComponentPrototype import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Action import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def MapDistributable_MDL(self, rootNode, GM2AUTOSAR_MMRootNode=None):
# --- Generating attributes code for ASG GM2AUTOSAR_MM ---
if( GM2AUTOSAR_MMRootNode ):
# author
GM2AUTOSAR_MMRootNode.author.setValue('Annonymous')
# description
GM2AUTOSAR_MMRootNode.description.setValue('\n')
GM2AUTOSAR_MMRootNode.description.setHeight(15)
# name
GM2AUTOSAR_MMRootNode.name.setValue('MapDistributable')
# --- ASG attributes over ---
self.obj41=MatchModel(self)
self.obj41.isGraphObjectVisual = True
if(hasattr(self.obj41, '_setHierarchicalLink')):
self.obj41._setHierarchicalLink(False)
self.obj41.graphClass_= graph_MatchModel
if self.genGraphics:
new_obj = graph_MatchModel(20.0,300.0,self.obj41)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MatchModel", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj41.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj41)
self.globalAndLocalPostcondition(self.obj41, rootNode)
self.obj41.postAction( rootNode.CREATE )
self.obj42=ApplyModel(self)
self.obj42.isGraphObjectVisual = True
if(hasattr(self.obj42, '_setHierarchicalLink')):
self.obj42._setHierarchicalLink(False)
self.obj42.graphClass_= graph_ApplyModel
if self.genGraphics:
new_obj = graph_ApplyModel(20.0,580.0,self.obj42)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("ApplyModel", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj42.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj42)
self.globalAndLocalPostcondition(self.obj42, rootNode)
self.obj42.postAction( rootNode.CREATE )
self.obj43=ECU(self)
self.obj43.isGraphObjectVisual = True
if(hasattr(self.obj43, '_setHierarchicalLink')):
self.obj43._setHierarchicalLink(False)
# classtype
self.obj43.classtype.setValue('ECU')
# cardinality
self.obj43.cardinality.setValue('1')
# name
self.obj43.name.setValue('ecu1')
self.obj43.graphClass_= graph_ECU
if self.genGraphics:
new_obj = graph_ECU(240.0,400.0,self.obj43)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("ECU", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj43.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj43)
self.globalAndLocalPostcondition(self.obj43, rootNode)
self.obj43.postAction( rootNode.CREATE )
self.obj44=VirtualDevice(self)
self.obj44.isGraphObjectVisual = True
if(hasattr(self.obj44, '_setHierarchicalLink')):
self.obj44._setHierarchicalLink(False)
# classtype
self.obj44.classtype.setValue('VirtualDevice')
# cardinality
self.obj44.cardinality.setValue('1')
# name
self.obj44.name.setValue('vd1')
self.obj44.graphClass_= graph_VirtualDevice
if self.genGraphics:
new_obj = graph_VirtualDevice(440.0,400.0,self.obj44)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("VirtualDevice", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj44.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj44)
self.globalAndLocalPostcondition(self.obj44, rootNode)
self.obj44.postAction( rootNode.CREATE )
self.obj45=Distributable(self)
self.obj45.isGraphObjectVisual = True
if(hasattr(self.obj45, '_setHierarchicalLink')):
self.obj45._setHierarchicalLink(False)
# classtype
self.obj45.classtype.setValue('Distributable')
# cardinality
self.obj45.cardinality.setValue('+')
# name
self.obj45.name.setValue('dist1')
self.obj45.graphClass_= graph_Distributable
if self.genGraphics:
new_obj = graph_Distributable(660.0,400.0,self.obj45)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Distributable", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj45.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj45)
self.globalAndLocalPostcondition(self.obj45, rootNode)
self.obj45.postAction( rootNode.CREATE )
self.obj46=ComponentPrototype(self)
self.obj46.isGraphObjectVisual = True
if(hasattr(self.obj46, '_setHierarchicalLink')):
self.obj46._setHierarchicalLink(False)
# classtype
self.obj46.classtype.setValue('ComponentPrototype')
# cardinality
self.obj46.cardinality.setValue('1')
# name
self.obj46.name.setValue('comp1')
self.obj46.graphClass_= graph_ComponentPrototype
if self.genGraphics:
new_obj = graph_ComponentPrototype(620.0,660.0,self.obj46)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("ComponentPrototype", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj46.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj46)
self.globalAndLocalPostcondition(self.obj46, rootNode)
self.obj46.postAction( rootNode.CREATE )
self.obj47=SwCompToEcuMapping_component(self)
self.obj47.isGraphObjectVisual = True
if(hasattr(self.obj47, '_setHierarchicalLink')):
self.obj47._setHierarchicalLink(False)
# classtype
self.obj47.classtype.setValue('SwCompToEcuMapping_component')
# cardinality
self.obj47.cardinality.setValue('1')
# name
self.obj47.name.setValue('sctemc1')
self.obj47.graphClass_= graph_SwCompToEcuMapping_component
if self.genGraphics:
new_obj = graph_SwCompToEcuMapping_component(320.0,660.0,self.obj47)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("SwCompToEcuMapping_component", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj47.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj47)
self.globalAndLocalPostcondition(self.obj47, rootNode)
self.obj47.postAction( rootNode.CREATE )
self.obj48=paired_with(self)
self.obj48.isGraphObjectVisual = True
if(hasattr(self.obj48, '_setHierarchicalLink')):
self.obj48._setHierarchicalLink(False)
self.obj48.graphClass_= graph_paired_with
if self.genGraphics:
new_obj = graph_paired_with(157.0,484.5,self.obj48)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("paired_with", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj48.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj48)
self.globalAndLocalPostcondition(self.obj48, rootNode)
self.obj48.postAction( rootNode.CREATE )
self.obj49=match_contains(self)
self.obj49.isGraphObjectVisual = True
if(hasattr(self.obj49, '_setHierarchicalLink')):
self.obj49._setHierarchicalLink(False)
self.obj49.graphClass_= graph_match_contains
if self.genGraphics:
new_obj = graph_match_contains(267.5,393.5,self.obj49)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("match_contains", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj49.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj49)
self.globalAndLocalPostcondition(self.obj49, rootNode)
self.obj49.postAction( rootNode.CREATE )
self.obj50=match_contains(self)
self.obj50.isGraphObjectVisual = True
if(hasattr(self.obj50, '_setHierarchicalLink')):
self.obj50._setHierarchicalLink(False)
self.obj50.graphClass_= graph_match_contains
if self.genGraphics:
new_obj = graph_match_contains(367.5,393.5,self.obj50)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("match_contains", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj50.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj50)
self.globalAndLocalPostcondition(self.obj50, rootNode)
self.obj50.postAction( rootNode.CREATE )
self.obj51=match_contains(self)
self.obj51.isGraphObjectVisual = True
if(hasattr(self.obj51, '_setHierarchicalLink')):
self.obj51._setHierarchicalLink(False)
self.obj51.graphClass_= graph_match_contains
if self.genGraphics:
new_obj = graph_match_contains(477.5,393.5,self.obj51)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("match_contains", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj51.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj51)
self.globalAndLocalPostcondition(self.obj51, rootNode)
self.obj51.postAction( rootNode.CREATE )
self.obj52=directLink_S(self)
self.obj52.isGraphObjectVisual = True
if(hasattr(self.obj52, '_setHierarchicalLink')):
self.obj52._setHierarchicalLink(False)
# associationType
self.obj52.associationType.setValue('virtualDevice')
self.obj52.graphClass_= graph_directLink_S
if self.genGraphics:
new_obj = graph_directLink_S(482.0,444.0,self.obj52)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("directLink_S", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj52.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj52)
self.globalAndLocalPostcondition(self.obj52, rootNode)
self.obj52.postAction( rootNode.CREATE )
self.obj53=directLink_S(self)
self.obj53.isGraphObjectVisual = True
if(hasattr(self.obj53, '_setHierarchicalLink')):
self.obj53._setHierarchicalLink(False)
# associationType
self.obj53.associationType.setValue('distributable')
self.obj53.graphClass_= graph_directLink_S
if self.genGraphics:
new_obj = graph_directLink_S(692.0,444.0,self.obj53)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("directLink_S", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj53.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj53)
self.globalAndLocalPostcondition(self.obj53, rootNode)
self.obj53.postAction( rootNode.CREATE )
self.obj54=directLink_T(self)
self.obj54.isGraphObjectVisual = True
if(hasattr(self.obj54, '_setHierarchicalLink')):
self.obj54._setHierarchicalLink(False)
# associationType
self.obj54.associationType.setValue('componentPrototype')
self.obj54.graphClass_= graph_directLink_T
if self.genGraphics:
new_obj = graph_directLink_T(613.0,702.0,self.obj54)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("directLink_T", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj54.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj54)
self.globalAndLocalPostcondition(self.obj54, rootNode)
self.obj54.postAction( rootNode.CREATE )
self.obj55=apply_contains(self)
self.obj55.isGraphObjectVisual = True
if(hasattr(self.obj55, '_setHierarchicalLink')):
self.obj55._setHierarchicalLink(False)
self.obj55.graphClass_= graph_apply_contains
if self.genGraphics:
new_obj = graph_apply_contains(312.0,664.0,self.obj55)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("apply_contains", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj55.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj55)
self.globalAndLocalPostcondition(self.obj55, rootNode)
self.obj55.postAction( rootNode.CREATE )
self.obj56=apply_contains(self)
self.obj56.isGraphObjectVisual = True
if(hasattr(self.obj56, '_setHierarchicalLink')):
self.obj56._setHierarchicalLink(False)
self.obj56.graphClass_= graph_apply_contains
if self.genGraphics:
new_obj = graph_apply_contains(462.0,664.0,self.obj56)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("apply_contains", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj56.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj56)
self.globalAndLocalPostcondition(self.obj56, rootNode)
self.obj56.postAction( rootNode.CREATE )
# Connections for obj41 (graphObject_: Obj0) of type MatchModel
self.drawConnections(
(self.obj41,self.obj48,[153.0, 343.0, 157.0, 484.5],"true", 2),
(self.obj41,self.obj49,[153.0, 343.0, 267.5, 393.5],"true", 2),
(self.obj41,self.obj50,[153.0, 343.0, 367.5, 393.5],"true", 2),
(self.obj41,self.obj51,[153.0, 343.0, 477.5, 393.5],"true", 2) )
# Connections for obj42 (graphObject_: Obj1) of type ApplyModel
self.drawConnections(
(self.obj42,self.obj55,[161.0, 626.0, 312.0, 664.0],"true", 2),
(self.obj42,self.obj56,[161.0, 626.0, 462.0, 664.0],"true", 2) )
# Connections for obj43 (graphObject_: Obj2) named ecu1
self.drawConnections(
(self.obj43,self.obj52,[382.0, 444.0, 482.0, 444.0],"true", 2) )
# Connections for obj44 (graphObject_: Obj3) named vd1
self.drawConnections(
(self.obj44,self.obj53,[582.0, 444.0, 692.0, 444.0],"true", 2) )
# Connections for obj45 (graphObject_: Obj4) named dist1
self.drawConnections(
)
# Connections for obj46 (graphObject_: Obj5) named comp1
self.drawConnections(
)
# Connections for obj47 (graphObject_: Obj6) named sctemc1
self.drawConnections(
(self.obj47,self.obj54,[463.0, 702.0, 613.0, 702.0],"true", 2) )
# Connections for obj48 (graphObject_: Obj7) of type paired_with
self.drawConnections(
(self.obj48,self.obj42,[157.0, 484.5, 161.0, 626.0],"true", 2) )
# Connections for obj49 (graphObject_: Obj8) of type match_contains
self.drawConnections(
(self.obj49,self.obj43,[267.5, 393.5, 382.0, 444.0],"true", 2) )
# Connections for obj50 (graphObject_: Obj9) of type match_contains
self.drawConnections(
(self.obj50,self.obj44,[367.5, 393.5, 582.0, 444.0],"true", 2) )
# Connections for obj51 (graphObject_: Obj10) of type match_contains
self.drawConnections(
(self.obj51,self.obj45,[477.5, 393.5, 802.0, 444.0],"true", 2) )
# Connections for obj52 (graphObject_: Obj11) of type directLink_S
self.drawConnections(
(self.obj52,self.obj44,[482.0, 444.0, 582.0, 444.0],"true", 2) )
# Connections for obj53 (graphObject_: Obj12) of type directLink_S
self.drawConnections(
(self.obj53,self.obj45,[692.0, 444.0, 802.0, 444.0],"true", 2) )
# Connections for obj54 (graphObject_: Obj13) of type directLink_T
self.drawConnections(
(self.obj54,self.obj46,[613.0, 702.0, 763.0, 702.0],"true", 2) )
# Connections for obj55 (graphObject_: Obj14) of type apply_contains
self.drawConnections(
(self.obj55,self.obj47,[312.0, 664.0, 463.0, 702.0],"true", 2) )
# Connections for obj56 (graphObject_: Obj15) of type apply_contains
self.drawConnections(
(self.obj56,self.obj46,[462.0, 664.0, 763.0, 702.0],"true", 2) )
newfunction = MapDistributable_MDL
loadedMMName = 'GM2AUTOSAR_MM_META'
atom3version = '0.3'
|
StarcoderdataPython
|
8018166
|
# Author: <NAME> <<EMAIL>>
"""Command-line parsing library
This module is a argparse-inspired command-line parsing library that:
- customized man like help
- all parameters must be with -x or --xx format
- handles compression of short parameters. eg: -abcd -e
The following is a simple usage example that sums integers from the
command-line and writes the result to a file:
    parser = manparse.ParameterParser(
        description="sum the integers at the command line")
    parser.add_parameter('-i', '--integers',
                         type=int,
                         nargs='+',
                         section='Required',
                         help="list of integers to be summed")
    parser.add_parameter('-l', '--log',
                         type=manparse.FileType('w'),
                         default='log.txt',
                         section='Optional',
                         help="the file where the sum should be written")
    result = parser.parse_params()
"""
__all__ = [
    'ParameterParser',
    'Namespace',
    'FileType',
    'DirType',
    'SUPPRESS',
    'ParameterError',
]
import __builtin__ as _ori
import copy as _copy
import os as _os
import re as _re
import sys as _sys
SUPPRESS = '==SUPPRESS=='
def _check_type(ptype, value):
try:
ptype(value)
return True
except (ValueError, IOError):
return False
def _check_choices(choices, value):
return True if choices is None or value in choices else False
def _parse_list(parameter, slist):
if _ori.type(parameter.nargs) is int and parameter.nargs > len(slist):
msg = "needs %s values" % (parameter.nargs)
raise ParameterError(parameter, msg)
list_values = []
for value in slist:
# Check if value is a valid value and not a parameter
if _check_type(int, value) or not value.startswith('-'):
if _check_type(parameter.type, value):
value = parameter.type(value)
if _check_choices(parameter.choices, value):
list_values.append(value)
else:
msg = "'%s' not in choices %s" % (value, parameter.choices)
raise ParameterError(parameter, msg)
else:
msg = "'%s' is not %s" % (value, parameter.type)
raise ParameterError(parameter, msg)
else:
msg = "'%s' is not a valid value" % (value)
raise ParameterError(parameter, msg)
return list_values
class ParameterError(Exception):
"""An error from creating or using an argument
"""
def __init__(self, parameter, message):
if parameter is None:
self.parameter_name = None
else:
self.parameter_name = parameter.name
self.message = message
def __str__(self):
if self.parameter_name is None:
format = '%(message)s'
else:
format = 'parameter %(parameter_name)s: %(message)s'
return format % dict(message=self.message,
parameter_name=self.parameter_name)
class FileType(object):
"""Factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ParameterParser add_parameter() method.
Keyword Arguments:
- mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
- bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
"""
def __init__(self, mode='r', bufsize=None):
self._mode = mode
self._bufsize = bufsize
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return _sys.stdin
elif 'w' in self._mode:
return _sys.stdout
else:
msg = ('argument "-" with mode %r' % self._mode)
raise ValueError(msg)
try:
if self._mode not in ['r', 'w']:
msg = "'%s' is not a valid mode for FileType. Valid modes: 'r', 'w'" % (self._mode)
raise ParameterError(None, msg)
if self._bufsize:
return open(string, self._mode, self._bufsize)
else:
return open(string, self._mode)
except IOError:
err = _sys.exc_info()[1]
message = "cannot open '%s': %s"
raise ParameterError(None, message % (string, err))
def __repr__(self):
args = [self._mode, self._bufsize]
args_str = ', '.join([repr(arg) for arg in args if arg is not None])
return "%s(%s)" % (type(self).__name__, args_str)
def __cmp__(self, other):
return 0
def __eq__(self, other):
if other is FileType:
return True
else:
return False
class DirType(object):
"""Factory for creating dir object types
TODO DOCSTRING
"""
def __init__(self, check=False):
self.check = check
def __call__(self, string):
if type(string) is not str:
msg = "%s must be 'str' type"
raise ParameterError(None, msg)
if self.check:
if not _os.path.isdir(string):
msg = "%s is not an actual directory" % (string)
raise ParameterError(None, msg)
return string
def __repr__(self):
return "%s(check=%s)" % (type(self).__name__, self.check)
def __cmp__(self, other):
return 0
def __eq__(self, other):
if other is DirType:
return True
else:
return False
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
def _get_args(self):
return []
class Namespace(_AttributeHolder):
"""Simple object for storing attributes.
Implements equality by attribute names and values, and provides a simple
string representation.
"""
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
__hash__ = None
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
def __contains__(self, key):
return key in self.__dict__
def __iter__(self):
        return iter([a for a in dir(self) if not a.startswith('_') and a != 'next'])
class _Parameter(object):
"""Object that represent a parameter
"""
def __init__(self, name, long_name, dest,
type=None,
action='store',
nargs=None,
default=None,
const=None,
required=False,
choices=None,
section=None,
help=None):
self.name = name
self.long_name = long_name
self.dest = dest
# Transform to bool
self.required = bool(required)
# Section in help
if self.required is True:
section = "Required"
if section is None:
section = "Others"
self.section = section
# Type check
if type is None:
type = str
if type in [str, int, float, bool, FileType, DirType]:
self.type = type
else:
msg = "not valid type '%s'. Valid types: str, int, float, bool, 'FileType', 'DirType'" % (type)
raise ParameterError(self, msg)
# Action check
if action in ['store', 'store_true', 'store_false', 'help', 'version']:
self.action = action
if self.action == 'store_true':
default = False
if self.action == 'store_false':
default = True
else:
msg = "not valid action '%s'. Valid actions: 'store', 'store_true', 'store_false', 'help', 'version'" % (action)
raise ParameterError(self, msg)
# Default type conversion if it is necessary
if default not in [None, SUPPRESS]:
if not _check_type(self.type, default):
msg = "default value '%s' is not '%s'" % (default, self.type)
raise ParameterError(self, msg)
default = self.type(default)
self.default = default
# Const type conversion if it is necessary
if const is not None:
if not _check_type(self.type, const):
msg = "const value '%s' is not '%s'" % (const, self.type)
raise ParameterError(self, msg)
const = self.type(const)
self.const = const
# Check choices
if choices is not None:
if hasattr(choices, '__iter__'):
choices = [self.type(i) for i in choices]
else:
msg = "choices not support 'in' operator"
raise ParameterError(self, msg)
self.choices = choices
# Check nargs (default_nargs stores is nargs is passed)
if nargs is None:
nargs = 1
self.default_nargs = True
else:
self.default_nargs = False
if self.action == "store_true" or self.action == "store_false":
self.nargs = 0
else:
if (_ori.type(nargs) is int and nargs >= 0) or nargs in ['+', '*', '?']:
if nargs == '?':
if self.default is None:
msg = "default value not set for nargs '?'"
raise ParameterError(self, msg)
if self.const is None:
msg = "const value not set for nargs '?'"
raise ParameterError(self, msg)
self.nargs = nargs
else:
msg = "not valid value for nargs"
raise ParameterError(self, msg)
# Transform help
self.help = str(help)
def __str__(self):
return "name=%s long_name=%s dest=%s type=%s default=%s nargs=%s required=%s choices=%s action=%s section=%s" % \
(self.name, self.long_name, self.dest, self.type, self.default, self.nargs, self.required, self.choices, self.action, self.section)
class ParameterParser(_AttributeHolder):
"""Object for parsing command line strings into Python objects.
Keyword arguments:
- prog = The name of the program (default: sys.argv[0])
- short_description = Brief description of the program
- description = A description of what the program does
- epilog = Text at the end of help
- add_help = Add a -h/--help option to the parser (default: True)
"""
def __init__(self,
prog=None,
short_description=None,
description=None,
bugs=None,
epilog=None,
add_help=True,
version=None):
if prog is None:
prog = _os.path.basename(_sys.argv[0])
self.prog = prog
self.short_description = short_description
self.description = description
self.bugs = bugs
self.epilog = epilog
self.add_help = add_help
self.version = version
# Store parameter objects
self.parameters = []
# Store the short name of the parameters introduced by the user (for an easier checking restrictions)
self.user_name_parameters = []
# Store the dependency params restrictions as a dictionary of _Parameter.names (_Parameter.name: list of _Parameter.names)
self.dependency_params_restrictions = {}
# Store incompatible params restrictions as a dictionary of _Parameter.names (_Parameter.name: list of _Parameter.names)
self.incompatible_params_restrictions = {}
if self.add_help:
self.add_parameter('-h', '--help',
action='help',
default=SUPPRESS,
help='Show this help and exit')
if self.version is not None:
self.add_parameter('-V', '--version',
action='version',
default=SUPPRESS,
help='Show the version and exit')
# ================================
# Pretty __repr__ methods
# Inherited from _AttributeHolder
# ================================
def _get_kwargs(self):
names = [
'prog',
'short_description',
'description',
'epilog'
]
return [(name, getattr(self, name)) for name in names]
# ================================
# Parameter methods
# ================================
def add_parameter(self, short_command, long_command=None, **kwargs):
try:
# Checking short_command
if short_command is not None and _re.match('^-[a-zA-Z]$', short_command) is not None:
short_command = short_command.replace('-', '', 1)
for param in self.parameters:
if param.name == short_command:
msg = "'%s' duplicated as short command" % (short_command)
raise ParameterError(None, msg)
else:
msg = "'%s' is not valid as short command" % (short_command)
raise ParameterError(None, msg)
# Checking long_command
if long_command is not None:
if _re.match('^--[A-Za-z]{2,}[A-Za-z_]*[A-Za-z]$', long_command) is not None:
long_command = long_command.replace('--', '', 1)
for param in self.parameters:
if param.long_name == long_command:
msg = "' %s' duplicate as long command" % (long_command)
raise ParameterError(None, msg)
else:
msg = "'%s' is not valid as long command" % (long_command)
raise ParameterError(None, msg)
# Checking dest
if 'dest' in kwargs:
dest = kwargs['dest']
del kwargs['dest']
else:
if long_command is not None:
dest = long_command
else:
dest = short_command
for param in self.parameters:
if dest == param.dest:
msg = "'%s' duplicate as dest" % (dest)
raise ParameterError(None, msg)
new_parameter = _Parameter(short_command, long_command, dest, **kwargs)
self.parameters.append(new_parameter)
except ParameterError:
err = _sys.exc_info()[1]
self._error(err)
# ================================
# Parameter methods - Restrictions
# ================================
def dependency_params(self, *args):
try:
if _ori.len(args) < 2:
msg = "dependency_params method: it needs two parameters at least"
raise ParameterError(None, msg)
# Check last parameters
restriction = args[-1]
if _ori.type(restriction) is str:
r = restriction.replace('-', '', 1)
if not self._valid_parameter("short", r):
msg = "dependency_params method: '-%s' not exists" % (r)
raise ParameterError(None, msg)
restriction = [restriction]
elif _ori.type(restriction) is list:
for r in restriction:
r = r.replace('-', '', 1)
if not self._valid_parameter("short", r):
msg = "dependency_params method: '-%s' not exists" % (r)
raise ParameterError(None, msg)
else:
msg = "dependency_params method: '%s' must be a str or list type" % (restriction)
raise ParameterError(None, msg)
# Checking first parameters
for p in args[:_ori.len(args)-1]:
if _ori.type(p) is not str:
msg = "dependency_params method: '%s' must be a str type" % (p)
raise ParameterError(None, msg)
p_replace = p.replace('-', '', 1)
if not self._valid_parameter("short", p_replace):
msg = "dependency_params method: '-%s' not exists" % (p_replace)
raise ParameterError(None, msg)
# Restriction cannot be a restriction to itself
if p in restriction:
msg = "dependency_params method: '%s' cannot be a restriction to itself" % (p)
raise ParameterError(None, msg)
# Check if there is a restriction on the same param in incompatible_params_restrictions
# If not, create restriction in dependency_params_restrictions
if self.incompatible_params_restrictions.has_key(p):
for r in restriction:
if r in self.incompatible_params_restrictions[p]:
msg = "dependency_params method: '%s' cannot be a dependency and incompatibility to the same param %s" % (p, r)
raise ParameterError(None, msg)
if self.dependency_params_restrictions.has_key(p):
for r in restriction:
self.dependency_params_restrictions[p].append(r)
else:
self.dependency_params_restrictions[p] = restriction
# Remove duplicate values in each key
for key, value in self.dependency_params_restrictions.iteritems():
self.dependency_params_restrictions[key] = list(set(value))
return None
except ParameterError:
err = _sys.exc_info()[1]
self._error(err)
def incompatible_params(self, *args):
try:
if _ori.len(args) < 2:
msg = "incompatible_params method: it needs two parameters at least"
raise ParameterError(None, msg)
# Check last parameters
restriction = args[-1]
if _ori.type(restriction) is str:
r = restriction.replace('-', '', 1)
if not self._valid_parameter("short", r):
msg = "incompatible_params method: '-%s' not exists" % (r)
raise ParameterError(None, msg)
restriction = [restriction]
elif _ori.type(restriction) is list:
for r in restriction:
r = r.replace('-', '', 1)
if not self._valid_parameter("short", r):
msg = "incompatible_params method: '-%s' not exists" % (r)
raise ParameterError(None, msg)
else:
msg = "incompatible_params method: '%s' must be a str or list type" % (restriction)
raise ParameterError(None, msg)
# Checking first parameters
for p in args[:_ori.len(args)-1]:
if _ori.type(p) is not str:
msg = "incompatible_params method: '%s' must be a str type" % (p)
raise ParameterError(None, msg)
p_replace = p.replace('-', '', 1)
if not self._valid_parameter("short", p_replace):
msg = "incompatible_params method: '-%s' not exists" % (p_replace)
raise ParameterError(None, msg)
# Restriction cannot be a restriction to itself
if p in restriction:
msg = "incompatible_params method: '%s' cannot be a restriction to itself" % (p)
raise ParameterError(None, msg)
# Check if there is a restriction on the same param in dependency_params_restrictions
# If not, create restriction in incompatible_params_restrictions
if self.dependency_params_restrictions.has_key(p):
for r in restriction:
if r in self.dependency_params_restrictions[p]:
msg = "incompatible_params method: '%s' cannot be a dependency and incompatibility to the same param %s" % (p, r)
raise ParameterError(None, msg)
if self.incompatible_params_restrictions.has_key(p):
for r in restriction:
self.incompatible_params_restrictions[p].append(r)
else:
self.incompatible_params_restrictions[p] = restriction
# Remove duplicate values in each key
for key, value in self.incompatible_params_restrictions.iteritems():
self.incompatible_params_restrictions[key] = list(set(value))
return None
except ParameterError:
err = _sys.exc_info()[1]
self._error(err)
def _show_store_parameters(self):
i = 0
for x in self.parameters:
i += 1
print "Param %s -> %s" % (i, x)
# ================================
# Parser method
# ================================
def parse_params(self, args=None, namespace=None):
try:
if args is None:
args = _sys.argv[1:]
if namespace is None:
namespace = Namespace()
# Main loop
args_len = len(args)
index = 0
while index < args_len:
checking_param = args[index]
# Validation
if _re.match('^-[A-Za-z]+$', checking_param) is not None:
checking_param = checking_param.replace('-', '', 1)
for p in checking_param:
validated_param = self._valid_parameter("short", p)
if validated_param:
self.user_name_parameters.append(validated_param.name)
index = self._do_action(validated_param, namespace, index, args)
else:
msg = "'-%s' not a valid parameter" % (p)
raise ParameterError(None, msg)
elif _re.match('^--[A-Za-z]{2,}[A-Za-z_]*[A-Za-z]$', checking_param):
# If long parameter, check if it is valid
checking_param = checking_param.replace('--', '', 1)
validated_param = self._valid_parameter("long", checking_param)
if validated_param:
self.user_name_parameters.append(validated_param.name)
index = self._do_action(validated_param, namespace, index, args)
else:
msg = "'--%s' not valid parameter" % (checking_param)
raise ParameterError(None, msg)
else:
msg = "'%s' not a valid parameter format" % (checking_param)
raise ParameterError(None, msg)
index += 1
# Check if missing required parameters (in add_parameter option)
missing_required_param_list = self._check_required_param(namespace)
if len(missing_required_param_list) != 0:
mpl = []
for mp in missing_required_param_list:
mpl.append(mp.dest)
msg = "missing required parameters %s" % (mpl)
raise ParameterError(None, msg)
# First check incompatible_params_restrictions
for value, restrictions in self.incompatible_params_restrictions.iteritems():
value = value.replace('-', '', 1)
if value in self.user_name_parameters:
result = [i for i in restrictions if i.replace('-', '', 1) in self.user_name_parameters]
if _ori.len(result) != 0:
msg = "incompatible parameters %s" % (result)
raise ParameterError(self._valid_parameter("short", value), msg)
# Then check dependency_params_restrictions
for value, restrictions in self.dependency_params_restrictions.iteritems():
value = value.replace('-', '', 1)
if value in self.user_name_parameters:
result = [i for i in restrictions if i.replace('-', '', 1) not in self.user_name_parameters]
if _ori.len(result) != 0:
msg = "missing required parameters %s" % (result)
raise ParameterError(self._valid_parameter("short", value), msg)
# Complete namespace with remaining parameters with default not SUPPRESS
self._complete_namespace(namespace)
return namespace
except ParameterError:
err = _sys.exc_info()[1]
self._error(err)
def _valid_parameter(self, type_param, param):
"""It validates the user param against the valid program parameters
It returns the valid_param if there is a match
"""
if type_param == "short":
for valid_param in self.parameters:
if valid_param.name == param:
return valid_param
else:
for valid_param in self.parameters:
if valid_param.long_name == param:
return valid_param
return False
def _do_action(self, param, namespace, external_index, args):
"""It does the param action and modify the namespace
It can modify de index depending on param.nargs
"""
# Check if the action is help or version
if param.action == 'help':
self._print_help()
elif param.action == 'version':
self._print_version()
else:
external_index = self._store_action(param, namespace, external_index, args)
return external_index
def _check_required_param(self, namespace):
missing_parameters = []
required_params = [p for p in self.parameters if p.required == True]
for rp in required_params:
for p in namespace:
if p == rp.dest:
break
else:
missing_parameters.append(rp)
return missing_parameters
def _complete_namespace(self, namespace):
for p in self.parameters:
if p.default is not SUPPRESS:
if p.dest not in namespace:
setattr(namespace, p.dest, p.default)
return None
# ================
# Actions methods
# ================
def _store_action(self, param, namespace, external_index, args):
# Check if param is already store
for ns in namespace:
if ns == param.dest:
msg = "passed more than one time"
raise ParameterError(param, msg)
if param.action == "store_true":
setattr(namespace, param.dest, True)
elif param.action == "store_false":
setattr(namespace, param.dest, False)
else:
if type(param.nargs) is int:
largs = args[external_index+1:external_index+1+param.nargs]
external_index += param.nargs
list_values = _parse_list(param, largs)
                if len(list_values) == 1:
setattr(namespace, param.dest, list_values[0])
else:
setattr(namespace, param.dest, list_values)
else:
largs = []
if param.nargs == '?':
if len(args) != external_index+1:
if _check_type(int, args[external_index+1]) or not args[external_index+1].startswith('-'):
largs.append(args[external_index+1])
external_index += 1
else:
# Take const
largs.append(param.const)
else:
# Take const
largs.append(param.const)
list_values = _parse_list(param, largs)
else:
for a in args[external_index+1:]:
if _check_type(int, a) or not a.startswith('-'):
largs.append(a)
else:
break
external_index += len(largs)
list_values = _parse_list(param, largs)
if param.nargs == '+':
if len(list_values) == 0:
msg = "need at least one value"
raise ParameterError(param, msg)
if param.default_nargs:
setattr(namespace, param.dest, list_values[0])
else:
setattr(namespace, param.dest, list_values)
return external_index
def _print_help(self):
# Copy self.parameters and sort it
param_help = []
for x in self.parameters:
param_help.append(_copy.copy(x))
param_help.sort(key=lambda param: param.name)
sections_list = []
separator = '\n'
# Name section
name_section = "NAME\n"
name_section += ' ' * 7 + self.prog
if self.short_description is not None:
name_section += ' - ' + self.short_description
name_section += '\n'
sections_list.append(name_section)
# Description section
tab_description_section = ' ' * 7
description_section = "DESCRIPTION\n"
if self.description is not None:
description_section += tab_description_section + self.description.replace('\n', '\n' + tab_description_section) + '\n'
description_section += self._info_subsection_help(param_help, tab_description_section) + self._subsections_help(param_help, tab_description_section)
sections_list.append(description_section)
# Reporting bugs section
if self.bugs is not None:
reporting_bugs_section = "REPORTING BUGS\n"
reporting_bugs_section += ' ' * 7 + "Report bugs to " + self.bugs + '\n'
sections_list.append(reporting_bugs_section)
# Epilog section
if self.epilog is not None:
sections_list.append(self.epilog)
# Join all sections and print
formated_help = separator.join(sections_list)
self._print_message('\n' + formated_help + '\n', _sys.stdout)
self._exit()
def _info_subsection_help(self, param_list, actual_tab):
if self.add_help is True or self.version is not None:
param_tab = actual_tab + ' ' * 3
param_help_tab = param_tab + ' ' * 3
info_help = '\n' + actual_tab + 'Info:\n'
# If version exist
if self.version is not None:
for p in param_list:
if p.name == 'V':
version_param = p
param_list.remove(p)
break
info_help += param_tab + '-' + version_param.name + ', --' + version_param.long_name + '\n'
info_help += param_help_tab + version_param.help + '\n\n'
# If help exist
if self.add_help is True:
for p in param_list:
if p.name == 'h':
help_param = p
param_list.remove(p)
break
info_help += param_tab + '-' + help_param.name + ', --' + help_param.long_name + '\n'
info_help += param_help_tab + help_param.help + '\n'
return info_help if 'info_help' in locals() else ''
def _subsections_help(self, param_list, actual_tab):
param_tab = actual_tab + ' ' * 3
param_help_tab = param_tab + ' ' * 3
# Order subsections
subsections = sorted(set([p.section for p in param_list]))
if "Others" in subsections:
subsections.remove("Others")
subsections.insert(0, "Others")
subsections_help = ''
for subsection in subsections:
subsections_help += '\n' + actual_tab + '%s:' % (subsection)
for p in param_list:
if p.section == subsection:
if p.help != SUPPRESS:
subsections_help += '\n'
subsections_help += param_tab + '-' + p.name + ', --' + p.long_name
p_choices = ''
if p.choices is not None:
p_choices = "choices: "
p_choices += str(p.choices)
if p.nargs == '?':
                            if p_choices != '':
p_choices += ', '
subsections_help += ' [' + p.dest.upper() + '] (' + p_choices + 'default: ' + str(p.default) + ', const: ' + str(p.const) + ')\n'
elif p.nargs == '+':
                            if p_choices != '':
p_choices = ' (' + p_choices + ')'
subsections_help += ' <' + p.dest.upper() + '> [' + p.dest.upper() + ' ...]' + p_choices + '\n'
elif p.nargs == 0:
subsections_help += '\n'
elif p.nargs == 1:
                            if p_choices != '':
p_choices = ' (' + p_choices + ')'
subsections_help += ' <' + p.dest.upper() + '>' + p_choices + '\n'
else:
                            if p_choices != '':
p_choices = ', ' + p_choices
subsections_help += ' <' + p.dest.upper() + ' ...> (num: ' + str(p.nargs) + p_choices + ')\n'
subsections_help += param_help_tab + p.help.replace('\n', '\n' + param_help_tab) + '\n'
return subsections_help
def _print_version(self):
print "%s %s" % (self.prog, self.version)
self._exit()
def _print_message(self, message, file):
if message:
file.write(message)
# ================
# Exit and error
# ================
def _exit(self, status=0, message=None):
if message:
self._print_message(message, _sys.stderr)
_sys.exit(status)
def _error(self, message):
"""error(message: string)
Incorporating the message to stderr and exits.
"""
self._exit(2, ('%s: error: %s\n') % (self.prog, message))
def throw_error(self, message):
"""This function allows the user to throw an error inside the parser
"""
self._error(message)
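# A minimal self-test sketch (hypothetical values; it mirrors the module
# docstring example and only illustrates the ParameterParser API above).
if __name__ == "__main__":
    _parser = ParameterParser(description="sum the integers at the command line")
    _parser.add_parameter('-i', '--integers',
                          type=int,
                          nargs='+',
                          section='Required',
                          help="list of integers to be summed")
    _ns = _parser.parse_params(['-i', '1', '2', '3'])
    print "sum = %s" % (sum(_ns.integers))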
|
StarcoderdataPython
|
1697098
|
from revibe.settings.base import *
ENV = 'TEST'
DEBUG = True
ALLOWED_HOSTS = [
'.elasticbeanstalk.com'
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ['RDS_DB_NAME'],
'USER': os.environ['RDS_USERNAME'],
'PASSWORD': os.environ['RDS_PASSWORD'],
'HOST': os.environ['RDS_HOSTNAME'],
'PORT': os.environ['RDS_PORT'],
'OPTIONS': {
'charset': 'utf8mb4'
}
}
}
# AWS stuff
USE_S3 = True
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME')
AWS_S3_REGION_NAME = 'us-east-2'
AWS_DEFAULT_ACL = None
AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com'
AWS_S3_OBJECT_PARAMETERS = {'CacheControl': 'max-age=86400'}
AWS_QUERYSTRING_AUTH = False
# # static files
STATIC_LOCATION = 'static'
STATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/'
STATICFILES_STORAGE = 'revibe.storage_backends.StaticStorage'
# media file settings
MEDIA_LOCATION = 'media'
MEDIA_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/'
DEFAULT_FILE_STORAGE = 'utils.storage_backends.MediaStorage' # custom storage settings
# DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage' # default storage settings
# email settings
EMAIL_BACKEND = "django_ses.SESBackend"
AWS_SES_REGION_NAME = "us-east-1"
AWS_SES_REGION_ENDPOINT = f'email.{AWS_SES_REGION_NAME}.amazonaws.com'
_log_level = 'DEBUG'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'django-file': {
'level': _log_level,
'class': 'logging.FileHandler',
'filename': '/opt/python/log/django.log',
},
},
'loggers': {
'django': {
'handlers': ['django-file'],
'level': _log_level,
'propagate': True,
},
},
}
# LOGGING = {} # disable logging
_redis_url = "api-communication-redis.7pqvq5.ng.0001.use2.cache.amazonaws.com"
_redis_port = 6379
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {
"hosts": [(_redis_url, _redis_port)]
}
}
}
CELERY_BROKER_URL = "sqs://"
CELERY_BROKER_TRANSPORT_OPTIONS = {
'region': 'us-east-2',
'visibility_timeout': 3600,
# 'polling_interval': 10,
'queue_name_prefix': f'celery-{ENV}',
'CELERYD_PREFETCH_MULTIPLIER': 0,
}
|
StarcoderdataPython
|
335170
|
<reponame>pombredanne/discipline
try:
from management.commands.discipline_migrate import Command
from south.signals import post_migrate
def command(app, *args, **kwargs):
print "Discipline detected a South migration, it will now save the new" \
" schema state automatically."
Command().handle()
# Every time a user performs a South migration, Discipline should
# perform a migration of its own, this is in case the user forgets
# to run "manage.py pervert_migrate"
post_migrate.connect(command)
except ImportError:
pass
__version__ = "0.9.1"
|
StarcoderdataPython
|
6561282
|
<reponame>lace/lacecore
from lacecore import shapes
import numpy as np
from vg.compat import v2 as vg
def test_vertex_centroid():
cube_at_origin = shapes.cube(np.zeros(3), 3.0)
np.testing.assert_array_almost_equal(
cube_at_origin.vertex_centroid, np.repeat(1.5, 3)
)
def test_bounding_box():
bounding_box = shapes.cube(np.zeros(3), 3.0).bounding_box
np.testing.assert_array_equal(bounding_box.origin, np.zeros(3))
np.testing.assert_array_equal(bounding_box.size, np.repeat(3, 3))
def test_apex():
cube_at_origin = shapes.cube(np.zeros(3), 3.0)
np.testing.assert_array_almost_equal(
cube_at_origin.apex(np.array([1.0, 1.0, -1.0])), np.array([3.0, 3.0, 0.0])
)
def test_face_normals():
cube_at_origin = shapes.cube(np.zeros(3), 3.0)
np.testing.assert_array_equal(
cube_at_origin.face_normals(),
np.repeat(
np.array(
[
vg.basis.neg_y,
vg.basis.y,
vg.basis.neg_z,
vg.basis.x,
vg.basis.z,
vg.basis.neg_x,
]
),
2,
axis=0,
),
)
|
StarcoderdataPython
|
47717
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021-2022 CERN.
#
# Invenio-Vocabularies is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Base data stream."""
from .errors import ReaderError, TransformerError, WriterError
class StreamEntry:
"""Object to encapsulate streams processing."""
def __init__(self, entry, errors=None):
"""Constructor."""
self.entry = entry
self.filtered = False
self.errors = errors or []
class DataStream:
"""Data stream."""
def __init__(self, readers, writers, transformers=None, *args, **kwargs):
"""Constructor.
:param readers: an ordered list of readers.
:param writers: an ordered list of writers.
:param transformers: an ordered list of transformers to apply.
"""
self._readers = readers
self._transformers = transformers
self._writers = writers
def filter(self, stream_entry, *args, **kwargs):
"""Checks if an stream_entry should be filtered out (skipped)."""
return False
def process(self, *args, **kwargs):
"""Iterates over the entries.
Uses the reader to get the raw entries and transforms them.
It will iterate over the `StreamEntry` objects returned by
the reader, apply the transformations and yield the result of
writing it.
"""
for stream_entry in self.read():
if stream_entry.errors:
yield stream_entry # reading errors
else:
transformed_entry = self.transform(stream_entry)
if transformed_entry.errors:
yield transformed_entry
elif self.filter(transformed_entry):
transformed_entry.filtered = True
yield transformed_entry
else:
yield self.write(transformed_entry)
def read(self):
"""Recursively read the entries."""
def pipe_gen(gen_funcs, piped_item=None):
_gen_funcs = list(gen_funcs) # copy to avoid modifying ref list
# use and remove the current generator
current_gen_func = _gen_funcs.pop(0)
for item in current_gen_func(piped_item):
try:
# exhaust iterations of subsequent generators
if _gen_funcs:
yield from pipe_gen(_gen_funcs, piped_item=item)
# there is no subsequent generator, return the current item
else:
yield StreamEntry(item)
except ReaderError as err:
yield StreamEntry(
entry=item,
errors=[
f"{current_gen_func.__qualname__}: {str(err)}"
]
)
read_gens = [r.read for r in self._readers]
yield from pipe_gen(read_gens)
def transform(self, stream_entry, *args, **kwargs):
"""Apply the transformations to an stream_entry."""
for transformer in self._transformers:
try:
stream_entry = transformer.apply(stream_entry)
except TransformerError as err:
stream_entry.errors.append(
f"{transformer.__class__.__name__}: {str(err)}"
)
return stream_entry # break loop
return stream_entry
def write(self, stream_entry, *args, **kwargs):
"""Apply the transformations to an stream_entry."""
for writer in self._writers:
try:
writer.write(stream_entry)
except WriterError as err:
stream_entry.errors.append(
f"{writer.__class__.__name__}: {str(err)}"
)
return stream_entry
def total(self, *args, **kwargs):
"""The total of entries obtained from the origin."""
raise NotImplementedError()
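# A minimal usage sketch (illustrative only): the reader/transformer/writer
# classes below are hypothetical stand-ins that show the interfaces DataStream
# expects -- readers expose read(), transformers apply(), writers write().
if __name__ == "__main__":
    class _ListReader:
        """Yield raw items from an in-memory list."""
        def __init__(self, items):
            self._items = items
        def read(self, item=None):
            for raw in self._items:
                yield raw
    class _UpperTransformer:
        """Uppercase the entry payload."""
        def apply(self, stream_entry):
            stream_entry.entry = stream_entry.entry.upper()
            return stream_entry
    class _PrintWriter:
        """Print each written entry."""
        def write(self, stream_entry):
            print(stream_entry.entry)
    stream = DataStream(
        readers=[_ListReader(["alpha", "beta"])],
        writers=[_PrintWriter()],
        transformers=[_UpperTransformer()],
    )
    for processed in stream.process():
        pass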
|
StarcoderdataPython
|
3317552
|
<gh_stars>0
from pathlib import Path
from logging import ERROR, INFO, DEBUG
# Paths
path_working_directory = Path(__file__).parent.parent
path_credentials_directory = path_working_directory / 'credentials'
path_credentials_directory.mkdir(parents=True, exist_ok=True)
path_data_directory = path_working_directory / 'data'
path_data_directory.mkdir(parents=True, exist_ok=True)
path_logs_directory = path_working_directory / 'logs'
path_logs_directory.mkdir(parents=True, exist_ok=True)
# Scenario: Sync Groups and Zulip
sync_groups_and_zulip = {
'logging_level': INFO,
'google_domain': '',
'mandatory_members': [],
'mandatory_streams': [],
'sleep_time': 60 * 10
}
# Scenario: Create Google Groups
create_google_groups = {
'logging_level': INFO,
'google_domain': '',
'user_filter_query': '',
'mandatory_members': []
}
# Scenario: Get Users From Google
get_users_from_google = {
'logging_level': INFO,
'google_domain': '',
'user_filter_query': ''
}
# Scenario: Sync Google Sheets and Zulip
sync_sheets_and_zulip = {
'logging_level': INFO,
'spreadsheet_id': '',
'range_names': [],
'mandatory_members': [],
'sleep_time': 60 * 30
}
# Scenario: Sync Trello and Zulip
sync_trello_and_zulip = {
'logging_level': INFO,
'mandatory_members': [],
'sleep_time': 60 * 10
}
# Scenario: Sync Sheets and Groups
sync_sheets_and_groups = {
'logging_level': INFO,
'spreadsheet_id': '',
'range_names': [],
'mandatory_members': [],
'google_domain': '',
'sleep_time': 60 * 30
}
# Scenario: Get Users From Zulip
get_users_from_zulip = {
'logging_level': DEBUG
}
|
StarcoderdataPython
|
72849
|
<filename>grortir/externals/__init__.py<gh_stars>0
"""Package contains modified external modules."""
# pylint: skip-file
|
StarcoderdataPython
|
1895401
|
# Author:
# <NAME> <<EMAIL>>
#
# License: BSD 3 clause
"""
cli
"""
from __future__ import print_function, division, absolute_import
import argparse
import splinart as spl
import numpy as np
def circle(img):
"""
circle
"""
def xs_func():
"""
xs function
"""
nsamples = 500
return (np.random.random() + 2 * np.pi * np.linspace(0, 1, nsamples))%(2*np.pi)
theta, path = spl.circle([.5, .5], .3, npoints=40)
spl.update_img(img, path, xs_func, nrep=4000, x=theta, scale_value=.00005)
def line(img):
"""
line
"""
def xs_func():
"""
xs function
"""
nsamples = 500
return .001*np.random.random() + np.linspace(beg+.1, end-.1, nsamples)
beg, end = .2, .8
path = spl.line(beg, end, .5)
spl.update_img(img, path, xs_func, nrep=1000, periodic=False)
def main():
"""
cli
"""
parser = argparse.ArgumentParser(description="Splinart generator")
parser.add_argument('-f', '--filename', type=str,
help="filename where the output is stored",
default='output.png')
parser.add_argument('--path', type=str,
help="path where to store the results",
default='.')
parser.add_argument('-s', '--shape', type=str,
help='apply spline on this shape',
default='circle',
choices=['circle', 'line'])
parser.add_argument('--show',
help='show the result using matplotlib',
action='store_true')
args = parser.parse_args()
img_size, channels = 1000, 4
img = np.ones((img_size, img_size, channels), dtype=np.float32)
if args.shape == 'circle':
circle(img)
else:
line(img)
if args.show:
spl.show_img(img)
else:
spl.save_img(img, args.path, args.filename)
|
StarcoderdataPython
|
4987020
|
# -*- coding: utf-8 -*-
__all__ = ["TimeSeriesKMeans", "TimeSeriesKMedoids"]
from sktime.clustering._k_means import TimeSeriesKMeans
from sktime.clustering._k_medoids import TimeSeriesKMedoids
|
StarcoderdataPython
|
8009716
|
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
import numpy as np
num_of_paths=6
max_node_path=4
grid_size_y = complex(0, max_node_path)
grid_size_x = complex(0, num_of_paths)
x = 0
y = num_of_paths*0.2
grid = np.mgrid[x:y:grid_size_x, x:y:grid_size_y].reshape(2, -1).T
grid = grid[::-1]
# print(grid)
# plt.plot(grid[:,0], grid[:,1], 'ro')
# plt.show()
|
StarcoderdataPython
|
3577545
|
# !/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
from base import FileIO
from datagit.analyze.bp import GitHubAnalysisOrchestrator
from datagit.graph.svc import GraphSocialNetwork
from datagit.navigate.bp import GitHubNavigationAPI
def generate_output_path(issue_number_x, issue_number_y) -> str:
filename = f"G_{issue_number_x}-{issue_number_y}_SOCIAL-IBM.giz"
return os.path.join(os.environ['DESKTOP'], filename)
def main():
issue_number_x = 0
issue_number_y = 1700
IS_DEBUG = False
COLLECTION_NAME = "github-IBMCodeContent_src_20200104"
output_path = generate_output_path(issue_number_x, issue_number_y)
orchestrator = GitHubAnalysisOrchestrator(is_debug=IS_DEBUG)
social_analysis = orchestrator.distributions(collection_name=COLLECTION_NAME).social(write_to_file=True)
lines = ["digraph GitHubSNA {"]
api = GitHubNavigationAPI(is_debug=IS_DEBUG)
for issue in range(int(issue_number_x), int(issue_number_y)):
svcresult = api.navigate(COLLECTION_NAME).by_issue(issue_id=issue)
if svcresult:
lines += GraphSocialNetwork(is_debug=IS_DEBUG,
pattern=svcresult['pattern'],
d_index=svcresult['index'],
df_social_entity_analysis=social_analysis['ent'],
df_social_relationship_analysis=social_analysis['rel']).lines()
lines.append("}")
FileIO.lines_to_file(lines, output_path)
print('\n'.join([
"Wrote to File",
f"\tOutput Path: {output_path}"]))
if __name__ == "__main__":
import plac
plac.call(main)
|
StarcoderdataPython
|
3469286
|
cont = -1
num = 0
num2 = 0
soma = 0
while num != 999:
    num = int(input('Enter an integer (or type 999 to stop): '))
    cont += 1
    #print('{} +'.format(num), end=' ')
    if num != 999:
        soma = num + num2
        num2 = soma
print('The sum of the {} numbers entered is {}'.format(cont, soma))
|
StarcoderdataPython
|
3492925
|
<reponame>x06lan/mt
save={}
x=[int(i) for i in input().split()]
for i in range(x[0]):
tem=[int(i) for i in input().split()]
a=tem[0]
b=tem[1]
try:
save[a].append(b)
except:
save[a]=[]
save[a].append(b)
try:
save[b].append(a)
except:
save[b]=[]
save[b].append(a)
def mapfind(save, path, start, targe):
    #print(path,targe)
    out = False
    for i in save[path[0]]:
        if i == targe:
            return True
    for i in save[path[0]]:
        # skip the start node and nodes already on the current path,
        # otherwise the recursion never terminates on graphs with cycles
        if i == start or i in path:
            continue
        out = out or mapfind(save, [i] + path, start, targe)
    return out
print("Yes!" if mapfind(save,[x[1]],x[1],x[2]) else "No!")
|
StarcoderdataPython
|
8135018
|
<reponame>mszhanyi/PyMultiprocessDemo
from multiprocessing import Process, Queue
def f(q):
q.put('hello world')
def run_mp():
q = Queue()
p = Process(target=f, args=[q])
p.start()
print (q.get())
p.join()
if __name__ == '__main__':
    # guard the entry point so child processes do not re-run run_mp()
    # when the "spawn" start method is used (Windows, macOS)
    run_mp()
|
StarcoderdataPython
|
247729
|
from random import random, randint
from sklearn.ensemble import RandomForestRegressor
import mlflow
import mlflow.sklearn
import os
os.environ['MLFLOW_S3_ENDPOINT_URL'] = "http://your.minio-or-s3.domain"
os.environ['AWS_ACCESS_KEY_ID'] = "yourAccessKeyOrMinioUser"
os.environ['AWS_SECRET_ACCESS_KEY'] = "yourSecretKeyOrMinioPassword"
mlflow.set_tracking_uri("http://your.mlflow.domain")
experiment_name = "mlflow-test"
try:
experiment_id = mlflow.create_experiment(name=experiment_name)
except:
experiment_id = mlflow.get_experiment_by_name(name=experiment_name).experiment_id
with mlflow.start_run(experiment_id=experiment_id) as run:
params = {"n_estimators": 5, "random_state": 42}
sk_learn_rfr = RandomForestRegressor(**params)
# Log parameters and metrics using the MLflow APIs
mlflow.log_params(params)
mlflow.log_param("param_1", randint(0, 100))
mlflow.log_metrics({"metric_1": random(), "metric_2": random() + 1})
# Log the sklearn model and register as version 1
mlflow.sklearn.log_model(
sk_model=sk_learn_rfr,
artifact_path="sklearn-model",
registered_model_name="test_model"
)
mlflow.log_artifact(local_path = './entrypoint.sh')
|
StarcoderdataPython
|
9682618
|
import nltk, re
from sherlock_holmes import bohemia_ch1, bohemia_ch2, bohemia_ch3, boscombe_ch1, boscombe_ch2, boscombe_ch3
from preprocessing import preprocess_text
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import LatentDirichletAllocation
# preparing the text
corpus = [bohemia_ch1, bohemia_ch2, bohemia_ch3, boscombe_ch1, boscombe_ch2, boscombe_ch3]
preprocessed_corpus = [preprocess_text(chapter) for chapter in corpus]
# Update stop_list:
stop_list = ['mr','man','say','one','make','upon','see','look','could','bad']
# filtering topics for stop words
def filter_out_stop_words(corpus):
no_stops_corpus = []
for chapter in corpus:
no_stops_chapter = " ".join([word for word in chapter.split(" ") if word not in stop_list])
no_stops_corpus.append(no_stops_chapter)
return no_stops_corpus
filtered_for_stops = filter_out_stop_words(preprocessed_corpus)
# creating the bag of words model
bag_of_words_creator = CountVectorizer()
bag_of_words = bag_of_words_creator.fit_transform(filtered_for_stops)
# creating the tf-idf model
tfidf_creator = TfidfVectorizer(min_df = 0.2)
tfidf = tfidf_creator.fit_transform(preprocessed_corpus)
# creating the bag of words LDA model
lda_bag_of_words_creator = LatentDirichletAllocation(learning_method='online', n_components=10)
lda_bag_of_words = lda_bag_of_words_creator.fit_transform(bag_of_words)
# creating the tf-idf LDA model
lda_tfidf_creator = LatentDirichletAllocation(learning_method='online', n_components=10)
lda_tfidf = lda_tfidf_creator.fit_transform(tfidf)
print("~~~ Topics found by bag of words LDA ~~~")
for topic_id, topic in enumerate(lda_bag_of_words_creator.components_):
message = "Topic #{}: ".format(topic_id + 1)
message += " ".join([bag_of_words_creator.get_feature_names()[i] for i in topic.argsort()[:-5 :-1]])
print(message)
print("\n\n~~~ Topics found by tf-idf LDA ~~~")
for topic_id, topic in enumerate(lda_tfidf_creator.components_):
message = "Topic #{}: ".format(topic_id + 1)
message += " ".join([tfidf_creator.get_feature_names()[i] for i in topic.argsort()[:-5 :-1]])
print(message)
|
StarcoderdataPython
|
6460558
|
<reponame>allenai/HyBayes
import logging
import pymc3 as pm
import theano.tensor as tt
from theano.compile.ops import as_op
import numpy as np
from scipy import stats
logger = logging.getLogger('root')
def add_exp_uniform_normal_t_model(hierarchical_model):
"""
A student-t model with normal, uniform, exp priors for mu, sigma, nu parameters, respectively.
Credits of the implementation of this model in pymc3 belongs to
http://nbviewer.jupyter.org/github/JWarmenhoven/DBDA-python/blob/master/Notebooks/Chapter%2016.ipynb
For a discussion on this model and implementation on R refer to Chapter 16 in the book
'Doing Bayesian Data Analysis: A Tutorial with R, JAGS, and Stan', Second Edition, by <NAME> (2015).
"""
mean_y = np.mean([hierarchical_model.stats_y[i].mean for i in range(hierarchical_model.n_groups)])
sd_y = np.mean([hierarchical_model.stats_y[i].variance for i in range(hierarchical_model.n_groups)]) ** (0.5)
with pm.Model() as hierarchical_model.pymc_model:
nu = pm.Exponential("nu", 1 / 30) # mean = sd = 30
sigma = pm.Uniform("sigma", sd_y / 100, sd_y * 100, shape=hierarchical_model.n_groups)
mu = pm.Normal("mu", mean_y, (100 * sd_y), shape=hierarchical_model.n_groups)
observations = []
hierarchical_model.mu_parameter = "mu"
hierarchical_model.sigma_parameter = "sigma"
hierarchical_model.outlierness_parameter = "nu"
def add_observations():
with hierarchical_model.pymc_model:
for i in range(hierarchical_model.n_groups):
observations.append(pm.StudentT(f'y_{i}', nu=nu, mu=mu[i], sd=sigma[i], observed=hierarchical_model.y[i]))
hierarchical_model.add_observations_function = add_observations
|
StarcoderdataPython
|
1998371
|
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
#Code starts here
census = np.concatenate((data,new_record),axis=0)
age = census[:,0]
max_age = age.max()
min_age = age.min()
age_mean = round(age.mean(),2)
age_std = round(age.std(),2)
race_0 = census[census[:,2]==0]
race_1 = census[census[:,2]==1]
race_2 = census[census[:,2]==2]
race_3 = census[census[:,2]==3]
race_4 = census[census[:,2]==4]
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
race_lengths = np.array([len_0, len_1, len_2, len_3, len_4])
minority_race = int(race_lengths.argmin())
senior_citizens = census[census[:,0] > 60]
working_hours_sum = senior_citizens[:,6].sum()
senior_citizens_len = len(senior_citizens)
avg_working_hours = round(working_hours_sum/senior_citizens_len,2)
high = census[census[:,1]> 10]
low = census[census[:,1]<= 10]
avg_pay_high = round(high[:,7].mean(),2)
avg_pay_low = round(low[:,7].mean(),2)
|
StarcoderdataPython
|
1794265
|
<filename>chap18-functions/functions_00.py
# This function takes no arguments
def make_noise() :
print("I am a noisy kid")
def feed_kid() :
print("Feed my favorite daughter Sonal.")
return "Sonal and Daddy are happy!!"
# This function takes no arguments
def drive_kids() :
print("Get a van that can hold 30 kids.")
print("Sure thing.")
print("I got the van.")
print("There's only 6 kids.")
print("Oh no!!!!")
# This function takes one argument
def drive_a_kid(kid) :
print("Hello {}".format(kid))
kid01 = "sonal"
kid02 = "nikita"
# This function takes two arguments
def drive_two_kids(arg01, arg02) :
print("Start of function with two arguments")
print("Good morning {}".format(arg01))
print("Good morning {}".format(arg02))
print("End of function with two arguments")
drive_two_kids(kid01, kid02)
# This function takes no arguments
def main():
# Goal is to learn functions
msg = "Goal is to learn functions"
print(msg)
# Invoke a function with no arguments
make_noise()
# Invoke function
print(feed_kid())
drive_kids()
kid_16 = "nikita"
kid_15 = "anusha"
kid_20 = "elisa"
    # On Monday we drive Anusha
drive_a_kid(kid_15)
    # On Tuesday we drive Nikita
drive_a_kid(kid_16)
# On Friday we drive Elisa
drive_a_kid(kid_20)
drive_two_kids(kid_16, kid_15)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1893091
|
#coding: utf-8
from flask import Blueprint, request
from flask_restful import Resource, Api
from HTTPJsonRule.response import SaveResponse
API_VERSION_V1 = 1
API_VERSION = API_VERSION_V1
api_v1_bp = Blueprint('api_v1', __name__)
api_v1 = Api(api_v1_bp)
class HTTPSave(Resource):
def post(self):
        payload = request.get_json(silent=True)
        if payload is not None:
            return SaveResponse(payload)
api_v1.add_resource(HTTPSave, '/save')
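# --- Illustrative wiring (assumes a Flask `app` object exists elsewhere) ---
# app.register_blueprint(api_v1_bp, url_prefix='/api/v{}'.format(API_VERSION))
# A client could then POST JSON to /api/v1/save to reach HTTPSave.post().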
|
StarcoderdataPython
|
3329261
|
<reponame>matchd-ch/matchd-backend
from django.contrib.auth.models import AnonymousUser
import pytest
from api.tests.helper.node_helper import assert_node_field, assert_node_id
@pytest.mark.django_db
def test_query(query_job_types, job_type_objects):
data, errors = query_job_types(AnonymousUser())
assert errors is None
assert data is not None
edges = data.get('jobTypes').get('edges')
assert edges is not None
assert len(edges) == len(job_type_objects)
assert_node_id(edges[0].get('node'), 'JobType', job_type_objects[1].id)
assert_node_id(edges[1].get('node'), 'JobType', job_type_objects[2].id)
assert_node_id(edges[2].get('node'), 'JobType', job_type_objects[0].id)
assert_node_id(edges[3].get('node'), 'JobType', job_type_objects[3].id)
assert_node_field(edges[0].get('node'), 'name', job_type_objects[1].name)
assert_node_field(edges[1].get('node'), 'name', job_type_objects[2].name)
assert_node_field(edges[2].get('node'), 'name', job_type_objects[0].name)
assert_node_field(edges[3].get('node'), 'name', job_type_objects[3].name)
@pytest.mark.django_db
def test_node_query(query_job_type_node, job_type_objects):
data, errors = query_job_type_node(AnonymousUser(), job_type_objects[3].id)
assert errors is None
assert data is not None
node = data.get('node')
assert node is not None
assert_node_id(node, 'JobType', job_type_objects[3].id)
assert_node_field(node, 'name', job_type_objects[3].name)
|
StarcoderdataPython
|
170568
|
<gh_stars>0
# Copyright 2020 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime as dt
import logging
import time
import repokid.hooks
from repokid.role import Role, Roles
from repokid.utils.dynamo import (
find_role_in_cache,
get_role_data,
role_ids_for_account,
set_role_data,
)
from tabulate import tabulate
from tqdm import tqdm
LOGGER = logging.getLogger("repokid")
def _schedule_repo(account_number, dynamo_table, config, hooks):
"""
Schedule a repo for a given account. Schedule repo for a time in the future (default 7 days) for any roles in
the account with repoable permissions.
"""
scheduled_roles = []
roles = Roles(
[
Role(get_role_data(dynamo_table, roleID))
for roleID in tqdm(role_ids_for_account(dynamo_table, account_number))
]
)
scheduled_time = int(time.time()) + (
86400 * config.get("repo_schedule_period_days", 7)
)
for role in roles:
if not role.aa_data:
LOGGER.warning("Not scheduling %s; missing Access Advisor data", role.arn)
continue
if not role.repoable_permissions > 0:
LOGGER.debug("Not scheduling %s; no repoable permissions", role.arn)
continue
if role.repo_scheduled:
LOGGER.debug(
"Not scheduling %s; already scheduled for %s",
role.arn,
role.repo_scheduled,
)
continue
role.repo_scheduled = scheduled_time
# freeze the scheduled perms to whatever is repoable right now
set_role_data(
dynamo_table,
role.role_id,
{
"RepoScheduled": scheduled_time,
"ScheduledPerms": role.repoable_services,
},
)
scheduled_roles.append(role)
LOGGER.info(
"Scheduled repo for {} days from now for account {} and these roles:\n\t{}".format(
config.get("repo_schedule_period_days", 7),
account_number,
", ".join([r.role_name for r in scheduled_roles]),
)
)
repokid.hooks.call_hooks(hooks, "AFTER_SCHEDULE_REPO", {"roles": scheduled_roles})
def _show_scheduled_roles(account_number, dynamo_table):
"""
Show scheduled repos for a given account. For each scheduled show whether scheduled time is elapsed or not.
"""
roles = Roles(
[
Role(get_role_data(dynamo_table, roleID))
for roleID in tqdm(role_ids_for_account(dynamo_table, account_number))
]
)
# filter to show only roles that are scheduled
roles = roles.filter(active=True)
roles = [role for role in roles if (role.repo_scheduled)]
header = ["Role name", "Scheduled", "Scheduled Time Elapsed?"]
rows = []
curtime = int(time.time())
for role in roles:
rows.append(
[
role.role_name,
dt.fromtimestamp(role.repo_scheduled).strftime("%Y-%m-%d %H:%M"),
role.repo_scheduled < curtime,
]
)
print(tabulate(rows, headers=header))
def _cancel_scheduled_repo(account_number, dynamo_table, role_name=None, is_all=None):
"""
Cancel scheduled repo for a role in an account
"""
if not is_all and not role_name:
LOGGER.error("Either a specific role to cancel or all must be provided")
return
if is_all:
roles = Roles(
[
Role(get_role_data(dynamo_table, roleID))
for roleID in role_ids_for_account(dynamo_table, account_number)
]
)
# filter to show only roles that are scheduled
roles = [role for role in roles if (role.repo_scheduled)]
for role in roles:
set_role_data(
dynamo_table, role.role_id, {"RepoScheduled": 0, "ScheduledPerms": []}
)
LOGGER.info(
"Canceled scheduled repo for roles: {}".format(
", ".join([role.role_name for role in roles])
)
)
return
role_id = find_role_in_cache(dynamo_table, account_number, role_name)
if not role_id:
LOGGER.warn(
"Could not find role with name {} in account {}".format(
role_name, account_number
)
)
return
role = Role(get_role_data(dynamo_table, role_id))
if not role.repo_scheduled:
LOGGER.warn(
"Repo was not scheduled for role {} in account {}".format(
role.role_name, account_number
)
)
return
set_role_data(
dynamo_table, role.role_id, {"RepoScheduled": 0, "ScheduledPerms": []}
)
LOGGER.info(
"Successfully cancelled scheduled repo for role {} in account {}".format(
role.role_name, role.account
)
)
|
StarcoderdataPython
|
3425787
|
# Copyright (C) 2010-2011 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Dynamics.MetaBlockConnectable import MetaBlockConnectable
class BlockOutputReference(MetaBlockConnectable):
"""References output from another block at the meta dynamics level. The other block must be a BlockReference in containing block.
"""
def __init__(self, metaBlockOutput0=None, metaBlockReference0=None, *args, **kw_args):
"""Initialises a new 'BlockOutputReference' instance.
@param metaBlockOutput0:
@param metaBlockReference0:
"""
self._metaBlockOutput0 = None
self.metaBlockOutput0 = metaBlockOutput0
self._metaBlockReference0 = []
self.metaBlockReference0 = [] if metaBlockReference0 is None else metaBlockReference0
super(BlockOutputReference, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["metaBlockOutput0", "metaBlockReference0"]
_many_refs = ["metaBlockReference0"]
def getmetaBlockOutput0(self):
return self._metaBlockOutput0
def setmetaBlockOutput0(self, value):
if self._metaBlockOutput0 is not None:
filtered = [x for x in self.metaBlockOutput0.BlockOutputReference if x != self]
self._metaBlockOutput0._BlockOutputReference = filtered
self._metaBlockOutput0 = value
if self._metaBlockOutput0 is not None:
if self not in self._metaBlockOutput0._BlockOutputReference:
self._metaBlockOutput0._BlockOutputReference.append(self)
metaBlockOutput0 = property(getmetaBlockOutput0, setmetaBlockOutput0)
def getmetaBlockReference0(self):
return self._metaBlockReference0
def setmetaBlockReference0(self, value):
for p in self._metaBlockReference0:
filtered = [q for q in p.blockOutputReference0 if q != self]
            p._blockOutputReference0 = filtered
for r in value:
if self not in r._blockOutputReference0:
r._blockOutputReference0.append(self)
self._metaBlockReference0 = value
metaBlockReference0 = property(getmetaBlockReference0, setmetaBlockReference0)
def addmetaBlockReference0(self, *metaBlockReference0):
for obj in metaBlockReference0:
if self not in obj._blockOutputReference0:
obj._blockOutputReference0.append(self)
self._metaBlockReference0.append(obj)
def removemetaBlockReference0(self, *metaBlockReference0):
for obj in metaBlockReference0:
if self in obj._blockOutputReference0:
obj._blockOutputReference0.remove(self)
self._metaBlockReference0.remove(obj)
|
StarcoderdataPython
|
99540
|
<reponame>DoggiKong/deco3801-project
import random
import string
from django.db import models
from django.contrib.auth.models import AbstractUser, BaseUserManager
from django.core import validators
from django.utils.translation import gettext_lazy as _
from systemsdb.models import hrms_system, chromatography, analytical_column
class UserManager(BaseUserManager):
"""
Model manager for User with email as username.
"""
use_in_migrations = True
def _create_user(self, email, first_name, last_name, password, username=None, **extra_fields):
"""
Create and save a User with the given email and password.
"""
if not email:
raise ValueError('Email must be given')
elif self.filter(email__iexact=email):
raise ValueError('An account with this email already exists')
if not first_name:
raise ValueError('First name must be given')
if not last_name:
raise ValueError('Last name must be given')
if not password:
raise ValueError('A password must be given')
# Generate a unique username if not given
if username is None:
pool = string.ascii_lowercase + string.digits
username = ''.join(random.choice(pool) for i in range(8))
while self.filter(username__iexact=username):
username = ''.join(random.choice(pool) for i in range(8))
# If username is given check that it is unique
elif self.filter(username__iexact=username):
raise ValueError('An account with this username already exists')
email = self.normalize_email(email)
username = self.model.normalize_username(username)
user = self.model(
username=username,
email=email,
first_name=first_name,
last_name=last_name,
**extra_fields
)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, first_name, last_name, password=<PASSWORD>, **extra_fields):
"""
Create and save a User with the given email and password.
"""
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(email, first_name, last_name, password, **extra_fields)
def create_superuser(self, email, first_name, last_name, password, **extra_fields):
"""
Create and save a User with the given email and password with superuser access.
"""
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(email, first_name, last_name, password, **extra_fields)
def get_by_natural_key(self, username):
return self.get(**{'{}__iexact'.format(self.model.USERNAME_FIELD): username})
class User(AbstractUser):
"""
User with email as username.
"""
email = models.EmailField(_('email address'), unique=True)
first_name = models.CharField(_('first name'), max_length=30)
last_name = models.CharField(_('last name'), max_length=150)
position = models.TextField(blank=True)
aid = models.ForeignKey('Affiliation', blank=True, null=True, on_delete=models.SET_NULL)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['first_name', 'last_name']
objects = UserManager()
def __unicode__(self):
return self.email
class Affiliation(models.Model):
"""
Affiliation associated with a User.
"""
aid = models.CharField(max_length=100, unique=True)
location = models.TextField()
def __str__(self):
return self.aid
class user_hrms(models.Model):
User_id = models.ForeignKey(User, on_delete=models.CASCADE)
HRMS_id = models.ForeignKey(hrms_system, on_delete=models.CASCADE)
class user_column(models.Model):
User_id = models.ForeignKey(User, on_delete=models.CASCADE)
Column_id = models.ForeignKey(analytical_column, on_delete=models.CASCADE)
class user_chroma(models.Model):
User_id = models.ForeignKey(User, on_delete=models.CASCADE)
Chroma_id = models.ForeignKey(chromatography, on_delete=models.CASCADE)
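# --- Illustrative usage (assumes AUTH_USER_MODEL is configured to point at this User model) ---
# user = User.objects.create_user(
#     email='ada@example.org', first_name='Ada', last_name='Lovelace',
#     password='a-strong-password',
# )
# A random 8-character username is generated because none was supplied.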
|
StarcoderdataPython
|
6553611
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import matplotlib.pyplot as plt
import numpy as np
import logging
import mxnet as mx
ALL = 'all'
class MetaLogger():
"""
Class for holding the parameters and losses for a MetaRepurposer and plotting those values.
"""
# TODO: Add support for logging loss/parameters after each batch rather than after every epoch
def __init__(self, alpha_plot=0.1):
self._losses = {}
self._parameters = {}
self.alpha_plot = alpha_plot
self.EPOCH = 'epoch'
self.TASK = 'task'
self.METASTEP = 'metastep'
def reset(self):
self._losses = {}
self._parameters = {}
@property
def num_tasks(self):
all_tasks = []
for ms in self._parameters.keys():
all_tasks.append([k for k in self._parameters[ms].keys() if isinstance(k, int)])
return len(np.unique(all_tasks))
def report(self, end, hook=None):
"""
Report results at end of epoch/task/metastep using hook function.
"""
if hook is None:
hook = logging.info
reporter = {self.EPOCH: self._report_epoch,
self.TASK: self._report_task,
self.METASTEP: self._report_metastep}
reporter[end](hook)
def _report_epoch(self, hook):
hook('\t\tMetastep: {}, Task: {}, Epoch: {}, Loss: {:.3f}'.format(
self.latest_metastep, self.latest_task,
len(self._losses[self.latest_metastep][self.latest_task]),
self._losses[self.latest_metastep][self.latest_task][-1]))
def _report_task(self, hook):
initial_loss = self._losses[self.latest_metastep][self.latest_task][0]
final_loss = self._losses[self.latest_metastep][self.latest_task][-1]
hook('\tMetastep: {}, Task: {}, Initial Loss: {:.3f}, Final Loss: {:.3f}, Loss delta: {:.3f}'.format(
self.latest_metastep, self.latest_task,
initial_loss, final_loss, final_loss - initial_loss))
def _report_metastep(self, hook):
loss_total = 0
for task_loss in self._losses[self.latest_metastep].values():
loss_total += task_loss[-1]
num_tasks = len(self._losses[self.latest_metastep].keys())
mean_loss = loss_total / num_tasks
hook('Metastep: {}, Num tasks: {}, Mean Loss: {:.3f}'.format(self.latest_metastep, num_tasks, mean_loss))
@property
def latest_metastep(self):
return max(self._losses.keys())
@property
def latest_task(self):
return max(self._losses[self.latest_metastep].keys())
def log_loss(self, metastep, task, epoch, loss):
"""
Append loss to dictionary.
"""
if metastep not in self._losses.keys():
self._losses[metastep] = {}
if task not in self._losses[metastep].keys():
self._losses[metastep][task] = []
self._losses[metastep][task].append(loss)
def log_params(self, metastep, task, epoch, net):
"""
Append parameters to dictionary.
"""
parameters = {}
for k, v in net.params.items():
parameters[k] = v.data().copy().asnumpy()
if metastep not in self._parameters.keys():
self._parameters[metastep] = {}
if task not in self._parameters[metastep].keys():
self._parameters[metastep][task] = []
self._parameters[metastep][task].append(parameters)
def log_initial_params(self, ms, net):
"""
Log parameters before any updates made.
"""
if ms in self._parameters.keys():
return
self.log_params(ms, ALL, -1, net)
def plot_losses(self, add_label=True, figsize=(20, 4)):
"""
Plot the logged losses.
"""
if self._losses == {}:
raise ValueError('No losses logged.')
fig, axes = plt.subplots(ncols=self.num_tasks, figsize=figsize)
fig.suptitle('Losses', fontsize=30, y=1.08)
for task in range(self.num_tasks):
axes[task].set_title('Task {}'.format(task))
axes[task].set_xlabel('epoch')
axes[task].set_ylabel('loss')
for ms in self._losses.keys():
for task in range(self.num_tasks):
if task in self._losses[ms].keys():
alpha = 1 if ms == max(self._losses.keys()) else self.alpha_plot
axes[task].plot(self._losses[ms][task], 'o-', alpha=alpha)
if add_label:
axes[task].text(x=0.05, y=self._losses[ms][task][0], s=ms)
def plot_params(self, param, W, loss_fn, figsize=(20, 6), gridsize=(100, 100), a=0.2, loss_samples=100):
"""
Plot the logged parameters.
"""
if self._parameters == {}:
raise ValueError('No parameters logged.')
fig, axes = plt.subplots(ncols=self.num_tasks, figsize=figsize)
for surface in range(self.num_tasks):
for ms in sorted(self._parameters.keys()):
for task in range(self.num_tasks):
if task in self._parameters[ms].keys() or ms == max(self._parameters.keys()):
temp_ms = ms
while task not in self._parameters[temp_ms].keys():
temp_ms -= 1
x = np.concatenate([p[param] for p in self._parameters[temp_ms][task]])
x = np.concatenate([self._parameters[temp_ms]['all'][0][param], x]).T
initial_point = self._parameters[temp_ms]['all'][0][param].T
assert x.shape[0] == 2, 'Dimension of parameter must be 2.'
label = task if ms == max(self._parameters.keys()) else None
alpha = 1 if ms == max(self._parameters.keys()) else self.alpha_plot
color = 'r' if surface == task else 'k'
axes[surface].plot(x[0], x[1], 'o-', color=color, label=label, alpha=alpha)
axes[surface].plot(initial_point[0], initial_point[1], 'o-', color='tab:pink', alpha=alpha)
axes[surface].legend()
axes[surface].set_title('Loss surface for Task {}'.format(surface))
# Plot loss surface
extent = axes[surface].get_xlim() + axes[surface].get_ylim()
grid = np.zeros(gridsize)
for i, w1 in enumerate(np.linspace(extent[0], extent[1], gridsize[0])):
for j, w2 in enumerate(np.linspace(extent[2], extent[3], gridsize[1])):
grid[j][i] = loss_fn(mx.nd.array([w1, w2]), W[surface], loss_samples)
axes[surface].imshow(grid, extent=extent, origin='lower')
# Set labels
axes[surface].set_xlabel(param + ' 1')
axes[surface].set_ylabel(param + ' 2')
fig.suptitle('Parameters', fontsize=30, y=0.9)
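# --- Minimal usage sketch (illustrative; values are made up) ---
# meta_logger = MetaLogger()
# meta_logger.log_loss(metastep=0, task=0, epoch=0, loss=1.25)
# meta_logger.log_loss(metastep=0, task=0, epoch=1, loss=0.98)
# meta_logger.report(end=meta_logger.EPOCH)   # logs the latest epoch loss
# Plotting (plot_losses / plot_params) additionally needs parameters recorded
# via log_params/log_initial_params with a gluon-style `net` exposing .params.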
|
StarcoderdataPython
|
3582192
|
"""Define the Celery tasks."""
from celery import chain
from celery.schedules import crontab
from celery.utils.log import get_task_logger
from {{ cookiecutter.project_name }}.celery.celery import app
logger = get_task_logger(__name__)
@app.task
def add(x, y):
return x + y
|
StarcoderdataPython
|
8093846
|
<gh_stars>1-10
"""Preprocess the Robot model
The model is available for download from
https://sketchfab.com/3d-models/uaeYu2fwakD1e1bWp5Cxu3XAqrt
The Python Imaging Library is required
pip install pillow
"""
from __future__ import print_function
import json
import os
import zipfile
from PIL import Image
from .utils.gltf import dump_obj_data
SRC_FILENAME = "robo_obj_pose4.zip"
DST_DIRECTORY = "../assets/robot"
def process_meshes(zip_file):
gltf = json.loads(zip_file.read("scene.gltf"))
buffer = zip_file.read("scene.bin")
for mesh_index in range(len(gltf["meshes"])):
obj_data = dump_obj_data(gltf, buffer, mesh_index)
filename = "robot{}.obj".format(mesh_index)
filepath = os.path.join(DST_DIRECTORY, filename)
with open(filepath, "w") as f:
f.write(obj_data)
def load_image(zip_file, filename):
with zip_file.open(filename) as f:
image = Image.open(f)
image = image.transpose(Image.FLIP_TOP_BOTTOM)
return image
def save_image(image, filename, size=512):
if max(image.size) > size:
image = image.resize((size, size), Image.LANCZOS)
filepath = os.path.join(DST_DIRECTORY, filename)
image.save(filepath, rle=True)
def process_images(zip_file):
basecolor_path = "textures/material_0_baseColor.jpeg"
emission_path = "textures/material_0_emissive.jpeg"
packed_path = "textures/material_0_metallicRoughness.png"
basecolor_image = load_image(zip_file, basecolor_path)
save_image(basecolor_image, "robot_basecolor.tga")
emission_image = load_image(zip_file, emission_path)
save_image(emission_image, "robot_emission.tga")
packed_image = load_image(zip_file, packed_path)
_, roughness_image, _, _ = packed_image.convert("RGBA").split()
save_image(roughness_image, "robot_roughness.tga")
def main():
if not os.path.exists(DST_DIRECTORY):
os.makedirs(DST_DIRECTORY)
with zipfile.ZipFile(SRC_FILENAME) as zip_file:
process_meshes(zip_file)
process_images(zip_file)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
4810090
|
__author__ = 'Mario'
omega1 = [[-5.01, -8.12, -3.68],
[-5.43, -3.48, -3.54],
[1.08, -5.52, 1.66],
[0.86, -3.78, -4.11],
[-2.67, 0.63, 7.39],
[4.94, 3.29, 2.08],
[-2.51, 2.09, -2.59],
[-2.25, -2.13, -6.94],
[5.56, 2.86, -2.26],
[1.03, -3.33, 4.33]]
omega2 = [[-0.91, -0.18, -0.05],
[1.30, -2.06, -3.53],
[-7.75, -4.54, -0.95],
[-5.47, 0.50, 3.92],
[6.14, 5.72, -4.85],
[3.60, 1.26, 4.36],
[5.37, -4.63, -3.65],
[7.18, 1.46, -6.66],
[-7.39, 1.17, 6.30],
[-7.50, -6.32, -0.31]]
omega3 = [[5.35, 2.26, 8.13],
[5.12, 3.22, -2.66],
[-1.34, -5.31, -9.87],
[4.48, 3.42, 5.19],
[7.11, 2.39, 9.21],
[7.17, 4.33, -0.98],
[5.75, 3.97, 6.65],
[0.77, 0.27, 2.41],
[0.90, -0.43, -8.71],
[3.52, -0.36, 6.43]]
|
StarcoderdataPython
|
9632976
|
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras import Model
class Actor(Model):
def __init__(self,
action_dim,
action_lb=None,
action_ub=None,
hidden_size=(400, 300),
name='Actor'):
super(Actor, self).__init__(name=name)
last_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
self.action_dim = action_dim
self.action_lb = action_lb
self.action_ub = action_ub
self.d1 = Dense(hidden_size[0], activation='relu', name="L1")
self.d2 = Dense(hidden_size[1], activation='relu', name="L2")
self.d3 = Dense(action_dim, name="L3", kernel_initializer=last_init)
def call(self, state):
x = self.d1(state)
x = self.d2(x)
action = self.d3(x)
if self.action_lb is not None and self.action_ub is not None:
mid = (self.action_lb + self.action_ub) / 2
span = (self.action_ub - self.action_lb) / 2
action = span * tf.nn.tanh(action) + mid
return action
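# --- Illustrative usage sketch (dimensions and bounds are made up) ---
# import numpy as np
# actor = Actor(action_dim=2,
#               action_lb=np.array([-1.0, -1.0]),
#               action_ub=np.array([1.0, 1.0]))
# dummy_states = tf.random.normal((4, 8))   # batch of 4 states, 8 features each
# actions = actor(dummy_states)             # shape (4, 2), squashed into [-1, 1]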
|
StarcoderdataPython
|
9684613
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.functions.default import default
import os
import status_params
import ambari_simplejson as json
from functions import calc_xmn_from_xms, ensure_unit_for_memory
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons.str_utils import string_set_intersection
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions import get_unique_id_and_date
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.expect import expect
from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
import sys, os
script_path = os.path.realpath(__file__).split(
'/services')[0] + '/../../../stack-hooks/before-INSTALL/scripts/ranger'
sys.path.append(script_path)
from setup_ranger_plugin_xml import generate_ranger_service_config
# server configurations
config = Script.get_config()
stack_root = Script.get_stack_root()
import os
import multiprocessing
cpu_count = multiprocessing.cpu_count()
mem_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
mem_gib = int(mem_bytes / (1024**3))
men_mib = int(mem_bytes / (1024**2))
regionserver_heapsize = int(men_mib * 0.6)
master_heapsize = int(men_mib * 0.2)
install_dir = stack_root + '/hbase'
download_url = config['configurations']['hbase-env']['download_url']
filename = download_url.split('/')[-1]
version_dir = filename.replace('.tar.gz', '').replace('.tgz', '')
phoenix_install_dir = stack_root + '/phoenix-server'
phoenix_download_url = config['configurations']['hbase-env'][
'phoenix_download_url']
phoenix_filename = phoenix_download_url.split('/')[-1]
phoenix_version_dir = phoenix_filename[:-7]
exec_tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY
stack_name = status_params.stack_name
agent_stack_retry_on_unavailability = config['ambariLevelParams'][
'agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/ambariLevelParams/agent_stack_retry_count",
int)
version = default("/commandParams/version", None)
etc_prefix_dir = "/etc/hbase"
stack_root = status_params.stack_root
stack_supports_ranger_kerberos = True
stack_supports_ranger_audit_db = False
# hadoop default parameters
hadoop_bin_dir = install_dir + '/bin'
hadoop_conf_dir = '/etc/hbase'
daemon_script = install_dir + "/bin/hbase-daemon.sh"
region_mover = install_dir + "/bin/region_mover.rb"
region_drainer = install_dir + "/bin/draining_servers.rb"
hbase_cmd = install_dir + "/bin/hbase"
hbase_max_direct_memory_size = None
hbase_conf_dir = status_params.hbase_conf_dir
limits_conf_dir = status_params.limits_conf_dir
hbase_user_nofile_limit = default(
"/configurations/hbase-env/hbase_user_nofile_limit", "1048576")
hbase_user_nproc_limit = default(
"/configurations/hbase-env/hbase_user_nproc_limit", "160000")
# no symlink for phoenix-server at this point
phx_daemon_script = format(phoenix_install_dir + '/bin/queryserver.py')
hbase_excluded_hosts = config['commandParams']['excluded_hosts']
hbase_drain_only = default("/commandParams/mark_draining_only", False)
hbase_included_hosts = config['commandParams']['included_hosts']
hbase_user = status_params.hbase_user
hbase_principal_name = config['configurations']['hbase-env'][
'hbase_principal_name']
smokeuser = config['configurations']['cluster-env']['smokeuser']
_authentication = config['configurations']['core-site'][
'hadoop.security.authentication']
security_enabled = config['configurations']['cluster-env']['security_enabled']
# this is "hadoop-metrics.properties" for 1.x stacks
metric_prop_file_name = "hadoop-metrics2-hbase.properties"
# not supporting 32 bit jdk.
java64_home = config['ambariLevelParams']['java_home']
java_version = expect("/ambariLevelParams/java_version", int)
log_dir = config['configurations']['hbase-env']['hbase_log_dir']
java_io_tmpdir = default("/configurations/hbase-env/hbase_java_io_tmpdir",
"/tmp")
master_heapsize = ensure_unit_for_memory(
config['configurations']['hbase-env']['hbase_master_heapsize'])
regionserver_heapsize = ensure_unit_for_memory(
config['configurations']['hbase-env']['hbase_regionserver_heapsize'])
regionserver_xmn_max = config['configurations']['hbase-env'][
'hbase_regionserver_xmn_max']
regionserver_xmn_percent = expect(
"/configurations/hbase-env/hbase_regionserver_xmn_ratio", float)
regionserver_xmn_size = calc_xmn_from_xms(
regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
parallel_gc_threads = expect(
"/configurations/hbase-env/hbase_parallel_gc_threads", int)
hbase_regionserver_shutdown_timeout = expect(
'/configurations/hbase-env/hbase_regionserver_shutdown_timeout', int, 30)
phoenix_hosts = default('/clusterHostInfo/phoenix_query_server_hosts', [])
phoenix_enabled = default('/configurations/hbase-env/phoenix_sql_enabled',
False)
has_phoenix = len(phoenix_hosts) > 0
pid_dir = status_params.pid_dir
tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
local_dir = config['configurations']['hbase-site']['hbase.local.dir']
ioengine_param = default(
'/configurations/hbase-site/hbase.bucketcache.ioengine', None)
client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
regionserver_jaas_config_file = format(
"{hbase_conf_dir}/hbase_regionserver_jaas.conf")
queryserver_jaas_config_file = format(
"{hbase_conf_dir}/hbase_queryserver_jaas.conf")
ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host',
[]) # is not passed when ganglia is not present
has_ganglia_server = not len(ganglia_server_hosts) == 0
if has_ganglia_server:
ganglia_server_host = ganglia_server_hosts[0]
set_instanceId = "false"
if 'cluster-env' in config[
'configurations'] and 'metrics_collector_external_hosts' in config[
'configurations']['cluster-env']:
ams_collector_hosts = config['configurations']['cluster-env'][
'metrics_collector_external_hosts']
set_instanceId = "true"
else:
ams_collector_hosts = ",".join(
default("/clusterHostInfo/metrics_collector_hosts", []))
has_metric_collector = not len(ams_collector_hosts) == 0
metric_collector_port = None
if has_metric_collector:
if 'cluster-env' in config['configurations'] and \
'metrics_collector_external_port' in config['configurations']['cluster-env']:
metric_collector_port = config['configurations']['cluster-env'][
'metrics_collector_external_port']
else:
metric_collector_web_address = default(
"/configurations/ams-site/timeline.metrics.service.webapp.address",
"0.0.0.0:6188")
if metric_collector_web_address.find(':') != -1:
metric_collector_port = metric_collector_web_address.split(':')[1]
else:
metric_collector_port = '6188'
if default("/configurations/ams-site/timeline.metrics.service.http.policy",
"HTTP_ONLY") == "HTTPS_ONLY":
metric_collector_protocol = 'https'
else:
metric_collector_protocol = 'http'
metric_truststore_path = default(
"/configurations/ams-ssl-client/ssl.client.truststore.location", "")
metric_truststore_type = default(
"/configurations/ams-ssl-client/ssl.client.truststore.type", "")
metric_truststore_password = default(
"/configurations/ams-ssl-client/ssl.client.truststore.password", "")
host_in_memory_aggregation = default(
"/configurations/ams-site/timeline.metrics.host.inmemory.aggregation",
True)
host_in_memory_aggregation_port = default(
"/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port",
61888)
pass
metrics_report_interval = default(
"/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
metrics_collection_period = default(
"/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
host_in_memory_aggregation = default(
"/configurations/ams-site/timeline.metrics.host.inmemory.aggregation",
True)
host_in_memory_aggregation_port = default(
"/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port",
61888)
is_aggregation_https_enabled = False
if default(
"/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.http.policy",
"HTTP_ONLY") == "HTTPS_ONLY":
host_in_memory_aggregation_protocol = 'https'
is_aggregation_https_enabled = True
else:
host_in_memory_aggregation_protocol = 'http'
# if hbase is selected the hbase_regionserver_hosts, should not be empty, but still default just in case
if 'datanode_hosts' in config['clusterHostInfo']:
rs_hosts = default(
'/clusterHostInfo/hbase_regionserver_hosts',
'/clusterHostInfo/datanode_hosts'
) # if hbase_regionserver_hosts not given it is assumed that region servers on same nodes as slaves
else:
rs_hosts = default('/clusterHostInfo/hbase_regionserver_hosts',
'/clusterHostInfo/all_hosts')
smoke_test_user = config['configurations']['cluster-env']['smokeuser']
smokeuser_principal = config['configurations']['cluster-env'][
'smokeuser_principal_name']
smokeuser_permissions = "RWXCA"
service_check_data = get_unique_id_and_date()
user_group = config['configurations']['cluster-env']["user_group"]
if security_enabled:
_hostname_lowercase = config['agentLevelParams']['hostname'].lower()
master_jaas_princ = config['configurations'][
'hbase-site']['hbase.master.kerberos.principal'].replace(
'_HOST', _hostname_lowercase)
master_keytab_path = config['configurations']['hbase-site'][
'hbase.master.keytab.file']
regionserver_jaas_princ = config['configurations']['hbase-site'][
'hbase.regionserver.kerberos.principal'].replace(
'_HOST', _hostname_lowercase)
_queryserver_jaas_princ = config['configurations']['hbase-site'][
'phoenix.queryserver.kerberos.principal']
if not is_empty(_queryserver_jaas_princ):
queryserver_jaas_princ = _queryserver_jaas_princ.replace(
'_HOST', _hostname_lowercase)
regionserver_keytab_path = config['configurations']['hbase-site'][
'hbase.regionserver.keytab.file']
queryserver_keytab_path = config['configurations']['hbase-site'][
'phoenix.queryserver.keytab.file']
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
kinit_path_local = get_kinit_path(
default('/configurations/kerberos-env/executable_search_paths', None))
if security_enabled:
kinit_cmd = format(
"{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
kinit_cmd_master = format(
"{kinit_path_local} -kt {master_keytab_path} {master_jaas_princ};")
master_security_config = format(
"-Djava.security.auth.login.config={hbase_conf_dir}/hbase_master_jaas.conf"
)
else:
kinit_cmd = ""
kinit_cmd_master = ""
master_security_config = ""
# log4j.properties
# HBase log4j settings
hbase_log_maxfilesize = default(
'configurations/hbase-log4j/hbase_log_maxfilesize', 256)
hbase_log_maxbackupindex = default(
'configurations/hbase-log4j/hbase_log_maxbackupindex', 20)
hbase_security_log_maxfilesize = default(
'configurations/hbase-log4j/hbase_security_log_maxfilesize', 256)
hbase_security_log_maxbackupindex = default(
'configurations/hbase-log4j/hbase_security_log_maxbackupindex', 20)
if (('hbase-log4j' in config['configurations'])
and ('content' in config['configurations']['hbase-log4j'])):
log4j_props = config['configurations']['hbase-log4j']['content']
else:
log4j_props = None
hbase_env_sh_template = config['configurations']['hbase-env']['content']
hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
hbase_staging_dir = "/hbase/staging"
# for create_hdfs_directory
hostname = config['agentLevelParams']['hostname']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env'][
'hdfs_principal_name']
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/clusterLevelParams/dfs_type", "")
import functools
# create partial functions with common arguments for every HdfsResource call
# to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
hdfs_resource_ignore_file=
"/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled=security_enabled,
keytab=hdfs_user_keytab,
kinit_path_local=kinit_path_local,
hadoop_bin_dir=hadoop_bin_dir,
hadoop_conf_dir=hadoop_conf_dir,
principal_name=hdfs_principal_name,
hdfs_site=hdfs_site,
default_fs=default_fs,
immutable_paths=get_not_managed_resources(),
dfs_type=dfs_type)
zookeeper_znode_parent = config['configurations']['hbase-site'][
'zookeeper.znode.parent']
hbase_zookeeper_quorum = config['configurations']['hbase-site'][
'hbase.zookeeper.quorum']
hbase_zookeeper_property_clientPort = config['configurations']['hbase-site'][
'hbase.zookeeper.property.clientPort']
hbase_zookeeper_data_dir = config['configurations']['hbase-site']['hbase.zookeeper.property.dataDir']
hbase_security_authentication = config['configurations']['hbase-site'][
'hbase.security.authentication']
hadoop_security_authentication = config['configurations']['core-site'][
'hadoop.security.authentication']
# ranger hbase plugin section start
# to get db connector jar
jdk_location = config['ambariLevelParams']['jdk_location']
# ranger host
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
xml_configurations_supported = True
# ranger hbase plugin enabled property
enable_ranger_hbase = default(
"/configurations/ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled",
"No")
enable_ranger_hbase = True if enable_ranger_hbase.lower() == 'yes' else False
# ranger hbase properties
if enable_ranger_hbase:
# get ranger policy url
policymgr_mgr_url = config['configurations']['admin-properties'][
'policymgr_external_url']
if xml_configurations_supported:
policymgr_mgr_url = config['configurations']['ranger-hbase-security'][
'ranger.plugin.hbase.policy.rest.url']
if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
# ranger hbase service/repository name
repo_name = str(config['clusterName']) + '_hbase'
repo_name_value = config['configurations']['ranger-hbase-security'][
'ranger.plugin.hbase.service.name']
if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
repo_name = repo_name_value
common_name_for_certificate = config['configurations'][
'ranger-hbase-plugin-properties']['common.name.for.certificate']
repo_config_username = config['configurations'][
'ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
ranger_plugin_properties = config['configurations'][
'ranger-hbase-plugin-properties']
policy_user = config['configurations']['ranger-hbase-plugin-properties'][
'policy_user']
repo_config_password = config['configurations'][
'ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
# ranger-env config
ranger_env = config['configurations']['ranger-env']
# create ranger-env config having external ranger credential properties
if not has_ranger_admin and enable_ranger_hbase:
external_admin_username = default(
'/configurations/ranger-hbase-plugin-properties/external_admin_username',
'admin')
external_admin_password = default(
'/configurations/ranger-hbase-plugin-properties/external_admin_password',
'admin')
external_ranger_admin_username = default(
'/configurations/ranger-hbase-plugin-properties/external_ranger_admin_username',
'ranger_admin')
external_ranger_admin_password = default(
'/configurations/ranger-hbase-plugin-properties/external_ranger_admin_password',
'<PASSWORD>!@#')
ranger_env = {}
ranger_env['admin_username'] = external_admin_username
ranger_env['admin_password'] = <PASSWORD>
ranger_env['ranger_admin_username'] = external_ranger_admin_username
ranger_env['ranger_admin_password'] = external_ranger_admin_password
if security_enabled:
master_principal = config['configurations']['hbase-site'][
'hbase.master.kerberos.principal']
hbase_ranger_plugin_config = {
'username':
repo_config_username,
'password':
repo_config_password,
'hadoop.security.authentication':
hadoop_security_authentication,
'hbase.security.authentication':
hbase_security_authentication,
'hbase.zookeeper.property.clientPort':
hbase_zookeeper_property_clientPort,
'hbase.zookeeper.quorum':
hbase_zookeeper_quorum,
'zookeeper.znode.parent':
zookeeper_znode_parent,
'commonNameForCertificate':
common_name_for_certificate,
'hbase.master.kerberos.principal':
master_principal if security_enabled else ''
}
if security_enabled:
hbase_ranger_plugin_config['policy.download.auth.users'] = hbase_user
hbase_ranger_plugin_config['tag.download.auth.users'] = hbase_user
hbase_ranger_plugin_config[
'policy.grantrevoke.auth.users'] = hbase_user
hbase_ranger_plugin_config['setup.additional.default.policies'] = "true"
hbase_ranger_plugin_config[
'default-policy.1.name'] = "Service Check User Policy for Hbase"
hbase_ranger_plugin_config[
'default-policy.1.resource.table'] = "ambarismoketest"
hbase_ranger_plugin_config['default-policy.1.resource.column-family'] = "*"
hbase_ranger_plugin_config['default-policy.1.resource.column'] = "*"
hbase_ranger_plugin_config[
'default-policy.1.policyItem.1.users'] = policy_user
hbase_ranger_plugin_config[
'default-policy.1.policyItem.1.accessTypes'] = "read,write,create"
custom_ranger_service_config = generate_ranger_service_config(
ranger_plugin_properties)
if len(custom_ranger_service_config) > 0:
hbase_ranger_plugin_config.update(custom_ranger_service_config)
hbase_ranger_plugin_repo = {
'isEnabled': 'true',
'configs': hbase_ranger_plugin_config,
'description': 'hbase repo',
'name': repo_name,
'type': 'hbase'
}
ranger_hbase_principal = None
ranger_hbase_keytab = None
if stack_supports_ranger_kerberos and security_enabled: # hbase master
ranger_hbase_principal = master_jaas_princ
ranger_hbase_keytab = master_keytab_path
elif stack_supports_ranger_kerberos and security_enabled: # hbase regionserver
ranger_hbase_principal = regionserver_jaas_princ
ranger_hbase_keytab = regionserver_keytab_path
xa_audit_hdfs_is_enabled = config['configurations']['ranger-hbase-audit'][
'xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
ssl_keystore_password = config['configurations']['ranger-hbase-policymgr-ssl'][
'xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
ssl_truststore_password = config['configurations'][
'ranger-hbase-policymgr-ssl'][
'xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
# need this to capture cluster name from where ranger hbase plugin is enabled
cluster_name = config['clusterName']
# ranger hbase plugin section end
create_hbase_home_directory = True
hbase_home_directory = format("/user/{hbase_user}")
atlas_hosts = default('/clusterHostInfo/atlas_server_hosts', [])
has_atlas = len(atlas_hosts) > 0
metadata_user = default('/configurations/atlas-env/metadata_user', None)
atlas_graph_storage_hostname = default(
'/configurations/application-properties/atlas.graph.storage.hostname',
None)
atlas_graph_storage_hbase_table = default(
'/configurations/application-properties/atlas.graph.storage.hbase.table',
None)
atlas_audit_hbase_tablename = default(
'/configurations/application-properties/atlas.audit.hbase.tablename', None)
if has_atlas:
zk_hosts_matches = string_set_intersection(atlas_graph_storage_hostname,
hbase_zookeeper_quorum)
atlas_with_managed_hbase = len(zk_hosts_matches) > 0
else:
atlas_with_managed_hbase = False
# Hbase Atlas hook configurations
atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file',
'atlas-application.properties')
enable_hbase_atlas_hook = default('/configurations/hbase-env/hbase.atlas.hook',
False)
hbase_atlas_hook_properties = default(
'/configurations/hbase-atlas-application-properties', {})
mount_table_xml_inclusion_file_full_path = None
mount_table_content = None
if 'viewfs-mount-table' in config['configurations']:
xml_inclusion_file_name = 'viewfs-mount-table.xml'
mount_table = config['configurations']['viewfs-mount-table']
if 'content' in mount_table and mount_table['content'].strip():
mount_table_xml_inclusion_file_full_path = os.path.join(
hbase_conf_dir, xml_inclusion_file_name)
mount_table_content = mount_table['content']
retryAble = default("/commandParams/command_retry_enabled", False)
|
StarcoderdataPython
|
4886593
|
<filename>test_unidecode.py
from unidecode import unidecode
print(unidecode("\u5317\u4EB0"))
print(unidecode("\u0c13\u0c35\u0c46\u0c28\u0c4d\u200c\u0c28\u0c3f"))
print(unidecode("సంతోషంగా"))
print("\u0c13\u0c35\u0c46\u0c28\u0c4d\u200c\u0c28\u0c3f")
|
StarcoderdataPython
|
1708074
|
import azure.mgmt.batchai as batchai
from azure.storage.file import FileService
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from datetime import datetime
import os
def setup_bai(
aad_client_id: str = None,
aad_secret: str = None,
aad_tenant: str = None,
subscription_id: str = None,
rg: str = None,
location: str = None,
) -> 'batchai.BatchAIManagementClient':
'''
Setup credentials, batch AI client, and the resource
group that the resources will be created in
All optional arguments will default to using the
associated environment variable if the parameter
is not provided.
Args:
        aad_client_id (str, optional): The client id you get
            from creating your Service Principal.
        aad_secret (str, optional): The secret key you get
            from creating your Service Principal.
        aad_tenant (str, optional): The tenant id that your
            Service Principal is created in.
subscription_id (str, optional): The subscription id
you wish for your Batch AI resources to be created
in.
rg (str, optional): The Resource Group you will
create your work in.
        location (str, optional): The location/region in which
            your Azure resources will be created.
Returns:
BatchAIManagementClient: An instance of the Batch AI
            management client that can be used to manage Batch
AI resources.
'''
aad_client_id = aad_client_id or os.getenv('AAD_CLIENT_ID')
aad_tenant = aad_tenant or os.getenv('AAD_TENANT')
aad_secret = aad_secret or os.getenv('AAD_SECRET')
subscription_id = subscription_id or os.getenv('SUBSCRIPTION_ID')
rg = rg or os.getenv('RESOURCE_GROUP')
location = location or os.getenv('REGION')
assert aad_client_id
assert aad_tenant
assert aad_secret
assert subscription_id
assert rg
assert location
creds = ServicePrincipalCredentials(
client_id=aad_client_id,
secret=aad_secret,
tenant=aad_tenant
)
resource_management_client = ResourceManagementClient(
credentials=creds,
subscription_id=subscription_id
)
resource = resource_management_client \
.resource_groups.create_or_update(rg, {
'location': location
})
batchai_client = batchai.BatchAIManagementClient(
credentials=creds,
subscription_id=subscription_id
)
return batchai_client
def get_cluster(
batchai_client: 'BatchAIManagementClient',
name: str,
rg: str = None,
ws: str = None
) -> 'batchai.models.Cluster':
'''
Get a BatchAI cluster by cluster name
All optional arguments will default to using the
associated environment variable if the parameter
is not provided.
Args:
batchai_client (BatchAIManagementClient): The
management client to manage Batch AI resources
name (str): The name of the cluster to get
rg (str, optional): The resource group to look for
the cluster under.
ws (str, optional): The Batch AI Workspace to look
for the cluster under.
Returns:
batchai.models.Cluster: The cluster object that
is provided by the BatchAI management sdk.
'''
rg = rg or os.getenv('RESOURCE_GROUP')
ws = ws or os.getenv('WORKSPACE')
assert rg
assert ws
return batchai_client.clusters.get(
resource_group_name=rg,
workspace_name=ws,
cluster_name=name
)
def create_experiment(
batchai_client: 'BatchAIManagementClient',
name: str,
rg: str = os.getenv('RESOURCE_GROUP'),
ws: str = os.getenv('WORKSPACE'),
) -> 'batchai.models.Experiment':
'''
Create a BatchAI Experiment (which is the logical
container for a job)
All optional arguments will default to using the
associated environment variable if the parameter
is not provided.
Args:
batchai_client (BatchAIManagementClient): The
management client to manage Batch AI resources
name (str): The name of the Experiment
rg (str, optional): The resource group to create
the experiment in.
ws (str, optional): The Batch AI Workspace to
create the experiment in.
Returns:
batchai.models.Experiment: The experiment object
that is provided by the BatchAI management sdk.
'''
return batchai_client.experiments.create(
resource_group_name=rg,
workspace_name=ws,
experiment_name=name
)
def create_job_params(
cluster: 'batchai.models.Cluster',
input_dirs: ['batchai.models.InputDirectory'],
output_dirs: ['batchai.models.OutputDirectory'],
container_image: str,
command_line: str,
job_prep_command_line: str = '',
node_count: int = 1,
cluster_mnt_path: str = None
):
'''
Create the parameter object for the Batch AI job.
All optional arguments will default to using the
associated environment variable if the parameter
is not provided.
Args:
cluster (batchai.models.Cluster): The cluster to
            create the parameters for.
input_dir (List(batchai.models.InputDirectory)):
A list of the input directories to setup.
output_dir (List(batchai.models.OutputDirectory)):
A list of the output directories to setup.
container_image (str): The container image to use
when running the job.
command_line (str): The command line to execute.
job_prep_command_line (str, optional): Optional
command line to execute during job_preparation.
node_count (int, optional): The number of nodes
to use for the job.
cluster_mnt_path (str, optional): The mnt path
of the file share on the cluster.
Returns:
batchai.models.JobCreateParameters: The Parameter
object to pass into the job during creation.
'''
cluster_mnt_path = cluster_mnt_path or \
os.getenv('CLUSTER_CONTAINER_MNT_PATH')
assert cluster_mnt_path
return batchai.models.JobCreateParameters(
cluster=batchai.models.ResourceId(id=cluster.id),
node_count=node_count,
input_directories=input_dirs,
output_directories=output_dirs,
std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'\
.format(cluster_mnt_path),
job_preparation=batchai.models.JobPreparation(
command_line=job_prep_command_line
),
container_settings=batchai.models.ContainerSettings(
image_source_registry=batchai.models.ImageSourceRegistry(
image=container_image
)
),
custom_toolkit_settings=batchai.models.CustomToolkitSettings(
command_line=command_line
)
)
def create_job(
batchai_client: 'BatchAIManagementClient',
job_name: str,
job_params: 'batchai.models.JobCreateParameters',
experiment_name: str,
rg: str = os.getenv('RESOURCE_GROUP'),
ws: str = os.getenv('WORKSPACE'),
async_job: bool = True
) -> 'batchai.models.Job':
'''
    Create a BatchAI Job under the given Experiment.
All optional arguments will default to using the
associated environment variable if the parameter
is not provided.
Args:
batchai_client (BatchAIManagementClient): The
management client to manage Batch AI resources
job_name (str): The name of the job.
job_params (JobCreateParameters): The parameters
to pass to the job.
job_experiment_name (str): The name of the
experiment to create the job under.
rg (str, optional): The resource group to create
the job in.
ws (str, optional): The Batch AI Workspace to
create the job in.
Returns:
batchai.models.Job: The Job object
that is provided by the BatchAI management sdk.
'''
job = batchai_client.jobs.create(
resource_group_name=rg,
workspace_name=ws,
experiment_name=experiment_name,
job_name=job_name,
parameters=job_params
)
if not async_job:
return job.result()
else:
return job
def create_workspace(
batchai_client: 'BatchAIManagementClient',
rg: str = None,
ws: str = None,
location: str = None
) -> 'batchai.models.WorkSpace':
'''
Create a BatchAI Workspace
All optional arguments will default to using the
associated environment variable if the parameter
is not provided.
Args:
batchai_client (BatchAIManagementClient): The
management client to manage Batch AI resources
rg (str, optional): The resource group to create
the workspace in.
ws (str, optional): The Batch AI Workspace to
create the job in.
        location (str, optional): The location/region in which
            your Workspace will be created.
Returns:
batchai.models.Workspace: The Workspace object
that is provided by the BatchAI management sdk.
'''
rg = rg or os.getenv('RESOURCE_GROUP')
ws = ws or os.getenv('WORKSPACE')
location = location or os.getenv('REGION')
assert rg
assert ws
assert location
return batchai_client \
.workspaces \
.create(rg, ws, location) \
.result()
def create_autoscale_cluster(
batchai_client: 'BatchAIManagementClient',
cluster_name: str,
vm_size: str = None,
vm_priority: str = None,
min_nodes: int = None,
max_nodes: int = None,
initial_nodes: int = None,
ws: str = None,
rg: str = None,
storage_account_name: str = None,
storage_account_key: str = None,
container_name: str = None,
cluster_mnt_path: str = None,
admin_user_name: str = None,
admin_user_password: str = None
) -> None:
'''
Create an autoscale Batch AI cluster
All optional arguments will default to using the
associated environment variable if the parameter
is not provided.
Args:
batchai_client (BatchAIManagementClient): The
management client to manage Batch AI resources
cluster_name (str): The name of the cluster you
wish to create.
vm_size (str, optional): The vm size of the
            cluster you wish to create.
vm_priority (str, optional): Choose between low
priority or dedicated.
min_nodes (int, optional): Minimum number of
nodes in the autoscale cluster.
max_nodes (int, optional): Maximum number of
nodes in the autoscale cluster.
initial_nodes (int, optional): Initial number
of nodes in the autoscale cluster.
ws (str, optional): The workspace to create the
cluster in.
rg (str, optional): The resource group to
create the cluster in.
storage_account_name (str, optional): The
storage account to use when mounting the
blob container.
storage_account_key (str, optional): The
key to use when mounting the blob container.
container_name (str, optional): The name of
the container to use in storage.
cluster_mnt_path (str, optional): The mnt path
of the file share on the cluster.
admin_user_name (str, optional): The username
of the user to create for accessing the
cluster.
admin_user_password (str, optional): The
            password of the user to create for accessing
the cluster.
Returns:
None
'''
vm_size = vm_size or \
os.getenv('CLUSTER_VM_SIZE')
vm_priority = vm_priority or \
os.getenv('CLUSTER_VM_PRIORITY')
min_nodes = min_nodes if type(min_nodes) is int else \
os.getenv('CLUSTER_MINIMUM_NODE_COUNT')
max_nodes = max_nodes if type(max_nodes) is int else \
os.getenv('CLUSTER_MAXIMUM_NODE_COUNT')
initial_nodes = initial_nodes if type(initial_nodes) is int else \
os.getenv('CLUSTER_INITIAL_NODE_COUNT')
ws = ws or os.getenv('WORKSPACE')
rg = rg or os.getenv('RESOURCE_GROUP')
storage_account_name = storage_account_name or \
os.getenv('STORAGE_ACCOUNT_NAME')
storage_account_key = storage_account_key or \
os.getenv('STORAGE_ACCOUNT_KEY')
container_name = container_name or \
os.getenv('AZURE_CONTAINER_NAME')
cluster_mnt_path = cluster_mnt_path or \
os.getenv('CLUSTER_CONTAINER_MNT_PATH')
admin_user_name = admin_user_name or \
os.getenv('ADMIN_USER_NAME')
    admin_user_password = admin_user_password or \
os.getenv('ADMIN_USER_PASSWORD')
assert vm_size
assert vm_priority
assert min_nodes or 0
assert max_nodes or 0
assert initial_nodes or 0
assert ws
assert rg
assert storage_account_name
assert storage_account_key
assert container_name
assert cluster_mnt_path
assert admin_user_name
assert admin_user_password
volumes = batchai.models.MountVolumes(
azure_blob_file_systems=[
batchai.models.AzureBlobFileSystemReference(
account_name=storage_account_name,
credentials=batchai.models.AzureStorageCredentialsInfo(
account_key=storage_account_key
),
container_name=container_name,
relative_mount_path=cluster_mnt_path
)
]
)
scale_settings = batchai.models.ScaleSettings(
auto_scale=batchai.models.AutoScaleSettings(
minimum_node_count=min_nodes,
maximum_node_count=max_nodes,
initial_node_count=initial_nodes
)
)
parameters = batchai.models.ClusterCreateParameters(
vm_size=vm_size,
vm_priority=vm_priority,
scale_settings=scale_settings,
node_setup=batchai.models.NodeSetup(
mount_volumes=volumes
),
user_account_settings=batchai.models.UserAccountSettings(
admin_user_name=admin_user_name,
            admin_user_password=admin_user_password
)
)
_ = batchai_client.clusters.create(
resource_group_name=rg,
workspace_name=ws,
cluster_name=cluster_name,
parameters=parameters
).result()
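# ---------------------------------------------------------------------------
# Minimal, illustrative usage sketch chaining the helpers above end to end.
# It assumes the environment variables the helpers read (AAD_*, SUBSCRIPTION_ID,
# RESOURCE_GROUP, REGION, WORKSPACE, STORAGE_ACCOUNT_*, AZURE_CONTAINER_NAME,
# CLUSTER_*, ADMIN_USER_*) are set; the cluster/experiment/job names and the
# container image are placeholders, not values required by the SDK.
if __name__ == '__main__':
    client = setup_bai()
    create_workspace(client)
    create_autoscale_cluster(client, cluster_name='demo-cluster')
    cluster = get_cluster(client, 'demo-cluster')
    create_experiment(client, 'demo-experiment')
    params = create_job_params(
        cluster=cluster,
        input_dirs=[],   # no mounted input directories for this toy job
        output_dirs=[],  # no output directories either
        container_image='python:3.7',
        command_line='python -c "print(42)"',
        node_count=1
    )
    job = create_job(
        client,
        job_name=datetime.utcnow().strftime('demo-job-%Y%m%d%H%M%S'),
        job_params=params,
        experiment_name='demo-experiment',
        async_job=False
    )
    print(job.name)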
|
StarcoderdataPython
|
234476
|
<gh_stars>0
from flask import Flask, render_template, flash, redirect, url_for
from app import app
from flask_cors import CORS, cross_origin
import random
# public API, allow all requests *
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
from app.utils.general import sanitize_input, convert_array_to_return_string
from app.utils.rand import pick_random_move, get_available_moves
from app.utils.suggest import convert_csv_to_Q, convert_input_to_key, get_index_of_max, get_available_moves, check_winner, get_max_values, compute_R, convert_Q_key_to_string_array
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html', title='Home')
# suggest random move
@app.route('/api/v1/rand/turn/<turn>/board/<board>')
@cross_origin()
def random_move(turn, board):
turn, board, ok = sanitize_input(turn,board)
key = convert_input_to_key(turn, board)
_, board_state = key
winner = check_winner(board_state)
if not winner:
index = pick_random_move(board)
if index > -1:
board[index] = turn
# check winner again.
board = convert_array_to_return_string(board)
key = convert_input_to_key(turn, board)
_, board_state = key
winner = check_winner(board_state)
return { "board" : board, "winner" : winner}
@app.route('/api/v1/turn/<turn>/board/<board>', methods=['GET'])
@cross_origin()
def suggest_move(turn,board):
file_path = 'Q.csv'
Q = convert_csv_to_Q(file_path)
turn, board, ok = sanitize_input(turn, board)
winner = None
if ok:
key = convert_input_to_key(turn, board)
state = key
turn, board_state = state
# check that there isn't a winner
winner = check_winner(board_state)
if winner == None:
# check to see if are any immediate winning or blocking moves
immediate_rewards = compute_R(state)
if max(immediate_rewards) > 0:
move_here = get_index_of_max(immediate_rewards)
else:
indices_possible_moves = get_available_moves(board_state)
# test to see if the state is in the Q.
valid_state = Q.get(state, False)
if valid_state:
rewards_of_moves = []
for index in indices_possible_moves:
                        # of the possible moves, get their Q scores
rewards_of_moves.append(valid_state[index])
# find the max values within a range to the max value
best_moves = get_max_values(rewards_of_moves)
# choose a random selection from these max values
move_here_value = random.choice(best_moves)
if move_here_value == 0:
move_here = random.choice(indices_possible_moves)
else:
for index in indices_possible_moves:
if valid_state[index] == move_here_value:
move_here = index
else:
move_here = random.choice(indices_possible_moves)
board = list(board_state)
board[move_here] = int(turn)
# check winner again.
board_state = tuple(board)
winner = check_winner(board_state)
board = convert_Q_key_to_string_array(board)
board = convert_array_to_return_string(board)
return { "board" : board, "winner": winner}
|
StarcoderdataPython
|
8112385
|
<reponame>RaghuSpaceRajan/bsuite-mdpp-merge
# python3
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""bsuite logging and image observation wrappers."""
from typing import Any, Dict, Sequence
from bsuite import environments
from bsuite.logging import base
import dm_env
from dm_env import specs
import numpy as np
from skimage import transform
# Keys that are present for all experiments. These are computed from within
# the `Logging` wrapper.
STANDARD_KEYS = frozenset(
['steps', 'episode', 'total_return', 'episode_len', 'episode_return'])
class Logging(dm_env.Environment):
"""Environment wrapper to track and log bsuite stats."""
def __init__(self,
env: environments.Environment,
logger: base.Logger,
log_by_step: bool = False,
log_every: bool = False):
"""Initializes the logging wrapper.
Args:
env: Environment to wrap.
logger: An object that records a row of data. This must have a `write`
method that accepts a dictionary mapping from column name to value.
log_by_step: Whether to log based on step or episode count (default).
log_every: Forces logging at each step or episode, e.g. for debugging.
"""
self._env = env
self._logger = logger
self._log_by_step = log_by_step
self._log_every = log_every
# Accumulating throughout experiment.
self._steps = 0
self._episode = 0
self._total_return = 0.0
# Most-recent-episode.
self._episode_len = 0
self._episode_return = 0.0
def flush(self):
if hasattr(self._logger, 'flush'):
self._logger.flush()
def reset(self):
timestep = self._env.reset()
self._track(timestep)
return timestep
def step(self, action):
timestep = self._env.step(action)
self._track(timestep)
return timestep
def action_spec(self):
return self._env.action_spec()
def observation_spec(self):
return self._env.observation_spec()
def _track(self, timestep: dm_env.TimeStep):
# Count transitions only.
if not timestep.first():
self._steps += 1
self._episode_len += 1
if timestep.last():
self._episode += 1
self._episode_return += timestep.reward or 0.0
self._total_return += timestep.reward or 0.0
# Log statistics periodically, either by step or by episode.
if self._log_by_step:
if _logarithmic_logging(self._steps) or self._log_every:
self._log_bsuite_data()
elif timestep.last():
if _logarithmic_logging(self._episode) or self._log_every:
self._log_bsuite_data()
# Perform bookkeeping at the end of episodes.
if timestep.last():
self._episode_len = 0
self._episode_return = 0.0
def _log_bsuite_data(self):
"""Log summary data for bsuite."""
data = dict(
# Accumulated data.
steps=self._steps,
episode=self._episode,
total_return=self._total_return,
# Most-recent-episode data.
episode_len=self._episode_len,
episode_return=self._episode_return,
)
# Environment-specific metadata used for scoring.
data.update(self._env.bsuite_info())
self._logger.write(data)
@property
def raw_env(self):
# Recursively unwrap until we reach the true 'raw' env.
wrapped = self._env
if hasattr(wrapped, 'raw_env'):
return wrapped.raw_env
return wrapped
def __getattr__(self, attr):
"""Delegate attribute access to underlying environment."""
return getattr(self._env, attr)
def _logarithmic_logging(episode: int, ratios: Sequence[float] = None) -> bool:
"""Returns `True` only at specific ratios of 10**exponent."""
if ratios is None:
ratios = [1., 1.2, 1.4, 1.7, 2., 2.5, 3., 4., 5., 6., 7., 8., 9., 10.]
exponent = np.floor(np.log10(np.maximum(1, episode)))
special_vals = [10**exponent * ratio for ratio in ratios]
return any(episode == val for val in special_vals)
class ImageObservation(dm_env.Environment):
"""Environment wrapper to convert observations to an image-like format."""
def __init__(self, env: dm_env.Environment, shape: Sequence[int]):
self._env = env
self._shape = shape
def observation_spec(self):
spec = self._env.observation_spec()
return specs.Array(shape=self._shape, dtype=spec.dtype, name=spec.name)
def action_spec(self):
return self._env.action_spec()
def reset(self):
timestep = self._env.reset()
return timestep._replace(
observation=to_image(self._shape, timestep.observation))
def step(self, action):
timestep = self._env.step(action)
return timestep._replace(
observation=to_image(self._shape, timestep.observation))
def __getattr__(self, attr):
"""Delegate attribute access to underlying environment."""
return getattr(self._env, attr)
def _small_state_to_image(shape: Sequence[int],
observation: np.ndarray) -> np.ndarray:
"""Converts a small state into an image-like format."""
result = np.empty(shape=shape, dtype=observation.dtype)
size = observation.size
flattened = observation.ravel()
# Explicitly handle small observation dimensions separately
if size == 1:
result[:] = flattened[0]
elif size == 2:
result[:, :shape[1] // 2] = flattened[0]
result[:, shape[1] // 2:] = flattened[1]
elif size == 3 or size == 4:
# Top-left.
result[:shape[0] // 2, :shape[1] // 2] = flattened[0]
# Top-right.
result[shape[0] // 2:, :shape[1] // 2] = flattened[1]
# Bottom-left.
result[:shape[0] // 2, shape[1] // 2:] = flattened[2]
# Bottom-right.
result[shape[0] // 2:, shape[1] // 2:] = flattened[-1]
else:
raise ValueError('Hand-crafted rule only for small state observation.')
return result
def _interpolate_to_image(shape: Sequence[int],
observation: np.ndarray) -> np.ndarray:
"""Converts observation to desired shape using an interpolation."""
result = np.empty(shape=shape, dtype=observation.dtype)
if len(observation.shape) == 1:
observation = np.expand_dims(observation, 0)
# Interpolate the image and broadcast over all trailing channels.
plane_image = transform.resize(observation, shape[:2], preserve_range=True)
while plane_image.ndim < len(shape):
plane_image = np.expand_dims(plane_image, -1)
result[:, :] = plane_image
return result
def to_image(shape: Sequence[int], observation: np.ndarray) -> np.ndarray:
"""Converts a bsuite observation into an image-like format.
Example usage, converting a 3-element array into a stacked Atari-like format:
observation = to_image((84, 84, 4), np.array([1, 2, 0]))
Args:
shape: A sequence containing the desired output shape (length >= 2).
observation: A numpy array containing the observation data.
Returns:
A numpy array with shape `shape` and dtype matching the dtype of
`observation`. The entries in this array are tiled from `observation`'s
entries.
"""
assert len(shape) >= 2
if observation.size <= 4:
return _small_state_to_image(shape, observation)
elif len(observation.shape) <= 2:
return _interpolate_to_image(shape, observation)
else:
raise ValueError(
'Cannot convert observation shape {} to desired shape {}'.format(
observation.shape, shape))
class RewardNoise(environments.Environment):
"""Reward Noise environment wrapper."""
def __init__(self,
env: environments.Environment,
noise_scale: float,
seed: int = None):
"""Builds the Reward Noise environment wrapper.
Args:
env: An environment whose rewards to perturb.
noise_scale: Standard deviation of gaussian noise on rewards.
seed: Optional seed for numpy's random number generator (RNG).
"""
super(RewardNoise, self).__init__()
self._env = env
self._noise_scale = noise_scale
self._rng = np.random.RandomState(seed)
def reset(self):
return self._env.reset()
def step(self, action):
return self._add_reward_noise(self._env.step(action))
def _add_reward_noise(self, timestep: dm_env.TimeStep):
if timestep.first():
return timestep
reward = timestep.reward + self._noise_scale * self._rng.randn()
return dm_env.TimeStep(
step_type=timestep.step_type,
reward=reward,
discount=timestep.discount,
observation=timestep.observation)
def observation_spec(self):
return self._env.observation_spec()
def action_spec(self):
return self._env.action_spec()
@property
def raw_env(self):
# Recursively unwrap until we reach the true 'raw' env.
wrapped = self._env
if hasattr(wrapped, 'raw_env'):
return wrapped.raw_env
return wrapped
def _step(self, action: int) -> dm_env.TimeStep:
raise NotImplementedError('Please call step() instead of _step().')
def _reset(self) -> dm_env.TimeStep:
raise NotImplementedError('Please call reset() instead of _reset().')
def bsuite_info(self) -> Dict[str, Any]:
return self._env.bsuite_info()
def __getattr__(self, attr):
"""Delegate attribute access to underlying environment."""
return getattr(self._env, attr)
class RewardScale(environments.Environment):
"""Reward Scale environment wrapper."""
def __init__(self,
env: environments.Environment,
reward_scale: float,
seed: int = None):
"""Builds the Reward Scale environment wrapper.
Args:
env: Environment whose rewards to rescale.
reward_scale: Rescaling for rewards.
seed: Optional seed for numpy's random number generator (RNG).
"""
super(RewardScale, self).__init__()
self._env = env
self._reward_scale = reward_scale
self._rng = np.random.RandomState(seed)
def reset(self):
return self._env.reset()
def step(self, action):
return self._rescale_rewards(self._env.step(action))
def _rescale_rewards(self, timestep: dm_env.TimeStep):
if timestep.first():
return timestep
reward = timestep.reward * self._reward_scale
return dm_env.TimeStep(
step_type=timestep.step_type,
reward=reward,
discount=timestep.discount,
observation=timestep.observation)
def observation_spec(self):
return self._env.observation_spec()
def action_spec(self):
return self._env.action_spec()
def _step(self, action: int) -> dm_env.TimeStep:
raise NotImplementedError('Please call step() instead of _step().')
def _reset(self) -> dm_env.TimeStep:
raise NotImplementedError('Please call reset() instead of _reset().')
@property
def raw_env(self):
# Recursively unwrap until we reach the true 'raw' env.
wrapped = self._env
if hasattr(wrapped, 'raw_env'):
return wrapped.raw_env
return wrapped
def bsuite_info(self) -> Dict[str, Any]:
return self._env.bsuite_info()
def __getattr__(self, attr):
"""Delegate attribute access to underlying environment."""
return getattr(self._env, attr)
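# ---------------------------------------------------------------------------
# Minimal, illustrative usage sketch composing the wrappers above. It assumes
# `bsuite.load_from_id` and the 'catch/0' id from the surrounding bsuite
# package; the logger is a tiny stand-in that only implements the `write`
# interface used by `Logging`.
if __name__ == '__main__':
    import bsuite

    class _PrintLogger:
        def write(self, data: Dict[str, Any]) -> None:
            print(data)

    raw_env = bsuite.load_from_id('catch/0')
    env = Logging(RewardNoise(raw_env, noise_scale=0.1, seed=0),
                  logger=_PrintLogger(),
                  log_every=True)
    timestep = env.reset()
    while not timestep.last():
        timestep = env.step(0)  # action 0 just drives the episode to the end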
|
StarcoderdataPython
|
3321148
|
# #####################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
# #####################################################################################################################
from aws_cdk import (
aws_s3 as s3,
core,
)
from lib.blueprints.byom.pipeline_definitions.deploy_actions import (
create_baseline_job_lambda,
sagemaker_layer,
create_invoke_lambda_custom_resource,
)
from lib.blueprints.byom.pipeline_definitions.templates_parameters import (
ParameteresFactory as pf,
ConditionsFactory as cf,
)
from lib.blueprints.byom.pipeline_definitions.sagemaker_monitor_role import create_sagemaker_monitor_role
from lib.blueprints.byom.pipeline_definitions.sagemaker_model_monitor_construct import SageMakerModelMonitor
class ModelMonitorStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, monitoring_type: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# validate the provided monitoring_type
if monitoring_type not in ["DataQuality", "ModelQuality"]:
raise ValueError(
(
f"The {monitoring_type} is not valid. Currently supported Monitoring Types are: "
f"['DataQuality'|'ModelQuality']"
)
)
# Baseline/Monitor attributes, this will be updated based on the monitoring_type
self.baseline_attributes = dict()
self.monitor_attributes = dict()
# Parameteres #
blueprint_bucket_name = pf.create_blueprint_bucket_name_parameter(self)
assets_bucket_name = pf.create_assets_bucket_name_parameter(self)
endpoint_name = pf.create_endpoint_name_parameter(self)
baseline_job_output_location = pf.create_baseline_job_output_location_parameter(self)
baseline_data = pf.create_baseline_data_parameter(self)
instance_type = pf.create_instance_type_parameter(self)
instance_count = pf.create_instance_count_parameter(self)
instance_volume_size = pf.create_instance_volume_size_parameter(self)
baseline_max_runtime_seconds = pf.create_baseline_max_runtime_seconds_parameter(self)
monitor_max_runtime_seconds = pf.create_monitor_max_runtime_seconds_parameter(self, "ModelQuality")
kms_key_arn = pf.create_kms_key_arn_parameter(self)
baseline_job_name = pf.create_baseline_job_name_parameter(self)
monitoring_schedule_name = pf.create_monitoring_schedule_name_parameter(self)
data_capture_bucket = pf.create_data_capture_bucket_name_parameter(self)
baseline_output_bucket = pf.create_baseline_output_bucket_name_parameter(self)
data_capture_s3_location = pf.create_data_capture_location_parameter(self)
monitoring_output_location = pf.create_monitoring_output_location_parameter(self)
schedule_expression = pf.create_schedule_expression_parameter(self)
image_uri = pf.create_algorithm_image_uri_parameter(self)
# add ModelQuality specific parameters/conditions, and update self.baseline_attributes/self.monitor_attributes
if monitoring_type == "ModelQuality":
self._add_model_quality_resources()
# conditions
kms_key_arn_provided = cf.create_kms_key_arn_provided_condition(self, kms_key_arn)
# Resources #
assets_bucket = s3.Bucket.from_bucket_name(self, "ImportedAssetsBucket", assets_bucket_name.value_as_string)
# getting blueprint bucket object from its name - will be used later in the stack
blueprint_bucket = s3.Bucket.from_bucket_name(
self, "ImportedBlueprintBucket", blueprint_bucket_name.value_as_string
)
# create sagemaker layer
sm_layer = sagemaker_layer(self, blueprint_bucket)
# update Baseline attributes
self.baseline_attributes.update(
dict(
monitoring_type=monitoring_type,
baseline_job_name=baseline_job_name.value_as_string,
baseline_data_location=baseline_data.value_as_string,
baseline_job_output_location=baseline_job_output_location.value_as_string,
endpoint_name=endpoint_name.value_as_string,
instance_type=instance_type.value_as_string,
instance_volume_size=instance_volume_size.value_as_string,
max_runtime_seconds=baseline_max_runtime_seconds.value_as_string,
kms_key_arn=core.Fn.condition_if(
kms_key_arn_provided.logical_id, kms_key_arn.value_as_string, core.Aws.NO_VALUE
).to_string(),
kms_key_arn_provided_condition=kms_key_arn_provided,
stack_name=core.Aws.STACK_NAME,
)
)
# create baseline job lambda action
baseline_job_lambda = create_baseline_job_lambda(
self,
blueprint_bucket=blueprint_bucket,
assets_bucket=assets_bucket,
sm_layer=sm_layer,
**self.baseline_attributes,
)
# create custom resource to invoke the baseline job lambda
# remove the condition from the custom resource properties. Otherwise, CFN will give an error
del self.baseline_attributes["kms_key_arn_provided_condition"]
invoke_lambda_custom_resource = create_invoke_lambda_custom_resource(
scope=self,
id="InvokeBaselineLambda",
lambda_function_arn=baseline_job_lambda.function_arn,
lambda_function_name=baseline_job_lambda.function_name,
blueprint_bucket=blueprint_bucket,
# add baseline attributes to the invoke lambda custom resource, so any change to these attributes
# (via template update) will re-invoke the baseline lambda and re-calculate the baseline
custom_resource_properties={
"Resource": "InvokeLambda",
"function_name": baseline_job_lambda.function_name,
"assets_bucket_name": assets_bucket_name.value_as_string,
**self.baseline_attributes,
},
)
# add dependency on baseline lambda
invoke_lambda_custom_resource.node.add_dependency(baseline_job_lambda)
# creating monitoring schedule
sagemaker_role = create_sagemaker_monitor_role(
scope=self,
id="MLOpsSagemakerMonitorRole",
kms_key_arn=kms_key_arn.value_as_string,
assets_bucket_name=assets_bucket_name.value_as_string,
data_capture_bucket=data_capture_bucket.value_as_string,
data_capture_s3_location=data_capture_s3_location.value_as_string,
baseline_output_bucket=baseline_output_bucket.value_as_string,
baseline_job_output_location=baseline_job_output_location.value_as_string,
output_s3_location=monitoring_output_location.value_as_string,
kms_key_arn_provided_condition=kms_key_arn_provided,
baseline_job_name=baseline_job_name.value_as_string,
monitoring_schedule_name=monitoring_schedule_name.value_as_string,
endpoint_name=endpoint_name.value_as_string,
model_monitor_ground_truth_input=None
if monitoring_type == "DataQuality"
else self.monitor_attributes["ground_truth_s3_uri"],
)
# resource tags
resource_tags = [{"key": "stack-name", "value": core.Aws.STACK_NAME}]
# update attributes
self.monitor_attributes.update(
dict(
monitoring_schedule_name=monitoring_schedule_name.value_as_string,
endpoint_name=endpoint_name.value_as_string,
baseline_job_name=baseline_job_name.value_as_string,
baseline_job_output_location=baseline_job_output_location.value_as_string,
schedule_expression=schedule_expression.value_as_string,
monitoring_output_location=monitoring_output_location.value_as_string,
instance_type=instance_type.value_as_string,
instance_count=instance_count.value_as_string,
instance_volume_size=instance_volume_size.value_as_string,
max_runtime_seconds=monitor_max_runtime_seconds.value_as_string,
kms_key_arn=core.Fn.condition_if(
kms_key_arn_provided.logical_id, kms_key_arn.value_as_string, core.Aws.NO_VALUE
).to_string(),
role_arn=sagemaker_role.role_arn,
image_uri=image_uri.value_as_string,
monitoring_type=monitoring_type,
tags=resource_tags,
)
)
# create Sagemaker monitoring Schedule
sagemaker_monitor = SageMakerModelMonitor(self, f"{monitoring_type}Monitor", **self.monitor_attributes)
# add job definition dependency on sagemaker role and invoke_lambda_custom_resource (so, the baseline job is created)
sagemaker_monitor.job_definition.node.add_dependency(sagemaker_role)
sagemaker_monitor.job_definition.node.add_dependency(invoke_lambda_custom_resource)
# Outputs #
core.CfnOutput(
self,
id="BaselineName",
value=baseline_job_name.value_as_string,
)
core.CfnOutput(
self,
id="MonitoringScheduleJobName",
value=monitoring_schedule_name.value_as_string,
)
core.CfnOutput(
self,
id="MonitoringScheduleType",
value=monitoring_type,
)
core.CfnOutput(
self,
id="BaselineJobOutput",
value=f"https://s3.console.aws.amazon.com/s3/buckets/{baseline_job_output_location.value_as_string}/",
)
core.CfnOutput(
self,
id="MonitoringScheduleOutput",
value=(
f"https://s3.console.aws.amazon.com/s3/buckets/{monitoring_output_location.value_as_string}/"
f"{endpoint_name.value_as_string}/{monitoring_schedule_name.value_as_string}/"
),
)
core.CfnOutput(
self,
id="MonitoredSagemakerEndpoint",
value=endpoint_name.value_as_string,
)
core.CfnOutput(
self,
id="DataCaptureS3Location",
value=(
f"https://s3.console.aws.amazon.com/s3/buckets/{data_capture_s3_location.value_as_string}"
f"/{endpoint_name.value_as_string}/"
),
)
def _add_model_quality_resources(self):
"""
Adds ModelQuality specific parameters/conditions and updates self.baseline_attributes/self.monitor_attributes
"""
# add baseline job attributes (they are different from Monitor attributes)
baseline_inference_attribute = pf.create_inference_attribute_parameter(self, "Baseline")
baseline_probability_attribute = pf.create_probability_attribute_parameter(self, "Baseline")
ground_truth_attribute = pf.create_ground_truth_attribute_parameter(self)
# add monitor attributes
monitor_inference_attribute = pf.create_inference_attribute_parameter(self, "Monitor")
monitor_probability_attribute = pf.create_probability_attribute_parameter(self, "Monitor")
ground_truth_s3_uri = pf.create_ground_truth_s3_uri_parameter(self)
# problem_type and probability_threshold_attribute are the same for both
problem_type = pf.create_problem_type_parameter(self)
probability_threshold_attribute = pf.create_probability_threshold_attribute_parameter(self)
# add conditions (used by monitor)
is_regression_or_multiclass_classification_problem = (
cf.create_problem_type_regression_or_multiclass_classification_condition(self, problem_type)
)
is_binary_classification_problem = cf.create_problem_type_binary_classification_condition(self, problem_type)
# add ModelQuality Baseline attributes
self.baseline_attributes.update(
dict(
problem_type=problem_type.value_as_string,
ground_truth_attribute=ground_truth_attribute.value_as_string,
inference_attribute=baseline_inference_attribute.value_as_string,
probability_attribute=baseline_probability_attribute.value_as_string,
probability_threshold_attribute=probability_threshold_attribute.value_as_string,
)
)
# add ModelQuality Monitor attributes
self.monitor_attributes.update(
dict(
problem_type=problem_type.value_as_string,
ground_truth_s3_uri=ground_truth_s3_uri.value_as_string,
# inference_attribute is required for Regression/Multiclass Classification problems
# probability_attribute/probability_threshold_attribute are not used
inference_attribute=core.Fn.condition_if(
is_regression_or_multiclass_classification_problem.logical_id,
monitor_inference_attribute.value_as_string,
core.Aws.NO_VALUE,
).to_string(),
# for a Binary Classification problem, we use probability_attribute and probability_threshold_attribute.
# note: probability_attribute is the index of the predicted probability in the captured data by the
                # SageMaker endpoint. Typically, probability_attribute="0" and probability_threshold_attribute="0.5"
probability_attribute=core.Fn.condition_if(
is_binary_classification_problem.logical_id,
monitor_probability_attribute.value_as_string,
core.Aws.NO_VALUE,
).to_string(),
probability_threshold_attribute=core.Fn.condition_if(
is_binary_classification_problem.logical_id,
probability_threshold_attribute.value_as_string,
core.Aws.NO_VALUE,
).to_string(),
)
)
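# ---------------------------------------------------------------------------
# Minimal, illustrative synthesis sketch. It assumes the stack is driven from a
# CDK v1 `core.App` (as the `from aws_cdk import core` import above suggests);
# the app/stack ids are placeholders, and the stack inputs are CloudFormation
# parameters, so no further values are needed at synth time.
if __name__ == "__main__":
    app = core.App()
    ModelMonitorStack(app, "DataQualityModelMonitorStack", monitoring_type="DataQuality")
    ModelMonitorStack(app, "ModelQualityModelMonitorStack", monitoring_type="ModelQuality")
    app.synth()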
|
StarcoderdataPython
|
372199
|
from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^$', views.recipesView),
]
|
StarcoderdataPython
|
1614529
|
<filename>tests/seq2seq_model_tests.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
from pytext.common.constants import Stage
from pytext.data import Batcher
from pytext.data.data import Data
from pytext.data.sources.data_source import Gazetteer
from pytext.data.sources.tsv import TSVDataSource
from pytext.data.tensorizers import (
ByteTokenTensorizer,
GazetteerTensorizer,
TokenTensorizer,
initialize_tensorizers,
)
from pytext.models.embeddings import (
ContextualTokenEmbedding,
DictEmbedding,
WordEmbedding,
)
from pytext.models.seq_models.rnn_encoder import LSTMSequenceEncoder
from pytext.models.seq_models.rnn_encoder_decoder import RNNModel
from pytext.models.seq_models.seq2seq_model import Seq2SeqModel
# @dep //pytext/utils:utils_lib
from pytext.utils.test import import_tests_module
tests_module = import_tests_module()
TEST_FILE_NAME = tests_module.test_file("seq2seq_model_unit.tsv")
def get_tensorizers(add_dict_feat=False, add_contextual_feat=False):
schema = {"source_sequence": str, "dict_feat": Gazetteer, "target_sequence": str}
data_source = TSVDataSource.from_config(
TSVDataSource.Config(
train_filename=TEST_FILE_NAME,
field_names=["source_sequence", "dict_feat", "target_sequence"],
),
schema,
)
src_tensorizer = TokenTensorizer.from_config(
TokenTensorizer.Config(
column="source_sequence", add_eos_token=True, add_bos_token=True
)
)
tgt_tensorizer = TokenTensorizer.from_config(
TokenTensorizer.Config(
column="target_sequence", add_eos_token=True, add_bos_token=True
)
)
tensorizers = {"src_seq_tokens": src_tensorizer, "trg_seq_tokens": tgt_tensorizer}
initialize_tensorizers(tensorizers, data_source.train)
if add_dict_feat:
tensorizers["dict_feat"] = GazetteerTensorizer.from_config(
GazetteerTensorizer.Config(
text_column="source_sequence", dict_column="dict_feat"
)
)
initialize_tensorizers(
{"dict_feat": tensorizers["dict_feat"]}, data_source.train
)
if add_contextual_feat:
tensorizers["contextual_token_embedding"] = ByteTokenTensorizer.from_config(
ByteTokenTensorizer.Config(column="source_sequence")
)
initialize_tensorizers(
{"contextual_token_embedding": tensorizers["contextual_token_embedding"]},
data_source.train,
)
return tensorizers
# Smoke tests that call torchscriptify and execute the model for all the cases.
# This should at least make sure we're testing end to end.
class Seq2SeqModelExportTests(unittest.TestCase):
def test_tokens(self):
model = Seq2SeqModel.from_config(
Seq2SeqModel.Config(
source_embedding=WordEmbedding.Config(embed_dim=512),
target_embedding=WordEmbedding.Config(embed_dim=512),
),
get_tensorizers(),
)
model.eval()
ts_model = model.torchscriptify()
res = ts_model(["call", "mom"])
assert res is not None
def test_tokens_contextual(self):
model = Seq2SeqModel.from_config(
Seq2SeqModel.Config(
source_embedding=WordEmbedding.Config(embed_dim=512),
target_embedding=WordEmbedding.Config(embed_dim=512),
inputs=Seq2SeqModel.Config.ModelInput(
contextual_token_embedding=ByteTokenTensorizer.Config()
),
contextual_token_embedding=ContextualTokenEmbedding.Config(embed_dim=7),
encoder_decoder=RNNModel.Config(
encoder=LSTMSequenceEncoder.Config(embed_dim=519)
),
),
get_tensorizers(add_contextual_feat=True),
)
model.eval()
ts_model = model.torchscriptify()
res = ts_model(["call", "mom"], contextual_token_embedding=[0.42] * (7 * 2))
assert res is not None
def test_tokens_dictfeat(self):
model = Seq2SeqModel.from_config(
Seq2SeqModel.Config(
source_embedding=WordEmbedding.Config(embed_dim=512),
target_embedding=WordEmbedding.Config(embed_dim=512),
inputs=Seq2SeqModel.Config.ModelInput(
dict_feat=GazetteerTensorizer.Config(text_column="source_sequence")
),
encoder_decoder=RNNModel.Config(
encoder=LSTMSequenceEncoder.Config(embed_dim=612)
),
dict_embedding=DictEmbedding.Config(),
),
get_tensorizers(add_dict_feat=True),
)
model.eval()
ts_model = model.torchscriptify()
res = ts_model(["call", "mom"], (["call", "mom"], [0.42, 0.17], [4, 3]))
assert res is not None
def test_tokens_dictfeat_contextual(self):
model = Seq2SeqModel.from_config(
Seq2SeqModel.Config(
source_embedding=WordEmbedding.Config(embed_dim=512),
target_embedding=WordEmbedding.Config(embed_dim=512),
inputs=Seq2SeqModel.Config.ModelInput(
dict_feat=GazetteerTensorizer.Config(text_column="source_sequence"),
contextual_token_embedding=ByteTokenTensorizer.Config(),
),
encoder_decoder=RNNModel.Config(
encoder=LSTMSequenceEncoder.Config(embed_dim=619)
),
dict_embedding=DictEmbedding.Config(),
contextual_token_embedding=ContextualTokenEmbedding.Config(embed_dim=7),
),
get_tensorizers(add_dict_feat=True, add_contextual_feat=True),
)
model.eval()
ts_model = model.torchscriptify()
res = ts_model(
["call", "mom"], (["call", "mom"], [0.42, 0.17], [4, 3]), [0.42] * (7 * 2)
)
assert res is not None
# Seq2SeqModel has restrictions on what can happen during evaluation, since
# sequence generation has the opportunity to affect the underlying model.
class Seq2SeqModelEvalTests(unittest.TestCase):
def test_force_predictions_on_eval(self):
tensorizers = get_tensorizers()
model = Seq2SeqModel.from_config(
Seq2SeqModel.Config(
source_embedding=WordEmbedding.Config(embed_dim=512),
target_embedding=WordEmbedding.Config(embed_dim=512),
),
tensorizers,
)
# Get sample inputs using a data source.
schema = {
"source_sequence": str,
"dict_feat": Gazetteer,
"target_sequence": str,
}
data = Data.from_config(
Data.Config(
source=TSVDataSource.Config(
train_filename=TEST_FILE_NAME,
field_names=["source_sequence", "dict_feat", "target_sequence"],
)
),
schema,
tensorizers,
)
data.batcher = Batcher(1, 1, 1)
raw_batch, batch = next(iter(data.batches(Stage.TRAIN, load_early=True)))
inputs = model.arrange_model_inputs(batch)
# Verify that model does not run sequence generation on prediction.
outputs = model(*inputs)
pred = model.get_pred(outputs, {"stage": Stage.EVAL})
self.assertEqual(pred, (None, None))
# Verify that attempting to set force_eval_predictions is correctly
# accounted for.
model.force_eval_predictions = True
with self.assertRaises(AssertionError):
_ = model.get_pred(outputs, {"stage": Stage.EVAL})
def test_reset_incremental_states(self):
"""
This test might seem trivial. However, interacting with the scripted
sequence generator crosses the Torchscript boundary, which can lead
to weird behavior. If the incremental states don't get properly
reset, the model will produce garbage _after_ the first call, which
is a pain to debug when you only catch it after training.
"""
tensorizers = get_tensorizers()
# Avoid numeric issues with quantization by setting a known seed.
torch.manual_seed(42)
model = Seq2SeqModel.from_config(
Seq2SeqModel.Config(
source_embedding=WordEmbedding.Config(embed_dim=512),
target_embedding=WordEmbedding.Config(embed_dim=512),
),
tensorizers,
)
# Get sample inputs using a data source.
schema = {
"source_sequence": str,
"dict_feat": Gazetteer,
"target_sequence": str,
}
data = Data.from_config(
Data.Config(
source=TSVDataSource.Config(
train_filename=TEST_FILE_NAME,
field_names=["source_sequence", "dict_feat", "target_sequence"],
)
),
schema,
tensorizers,
)
data.batcher = Batcher(1, 1, 1)
raw_batch, batch = next(iter(data.batches(Stage.TRAIN, load_early=True)))
inputs = model.arrange_model_inputs(batch)
model.eval()
outputs = model(*inputs)
pred, scores = model.get_pred(outputs, {"stage": Stage.TEST})
# Verify that the incremental states reset correctly.
decoder = model.sequence_generator.beam_search.decoder_ens
decoder.reset_incremental_states()
self.assertDictEqual(decoder.incremental_states, {"0": {}})
# Verify that the model returns the same predictions.
new_pred, new_scores = model.get_pred(outputs, {"stage": Stage.TEST})
self.assertEqual(new_scores, scores)
|
StarcoderdataPython
|
3220550
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gmock_files.py v0.1.0
Fuses Google Mock and Google Test source code into two .h files and a .cc file.
SYNOPSIS
fuse_gmock_files.py [GMOCK_ROOT_DIR] OUTPUT_DIR
Scans GMOCK_ROOT_DIR for Google Mock and Google Test source
code, assuming Google Test is in the GMOCK_ROOT_DIR/gtest
sub-directory, and generates three files:
OUTPUT_DIR/gtest/gtest.h, OUTPUT_DIR/gmock/gmock.h, and
OUTPUT_DIR/gmock-gtest-all.cc. Then you can build your tests
by adding OUTPUT_DIR to the include search path and linking
with OUTPUT_DIR/gmock-gtest-all.cc. These three files contain
everything you need to use Google Mock. Hence you can
"install" Google Mock by copying them to wherever you want.
GMOCK_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gmock_files.py fused_gmock
./fuse_gmock_files.py path/to/unpacked/gmock fused_gmock
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Mock or Google Test headers. Please
report any problems to <EMAIL>. You can read
http://code.google.com/p/googlemock/wiki/CookBook for more
information.
"""
__author__ = '<EMAIL> (<NAME>)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Mock root directory.
DEFAULT_GMOCK_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# We need to call into gtest/scripts/fuse_gtest_files.py.
sys.path.append(os.path.join(DEFAULT_GMOCK_ROOT_DIR, 'gtest/scripts'))
import fuse_gtest_files
gtest = fuse_gtest_files
# Regex for matching '#include "gmock/..."'.
INCLUDE_GMOCK_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gmock/.+)"')
# Where to find the source seed files.
GMOCK_H_SEED = 'include/gmock/gmock.h'
GMOCK_ALL_CC_SEED = 'src/gmock-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GMOCK_H_OUTPUT = 'gmock/gmock.h'
GMOCK_GTEST_ALL_CC_OUTPUT = 'gmock-gtest-all.cc'
def GetGTestRootDir(gmock_root):
"""Returns the root directory of Google Test."""
return os.path.join(gmock_root, 'gtest')
def ValidateGMockRootDir(gmock_root):
"""Makes sure gmock_root points to a valid gmock root directory.
The function aborts the program on failure.
"""
gtest.ValidateGTestRootDir(GetGTestRootDir(gmock_root))
gtest.VerifyFileExists(gmock_root, GMOCK_H_SEED)
gtest.VerifyFileExists(gmock_root, GMOCK_ALL_CC_SEED)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
gtest.VerifyOutputFile(output_dir, gtest.GTEST_H_OUTPUT)
gtest.VerifyOutputFile(output_dir, GMOCK_H_OUTPUT)
gtest.VerifyOutputFile(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT)
def FuseGMockH(gmock_root, output_dir):
"""Scans folder gmock_root to generate gmock/gmock.h in output_dir."""
output_file = file(os.path.join(output_dir, GMOCK_H_OUTPUT), 'w')
processed_files = sets.Set() # Holds all gmock headers we've processed.
def ProcessFile(gmock_header_path):
"""Processes the given gmock header file."""
# We don't process the same header twice.
if gmock_header_path in processed_files:
return
processed_files.add(gmock_header_path)
# Reads each line in the given gmock header.
for line in file(os.path.join(gmock_root, gmock_header_path), 'r'):
m = INCLUDE_GMOCK_FILE_REGEX.match(line)
if m:
# It's '#include "gmock/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/foo.h"'. We translate it to
# "gtest/gtest.h", regardless of what foo is, since all
# gtest headers are fused into gtest/gtest.h.
# There is no need to #include gtest.h twice.
if not gtest.GTEST_H_SEED in processed_files:
processed_files.add(gtest.GTEST_H_SEED)
output_file.write('#include "%s"\n' % (gtest.GTEST_H_OUTPUT,))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GMOCK_H_SEED)
output_file.close()
def FuseGMockAllCcToFile(gmock_root, output_file):
"""Scans folder gmock_root to fuse gmock-all.cc into output_file."""
processed_files = sets.Set()
def ProcessFile(gmock_source_file):
"""Processes the given gmock source file."""
# We don't process the same #included file twice.
if gmock_source_file in processed_files:
return
processed_files.add(gmock_source_file)
# Reads each line in the given gmock source file.
for line in file(os.path.join(gmock_root, gmock_source_file), 'r'):
m = INCLUDE_GMOCK_FILE_REGEX.match(line)
if m:
# It's '#include "gmock/foo.h"'. We treat it as '#include
# "gmock/gmock.h"', as all other gmock headers are being fused
# into gmock.h and cannot be #included directly.
# There is no need to #include "gmock/gmock.h" more than once.
if not GMOCK_H_SEED in processed_files:
processed_files.add(GMOCK_H_SEED)
output_file.write('#include "%s"\n' % (GMOCK_H_OUTPUT,))
else:
m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."'.
# There is no need to #include gtest.h as it has been
# #included by gtest-all.cc.
pass
else:
m = gtest.INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GMOCK_ALL_CC_SEED)
def FuseGMockGTestAllCc(gmock_root, output_dir):
"""Scans folder gmock_root to generate gmock-gtest-all.cc in output_dir."""
output_file = file(os.path.join(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT), 'w')
# First, fuse gtest-all.cc into gmock-gtest-all.cc.
gtest.FuseGTestAllCcToFile(GetGTestRootDir(gmock_root), output_file)
# Next, append fused gmock-all.cc to gmock-gtest-all.cc.
FuseGMockAllCcToFile(gmock_root, output_file)
output_file.close()
def FuseGMock(gmock_root, output_dir):
"""Fuses gtest.h, gmock.h, and gmock-gtest-all.h."""
ValidateGMockRootDir(gmock_root)
ValidateOutputDir(output_dir)
gtest.FuseGTestH(GetGTestRootDir(gmock_root), output_dir)
FuseGMockH(gmock_root, output_dir)
FuseGMockGTestAllCc(gmock_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gmock_files.py OUTPUT_DIR
FuseGMock(DEFAULT_GMOCK_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gmock_files.py GMOCK_ROOT_DIR OUTPUT_DIR
FuseGMock(sys.argv[1], sys.argv[2])
else:
print __doc__
sys.exit(1)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1751271
|
<reponame>taushifkhan/plv-DecoySearch<filename>cs_modules/pdbHearParser.py
#!/usr/bin/python
helpDoc = """
PDB header parser: extracts formatted annotation for a PDB Id
from the header records of a PDB file.
"""
import urllib
import os
class pdbHeader():
def __init__(self,pdbHeader):
self.header={'HEADER':'','TITLE':'','COMPND':'','SOURCE':'','EXPDTA':'','NUMMDL':''}
self.headInfo = pdbHeader
def _decompose_cmpd__(self,compnd):
molIds = compnd.split("MOL_ID")
cmpndDict = {}
for m in range(1,len(molIds),2):
mids = molIds[m].split(";")
mol = mids[0].split(":")[-1].strip()
cmpndDict[mol] = {}
for k in mids[1:]:
kmol = k.split(":")
cmpndDict[mol][kmol[0].strip()] = kmol[1].strip()
return cmpndDict
def getData(self):
for l in self.headInfo:
nameId = l[:6].strip()
if nameId in self.header.keys():
self.header[nameId] += l[10:80].strip()
else:
pass
cmpndDict = self._decompose_cmpd__(self.header['COMPND'])
sourceDict = self._decompose_cmpd__(self.header['SOURCE'])
if cmpndDict.keys():
self.header['COMPND'] = {}
self.header['COMPND'] = cmpndDict
self.header['SOURCE'] = {}
self.header['SOURCE'] = sourceDict
headerTitle = self.header['HEADER'][:40].strip().lower()
headPdbId = self.header['HEADER'][52:56]
self.header['HEADER'] = {'Name':headerTitle,'Id':headPdbId}
return self.header
def getPDBheaderFile(pid):
"""
@param:
        pid -> 4 letter PDB Id - alphanumeric
    @return
        headRead -> header lines read into a list
    Downloads only the header information from RCSB.
"""
cwd = os.getcwd()
headFile = urllib.URLopener()
urlHeader = "https://files.rcsb.org/header/%s.pdb"%(pid.upper())
headsave = cwd+'/%s_header.pdb'%pid
try:
headFile.retrieve(urlHeader, headsave)
headRead = open(headsave,'r').readlines()
print "Header file:", headsave
except Exception as e:
raise e
return headRead
def main(pdbId):
headRead = getPDBheaderFile(pdbId)
pH = pdbHeader(headRead)
hdict = pH.getData()
import ipdb; ipdb.set_trace();
if __name__ == '__main__':
import sys
main(sys.argv[1])
|
StarcoderdataPython
|
1975843
|
from splitrule import SplitRule
import numpy as np
import math
def get_labels(ys):
labels = {}
for y in ys:
if y in labels:
labels[y] += 1
else:
labels[y] = 1
return labels
def split(x_sorted, j):
return (x_sorted[: j + 1], x_sorted[j + 1 :])
def H(label_count, total):
res = 0
if total == 0:
return 0
for label in label_count:
p = label_count[label] / total
if p != 0:
res -= p * math.log(p, 2)
return res
def calculate_information_gain(
H_dataset,
l_branch,
r_branch,
labels_left_count,
labels_right_count,
labels_left_total,
labels_right_total,
):
H_left = H(labels_left_count, labels_left_total)
H_right = H(labels_right_count, labels_right_total)
total = labels_left_total + labels_right_total
remainder = H_left * (labels_left_total / total) + H_right * (
labels_right_total / total
)
return H_dataset - remainder
def find_split(training_dataset):
y_training = training_dataset[:, -1]
labels_training_count = get_labels(y_training)
x_training = training_dataset[:, :-1]
(N, k) = x_training.shape
best_split_rule = None
cur_split_rule = None
best_ldataset = None
best_rdataset = None
max_information_gain = -1
H_dataset = H(labels_training_count, N)
for i in range(k):
arg_sort = np.argsort(x_training[:, i])
y_sorted = y_training[arg_sort]
x_sorted = x_training[arg_sort]
training_sorted = training_dataset[arg_sort]
labels_left_count = {}
labels_right_count = labels_training_count.copy()
labels_left_total = 0
for j in range(N):
sorted_column = x_sorted[:, i]
if j < N - 1:
if sorted_column[j] == sorted_column[j + 1]:
if y_sorted[j] in labels_left_count:
labels_left_count[y_sorted[j]] += 1
else:
labels_left_count[y_sorted[j]] = 1
labels_left_total += 1
labels_right_count[y_sorted[j]] -= 1
continue
if y_sorted[j] in labels_left_count:
labels_left_count[y_sorted[j]] += 1
else:
labels_left_count[y_sorted[j]] = 1
labels_left_total += 1
labels_right_count[y_sorted[j]] -= 1
cur_split_rule = SplitRule(i, sorted_column[j])
(l_dataset, r_dataset) = split(training_sorted, j)
labels_right_total = N - labels_left_total
info_gain = calculate_information_gain(
H_dataset,
l_dataset,
r_dataset,
labels_left_count,
labels_right_count,
labels_left_total,
labels_right_total,
)
if info_gain >= max_information_gain:
max_information_gain = info_gain
best_split_rule = cur_split_rule
best_ldataset = l_dataset
best_rdataset = r_dataset
return (best_split_rule, best_ldataset, best_rdataset)
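

# Hedged usage sketch (not part of the original module): a tiny, made-up
# dataset with the class label in the last column. SplitRule comes from the
# sibling `splitrule` module imported above, so this only runs inside that
# project layout.
if __name__ == "__main__":
    toy = np.array([
        [1.0, 5.0, 0.0],
        [2.0, 6.0, 0.0],
        [8.0, 1.0, 1.0],
        [9.0, 2.0, 1.0],
    ])
    rule, left, right = find_split(toy)
    # The labels are perfectly separable, so the chosen split puts two rows on
    # each side and reaches an information gain of 1 bit.
    print(rule, left.shape, right.shape)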
|
StarcoderdataPython
|
3391597
|
import sqlalchemy.types as types
import json
def _decode(o):
# Note the "unicode" part is only for python2
if isinstance(o, str):
try:
return int(o)
except ValueError:
return o
elif isinstance(o, dict):
return {k: _decode(v) for k, v in o.items()}
elif isinstance(o, list):
return [_decode(v) for v in o]
else:
return o
class JsonDecorator(types.TypeDecorator):
impl = types.String
cache_ok = True
def process_bind_param(self, value, dialect):
return json.dumps(value, ensure_ascii=False)
def process_result_value(self, value, dialect):
return json.loads(
value,
object_hook=lambda d: {int(k) if k.lstrip('-').isdigit() else k: v for k, v in d.items()}
)
def copy(self, **kw):
return JsonDecorator(self.impl.length)
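

# Hedged usage sketch (assumed example, not part of the original module):
# map a column with JsonDecorator so a dict with integer keys survives a round
# trip through a String column. Assumes SQLAlchemy 1.4+ for declarative_base
# in sqlalchemy.orm, Session as a context manager, and Session.get.
if __name__ == '__main__':
    from sqlalchemy import Column, Integer, create_engine
    from sqlalchemy.orm import declarative_base, Session

    Base = declarative_base()

    class Item(Base):
        __tablename__ = 'items'
        id = Column(Integer, primary_key=True)
        payload = Column(JsonDecorator(255))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add(Item(id=1, payload={1: 'a', -2: 'b', 'x': 3}))
        session.commit()
        # Integer-looking keys come back as ints thanks to the object_hook.
        print(session.get(Item, 1).payload)  # {1: 'a', -2: 'b', 'x': 3}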
|
StarcoderdataPython
|
6605536
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Z:\wwalker\maya\python\gui\weight_tools\ui\main_window.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(168, 228)
MainWindow.setMinimumSize(QtCore.QSize(168, 228))
MainWindow.setMaximumSize(QtCore.QSize(168, 228))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.weight_export = QtWidgets.QPushButton(self.centralwidget)
self.weight_export.setGeometry(QtCore.QRect(10, 10, 151, 61))
self.weight_export.setObjectName("weight_export")
self.weight_import = QtWidgets.QPushButton(self.centralwidget)
self.weight_import.setGeometry(QtCore.QRect(10, 130, 151, 61))
self.weight_import.setObjectName("weight_import")
self.batch_mode = QtWidgets.QCheckBox(self.centralwidget)
self.batch_mode.setGeometry(QtCore.QRect(10, 190, 70, 17))
self.batch_mode.setObjectName("batch_mode")
self.bind_from_file = QtWidgets.QPushButton(self.centralwidget)
self.bind_from_file.setGeometry(QtCore.QRect(10, 70, 151, 61))
self.bind_from_file.setObjectName("bind_from_file")
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.weight_export.setText(_translate("MainWindow", "Export"))
self.weight_import.setText(_translate("MainWindow", "Import"))
self.batch_mode.setText(_translate("MainWindow", "Batch"))
self.bind_from_file.setText(_translate("MainWindow", "Bind From File"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
1698694
|
from typing import List


class Solution:
    def addBoldTag(self, s: str, words: List[str]) -> str:
        tag = [False] * len(s)
for w in words:
i = s.find(w)
while i != -1:
for k in range(i, i + len(w)):
tag[k] = True
i = s.find(w, i + 1)
res = ''
i = 0
while i < len(s):
if not tag[i]:
res += s[i]
i += 1
continue
res += '<b>'
while i < len(s) and tag[i]:
res += s[i]
i += 1
res += '</b>'
return res
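

# Quick check on an assumed input (not part of the original snippet): the
# matched substrings get wrapped in <b> tags.
if __name__ == '__main__':
    print(Solution().addBoldTag('abcxyz123', ['abc', '123']))
    # -> <b>abc</b>xyz<b>123</b>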
|
StarcoderdataPython
|
11393377
|
import pygame
import time
from engine.keycodes import KeyCodes
app = None
# EXPORT
class Application(object):
def __init__(self, res=(640, 480), scale=1.0):
global app
app = self
self.res = res
self.screen = pygame.display.set_mode(res)
self.fps = 30
self.keys = []
self.last_ts = time.time()
def calc_dt(self):
cur = time.time()
dt = cur - self.last_ts
# print("dt={}".format(dt))
self.last_ts = cur
self.fps = 0.9 * self.fps + 0.1 / dt
return dt
@staticmethod
def flip():
pygame.display.flip()
def clear(self, color=(192, 128, 255)):
pass
def onKey(self, key):
pass
def onClick(self, pos):
pass
def handleEvents(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
return False
self.keys = pygame.key.get_pressed()
return True
def loop(self, dt):
pass
def run(self):
while self.handleEvents():
if not self.loop(self.calc_dt()):
break
self.flip()
# EXPORT
def get_screen_size():
return app.res
# EXPORT
def get_screen():
return app.screen
# EXPORT
def key_down(key_name):
if key_name not in KeyCodes:
return False
code = KeyCodes.get(key_name)
return app.keys[code]
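

# Hedged usage sketch (assumed, not part of the original module): subclass
# Application, fill the screen every frame, and quit when the window closes.
# pygame.init() is assumed to be the caller's job, since the class itself
# never calls it.
if __name__ == '__main__':
    class Demo(Application):
        def loop(self, dt):
            self.screen.fill((30, 30, 30))
            return True

    pygame.init()
    Demo().run()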
|
StarcoderdataPython
|
12803732
|
<reponame>roboDocs/RoboChrome
from base64 import b64encode
from xml.sax.saxutils import escape
from .command import moveto, lineto, quadto, curveto, closepath
from .misc import dump
from .text import placedtext, placedoutlines
import re
def parsepath(data):
tokens = re.compile(br'|'.join((
br'([+-]?(?:\d*\.\d+|\d+\.?)(?:[eE][+-]?\d+)?)', # number
br'([Mm])', # moveto
br'([Zz])', # closepath
br'([Ll])', # lineto
br'([Hh])', # horizontal lineto
br'([Vv])', # vertical lineto
br'([Cc])', # curveto
br'([Ss])', # smooth curveto
br'([Qq])', # quadto
br'([Tt])', # smooth quadto
br'([Aa])'))) # elliptical arc
counts = 0, 0, 2, 0, 2, 1, 1, 6, 4, 4, 2, 7
result, arguments = [], []
mx, my = px, py = 0.0, 0.0
previous = None
m = tokens.search(data)
if not m or m.lastindex != 2: # moveto
raise ValueError('Invalid path.')
while m:
index = m.lastindex
count = counts[index]
relative = m.group(index).islower()
while True:
for i in range(count - len(arguments)):
m = tokens.search(data, m.end())
if not m or m.lastindex != 1: # number
raise ValueError('Invalid argument.')
arguments.append(float(m.group(1)))
if index == 2: # moveto
x, y = arguments
if relative:
x += px; y += py
mx, my = px, py = x, y
previous = moveto(x, y)
index = 4
elif index == 3: # closepath
px, py = mx, my
previous = closepath
elif index == 4: # lineto
x, y = arguments
if relative:
x += px; y += py
px, py = x, y
previous = lineto(x, y)
elif index == 5: # horizontal lineto
x, = arguments
if relative:
x += px
px = x
previous = lineto(x, py)
elif index == 6: # vertical lineto
y, = arguments
if relative:
y += py
py = y
previous = lineto(px, y)
elif index == 7: # curveto
x1, y1, x2, y2, x, y = arguments
if relative:
x1 += px; y1 += py; x2 += px; y2 += py; x += px; y += py
px, py = x, y
previous = curveto(x1, y1, x2, y2, x, y)
elif index == 8: # smooth curveto
if type(previous) == curveto:
x1, y1 = px + px - previous.x2, py + py - previous.y2
else:
x1, y1 = px, py
x2, y2, x, y = arguments
if relative:
x2 += px; y2 += py; x += px; y += py
px, py = x, y
previous = curveto(x1, y1, x2, y2, x, y)
elif index == 9: # quadto
x1, y1, x, y = arguments
if relative:
x1 += px; y1 += py; x += px; y += py
px, py = x, y
previous = quadto(x1, y1, x, y)
elif index == 10: # smooth quadto
if type(previous) == quadto:
x1, y1 = px + px - previous.x1, py + py - previous.y1
else:
x1, y1 = px, py
x, y = arguments
if relative:
x += px; y += py
px, py = x, y
previous = quadto(x1, y1, x, y)
else: # elliptical arc
raise NotImplementedError
result.append(previous)
arguments.clear()
m = tokens.search(data, m.end())
if not m or m.lastindex != 1: # number
break
arguments.append(float(m.group(1)))
return result
def serialize(page, compress):
fonts = {}
for item in page.items:
if isinstance(item, placedtext):
if not isinstance(item, placedoutlines):
for height, run in item.layout.runs():
for style, string in run:
name = style.font.name
data = style.font.source.readable.data
if name not in fonts:
fonts[name] = (
b'@font-face {\n'
b' font-family: "%s";\n'
b' src: url("data:font/sfnt;base64,%s");\n'
b'}') % (name, b64encode(data))
if fonts:
defs = (
b'<defs>\n'
b'<style>\n'
b'%s\n'
b'</style>\n'
b'</defs>\n') % b'\n'.join(fonts.values())
else:
defs = b''
return (
b'<?xml version="1.0" encoding="UTF-8"?>\n'
b'<!-- Flat -->\n'
b'<svg version="1.1" '
b'xmlns="http://www.w3.org/2000/svg" '
b'xmlns:xlink="http://www.w3.org/1999/xlink" '
b'width="%spt" height="%spt">\n'
b'<title>%s</title>\n'
b'%s%s\n'
b'</svg>') % (
dump(page.width), dump(page.height),
escape(page.title).encode('utf-8'),
defs, b'\n'.join(item.svg() for item in page.items))
|
StarcoderdataPython
|
3352877
|
from pyramid.view import view_config
from pyramid.renderers import render
from pyramid.response import Response
from pyramid.exceptions import NotFound
from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
from datetime import datetime
import calendar
import pymongo
import logging
log = logging.getLogger(__name__)
ignore_keys = (
'_id', 'file_id', 'goes_file_id', 'diagnostic_id',
'timestamp', 'prefix'
)
def find_station_fields(request, station):
fields = {}
env_key_col_name = station['collection'] + '.env.keys'
for r in request.db[env_key_col_name].find():
key = r['_id']
if key not in ignore_keys:
key_record = {}
key_record['uri'] = key
key_part = key.split('-')
key_record['name'] = key_part[0].replace('_', ' ')
if len(key_part) > 1:
key_record['units'] = key_part[1].replace('_', ' ')
else:
key_record['units'] = None
fields[key] = key_record
return fields
def find_station_field(request, station, field_uri):
fields = find_station_fields(request, station)
if field_uri in fields:
return fields[field_uri]
else:
return None
@view_config(route_name="crow_layers", request_method="GET", renderer="jsonp")
def crow_layers_get(request):
retVal = {
'url': request.host
}
retVal['provider'] = {
'name': 'Coastal Ocean Monitoring and Prediction System',
'short_name': 'COMPS',
'url': 'http://comps.marine.usf.edu',
'PIs': ['<NAME>'],
'institution': {
'name': 'University of South Florida College of Marine Science',
'url': 'http://marine.usf.edu/'
},
'group': {
'name': 'Ocean Circulation Group',
'url': 'http://ocg.marine.usf.edu/'
}
}
layers = {}
for station in request.db['stations'].find({}, {'_id': 0}):
station['time_dependent'] = 'end'
# Fill in field records
station['fields'] = find_station_fields(request, station)
layers[station['uri']] = station
retVal['layers'] = layers
return retVal
def verify_station(request, station_uri):
station_collection = request.db['stations']
station = station_collection.find_one({'uri': station_uri})
return station
@view_config(route_name="crow_environment_json", renderer="jsonp")
def environmental_data(request):
try:
station_url = request.GET['layer_uri']
field_uri = request.GET['field_uri']
start = datetime.fromtimestamp(float(request.GET['start']))
end = datetime.fromtimestamp(float(request.GET['end']))
except KeyError, ex:
raise HTTPBadRequest('Missing %s GET parameter' % ex)
station = verify_station(request, station_url)
if station is None:
raise NotFound('No station found with URI "%s"' % station_url)
field = find_station_field(request, station, field_uri)
if field is None:
raise NotFound(
'Field %s not found for stations %s' % (field_uri, station_url)
)
col_name = '%s.env' % station['collection']
collection = request.db[col_name]
data = []
query = collection.find(
{'timestamp': {'$gte': start, '$lte': end}, field_uri: {'$exists': 1}},
{'timestamp': 1, field_uri: 1}
).sort('timestamp', pymongo.ASCENDING)
for doc in query:
unix_timestamp = (
calendar.timegm(doc['timestamp'].utctimetuple())
)
data.append(
[unix_timestamp, doc[field_uri]]
)
return {
'station_uri': station['uri'],
'field': field,
'start': start.isoformat(),
'end': end.isoformat(),
'data': data
}
def get_latest_readings(request, station):
col_name = '%s.env.latest' % station['collection']
data = request.db[col_name].find_one({})
if data is not None:
for key in ignore_keys:
if key in data:
del data[key]
else:
data = {}
return data
import operator
@view_config(route_name='layer_kml')
def layer_kml(request):
try:
station_uri = request.GET['layer_uri']
except KeyError, ex:
raise HTTPNotFound('Missing %s GET parameter' % ex)
station = verify_station(request, station_uri)
latest_readings = get_latest_readings(request, station)
latest_readings = sorted(
latest_readings.items(), key=operator.itemgetter(0)
)
if station is not None:
kml = render('layer_kml.mako', {
'now': datetime.utcnow(),
'station': station,
'latest_readings': latest_readings
}, request)
response = (
Response(body=kml,
content_type="application/vnd.google-earth.kml+xml")
)
return response
else:
raise NotFound()
|
StarcoderdataPython
|
1760809
|
from setuptools import setup, find_packages
install_requires = [
'torch>=1.9.0',
'torchvision>=0.10.0',
'tqdm'
]
setup(
name='anatome',
version='0.0.3',
description='Ἀνατομή is a PyTorch library to analyze representation of neural networks',
author='<NAME>',
author_email='<EMAIL>',
install_requires=install_requires,
packages=find_packages()
)
|
StarcoderdataPython
|
8063664
|
#!/usr/bin/env python
from __future__ import print_function
"""
pytrace.py
"""
import cStringIO
import os
import struct
import sys
# TODO: Two kinds of tracing?
# - FullTracer -> Chrome trace?
# - ReservoirSamplingTracer() -- flame graph that is deterministic?
# TODO: Check this in but just go ahead and fix wild.sh instead.
class Tracer(object):
# Limit to 10M events by default.
def __init__(self, max_events=10e6):
self.pid = os.getpid()
# append
self.event_strs = cStringIO.StringIO()
# After max_events we stop recording
self.max_events = max_events
self.num_events = 0
self.depth = 0
# Python VM callback
def OnEvent(self, frame, event_type, arg):
# Test overhead
# 7.5 seconds. Geez. That's crazy high.
# The baseline is 2.7 seconds, and _lsprof takes 3.8 seconds.
# I guess that's why pytracing is a decorator and only works on one part of
# the program.
# pytracing isn't usable with large programs. It can't run abuild -h.
# What I really want is the nicer visualization. I don't want the silly
# cProfile output.
self.num_events += 1
name = frame.f_code.co_name
filename = frame.f_code.co_filename
if event_type in ('call', 'c_call'):
self.depth += 1
record = '%s%s\t%s\t%s\t%s\t%s\n' % (' ' * self.depth,
event_type, filename, frame.f_lineno, name, arg)
self.event_strs.write(record)
if event_type in ('return', 'c_return'):
self.depth -= 1
return
# NOTE: Do we want a struct.pack version eventually?
#self.event_strs.write('')
def Start(self):
sys.setprofile(self.OnEvent)
def Stop(self, path):
sys.setprofile(None)
# Only one process should write out the file!
if os.getpid() != self.pid:
return
# TODO:
# - report number of events?
# - report number of bytes?
print('num_events: %d' % self.num_events, file=sys.stderr)
print('Writing to %r' % path, file=sys.stderr)
with open(path, 'w') as f:
f.write(self.event_strs.getvalue())
def main(argv):
t = Tracer()
import urlparse
t.Start()
print(urlparse.urlparse('http://example.com/foo'))
t.Stop('demo.pytrace')
if __name__ == '__main__':
try:
main(sys.argv)
except RuntimeError as e:
        print('FATAL: %s' % e, file=sys.stderr)
sys.exit(1)
|
StarcoderdataPython
|
5101165
|
# Example, do not modify!
print(5 / 8)
# Print the sum of 7 and 10
print(7+10)
# Division
print(5 / 8)
# Addition
print(7 + 10)
# Addition, subtraction
print(5 + 5)
print(5 - 5)
# Multiplication, division, modulo, and exponentiation
print(3 * 5)
print(10 / 2)
print(18 % 7)
print(4 ** 2)
print(100 * (1.1**7))
|
StarcoderdataPython
|
33965
|
<filename>DialogCalibrate.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'DialogCalibrate.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_DialogCalibrate(object):
def setupUi(self, DialogCalibrate):
DialogCalibrate.setObjectName("DialogCalibrate")
DialogCalibrate.resize(451, 240)
self.btnAoAWingTare = QtWidgets.QPushButton(DialogCalibrate)
self.btnAoAWingTare.setEnabled(True)
self.btnAoAWingTare.setGeometry(QtCore.QRect(260, 80, 161, 32))
self.btnAoAWingTare.setObjectName("btnAoAWingTare")
self.lblRawAoA = QtWidgets.QLabel(DialogCalibrate)
self.lblRawAoA.setGeometry(QtCore.QRect(280, 20, 81, 21))
font = QtGui.QFont()
font.setPointSize(14)
self.lblRawAoA.setFont(font)
self.lblRawAoA.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lblRawAoA.setObjectName("lblRawAoA")
self.txtRawAoA = QtWidgets.QLabel(DialogCalibrate)
self.txtRawAoA.setGeometry(QtCore.QRect(370, 20, 56, 20))
font = QtGui.QFont()
font.setPointSize(18)
self.txtRawAoA.setFont(font)
self.txtRawAoA.setObjectName("txtRawAoA")
self.lblRawAirspeed = QtWidgets.QLabel(DialogCalibrate)
self.lblRawAirspeed.setGeometry(QtCore.QRect(10, 20, 131, 21))
font = QtGui.QFont()
font.setPointSize(14)
self.lblRawAirspeed.setFont(font)
self.lblRawAirspeed.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lblRawAirspeed.setObjectName("lblRawAirspeed")
self.txtRawAirspeed = QtWidgets.QLabel(DialogCalibrate)
self.txtRawAirspeed.setGeometry(QtCore.QRect(150, 20, 56, 20))
font = QtGui.QFont()
font.setPointSize(18)
self.txtRawAirspeed.setFont(font)
self.txtRawAirspeed.setObjectName("txtRawAirspeed")
self.btnAirspeedTare = QtWidgets.QPushButton(DialogCalibrate)
self.btnAirspeedTare.setGeometry(QtCore.QRect(30, 50, 161, 32))
self.btnAirspeedTare.setObjectName("btnAirspeedTare")
self.btnDone = QtWidgets.QPushButton(DialogCalibrate)
self.btnDone.setGeometry(QtCore.QRect(310, 190, 110, 32))
self.btnDone.setDefault(True)
self.btnDone.setObjectName("btnDone")
self.btnAoAPlatformTare = QtWidgets.QPushButton(DialogCalibrate)
self.btnAoAPlatformTare.setEnabled(True)
self.btnAoAPlatformTare.setGeometry(QtCore.QRect(260, 50, 161, 32))
self.btnAoAPlatformTare.setObjectName("btnAoAPlatformTare")
self.inpAoAOffset = QtWidgets.QDoubleSpinBox(DialogCalibrate)
self.inpAoAOffset.setGeometry(QtCore.QRect(350, 120, 62, 31))
font = QtGui.QFont()
font.setPointSize(14)
self.inpAoAOffset.setFont(font)
self.inpAoAOffset.setDecimals(1)
self.inpAoAOffset.setMaximum(90.0)
self.inpAoAOffset.setSingleStep(0.1)
self.inpAoAOffset.setProperty("value", 0.0)
self.inpAoAOffset.setObjectName("inpAoAOffset")
self.lblAoAOffset = QtWidgets.QLabel(DialogCalibrate)
self.lblAoAOffset.setGeometry(QtCore.QRect(240, 120, 101, 21))
font = QtGui.QFont()
font.setPointSize(14)
self.lblAoAOffset.setFont(font)
self.lblAoAOffset.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lblAoAOffset.setObjectName("lblAoAOffset")
self.retranslateUi(DialogCalibrate)
self.btnDone.clicked.connect(DialogCalibrate.accept)
QtCore.QMetaObject.connectSlotsByName(DialogCalibrate)
def retranslateUi(self, DialogCalibrate):
_translate = QtCore.QCoreApplication.translate
DialogCalibrate.setWindowTitle(_translate("DialogCalibrate", "Dialog"))
self.btnAoAWingTare.setText(_translate("DialogCalibrate", "Set AoA Wing Tare"))
self.lblRawAoA.setText(_translate("DialogCalibrate", "Raw AoA:"))
self.txtRawAoA.setText(_translate("DialogCalibrate", "N/A"))
self.lblRawAirspeed.setText(_translate("DialogCalibrate", "Raw Airspeed:"))
self.txtRawAirspeed.setText(_translate("DialogCalibrate", "N/A"))
self.btnAirspeedTare.setText(_translate("DialogCalibrate", "Set Airspeed Tare"))
self.btnDone.setText(_translate("DialogCalibrate", "Done"))
self.btnAoAPlatformTare.setText(_translate("DialogCalibrate", "Set AoA Platform Tare"))
self.lblAoAOffset.setText(_translate("DialogCalibrate", "AoA Offset:"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
DialogCalibrate = QtWidgets.QDialog()
ui = Ui_DialogCalibrate()
ui.setupUi(DialogCalibrate)
DialogCalibrate.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
5019274
|
<gh_stars>10-100
# coding: utf-8
""" DSFP core file
.. module:: dsfp.dsfp
:platform: Linux, Windows, MacOS X
:synopsis: utils for routines
.. moduleauthor:: Tarvitz<<EMAIL>>
"""
import six
from unittest import TestCase
from dsfp import DSSaveFileParser
import bz2
__all__ = ['TestDSFPReader', ]
class TestDSFPReader(TestCase):
"""DSFP parser reader unit tests"""
maxDiff = None
def setUp(self):
self.filename = bz2.BZ2File('saves/DRAKS0005.sl2.bz2')
# slots starts with 0
self.valid_slots = 2
self.slots = [
{
'deaths': 155,
'name': u'Карл'
},
{
'deaths': 0,
'name': u'Максимилиантр'
},
{
'deaths': 0,
'name': u'Smithy'
}
]
self.metadata = {
'slots': 11,
# it's always constant because of fixed block size
# but there's a header that contains block offsets inside of
# meta data block so we should check it
'start_offsets': [704, 394320, 787936, 1181552, 1575168, 1968784,
2362400, 2756016, 3149632, 3543248, 3936864],
'block_stat_size': [396, 396, 396, 396, 396, 396, 396, 396,
396, 396, 396]
}
def test_read_ds_file(self):
""" test get character dark souls file slots """
ds = DSSaveFileParser(filename=self.filename)
data = ds.get_active_slots_amount()
self.assertEqual(data, self.valid_slots)
def test_read_ds_slot_stats(self):
ds = DSSaveFileParser(filename=self.filename)
data = ds.get_stats()
for (idx, slot) in enumerate(data):
self.assertEqual(slot['deaths'], self.slots[idx]['deaths'])
self.assertEqual(slot['name'], self.slots[idx]['name'])
def test_read_ds_file_metadata(self):
""" read file metadata """
ds = DSSaveFileParser(filename=self.filename)
metadata = ds.get_blocks_metadata()
self.assertEqual(len(metadata), self.metadata['slots'])
messages = []
for idx, header in enumerate(metadata):
self.assertEqual(header.block_start_offset,
self.metadata['start_offsets'][idx])
try:
self.assertEqual(header.slot_data.block_stat_size,
self.metadata['block_stat_size'][idx])
except AssertionError as err:
messages.append({'err': err, 'msg': 'Got error'})
if messages:
for msg in messages:
print("%(msg)s: %(err)s" % msg)
raise AssertionError
|
StarcoderdataPython
|
3436401
|
<reponame>vinit2107/Data-Analysis
import mysql.connector
from mysql.connector import errorcode, cursor, Error, connection
from configparser import RawConfigParser
from Scripts.DDL.ddl_scripts import *
from Scripts.DML.dml_scripts import *
class MySQLHandler:
def create_connection(self, config: RawConfigParser):
"""
Function to establish a connection with a MySQL server using the credentials provided in the configuration file.
:param config: configuration file object obtained using configparser
"""
try:
conn = mysql.connector.connect(user=config.get('Database', 'mysql.username'),
password=config.get('Database', 'mysql.password'),
host=config.get('Database', 'mysql.hostname'))
conn.autocommit = True
cur = conn.cursor(buffered=True)
return conn, cur
except mysql.connector.Error as er:
print("Error connecting to the database")
if er.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Error in the configuration file. Please check the credentials!")
print(er)
raise er
def create_database(self, cur: cursor, dbname: str):
"""
Function to create a database of the name defined in the configuration.properties
:param cur: cursor obtained through connection to the database
:param dbname: name of the database to be created
"""
try:
print(create_database_query.format(dbname))
# cur.execute("CREATE DATABASE IF NOT EXISTS `flight-analysis`;")
cur.execute(create_database_query, (dbname, ))
print("executed")
cur.execute(use_database_query, (dbname, ))
# cur.execute("USE `flight-analysis`;")
except Error as er:
print("Error creating database in MySQL")
raise er
def create_table(self, cur: cursor, tableName: str):
"""
Function used to create a table of the given table name. The queries will be picked up from the Scripts folder.
:param cur: cursor obtained from the
:param tableName: name of the table
"""
try:
print("Creating table {}:".format(tableName), end='')
cur.execute(tables.get(tableName))
print('OK')
except Error as er:
if er.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print("already exists")
else:
raise er
def insert(self, cur: cursor, tableName: str, data):
"""
Function to insert records in table of the given name
:param data: data to be inserted in the table
:param cur: cursor obtained for MySQL
:param tableName: name of the table in which the data has to be inserted
"""
try:
print("Inserting single record in {}: ".format(tableName), end='')
cur.execute(insert_map.get(tableName), data)
# cur.commit()
print("OK")
except Error as er:
print("Error inserting records in {}".format(tableName))
raise er
def close_connection(self, cur: cursor, con: connection):
"""
Function to close the connection with MySQL server
:param cur: cursor obtained after connecting with the server
:param con: connection object obtained after connecting with the server
"""
try:
print("Closing connection with MySQL.")
cur.close()
con.close()
except Error as er:
print("Error disconnecting with the server")
raise er
def fetch_records(self, cur, tableName: str):
"""
Function to fetch records from the given table name.
:param cur: cursor obtained for the table
:param tableName: name of the table for which the records need to be fetched
:return:
"""
try:
cur.execute(fetch_map.get(tableName))
print("OK")
return cur.fetchall()
except Exception as ex:
print("Error fetching records from {}".format(tableName))
raise ex
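

# Hedged usage sketch (assumed, not part of the original module): the section
# and key names mirror what create_connection reads; a reachable MySQL server
# and a configuration.properties file next to the script are assumed.
if __name__ == '__main__':
    config = RawConfigParser()
    config.read('configuration.properties')

    handler = MySQLHandler()
    conn, cur = handler.create_connection(config)
    handler.create_database(cur, 'flight-analysis')
    handler.close_connection(cur, conn)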
|
StarcoderdataPython
|
261138
|
<reponame>themlphdstudent/pySIM
"""
author: <NAME>
email: <EMAIL>
Licence: BSD 2 clause
Description: Example of using Euclidean distance measure.
"""
import numpy as np
import os
import sys
# temporary solution for relative imports in case pysim is not installed
# if pysim is installed, no need to use the following line
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname("__file__"), '..')))
from pysiml.distances import euclidean
if __name__ == "__main__":
a = np.array([1, 2, 43])
b = np.array([3, 2, 1])
euclidean_dist = euclidean(a, b)
print('Euclidean distance is : {}'.format(euclidean_dist))
|
StarcoderdataPython
|
5067441
|
<filename>While Conditions/asterisk_condition.py
"""
Print a left-aligned triangle of asterisks
with as many lines as the user requests.
"""
#Declaring Variables
x = 1
#Asking for number of lines
lines = int(input("What is the number of lines you want: "))
#Initiating loop
while x <= lines:
print('*' * x)
x = x + 1
#Stating end of Loop
print("The loop has ended")
|
StarcoderdataPython
|
362417
|
from setuptools import setup, find_packages
from gitdl import __version__
setup(
name='gitdl',
version=__version__,
description='Download git repositories locally',
long_description="",
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=find_packages(),
install_requires=['requests', 'docopt', 'tabulate', 'tqdm'],
entry_points={
'console_scripts': [
'gitdl=gitdl.gitdl:main',
],
},
)
|
StarcoderdataPython
|
5102514
|
import argparse
import json
import subprocess
import tempfile
from glob import glob
from pathlib import Path
from typing import List, Union
from urllib import request
import platform
import os
project_root = Path(__file__).absolute().parent
def run_subprocess(command):
status, output = subprocess.getstatusoutput(command)
if status != 0:
raise RuntimeError(f"Failed to run: {command}\n{output}")
os_name = platform.system()
architecture_name = platform.machine().lower()
def get_release(url: str, version: str):
resp = request.urlopen(url)
releases = json.loads(resp.read())
target_release = None
for release in releases:
if release["tag_name"] == version:
assert (
target_release is None
), f"Multiple releases were found with tag_name: {version}."
target_release = release
if target_release is None:
raise RuntimeError(f"No release was found with version: {version}.")
return target_release
def get_ort_download_link(version: str, use_cuda: bool, use_directml: bool) -> str:
target_release = get_release(
"https://api.github.com/repos/microsoft/onnxruntime/releases", version
)
assets = target_release["assets"]
assert assets
def filter_assets(keywords: Union[str, List[str]]) -> None:
if isinstance(keywords, str):
keywords = [keywords]
nonlocal assets
new_assets = []
for asset in assets:
for keyword in keywords:
if keyword in asset["name"]:
new_assets.append(asset)
break
if not new_assets:
raise RuntimeError(
f"Asset was not found. Most likely the version is too old or new that {__file__} does not support. "
"Try `--ort_download_link` option to manually specify the download link."
)
assets = new_assets
arch_type = architecture_name
if arch_type in ["x86_64", "amd64"]:
arch_type = ["x64", "x86_64"]
if use_directml:
filter_assets("DirectML")
elif os_name == "Windows":
filter_assets("win")
filter_assets(arch_type)
if use_cuda:
filter_assets("gpu")
elif os_name == "Darwin":
if use_cuda or use_directml:
raise RuntimeError("onnxruntime for osx does not support gpu.")
filter_assets("osx")
filter_assets(arch_type)
elif os_name == "Linux":
filter_assets("linux")
filter_assets(arch_type)
if use_cuda:
filter_assets("gpu")
else:
raise RuntimeError(f"Unsupported os type: {os_name}.")
assets = sorted(assets, key=lambda x: x["name"])
return assets[0]["browser_download_url"]
def download_and_extract_ort(download_link):
if (project_root / "onnxruntime").exists():
yn = input("Found existing onnxruntime directory. Overwrite? [yn]: ")
while yn != "y" and yn != "n":
yn = input("Please press y or n: ")
if yn == "n":
return
subprocess.getoutput(f"rm -r {project_root / 'onnxruntime'}")
print(f"Downloading onnxruntime from {download_link}...")
with tempfile.TemporaryDirectory() as tmp_dir:
if(os_name == "Windows"):
run_subprocess(
f'powershell -Command "cd {tmp_dir}; curl.exe {download_link} -L -o archive.zip"')
if "DirectML" in download_link:
run_subprocess(
f'powershell -Command "cd {tmp_dir}; Expand-Archive -Path archive.zip; Copy-Item archive -Recurse {project_root}/onnxruntime"')
else:
run_subprocess(
f'powershell -Command "cd {tmp_dir}; Expand-Archive -Path archive.zip -Destination ./; Copy-Item onnxruntime* -Recurse {project_root}/onnxruntime"')
else:
run_subprocess(f"cd {tmp_dir} && wget {download_link} -O archive")
extract_cmd = "unzip" if download_link.endswith(
".zip") else "tar xzf"
run_subprocess(
f"cd {tmp_dir} && {extract_cmd} archive && cp -r onnxruntime* {project_root}/onnxruntime")
def get_dml_download_link(version: str):
resp = request.urlopen(
"https://api.nuget.org/v3/registration5-semver1/microsoft.ai.directml/index.json")
jsonData = json.loads(resp.read())
releases = jsonData["items"][0]["items"]
target_release = None
for release in releases:
if release["catalogEntry"]["version"] == version:
assert (
target_release is None
), f"Multiple releases were found with tag_name: {version}."
target_release = release
if target_release is None:
raise RuntimeError(f"No release was found with version: {version}.")
return target_release["catalogEntry"]["packageContent"]
def download_and_extract_dml(link):
if(project_root / "directml").exists():
print(
"Skip downloading DirectML because directml directory already exists."
)
return
print(f"Downloading DirectML from {link}")
with tempfile.TemporaryDirectory() as tmp_dir:
run_subprocess(
f'powershell -Command "cd {tmp_dir}; curl.exe {link} -L -o archive.zip"')
run_subprocess(
f'powershell -Command "cd {tmp_dir}; Expand-Archive -Path archive.zip; Copy-Item archive -Recurse {project_root}/directml"')
def get_voicevox_download_link(version) -> str:
target_release = get_release(
"https://api.github.com/repos/VOICEVOX/voicevox_core/releases", version
)
assets = target_release["assets"]
for asset in assets:
if asset["name"] == "core.zip":
return asset["browser_download_url"]
raise RuntimeError(
f"Asset was not found. Most likely the version is too old or new that {__file__} does not support. "
"Try `--voicevox_download_link` option to manually specify the download link."
)
def download_and_extract_voicevox(download_link):
if (project_root / "release").exists():
print(
"Skip downloading voicevox release because release directory already exists."
)
return
print(f"Downloading voicevox from {download_link}...")
with tempfile.TemporaryDirectory() as tmp_dir:
if os_name == "Windows":
run_subprocess(
f'powershell -Command "cd {tmp_dir}; curl.exe {download_link} -L -O;')
run_subprocess(
f'powershell -Command "cd {tmp_dir}; Expand-Archive -Path core.zip -Destination ./; Copy-Item core -Recurse {project_root}/release"')
else:
run_subprocess(
f"cd {tmp_dir} && wget {download_link} && unzip core.zip && cp -r core {project_root}/release"
)
def link_files(use_directml: bool):
lib_prefix = ""
lib_suffix = ""
if os_name == "Darwin":
lib_prefix = "lib"
lib_suffix = ".dylib"
elif os_name == "Linux":
lib_prefix = "lib"
lib_suffix = ".so"
elif os_name == "Windows":
lib_prefix = ""
lib_suffix = ".dll"
else:
raise RuntimeError(f"Unsupported os type: {os_name}.")
core_libs = glob(str(project_root/'release'/(lib_prefix+'*'+lib_suffix)))
assert core_libs
target_core_lib = None
if len(core_libs) == 1:
target_core_lib = core_libs[0]
else:
# TODO: Use better name so that we can decide from os_type or arch_type.
print("Please enter number to select which library to use.")
for i, name in enumerate(core_libs):
print(f"{i}: {name}")
index = input("Your choice: ")
while not index.isdigit():
index = input("Please enter number")
index = int(index)
target_core_lib = core_libs[index]
link_cmd = "copy /y" if os_name == "Windows" else "ln -s"
os.makedirs(project_root / 'core/lib', exist_ok=True)
run_subprocess(
f"{link_cmd} {project_root/'release'/'core.h'} {project_root/'core'/'lib'}")
run_subprocess(
f"{link_cmd} {target_core_lib} {project_root/'core'/'lib'/(f'{lib_prefix}core{lib_suffix}')}")
if use_directml:
arch_type = ""
if architecture_name in ["x86_64", "x64", "amd64"]:
arch_type = "x64"
elif architecture_name in ["i386", "x86"]:
arch_type = "x86"
elif architecture_name == "armv7l":
arch_type = "arm"
elif architecture_name == "aarch64":
arch_type = "arm64"
else:
raise RuntimeError(
f"Unsupported architecture type: {architecture_name}")
dll_file_path = project_root / "directml" / \
'bin' / f"{arch_type}-win" / "DirectML.dll"
run_subprocess(
f"copy /y {dll_file_path} {project_root / 'core'/'lib'/'DirectML.dll'}"
)
ort_libs = glob(str(os.path.join(
project_root, "onnxruntime", "runtimes", f"win-{arch_type}", "native", "*.dll")))
else:
ort_libs = glob(
str(project_root/'onnxruntime'/'lib'/(f"{lib_prefix}*{lib_suffix}*")))
assert ort_libs
for ort_lib in ort_libs:
run_subprocess(f"{link_cmd} {ort_lib} {project_root/'core'/'lib'}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--voicevox_version",
default="0.10.0",
help="voicevox release tag found in https://github.com/VOICEVOX/voicevox_core/releases",
)
parser.add_argument("--voicevox_download_link",
help="voicevox download link")
parser.add_argument(
"--ort_version",
default="v1.10.0",
help="onnxruntime release tag found in https://github.com/microsoft/onnxruntime/releases",
)
parser.add_argument("--ort_download_link",
help="onnxruntime download link")
parser.add_argument(
"--use_cuda", action="store_true", help="enable cuda for onnxruntime"
)
parser.add_argument(
"--use_directml", action="store_true", help="enable directml for onnxruntime"
)
parser.add_argument(
"--dml_version",
default="1.8.0",
help="DirectML version found in https://www.nuget.org/packages/Microsoft.AI.DirectML",
)
parser.add_argument(
"--dml_download_link",
help="directml download link"
)
args = parser.parse_args()
ort_download_link = args.ort_download_link
if args.use_directml and os_name != "Windows":
raise RuntimeError(
"onnxruntime for Mac or Linux don't support DirectML")
if not ort_download_link:
ort_download_link = get_ort_download_link(
args.ort_version, args.use_cuda, args.use_directml)
download_and_extract_ort(ort_download_link)
voicevox_download_link = args.voicevox_download_link
if not voicevox_download_link:
voicevox_download_link = get_voicevox_download_link(
args.voicevox_version)
download_and_extract_voicevox(voicevox_download_link)
if args.use_directml:
dml_download_link = args.dml_download_link
if not dml_download_link:
dml_download_link = get_dml_download_link(args.dml_version)
download_and_extract_dml(dml_download_link)
lib_path = project_root / "core/lib"
if lib_path.exists():
yn = input("Found existing library at core/lib/. Overwrite? [yn]: ")
while yn != "y" and yn != "n":
yn = input("Please press y or n: ")
if yn == "n":
exit()
subprocess.getoutput(f"rm -r {lib_path}")
link_files(args.use_directml)
print("Successfully configured!")
|
StarcoderdataPython
|
4917583
|
def compChooseWord(hand, wordList, n):
"""
Given a hand and a wordList, find the word that gives
the maximum value score, and return it.
This word should be calculated by considering all the words
in the wordList.
If no words in the wordList can be made from the hand, return None.
hand: dictionary (string -> int)
wordList: list (string)
n: integer (HAND_SIZE; i.e., hand size required for additional points)
returns: string or None
"""
bestScore = 0
bestWord = None
for word in wordList:
if isValidWord(word, hand, wordList):
score = getWordScore(word, n)
if score > bestScore:
bestScore = score
bestWord = word
return bestWord
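

# Smoke test with stand-in helpers (hedged: the real isValidWord and
# getWordScore live elsewhere in the word-game exercise; these trivial
# versions are assumed here only so the selection loop can run).
if __name__ == '__main__':
    def isValidWord(word, hand, wordList):
        return word in wordList

    def getWordScore(word, n):
        return len(word)

    print(compChooseWord({'a': 1}, ['apple', 'ox', 'banana'], 7))  # -> banana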
|
StarcoderdataPython
|
11215994
|
"""
artifact_downloader.py: Fetch artifacts into a location, where a Maven repository is being built given
a list of artifacts and a remote repository URL.
"""
import logging
import os
import re
import urlparse
from multiprocessing import Queue
from multiprocessing import Lock
from multiprocessing.pool import ThreadPool
import maven_repo_util
from maven_artifact import MavenArtifact
def downloadArtifacts(remoteRepoUrl, localRepoDir, artifact, checksumMode, mkdirLock, filesetLock, fileset, errors):
"""Download artifact from a remote repository."""
logging.debug("Starting download of %s", str(artifact))
artifactLocalDir = os.path.join(localRepoDir, artifact.getDirPath())
try:
# handle parallelism, when two threads checks if a directory exists and then both tries to create it
mkdirLock.acquire()
if not os.path.exists(artifactLocalDir):
os.makedirs(artifactLocalDir)
mkdirLock.release()
remoteRepoUrl = maven_repo_util.slashAtTheEnd(remoteRepoUrl)
# Download main artifact
artifactUrl = remoteRepoUrl + artifact.getArtifactFilepath()
artifactLocalPath = os.path.join(localRepoDir, artifact.getArtifactFilepath())
maven_repo_util.fetchFile(artifactUrl, artifactLocalPath, checksumMode, True, True, filesetLock, fileset)
except BaseException as ex:
logging.error("Error while downloading artifact %s: %s", artifact, str(ex))
errors.put(ex)
def copyArtifact(remoteRepoPath, localRepoDir, artifact, checksumMode):
"""Copy artifact from a repository on the local file system along with pom and source jar"""
# Copy main artifact
artifactPath = os.path.join(remoteRepoPath, artifact.getArtifactFilepath())
artifactLocalPath = os.path.join(localRepoDir, artifact.getArtifactFilepath())
if os.path.exists(artifactPath) and not os.path.exists(artifactLocalPath):
maven_repo_util.fetchFile(artifactPath, artifactLocalPath, checksumMode)
def depListToArtifactList(depList):
"""Convert the maven GAV to a URL relative path"""
regexComment = re.compile('#.*$')
#regexLog = re.compile('^\[\w*\]')
artifactList = []
for nextLine in depList:
nextLine = regexComment.sub('', nextLine)
nextLine = nextLine.strip()
gav = maven_repo_util.parseGATCVS(nextLine)
if gav:
artifactList.append(MavenArtifact.createFromGAV(gav))
return artifactList
def fetchArtifactList(remoteRepoUrl, localRepoDir, artifactList, checksumMode):
"""Create a Maven repository based on a remote repository url and a list of artifacts"""
logging.info('Retrieving artifacts from repository: %s', remoteRepoUrl)
if not os.path.exists(localRepoDir):
os.makedirs(localRepoDir)
parsedUrl = urlparse.urlparse(remoteRepoUrl)
protocol = parsedUrl[0]
repoPath = parsedUrl[2]
if protocol == 'http' or protocol == 'https':
# Create thread pool
pool = ThreadPool(maven_repo_util.MAX_THREADS)
errors = Queue()
mkdirLock = Lock()
filesetLock = Lock()
fileset = set([])
for artifact in artifactList:
if artifact.isSnapshot():
maven_repo_util.updateSnapshotVersionSuffix(artifact, remoteRepoUrl)
pool.apply_async(
downloadArtifacts,
[remoteRepoUrl, localRepoDir, artifact, checksumMode, mkdirLock, filesetLock, fileset, errors]
)
# Close pool and wait till all workers are finished
pool.close()
pool.join()
# If one of the workers threw an error, log it
if not errors.empty():
logging.error("During fetching files from repository %s %i error(s) occurred.", remoteRepoUrl,
errors.qsize())
elif protocol == 'file':
repoPath = remoteRepoUrl.replace('file://', '')
for artifact in artifactList:
if artifact.isSnapshot():
maven_repo_util.updateSnapshotVersionSuffix(artifact, remoteRepoUrl)
copyArtifact(repoPath, localRepoDir, artifact, checksumMode)
else:
logging.error('Unknown protocol: %s', protocol)
def fetchArtifactLists(urlToMAList, outputDir, checksumMode):
"""
Fetch lists of artifacts each list from its repository.
"""
for repoUrl in urlToMAList.keys():
artifacts = urlToMAList[repoUrl]
fetchArtifactList(repoUrl, outputDir, artifacts, checksumMode)
|
StarcoderdataPython
|
5170195
|
import time
import xml.etree.ElementTree as ET
def handleMessage(oriData):
xmldata = ET.fromstring(oriData)
fromUserName = xmldata.find("FromUserName").text
toUserName = xmldata.find("ToUserName").text
content = xmldata.find("Content").text
xmlDict = {"FromUserName": fromUserName,"ToUserName": toUserName, "Content": content}
return xmlDict
def responseMessage(content_dict={"": ""}, type="text"):
toName = content_dict["FromUserName"]
fromName = content_dict["ToUserName"]
resContent = "Sorry, this account is under development."
reply = """
<xml>
<ToUserName><![CDATA[%s]]></ToUserName>
<FromUserName><![CDATA[%s]]></FromUserName>
<CreateTime>%s</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[%s]]></Content>
<FuncFlag>0</FuncFlag>
</xml>
"""
resStr = reply % (toName, fromName, int(time.time()), resContent)
return resStr
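

# Round-trip sketch with a minimal WeChat-style text message (assumed sample
# payload, not part of the original module).
if __name__ == '__main__':
    sample = """
    <xml>
      <ToUserName><![CDATA[official_account]]></ToUserName>
      <FromUserName><![CDATA[follower]]></FromUserName>
      <CreateTime>1620000000</CreateTime>
      <MsgType><![CDATA[text]]></MsgType>
      <Content><![CDATA[hello]]></Content>
    </xml>
    """
    parsed = handleMessage(sample)
    print(responseMessage(parsed))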
|
StarcoderdataPython
|
4874931
|
<gh_stars>1-10
from selenosis.settings import * # noqa
INSTALLED_APPS += ('tests',) # noqa
|
StarcoderdataPython
|
3468393
|
# coding=utf-8
"""
__purpose__ = ...
__author__ = JeeysheLu [<EMAIL>] [https://www.lujianxin.com/] [2020/8/12 10:18]
Copyright (c) 2020 JeeysheLu
This software is licensed to you under the MIT License. Looking forward to making it better.
"""
import sys
if __name__ == '__main__':
arg = sys.argv
print(arg)
s = [
{"code": 1000, "message": "xxx"},
{"code": 1003, "message": "xxx"},
{"code": 1004, "message": "xxx"},
{"code": 1002, "message": "xxx"},
{"code": 1005, "message": "xxx"},
]
print(sorted(s, key=lambda x: x.get("code")))
|
StarcoderdataPython
|
3343700
|
import positron
positron.main_level = positron.LogLevel.DEBUG
log = positron.Logger('log', positron.LogLevel.IMPORTANT)
log.enable_file_logging()
log.debug('debug')
log.io('io')
log.info('info')
log.warning('warning')
log.error('error')
log.important('important')
log.critical('critical')
log.iochars = 'MSG'
log.io('msg')
|
StarcoderdataPython
|
9793435
|
# IANA registry
PROFILE = 'profile'
SELF = 'self'
# Local relations
_root = 'http://rels.registronavale.com/'
SHIP_OWNER = _root + 'ship-owner'
SEARCH_SHIPS = _root + 'search-ships'
SHIP_BY_IMO = _root + 'ship-by-imo'
OWNED_SHIPS = _root + 'owned-ships'
|
StarcoderdataPython
|