Dataset schema (one record per source file; ⌀ marks nullable fields):

| field | dtype | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | 0 to 112 items |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (⌀) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (⌀) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (⌀) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | 1 item |
| author_id | string | length 1 to 132 |

The records below retain the viewer's flattened layout: pipe-separated metadata fields, then the file content, then the authors list, closed by `| |`.
195624fb20c54ced15a65be4c1af7cb329cc3b1c | 31f9333012fd7dad7b8b12c1568f59f33420b0a5 | /Alessandria/env/lib/python3.8/site-packages/django/contrib/staticfiles/testing.py | 754bd296574e9e20066c857e41043e1bb11bfcc3 | []
| no_license | jcmloiacono/Django | 0c69131fae569ef8cb72b135ab81c8e957d2a640 | 20b9a4a1b655ae4b8ff2a66d50314ed9732b5110 | refs/heads/master | 2022-11-15T22:18:57.610642 | 2020-07-14T14:43:16 | 2020-07-14T14:43:16 | 255,125,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.test import LiveServerTestCase
class StaticLiveServerTestCase(LiveServerTestCase):
"""
Extend django.test.LiveServerTestCase to transparently overlay at test
    execution-time the assets provided by the staticfiles app finders. This
means you don't need to run collectstatic before or as a part of your tests
setup.
"""
static_handler = StaticFilesHandler
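# Usage sketch (editor's addition; the test name and asset path are
# hypothetical): subclassing lets live-server tests serve static assets
# through the staticfiles finders, with no prior `collectstatic` run.
#
#     class HomepageAssetsTest(StaticLiveServerTestCase):
#         def test_stylesheet_is_served(self):
#             from urllib.request import urlopen
#             with urlopen(self.live_server_url + '/static/css/base.css') as resp:
#                 self.assertEqual(resp.status, 200)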
| [
"[email protected]"
]
| |
e02f4a0c5b78cca43171902e5b8212d0c9bf443a | 2fe18f4babd857381c2251f1c2437ccdae234dd8 | /bookmarks/bookmarks/settings.py | 273af7e947825b97a57cf7f7558397f12874a3f2 | []
| no_license | Akhtyrtsev/bookmarks | 62f23d87c9442aaa2f56c73dd52ddbf8e456f7e1 | c8c52f1a9d4674a7187ad2408af7c090424a9738 | refs/heads/master | 2020-07-03T23:17:44.547699 | 2019-08-15T12:27:04 | 2019-08-15T12:27:04 | 202,083,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,627 | py | """
Django settings for bookmarks project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '71wl&ele@0v_^508xm(cy)z!%6is^_sb1k_k4b$2=1gzupra-r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'account.apps.AccountConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookmarks.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bookmarks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGIN_REDIRECT_URL = 'dashboard'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'account.authentication.EmailAuthBackend',
]
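# `account.authentication.EmailAuthBackend` is referenced above but not part of
# this dump; a minimal sketch of such a backend, assuming it authenticates
# against User.email instead of the username:
#
#     from django.contrib.auth.models import User
#
#     class EmailAuthBackend:
#         def authenticate(self, request, username=None, password=None):
#             try:
#                 user = User.objects.get(email=username)
#                 return user if user.check_password(password) else None
#             except User.DoesNotExist:
#                 return None
#
#         def get_user(self, user_id):
#             try:
#                 return User.objects.get(pk=user_id)
#             except User.DoesNotExist:
#                 return None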
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'mytestmail842mytestmail842'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
| [
"[email protected]"
]
| |
440d156989c7d14212ee7acec2a615fa1d0d34cc | f75f9c0e7192170a5846c0b726b10e645d5812b7 | /tests/test_models.py | 845a6eaf73b1e3765e21211184bc835c50c73de7 | [
"MIT"
]
| permissive | mzbotr/betfair.py | 6feff7250fec38c31ef9c89fc15a057c935d7274 | dca804a4eaf999af54c53589e9559409fae26d6f | refs/heads/master | 2021-01-21T06:02:35.902807 | 2015-06-15T04:05:51 | 2015-06-15T04:05:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | # -*- coding: utf-8 -*-
import pytest
from enum import Enum
from schematics.types import StringType
from betfair.meta.types import EnumType
from betfair.meta.types import ModelType
from betfair.meta.models import BetfairModel
def test_field_inflection():
class FakeModel(BetfairModel):
underscore_separated_field = StringType()
record = FakeModel(underscoreSeparatedField='test')
assert record.underscore_separated_field == 'test'
serialized = record.serialize()
assert 'underscoreSeparatedField' in serialized
assert serialized['underscoreSeparatedField'] == 'test'
FakeEnum = Enum(
'TestEnum', [
'val1',
'val2',
]
)
@pytest.mark.parametrize(['input', 'expected'], [
('val1', 'val1'),
(FakeEnum.val1, 'val1'),
])
def test_enum_type(input, expected):
class FakeModel(BetfairModel):
enum_field = EnumType(FakeEnum)
datum = FakeModel(enum_field=input)
datum.validate()
serialized = datum.serialize()
assert serialized['enumField'] == expected
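# `EnumType` comes from betfair.meta.types (not shown in this dump); a minimal
# sketch of a compatible schematics type, assuming it accepts either an enum
# member or a member name and serializes to the member's name:
#
#     from schematics.types import BaseType
#
#     class EnumTypeSketch(BaseType):
#         def __init__(self, enum, **kwargs):
#             self.enum = enum
#             super(EnumTypeSketch, self).__init__(**kwargs)
#
#         def to_native(self, value, context=None):
#             return value if isinstance(value, self.enum) else self.enum[value]
#
#         def to_primitive(self, value, context=None):
#             return value.name if isinstance(value, self.enum) else value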
class Child(BetfairModel):
child_name = StringType()
class Parent(BetfairModel):
parent_name = StringType()
child = ModelType(Child)
def test_nested_model():
parent = Parent(parent_name='mom', child=dict(child_name='kid'))
expected = {
'parentName': 'mom',
'child': {
'childName': 'kid',
},
}
assert parent.serialize() == expected
def test_nested_model_unserialize_rogue():
    # must not raise even though the payload carries the unknown `rogue` field
    Parent(parent_name='dad', child=dict(child_name='kid', rogue='rogue'))
| [
"[email protected]"
]
| |
61511f49964ca71e6a0f6d8c8c5023828b810084 | 55909fd5282ea210f2221fc467f71f9ed41b0bef | /Aula 13/ex056.py | 5a65082bf9d2352ec7ab655f7557494215f5ccf6 | [
"MIT"
]
| permissive | alaanlimaa/Python_CVM1-2-3 | 163ecd8c9145f2d332e6574d8923373b87a2e1f5 | 6d9a9bd693580fd1679a1d0b23afd26841b962a6 | refs/heads/main | 2023-06-18T16:07:59.930804 | 2021-07-20T16:22:01 | 2021-07-20T16:22:01 | 387,841,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | midade = Hmaior = nomevelho = contM20 = 0
for p in range(1, 5):
    print('-=-' * 10)
    print(f'Person #{p}')
    nome = str(input('Name: ')).strip()
    idade = int(input('Age: '))
    sexo = str(input('Sex [F/M]: ')).strip()[0]
    midade += idade
    if p == 1 and sexo in 'Mm':
        Hmaior = idade
        nomevelho = nome
    if sexo in 'Mm' and idade > Hmaior:
        Hmaior = idade
        nomevelho = nome
    if sexo in 'Ff' and idade < 20:
        contM20 += 1
print(f'The average age of the group is {midade / p:.2f} years')
print(f'The oldest man is {Hmaior} years old and his name is {nomevelho}')
print(f'There are {contM20} women under 20 years old')
| [
"[email protected]"
]
| |
169d1b34052601f7372457060040c76fbb71fe6b | 498d65615aeba1f7399344a32a23514e057fb30e | /decode_verify_jwt.py | 224caf0f4e6b9ae7531dc23017880f0ac6b66eee | []
| no_license | gautamamber/Blog-Serverless-chalice | 54fd128f76a3e918a170225bb49ded0874089a61 | e1735c5bb617bdb9720b5ecf847ea32833d7e5bc | refs/heads/master | 2020-08-04T15:17:44.405145 | 2019-10-02T14:33:32 | 2019-10-02T14:33:32 | 212,181,532 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,896 | py | from urllib.request import urlopen
import json
import os
import time
from constant import Constants
from jose import jwk, jwt
from jose.utils import base64url_decode
region = 'us-west-2'
userpool_id = Constants.COGNITO_POOL_ID
app_client_id = Constants.COGNITO_CLIENT
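# `constant.Constants` is not included in this dump; a minimal sketch of the
# shape implied by the two lookups above (values are placeholders):
#
#     class Constants:
#         COGNITO_POOL_ID = 'us-west-2_XXXXXXXXX'
#         COGNITO_CLIENT = 'your-app-client-id'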
keys_url = 'https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json'.format(region, userpool_id)
# instead of re-downloading the public keys every time
# we download them only on cold start
# https://aws.amazon.com/blogs/compute/container-reuse-in-lambda/
response = urlopen(keys_url)
keys = json.loads(response.read())['keys']
def token_verification(token):
# get the kid from the headers prior to verification
headers = jwt.get_unverified_headers(token)
kid = headers['kid']
# search for the kid in the downloaded public keys
key_index = -1
for i in range(len(keys)):
if kid == keys[i]['kid']:
key_index = i
break
if key_index == -1:
return False
# construct the public key
public_key = jwk.construct(keys[key_index])
# get the last two sections of the token,
# message and signature (encoded in base64)
message, encoded_signature = str(token).rsplit('.', 1)
# decode the signature
decoded_signature = base64url_decode(encoded_signature.encode('utf-8'))
# verify the signature
if not public_key.verify(message.encode("utf8"), decoded_signature):
return False
# since we passed the verification, we can now safely
# use the unverified claims
claims = jwt.get_unverified_claims(token)
# additionally we can verify the token expiration
if time.time() > claims['exp']:
return False
# and the Audience (use claims['client_id'] if verifying an access token)
if claims['aud'] != app_client_id:
return False
# now we can use the claims
return claims
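# Usage sketch (editor's addition; `id_token` and the claim key are
# placeholders, not part of the original module):
#
#     claims = token_verification(id_token)
#     if not claims:
#         raise Exception('token failed verification')
#     print(claims['sub'])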
| [
"[email protected]"
]
| |
f135349869cce6877593dc177603adef88a8dd07 | 8eb2e7d0b82e26b8999c1e2f14b4fe0f7dfeab65 | /scripts/run_slim_bpr_cython_baesyan.py | 8262e9aefd632f8690b346aca92562dd0f270d73 | [
"Apache-2.0"
]
| permissive | edervishaj/spotify-recsys-challenge | c8d66cec51495bef85809dbbff183705e53a7bd4 | 4077201ac7e4ed9da433bd10a92c183614182437 | refs/heads/master | 2021-06-28T14:59:02.619439 | 2020-10-03T09:53:50 | 2020-10-03T09:53:50 | 150,008,507 | 0 | 0 | Apache-2.0 | 2020-10-03T09:53:51 | 2018-09-23T17:31:20 | Jupyter Notebook | UTF-8 | Python | false | false | 7,642 | py | from personal.MaurizioFramework.ParameterTuning.BayesianSearch import BayesianSearch
from personal.MaurizioFramework.ParameterTuning.AbstractClassSearch import DictionaryKeys
from utils.definitions import ROOT_DIR
import pickle
from personal.MaurizioFramework.SLIM_BPR.Cython.SLIM_BPR_Cython import SLIM_BPR_Cython
from recommenders.similarity.dot_product import dot_product
from utils.datareader import Datareader
from utils.evaluator import Evaluator
from utils.bot import Bot_v1
from tqdm import tqdm
import scipy.sparse as sps
import numpy as np
import sys
def run_SLIM_bananesyan_search(URM_train, URM_validation, logFilePath = ROOT_DIR+"/results/logs_baysian/"):
recommender_class = SLIM_BPR_Cython
bananesyan_search = BayesianSearch(recommender_class, URM_validation=URM_validation,
evaluation_function=evaluateRecommendationsSpotify_BAYSIAN)
hyperparamethers_range_dictionary = {}
hyperparamethers_range_dictionary["topK"] = [100, 150, 200, 250, 300, 350, 400, 500]
hyperparamethers_range_dictionary["lambda_i"] = [1e-7,1e-6,1e-5,1e-4,1e-3,0.001,0.01,0.05,0.1]
hyperparamethers_range_dictionary["lambda_j"] = [1e-7,1e-6,1e-5,1e-4,1e-3,0.001,0.01,0.05,0.1]
hyperparamethers_range_dictionary["learning_rate"] = [0.1,0.01,0.001,0.0001,0.00005,0.000001, 0.0000001]
hyperparamethers_range_dictionary["minRatingsPerUser"] = [0, 5, 50, 100]
logFile = open(logFilePath + recommender_class.RECOMMENDER_NAME + "_BayesianSearch Results.txt", "a")
recommenderDictionary = {DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [],
DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {
"URM_train":URM_train,
"positive_threshold":0,
"URM_validation":URM_validation,
"final_model_sparse_weights":True,
"train_with_sparse_weights":True,
"symmetric" : True},
DictionaryKeys.FIT_POSITIONAL_ARGS: dict(),
DictionaryKeys.FIT_KEYWORD_ARGS: {
"epochs" : 5,
"beta_1" : 0.9,
"beta_2" : 0.999,
"validation_function": evaluateRecommendationsSpotify_RECOMMENDER,
"stop_on_validation":True ,
"sgd_mode" : 'adam',
"validation_metric" : "ndcg_t",
"lower_validatons_allowed":3,
"validation_every_n":1},
DictionaryKeys.FIT_RANGE_KEYWORD_ARGS: hyperparamethers_range_dictionary}
best_parameters = bananesyan_search.search(recommenderDictionary,
metric="ndcg_t",
n_cases=200,
output_root_path=""+logFilePath + recommender_class.RECOMMENDER_NAME,
parallelPoolSize=4)
logFile.write("best_parameters: {}".format(best_parameters))
logFile.flush()
logFile.close()
pickle.dump(best_parameters, open(logFilePath + recommender_class.RECOMMENDER_NAME + "_best_parameters", "wb"),
protocol=pickle.HIGHEST_PROTOCOL)
def evaluateRecommendationsSpotify_RECOMMENDER(recommender):
"""
    THIS FUNCTION WORKS INSIDE THE RECOMMENDER
    (relies on the module-level globals `ev` and `pids_converted` set in __main__)
    :param recommender: the recommender instance under evaluation
    :return: dict mapping metric names to their values
"""
user_profile_batch = recommender.URM_train[pids_converted]
eurm = dot_product(user_profile_batch, recommender.W_sparse, k=500).tocsr()
recommendation_list = np.zeros((10000, 500))
for row in tqdm(range(eurm.shape[0]), desc="spotify rec list"):
val = eurm[row].data
ind = val.argsort()[-500:][::-1]
ind = eurm[row].indices[ind]
recommendation_list[row, 0:len(ind)] = ind
prec_t, ndcg_t, clicks_t, prec_a, ndcg_a, clicks_a = ev.evaluate(recommendation_list=recommendation_list,
name=recommender.configuration+"epoca"+
str(recommender.currentEpoch),
return_overall_mean=True, verbose = False,
show_plot=False, do_plot=True)
results_run = {}
results_run["prec_t"] = prec_t
results_run["ndcg_t"] = ndcg_t
results_run["clicks_t"] = clicks_t
results_run["prec_a"] = prec_a
results_run["ndcg_a"] = ndcg_a
results_run["clicks_a"] = clicks_a
return (results_run)
def evaluateRecommendationsSpotify_BAYSIAN(recommender, URM_validation, paramether_dictionary):
    """
    THIS FUNCTION WORKS INSIDE THE BAYESIAN-GRID SEARCH
    (relies on the module-level globals `ev` and `pids_converted` set in __main__)
    :param recommender: the fitted recommender under evaluation
    :param URM_validation: validation URM, kept to match the search API
    :param paramether_dictionary: current hyperparameter assignment, kept to match the search API
    :return: dict mapping metric names to their values
    """
user_profile_batch = recommender.URM_train[pids_converted]
eurm = dot_product(user_profile_batch, recommender.W_sparse, k=500).tocsr()
recommendation_list = np.zeros((10000, 500))
for row in tqdm(range(eurm.shape[0]), desc="spotify rec list"):
val = eurm[row].data
ind = val.argsort()[-500:][::-1]
ind = eurm[row].indices[ind]
recommendation_list[row, 0:len(ind)] = ind
prec_t, ndcg_t, clicks_t, prec_a, ndcg_a, clicks_a = ev.evaluate(recommendation_list=recommendation_list,
name=recommender.configuration+"epoca"+str(recommender.currentEpoch),
return_overall_mean=True, verbose= False,
show_plot=False, do_plot=True)
results_run = {}
results_run["prec_t"] = prec_t
results_run["ndcg_t"] = ndcg_t
results_run["clicks_t"] = clicks_t
results_run["prec_a"] = prec_a
results_run["ndcg_a"] = ndcg_a
results_run["clicks_a"] = clicks_a
return (results_run)
if __name__ == '__main__':
bot = Bot_v1("keplero bananesyan slim")
try:
######################SHRINKED
dr = Datareader(mode="offline", train_format="100k", only_load=True)
ev = Evaluator(dr)
pids = dr.get_test_pids()
urm, dictns, dict2 = dr.get_urm_shrinked()
urm_evaluation = dr.get_evaluation_urm()[pids]
pids_converted = np.array([dictns[x] for x in pids], dtype=np.int32)
run_SLIM_bananesyan_search(URM_train=urm, URM_validation=urm_evaluation)
# dr = Datareader(mode="offline", only_load=True)
# ev = Evaluator(dr)
# pids = dr.get_test_pids()
#
# urm = dr.get_urm()
# urm_evaluation = dr.get_evaluation_urm()[pids]
# pids_converted = pids
#
# run_SLIM_bananesyan_search(URM_train=urm, URM_validation=urm_evaluation)
except Exception as e:
bot.error("Exception "+str(e))
bot.end() | [
"[email protected]"
]
| |
26d7c06f88ff8b77fb6eb704335b28197ac7b3ac | 49c2492d91789b3c2def7d654a7396e8c6ce6d9f | /ROS/vrep_ros_ws/build/vrep_skeleton_msg_and_srv/catkin_generated/generate_cached_setup.py | 063f8efb2e5b20ed1335dd677a45fae2675a3513 | []
| no_license | DavidHan008/lockdpwn | edd571165f9188e0ee93da7222c0155abb427927 | 5078a1b08916b84c5c3723fc61a1964d7fb9ae20 | refs/heads/master | 2021-01-23T14:10:53.209406 | 2017-09-02T18:02:50 | 2017-09-02T18:02:50 | 102,670,531 | 0 | 2 | null | 2017-09-07T00:11:33 | 2017-09-07T00:11:33 | null | UTF-8 | Python | false | false | 1,508 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/indigo/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/indigo/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/dyros-vehicle/gitrepo/lockdpwn/ROS/vrep_ros_ws/devel;/home/dyros-vehicle/gitrepo/lockdpwn/ROS/catkin_ws/devel;/opt/ros/indigo".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/dyros-vehicle/gitrepo/lockdpwn/ROS/vrep_ros_ws/devel/.private/vrep_skeleton_msg_and_srv/env.sh')
output_filename = '/home/dyros-vehicle/gitrepo/lockdpwn/ROS/vrep_ros_ws/build/vrep_skeleton_msg_and_srv/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
]
| |
d82de8f764febc64cf530f2dc46b710cd433c73d | e05f8d36c70336a8714cc260c02fe85ecee2e62e | /subject/tests/functional/v1/test_api.py | cfca2ac0204a25f545c1ca0124f77b4b5b32a902 | [
"Apache-2.0"
]
| permissive | laoyigrace/subject | eafa442b5d9ebf83c78a01ce3bb5d088d08d620d | e6ed989fdc250917a19788112b22322b73b3550f | refs/heads/master | 2021-01-11T00:06:54.790751 | 2016-10-24T02:13:32 | 2016-10-24T02:13:32 | 70,754,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,886 | py | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functional test case that utilizes httplib2 against the API server"""
import hashlib
import httplib2
import sys
from oslo_serialization import jsonutils
from oslo_utils import units
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from subject.tests import functional
from subject.tests.utils import minimal_headers
from subject.tests.utils import skip_if_disabled
FIVE_KB = 5 * units.Ki
FIVE_GB = 5 * units.Gi
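# `minimal_headers` is imported from subject.tests.utils (not included in this
# dump); a sketch consistent with the assertions below (raw disk format, ovf
# container format, public subject):
#
#     def minimal_headers(name, public=True):
#         headers = {
#             'Content-Type': 'application/octet-stream',
#             'X-Subject-Meta-Name': name,
#             'X-Subject-Meta-disk_format': 'raw',
#             'X-Subject-Meta-container_format': 'ovf',
#         }
#         if public:
#             headers['X-Subject-Meta-Is-Public'] = 'True'
#         return headers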
class TestApi(functional.FunctionalTest):
"""Functional tests using httplib2 against the API server"""
def _check_subject_create(self, headers, status=201,
subject_data="*" * FIVE_KB):
# performs subject_create request, checks the response and returns
# content
http = httplib2.Http()
path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port)
response, content = http.request(
path, 'POST', headers=headers, body=subject_data)
self.assertEqual(status, response.status)
return content
def test_checksum_32_chars_at_subject_create(self):
self.cleanup()
self.start_servers(**self.__dict__.copy())
headers = minimal_headers('Subject1')
subject_data = "*" * FIVE_KB
# checksum can be no longer that 32 characters (String(32))
headers['X-Subject-Meta-Checksum'] = 'x' * 42
content = self._check_subject_create(headers, 400)
self.assertIn("Invalid checksum", content)
# test positive case as well
headers['X-Subject-Meta-Checksum'] = hashlib.md5(subject_data).hexdigest()
self._check_subject_create(headers)
def test_param_int_too_large_at_create(self):
# currently 2 params min_disk/min_ram can cause DBError on save
self.cleanup()
self.start_servers(**self.__dict__.copy())
# Integer field can't be greater than max 8-byte signed integer
for param in ['min_disk', 'min_ram']:
headers = minimal_headers('Subject1')
# check that long numbers result in 400
            headers['X-Subject-Meta-%s' % param] = str(sys.maxint + 1)  # NB: sys.maxint exists on Python 2 only
content = self._check_subject_create(headers, 400)
self.assertIn("'%s' value out of range" % param, content)
# check that integers over 4 byte result in 400
headers['X-Subject-Meta-%s' % param] = str(2 ** 31)
content = self._check_subject_create(headers, 400)
self.assertIn("'%s' value out of range" % param, content)
# verify positive case as well
headers['X-Subject-Meta-%s' % param] = str((2 ** 31) - 1)
self._check_subject_create(headers)
@skip_if_disabled
def test_get_head_simple_post(self):
"""
We test the following sequential series of actions:
0. GET /subjects
- Verify no public subjects
1. GET /subjects/detail
- Verify no public subjects
2. POST /subjects with public subject named Subject1
and no custom properties
- Verify 201 returned
3. HEAD subject
- Verify HTTP headers have correct information we just added
4. GET subject
- Verify all information on subject we just added is correct
5. GET /subjects
- Verify the subject we just added is returned
6. GET /subjects/detail
- Verify the subject we just added is returned
7. PUT subject with custom properties of "distro" and "arch"
- Verify 200 returned
8. PUT subject with too many custom properties
- Verify 413 returned
9. GET subject
- Verify updated information about subject was stored
10. PUT subject
- Remove a previously existing property.
11. PUT subject
- Add a previously deleted property.
12. PUT subject/members/member1
- Add member1 to subject
13. PUT subject/members/member2
- Add member2 to subject
14. GET subject/members
- List subject members
15. DELETE subject/members/member1
- Delete subject member1
16. PUT subject/members
- Attempt to replace members with an overlimit amount
17. PUT subject/members/member11
- Attempt to add a member while at limit
18. POST /subjects with another public subject named Subject2
- attribute and three custom properties, "distro", "arch" & "foo"
- Verify a 200 OK is returned
19. HEAD subject2
- Verify subject2 found now
20. GET /subjects
- Verify 2 public subjects
21. GET /subjects with filter on user-defined property "distro".
- Verify both subjects are returned
22. GET /subjects with filter on user-defined property 'distro' but
- with non-existent value. Verify no subjects are returned
23. GET /subjects with filter on non-existent user-defined property
- "boo". Verify no subjects are returned
24. GET /subjects with filter 'arch=i386'
- Verify only subject2 is returned
25. GET /subjects with filter 'arch=x86_64'
- Verify only subject1 is returned
26. GET /subjects with filter 'foo=bar'
- Verify only subject2 is returned
27. DELETE subject1
- Delete subject
28. GET subject/members
- List deleted subject members
29. PUT subject/members/member2
- Update existing member2 of deleted subject
30. PUT subject/members/member3
- Add member3 to deleted subject
31. DELETE subject/members/member2
- Delete member2 from deleted subject
32. DELETE subject2
- Delete subject
33. GET /subjects
- Verify no subjects are listed
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
# 0. GET /subjects
# Verify no public subjects
path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
self.assertEqual('{"subjects": []}', content)
# 1. GET /subjects/detail
# Verify no public subjects
path = "http://%s:%d/v1/subjects/detail" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
self.assertEqual('{"subjects": []}', content)
# 2. POST /subjects with public subject named Subject1
# attribute and no custom properties. Verify a 200 OK is returned
subject_data = "*" * FIVE_KB
headers = minimal_headers('Subject1')
path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=subject_data)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
subject_id = data['subject']['id']
self.assertEqual(hashlib.md5(subject_data).hexdigest(),
data['subject']['checksum'])
self.assertEqual(FIVE_KB, data['subject']['size'])
self.assertEqual("Subject1", data['subject']['name'])
self.assertTrue(data['subject']['is_public'])
# 3. HEAD subject
# Verify subject found now
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject_id)
http = httplib2.Http()
response, content = http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual("Subject1", response['x-subject-meta-name'])
# 4. GET subject
# Verify all information on subject we just added is correct
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
expected_subject_headers = {
'x-subject-meta-id': subject_id,
'x-subject-meta-name': 'Subject1',
'x-subject-meta-is_public': 'True',
'x-subject-meta-status': 'active',
'x-subject-meta-disk_format': 'raw',
'x-subject-meta-container_format': 'ovf',
'x-subject-meta-size': str(FIVE_KB)}
expected_std_headers = {
'content-length': str(FIVE_KB),
'content-type': 'application/octet-stream'}
for expected_key, expected_value in expected_subject_headers.items():
self.assertEqual(expected_value, response[expected_key],
"For key '%s' expected header value '%s'. "
"Got '%s'" % (expected_key,
expected_value,
response[expected_key]))
for expected_key, expected_value in expected_std_headers.items():
self.assertEqual(expected_value, response[expected_key],
"For key '%s' expected header value '%s'. "
"Got '%s'" % (expected_key,
expected_value,
response[expected_key]))
self.assertEqual("*" * FIVE_KB, content)
self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(),
hashlib.md5(content).hexdigest())
# 5. GET /subjects
# Verify one public subject
path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
expected_result = {"subjects": [
{"container_format": "ovf",
"disk_format": "raw",
"id": subject_id,
"name": "Subject1",
"checksum": "c2e5db72bd7fd153f53ede5da5a06de3",
"size": 5120}]}
self.assertEqual(expected_result, jsonutils.loads(content))
# 6. GET /subjects/detail
# Verify subject and all its metadata
path = "http://%s:%d/v1/subjects/detail" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
expected_subject = {
"status": "active",
"name": "Subject1",
"deleted": False,
"container_format": "ovf",
"disk_format": "raw",
"id": subject_id,
"is_public": True,
"deleted_at": None,
"properties": {},
"size": 5120}
subject = jsonutils.loads(content)
for expected_key, expected_value in expected_subject.items():
self.assertEqual(expected_value, subject['subjects'][0][expected_key],
"For key '%s' expected header value '%s'. "
"Got '%s'" % (expected_key,
expected_value,
subject['subjects'][0][expected_key]))
# 7. PUT subject with custom properties of "distro" and "arch"
# Verify 200 returned
headers = {'X-Subject-Meta-Property-Distro': 'Ubuntu',
'X-Subject-Meta-Property-Arch': 'x86_64'}
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject_id)
http = httplib2.Http()
response, content = http.request(path, 'PUT', headers=headers)
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual("x86_64", data['subject']['properties']['arch'])
self.assertEqual("Ubuntu", data['subject']['properties']['distro'])
# 8. PUT subject with too many custom properties
# Verify 413 returned
headers = {}
for i in range(11): # configured limit is 10
headers['X-Subject-Meta-Property-foo%d' % i] = 'bar'
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject_id)
http = httplib2.Http()
response, content = http.request(path, 'PUT', headers=headers)
self.assertEqual(413, response.status)
# 9. GET /subjects/detail
# Verify subject and all its metadata
path = "http://%s:%d/v1/subjects/detail" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
expected_subject = {
"status": "active",
"name": "Subject1",
"deleted": False,
"container_format": "ovf",
"disk_format": "raw",
"id": subject_id,
"is_public": True,
"deleted_at": None,
"properties": {'distro': 'Ubuntu', 'arch': 'x86_64'},
"size": 5120}
subject = jsonutils.loads(content)
for expected_key, expected_value in expected_subject.items():
self.assertEqual(expected_value, subject['subjects'][0][expected_key],
"For key '%s' expected header value '%s'. "
"Got '%s'" % (expected_key,
expected_value,
subject['subjects'][0][expected_key]))
# 10. PUT subject and remove a previously existing property.
headers = {'X-Subject-Meta-Property-Arch': 'x86_64'}
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject_id)
http = httplib2.Http()
response, content = http.request(path, 'PUT', headers=headers)
self.assertEqual(200, response.status)
path = "http://%s:%d/v1/subjects/detail" % ("127.0.0.1", self.api_port)
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)['subjects'][0]
self.assertEqual(1, len(data['properties']))
self.assertEqual("x86_64", data['properties']['arch'])
# 11. PUT subject and add a previously deleted property.
headers = {'X-Subject-Meta-Property-Distro': 'Ubuntu',
'X-Subject-Meta-Property-Arch': 'x86_64'}
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject_id)
http = httplib2.Http()
response, content = http.request(path, 'PUT', headers=headers)
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
path = "http://%s:%d/v1/subjects/detail" % ("127.0.0.1", self.api_port)
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)['subjects'][0]
self.assertEqual(2, len(data['properties']))
self.assertEqual("x86_64", data['properties']['arch'])
self.assertEqual("Ubuntu", data['properties']['distro'])
self.assertNotEqual(data['created_at'], data['updated_at'])
# 12. Add member to subject
path = ("http://%s:%d/v1/subjects/%s/members/pattieblack" %
("127.0.0.1", self.api_port, subject_id))
http = httplib2.Http()
response, content = http.request(path, 'PUT')
self.assertEqual(204, response.status)
# 13. Add member to subject
path = ("http://%s:%d/v1/subjects/%s/members/pattiewhite" %
("127.0.0.1", self.api_port, subject_id))
http = httplib2.Http()
response, content = http.request(path, 'PUT')
self.assertEqual(204, response.status)
# 14. List subject members
path = ("http://%s:%d/v1/subjects/%s/members" %
("127.0.0.1", self.api_port, subject_id))
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(2, len(data['members']))
self.assertEqual('pattieblack', data['members'][0]['member_id'])
self.assertEqual('pattiewhite', data['members'][1]['member_id'])
# 15. Delete subject member
path = ("http://%s:%d/v1/subjects/%s/members/pattieblack" %
("127.0.0.1", self.api_port, subject_id))
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(204, response.status)
# 16. Attempt to replace members with an overlimit amount
# Adding 11 subject members should fail since configured limit is 10
path = ("http://%s:%d/v1/subjects/%s/members" %
("127.0.0.1", self.api_port, subject_id))
memberships = []
for i in range(11):
member_id = "foo%d" % i
memberships.append(dict(member_id=member_id))
http = httplib2.Http()
body = jsonutils.dumps(dict(memberships=memberships))
response, content = http.request(path, 'PUT', body=body)
self.assertEqual(413, response.status)
# 17. Attempt to add a member while at limit
# Adding an 11th member should fail since configured limit is 10
path = ("http://%s:%d/v1/subjects/%s/members" %
("127.0.0.1", self.api_port, subject_id))
memberships = []
for i in range(10):
member_id = "foo%d" % i
memberships.append(dict(member_id=member_id))
http = httplib2.Http()
body = jsonutils.dumps(dict(memberships=memberships))
response, content = http.request(path, 'PUT', body=body)
self.assertEqual(204, response.status)
path = ("http://%s:%d/v1/subjects/%s/members/fail_me" %
("127.0.0.1", self.api_port, subject_id))
http = httplib2.Http()
response, content = http.request(path, 'PUT')
self.assertEqual(413, response.status)
# 18. POST /subjects with another public subject named Subject2
# attribute and three custom properties, "distro", "arch" & "foo".
# Verify a 200 OK is returned
subject_data = "*" * FIVE_KB
headers = minimal_headers('Subject2')
headers['X-Subject-Meta-Property-Distro'] = 'Ubuntu'
headers['X-Subject-Meta-Property-Arch'] = 'i386'
headers['X-Subject-Meta-Property-foo'] = 'bar'
path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=subject_data)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
subject2_id = data['subject']['id']
self.assertEqual(hashlib.md5(subject_data).hexdigest(),
data['subject']['checksum'])
self.assertEqual(FIVE_KB, data['subject']['size'])
self.assertEqual("Subject2", data['subject']['name'])
self.assertTrue(data['subject']['is_public'])
self.assertEqual('Ubuntu', data['subject']['properties']['distro'])
self.assertEqual('i386', data['subject']['properties']['arch'])
self.assertEqual('bar', data['subject']['properties']['foo'])
# 19. HEAD subject2
# Verify subject2 found now
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject2_id)
http = httplib2.Http()
response, content = http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual("Subject2", response['x-subject-meta-name'])
# 20. GET /subjects
# Verify 2 public subjects
path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
subjects = jsonutils.loads(content)['subjects']
self.assertEqual(2, len(subjects))
self.assertEqual(subject2_id, subjects[0]['id'])
self.assertEqual(subject_id, subjects[1]['id'])
# 21. GET /subjects with filter on user-defined property 'distro'.
# Verify both subjects are returned
path = "http://%s:%d/v1/subjects?property-distro=Ubuntu" % (
"127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
subjects = jsonutils.loads(content)['subjects']
self.assertEqual(2, len(subjects))
self.assertEqual(subject2_id, subjects[0]['id'])
self.assertEqual(subject_id, subjects[1]['id'])
# 22. GET /subjects with filter on user-defined property 'distro' but
# with non-existent value. Verify no subjects are returned
path = "http://%s:%d/v1/subjects?property-distro=fedora" % (
"127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
subjects = jsonutils.loads(content)['subjects']
self.assertEqual(0, len(subjects))
# 23. GET /subjects with filter on non-existent user-defined property
# 'boo'. Verify no subjects are returned
path = "http://%s:%d/v1/subjects?property-boo=bar" % ("127.0.0.1",
self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
subjects = jsonutils.loads(content)['subjects']
self.assertEqual(0, len(subjects))
# 24. GET /subjects with filter 'arch=i386'
# Verify only subject2 is returned
path = "http://%s:%d/v1/subjects?property-arch=i386" % ("127.0.0.1",
self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
subjects = jsonutils.loads(content)['subjects']
self.assertEqual(1, len(subjects))
self.assertEqual(subject2_id, subjects[0]['id'])
# 25. GET /subjects with filter 'arch=x86_64'
# Verify only subject1 is returned
path = "http://%s:%d/v1/subjects?property-arch=x86_64" % ("127.0.0.1",
self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
subjects = jsonutils.loads(content)['subjects']
self.assertEqual(1, len(subjects))
self.assertEqual(subject_id, subjects[0]['id'])
# 26. GET /subjects with filter 'foo=bar'
# Verify only subject2 is returned
path = "http://%s:%d/v1/subjects?property-foo=bar" % ("127.0.0.1",
self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
subjects = jsonutils.loads(content)['subjects']
self.assertEqual(1, len(subjects))
self.assertEqual(subject2_id, subjects[0]['id'])
# 27. DELETE subject1
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(200, response.status)
# 28. Try to list members of deleted subject
path = ("http://%s:%d/v1/subjects/%s/members" %
("127.0.0.1", self.api_port, subject_id))
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(404, response.status)
# 29. Try to update member of deleted subject
path = ("http://%s:%d/v1/subjects/%s/members" %
("127.0.0.1", self.api_port, subject_id))
http = httplib2.Http()
fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}]
body = jsonutils.dumps(dict(memberships=fixture))
response, content = http.request(path, 'PUT', body=body)
self.assertEqual(404, response.status)
# 30. Try to add member to deleted subject
path = ("http://%s:%d/v1/subjects/%s/members/chickenpattie" %
("127.0.0.1", self.api_port, subject_id))
http = httplib2.Http()
response, content = http.request(path, 'PUT')
self.assertEqual(404, response.status)
# 31. Try to delete member of deleted subject
path = ("http://%s:%d/v1/subjects/%s/members/pattieblack" %
("127.0.0.1", self.api_port, subject_id))
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(404, response.status)
# 32. DELETE subject2
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject2_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(200, response.status)
# 33. GET /subjects
# Verify no subjects are listed
path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
subjects = jsonutils.loads(content)['subjects']
self.assertEqual(0, len(subjects))
# 34. HEAD /subjects/detail
path = "http://%s:%d/v1/subjects/detail" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'HEAD')
self.assertEqual(405, response.status)
self.assertEqual('GET', response.get('allow'))
self.stop_servers()
def test_download_non_exists_subject_raises_http_forbidden(self):
"""
We test the following sequential series of actions::
0. POST /subjects with public subject named Subject1
and no custom properties
- Verify 201 returned
1. HEAD subject
- Verify HTTP headers have correct information we just added
2. GET subject
- Verify all information on subject we just added is correct
3. DELETE subject1
- Delete the newly added subject
4. GET subject
- Verify that 403 HTTPForbidden exception is raised prior to
404 HTTPNotFound
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
subject_data = "*" * FIVE_KB
headers = minimal_headers('Subject1')
path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=subject_data)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
subject_id = data['subject']['id']
self.assertEqual(hashlib.md5(subject_data).hexdigest(),
data['subject']['checksum'])
self.assertEqual(FIVE_KB, data['subject']['size'])
self.assertEqual("Subject1", data['subject']['name'])
self.assertTrue(data['subject']['is_public'])
# 1. HEAD subject
# Verify subject found now
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject_id)
http = httplib2.Http()
response, content = http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual("Subject1", response['x-subject-meta-name'])
# 2. GET /subjects
# Verify one public subject
path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
expected_result = {"subjects": [
{"container_format": "ovf",
"disk_format": "raw",
"id": subject_id,
"name": "Subject1",
"checksum": "c2e5db72bd7fd153f53ede5da5a06de3",
"size": 5120}]}
self.assertEqual(expected_result, jsonutils.loads(content))
# 3. DELETE subject1
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(200, response.status)
# 4. GET subject
# Verify that 403 HTTPForbidden exception is raised prior to
# 404 HTTPNotFound
rules = {"download_subject": '!'}
self.set_policy_rules(rules)
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(403, response.status)
self.stop_servers()
def test_download_non_exists_subject_raises_http_not_found(self):
"""
We test the following sequential series of actions:
0. POST /subjects with public subject named Subject1
and no custom properties
- Verify 201 returned
1. HEAD subject
- Verify HTTP headers have correct information we just added
2. GET subject
- Verify all information on subject we just added is correct
3. DELETE subject1
- Delete the newly added subject
4. GET subject
- Verify that 404 HTTPNotFound exception is raised
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
subject_data = "*" * FIVE_KB
headers = minimal_headers('Subject1')
path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=subject_data)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
subject_id = data['subject']['id']
self.assertEqual(hashlib.md5(subject_data).hexdigest(),
data['subject']['checksum'])
self.assertEqual(FIVE_KB, data['subject']['size'])
self.assertEqual("Subject1", data['subject']['name'])
self.assertTrue(data['subject']['is_public'])
# 1. HEAD subject
# Verify subject found now
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject_id)
http = httplib2.Http()
response, content = http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual("Subject1", response['x-subject-meta-name'])
# 2. GET /subjects
# Verify one public subject
path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
expected_result = {"subjects": [
{"container_format": "ovf",
"disk_format": "raw",
"id": subject_id,
"name": "Subject1",
"checksum": "c2e5db72bd7fd153f53ede5da5a06de3",
"size": 5120}]}
self.assertEqual(expected_result, jsonutils.loads(content))
# 3. DELETE subject1
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(200, response.status)
# 4. GET subject
# Verify that 404 HTTPNotFound exception is raised
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(404, response.status)
self.stop_servers()
def test_status_cannot_be_manipulated_directly(self):
self.cleanup()
self.start_servers(**self.__dict__.copy())
headers = minimal_headers('Subject1')
# Create a 'queued' subject
http = httplib2.Http()
headers = {'Content-Type': 'application/octet-stream',
'X-Subject-Meta-Disk-Format': 'raw',
'X-Subject-Meta-Container-Format': 'bare'}
path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port)
response, content = http.request(path, 'POST', headers=headers,
body=None)
self.assertEqual(201, response.status)
subject = jsonutils.loads(content)['subject']
self.assertEqual('queued', subject['status'])
# Ensure status of 'queued' subject can't be changed
path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port,
subject['id'])
http = httplib2.Http()
headers = {'X-Subject-Meta-Status': 'active'}
response, content = http.request(path, 'PUT', headers=headers)
self.assertEqual(403, response.status)
response, content = http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual('queued', response['x-subject-meta-status'])
# We allow 'setting' to the same status
http = httplib2.Http()
headers = {'X-Subject-Meta-Status': 'queued'}
response, content = http.request(path, 'PUT', headers=headers)
self.assertEqual(200, response.status)
response, content = http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual('queued', response['x-subject-meta-status'])
# Make subject active
http = httplib2.Http()
headers = {'Content-Type': 'application/octet-stream'}
response, content = http.request(path, 'PUT', headers=headers,
body='data')
self.assertEqual(200, response.status)
subject = jsonutils.loads(content)['subject']
self.assertEqual('active', subject['status'])
# Ensure status of 'active' subject can't be changed
http = httplib2.Http()
headers = {'X-Subject-Meta-Status': 'queued'}
response, content = http.request(path, 'PUT', headers=headers)
self.assertEqual(403, response.status)
response, content = http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual('active', response['x-subject-meta-status'])
# We allow 'setting' to the same status
http = httplib2.Http()
headers = {'X-Subject-Meta-Status': 'active'}
response, content = http.request(path, 'PUT', headers=headers)
self.assertEqual(200, response.status)
response, content = http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual('active', response['x-subject-meta-status'])
# Create a 'queued' subject, ensure 'status' header is ignored
http = httplib2.Http()
path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port)
headers = {'Content-Type': 'application/octet-stream',
'X-Subject-Meta-Status': 'active'}
response, content = http.request(path, 'POST', headers=headers,
body=None)
self.assertEqual(201, response.status)
subject = jsonutils.loads(content)['subject']
self.assertEqual('queued', subject['status'])
# Create an 'active' subject, ensure 'status' header is ignored
http = httplib2.Http()
path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port)
headers = {'Content-Type': 'application/octet-stream',
'X-Subject-Meta-Disk-Format': 'raw',
'X-Subject-Meta-Status': 'queued',
'X-Subject-Meta-Container-Format': 'bare'}
response, content = http.request(path, 'POST', headers=headers,
body='data')
self.assertEqual(201, response.status)
subject = jsonutils.loads(content)['subject']
self.assertEqual('active', subject['status'])
self.stop_servers()
| [
"[email protected]"
]
| |
9121aa7623fa31fd8cad9ac6cd3485cb1656a44d | a36501f44a09ca03dd1167e1d7965f782e159097 | /app/modules/auth/params.py | c7dd1d359b51eb056962e44c9b871c1d299d8c4b | [
"Apache-2.0"
]
| permissive | ssfdust/full-stack-flask-smorest | 9429a2cdcaa3ff3538875cc74cff802765678d4b | 4f866b2264e224389c99bbbdb4521f4b0799b2a3 | refs/heads/master | 2023-08-05T08:48:03.474042 | 2023-05-07T01:08:20 | 2023-05-07T01:08:20 | 205,528,296 | 39 | 10 | Apache-2.0 | 2023-08-31T00:18:42 | 2019-08-31T10:12:25 | Python | UTF-8 | Python | false | false | 2,162 | py | # Copyright 2019 RedLotus <[email protected]>
# Author: RedLotus <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
app.modules.auth.params
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
用户验证参数模块
"""
from app.extensions import ma
from marshmallow import fields
class LoginParams(ma.Schema):
"""
登录用参数
:attr email: str 用户邮箱
:attr password: str 密码
:attr captcha: str 验证码
:attr token: str 验证码token
"""
email = fields.Str(required=True, allow_none=False, description="用户邮箱")
password = fields.Str(required=True, allow_none=False, description="密码")
captcha = fields.Str(required=True, allow_none=False, description="验证码")
token = fields.Str(required=True, allow_none=False, description="验证token")
class JwtParam(ma.Schema):
"""
Jwt的Token参数
:attr token: str Jwt token
"""
token = fields.Str(required=False, allow_none=False, description="token")
class PasswdParam(ma.Schema):
"""
验证密码
:attr password: str 原密码
:attr confirm_password: str 确认密码
"""
password = fields.Str(required=True, allow_none=False, description="密码")
confirm_password = fields.Str(required=True, allow_none=False, description="确认密码")
class EmailParam(ma.Schema):
"""
邮箱参数
:attr email: str 邮箱
"""
email = fields.Str(required=True, description="邮箱")
class CaptchaParam(ma.Schema):
"""
验证图片Token参数
:attr token: str 验证码token
"""
token = fields.Str(required=True, description="随机token")
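# Usage sketch (editor's addition; payload values are placeholders, and
# marshmallow 3 `load` semantics are assumed):
#
#     data = LoginParams().load({
#         "email": "user@example.com",
#         "password": "secret",
#         "captcha": "a1b2",
#         "token": "captcha-session-token",
#     })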
| [
"[email protected]"
]
| |
a42609d3d57b7e0f3298e6dee88c7531e8b4df7b | 32c915adc51bdb5d2deab2a592d9f3ca7b7dc375 | /Chapter_11_programming_tasks/task_2.py | 57291c7a43269738ae347bef625ced59459b1aa2 | []
| no_license | nervig/Starting_Out_With_Python | 603c2b8c9686edcf92c1a90596d552b873fe6229 | d617ee479c7c77038331b5f262e00f59e8e90070 | refs/heads/master | 2023-02-25T07:14:12.685417 | 2021-02-02T18:45:00 | 2021-02-02T18:45:00 | 335,391,362 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | import employee
def main():
name_of_employee = input('Enter a name of employee: ')
number_of_employee = input('Enter a number of employee: ')
annual_salary = input('Enter an annual salary: ')
annual_bonus = input('Enter an annual bonus: ')
data_of_shift_supervisor = employee.ShiftSupervisor(name_of_employee, number_of_employee, annual_salary, annual_bonus)
print('The data of shift supervisor: ')
print('Name: ' + data_of_shift_supervisor.get_name_of_employee())
print('ID: ' + data_of_shift_supervisor.get_number_of_employee())
print('Annual salary: ' + data_of_shift_supervisor.get_annual_salary())
print('Annual bonus: ' + data_of_shift_supervisor.get_annual_bonus())
main() | [
"[email protected]"
]
| |
23e77f8d02e5d307347f08baca5d033626e01412 | 51b7b81cce1e8943926c531ad8763af8fd4074dc | /1260.py | 8280b9c478f211dddcdc27f39f47b057c9ca1dae | []
| no_license | goodsosbva/BOJ_Graph | f65598591b07ea2f637cba2644bdc81386afb36e | 34fe8bfec0543d9884869fe5ebbb536c6fcc3fbf | refs/heads/main | 2023-03-22T08:14:53.735351 | 2021-03-07T09:22:39 | 2021-03-07T09:22:39 | 338,587,428 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | N, M, V = map(int, input().split())
matrix = [[0] * (N + 1) for i in range(N + 1)]
for i in range(M):
a, b = map(int, input().split())
matrix[a][b] = matrix[b][a] = 1
visit_list = [0] * (N + 1)
def dfs(V):
visit_list[V] = 1 # 방문한 점 1로 표시
print(V, end=' ')
for i in range(1, N + 1):
if (visit_list[i] == 0 and matrix[V][i] == 1):
dfs(i)
def bfs(V):
    queue = [V]  # vertices still to be visited
    visit_list[V] = 0  # mark as visited with 0 (reuses the array the DFS left filled with 1s)
while queue:
V = queue.pop(0)
print(V, end=' ')
for i in range(1, N + 1):
if (visit_list[i] == 1 and matrix[V][i] == 1):
queue.append(i)
visit_list[i] = 0
dfs(V)
print()
bfs(V)
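# Sample I/O for BOJ 1260, as given in the problem statement (editor's
# addition): input "4 5 1" with edges 1-2, 1-3, 1-4, 2-4, 3-4 prints
# "1 2 4 3" (DFS) and "1 2 3 4" (BFS).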
| [
"[email protected]"
]
| |
ed1a80133b79485d1c7d0125da7309754e321eea | d922b02070c11c19ba6104daa3a1544e27a06e40 | /DSA_Project/venv/Scripts/easy_install-3.8-script.py | d71594c04a3b333adb75b4777054c951680c802e | []
| no_license | viharivnv/DSA | 2ca393a8e304ee7b4d540ff435e832d94ee4b2a7 | 777c7281999ad99a0359c44291dddaa868a2525c | refs/heads/master | 2022-10-15T15:26:59.045698 | 2020-06-17T15:55:33 | 2020-06-17T15:55:33 | 273,020,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | #!C:\Users\vihar\PycharmProjects\DSA_Project\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
| [
"[email protected]"
]
| |
ef40ec48bf2a0cb2ff75da74ffa77734efd92e46 | 7e34f45c4c046f01764583b6317f85200ddf2bcf | /tests/settings.py | 6b203371768fca78fd5a3bcd4c863f83fbb1ae04 | [
"BSD-3-Clause"
]
| permissive | MarkusH/django-jellyglass | 3953de9fb840320db23a8b748df089da2aeb1013 | 2b7c8fcaac76f8833f2880b10f687552530a3ccb | refs/heads/master | 2021-01-18T15:09:08.904899 | 2018-12-03T16:41:06 | 2018-12-03T16:41:06 | 49,637,243 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,540 | py | ALLOWED_HOSTS = []
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ":memory:",
}
}
DEBUG = True
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'jellyglass.apps.JellyGlassConfig',
]
LANGUAGE_CODE = 'en-us'
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tests.urls'
SECRET_KEY = 'test-secret-key'
STATIC_URL = '/static/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
| [
"[email protected]"
]
| |
c6866ffcb6663df60970fd0041ee61d604f921a5 | 930c207e245c320b108e9699bbbb036260a36d6a | /BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/Differential_Pressure_Load_Shed_Status.py | 4f44b20dc9c3108fb3505cc8f10804105b148f22 | []
| no_license | InnovationSE/BRICK-Generated-By-OLGA | 24d278f543471e1ce622f5f45d9e305790181fff | 7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2 | refs/heads/master | 2021-07-01T14:13:11.302860 | 2017-09-21T12:44:17 | 2017-09-21T12:44:17 | 104,251,784 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Load_Shed_Status import Load_Shed_Status
class Differential_Pressure_Load_Shed_Status(Load_Shed_Status):
rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Differential_Pressure_Load_Shed_Status
| [
"[email protected]"
]
| |
032be5db944974a1f32618e9395669e88e00c17e | 5dfbfa153f22b3f58f8138f62edaeef30bad46d3 | /old_ws/build/catkin_generated/order_packages.py | e6748be72199deb25e033f5d33a964cf1bf10700 | []
| no_license | adubredu/rascapp_robot | f09e67626bd5a617a569c9a049504285cecdee98 | 29ace46657dd3a0a6736e086ff09daa29e9cf10f | refs/heads/master | 2022-01-19T07:52:58.511741 | 2019-04-01T19:22:48 | 2019-04-01T19:22:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | # generated from catkin/cmake/template/order_packages.context.py.in
source_root_dir = "/home/bill/ros_ws/src"
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
underlay_workspaces = "/home/bill/ros_ws/devel;/opt/ros/kinetic".split(';') if "/home/bill/ros_ws/devel;/opt/ros/kinetic" != "" else []
| [
"[email protected]"
]
| |
74c1f4bbb34d65beac68174e7c7ab0e18c3f36e6 | f63c4eb29ce57319441f5469d1d049b63bc220de | /swu_cycle_variance/run333.py | 96b5bc1ea46f182e5b80cfcab1a1271e253d1f12 | []
| no_license | a-co/diversion_models | 0237642153668b16035699e9e734ff0538568582 | 69eed2687b1cd2b48f5717d15919eccd24a0eabc | refs/heads/main | 2023-05-02T19:04:26.333677 | 2020-06-18T20:50:18 | 2020-06-18T20:50:18 | 216,904,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,361 | py | SIMULATION = {'simulation': {'agent': [{'name': 'deployer_military', 'prototype': 'military_deployer'}, {'name': 'deployer_civilian', 'prototype': 'civilian_deployer'}, {'name': 'deployer_shared', 'prototype': 'shared_deployer'}], 'archetypes': {'spec': [{'lib': 'cycamore', 'name': 'DeployInst'}, {'lib': 'cycamore', 'name': 'Source'}, {'lib': 'cycamore', 'name': 'Sink'}, {'lib': 'cycamore', 'name': 'Storage'}, {'lib': 'cycamore', 'name': 'Reactor'}, {'lib': 'cycamore', 'name': 'Separations'}, {'lib': 'cycamore', 'name': 'Enrichment'}]}, 'control': {'duration': '144', 'explicit_inventory': 'true', 'startmonth': '1', 'startyear': '2020'}, 'prototype': [{'config': {'Source': {'inventory_size': '1e30', 'outcommod': 'u_ore', 'outrecipe': 'r_u_ore', 'throughput': '1e10'}}, 'name': 'mine'}, {'config': {'Separations': {'feed_commod_prefs': {'val': ['1.0', '10.0', '100.0']}, 'feed_commods': {'val': ['u_ore', 'u_ore1', 'u_ore2']}, 'feedbuf_size': '2e8', 'leftover_commod': 'waste', 'streams': {'item': {'commod': 'u_nat', 'info': {'buf_size': '150000', 'efficiencies': {'item': [{'comp': 'U', 'eff': '.99'}, {'comp': 'O', 'eff': '.99'}]}}}}, 'throughput': '2e8'}}, 'name': 'milling'}, {'config': {'Separations': {'feed_commod_prefs': {'val': '1.0'}, 'feed_commods': {'val': 'u_nat'}, 'feedbuf_size': '200000', 'leftover_commod': 'waste', 'streams': {'item': {'commod': 'uf6', 'info': {'buf_size': '200000', 'efficiencies': {'item': {'comp': 'U', 'eff': '.99'}}}}}, 'throughput': '200000'}}, 'name': 'conversion'}, {'config': {'Enrichment': {'feed_commod_prefs': {'val': '1'}, 'feed_commods': {'val': 'uf6'}, 'feed_recipe': 'r_uox', 'max_feed_inventory': '20000', 'product_commod': 'mil_fiss', 'swu_capacity': '17314.83583510541', 'tails_assay': '0.003', 'tails_commod': 'mil_u_dep'}}, 'name': 'mil_enrichment'}, {'config': {'Storage': {'in_commods': {'val': 'mil_u_dep'}, 'out_commods': {'val': 'mil_u_dep_str'}, 'residence_time': '0'}}, 'name': 'mil_str_u_dep'}, {'config': {'Storage': {'in_commod_prefs': {'val': '1'}, 'in_commods': {'val': 'uf6'}, 'in_recipe': 'r_mil_uox', 'max_inv_size': '30000', 'out_commods': {'val': 'mil_uox'}, 'residence_time': '0'}}, 'name': 'mil_uox_fabrication'}, {'config': {'Reactor': {'assem_size': '14000', 'cycle_time': '-6', 'fuel_incommods': {'val': 'mil_uox'}, 'fuel_inrecipes': {'val': 'r_mil_uox'}, 'fuel_outcommods': {'val': 'mil_uox_spent'}, 'fuel_outrecipes': {'val': 'r_mil_uox_spent'}, 'fuel_prefs': {'val': '1'}, 'n_assem_batch': '1', 'n_assem_core': '1', 'power_cap': '0.15', 'refuel_time': '0'}}, 'lifetime': '960', 'name': 'mil_lwr'}, {'config': {'Storage': {'in_commods': {'val': 'mil_mox_spent'}, 'out_commods': {'val': 'mil_mox_spent_str'}, 'residence_time': '60'}}, 'name': 'mil_str_mox_spent'}, {'config': {'Separations': {'feed_commod_prefs': {'val': '1.0'}, 'feed_commods': {'val': 'mil_uox_spent'}, 'feedbuf_size': '30000000000', 'leftover_commod': 'waste', 'streams': {'item': {'commod': 'mil_fiss', 'info': {'buf_size': '3000000000', 'efficiencies': {'item': {'comp': 'Pu', 'eff': '.95'}}}}}, 'throughput': '1e100'}}, 'name': 'reprocessing'}, {'config': {'Storage': {'in_commod_prefs': {'val': '10'}, 'in_commods': {'val': 'mil_fiss'}, 'in_recipe': 'r_mil_heu', 'max_inv_size': 
'1e100', 'out_commods': {'val': 'mil_heu'}, 'residence_time': '0'}}, 'name': 'mil_str_fiss'}, {'config': {'Enrichment': {'feed_commod_prefs': {'val': ['1', '20']}, 'feed_commods': {'val': ['uf6', 'mil_uf6']}, 'feed_recipe': 'r_natl_u', 'max_feed_inventory': '100000', 'product_commod': 'civ_leu', 'swu_capacity': '35000', 'tails_assay': '0.003', 'tails_commod': 'u_dep'}}, 'name': 'civ_enrichment'}, {'config': {'Storage': {'in_commods': {'val': 'u_dep'}, 'out_commods': {'val': 'u_dep_str'}, 'residence_time': '0'}}, 'name': 'civ_str_u_dep'}, {'config': {'Storage': {'in_commod_prefs': {'val': '1000'}, 'in_commods': {'val': 'civ_leu'}, 'in_recipe': 'r_uox', 'max_inv_size': '30000', 'out_commods': {'val': 'uox'}, 'residence_time': '1'}}, 'name': 'civ_fabrication'}, {'config': {'Reactor': {'assem_size': '29565', 'cycle_time': '18', 'fuel_incommods': {'val': 'uox'}, 'fuel_inrecipes': {'val': 'r_uox'}, 'fuel_outcommods': {'val': 'uox_spent'}, 'fuel_outrecipes': {'val': 'r_uox_spent'}, 'n_assem_batch': '1', 'n_assem_core': '3', 'power_cap': '900', 'refuel_time': '0'}}, 'lifetime': '960', 'name': 'civ_lwr'}, {'config': {'Storage': {'in_commods': {'val': 'uox_spent'}, 'out_commods': {'val': 'uox_spent_str'}, 'residence_time': '60'}}, 'name': 'civ_str_uox_spent'}, {'config': {'DeployInst': {'build_times': {'val': ['37', '37', '61', '73']}, 'n_build': {'val': ['1', '1', '1', '1']}, 'prototypes': {'val': ['mil_enrichment', 'mil_str_u_dep', 'mil_uox_fabrication', 'mil_str_fiss']}}}, 'name': 'military_deployer'}, {'config': {'DeployInst': {'build_times': {'val': ['121', '121', '121', '145', '157', '169']}, 'n_build': {'val': ['1', '1', '1', '1', '1', '1']}, 'prototypes': {'val': ['civ_enrichment', 'civ_str_u_dep', 'civ_fabrication', 'civ_lwr', 'civ_str_uox_spent', 'civ_lwr']}}}, 'name': 'civilian_deployer'}, {'config': {'DeployInst': {'build_times': {'val': ['1', '1', '1']}, 'n_build': {'val': ['1', '1', '1']}, 'prototypes': {'val': ['mine', 'milling', 'conversion']}}}, 'name': 'shared_deployer'}], 'recipe': [{'basis': 'mass', 'name': 'r_u_ore', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9929', 'id': '922380000'}, {'comp': '999', 'id': '120240000'}]}, {'basis': 'mass', 'name': 'r_natl_u', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9929', 'id': '922380000'}]}, {'basis': 'mass', 'name': 'r_uox', 'nuclide': [{'comp': '0.05', 'id': '922350000'}, {'comp': '0.95', 'id': '922380000'}]}, {'basis': 'mass', 'name': 'r_uox_spent', 'nuclide': [{'comp': '0.01', 'id': '922350000'}, {'comp': '0.94', 'id': '922380000'}, {'comp': '0.01', 'id': '942390000'}, {'comp': '0.001', 'id': '952410000'}, {'comp': '0.03', 'id': '551350000'}]}, {'basis': 'mass', 'name': 'r_mil_uox', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9929', 'id': '922380000'}]}, {'basis': 'mass', 'name': 'r_mil_uox_spent', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9919', 'id': '922380000'}, {'comp': '0.001', 'id': '942390000'}]}, {'basis': 'mass', 'name': 'r_mil_heu', 'nuclide': [{'comp': '0.90', 'id': '922350000'}, {'comp': '0.10', 'id': '922380000'}]}]}} | [
"[email protected]"
]
| |
d57bafa6b041e14b363221f5424fcc938e2a081a | 4d21da5a3d07f4d05b997e80119cd79692ac0d25 | /Leetcode/201-300/259. 3Sum Smaller.py | fc6828a904244248c20e44b9f93c23460bea2b66 | []
| no_license | ErinC123/Algorithm | 92b2789ec3b36c49f9e65f2e7a702bb4b732e8ba | 4544fee91e811a6625000921c32ad054df550f1e | refs/heads/master | 2021-06-17T14:03:33.955233 | 2017-06-18T21:20:55 | 2017-06-18T21:20:55 | 75,894,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | class Solution(object):
def threeSumSmaller(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
nums.sort()
ret = 0
for i in range(len(nums)):
j, k = i+1, len(nums)-1
while j < k:
if nums[i]+nums[j]+nums[k] < target:
ret += k-j
j += 1
else:
k -= 1
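        # Quick check (sketch; values from the standard LeetCode 259 example):
        # threeSumSmaller([-2, 0, 1, 3], 2) returns 2, counting the triplets
        # (-2, 0, 1) and (-2, 0, 3) whose sums are less than the target.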
return ret | [
"[email protected]"
]
| |
7935285b6302c1b7277f4c9d4939535c9636fe0d | 159da3fc63ccf20b80dc17bb44b53e9a5578bcfd | /arkav_is_api/arkavauth/migrations/0005_refactor_auth.py | f92058d0d724b71e6dab65b111c6f5e8e2a4a7d7 | [
"MIT"
]
| permissive | arkavidia5/arkav-is | 4338829e7c0a9446393545316e46395e9df111fd | 6c6e8d091ead5bfff664d86f7903c62209800031 | refs/heads/master | 2021-07-16T03:49:15.900812 | 2019-02-08T18:08:32 | 2019-02-08T18:08:32 | 149,406,261 | 3 | 2 | MIT | 2018-11-09T16:49:17 | 2018-09-19T06:58:16 | Python | UTF-8 | Python | false | false | 2,458 | py | import arkav_is_api.arkavauth.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('arkavauth', '0004_user_email_confirmed'),
]
operations = [
migrations.RenameModel('EmailConfirmationAttempt', 'RegistrationConfirmationAttempt'),
migrations.RenameModel('PasswordResetAttempt', 'PasswordResetConfirmationAttempt'),
migrations.RenameField(
model_name='user',
old_name='email_confirmed',
new_name='is_email_confirmed',
),
migrations.RenameField(
model_name='passwordresetconfirmationattempt',
old_name='used',
new_name='is_confirmed',
),
migrations.RenameField(
model_name='registrationconfirmationattempt',
old_name='confirmed',
new_name='is_confirmed',
),
migrations.AddField(
model_name='passwordresetconfirmationattempt',
name='email_last_sent_at',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='registrationconfirmationattempt',
name='email_last_sent_at',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='passwordresetconfirmationattempt',
name='token',
field=models.CharField(default=arkav_is_api.arkavauth.models.generate_email_confirmation_token, max_length=30),
),
migrations.AlterField(
model_name='passwordresetconfirmationattempt',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='password_reset_confirmation_attempt', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='registrationconfirmationattempt',
name='token',
field=models.CharField(default=arkav_is_api.arkavauth.models.generate_email_confirmation_token, max_length=30),
),
migrations.AlterField(
model_name='registrationconfirmationattempt',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='registration_confirmation_attempt', to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
]
| |
32deaed41a4e6581445f42876563cf802299ebe7 | da7149f3182d2421e046828d30fc1c91c14d496d | /chapter16/coro_exc_demo.py | c9aa826582f4e4609e85504e132a7eb87f93559b | []
| no_license | tangkaiyang/fluent_python | 6db2825cfadccb70a886cb822026d69be4b03cc9 | 5f07072d8db5ddf43bfe913b3262b325a8f1ad35 | refs/heads/master | 2020-05-02T20:21:00.404872 | 2019-04-18T02:35:55 | 2019-04-18T02:35:55 | 178,188,495 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,724 | py | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/4/12 16:18
# @Author : tangky
# @Site :
# @File : coro_exc_demo.py
# @Software : PyCharm
# Example 16-8 coro_exc_demo.py: test code for learning how to handle exceptions inside a coroutine
class DemoException(Exception):
"""为这次演示定义的异常类型"""
def demo_exc_handling():
print('-> coroutine started')
while True:
try:
x = yield
        except DemoException: # handle a DemoException specially
            print('*** DemoException handled. Continuing...')
        else: # if there was no exception, display the received value
print('-> coroutine received: {!r}'.format(x))
    raise RuntimeError('This line should never run.') # this line never executes
# The last line of Example 16-8 never runs: only an unhandled exception aborts the infinite loop, and once one occurs the coroutine terminates immediately
# Example 16-9: activating and closing demo_exc_handling without an exception
exc_coro = demo_exc_handling()
next(exc_coro)
exc_coro.send(11)
exc_coro.send(22)
exc_coro.close()
from inspect import getgeneratorstate
print(getgeneratorstate(exc_coro))
# Example 16-10: throwing DemoException into demo_exc_handling does not abort the coroutine
exc_coro = demo_exc_handling()
next(exc_coro)
exc_coro.send(11)
exc_coro.throw(DemoException)
print(getgeneratorstate(exc_coro))
# If an exception thrown into the coroutine is not handled, the coroutine stops, i.e. its state becomes 'GEN_CLOSED'
# Example 16-11: the coroutine terminates if it cannot handle the exception thrown into it
exc_coro = demo_exc_handling()
next(exc_coro)
exc_coro.send(11)
try:
exc_coro.throw(ZeroDivisionError)
except Exception:
print(ZeroDivisionError)
print(getgeneratorstate(exc_coro))
| [
"[email protected]"
]
| |
05f1381ac472766b4cd06fbd8153c99cc502c0e1 | 84d891b6cb6e1e0d8c5f3e285933bf390e808946 | /Demo/PO_V6/TestCases/test_login_pytest.py | 174dcfab88ddcee38ccb2eb6b5ea51c6f4e0d99d | []
| no_license | zzlzy1989/web_auto_test | 4df71a274eb781e609de1067664264402c49737e | 3e20a55836144e806496e99870f5e8e13a85bb93 | refs/heads/master | 2020-05-24T10:37:29.709375 | 2019-10-28T06:14:31 | 2019-10-28T06:14:31 | 187,230,775 | 2 | 0 | null | 2019-06-20T11:06:32 | 2019-05-17T14:29:11 | null | UTF-8 | Python | false | false | 2,400 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Name: test_login_pytest
# Author: 简
# Time: 2019/6/20
from Demo.PO_V6.PageObjects.login_page import LoginPage
from Demo.PO_V6.PageObjects.index_page import IndexPage
from Demo.PO_V6.TestDatas import login_datas as ld
from Demo.PO_V6.TestDatas import Comm_Datas as cd
import pytest
# pytestmark = pytest.mark.model # module-level mark name
@pytest.mark.demo
@pytest.mark.usefixtures("session_action")
def test_demo():
print("111111111111111")
@pytest.mark.parametrize("a,b,c",[(1,3,4),(10,35,45),(22.22,22.22,44.44)])
def test_add(a,b,c):
res = a + b
assert res == c
# 用例三步曲:前置 、步骤 、 断言
# @ddt.ddt
# @pytest.mark.login # 整个TestLogin类里面,所有测试用例都有login标签。
@pytest.mark.usefixtures("open_url") # 使用函数名称为open_url的fixture
@pytest.mark.usefixtures("refresh_page")
class TestLogin:
    pytestmark = pytest.mark.login # every test case in the TestLogin class carries the login mark
    # Negative cases - ...
@pytest.mark.parametrize("data", ld.wrong_datas)
def test_login_0_failed_by_wrong_datas(self, data):
        # Step - log in on the login page with an empty password, account 18684720553
LoginPage(self.driver).login(data["user"], data["passwd"])
        # Assertion - the page prompt reads: please enter a password
        assert data["check"] == LoginPage(self.driver).get_error_msg_from_loginForm()
    # Positive case - login + home page
@pytest.mark.smoke
    def test_login_2_success(self, open_url):  # open_url = driver
        # logging.info("Case 1 - normal scenario - successful login - uses test data")
        # Step - login action on the login page - 18684720553 / python
        LoginPage(open_url).login(ld.success_data["user"], ld.success_data["passwd"])  # object under test + test data
        # Assertion - the page should contain the "My Account" element (element locating + element actions)
        assert IndexPage(open_url).check_nick_name_exists() == True  # object under test + test data
        # URL redirect
        assert open_url.current_url == ld.success_data["check"]  # object under test + test data
class TestTT:
pytestmark = pytest.mark.demo
# pytestmark = [pytest.mark.demo,pytest.mark.demo2]
def test_add(self):
c = 100 +200
assert c == 300
def test_demo(self):
print("demo!!!") | [
"[email protected]"
]
| |
cdd5a31a1454daea675c492521e6a22eed8d06bc | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /unistream_examples/get_agent_point_info.py | 0278d0f98ae5b4b9aca91d42c4b6f9d5fe4f01f8 | [
"CC-BY-4.0"
]
| permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""
Retrieve information about a service point.
GET https://test.api.unistream.com/v1/agents/{agentId}/poses/{posId}
"""
if __name__ == '__main__':
from utils import get_today_RFC1123_date, get_authorization_header
from config import APPLICATION_ID, SECRET
    # Partner (agent) identifier
AGENT_ID = -1
    # Service point identifier
POS_ID = -1
params = {
'agentId': AGENT_ID,
'posId': POS_ID,
}
URL = 'https://test.api.unistream.com/v1/agents/{agentId}/poses/{posId}'.format(**params)
TODAY_DATE = get_today_RFC1123_date()
headers = dict()
headers['Date'] = TODAY_DATE
headers['Authorization'] = get_authorization_header(APPLICATION_ID, SECRET, TODAY_DATE, URL, headers)
import requests
rs = requests.get(URL, headers=headers)
print(rs)
print(rs.text)
| [
"[email protected]"
]
| |
fcdcbeba752542d4e128ddebf54c68d5df123be8 | 385c01f7337cf5031093147f6731251bfbf17430 | /lms/level/containers/get_by_id.py | d7e05c88e2f38349997208a7d052eb38bf54862a | []
| no_license | lucassimon/lmswebaula | 23a73d6d2d43c78a2f9e3b552113cf50a11a3587 | 671276426685968458f240faa93b313427fa32d9 | refs/heads/master | 2021-01-19T13:26:12.352308 | 2017-08-16T21:07:43 | 2017-08-16T21:07:43 | 88,088,474 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import six
class GetByIdRQ(object):
_level_id = None
_lms_level_id = None
def __init__(self, lms_level_id=None, level_id=None):
if lms_level_id:
if not isinstance(lms_level_id, six.integer_types):
raise ValueError(
                    'The level lms id must be an integer'
)
self._lms_level_id = lms_level_id
if level_id:
self._level_id = level_id
@property
def lms_level_id(self):
return self._lms_level_id
@lms_level_id.setter
def lms_level_id(self, value):
if not isinstance(value, six.integer_types):
raise ValueError(
                'The level lms id must be an integer'
)
self._lms_level_id = value
@property
def level_id(self):
return self._level_id
@level_id.setter
def level_id(self, value):
self._level_id = value
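# Minimal usage sketch (hypothetical values; mirrors the validation above):
#   rq = GetByIdRQ(lms_level_id=42, level_id='abc-123')
#   rq.lms_level_id = 7      # ok: an integer passes the isinstance check
#   rq.lms_level_id = '7'    # raises ValueError, as enforced by the setter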
| [
"[email protected]"
]
| |
f68e80676e72be2e4597cabb98f6e8312c69fc60 | d9cd697f76565e8230a98909204a5c516437f977 | /tutorial/tutorial/settings.py | 7b7807db8743db25310455fe110dcac0eed68dba | []
| no_license | huazhicai/webspider | be20d0d3a248ef8cbfaab8e3d1fd0e8ac7551352 | a1defa3778956accbb7617c9a3798d02e0b175f6 | refs/heads/master | 2020-03-22T09:00:23.518744 | 2019-07-11T14:53:37 | 2019-07-11T14:53:37 | 139,807,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,207 | py | # -*- coding: utf-8 -*-
# Scrapy settings for tutorial project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'tutorial'
SPIDER_MODULES = ['tutorial.spiders']
NEWSPIDER_MODULE = 'tutorial.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'tutorial (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'tutorial.middlewares.TutorialSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'tutorial.middlewares.TutorialDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'tutorial.pipelines.TextPipeline': 300,
'tutorial.pipelines.MongoPipeline': 400,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
MONGO_URI = '192.168.11.138'
MONGO_DB = 'tutorial'
| [
"[email protected]"
]
| |
01d3691dce55255c364bb881f05bb97a3c770ca9 | 5982cd8db693927e83cd99f8ea1acf4fc90b8b9b | /Configurations/ControlRegions/WgS/torqueBatch/configuration1.py | 7d9ce4b429ce76b6bac43226395ae897e4ab9636 | []
| no_license | cedricpri/PlotsConfigurations | 61fc78ce9f081fd910a25f8101ea8150a7312f25 | 5cb0a87a17f89ea89003508a87487f91736e06f4 | refs/heads/master | 2021-01-17T09:46:55.026779 | 2016-09-01T09:30:09 | 2016-09-01T09:30:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | # example of configuration file
tag = 'WgS'
# used by mkShape to define output directory for root files
outputDir = 'rootFile1'
# file with list of variables
variablesFile = '../variables.py'
# file with list of cuts
cutsFile = '../cuts.py'
# file with list of samples
samplesFile = 'samples1.py'
# file with list of samples
plotFile = 'plot.py'
# luminosity to normalize to (in 1/fb)
# lumi = 2.264
#lumi = 2.318
lumi = 2.6
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
outputDirPlots = 'plotWgS'
# used by mkDatacards to define output directory for datacards
outputDirDatacard = 'datacards'
# structure file for datacard
structureFile = 'structure.py'
# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
| [
"[email protected]"
]
| |
ae734d529bcbe273e29551f3ccd8c250513c04ad | f0aba1aa9949cc6a8d3678c0b3ecb5503b470c17 | /dtc/__init__.py | d9f20e7ca90c628c2cccd70a6d6bceb53e2bd4f4 | []
| no_license | hugosenari/dtc | 788eafc1a92701332ae54e2f2d74491566d635dd | 9bb2e6f4f9180b7291a5daf6a35903e5c59e3fc4 | refs/heads/master | 2020-12-24T17:44:45.474422 | 2012-08-03T20:02:28 | 2012-08-03T20:02:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,477 | py | '''
Created on Jun 30, 2012
@author: hugosenari
'''
import plugnplay
import logging
from os import path
from dtc.core import interfaces
from dtc.core.interfaces.module import _CoreModule
from dtc.core.interfaces.mainloop import MainLoop
from dtc import modules
class Dtc(object):
def __init__(self, dirs = []):
self.loaded_dirs = []
def set_plugin_dirs(arg, dirpath, files):
logging.debug('add dir: %s to path', dirpath)
self.loaded_dirs.append(dirpath)
path.walk(interfaces.__path__[0], set_plugin_dirs, None)
path.walk(modules.__path__[0], set_plugin_dirs, None)
for directory in dirs:
path.walk(directory, set_plugin_dirs, None)
plugnplay.set_plugin_dirs(*self.loaded_dirs)
logging.debug('Set up plugnplay')
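    # Minimal usage sketch (assumptions: a Python 2 environment, since
    # os.path.walk was removed in Python 3, and plugin modules discoverable
    # under dtc.modules):
    #   dtc = Dtc(dirs=['/path/to/extra/plugin/dirs'])  # extra dirs optional
    #   dtc.run()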
def run(self, logger=logging, *args, **vargs):
logging.debug('load modules')
plugnplay.load_plugins(logger)
#get mainloop implementation
mainloop = vargs.get('mainloop', None)
if not mainloop:
loops = MainLoop.implementors()
if len(loops) > 0:
mainloop = loops[0]
vargs['mainloop'] = mainloop
for module in _CoreModule.implementors():
logging.debug('execute core module: %s', module)
module.execute_modules(*args, **vargs)
if mainloop:
mainloop.run() | [
"[email protected]"
]
| |
6da75afc662601dd4bc0b2aaf0413dede6a4ac94 | 6df18031547b1fde808944b4c8f83d2766c95251 | /UoM_databases_with_python/assignment_2/tracks.py | 094f7bf6650220142fd1d777f5317ba3710277e3 | []
| no_license | skreynolds/UoM_data_science | 6edce9b3d3bf03b6dab6471346e40965464d6adb | 9636c0a784079445f585b830a1d093acea608d6a | refs/heads/master | 2020-05-20T23:06:36.560299 | 2019-06-01T02:30:09 | 2019-06-01T02:30:09 | 185,794,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,719 | py | import xml.etree.ElementTree as ET
import sqlite3
conn = sqlite3.connect('../databases/sqldb_5.sqlite')
cur = conn.cursor()
# Make some fresh tables using executescript()
cur.executescript('''
DROP TABLE IF EXISTS Artist;
DROP TABLE IF EXISTS Genre;
DROP TABLE IF EXISTS Album;
DROP TABLE IF EXISTS Track;
CREATE TABLE Artist (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE Genre (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE Album (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
artist_id INTEGER,
title TEXT UNIQUE
);
CREATE TABLE Track (
id INTEGER NOT NULL PRIMARY KEY
AUTOINCREMENT UNIQUE,
title TEXT UNIQUE,
album_id INTEGER,
genre_id INTEGER,
len INTEGER, rating INTEGER, count INTEGER
);
''')
fname = input('Enter file name: ')
if ( len(fname) < 1 ) : fname = '../raw_data/Library.xml'
# <key>Track ID</key><integer>369</integer>
# <key>Name</key><string>Another One Bites The Dust</string>
# <key>Artist</key><string>Queen</string>
def lookup(d, key):
found = False
for child in d:
if found : return child.text
if child.tag == 'key' and child.text == key :
found = True
return None
stuff = ET.parse(fname)
all = stuff.findall('dict/dict/dict')
print('Dict count:', len(all))
for entry in all:
if ( lookup(entry, 'Track ID') is None ) : continue
name = lookup(entry, 'Name')
artist = lookup(entry, 'Artist')
album = lookup(entry, 'Album')
genre = lookup(entry, 'Genre')
count = lookup(entry, 'Play Count')
rating = lookup(entry, 'Rating')
length = lookup(entry, 'Total Time')
if name is None or artist is None or album is None or genre is None :
continue
print(name, artist, album, genre, count, rating, length)
cur.execute('''INSERT OR IGNORE INTO Artist (name)
VALUES ( ? )''', ( artist, ) )
cur.execute('SELECT id FROM Artist WHERE name = ? ', (artist, ))
artist_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO Genre (name)
VALUES ( ? )''', (genre, ) )
cur.execute('SELECT id FROM Genre WHERE name = ?', (genre, ))
genre_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO Album (title, artist_id)
VALUES ( ?, ? )''', ( album, artist_id ) )
cur.execute('SELECT id FROM Album WHERE title = ? ', (album, ))
album_id = cur.fetchone()[0]
cur.execute('''INSERT OR REPLACE INTO Track
(title, album_id, genre_id, len, rating, count)
VALUES ( ?, ?, ?, ?, ?, ? )''',
( name, album_id, genre_id, length, rating, count ) )
conn.commit()
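# Follow-up query sketch (assumption: run after the load above has committed).
# Joins the four tables created by this script to list tracks with their
# artist, album and genre names:
# cur.execute('''SELECT Track.title, Artist.name, Album.title, Genre.name
#     FROM Track JOIN Genre JOIN Album JOIN Artist
#     ON Track.genre_id = Genre.id AND Track.album_id = Album.id
#         AND Album.artist_id = Artist.id
#     ORDER BY Artist.name LIMIT 3''')
# for row in cur.fetchall():
#     print(row)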
| [
"[email protected]"
]
| |
3e108d215330ee3c14ab7f7957e3cbc55dfcb5f9 | aecad2b0e89d72aca6c80bf63c424ee7904257ce | /pending_deletes/named_entity_recognition/NER_co_occurrence.py | deeeaa377fae611e679786374f32a94ecd4dcd2c | []
| no_license | humlab/text_analytic_tools | fdf4ba814263672b05ec188aac9a059b55d085b6 | 32fc444ed11649a948a7bf59653ec792396f06e3 | refs/heads/master | 2022-03-02T06:56:29.223039 | 2019-10-28T13:06:49 | 2019-10-28T13:06:49 | 74,679,680 | 2 | 1 | null | 2019-10-26T21:33:23 | 2016-11-24T14:19:46 | Python | UTF-8 | Python | false | false | 2,705 | py |
import pandas as pd
# %%
writer = pd.ExcelWriter('C:\\TEMP\\papacy.xlsx')
# %%
#pope = 'benedict-xvi'
#pope = 'francesco'
pope = 'john-paul-ii'
#df = pd.read_excel('./Data/' + pope + '.xlsx', 'Data')
df = pd.read_excel('C:\\Users\\roma0050\\Documents\\Projects\\papacy_scraper\\data\\' + pope + '.xlsx', 'Data', dtype={'Concept': 'str'})
# %%
df_locations = df.loc[(df.Classifier=='LOCATION')]
# %%
df_place_occurrences_counts = df_locations.groupby(['Document', 'Year', 'Genre', 'Concept'])[['Count']].sum().reset_index()
df_place_occurrences_counts.columns = ['Document', 'Year', 'Genre', 'Concept', 'PlaceOccurenceCount']
df_place_distinct_counts = df_locations.groupby(['Document', 'Year', 'Genre'])[['Count']].sum().reset_index()
df_place_distinct_counts.columns = ['Document', 'Year', 'Genre', 'PlaceCount']
# %%
df_place_counts = pd.merge(df_place_distinct_counts, df_place_occurrences_counts, left_on="Document", right_on="Document")[['Document', 'Year_x', 'Concept', 'PlaceOccurenceCount', 'PlaceCount']]
df_place_counts.columns = ['Document', 'Year', 'Concept', 'PlaceOccurenceCount', 'PlaceCount']
df_place_counts['Weight'] = df_place_counts['PlaceOccurenceCount'] / df_place_counts['PlaceCount']
# %%
#df_place_counts.loc[(df_place_counts.Document=='benedict-xvi_en_travels_2008_trav-ben-xvi-usa-program-20080415')]
df_place_cooccurrence_document = pd.merge(df_place_counts,
df_place_counts,
left_on=["Document", "Year"],
right_on=["Document", "Year"])[[ 'Document', 'Year', 'Concept_x', 'Concept_y', 'Weight_x', 'Weight_y' ]]
# %%
df_place_cooccurrence_document['Weight'] = df_place_cooccurrence_document['Weight_x'] * df_place_cooccurrence_document['Weight_y']
# Note: Concept had set as string to allow for comparison below, i.e. to use '<'
df_place_cooccurrence_document = df_place_cooccurrence_document.loc[(df_place_cooccurrence_document.Concept_x < df_place_cooccurrence_document.Concept_y)]
df_place_cooccurrence_document = df_place_cooccurrence_document [['Document', 'Year', 'Concept_x', 'Concept_y', 'Weight']]
# %%
df_place_cooccurrence_document.to_excel(writer, pope + '_cooc_doc')
# %%
df_place_cooccurrence_document = df_place_cooccurrence_document.set_index(['Concept_x', 'Concept_y'])
# %%
df_place_cooccurrence_corpus = df_place_cooccurrence_document.groupby(['Concept_x', 'Concept_y'])[['Weight']].sum().reset_index()
# %%
#df_place_cooccurrence_corpus = df_place_cooccurrence_document [['Document', 'Year', 'Concept_x', 'Concept_y', 'Weight']]
df_place_cooccurrence_corpus.to_excel(writer, pope + '_cooc_corpus')
#%%
writer.save() | [
"[email protected]"
]
| |
f1b9d705860e3e5f69e290b188025d10c52789f1 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/tensorflow/python/autograph/core/unsupported_features_checker.py | 9ecab32e6c5f01db35c77233cc55c757a9f80212 | []
| no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d0644b74e1e6d4d41084b1c1d32d62fc2a1adb15cd7c6141bd2a62448c182854
size 1815
| [
"github@cuba12345"
]
| github@cuba12345 |
f954afca286ead0f30eadda260fb7ed77017edd1 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/FJZJJMM/YW_FJZJJMM_SZSJ_258.py | e1df1921dc57e77398bc293709c609467ced5724 | []
| no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,338 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from env_restart import *
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_FJZJJMM_SZSJ_258(xtp_test_case):
def setUp(self):
sql_transfer = SqlData_Transfer()
sql_transfer.transfer_fund_asset('YW_FJZJJMM_SZSJ_258')
clear_data_and_restart_sz()
Api.trade.Logout()
Api.trade.Login()
def test_YW_FJZJJMM_SZSJ_258(self):
        title = 'Funds exactly sufficient - Shenzhen A-share own-side-best-price buy (available funds = order amount + fees)'
        # Define the expected values for the current test case
        # Expected status: initial, unfilled, partially filled, fully filled, partial-cancel reported, partially cancelled, reported pending cancel, cancelled, rejected, cancel rejected, internal cancel
        # xtp_ID and cancel_xtpID default to 0 and need not be changed
case_goal = {
'期望状态': ['未成交','全成','部成'][trade_type],
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order parameter details ------------------------------------------
        # Params: ticker, market, security type, security status, trading status, side (B buy / S sell), expected status, Api
stkparm = QueryStkPriceQty('151133', '2', '24', '2', '0', 'B', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
                '测试错误原因': 'failed to fetch order parameters, ' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':trade_type + 1,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
'price': stkparm['涨停价'],
'quantity': 200,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('Execution result: ' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 5
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
bf342befc93f6e874f5a82c83db670ea0dcd7f9b | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/numpy/2016/8/system_info.py | 014223b745ac3bc735add615d462bac5093d83af | []
| no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 84,885 | py | #!/bin/env python
"""
This file defines a set of system_info classes for getting
information about various resources (libraries, library directories,
include directories, etc.) in the system. Currently, the following
classes are available:
atlas_info
atlas_threads_info
atlas_blas_info
atlas_blas_threads_info
lapack_atlas_info
lapack_atlas_threads_info
atlas_3_10_info
atlas_3_10_threads_info
atlas_3_10_blas_info,
atlas_3_10_blas_threads_info,
lapack_atlas_3_10_info
lapack_atlas_3_10_threads_info
blas_info
lapack_info
openblas_info
blis_info
blas_opt_info # usage recommended
lapack_opt_info # usage recommended
fftw_info,dfftw_info,sfftw_info
fftw_threads_info,dfftw_threads_info,sfftw_threads_info
djbfft_info
x11_info
lapack_src_info
blas_src_info
numpy_info
numarray_info
numpy_info
boost_python_info
agg2_info
wx_info
gdk_pixbuf_xlib_2_info
gdk_pixbuf_2_info
gdk_x11_2_info
gtkp_x11_2_info
gtkp_2_info
xft_info
freetype2_info
umfpack_info
Usage:
info_dict = get_info(<name>)
where <name> is a string 'atlas','x11','fftw','lapack','blas',
'lapack_src', 'blas_src', etc. For a complete list of allowed names,
see the definition of get_info() function below.
Returned info_dict is a dictionary which is compatible with
distutils.setup keyword arguments. If info_dict == {}, then the
requested resource is not available (system_info could not find it).
Several *_info classes specify an environment variable to specify
the locations of software. When setting the corresponding environment
variable to 'None' then the software will be ignored, even when it
is available in system.
Global parameters:
system_info.search_static_first - search static libraries (.a)
in preference to shared ones (.so, .sl) if enabled.
system_info.verbosity - output the results to stdout if enabled.
The file 'site.cfg' is looked for in
1) Directory of main setup.py file being run.
2) Home directory of user running the setup.py file as ~/.numpy-site.cfg
3) System wide directory (location of this file...)
The first one found is used to get system configuration options The
format is that used by ConfigParser (i.e., Windows .INI style). The
section ALL has options that are the default for each section. The
available sections are fftw, atlas, and x11. Appropriate defaults are
used if nothing is specified.
The order of finding the locations of resources is the following:
1. environment variable
2. section in site.cfg
3. ALL section in site.cfg
Only the first complete match is returned.
Example:
----------
[ALL]
library_dirs = /usr/lib:/usr/local/lib:/opt/lib
include_dirs = /usr/include:/usr/local/include:/opt/include
src_dirs = /usr/local/src:/opt/src
# search static libraries (.a) in preference to shared ones (.so)
search_static_first = 0
[fftw]
fftw_libs = rfftw, fftw
fftw_opt_libs = rfftw_threaded, fftw_threaded
# if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs
[atlas]
library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
# for overriding the names of the atlas libraries
atlas_libs = lapack, f77blas, cblas, atlas
[x11]
library_dirs = /usr/X11R6/lib
include_dirs = /usr/X11R6/include
----------
Authors:
Pearu Peterson <[email protected]>, February 2002
David M. Cooke <[email protected]>, April 2002
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import re
import copy
import warnings
from glob import glob
from functools import reduce
if sys.version_info[0] < 3:
from ConfigParser import NoOptionError
from ConfigParser import RawConfigParser as ConfigParser
else:
from configparser import NoOptionError
from configparser import RawConfigParser as ConfigParser
# It seems that some people are importing ConfigParser from here so is
# good to keep its class name. Use of RawConfigParser is needed in
# order to be able to load path names with percent in them, like
# `feature%2Fcool` which is common on git flow branch names.
from distutils.errors import DistutilsError
from distutils.dist import Distribution
import distutils.sysconfig
from distutils import log
from distutils.util import get_platform
from numpy.distutils.exec_command import \
find_executable, exec_command, get_pythonexe
from numpy.distutils.misc_util import is_sequence, is_string, \
get_shared_lib_extension
from numpy.distutils.command.config import config as cmd_config
from numpy.distutils.compat import get_exception
import distutils.ccompiler
import tempfile
import shutil
# Determine number of bits
import platform
_bits = {'32bit': 32, '64bit': 64}
platform_bits = _bits[platform.architecture()[0]]
def libpaths(paths, bits):
"""Return a list of library paths valid on 32 or 64 bit systems.
Inputs:
paths : sequence
A sequence of strings (typically paths)
bits : int
An integer, the only valid values are 32 or 64. A ValueError exception
is raised otherwise.
Examples:
Consider a list of directories
>>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib']
For a 32-bit platform, this is already valid:
>>> np.distutils.system_info.libpaths(paths,32)
['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib']
On 64 bits, we prepend the '64' postfix
>>> np.distutils.system_info.libpaths(paths,64)
['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib',
'/usr/lib64', '/usr/lib']
"""
if bits not in (32, 64):
raise ValueError("Invalid bit size in libpaths: 32 or 64 only")
# Handle 32bit case
if bits == 32:
return paths
# Handle 64bit case
out = []
for p in paths:
out.extend([p + '64', p])
return out
if sys.platform == 'win32':
default_lib_dirs = ['C:\\',
os.path.join(distutils.sysconfig.EXEC_PREFIX,
'libs')]
default_runtime_dirs = []
default_include_dirs = []
default_src_dirs = ['.']
default_x11_lib_dirs = []
default_x11_include_dirs = []
else:
default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib',
'/opt/local/lib', '/sw/lib'], platform_bits)
default_runtime_dirs = []
default_include_dirs = ['/usr/local/include',
'/opt/include', '/usr/include',
# path of umfpack under macports
'/opt/local/include/ufsparse',
'/opt/local/include', '/sw/include',
'/usr/include/suitesparse']
default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src']
default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib',
'/usr/lib'], platform_bits)
default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include',
'/usr/include']
if os.path.exists('/usr/lib/X11'):
globbed_x11_dir = glob('/usr/lib/*/libX11.so')
if globbed_x11_dir:
x11_so_dir = os.path.split(globbed_x11_dir[0])[0]
default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11'])
default_x11_include_dirs.extend(['/usr/lib/X11/include',
'/usr/include/X11'])
import subprocess as sp
tmp = None
try:
# Explicitly open/close file to avoid ResourceWarning when
# tests are run in debug mode Python 3.
tmp = open(os.devnull, 'w')
p = sp.Popen(["gcc", "-print-multiarch"], stdout=sp.PIPE,
stderr=tmp)
except (OSError, DistutilsError):
# OSError if gcc is not installed, or SandboxViolation (DistutilsError
# subclass) if an old setuptools bug is triggered (see gh-3160).
pass
else:
triplet = str(p.communicate()[0].decode().strip())
if p.returncode == 0:
# gcc supports the "-print-multiarch" option
default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)]
default_lib_dirs += [os.path.join("/usr/lib/", triplet)]
finally:
if tmp is not None:
tmp.close()
if os.path.join(sys.prefix, 'lib') not in default_lib_dirs:
default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))
default_include_dirs.append(os.path.join(sys.prefix, 'include'))
default_src_dirs.append(os.path.join(sys.prefix, 'src'))
default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)]
default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)]
default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)]
default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)]
so_ext = get_shared_lib_extension()
def get_standard_file(fname):
"""Returns a list of files named 'fname' from
1) System-wide directory (directory-location of this module)
2) Users HOME directory (os.environ['HOME'])
3) Local directory
"""
# System-wide file
filenames = []
try:
f = __file__
except NameError:
f = sys.argv[0]
else:
sysfile = os.path.join(os.path.split(os.path.abspath(f))[0],
fname)
if os.path.isfile(sysfile):
filenames.append(sysfile)
# Home directory
# And look for the user config file
try:
f = os.path.expanduser('~')
except KeyError:
pass
else:
user_file = os.path.join(f, fname)
if os.path.isfile(user_file):
filenames.append(user_file)
# Local file
if os.path.isfile(fname):
filenames.append(os.path.abspath(fname))
return filenames
def get_info(name, notfound_action=0):
"""
notfound_action:
0 - do nothing
1 - display warning message
2 - raise error
"""
cl = {'atlas': atlas_info, # use lapack_opt or blas_opt instead
'atlas_threads': atlas_threads_info, # ditto
'atlas_blas': atlas_blas_info,
'atlas_blas_threads': atlas_blas_threads_info,
'lapack_atlas': lapack_atlas_info, # use lapack_opt instead
'lapack_atlas_threads': lapack_atlas_threads_info, # ditto
'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead
'atlas_3_10_threads': atlas_3_10_threads_info, # ditto
'atlas_3_10_blas': atlas_3_10_blas_info,
'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead
'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto
'mkl': mkl_info,
# openblas which may or may not have embedded lapack
'openblas': openblas_info, # use blas_opt instead
# openblas with embedded lapack
'openblas_lapack': openblas_lapack_info, # use blas_opt instead
'blis': blis_info, # use blas_opt instead
'lapack_mkl': lapack_mkl_info, # use lapack_opt instead
'blas_mkl': blas_mkl_info, # use blas_opt instead
'x11': x11_info,
'fft_opt': fft_opt_info,
'fftw': fftw_info,
'fftw2': fftw2_info,
'fftw3': fftw3_info,
'dfftw': dfftw_info,
'sfftw': sfftw_info,
'fftw_threads': fftw_threads_info,
'dfftw_threads': dfftw_threads_info,
'sfftw_threads': sfftw_threads_info,
'djbfft': djbfft_info,
'blas': blas_info, # use blas_opt instead
'lapack': lapack_info, # use lapack_opt instead
'lapack_src': lapack_src_info,
'blas_src': blas_src_info,
'numpy': numpy_info,
'f2py': f2py_info,
'Numeric': Numeric_info,
'numeric': Numeric_info,
'numarray': numarray_info,
'numerix': numerix_info,
'lapack_opt': lapack_opt_info,
'blas_opt': blas_opt_info,
'boost_python': boost_python_info,
'agg2': agg2_info,
'wx': wx_info,
'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,
'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,
'gdk_pixbuf_2': gdk_pixbuf_2_info,
'gdk-pixbuf-2.0': gdk_pixbuf_2_info,
'gdk': gdk_info,
'gdk_2': gdk_2_info,
'gdk-2.0': gdk_2_info,
'gdk_x11_2': gdk_x11_2_info,
'gdk-x11-2.0': gdk_x11_2_info,
'gtkp_x11_2': gtkp_x11_2_info,
'gtk+-x11-2.0': gtkp_x11_2_info,
'gtkp_2': gtkp_2_info,
'gtk+-2.0': gtkp_2_info,
'xft': xft_info,
'freetype2': freetype2_info,
'umfpack': umfpack_info,
'amd': amd_info,
}.get(name.lower(), system_info)
return cl().get_info(notfound_action)
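# Usage sketch (hedged: the returned dict varies by system and may be empty).
# `notfound_action=1` warns instead of raising when the resource is missing:
#   from numpy.distutils.system_info import get_info
#   info = get_info('lapack_opt', notfound_action=1)
#   print(info.get('libraries', []))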
class NotFoundError(DistutilsError):
"""Some third-party program or library is not found."""
class AtlasNotFoundError(NotFoundError):
"""
Atlas (http://math-atlas.sourceforge.net/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [atlas]) or by setting
the ATLAS environment variable."""
class LapackNotFoundError(NotFoundError):
"""
Lapack (http://www.netlib.org/lapack/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [lapack]) or by setting
the LAPACK environment variable."""
class LapackSrcNotFoundError(LapackNotFoundError):
"""
Lapack (http://www.netlib.org/lapack/) sources not found.
Directories to search for the sources can be specified in the
numpy/distutils/site.cfg file (section [lapack_src]) or by setting
the LAPACK_SRC environment variable."""
class BlasNotFoundError(NotFoundError):
"""
Blas (http://www.netlib.org/blas/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [blas]) or by setting
the BLAS environment variable."""
class BlasSrcNotFoundError(BlasNotFoundError):
"""
Blas (http://www.netlib.org/blas/) sources not found.
Directories to search for the sources can be specified in the
numpy/distutils/site.cfg file (section [blas_src]) or by setting
the BLAS_SRC environment variable."""
class FFTWNotFoundError(NotFoundError):
"""
FFTW (http://www.fftw.org/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [fftw]) or by setting
the FFTW environment variable."""
class DJBFFTNotFoundError(NotFoundError):
"""
DJBFFT (http://cr.yp.to/djbfft.html) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [djbfft]) or by setting
the DJBFFT environment variable."""
class NumericNotFoundError(NotFoundError):
"""
Numeric (http://www.numpy.org/) module not found.
Get it from above location, install it, and retry setup.py."""
class X11NotFoundError(NotFoundError):
"""X11 libraries not found."""
class UmfpackNotFoundError(NotFoundError):
"""
UMFPACK sparse solver (http://www.cise.ufl.edu/research/sparse/umfpack/)
not found. Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [umfpack]) or by setting
the UMFPACK environment variable."""
class system_info(object):
""" get_info() is the only public method. Don't use others.
"""
section = 'ALL'
dir_env_var = None
search_static_first = 0 # XXX: disabled by default, may disappear in
# future unless it is proved to be useful.
verbosity = 1
saved_results = {}
notfounderror = NotFoundError
def __init__(self,
default_lib_dirs=default_lib_dirs,
default_include_dirs=default_include_dirs,
verbosity=1,
):
self.__class__.info = {}
self.local_prefixes = []
defaults = {'library_dirs': os.pathsep.join(default_lib_dirs),
'include_dirs': os.pathsep.join(default_include_dirs),
'runtime_library_dirs': os.pathsep.join(default_runtime_dirs),
'rpath': '',
'src_dirs': os.pathsep.join(default_src_dirs),
'search_static_first': str(self.search_static_first),
'extra_compile_args': '', 'extra_link_args': ''}
self.cp = ConfigParser(defaults)
self.files = []
self.files.extend(get_standard_file('.numpy-site.cfg'))
self.files.extend(get_standard_file('site.cfg'))
self.parse_config_files()
if self.section is not None:
self.search_static_first = self.cp.getboolean(
self.section, 'search_static_first')
assert isinstance(self.search_static_first, int)
def parse_config_files(self):
self.cp.read(self.files)
if not self.cp.has_section(self.section):
if self.section is not None:
self.cp.add_section(self.section)
def calc_libraries_info(self):
libs = self.get_libraries()
dirs = self.get_lib_dirs()
# The extensions use runtime_library_dirs
r_dirs = self.get_runtime_lib_dirs()
# Intrinsic distutils use rpath, we simply append both entries
# as though they were one entry
r_dirs.extend(self.get_runtime_lib_dirs(key='rpath'))
info = {}
for lib in libs:
i = self.check_libs(dirs, [lib])
if i is not None:
dict_append(info, **i)
else:
log.info('Library %s was not found. Ignoring' % (lib))
if r_dirs:
i = self.check_libs(r_dirs, [lib])
if i is not None:
# Swap library keywords found to runtime_library_dirs
# the libraries are insisting on the user having defined
# them using the library_dirs, and not necessarily by
# runtime_library_dirs
del i['libraries']
i['runtime_library_dirs'] = i.pop('library_dirs')
dict_append(info, **i)
else:
log.info('Runtime library %s was not found. Ignoring' % (lib))
return info
def set_info(self, **info):
if info:
lib_info = self.calc_libraries_info()
dict_append(info, **lib_info)
# Update extra information
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
self.saved_results[self.__class__.__name__] = info
def has_info(self):
return self.__class__.__name__ in self.saved_results
def calc_extra_info(self):
""" Updates the information in the current information with
respect to these flags:
extra_compile_args
extra_link_args
"""
info = {}
for key in ['extra_compile_args', 'extra_link_args']:
# Get values
opt = self.cp.get(self.section, key)
if opt:
tmp = {key : [opt]}
dict_append(info, **tmp)
return info
def get_info(self, notfound_action=0):
""" Return a dictonary with items that are compatible
with numpy.distutils.setup keyword arguments.
"""
flag = 0
if not self.has_info():
flag = 1
log.info(self.__class__.__name__ + ':')
if hasattr(self, 'calc_info'):
self.calc_info()
if notfound_action:
if not self.has_info():
if notfound_action == 1:
warnings.warn(self.notfounderror.__doc__)
elif notfound_action == 2:
raise self.notfounderror(self.notfounderror.__doc__)
else:
raise ValueError(repr(notfound_action))
if not self.has_info():
log.info(' NOT AVAILABLE')
self.set_info()
else:
log.info(' FOUND:')
res = self.saved_results.get(self.__class__.__name__)
if self.verbosity > 0 and flag:
for k, v in res.items():
v = str(v)
if k in ['sources', 'libraries'] and len(v) > 270:
v = v[:120] + '...\n...\n...' + v[-120:]
log.info(' %s = %s', k, v)
log.info('')
return copy.deepcopy(res)
def get_paths(self, section, key):
dirs = self.cp.get(section, key).split(os.pathsep)
env_var = self.dir_env_var
if env_var:
if is_sequence(env_var):
e0 = env_var[-1]
for e in env_var:
if e in os.environ:
e0 = e
break
if not env_var[0] == e0:
log.info('Setting %s=%s' % (env_var[0], e0))
env_var = e0
if env_var and env_var in os.environ:
d = os.environ[env_var]
if d == 'None':
log.info('Disabled %s: %s',
self.__class__.__name__, '(%s is None)'
% (env_var,))
return []
if os.path.isfile(d):
dirs = [os.path.dirname(d)] + dirs
l = getattr(self, '_lib_names', [])
if len(l) == 1:
b = os.path.basename(d)
b = os.path.splitext(b)[0]
if b[:3] == 'lib':
log.info('Replacing _lib_names[0]==%r with %r' \
% (self._lib_names[0], b[3:]))
self._lib_names[0] = b[3:]
else:
ds = d.split(os.pathsep)
ds2 = []
for d in ds:
if os.path.isdir(d):
ds2.append(d)
for dd in ['include', 'lib']:
d1 = os.path.join(d, dd)
if os.path.isdir(d1):
ds2.append(d1)
dirs = ds2 + dirs
default_dirs = self.cp.get(self.section, key).split(os.pathsep)
dirs.extend(default_dirs)
ret = []
for d in dirs:
if len(d) > 0 and not os.path.isdir(d):
warnings.warn('Specified path %s is invalid.' % d)
continue
if d not in ret:
ret.append(d)
log.debug('( %s = %s )', key, ':'.join(ret))
return ret
def get_lib_dirs(self, key='library_dirs'):
return self.get_paths(self.section, key)
def get_runtime_lib_dirs(self, key='runtime_library_dirs'):
path = self.get_paths(self.section, key)
if path == ['']:
path = []
return path
def get_include_dirs(self, key='include_dirs'):
return self.get_paths(self.section, key)
def get_src_dirs(self, key='src_dirs'):
return self.get_paths(self.section, key)
def get_libs(self, key, default):
try:
libs = self.cp.get(self.section, key)
except NoOptionError:
if not default:
return []
if is_string(default):
return [default]
return default
return [b for b in [a.strip() for a in libs.split(',')] if b]
def get_libraries(self, key='libraries'):
if hasattr(self, '_lib_names'):
return self.get_libs(key, default=self._lib_names)
else:
return self.get_libs(key, '')
def library_extensions(self):
static_exts = ['.a']
if sys.platform == 'win32':
static_exts.append('.lib') # .lib is used by MSVC
if self.search_static_first:
exts = static_exts + [so_ext]
else:
exts = [so_ext] + static_exts
if sys.platform == 'cygwin':
exts.append('.dll.a')
if sys.platform == 'darwin':
exts.append('.dylib')
return exts
def check_libs(self, lib_dirs, libs, opt_libs=[]):
"""If static or shared libraries are available then return
their info dictionary.
Checks for all libraries as shared libraries first, then
static (or vice versa if self.search_static_first is True).
"""
exts = self.library_extensions()
info = None
for ext in exts:
info = self._check_libs(lib_dirs, libs, opt_libs, [ext])
if info is not None:
break
if not info:
log.info(' libraries %s not found in %s', ','.join(libs),
lib_dirs)
return info
def check_libs2(self, lib_dirs, libs, opt_libs=[]):
"""If static or shared libraries are available then return
their info dictionary.
Checks each library for shared or static.
"""
exts = self.library_extensions()
info = self._check_libs(lib_dirs, libs, opt_libs, exts)
if not info:
log.info(' libraries %s not found in %s', ','.join(libs),
lib_dirs)
return info
def _find_lib(self, lib_dir, lib, exts):
assert is_string(lib_dir)
# under windows first try without 'lib' prefix
if sys.platform == 'win32':
lib_prefixes = ['', 'lib']
else:
lib_prefixes = ['lib']
# for each library name, see if we can find a file for it.
for ext in exts:
for prefix in lib_prefixes:
p = self.combine_paths(lib_dir, prefix + lib + ext)
if p:
break
if p:
assert len(p) == 1
# ??? splitext on p[0] would do this for cygwin
# doesn't seem correct
if ext == '.dll.a':
lib += '.dll'
return lib
return False
def _find_libs(self, lib_dirs, libs, exts):
# make sure we preserve the order of libs, as it can be important
found_dirs, found_libs = [], []
for lib in libs:
for lib_dir in lib_dirs:
found_lib = self._find_lib(lib_dir, lib, exts)
if found_lib:
found_libs.append(found_lib)
if lib_dir not in found_dirs:
found_dirs.append(lib_dir)
break
return found_dirs, found_libs
def _check_libs(self, lib_dirs, libs, opt_libs, exts):
"""Find mandatory and optional libs in expected paths.
Missing optional libraries are silently forgotten.
"""
if not is_sequence(lib_dirs):
lib_dirs = [lib_dirs]
# First, try to find the mandatory libraries
found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts)
if len(found_libs) > 0 and len(found_libs) == len(libs):
# Now, check for optional libraries
opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts)
found_libs.extend(opt_found_libs)
for lib_dir in opt_found_dirs:
if lib_dir not in found_dirs:
found_dirs.append(lib_dir)
info = {'libraries': found_libs, 'library_dirs': found_dirs}
return info
else:
return None
def combine_paths(self, *args):
"""Return a list of existing paths composed by all combinations
of items from the arguments.
"""
return combine_paths(*args, **{'verbosity': self.verbosity})
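# Illustrative only: a site.cfg entry consumed by the classes below
# typically looks like
#
# [fftw]
# library_dirs = /usr/local/lib
# include_dirs = /usr/local/include
# fftw_libs = fftw3
#
# after which get_info('fftw3') returns the resolved build parameters.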
class fft_opt_info(system_info):
def calc_info(self):
info = {}
fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw')
djbfft_info = get_info('djbfft')
if fftw_info:
dict_append(info, **fftw_info)
if djbfft_info:
dict_append(info, **djbfft_info)
self.set_info(**info)
return
class fftw_info(system_info):
#variables to override
section = 'fftw'
dir_env_var = 'FFTW'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw3',
'libs':['fftw3'],
'includes':['fftw3.h'],
'macros':[('SCIPY_FFTW3_H', None)]},
{'name':'fftw2',
'libs':['rfftw', 'fftw'],
'includes':['fftw.h', 'rfftw.h'],
'macros':[('SCIPY_FFTW_H', None)]}]
def calc_ver_info(self, ver_param):
"""Returns True on successful version detection, else False"""
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
incl_dir = None
libs = self.get_libs(self.section + '_libs', ver_param['libs'])
info = self.check_libs(lib_dirs, libs)
if info is not None:
flag = 0
for d in incl_dirs:
if len(self.combine_paths(d, ver_param['includes'])) \
== len(ver_param['includes']):
dict_append(info, include_dirs=[d])
flag = 1
incl_dirs = [d]
break
if flag:
dict_append(info, define_macros=ver_param['macros'])
else:
info = None
if info is not None:
self.set_info(**info)
return True
else:
log.info(' %s not found' % (ver_param['name']))
return False
def calc_info(self):
for i in self.ver_info:
if self.calc_ver_info(i):
break
class fftw2_info(fftw_info):
#variables to override
section = 'fftw'
dir_env_var = 'FFTW'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw2',
'libs':['rfftw', 'fftw'],
'includes':['fftw.h', 'rfftw.h'],
'macros':[('SCIPY_FFTW_H', None)]}
]
class fftw3_info(fftw_info):
#variables to override
section = 'fftw3'
dir_env_var = 'FFTW3'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw3',
'libs':['fftw3'],
'includes':['fftw3.h'],
'macros':[('SCIPY_FFTW3_H', None)]},
]
class dfftw_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'dfftw',
'libs':['drfftw', 'dfftw'],
'includes':['dfftw.h', 'drfftw.h'],
'macros':[('SCIPY_DFFTW_H', None)]}]
class sfftw_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'sfftw',
'libs':['srfftw', 'sfftw'],
'includes':['sfftw.h', 'srfftw.h'],
'macros':[('SCIPY_SFFTW_H', None)]}]
class fftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'fftw threads',
'libs':['rfftw_threads', 'fftw_threads'],
'includes':['fftw_threads.h', 'rfftw_threads.h'],
'macros':[('SCIPY_FFTW_THREADS_H', None)]}]
class dfftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'dfftw threads',
'libs':['drfftw_threads', 'dfftw_threads'],
'includes':['dfftw_threads.h', 'drfftw_threads.h'],
'macros':[('SCIPY_DFFTW_THREADS_H', None)]}]
class sfftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'sfftw threads',
'libs':['srfftw_threads', 'sfftw_threads'],
'includes':['sfftw_threads.h', 'srfftw_threads.h'],
'macros':[('SCIPY_SFFTW_THREADS_H', None)]}]
class djbfft_info(system_info):
section = 'djbfft'
dir_env_var = 'DJBFFT'
notfounderror = DJBFFTNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend(self.combine_paths(d, ['djbfft']) + [d])
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
info = None
for d in lib_dirs:
p = self.combine_paths(d, ['djbfft.a'])
if p:
info = {'extra_objects': p}
break
p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext])
if p:
info = {'libraries': ['djbfft'], 'library_dirs': [d]}
break
if info is None:
return
for d in incl_dirs:
if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2:
dict_append(info, include_dirs=[d],
define_macros=[('SCIPY_DJBFFT_H', None)])
self.set_info(**info)
return
class mkl_info(system_info):
section = 'mkl'
dir_env_var = 'MKLROOT'
_lib_mkl = ['mkl_rt']
def get_mkl_rootdir(self):
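# Resolution order: an explicit MKLROOT environment variable wins;
# otherwise scan LD_LIBRARY_PATH and /etc/ld.so.conf entries for an
# 'mkl*' directory that contains a 'lib' subdirectory.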
mklroot = os.environ.get('MKLROOT', None)
if mklroot is not None:
return mklroot
paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)
ld_so_conf = '/etc/ld.so.conf'
if os.path.isfile(ld_so_conf):
for d in open(ld_so_conf, 'r'):
d = d.strip()
if d:
paths.append(d)
intel_mkl_dirs = []
for path in paths:
path_atoms = path.split(os.sep)
for m in path_atoms:
if m.startswith('mkl'):
d = os.sep.join(path_atoms[:path_atoms.index(m) + 2])
intel_mkl_dirs.append(d)
break
for d in paths:
dirs = glob(os.path.join(d, 'mkl', '*'))
dirs += glob(os.path.join(d, 'mkl*'))
for d in dirs:
if os.path.isdir(os.path.join(d, 'lib')):
return d
return None
def __init__(self):
mklroot = self.get_mkl_rootdir()
if mklroot is None:
system_info.__init__(self)
else:
from .cpuinfo import cpu
if cpu.is_Itanium():
plt = '64'
elif cpu.is_Intel() and cpu.is_64bit():
plt = 'intel64'
else:
plt = '32'
system_info.__init__(
self,
default_lib_dirs=[os.path.join(mklroot, 'lib', plt)],
default_include_dirs=[os.path.join(mklroot, 'include')])
def calc_info(self):
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
mkl_libs = self.get_libs('mkl_libs', self._lib_mkl)
info = self.check_libs2(lib_dirs, mkl_libs)
if info is None:
return
dict_append(info,
define_macros=[('SCIPY_MKL_H', None),
('HAVE_CBLAS', None)],
include_dirs=incl_dirs)
if sys.platform == 'win32':
pass # win32 has no pthread library
else:
dict_append(info, libraries=['pthread'])
self.set_info(**info)
class lapack_mkl_info(mkl_info):
pass
class blas_mkl_info(mkl_info):
pass
class atlas_info(system_info):
section = 'atlas'
dir_env_var = 'ATLAS'
_lib_names = ['f77blas', 'cblas']
if sys.platform[:7] == 'freebsd':
_lib_atlas = ['atlas_r']
_lib_lapack = ['alapack_r']
else:
_lib_atlas = ['atlas']
_lib_lapack = ['lapack']
notfounderror = AtlasNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*',
'sse', '3dnow', 'sse2']) + [d])
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
atlas_libs = self.get_libs('atlas_libs',
self._lib_names + self._lib_atlas)
lapack_libs = self.get_libs('lapack_libs', self._lib_lapack)
atlas = None
lapack = None
atlas_1 = None
for d in lib_dirs:
atlas = self.check_libs2(d, atlas_libs, [])
lapack_atlas = self.check_libs2(d, ['lapack_atlas'], [])
if atlas is not None:
lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*'])
lapack = self.check_libs2(lib_dirs2, lapack_libs, [])
if lapack is not None:
break
if atlas:
atlas_1 = atlas
log.info(self.__class__)
if atlas is None:
atlas = atlas_1
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
if lapack is not None:
dict_append(info, **lapack)
dict_append(info, **atlas)
elif 'lapack_atlas' in atlas['libraries']:
dict_append(info, **atlas)
dict_append(info,
define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)])
self.set_info(**info)
return
else:
dict_append(info, **atlas)
dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)])
message = """
*********************************************************************
Could not find lapack library within the ATLAS installation.
*********************************************************************
"""
warnings.warn(message)
self.set_info(**info)
return
# Check if lapack library is complete, only warn if it is not.
lapack_dir = lapack['library_dirs'][0]
lapack_name = lapack['libraries'][0]
lapack_lib = None
lib_prefixes = ['lib']
if sys.platform == 'win32':
lib_prefixes.append('')
for e in self.library_extensions():
for prefix in lib_prefixes:
fn = os.path.join(lapack_dir, prefix + lapack_name + e)
if os.path.exists(fn):
lapack_lib = fn
break
if lapack_lib:
break
if lapack_lib is not None:
sz = os.stat(lapack_lib)[6]
if sz <= 4000 * 1024:
message = """
*********************************************************************
Lapack library (from ATLAS) is probably incomplete:
size of %s is %sk (expected >4000k)
Follow the instructions in the KNOWN PROBLEMS section of the file
numpy/INSTALL.txt.
*********************************************************************
""" % (lapack_lib, sz / 1024)
warnings.warn(message)
else:
info['language'] = 'f77'
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(info, **atlas_extra_info)
self.set_info(**info)
class atlas_blas_info(atlas_info):
_lib_names = ['f77blas', 'cblas']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
atlas_libs = self.get_libs('atlas_libs',
self._lib_names + self._lib_atlas)
atlas = self.check_libs2(lib_dirs, atlas_libs, [])
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(atlas, **atlas_extra_info)
dict_append(info, **atlas)
self.set_info(**info)
return
class atlas_threads_info(atlas_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['ptf77blas', 'ptcblas']
class atlas_blas_threads_info(atlas_blas_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['ptf77blas', 'ptcblas']
class lapack_atlas_info(atlas_info):
_lib_names = ['lapack_atlas'] + atlas_info._lib_names
class lapack_atlas_threads_info(atlas_threads_info):
_lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names
class atlas_3_10_info(atlas_info):
_lib_names = ['satlas']
_lib_atlas = _lib_names
_lib_lapack = _lib_names
class atlas_3_10_blas_info(atlas_3_10_info):
_lib_names = ['satlas']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
atlas_libs = self.get_libs('atlas_libs',
self._lib_names)
atlas = self.check_libs2(lib_dirs, atlas_libs, [])
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(atlas, **atlas_extra_info)
dict_append(info, **atlas)
self.set_info(**info)
return
class atlas_3_10_threads_info(atlas_3_10_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['tatlas']
_lib_atlas = _lib_names
_lib_lapack = _lib_names
class atlas_3_10_blas_threads_info(atlas_3_10_blas_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['tatlas']
class lapack_atlas_3_10_info(atlas_3_10_info):
pass
class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info):
pass
class lapack_info(system_info):
section = 'lapack'
dir_env_var = 'LAPACK'
_lib_names = ['lapack']
notfounderror = LapackNotFoundError
def calc_info(self):
lib_dirs = self.get_lib_dirs()
lapack_libs = self.get_libs('lapack_libs', self._lib_names)
info = self.check_libs(lib_dirs, lapack_libs, [])
if info is None:
return
info['language'] = 'f77'
self.set_info(**info)
class lapack_src_info(system_info):
section = 'lapack_src'
dir_env_var = 'LAPACK_SRC'
notfounderror = LapackSrcNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'dgesv.f')):
src_dir = d
break
if not src_dir:
#XXX: Get sources from netlib. May be ask first.
return
# The following is extracted from LAPACK-3.0/SRC/Makefile.
# Added missing names from lapack-lite-3.1.1/SRC/Makefile
# while keeping removed names for Lapack-3.0 compatibility.
allaux = '''
ilaenv ieeeck lsame lsamen xerbla
iparmq
''' # *.f
laux = '''
bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1
laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2
lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre
larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4
lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1
lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf
stebz stedc steqr sterf
larra larrc larrd larr larrk larrj larrr laneg laisnan isnan
lazq3 lazq4
''' # [s|d]*.f
lasrc = '''
gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak
gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv
gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2
geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd
gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal
gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd
ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein
hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0
lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb
lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp
laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv
lartv larz larzb larzt laswp lasyf latbs latdf latps latrd
latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv
pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2
potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri
pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs
spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv
sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2
tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs
trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs
tzrqf tzrzf
lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5
''' # [s|c|d|z]*.f
sd_lasrc = '''
laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l
org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr
orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3
ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx
sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd
stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd
sygvx sytd2 sytrd
''' # [s|d]*.f
cz_lasrc = '''
bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev
heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv
hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd
hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf
hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7
laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe
laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv
spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq
ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2
unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr
''' # [c|z]*.f
#######
sclaux = laux + ' econd ' # s*.f
dzlaux = laux + ' secnd ' # d*.f
slasrc = lasrc + sd_lasrc # s*.f
dlasrc = lasrc + sd_lasrc # d*.f
clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f
zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f
oclasrc = ' icmax1 scsum1 ' # *.f
ozlasrc = ' izmax1 dzsum1 ' # *.f
sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \
+ ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \
+ ['c%s.f' % f for f in (clasrc).split()] \
+ ['z%s.f' % f for f in (zlasrc).split()] \
+ ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()]
sources = [os.path.join(src_dir, f) for f in sources]
# Lapack 3.1:
src_dir2 = os.path.join(src_dir, '..', 'INSTALL')
sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz']
# Lapack 3.2.1:
sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz']
sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz']
sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz']
# Should we check here actual existence of source files?
# Yes, the file listing is different between 3.0 and 3.1
# versions.
sources = [f for f in sources if os.path.isfile(f)]
info = {'sources': sources, 'language': 'f77'}
self.set_info(**info)
atlas_version_c_text = r'''
/* This file is generated from numpy/distutils/system_info.py */
void ATL_buildinfo(void);
int main(void) {
    ATL_buildinfo();
    return 0;
}
'''
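# The C snippet above is compiled, linked against the candidate ATLAS
# libraries and run; ATL_buildinfo() prints the ATLAS build banner, from
# which get_atlas_version() below parses the version string.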
_cached_atlas_version = {}
def get_atlas_version(**config):
libraries = config.get('libraries', [])
library_dirs = config.get('library_dirs', [])
key = (tuple(libraries), tuple(library_dirs))
if key in _cached_atlas_version:
return _cached_atlas_version[key]
c = cmd_config(Distribution())
atlas_version = None
info = {}
try:
s, o = c.get_output(atlas_version_c_text,
libraries=libraries, library_dirs=library_dirs,
use_tee=(system_info.verbosity > 0))
if s and re.search(r'undefined reference to `_gfortran', o, re.M):
s, o = c.get_output(atlas_version_c_text,
libraries=libraries + ['gfortran'],
library_dirs=library_dirs,
use_tee=(system_info.verbosity > 0))
if not s:
warnings.warn("""
*****************************************************
Linkage with ATLAS requires gfortran. Use
python setup.py config_fc --fcompiler=gnu95 ...
when building extension libraries that use ATLAS.
Make sure that -lgfortran is used for C++ extensions.
*****************************************************
""")
dict_append(info, language='f90',
define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
except Exception: # failed to get version from file -- maybe on Windows
# look at directory name
for o in library_dirs:
m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o)
if m:
atlas_version = m.group('version')
if atlas_version is not None:
break
# final choice --- look at ATLAS_VERSION environment
# variable
if atlas_version is None:
atlas_version = os.environ.get('ATLAS_VERSION', None)
if atlas_version:
dict_append(info, define_macros=[(
'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
])
else:
dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
return atlas_version or '?.?.?', info
if not s:
m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o)
if m:
atlas_version = m.group('version')
if atlas_version is None:
if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):
atlas_version = '3.2.1_pre3.3.6'
else:
log.info('Status: %d', s)
log.info('Output: %s', o)
if atlas_version == '3.2.1_pre3.3.6':
dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
else:
dict_append(info, define_macros=[(
'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
])
result = _cached_atlas_version[key] = atlas_version, info
return result
class lapack_opt_info(system_info):
notfounderror = LapackNotFoundError
def calc_info(self):
lapack_mkl_info = get_info('lapack_mkl')
if lapack_mkl_info:
self.set_info(**lapack_mkl_info)
return
openblas_info = get_info('openblas_lapack')
if openblas_info:
self.set_info(**openblas_info)
return
atlas_info = get_info('atlas_3_10_threads')
if not atlas_info:
atlas_info = get_info('atlas_3_10')
if not atlas_info:
atlas_info = get_info('atlas_threads')
if not atlas_info:
atlas_info = get_info('atlas')
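# Preference order above: threaded ATLAS >= 3.10 first, then serial 3.10,
# then the threaded and serial pre-3.10 variants.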
if sys.platform == 'darwin' and not atlas_info:
# Use the system lapack from Accelerate or vecLib under OSX
args = []
link_args = []
if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
'x86_64' in get_platform() or \
'i386' in platform.platform():
intel = 1
else:
intel = 0
if os.path.exists('/System/Library/Frameworks'
'/Accelerate.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
elif os.path.exists('/System/Library/Frameworks'
'/vecLib.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
if args:
self.set_info(extra_compile_args=args,
extra_link_args=link_args,
define_macros=[('NO_ATLAS_INFO', 3),
('HAVE_CBLAS', None)])
return
need_lapack = 0
need_blas = 0
info = {}
if atlas_info:
l = atlas_info.get('define_macros', [])
if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \
or ('ATLAS_WITHOUT_LAPACK', None) in l:
need_lapack = 1
info = atlas_info
else:
warnings.warn(AtlasNotFoundError.__doc__)
need_blas = 1
need_lapack = 1
dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
if need_lapack:
lapack_info = get_info('lapack')
#lapack_info = {} ## uncomment for testing
if lapack_info:
dict_append(info, **lapack_info)
else:
warnings.warn(LapackNotFoundError.__doc__)
lapack_src_info = get_info('lapack_src')
if not lapack_src_info:
warnings.warn(LapackSrcNotFoundError.__doc__)
return
dict_append(info, libraries=[('flapack_src', lapack_src_info)])
if need_blas:
blas_info = get_info('blas')
if blas_info:
dict_append(info, **blas_info)
else:
warnings.warn(BlasNotFoundError.__doc__)
blas_src_info = get_info('blas_src')
if not blas_src_info:
warnings.warn(BlasSrcNotFoundError.__doc__)
return
dict_append(info, libraries=[('fblas_src', blas_src_info)])
self.set_info(**info)
return
class blas_opt_info(system_info):
notfounderror = BlasNotFoundError
def calc_info(self):
blas_mkl_info = get_info('blas_mkl')
if blas_mkl_info:
self.set_info(**blas_mkl_info)
return
blis_info = get_info('blis')
if blis_info:
self.set_info(**blis_info)
return
openblas_info = get_info('openblas')
if openblas_info:
self.set_info(**openblas_info)
return
atlas_info = get_info('atlas_3_10_blas_threads')
if not atlas_info:
atlas_info = get_info('atlas_3_10_blas')
if not atlas_info:
atlas_info = get_info('atlas_blas_threads')
if not atlas_info:
atlas_info = get_info('atlas_blas')
if sys.platform == 'darwin' and not atlas_info:
# Use the system BLAS from Accelerate or vecLib under OSX
args = []
link_args = []
if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
'x86_64' in get_platform() or \
'i386' in platform.platform():
intel = 1
else:
intel = 0
if os.path.exists('/System/Library/Frameworks'
'/Accelerate.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
args.extend([
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
elif os.path.exists('/System/Library/Frameworks'
'/vecLib.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
args.extend([
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
if args:
self.set_info(extra_compile_args=args,
extra_link_args=link_args,
define_macros=[('NO_ATLAS_INFO', 3),
('HAVE_CBLAS', None)])
return
need_blas = 0
info = {}
if atlas_info:
info = atlas_info
else:
warnings.warn(AtlasNotFoundError.__doc__)
need_blas = 1
dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
if need_blas:
blas_info = get_info('blas')
if blas_info:
dict_append(info, **blas_info)
else:
warnings.warn(BlasNotFoundError.__doc__)
blas_src_info = get_info('blas_src')
if not blas_src_info:
warnings.warn(BlasSrcNotFoundError.__doc__)
return
dict_append(info, libraries=[('fblas_src', blas_src_info)])
self.set_info(**info)
return
class blas_info(system_info):
section = 'blas'
dir_env_var = 'BLAS'
_lib_names = ['blas']
notfounderror = BlasNotFoundError
def calc_info(self):
lib_dirs = self.get_lib_dirs()
blas_libs = self.get_libs('blas_libs', self._lib_names)
info = self.check_libs(lib_dirs, blas_libs, [])
if info is None:
return
if platform.system() == 'Windows':
# The check for windows is needed because has_cblas uses the
# same compiler that was used to compile Python and msvc is
# often not installed when mingw is being used. This rough
# treatment is not desirable, but windows is tricky.
info['language'] = 'f77' # XXX: is it generally true?
else:
lib = self.has_cblas(info)
if lib is not None:
info['language'] = 'c'
info['libraries'] = [lib]
info['define_macros'] = [('HAVE_CBLAS', None)]
self.set_info(**info)
def has_cblas(self, info):
# primitive cblas check by looking for the header and trying to link
# cblas or blas
res = False
c = distutils.ccompiler.new_compiler()
c.customize('')
tmpdir = tempfile.mkdtemp()
s = """#include <cblas.h>
int main(int argc, const char *argv[])
{
double a[4] = {1,2,3,4};
double b[4] = {5,6,7,8};
return cblas_ddot(4, a, 1, b, 1) > 10;
}"""
src = os.path.join(tmpdir, 'source.c')
try:
with open(src, 'wt') as f:
f.write(s)
try:
# check we can compile (find headers)
obj = c.compile([src], output_dir=tmpdir,
include_dirs=self.get_include_dirs())
# check we can link (find library)
# some systems have separate cblas and blas libs. First
# check for cblas lib, and if not present check for blas lib.
try:
c.link_executable(obj, os.path.join(tmpdir, "a.out"),
libraries=["cblas"],
library_dirs=info['library_dirs'],
extra_postargs=info.get('extra_link_args', []))
res = "cblas"
except distutils.ccompiler.LinkError:
c.link_executable(obj, os.path.join(tmpdir, "a.out"),
libraries=["blas"],
library_dirs=info['library_dirs'],
extra_postargs=info.get('extra_link_args', []))
res = "blas"
except distutils.ccompiler.CompileError:
res = None
finally:
shutil.rmtree(tmpdir)
return res
class openblas_info(blas_info):
section = 'openblas'
dir_env_var = 'OPENBLAS'
_lib_names = ['openblas']
notfounderror = BlasNotFoundError
def check_embedded_lapack(self, info):
return True
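# The base openblas_info accepts any OpenBLAS build as a BLAS; the actual
# LAPACK-symbol link test is implemented in openblas_lapack_info below.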
def calc_info(self):
lib_dirs = self.get_lib_dirs()
openblas_libs = self.get_libs('libraries', self._lib_names)
if openblas_libs == self._lib_names: # backward compat with 1.8.0
openblas_libs = self.get_libs('openblas_libs', self._lib_names)
info = self.check_libs(lib_dirs, openblas_libs, [])
if info is None:
return
# Add extra info for OpenBLAS
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
if not self.check_embedded_lapack(info):
return
info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
self.set_info(**info)
class openblas_lapack_info(openblas_info):
section = 'openblas'
dir_env_var = 'OPENBLAS'
_lib_names = ['openblas']
notfounderror = BlasNotFoundError
def check_embedded_lapack(self, info):
res = False
c = distutils.ccompiler.new_compiler()
c.customize('')
tmpdir = tempfile.mkdtemp()
s = """void zungqr();
int main(int argc, const char *argv[])
{
zungqr_();
return 0;
}"""
src = os.path.join(tmpdir, 'source.c')
out = os.path.join(tmpdir, 'a.out')
# Add the additional "extra" arguments
extra_args = info.get('extra_link_args', [])
try:
with open(src, 'wt') as f:
f.write(s)
obj = c.compile([src], output_dir=tmpdir)
try:
c.link_executable(obj, out, libraries=info['libraries'],
library_dirs=info['library_dirs'],
extra_postargs=extra_args)
res = True
except distutils.ccompiler.LinkError:
res = False
finally:
shutil.rmtree(tmpdir)
return res
class blis_info(blas_info):
section = 'blis'
dir_env_var = 'BLIS'
_lib_names = ['blis']
notfounderror = BlasNotFoundError
def calc_info(self):
lib_dirs = self.get_lib_dirs()
blis_libs = self.get_libs('libraries', self._lib_names)
if blis_libs == self._lib_names:
blis_libs = self.get_libs('blis_libs', self._lib_names)
info = self.check_libs2(lib_dirs, blis_libs, [])
if info is None:
return
# Add include dirs
incl_dirs = self.get_include_dirs()
dict_append(info,
language='c',
define_macros=[('HAVE_CBLAS', None)],
include_dirs=incl_dirs)
self.set_info(**info)
class blas_src_info(system_info):
section = 'blas_src'
dir_env_var = 'BLAS_SRC'
notfounderror = BlasSrcNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['blas']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'daxpy.f')):
src_dir = d
break
if not src_dir:
#XXX: Get sources from netlib. May be ask first.
return
blas1 = '''
caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot
dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2
srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg
dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax
snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap
scabs1
'''
blas2 = '''
cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv
chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv
dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv
sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger
stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc
zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2
ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv
'''
blas3 = '''
cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k
dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm
ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm
'''
sources = [os.path.join(src_dir, f + '.f') \
for f in (blas1 + blas2 + blas3).split()]
#XXX: should we check here actual existence of source files?
sources = [f for f in sources if os.path.isfile(f)]
info = {'sources': sources, 'language': 'f77'}
self.set_info(**info)
class x11_info(system_info):
section = 'x11'
notfounderror = X11NotFoundError
def __init__(self):
system_info.__init__(self,
default_lib_dirs=default_x11_lib_dirs,
default_include_dirs=default_x11_include_dirs)
def calc_info(self):
if sys.platform in ['win32']:
return
lib_dirs = self.get_lib_dirs()
include_dirs = self.get_include_dirs()
x11_libs = self.get_libs('x11_libs', ['X11'])
info = self.check_libs(lib_dirs, x11_libs, [])
if info is None:
return
inc_dir = None
for d in include_dirs:
if self.combine_paths(d, 'X11/X.h'):
inc_dir = d
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir])
self.set_info(**info)
class _numpy_info(system_info):
section = 'Numeric'
modulename = 'Numeric'
notfounderror = NumericNotFoundError
def __init__(self):
include_dirs = []
try:
module = __import__(self.modulename)
prefix = []
for name in module.__file__.split(os.sep):
if name == 'lib':
break
prefix.append(name)
# Ask numpy for its own include path before attempting
# anything else
try:
include_dirs.append(getattr(module, 'get_include')())
except AttributeError:
pass
include_dirs.append(distutils.sysconfig.get_python_inc(
prefix=os.sep.join(prefix)))
except ImportError:
pass
py_incl_dir = distutils.sysconfig.get_python_inc()
include_dirs.append(py_incl_dir)
py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
if py_pincl_dir not in include_dirs:
include_dirs.append(py_pincl_dir)
for d in default_include_dirs:
d = os.path.join(d, os.path.basename(py_incl_dir))
if d not in include_dirs:
include_dirs.append(d)
system_info.__init__(self,
default_lib_dirs=[],
default_include_dirs=include_dirs)
def calc_info(self):
try:
module = __import__(self.modulename)
except ImportError:
return
info = {}
macros = []
for v in ['__version__', 'version']:
vrs = getattr(module, v, None)
if vrs is None:
continue
macros = [(self.modulename.upper() + '_VERSION',
'"\\"%s\\""' % (vrs)),
(self.modulename.upper(), None)]
break
dict_append(info, define_macros=macros)
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
if self.combine_paths(d,
os.path.join(self.modulename,
'arrayobject.h')):
inc_dir = d
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir])
if info:
self.set_info(**info)
return
class numarray_info(_numpy_info):
section = 'numarray'
modulename = 'numarray'
class Numeric_info(_numpy_info):
section = 'Numeric'
modulename = 'Numeric'
class numpy_info(_numpy_info):
section = 'numpy'
modulename = 'numpy'
class numerix_info(system_info):
section = 'numerix'
def calc_info(self):
which = None, None
if os.getenv("NUMERIX"):
which = os.getenv("NUMERIX"), "environment var"
# If all the above fail, default to numpy.
if which[0] is None:
which = "numpy", "defaulted"
try:
import numpy
which = "numpy", "defaulted"
except ImportError:
msg1 = str(get_exception())
try:
import Numeric
which = "numeric", "defaulted"
except ImportError:
msg2 = str(get_exception())
try:
import numarray
which = "numarray", "defaulted"
except ImportError:
msg3 = str(get_exception())
log.info(msg1)
log.info(msg2)
log.info(msg3)
which = which[0].strip().lower(), which[1]
if which[0] not in ["numeric", "numarray", "numpy"]:
raise ValueError("numerix selector must be either 'Numeric' "
"or 'numarray' or 'numpy' but the value obtained"
" from the %s was '%s'." % (which[1], which[0]))
os.environ['NUMERIX'] = which[0]
self.set_info(**get_info(which[0]))
class f2py_info(system_info):
def calc_info(self):
try:
import numpy.f2py as f2py
except ImportError:
return
f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src')
self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')],
include_dirs=[f2py_dir])
return
class boost_python_info(system_info):
section = 'boost_python'
dir_env_var = 'BOOST'
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['boost*']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'libs', 'python', 'src',
'module.cpp')):
src_dir = d
break
if not src_dir:
return
py_incl_dirs = [distutils.sysconfig.get_python_inc()]
py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
if py_pincl_dir not in py_incl_dirs:
py_incl_dirs.append(py_pincl_dir)
srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')
bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp'))
bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp'))
info = {'libraries': [('boost_python_src',
{'include_dirs': [src_dir] + py_incl_dirs,
'sources':bpl_srcs}
)],
'include_dirs': [src_dir],
}
if info:
self.set_info(**info)
return
class agg2_info(system_info):
section = 'agg2'
dir_env_var = 'AGG2'
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['agg2*']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')):
src_dir = d
break
if not src_dir:
return
if sys.platform == 'win32':
agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform',
'win32', 'agg_win32_bmp.cpp'))
else:
agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp'))
agg2_srcs += [os.path.join(src_dir, 'src', 'platform',
'X11',
'agg_platform_support.cpp')]
info = {'libraries':
[('agg2_src',
{'sources': agg2_srcs,
'include_dirs': [os.path.join(src_dir, 'include')],
}
)],
'include_dirs': [os.path.join(src_dir, 'include')],
}
if info:
self.set_info(**info)
return
class _pkg_config_info(system_info):
section = None
config_env_var = 'PKG_CONFIG'
default_config_exe = 'pkg-config'
append_config_exe = ''
version_macro_name = None
release_macro_name = None
version_flag = '--modversion'
cflags_flag = '--cflags'
def get_config_exe(self):
if self.config_env_var in os.environ:
return os.environ[self.config_env_var]
return self.default_config_exe
def get_config_output(self, config_exe, option):
cmd = config_exe + ' ' + self.append_config_exe + ' ' + option
s, o = exec_command(cmd, use_tee=0)
if not s:
return o
def calc_info(self):
config_exe = find_executable(self.get_config_exe())
if not config_exe:
log.warn('File not found: %s. Cannot determine %s info.' \
% (self.get_config_exe(), self.section))
return
info = {}
macros = []
libraries = []
library_dirs = []
include_dirs = []
extra_link_args = []
extra_compile_args = []
version = self.get_config_output(config_exe, self.version_flag)
if version:
macros.append((self.__class__.__name__.split('.')[-1].upper(),
'"\\"%s\\""' % (version)))
if self.version_macro_name:
macros.append((self.version_macro_name + '_%s'
% (version.replace('.', '_')), None))
if self.release_macro_name:
release = self.get_config_output(config_exe, '--release')
if release:
macros.append((self.release_macro_name + '_%s'
% (release.replace('.', '_')), None))
opts = self.get_config_output(config_exe, '--libs')
if opts:
for opt in opts.split():
if opt[:2] == '-l':
libraries.append(opt[2:])
elif opt[:2] == '-L':
library_dirs.append(opt[2:])
else:
extra_link_args.append(opt)
opts = self.get_config_output(config_exe, self.cflags_flag)
if opts:
for opt in opts.split():
if opt[:2] == '-I':
include_dirs.append(opt[2:])
elif opt[:2] == '-D':
if '=' in opt:
n, v = opt[2:].split('=')
macros.append((n, v))
else:
macros.append((opt[2:], None))
else:
extra_compile_args.append(opt)
if macros:
dict_append(info, define_macros=macros)
if libraries:
dict_append(info, libraries=libraries)
if library_dirs:
dict_append(info, library_dirs=library_dirs)
if include_dirs:
dict_append(info, include_dirs=include_dirs)
if extra_link_args:
dict_append(info, extra_link_args=extra_link_args)
if extra_compile_args:
dict_append(info, extra_compile_args=extra_compile_args)
if info:
self.set_info(**info)
return
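# Illustrative: 'pkg-config --libs xft' printing '-L/usr/lib -lXft -lX11'
# would be parsed above into library_dirs=['/usr/lib'] and
# libraries=['Xft', 'X11']; unrecognized tokens become extra_link_args.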
class wx_info(_pkg_config_info):
section = 'wx'
config_env_var = 'WX_CONFIG'
default_config_exe = 'wx-config'
append_config_exe = ''
version_macro_name = 'WX_VERSION'
release_macro_name = 'WX_RELEASE'
version_flag = '--version'
cflags_flag = '--cxxflags'
class gdk_pixbuf_xlib_2_info(_pkg_config_info):
section = 'gdk_pixbuf_xlib_2'
append_config_exe = 'gdk-pixbuf-xlib-2.0'
version_macro_name = 'GDK_PIXBUF_XLIB_VERSION'
class gdk_pixbuf_2_info(_pkg_config_info):
section = 'gdk_pixbuf_2'
append_config_exe = 'gdk-pixbuf-2.0'
version_macro_name = 'GDK_PIXBUF_VERSION'
class gdk_x11_2_info(_pkg_config_info):
section = 'gdk_x11_2'
append_config_exe = 'gdk-x11-2.0'
version_macro_name = 'GDK_X11_VERSION'
class gdk_2_info(_pkg_config_info):
section = 'gdk_2'
append_config_exe = 'gdk-2.0'
version_macro_name = 'GDK_VERSION'
class gdk_info(_pkg_config_info):
section = 'gdk'
append_config_exe = 'gdk'
version_macro_name = 'GDK_VERSION'
class gtkp_x11_2_info(_pkg_config_info):
section = 'gtkp_x11_2'
append_config_exe = 'gtk+-x11-2.0'
version_macro_name = 'GTK_X11_VERSION'
class gtkp_2_info(_pkg_config_info):
section = 'gtkp_2'
append_config_exe = 'gtk+-2.0'
version_macro_name = 'GTK_VERSION'
class xft_info(_pkg_config_info):
section = 'xft'
append_config_exe = 'xft'
version_macro_name = 'XFT_VERSION'
class freetype2_info(_pkg_config_info):
section = 'freetype2'
append_config_exe = 'freetype2'
version_macro_name = 'FREETYPE2_VERSION'
class amd_info(system_info):
section = 'amd'
dir_env_var = 'AMD'
_lib_names = ['amd']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
amd_libs = self.get_libs('amd_libs', self._lib_names)
info = self.check_libs(lib_dirs, amd_libs, [])
if info is None:
return
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, 'amd.h')
if p:
inc_dir = os.path.dirname(p[0])
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir],
define_macros=[('SCIPY_AMD_H', None)],
swig_opts=['-I' + inc_dir])
self.set_info(**info)
return
class umfpack_info(system_info):
section = 'umfpack'
dir_env_var = 'UMFPACK'
notfounderror = UmfpackNotFoundError
_lib_names = ['umfpack']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
umfpack_libs = self.get_libs('umfpack_libs', self._lib_names)
info = self.check_libs(lib_dirs, umfpack_libs, [])
if info is None:
return
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h')
if p:
inc_dir = os.path.dirname(p[0])
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir],
define_macros=[('SCIPY_UMFPACK_H', None)],
swig_opts=['-I' + inc_dir])
amd = get_info('amd')
dict_append(info, **amd)
self.set_info(**info)
return
def combine_paths(*args, **kws):
""" Return a list of existing paths composed by all combinations of
items from arguments.
"""
r = []
for a in args:
if not a:
continue
if is_string(a):
a = [a]
r.append(a)
args = r
if not args:
return []
if len(args) == 1:
result = reduce(lambda a, b: a + b, map(glob, args[0]), [])
elif len(args) == 2:
result = []
for a0 in args[0]:
for a1 in args[1]:
result.extend(glob(os.path.join(a0, a1)))
else:
result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:]))
verbosity = kws.get('verbosity', 1)
log.debug('(paths: %s)', ','.join(result))
return result
language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3}
inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'}
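# dict_append merges build-info dictionaries; when several 'language'
# values are seen, the most demanding one wins (f90 > f77 > c++ > c),
# as ranked by language_map above.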
def dict_append(d, **kws):
languages = []
for k, v in kws.items():
if k == 'language':
languages.append(v)
continue
if k in d:
if k in ['library_dirs', 'include_dirs',
'extra_compile_args', 'extra_link_args',
'runtime_library_dirs', 'define_macros']:
[d[k].append(vv) for vv in v if vv not in d[k]]
else:
d[k].extend(v)
else:
d[k] = v
if languages:
l = inv_language_map[max([language_map.get(l, 0) for l in languages])]
d['language'] = l
return
def parseCmdLine(argv=(None,)):
import optparse
parser = optparse.OptionParser("usage: %prog [-v] [info objs]")
parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
default=False,
help='be verbose and print more messages')
opts, args = parser.parse_args(args=argv[1:])
return opts, args
def show_all(argv=None):
import inspect
if argv is None:
argv = sys.argv
opts, args = parseCmdLine(argv)
if opts.verbose:
log.set_threshold(log.DEBUG)
else:
log.set_threshold(log.INFO)
show_only = []
for n in args:
if n[-5:] != '_info':
n = n + '_info'
show_only.append(n)
show_all = not show_only
_gdict_ = globals().copy()
for name, c in _gdict_.items():
if not inspect.isclass(c):
continue
if not issubclass(c, system_info) or c is system_info:
continue
if not show_all:
if name not in show_only:
continue
del show_only[show_only.index(name)]
conf = c()
conf.verbosity = 2
r = conf.get_info()
if show_only:
log.info('Info classes not defined: %s', ','.join(show_only))
if __name__ == "__main__":
show_all()
| [
"[email protected]"
]
| |
ca72163d672b64afaf83f1f5891e2c4f1c2d573c | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Shapely_numpy/source/numpy/doc/structured_arrays.py | 5289e6d0bd859f00231e416fc338c3c4d6e6ee3e | [
"MIT"
]
| permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 11,442 | py | """
=================
Structured Arrays
=================
Introduction
============
NumPy provides powerful capabilities to create arrays of structured datatype.
These arrays permit one to manipulate the data by named fields. A simple
example will show what is meant: ::
>>> x = np.array([(1,2.,'Hello'), (2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> x
array([(1, 2.0, 'Hello'), (2, 3.0, 'World')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
Here we have created a one-dimensional array of length 2. Each element of
this array is a structure that contains three items, a 32-bit integer, a 32-bit
float, and a string of length 10 or less. If we index this array at the second
position we get the second structure: ::
>>> x[1]
(2,3.,"World")
Conveniently, one can access any field of the array by indexing using the
string that names that field. ::
>>> y = x['bar']
>>> y
array([ 2., 3.], dtype=float32)
>>> y[:] = 2*y
>>> y
array([ 4., 6.], dtype=float32)
>>> x
array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
In these examples, y is a simple float array consisting of the 2nd field
in the structured type. But, rather than being a copy of the data in the structured
array, it is a view, i.e., it shares exactly the same memory locations.
Thus, when we updated this array by doubling its values, the structured
array shows the corresponding values as doubled as well. Likewise, if one
changes the structured array, the field view also changes: ::
>>> x[1] = (-1,-1.,"Master")
>>> x
array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
>>> y
array([ 4., -1.], dtype=float32)
Defining Structured Arrays
==========================
One defines a structured array through the dtype object. There are
**several** alternative ways to define the fields of a record. Some of
these variants provide backward compatibility with Numeric, numarray, or
another module, and should not be used except for such purposes. These
will be so noted. One specifies record structure in
one of four alternative ways, using an argument (as supplied to a dtype
function keyword or a dtype object constructor itself). This
argument must be one of the following: 1) string, 2) tuple, 3) list, or
4) dictionary. Each of these is briefly described below.
1) String argument.
In this case, the constructor expects a comma-separated list of type
specifiers, optionally with extra shape information. The fields are
given the default names 'f0', 'f1', 'f2' and so on.
The type specifiers can take 4 different forms: ::
a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f2, f4, f8, c8, c16, a<n>
(representing bytes, ints, unsigned ints, floats, complex and
fixed length strings of specified byte lengths)
b) int8,...,uint8,...,float16, float32, float64, complex64, complex128
(this time with bit sizes)
c) older Numeric/numarray type specifications (e.g. Float32).
Don't use these in new code!
d) Single character type specifiers (e.g H for unsigned short ints).
Avoid using these unless you must. Details can be found in the
NumPy book
These different styles can be mixed within the same string (but why would you
want to do that?). Furthermore, each type specifier can be prefixed
with a repetition number, or a shape. In these cases an array
element is created, i.e., an array within a record. That array
is still referred to as a single field. An example: ::
>>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64')
>>> x
array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])],
dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))])
Using strings to define the record structure precludes naming the
fields in the original definition. The names can be changed later,
however, as shown below.
2) Tuple argument: The only relevant tuple case that applies to record
structures is when a structure is mapped to an existing data type. This
is done by pairing in a tuple, the existing data type with a matching
dtype definition (using any of the variants being described here). As
an example (using a definition using a list, so see 3) for further
details): ::
>>> x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
>>> x
array([0, 0, 0])
>>> x['r']
array([0, 0, 0], dtype=uint8)
In this case, an array is produced that looks and acts like a simple int32 array,
but also has definitions for fields that use only one byte of the int32 (a bit
like Fortran equivalencing).
3) List argument: In this case the record structure is defined with a list of
tuples. Each tuple has 2 or 3 elements specifying: 1) The name of the field
('' is permitted), 2) the type of the field, and 3) the shape (optional).
For example::
>>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
>>> x
array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])],
dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))])
4) Dictionary argument: two different forms are permitted. The first consists
of a dictionary with two required keys ('names' and 'formats'), each having an
equal sized list of values. The format list contains any type/shape specifier
allowed in other contexts. The names must be strings. There are two optional
keys: 'offsets' and 'titles'. Each must be a correspondingly matching list to
the required two where offsets contain integer offsets for each field, and
titles are objects containing metadata for each field (these do not have
to be strings), where the value of None is permitted. As an example: ::
>>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[('col1', '>i4'), ('col2', '>f4')])
The other dictionary form permitted is a dictionary of name keys with tuple
values specifying type, offset, and an optional title. ::
>>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')])
Accessing and modifying field names
===================================
The field names are an attribute of the dtype object defining the structure.
For the last example: ::
>>> x.dtype.names
('col1', 'col2')
>>> x.dtype.names = ('x', 'y')
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')])
>>> x.dtype.names = ('x', 'y', 'z') # wrong number of names
<type 'exceptions.ValueError'>: must replace all names at once with a sequence of length 2
Accessing field titles
====================================
The field titles provide a standard place to put associated info for fields.
They do not have to be strings. ::
>>> x.dtype.fields['x'][2]
'title 1'
Accessing multiple fields at once
====================================
You can access multiple fields at once using a list of field names: ::
>>> x = np.array([(1.5,2.5,(1.0,2.0)),(3.,4.,(4.,5.)),(1.,3.,(2.,6.))],
dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
Notice that `x` is created with a list of tuples. ::
>>> x[['x','y']]
array([(1.5, 2.5), (3.0, 4.0), (1.0, 3.0)],
dtype=[('x', '<f4'), ('y', '<f4')])
>>> x[['x','value']]
array([(1.5, [[1.0, 2.0], [1.0, 2.0]]), (3.0, [[4.0, 5.0], [4.0, 5.0]]),
(1.0, [[2.0, 6.0], [2.0, 6.0]])],
dtype=[('x', '<f4'), ('value', '<f4', (2, 2))])
The fields are returned in the order they are asked for.::
>>> x[['y','x']]
array([(2.5, 1.5), (4.0, 3.0), (3.0, 1.0)],
dtype=[('y', '<f4'), ('x', '<f4')])
Filling structured arrays
=========================
Structured arrays can be filled by field or row by row. ::
>>> arr = np.zeros((5,), dtype=[('var1','f8'),('var2','f8')])
>>> arr['var1'] = np.arange(5)
If you fill it in row by row, it takes a tuple
(but not a list or array!)::
>>> arr[0] = (10,20)
>>> arr
array([(10.0, 20.0), (1.0, 0.0), (2.0, 0.0), (3.0, 0.0), (4.0, 0.0)],
dtype=[('var1', '<f8'), ('var2', '<f8')])
Record Arrays
=============
For convenience, numpy provides "record arrays" which allow one to access
fields of structured arrays by attribute rather than by index. Record arrays
are structured arrays wrapped using a subclass of ndarray,
:class:`numpy.recarray`, which allows field access by attribute on the array
object, and record arrays also use a special datatype, :class:`numpy.record`,
which allows field access by attribute on the individual elements of the array.
The simplest way to create a record array is with :func:`numpy.rec.array`: ::
>>> recordarr = np.rec.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> recordarr.bar
array([ 2., 3.], dtype=float32)
>>> recordarr[1:2]
rec.array([(2, 3.0, 'World')],
dtype=[('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')])
>>> recordarr[1:2].foo
array([2], dtype=int32)
>>> recordarr.foo[1:2]
array([2], dtype=int32)
>>> recordarr[1].baz
'World'
numpy.rec.array can convert a wide variety of arguments into record arrays,
including normal structured arrays: ::
>>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])
>>> recordarr = np.rec.array(arr)
The numpy.rec module provides a number of other convenience functions for
creating record arrays, see :ref:`record array creation routines
<routines.array-creation.rec>`.
A record array representation of a structured array can be obtained using the
appropriate :ref:`view`: ::
>>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')])
>>> recordarr = arr.view(dtype=np.dtype((np.record, arr.dtype)),
... type=np.recarray)
For convenience, viewing an ndarray as type `np.recarray` will automatically
convert to `np.record` datatype, so the dtype can be left out of the view: ::
>>> recordarr = arr.view(np.recarray)
>>> recordarr.dtype
dtype((numpy.record, [('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')]))
To get back to a plain ndarray both the dtype and type must be reset. The
following view does so, taking into account the unusual case that the
recordarr was not a structured type: ::
>>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray)
Record array fields accessed by index or by attribute are returned as a record
array if the field has a structured type but as a plain ndarray otherwise. ::
>>> recordarr = np.rec.array([('Hello', (1,2)),("World", (3,4))],
... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])])
>>> type(recordarr.foo)
<type 'numpy.ndarray'>
>>> type(recordarr.bar)
<class 'numpy.core.records.recarray'>
Note that if a field has the same name as an ndarray attribute, the ndarray
attribute takes precedence. Such fields will be inaccessible by attribute but
may still be accessed by index.
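For instance (an illustrative sketch; the field name 'shape' is chosen only
because it collides with an ndarray attribute, so the attribute wins): ::
>>> recordarr = np.rec.array([(1, 2.)],
... dtype=[('shape', 'i4'), ('bar', 'f4')])
>>> recordarr.shape # the ndarray attribute
(1,)
>>> recordarr['shape'] # the field, still reachable by index
array([1], dtype=int32)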
"""
from __future__ import division, absolute_import, print_function
| [
"[email protected]"
]
| |
7933e38da4d9057e66aacf8c9acc9ba0b3e8b4e3 | af61c369e3550643d47fba2445d9f279e412e15c | /basicSprite.py | 2f0bfb2df06b261977e9f782873c230385348b8d | []
| no_license | Rabidza/pygame_learningpython | 45e900b5a8458a14e7df317de16a9e7cd18737fa | ef58d9ca977e2ea1406200ce04c3a32a440be66a | refs/heads/master | 2020-06-03T15:13:38.419015 | 2015-02-18T16:16:30 | 2015-02-18T16:16:30 | 30,924,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | import pygame
from helpers import *
class Sprite(pygame.sprite.Sprite):
def __init__(self, centerPoint, image):
pygame.sprite.Sprite.__init__(self)
# Set the image and the rect
self.image = image
self.rect = image.get_rect()
# Move the rect into the correct position
self.rect.center = centerPoint
class Pellet(pygame.sprite.Sprite):
def __init__(self, top_left, image = None):
pygame.sprite.Sprite.__init__(self)
if image == None:
self.image, self.rect = load_image('pellet.png',-1)
else:
self.image = image
self.rect = image.get_rect()
self.rect.topleft = top_left | [
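# Minimal usage sketch (illustrative; assumes a pygame display is set up and
# that helpers.load_image('pellet.png', -1) can find the image file):
#
# pygame.init()
# screen = pygame.display.set_mode((320, 240))
# pellet = Pellet((10, 10))
# screen.blit(pellet.image, pellet.rect)
# pygame.display.flip()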
"[email protected]"
]
| |
67a20ce0e3e82ec860d6764209e162fd4fe77b4d | 00cb405170a6a9572bef0ec8f373813eada08c03 | /Game Structure/geometry/version5/myentitygroup.py | 48d7e8cdcb9636189cb9f36e9451420877bcb85d | []
| no_license | MarcPartensky/Python-Games | c0ad2857be5832d6029642bb0a96bc8e403a12e3 | ebfcaaf4a028eddb36bbc99184eb3f7a86eb24ed | refs/heads/master | 2022-09-03T00:04:16.402288 | 2022-08-12T17:10:22 | 2022-08-12T17:10:22 | 166,606,022 | 2 | 1 | null | 2021-03-07T16:20:15 | 2019-01-19T23:56:04 | Python | UTF-8 | Python | false | false | 11,746 | py | from mymanager import Manager
from myentity import Entity
from mymotion import Motion
from mygroup import Group
import numpy as np
import random
class EntityGroup(Group):
"""An entity group is a group of entities. Entity specific features are added."""
@staticmethod
def getCollided(group1, group2):
"""Determine the collisions between 2 groups."""
collisions = []
for e1 in group1:
for e2 in group2:
if (e1.position - e2.position).norm < e1.born + e2.born:
if e1.cross(e2):
collisions.append((e1, e2))
return collisions
@staticmethod
def killOnCollision(group1, group2):
"""We suppose the entities of group1 and group2 alives."""
for e1 in group1:
for e2 in group2:
if (e1.position - e2.position).norm < e1.born + e2.born:
if e1.cross(e2):
e1.die()
e2.die()
@classmethod
def randomOfType(cls, etype, n=0, **kwargs):
"""Create a group of n random entities of type 'etype'."""
entities = [etype.random() for i in range(n)]
return cls(*entities, **kwargs)
@classmethod
    def randomOfTypes(cls, *types, n=0, **kwargs):
        """Create a group of n random entities whose type combines all of the given types."""
class etype(*types):
pass
        return cls.randomOfType(etype, n=n, **kwargs)
@classmethod
def random(cls, n=10, **kwargs):
"""Create n random entities."""
entities = [Entity.random() for i in range(n)]
return cls(*entities, **kwargs)
@classmethod
    def randomWithSizeSparse(cls, n, size, sparse, **kwargs):
"""Create a random group using the size and sparse parameters."""
g = super().random(n, **kwargs)
g.enlarge(size)
g.spread(sparse)
return g
    def __init__(self, *entities, alive=False, active=False, activate=False):
        """Create an entity group."""
super().__init__(*entities)
self.active = active
self.alive = alive
if activate:
self.activate()
# Binding the entities to the elements
entities = property(Group.getElements, Group.setElements, Group.delElements)
def randomEntity(self):
"""Return a random entity of the group."""
chosen = []
for entity in self.entities:
if isinstance(entity, EntityGroup):
chosen.append(entity.randomEntity())
else:
chosen.append(entity)
return random.choice(chosen)
def spawn(self):
"""Spawn each entity."""
self.alive = True
for entity in self:
entity.spawn()
def updateActivation(self):
"""Determine if the group is active if any of the entities is active."""
self.active = False
for entity in self:
if entity.active:
self.active = True
def activate(self):
"""Reactivate all entities."""
self.active = True
for entity in self:
entity.activate()
def deactivate(self):
"""Deactivate all entities."""
self.active = False
for entity in self:
entity.deactivate()
def reactKeyDown(self, key):
"""Make each entity react to the key down event."""
for entity in self:
if entity.active:
entity.reactKeyDown(key)
def reactMouseMotion(self, position):
"""Make each entity react to a mouse motion event."""
for entity in self:
if entity.active:
entity.reactMouseMotion(position)
def reactMouseButtonDown(self, button, position):
"""Make all entities react to a mouse button down event."""
for entity in self:
if entity.active:
entity.reactMouseButtonDown(button, position)
def respawn(self):
"""Respawn all dead entities."""
for entity in self:
entity.respawn()
def clean(self):
"""Delete all dead entities."""
i = 0
while i < len(self):
if self[i].alive:
if isinstance(self[i], EntityGroup):
self[i].clean()
i += 1
else:
del self[i]
def show(self, context):
"""Show all entities."""
for entity in self:
entity.show(context)
def showBorn(self, context):
for entity in self:
entity.showBorn(context)
def __str__(self, name=None):
"""Return the str of the types of the entities."""
if name is None:
name = type(self).__name__
return super().__str__(name)
def update(self, dt):
"""Update all entities."""
for entity in self:
entity.update(dt)
def setFriction(self, friction):
"""Set the friction of the entities to a given friction."""
for entity in self:
entity.setFriction(friction)
def enlarge(self, n):
"""Enlarge the anatomies of the entities."""
for entity in self:
entity.enlarge(n)
def spread(self, n):
"""Spread the bodies of the entities."""
for entity in self:
entity.spread(n)
def control(self, controller):
"""Return the controlled entity using the controller."""
# print(self[:])
if len(controller) > 1:
return self[controller[0]].control(controller[1:])
else:
return self[controller[0]]
class AliveEntityGroup:
"""Group of entities that handle themselves."""
@classmethod
def random(cls, n=5, np=3, nm=2, nv=2, dv=2):
"""Create a random entity group using the optional number of entities 'n'."""
entities = [Entity.random(n=np, nm=nm, nv=nv, d=dv) for i in range(n)]
entities = dict(zip(range(len(entities)), entities))
return cls(entities)
def __init__(self, entities):
"""Create a body group using the dictionary of entities."""
self.entities = entities
self.updateAlives()
self.updateMaxBorn()
def updateAlives(self):
"""Update the ids of alive entities."""
self.alives = dict([(id, entity) for (id, entity)
in self.entities.items() if entity.alive])
# Recurrent data that must be updated.
# It is better to proceed that way for efficiency
@property
def deads(self):
"""Return the ids of dead entities."""
return {k: v for k, v in self.entities.items() if k not in self.alives}
def spawnEach(self):
"""Spawn each entity."""
for entity in self.entities.values():
entity.spawn()
        self.updateAlives()
def update(self, dt):
"""Update the group."""
self.updateEach(dt)
collisions = self.getCollisionsWithCircles()
if len(collisions) > 0:
collided = self.getCollided(collisions)
if len(collided) != 0:
self.killEach(collided)
self.updateAlives()
def updateEach(self, dt):
"""Update each entity alive."""
for entity in self.alives.values():
entity.update(dt)
def showEach(self, context):
"""Show each entity alive."""
for entity in self.alives.values():
entity.show(context)
def respawnDeads(self):
"""Respawn each dead entity."""
for entity in self.deads.values():
entity.respawn()
def getCollisions(self):
"""Return the list of couples of collisions detected between alive entities."""
collisions = []
keys = list(self.alives.keys())
n = len(keys)
for i in range(n):
for j in range(i + 1, n):
id1 = keys[i]
id2 = keys[j]
e1 = self.alives[id1]
e2 = self.alives[id2]
if e1.cross(e2):
collisions.append((id1, id2))
return collisions
def getCollided(self, collisions):
"""Return the ids of collided entities."""
ids = list(set(np.reshape(collisions, 2 * len(collisions))))
return dict([(id, self.entities[id]) for id in ids])
def killEach(self, collided):
"""Kill entities with their ids."""
for entity in collided.values():
entity.die()
def spread(self, n=10):
"""Spread randomly the entities."""
for entity in self.entities.values():
entity.motion = n * Motion.random()
def followEach(self, point):
"""Make each entity follow the point."""
for entity in self.alives.values():
entity.follow(point)
    def getMaxBorn(self):
        """Return the maximum born over all alive entities."""
return self._max_born
    def updateMaxBorn(self):
"""Set the max born of all the entities."""
self._max_born = max([e.born for e in self.alives.values()])
def getCollisionsWithCircles(self):
"""Return all circle collisions."""
collisions = []
keys = list(self.alives.keys())
n = len(keys)
for i in range(n):
for j in range(i + 1, n):
id1 = keys[i]
id2 = keys[j]
e1 = self.alives[id1]
e2 = self.alives[id2]
if (e1.position - e2.position).norm < e1.born + e2.born:
if e1.cross(e2):
collisions.append((id1, id2))
return collisions
@property
    def alive(self):
        """Return true if any of the entities is alive."""
return len(self.alives) != 0
@property
def dead(self):
"""Return true if all entities are dead."""
return len(self.alives) == 0
class GroupManager(Manager):
@classmethod
def random(cls, **kwargs):
"""Create a random entity group."""
group = EntityGroup.random(**kwargs)
return cls(group)
def __init__(self, group, **kwargs):
"""Create a body group manager using the group and optional arguments."""
super().__init__(**kwargs)
self.group = group
def update(self):
"""Update the group."""
collisions = self.group.getCollisions()
collided = self.group.getCollided(collisions)
self.group.killEach(collided)
self.group.updateAlives()
self.group.updateEach(self.dt)
def show(self):
"""Show the group."""
self.group.showEach(self.context)
class GroupTester(GroupManager):
def __init__(self, *args, **kwargs):
super().__init__(*args)
self.group.spread(100)
self.following = True
def update(self):
"""Update without collisions checks."""
# self.group.updateEach(self.dt)
self.updateWithCollisions()
def updateWithCollisions(self):
"""Update the group."""
self.group.followEach(self.context.point())
collisions = self.group.getCollisionsWithCircles()
if len(collisions) > 0:
self.context.console.append(collisions)
collided = self.group.getCollided(collisions)
if len(collided) != 0:
self.group.killEach(collided)
self.group.updateAlives()
self.group.updateEach(self.dt)
if __name__ == "__main__":
# bm = SpaceShipTester.random(following=True, dt=0.1)
# bm()
# gt = GroupTester.random(n=50)
# print(gt.group.alives)
# gt()
b1 = EntityGroup.random()
b2 = EntityGroup.random()
b1.enlarge(100)
print(b1 + b2)
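    # Extra sketch (relies on Entity's cross/die helpers used above): resolve
    # crossings between the two random groups, then list remaining collisions.
    EntityGroup.killOnCollision(b1, b2)
    print(EntityGroup.getCollided(b1, b2))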
| [
"[email protected]"
]
| |
a0b40c1e4cfc595d8bc11fa49ffb5e77e2d600c3 | 238ebc43c3d54d2842de75fd8ddf0b0b0261906e | /SimulateData.py | eb704550b17512faa02f5b718ec6ed67b6f373b5 | []
| no_license | johndowen/CrossMgr | 17c114ab80382b24ce0cdd228782bd000f513ea8 | fc9eaf8ae5d4919cef3f1a3680c169be70cf356b | refs/heads/master | 2021-06-28T03:14:41.682880 | 2017-09-17T00:35:26 | 2017-09-17T00:35:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,686 | py | import random
import bisect
from Names import GetNameTeam
def SimulateData( riders=200 ):
# Generate random rider events.
random.seed( 10101021 )
raceMinutes = 8
mean = 8*60.0 / 8 # Average lap time.
var = mean/20.0 # Variance between riders.
lapsTotal = int(raceMinutes * 60 / mean + 3)
raceTime = mean * lapsTotal
errorPercent = 1.0/25.0
for nMid in (10,100,200,500,1000,2000,5000,10000,20000,50000):
if nMid >= riders:
break
numStart = nMid - riders//2
startOffset = 10
lapTimes = []
riderInfo = []
for num in xrange(numStart,numStart+riders+1):
t = 0
if num < numStart + riders // 2:
mu = random.normalvariate( mean, mean/20.0 ) # Rider's random average lap time.
riderInfo.append( [num] + list(GetNameTeam(True)) )
else:
mu = random.normalvariate( mean * 1.15, mean/20.0 ) # These riders are slower, on average.
riderInfo.append( [num] + list(GetNameTeam(False)) )
t += startOffset # Account for offset start.
for laps in xrange(lapsTotal):
t += random.normalvariate( mu, var/2.0 ) # Rider's lap time.
if random.random() > errorPercent: # Respect error rate.
lapTimes.append( (t, num) )
lapTimes.sort()
# Get the times and leaders for each lap.
leaderTimes = [lapTimes[0][0]]
leaderNums = [lapTimes[0][1]]
numSeen = set()
for t, n in lapTimes:
if n in numSeen:
leaderTimes.append( t )
leaderNums.append( n )
numSeen.clear()
numSeen.add( n )
# Find the leader's time after the end of the race.
iLast = bisect.bisect_left( leaderTimes, raceMinutes * 60.0, hi = len(leaderTimes) - 1 )
if leaderTimes[iLast] < raceMinutes * 60.0:
iLast += 1
# Trim out everything except next arrivals after the finish time.
tLeaderLast = leaderTimes[iLast]
numSeen = set()
afterLeaderFinishEvents = [evt for evt in lapTimes if evt[0] >= tLeaderLast]
lapTimes = [evt for evt in lapTimes if evt[0] < tLeaderLast]
# Find the next unique arrival of all finishers.
lastLapFinishers = []
tStop = raceMinutes * 60.0
numSeen = set()
for t, n in afterLeaderFinishEvents:
if n not in numSeen:
numSeen.add( n )
lastLapFinishers.append( (t, n) )
lapTimes.extend( lastLapFinishers )
categories = [
{'name':'Junior', 'catStr':'{}-{}'.format(nMid-riders//2,nMid-1), 'startOffset':'00:00', 'distance':0.5, 'gender':'Men', 'numLaps':5},
{'name':'Senior', 'catStr':'{}-{}'.format(nMid,nMid+riders//2), 'startOffset':'00:{:02d}'.format(startOffset), 'distance':0.5, 'gender':'Women', 'numLaps':4}
]
return {
'raceMinutes': raceMinutes,
'lapTimes': lapTimes,
'categories': categories,
'riderInfo': riderInfo,
}
if __name__ == '__main__':
print SimulateData()['riderInfo']
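    # Added note: besides 'riderInfo', the returned dict also carries
    # 'raceMinutes', 'lapTimes' and 'categories'.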
| [
"[email protected]"
]
| |
2ef45d0901e7aa9952c147ec2d1daccaef373028 | e5d130e183b5dea1b7aad23a047c703fa0d2b3bf | /lightbus/transports/pool.py | b680ac6858890624f59a6c14e96bddbe072a9cae | [
"Apache-2.0"
]
| permissive | adamcharnock/lightbus | 4a86428b8203bfe98f77a32375ac961ef398ce16 | cf892779a9a9a8f69c789ffa83c24acfb7f9a336 | refs/heads/master | 2023-08-26T04:19:39.395735 | 2023-08-23T11:07:44 | 2023-08-23T11:07:44 | 94,617,214 | 193 | 22 | Apache-2.0 | 2023-08-10T21:21:51 | 2017-06-17T10:39:23 | Python | UTF-8 | Python | false | false | 7,438 | py | import threading
from inspect import iscoroutinefunction, isasyncgenfunction
from typing import NamedTuple, List, TypeVar, Type, Generic, TYPE_CHECKING
from lightbus.exceptions import (
TransportPoolIsClosed,
CannotShrinkEmptyPool,
CannotProxySynchronousMethod,
CannotProxyPrivateMethod,
CannotProxyProperty,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,cyclic-import
from lightbus.config import Config
from lightbus.transports.base import Transport
VT = TypeVar("VT", bound=Transport)
else:
VT = TypeVar("VT")
class TransportPool(Generic[VT]):
"""Pool for managing access to transports
    This pool will function as a transparent proxy to the underlying transports.
In most cases you shouldn't need to access the underlying transports. If you
    do you can use the context manager as follows:
async with transport_pool as transport:
transport.send_event(...)
Note that this pool will only perform pooling within the thread in which the
pool was created. If another thread uses the pool then the pool will be bypassed.
In this case, a new transport will always be created on checkout, and this
transport will then be immediately closed when checked back in.
This is because the pool will normally be closed sometime after the thread has
completed, at which point each transport in the pool will be closed. However, closing
the transport requires access to the event loop for the specific transport, but that
    loop would have been closed when the thread shut down. It therefore becomes impossible
    to close the transport cleanly. Therefore, in the case of threads, we create new transports on
checkout, and close and discard the transport on checkin.
This will have some performance impact for non-async user-provided-callables which need to
    access the bus. These callables are run in a thread, and so will need fresh connections.
"""
def __init__(self, transport_class: Type[VT], transport_config: NamedTuple, config: "Config"):
self.transport_class = transport_class
self.transport_config = transport_config
self.config = config
self.closed = False
self.lock = threading.RLock()
self.pool: List[VT] = []
self.checked_out = set()
self.context_stack: List[VT] = []
self.home_thread = threading.current_thread()
def __repr__(self):
return f"<Pool of {self.transport_class.__name__} at 0x{id(self):02x} to {self}>"
def __hash__(self):
return hash((self.transport_class, self.transport_config))
def __eq__(self, other):
return hash(self) == hash(other)
def __str__(self):
# Here we create an un-opened transport and stringify it.
# This means we can display nice redis URLs when displaying the pool
# for debugging output.
transport = self._instantiate_transport()
return str(transport)
async def grow(self):
with self.lock:
new_transport = await self._create_transport()
self.pool.append(new_transport)
async def shrink(self):
with self.lock:
try:
old_transport = self.pool.pop(0)
except IndexError:
raise CannotShrinkEmptyPool(
"Transport pool is already empty, cannot shrink it further"
)
await self._close_transport(old_transport)
async def checkout(self) -> VT:
if self.closed:
raise TransportPoolIsClosed("Cannot get a connection, transport pool is closed")
if threading.current_thread() != self.home_thread:
return await self._create_transport()
else:
with self.lock:
if not self.pool:
await self.grow()
transport = self.pool.pop(0)
self.checked_out.add(transport)
return transport
async def checkin(self, transport: VT):
if threading.current_thread() != self.home_thread:
return await self._close_transport(transport)
else:
with self.lock:
self.checked_out.discard(transport)
self.pool.append(transport)
if self.closed:
await self._close_all()
@property
def free(self) -> int:
return len(self.pool)
@property
def in_use(self) -> int:
return len(self.checked_out)
@property
def total(self) -> int:
return self.free + self.in_use
async def __aenter__(self) -> VT:
transport = await self.checkout()
self.context_stack.append(transport)
return transport
async def __aexit__(self, exc_type, exc_val, exc_tb):
transport = self.context_stack.pop()
await self.checkin(transport)
async def close(self):
with self.lock:
self.closed = True
await self._close_all()
async def _close_all(self):
with self.lock:
while self.pool:
await self._close_transport(self.pool.pop())
def _instantiate_transport(self) -> VT:
"""Instantiate a transport without opening it"""
return self.transport_class.from_config(
config=self.config, **self.transport_config._asdict()
)
async def _create_transport(self) -> VT:
"""Return an opened transport"""
new_transport = self._instantiate_transport()
await new_transport.open()
return new_transport
async def _close_transport(self, transport: VT):
"""Close a specific transport"""
await transport.close()
def __getattr__(self, item):
async def fn_pool_wrapper(*args, **kwargs):
async with self as transport:
return await getattr(transport, item)(*args, **kwargs)
async def gen_pool_wrapper(*args, **kwargs):
async with self as transport:
async for value in getattr(transport, item)(*args, **kwargs):
yield value
attr = getattr(self.transport_class, item, None)
if not attr:
raise AttributeError(
f"Neither the transport pool {repr(self)} nor the transport with class "
f"{repr(self.transport_class)} has an attribute named {item}"
)
elif item[0] == "_":
raise CannotProxyPrivateMethod(
f"Cannot proxy private method calls to transport. Use the pool's async context or "
f"checkout() method if you really need to access private methods. (Private methods "
f"are ones whose name starts with an underscore)"
)
elif not callable(attr):
raise CannotProxyProperty(
f"Cannot proxy property access on transports. Use the pool's async context or "
f"checkout() method to get access to a transport directly."
)
else:
if iscoroutinefunction(attr):
return fn_pool_wrapper
elif isasyncgenfunction(attr):
return gen_pool_wrapper
else:
raise CannotProxySynchronousMethod(
f"{self.transport_class.__name__}.{item}() is synchronous "
"and must be accessed directly and not via the pool"
)
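# Usage sketch (hypothetical MyTransport/my_config objects; the real classes
# live elsewhere in lightbus):
#
#     pool = TransportPool(MyTransport, my_config, config)
#     async with pool as transport:
#         await transport.send_event(...)
#
# Plain proxied calls such as `await pool.send_event(...)` check a transport
# out, await the coroutine, and check the transport back in automatically.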
| [
"[email protected]"
]
| |
563dfccd2fd271a2ae0edc1613952e7947965a62 | 58afefdde86346760bea40690b1675c6639c8b84 | /leetcode/global-and-local-inversions/288943653.py | 0f1c892c5fa61990ec2ad92c40c0f4af8ae7abd2 | []
| no_license | ausaki/data_structures_and_algorithms | aaa563f713cbab3c34a9465039d52b853f95548e | 4f5f5124534bd4423356a5f5572b8a39b7828d80 | refs/heads/master | 2021-06-21T10:44:44.549601 | 2021-04-06T11:30:21 | 2021-04-06T11:30:21 | 201,942,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | # title: global-and-local-inversions
# detail: https://leetcode.com/submissions/detail/288943653/
# datetime: Fri Dec 27 19:09:22 2019
# runtime: 388 ms
# memory: 13.4 MB
class Solution:
def isIdealPermutation(self, A: List[int]) -> bool:
N = len(A)
k = -1
i = 0
while i < N:
j = i + 1
while j < N and A[j] > A[j - 1]:
k = A[j - 1]
j += 1
if j == N:
break
i = j
if A[i] < k:
return False
if i + 1 < N and (A[i] > A[i + 1] or A[i + 1] < A[i - 1]):
return False
k = A[i - 1]
i += 1
return True | [
"[email protected]"
]
| |
3cb9259d4f4214fc9346777f14b80e8f08b66957 | e34dfe70b30e584d8b1992377b1b4f8a08235824 | /cloudmesh/common/console.py | 7042af40082ed1d6fcf2d07ae6ca9ec0509d795b | [
"Python-2.0",
"Apache-2.0"
]
| permissive | juaco77/cloudmesh-common | 09efd91310f1d6fc5d34f60f4c34e63e8c6fc9ae | 0bb330da363b8edb9e509a8138a3054978a8a390 | refs/heads/master | 2020-06-08T05:04:18.070674 | 2019-05-17T10:33:13 | 2019-05-17T10:33:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,233 | py | """
Printing messages in a console
"""
from __future__ import print_function
import textwrap
import traceback
import colorama
from colorama import Fore, Back, Style
colorama.init()
def indent(text, indent=2, width=128):
"""
indents the given text by the indent specified and wrapping to the given width
:param text: the text to print
:param indent: indent characters
:param width: the width of the text
:return:
"""
return "\n".join(
textwrap.wrap(text,
width=width,
initial_indent=" " * indent,
subsequent_indent=" " * indent))
class Console(object):
"""
A simple way to print in a console terminal in color. Instead of using
simply the print statement you can use special methods to indicate
warnings, errors, ok and regular messages.
Example Usage::
Console.warning("Warning")
Console.error("Error")
Console.info("Info")
Console.msg("msg")
Console.ok("Success")
One can switch the color mode off with::
Console.color = False
Console.error("Error")
The color will be switched on by default.
"""
color = True
debug = True
theme_color = {
'HEADER': Fore.MAGENTA,
'BLACK': Fore.BLACK,
'CYAN': Fore.CYAN,
'WHITE': Fore.WHITE,
'BLUE': Fore.BLUE,
'OKBLUE': Fore.BLUE,
'OKGREEN': Fore.GREEN,
'GREEN': Fore.GREEN,
'FAIL': Fore.RED,
'WARNING': Fore.MAGENTA,
'RED': Fore.RED,
'ENDC': '\033[0m',
'BOLD': "\033[1m",
}
theme_bw = {
'HEADER': '',
'BLACK': '',
'CYAN': '',
'WHITE': '',
'BLUE': '',
'OKBLUE': '',
'OKGREEN': '',
'GREEN': '',
'FAIL': '',
'WARNING': '',
'RED': '',
'ENDC': '',
'BOLD': "",
}
theme = theme_color
@classmethod
def set_debug(cls, on=True):
"""
        sets debugging on or off
:param on: if on debugging is set
:return:
"""
cls.debug = on
@staticmethod
def set_theme(color=True):
"""
defines if the console messages are printed in color
:param color: if True its printed in color
:return:
"""
if color:
Console.theme = Console.theme_color
else:
Console.theme = Console.theme_bw
Console.color = color
@staticmethod
def get(name):
"""
returns the default theme for printing console messages
:param name: the name of the theme
:return:
"""
if name in Console.theme:
return Console.theme[name]
else:
return Console.theme['BLACK']
@staticmethod
def txt_msg(message, width=79):
"""
        returns the message wrapped to the given line width
        :param message: the message to print
        :param width: the width of the line
:return:
"""
return textwrap.fill(message, width=width)
@staticmethod
def msg(*message):
"""
prints a message
:param message: the message to print
:return:
"""
        text = " ".join(message)
        print(text)
@classmethod
def error(cls, message, prefix=True, traceflag=False):
"""
prints an error message
:param message: the message
:param prefix: a prefix for the message
:param traceflag: if true the stack trace is retrieved and printed
:return:
"""
# print (message, prefix)
message = message or ""
if prefix:
text = "ERROR: "
else:
text = ""
if cls.color:
cls.cprint('FAIL', text, str(message))
else:
print(cls.txt_msg(text + str(message)))
if traceflag and cls.debug:
trace = traceback.format_exc().strip()
if trace:
print()
print("Trace:")
print("\n ".join(str(trace).splitlines()))
print()
@staticmethod
def TODO(message, prefix=True, traceflag=True):
"""
prints an TODO message
:param message: the message
:param prefix: if set to true it prints TODO: as prefix
:param traceflag: if true the stack trace is retrieved and printed
:return:
"""
message = message or ""
if prefix:
text = "TODO: "
else:
text = ""
if Console.color:
Console.cprint('FAIL', text, str(message))
else:
print(Console.msg(text + str(message)))
trace = traceback.format_exc().strip()
if traceflag and trace != "None":
print()
print("\n".join(str(trace).splitlines()))
print()
@staticmethod
def debug_msg(message):
"""
print a debug message
:param message: the message
:return:
"""
message = message or ""
if Console.color:
Console.cprint('RED', 'DEBUG: ', message)
else:
print(Console.msg('DEBUG: ' + message))
@staticmethod
def info(message):
"""
prints an informational message
:param message: the message
:return:
"""
message = message or ""
if Console.color:
Console.cprint('OKBLUE', "INFO: ", message)
else:
print(Console.msg("INFO: " + message))
@staticmethod
def warning(message):
"""
prints a warning
:param message: the message
:return:
"""
message = message or ""
if Console.color:
Console.cprint('WARNING', "WARNING: ", message)
else:
print(Console.msg("WARNING: " + message))
@staticmethod
def ok(message):
"""
prints an ok message
        :param message: the message
:return:
"""
message = message or ""
if Console.color:
Console.cprint('OKGREEN', "", message)
else:
print(Console.msg(message))
@staticmethod
def cprint(color, prefix, message):
"""
prints a message in a given color
:param color: the color as defined in the theme
:param prefix: the prefix (a string)
:param message: the message
:return:
"""
message = message or ""
prefix = prefix or ""
print((Console.theme[color] +
prefix +
message +
Console.theme['ENDC']))
#
# Example
#
if __name__ == "__main__":
print(Console.color)
print(Console.theme)
Console.warning("Warning")
Console.error("Error")
Console.info("Info")
Console.msg("msg")
Console.ok("Ok")
Console.color = False
print(Console.color)
Console.error("Error")
print(Fore.RED + 'some red text')
print(Back.GREEN + 'and with a green background')
print(Style.DIM + 'and in dim text')
print(Fore.RESET + Back.RESET + Style.RESET_ALL)
print('back to normal now')
| [
"[email protected]"
]
| |
6f9cd1e5b7498d442628bca6592c84f90f1d02c0 | 82f993631da2871933edf83f7648deb6c59fd7e4 | /w1/L1/12.py | 4e40656a6ec9bba93b7855da255ff4c9ddd100ee | []
| no_license | bobur554396/PPII2021Summer | 298f26ea0e74c199af7b57a5d40f65e20049ecdd | 7ef38fb4ad4f606940d2ba3daaa47cbd9ca8bcd2 | refs/heads/master | 2023-06-26T05:42:08.523345 | 2021-07-24T12:40:05 | 2021-07-24T12:40:05 | 380,511,125 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | # line = input()
# print(len(line))
'''
4
4 10 -1 100
'''
n = int(input())
# [<returning iter val> for <iter> in <list> condition ]
numbers = [int(n) for n in input().split()]
print(numbers)
# nums = []
# for n in numbers:
# nums.append(int(n))
# print(nums)
s = 0
for i in numbers:
if i > 0:
s += i
# print(s)
print(sum([n for n in numbers if n > 0])) | [
"[email protected]"
]
| |
9304946f7f5ed9562d7a3dbb6c52486fd296a7a1 | 9ef502b92bd218e919c65513e835c15c32667e8f | /samsung_load_0113.py | 75e8319216cf77777569806bc31afb952c0b80c3 | []
| no_license | YoungriKIM/samsung_stock | 034bc586440ab04531bb8d0b951747377c340966 | f15b6a3ebc3db76f960fc8f138dba7e43e345ef4 | refs/heads/main | 2023-04-14T03:20:51.169497 | 2021-03-25T08:35:48 | 2021-03-25T08:35:48 | 351,362,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | import numpy as np
x_train = np.load('../data/npy/samsung_x_train.npy')
y_train = np.load('../data/npy/samsung_y_train.npy')
x_val = np.load('../data/npy/samsung_x_val.npy')
y_val = np.load('../data/npy/samsung_y_val.npy')
x_test = np.load('../data/npy/samsung_x_test.npy')
y_test = np.load('../data/npy/samsung_y_test.npy')
x_pred = np.load('../data/npy/samsung_x_pred.npy')
from tensorflow.keras.models import load_model
model = load_model('../data/modelcheckpoint/samsung_14-891193.4375.hdf5')
#4. evaluate, predict
result = model.evaluate(x_test, y_test, batch_size=1)
print('mse: ', result[0])
print('mae: ', result[1])
y_pred = model.predict(x_pred)
print('Samsung closing price on 1/14: ', y_pred)
# mse: 1286656.875
# mae: 825.32763671875
# Samsung closing price on 1/14: [[90572.59]] | [
"[email protected]"
]
| |
e6ba2e66f4df8af86c5e31215b5c3d8973ecf055 | 81302ee42c1b3c25ce1566d70a782ab5525c7892 | /nr/nr_band_matching/autocorrelation_full_chain.py | aba89bd8971c7b2b106fb1a5a0ea7d38951568ae | []
| no_license | mdanthony17/neriX | 5dd8ce673cd340888d3d5e4d992f7296702c6407 | 2c4ddbb0b64e7ca54f30333ba4fb8f601bbcc32e | refs/heads/master | 2020-04-04T06:01:25.200835 | 2018-06-05T00:37:08 | 2018-06-05T00:46:11 | 49,095,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,812 | py | #!/usr/bin/python
import sys, array, os
#sys.path.insert(0, '..')
import ROOT as root
from rootpy.plotting import Hist, Hist2D, Canvas, Legend
import nr_band_config
import numpy as np
import corner
import cPickle as pickle
import time, emcee
if len(sys.argv) not in (5, 6):
    print 'Usage is python autocorrelation_full_chain.py <filename> <anode setting> <cathode setting> <num walkers> [<deviation_from_nest(efficiency fit only!!!)>]'
sys.exit()
filename = sys.argv[1]
anode_setting = float(sys.argv[2])
cathode_setting = float(sys.argv[3])
num_walkers = int(sys.argv[4])
nameOfResultsDirectory = nr_band_config.results_directory_name
l_plots = ['plots', filename]
dir_specifier_name = '%.3fkV_%.1fkV' % (cathode_setting, anode_setting)
nameOfResultsDirectory += '/yields_fit'
sPathToFile = '%s/%s/%s/sampler_dictionary.p' % (nameOfResultsDirectory, dir_specifier_name, filename)
if os.path.exists(sPathToFile):
dSampler = pickle.load(open(sPathToFile, 'r'))
l_chains = []
for sampler in dSampler[num_walkers]:
l_chains.append(sampler['_chain'])
a_full_chain = np.concatenate(l_chains, axis=1)
#print a_full_chain.shape
l_chains = dSampler[num_walkers][-1]['_chain'] # look at last sampler only (can change)
print 'Successfully loaded sampler!'
else:
print sPathToFile
print 'Could not find file!'
sys.exit()
print emcee.autocorr.integrated_time(np.mean(a_full_chain, axis=0), axis=0,
low=10, high=None, step=1, c=2,
fast=False)
"""
# need to figure this out
if not fit_efficiency:
numDim = 36
else:
numDim = 3
lLabelsForCorner = ['py_0', 'py_1', 'py_2', 'py_3', 'py_4', 'py_5', 'py_6', 'py_7', 'qy_0', 'qy_1', 'qy_2', 'qy_3', 'qy_4', 'qy_5', 'qy_6', 'qy_7', 'intrinsic_res_s1', 'intrinsic_res_s2', 'g1_value', 'spe_res_rv', 'g2_value', 'gas_gain_rv', 'gas_gain_width_rv', 'pf_eff_par0', 'pf_eff_par1', 's1_eff_par0', 's1_eff_par1', 's2_eff_par0', 's2_eff_par1', 'pf_stdev_par0', 'pf_stdev_par1', 'pf_stdev_par2', 'exciton_to_ion_par0_rv', 'exciton_to_ion_par1_rv', 'exciton_to_ion_par2_rv', 'scale_par']
if fit_efficiency:
lLabelsForCorner = ['scale', 's2_eff_par0', 's2_eff_par1']
samples = aSampler[:, -5:, :].reshape((-1, numDim))
start_time = time.time()
print 'Starting corner plot...\n'
fig = corner.corner(samples, labels=lLabelsForCorner, quantiles=[0.16, 0.5, 0.84], show_titles=True, title_kwargs={"fontsize": 12})
print 'Corner plot took %.3f minutes.\n\n' % ((time.time()-start_time)/60.)
# path for save
sPathForSave = './'
for directory in l_plots:
sPathForSave += directory + '/'
if not os.path.exists(sPathForSave):
os.makedirs(sPathForSave)
plot_name = 'nr_band_corner_%s' % (filename)
plot_name = 'yields_fit_%s' % (plot_name)
fig.savefig('%s%s.png' % (sPathForSave, plot_name))
"""
| [
"[email protected]"
]
| |
27f1d1e42412bfb3574bdec543ba0703469f2fce | 82f6a6c50a1fef2d7522a43cc4f60e5ff80b37a8 | /solutions/Missing Number/solution.py | 0bf89957ba6d64c0deea0d059f647ac75434429a | [
"MIT"
]
| permissive | nilax97/leetcode-solutions | ca0f9545ce70975617738f053e0935fac00b04d4 | d3c12f2b289662d199510e0431e177bbf3cda121 | refs/heads/master | 2023-05-14T02:21:48.893716 | 2021-06-08T13:16:53 | 2021-06-08T13:16:53 | 374,466,870 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | class Solution:
def missingNumber(self, nums: List[int]) -> int:
return (len(nums) * (len(nums)+1))//2 - sum(nums)
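# Sanity note (hypothetical input): exactly one value of 0..n is absent, so
# the Gauss sum n*(n+1)//2 minus sum(nums) is that missing value.
# e.g. Solution().missingNumber([3, 0, 1]) == 2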
| [
"[email protected]"
]
| |
173de47073bcfee2292415ce0e9b944d48e315cb | d912423117d96cd67d23bab87c0773a07d962cc1 | /backend/socket_chat/consumers/main.py | a923f06cb91d42b37282f3545803320df8b675de | []
| no_license | modekano/ChatApp | b98f9081235c976642d024d56d1531b5120a04cf | 22cca9f3d4c25a93ca255d6616f61773da757d18 | refs/heads/master | 2020-08-19T06:03:45.010063 | 2019-10-17T11:17:07 | 2019-10-17T11:17:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,557 | py | from backend.socket_chat.consumers.base import BaseConsumer
from channels.db import database_sync_to_async
from backend.profiles.models import Profile
from backend.socket_chat.consumers.dialog import DialogConsumer
class MainConsumer(DialogConsumer, BaseConsumer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dialogs = []
self._groups = []
async def channels_message(self, message):
""" Redirect Group messages to each person """
await self._send_message(message['data'], event=message['event'])
async def connect_users(self, message):
""" Connect user to rooms """
users = message['data']['users']
room = message['data']['room']
room_data = message['data']['room_data']
event = message['event']
if self.user.id in users:
await self.channel_layer.group_add(room, self.channel_name)
await self._send_message(room_data[self.user.id], event=event)
async def on_authenticate_success(self):
""" Execute after user authenticate """
await self.get_user_channels(self.user)
await self.channel_layer.group_add('general', self.channel_name)
# connect to channel for all groups
if self.dialogs:
for dialog in self.dialogs:
await self.channel_layer.group_add(f'dialog_{dialog}', self.channel_name)
if self._groups:
for group in self._groups:
await self.channel_layer.group_add(f'group_{group}', self.channel_name)
async def disconnect(self, *args, **kwargs):
""" Discard from all channels """
if self.dialogs:
for dialog in self.dialogs:
await self.channel_layer.group_discard(
f'dialog_{dialog}',
self.channel_name
)
if self._groups:
for group in self._groups:
await self.channel_layer.group_discard(
f'group_{group}',
self.channel_name
)
@database_sync_to_async
def get_user_channels(self, user):
""" Get all user's dialogs & groups id """
profile = Profile.objects.get(user=user)
for dialog in profile.dialogs.values():
self.dialogs.append(dialog.get('id'))
for group in profile.groups.values():
self._groups.append(group.get('id'))
| [
"[email protected]"
]
| |
4d9b59df5f0fe4ca4796d0121a12dc0208a93d3e | f5b7b87d0de1459c284b6ebf3aa21c6a96e52207 | /broadgauge/views/auth.py | 8d91aca9aa0d2097097fb9062d97b809ab2611b1 | []
| no_license | iambibhas/broadgauge | cfbce9bbebdc5337918df7b378810a53c9a68f8b | 381816cb9c288b071b44f189d662611cdc57e58b | refs/heads/master | 2021-01-18T09:01:32.155941 | 2014-08-15T11:42:58 | 2014-08-15T11:42:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,323 | py | import web
import json
from .. import account
from .. import oauth
from .. import forms
from ..sendmail import sendmail
from ..flash import flash
from ..models import User, Organization
from ..template import render_template
urls = (
"/login", "login",
"/logout", "logout",
"/oauth/(github|google|facebook)", "oauth_callback",
"(/trainers/signup|/orgs/signup|/login)/reset", "signup_reset",
"(/trainers/signup|/orgs/signup|/login)/(github|google|facebook)", "signup_redirect",
"/trainers/signup", "trainer_signup",
"/orgs/signup", "org_signup",
)
def get_oauth_redirect_url(provider):
home = web.ctx.home
if provider == 'google' and home == 'http://0.0.0.0:8080':
# google doesn't like 0.0.0.0
home = 'http://127.0.0.1:8080'
elif provider == 'facebook' and home == 'http://127.0.0.1:8080':
# facebook doesn't like 127.0.0.1
home = 'http://0.0.0.0:8080'
return "{home}/oauth/{provider}".format(home=home, provider=provider)
def get_oauth_data():
userdata_json = web.cookies().get('oauth')
if userdata_json:
try:
return json.loads(userdata_json)
except ValueError:
pass
class login:
def GET(self):
userdata = get_oauth_data()
if userdata:
user = User.find(email=userdata['email'])
if user:
account.set_login_cookie(user.email)
raise web.seeother("/dashboard")
else:
return render_template("login.html", userdata=userdata,
error=True)
else:
return render_template("login.html", userdata=None)
class logout:
def POST(self):
account.logout()
referer = web.ctx.env.get('HTTP_REFERER', '/')
raise web.seeother(referer)
class oauth_callback:
def GET(self, service):
i = web.input(code=None, state="/")
if i.code:
redirect_uri = get_oauth_redirect_url(service)
client = oauth.oauth_service(service, redirect_uri)
userdata = client.get_userdata(i.code)
if userdata:
# login or signup
t = User.find(email=userdata['email'])
if t:
account.set_login_cookie(t.email)
raise web.seeother("/dashboard")
else:
web.setcookie("oauth", json.dumps(userdata))
raise web.seeother(i.state)
flash("Authorization failed, please try again.", category="error")
raise web.seeother(i.state)
class signup_redirect:
def GET(self, base, provider):
redirect_uri = get_oauth_redirect_url(provider)
client = oauth.oauth_service(provider, redirect_uri)
url = client.get_authorize_url(state=base)
raise web.seeother(url)
class signup_reset:
def GET(self, base):
# TODO: This should be a POST request, not GET
web.setcookie("oauth", "", expires=-1)
raise web.seeother(base)
class trainer_signup:
FORM = forms.TrainerSignupForm
TEMPLATE = "trainers/signup.html"
def GET(self):
userdata = get_oauth_data()
if userdata:
# if already logged in, send him to dashboard
user = self.find_user(email=userdata['email'])
if user:
if not user.is_trainer():
user.make_trainer()
account.set_login_cookie(user.email)
raise web.seeother("/dashboard")
form = self.FORM(userdata)
return render_template(self.TEMPLATE, form=form, userdata=userdata)
def POST(self):
userdata = get_oauth_data()
if not userdata:
return self.GET()
i = web.input()
form = self.FORM(i)
if not form.validate():
return render_template(self.TEMPLATE, form=form)
return self.signup(i, userdata)
def signup(self, i, userdata):
user = User.new(
name=i.name,
email=userdata['email'],
username=i.username,
phone=i.phone,
city=i.city,
github=userdata.get('github'),
is_trainer=True)
account.set_login_cookie(user.email)
flash("Thank you for signing up as a trainer!")
sendmail("emails/trainers/welcome.html",
subject="Welcome to Python Express",
to=user.email,
trainer=user)
raise web.seeother("/dashboard")
def find_user(self, email):
return User.find(email=email)
class org_signup(trainer_signup):
FORM = forms.OrganizationSignupForm
TEMPLATE = "orgs/signup.html"
def find_user(self, email):
# We don't limit numer of org signups per person
return None
def signup(self, i, userdata):
user = User.find(email=userdata['email'])
if not user:
user = User.new(name=userdata['name'], email=userdata['email'])
org = Organization.new(name=i.name,
city=i.city)
org.add_member(user, i.role)
account.set_login_cookie(user.email)
flash("Thank you for registering your organization with Python Express!")
raise web.seeother("/orgs/{}".format(org.id))
| [
"[email protected]"
]
| |
630c9af1fd5f87769d2cd87621e901ba2e383c7c | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-2/dn13 - objektni minobot/M-17021-1547.py | e7ab6dbf8eecabc84d9990edc02404615aaba381 | []
| no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,849 | py | class Minobot:
def __init__(self):
self.x=0
self.y=0
self.direction=90
self.tab=[]
def koordinate(self):
return self.x,self.y
def naprej(self, d):
self.tab.append(['naprej',d])
if self.direction == 0 or self.direction == 360:
self.y+=d
elif self.direction == 90 or self.direction == -90:
self.x+=d
elif self.direction == 180 or self.direction == -180:
self.y-=d
elif self.direction == 270 or self.direction == -270:
self.x-=d
    def desno(self):
        self.tab.append(['desno', 90])
        # modulo keeps direction on one of 0/90/180/270, so a turn after an
        # undo can never drift onto the unhandled 360/450 values
        self.direction = (self.direction + 90) % 360
    def levo(self):
        self.tab.append(['levo', -90])
        self.direction = (self.direction - 90) % 360
def razdalja(self):
return abs(self.x)+abs(self.y)
def razveljavi(self):
if self.tab:
if self.tab[len(self.tab)-1][0] == 'naprej':
self.naprej(-(self.tab[len(self.tab)-1][1]))
elif self.tab[len(self.tab)-1][0] == 'desno':
self.levo()
elif self.tab[len(self.tab)-1][0] == 'levo':
self.desno()
self.tab.pop()
self.tab.pop()
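# Quick manual check (mirrors the unit tests below): start facing +x,
# turn right, advance, then undo both steps.
#   bot = Minobot(); bot.desno(); bot.naprej(2)   # koordinate() == (0, -2)
#   bot.razveljavi(); bot.razveljavi()            # koordinate() == (0, 0)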
import unittest
class TestObvezna(unittest.TestCase):
def test_minobot(self):
a = Minobot()
b = Minobot()
self.assertEqual(a.koordinate(), (0, 0))
self.assertEqual(b.koordinate(), (0, 0))
self.assertEqual(a.razdalja(), 0)
self.assertEqual(b.razdalja(), 0)
a.naprej(1)
self.assertEqual(a.koordinate(), (1, 0))
self.assertEqual(b.koordinate(), (0, 0))
self.assertEqual(a.razdalja(), 1)
self.assertEqual(b.razdalja(), 0)
a.naprej(2)
self.assertEqual(a.koordinate(), (3, 0))
self.assertEqual(b.koordinate(), (0, 0))
self.assertEqual(a.razdalja(), 3)
self.assertEqual(b.razdalja(), 0)
b.naprej(2)
self.assertEqual(a.koordinate(), (3, 0))
self.assertEqual(b.koordinate(), (2, 0))
self.assertEqual(a.razdalja(), 3)
self.assertEqual(b.razdalja(), 2)
        a.desno() # now facing down
a.naprej(4)
self.assertEqual(a.koordinate(), (3, -4))
self.assertEqual(b.koordinate(), (2, 0))
self.assertEqual(a.razdalja(), 7)
self.assertEqual(b.razdalja(), 2)
        a.desno() # facing left
a.naprej(1)
self.assertEqual(a.koordinate(), (2, -4))
self.assertEqual(b.koordinate(), (2, 0))
self.assertEqual(a.razdalja(), 6)
self.assertEqual(b.razdalja(), 2)
        a.desno() # facing up
a.naprej(1)
self.assertEqual(a.koordinate(), (2, -3))
self.assertEqual(b.koordinate(), (2, 0))
self.assertEqual(a.razdalja(), 5)
self.assertEqual(b.razdalja(), 2)
        a.desno() # facing right
a.naprej(3)
self.assertEqual(a.koordinate(), (5, -3))
self.assertEqual(b.koordinate(), (2, 0))
self.assertEqual(a.razdalja(), 8)
self.assertEqual(b.razdalja(), 2)
        b.levo() # facing up
b.naprej(3)
self.assertEqual(b.koordinate(), (2, 3))
self.assertEqual(b.razdalja(), 5)
        b.levo() # facing left
b.naprej(3)
self.assertEqual(b.koordinate(), (-1, 3))
self.assertEqual(b.razdalja(), 4)
a.naprej(5)
self.assertEqual(a.koordinate(), (10, -3))
self.assertEqual(a.razdalja(), 13)
class TestDodatna(unittest.TestCase):
def test_undo(self):
a = Minobot()
        a.desno() # facing down
a.naprej(4)
        a.levo() # facing right
a.naprej(1)
a.naprej(2)
self.assertEqual(a.koordinate(), (3, -4))
a.razveljavi()
self.assertEqual(a.koordinate(), (1, -4))
a.naprej(1)
self.assertEqual(a.koordinate(), (2, -4))
a.razveljavi()
self.assertEqual(a.koordinate(), (1, -4))
a.razveljavi()
self.assertEqual(a.koordinate(), (0, -4))
a.naprej(1)
self.assertEqual(a.koordinate(), (1, -4))
a.razveljavi()
self.assertEqual(a.koordinate(), (0, -4))
        a.razveljavi() # facing down again
self.assertEqual(a.koordinate(), (0, -4))
a.naprej(2)
self.assertEqual(a.koordinate(), (0, -6))
a.razveljavi()
self.assertEqual(a.koordinate(), (0, -4))
a.razveljavi()
self.assertEqual(a.koordinate(), (0, 0))
a.naprej(3)
self.assertEqual(a.koordinate(), (0, -3))
a.razveljavi()
self.assertEqual(a.koordinate(), (0, 0))
        a.razveljavi() # facing right again
self.assertEqual(a.koordinate(), (0, 0))
a.naprej(3)
self.assertEqual(a.koordinate(), (3, 0))
a.razveljavi()
self.assertEqual(a.koordinate(), (0, 0))
        a.razveljavi() # must not crash
self.assertEqual(a.koordinate(), (0, 0))
a.naprej(2)
self.assertEqual(a.koordinate(), (2, 0))
a.razveljavi()
self.assertEqual(a.koordinate(), (0, 0))
        a.razveljavi() # must not crash
self.assertEqual(a.koordinate(), (0, 0))
        a.razveljavi() # must not crash
self.assertEqual(a.koordinate(), (0, 0))
        a.razveljavi() # must not crash
self.assertEqual(a.koordinate(), (0, 0))
        a.razveljavi() # must not crash
self.assertEqual(a.koordinate(), (0, 0))
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
d4b371038a871ea6c4c51c8868534d2b5ff67817 | c333b3cfb05f4bc08a682ca5f4d70b212e9624ff | /punyty/objects.py | 45d95c22a7c1a12f50b8844fd42352e55fd3d51a | [
"MIT"
]
| permissive | jsheedy/punyty | a450f7daaf9e8b2acf5d861ac258e07e762c46c6 | 34d5bffc4cf85985537e199567c5ba2aa9105a05 | refs/heads/master | 2020-05-09T19:58:37.665508 | 2019-12-25T18:22:00 | 2019-12-25T18:22:00 | 181,391,798 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,551 | py | from math import sqrt
import numpy as np
from .object3d import Object3D
class Tetrahedron(Object3D):
vertices = np.array([
[1, 1, 1],
[-1, -1, 1],
[1, -1, -1],
[-1, 1, -1],
], dtype=np.float64)
edges = (
(0, 1),
(1, 2),
(2, 3),
(1, 3),
(0, 2),
(0, 3),
)
polys = (
(0, 1, 2),
(0, 2, 3),
(0, 3, 1),
(3, 2, 1)
)
class Cube(Object3D):
vertices = np.array([
[1, 1, -1],
[-1, 1, -1],
[-1, -1, -1],
[1, -1, -1],
[1, 1, 1],
[-1, 1, 1],
[-1, -1, 1],
[1, -1, 1]
], dtype=np.float64)
edges = (
(0, 1),
(1, 2),
(2, 3),
(3, 0),
(4, 5),
(5, 6),
(6, 7),
(7, 4),
(0, 4),
(1, 5),
(2, 6),
(3, 7),
)
polys = (
(0, 1, 2),
(2, 3, 0),
(4, 7, 6),
(6, 5, 4),
(1, 5, 6),
(6, 2, 1),
(0, 3, 7),
(7, 4, 0),
(3, 2, 6),
(6, 7, 3),
(5, 1, 0),
(0, 4, 5),
)
class Octahedron(Object3D):
vertices = np.array([
[1, 0, 0],
[-1, 0, 0],
[0, 1, 0],
[0, -1, 0],
[0, 0, 1],
[0, 0, -1],
], dtype=np.float64)
edges = (
(0, 2),
(0, 3),
(0, 4),
(0, 5),
(1, 2),
(1, 3),
(1, 4),
(1, 5),
(2, 4),
(2, 5),
(3, 4),
(3, 5),
)
polys = (
(2, 4, 0),
(2, 0, 5),
(2, 5, 1),
(2, 1, 4),
(3, 0, 4),
(3, 5, 0),
(3, 1, 5),
(3, 4, 1),
)
class Dodecahedron(Object3D):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# lay out as cube + 3 rects as on
# https://en.wikipedia.org/wiki/Regular_dodecahedron?oldformat=true#Cartesian_coordinates
phi = (1 + sqrt(5)) / 2
vertices = np.array([
# cube
[1, 1, 1],
[1, -1, 1],
[-1, -1, 1],
[-1, 1, 1],
[1, 1, -1],
[1, -1, -1],
[-1, -1, -1],
[-1, 1, -1],
[phi, 1/phi, 0],
[phi, -1/phi, 0],
[-phi, -1/phi, 0],
[-phi, 1/phi, 0],
[0, phi, 1/phi],
[0, phi, -1/phi],
[0, -phi, -1/phi],
[0, -phi, 1/phi],
[1/phi, 0, phi],
[1/phi, 0, -phi],
[-1/phi, 0, -phi],
[-1/phi, 0, phi]
], dtype=np.float64)
self.edges = (
# one r/g/b vertex for each cube corner vertex
(0, 8),
(0, 12),
(0, 16),
(1, 9),
(1, 15),
(1, 16),
(2, 10),
(2, 15),
(2, 19),
(3, 11),
(3, 12),
(3, 19),
(4, 8),
(4, 13),
(4, 17),
(5, 9),
(5, 14),
(5, 17),
(6, 10),
(6, 14),
(6, 18),
(7, 11),
(7, 13),
(7, 18),
# lace up the rects exterior edges
# r
(8, 9),
(10, 11),
# g
(12, 13),
(14, 15),
# b
(17, 18),
(19, 16)
)
self.vertices = self.to_homogenous_coords(vertices / (2*phi))
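# Self-check sketch (uses only the class-level arrays above): every edge of a
# regular tetrahedron should come out the same length, here sqrt(8).
if __name__ == "__main__":
    v, e = Tetrahedron.vertices, Tetrahedron.edges
    lengths = {round(float(np.linalg.norm(v[a] - v[b])), 6) for a, b in e}
    print('tetrahedron edge lengths:', lengths)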
| [
"[email protected]"
]
| |
584f70937fd6dd88eaa1f8f64e86437ca7008d88 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc026/A/4566526.py | ac69b81f3ed3647b8383cbae78a92382afa0955c | []
| no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | n,a,b=map(int,input().split());print(min(n,5)*b+max(n-5,0)*a) | [
"[email protected]"
]
| |
8c958e900b806f0503625aae951c03d030a5cea1 | ebd6f68d47e192da7f81c528312358cfe8052c8d | /swig/Examples/test-suite/python/template_typedef_cplx4_runme.py | 25ac851fbff3855719300e610179db627047c152 | [
"Apache-2.0",
"LicenseRef-scancode-swig",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only"
]
| permissive | inishchith/DeepSpeech | 965ad34d69eb4d150ddf996d30d02a1b29c97d25 | dcb7c716bc794d7690d96ed40179ed1996968a41 | refs/heads/master | 2021-01-16T16:16:05.282278 | 2020-05-19T08:00:33 | 2020-05-19T08:00:33 | 243,180,319 | 1 | 0 | Apache-2.0 | 2020-02-26T05:54:51 | 2020-02-26T05:54:50 | null | UTF-8 | Python | false | false | 431 | py | import string
from template_typedef_cplx4 import *
#
# this is OK
#
s = Sin()
s.get_base_value()
s.get_value()
s.get_arith_value()
my_func_r(s)
make_Multiplies_double_double_double_double(s, s)
z = CSin()
z.get_base_value()
z.get_value()
z.get_arith_value()
my_func_c(z)
make_Multiplies_complex_complex_complex_complex(z, z)
#
# Here we fail
#
d = make_Identity_double()
my_func_r(d)
c = make_Identity_complex()
my_func_c(c)
| [
"[email protected]"
]
| |
27ccdbea81862874e0b78a77232a7d471e5f184a | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /Av2u6FKvzFvrtGEKS_18.py | 4e2d4cf4fdcb9ad90f1ec69e7cba9c1c762d567b | []
| no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py |
# Do not touch this starter code but implement the reverse function at the
# end of the LinkedList class
class Node(object):
def __init__(self, data):
self.data = data
self.next = None
class LinkedList(object):
def __init__(self):
self.head = None
self.tail = None
def insert(self, data):
new_node = Node(data)
if self.head == None:
self.head = self.tail = new_node
else:
self.tail.next = new_node
self.tail = new_node
def traverse(self):
if self.head == None:
return []
temp = self.head
result = []
while temp!=None:
result.append(temp.data)
temp = temp.next
return result
def reverse(self):
nodes = self.traverse()
self.head = self.tail = None
while nodes:
self.insert(nodes.pop(-1))
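# Usage sketch (hypothetical values): build 1 -> 2 -> 3, reverse in place,
# then read the values back out.
if __name__ == '__main__':
    ll = LinkedList()
    for v in (1, 2, 3):
        ll.insert(v)
    ll.reverse()
    print(ll.traverse())  # expected: [3, 2, 1]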
| [
"[email protected]"
]
| |
366e5b6c1921361a7577480414955fd30e18ee39 | 0547c3ebab814e3fdf2616ae63f8f6c87a0ff6c5 | /846.hand-of-straights.py | 1efee8792025199a30b3260fd14120bab6d55e5d | []
| no_license | livepo/lc | b8792d2b999780af5d5ef3b6050d71170a272ca6 | 605d19be15ece90aaf09b994098716f3dd84eb6a | refs/heads/master | 2020-05-15T03:57:15.367240 | 2019-07-30T03:11:46 | 2019-07-30T03:11:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | class Solution(object):
    def isNStraightHand(self, hand, W):
        """
        :type hand: List[int]
        :type W: int
        :rtype: bool
        """
        # Greedy: the smallest remaining card must start a run of W consecutive values.
        from collections import Counter
        if len(hand) % W:
            return False
        count = Counter(hand)
        for card in sorted(count):
            need = count[card]
            if need > 0:
                for nxt in range(card, card + W):
                    if count[nxt] < need:
                        return False
                    count[nxt] -= need
        return True
| [
"[email protected]"
]
| |
b67b5e6d66ad477d22a129a6bb6faf2a37a69867 | ad846a63f010b808a72568c00de016fbe86d6c35 | /algotradingenv/lib/python3.8/site-packages/IPython/external/decorators/_numpy_testing_noseclasses.py | 9f8f382391de958a20ccb9a35664f5c7c66ba463 | []
| no_license | krishansinghal29/algotrade | 74ee8b1c9113812b1c7c00ded95d966791cf76f5 | 756bc2e3909558e9ae8b2243bb4dabc530f12dde | refs/heads/master | 2023-06-02T01:53:24.924672 | 2021-06-10T09:17:55 | 2021-06-10T09:17:55 | 375,641,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,417 | py | # IPython: modified copy of numpy.testing.noseclasses, so
# IPython.external._decorators works without numpy being installed.
# These classes implement a "known failure" error class.
import os
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
class KnownFailureTest(Exception):
"""Raise this exception to mark a test as a known failing test."""
pass
class KnownFailure(ErrorClassPlugin):
"""Plugin that installs a KNOWNFAIL error class for the
KnownFailureClass exception. When KnownFailureTest is raised,
the exception will be logged in the knownfail attribute of the
result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
exception will not be counted as an error or failure."""
enabled = True
knownfail = ErrorClass(KnownFailureTest, label="KNOWNFAIL", isfailure=False)
def options(self, parser, env=os.environ):
env_opt = "NOSE_WITHOUT_KNOWNFAIL"
parser.add_option(
"--no-knownfail",
action="store_true",
dest="noKnownFail",
default=env.get(env_opt, False),
help="Disable special handling of KnownFailureTest " "exceptions",
)
def configure(self, options, conf):
if not self.can_configure:
return
self.conf = conf
disable = getattr(options, "noKnownFail", False)
if disable:
self.enabled = False
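# Usage sketch (hypothetical test function): with this plugin enabled, raising
# KnownFailureTest marks a test KNOWNFAIL instead of counting it as a failure.
#   def test_not_ready():
#       raise KnownFailureTest("tracked upstream")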
| [
"[email protected]"
]
| |
9a91b60c24903f61054fed747c3be85c66cb2793 | 256f817910dd698970fab89871c6ce66a3c416e7 | /1. solvedProblems/340. Longest Substring with At Most K Distinct Characters/340.py | e1fd7e173bc2c9b114189909699c70c7543f9303 | []
| no_license | tgaochn/leetcode | 5926c71c1555d2659f7db4eff9e8cb9054ea9b60 | 29f1bd681ae823ec6fe755c8f91bfe1ca80b6367 | refs/heads/master | 2023-02-25T16:12:42.724889 | 2021-02-04T21:05:34 | 2021-02-04T21:05:34 | 319,225,860 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,982 | py | # !/usr/bin/env python
# coding: utf-8
"""
Author:
Tian Gao ([email protected])
CreationDate:
Sat, 11/28/2020, 20:48
# !! Description:
"""
import sys
from typing import List
sys.path.append('..')
from utils import binaryTree, nTree, singleLinkedList
from utils.utils import (
printMatrix,
printDict,
printList,
isMatrix,
)
ListNode = singleLinkedList.ListNode
TreeNode = binaryTree.TreeNode
Node = nTree.Node
null = None
testCaseCnt = 6
# maxFuncInputParaCnt = 8
# !! step1: replace these two lines with the given code
class Solution:
def lengthOfLongestSubstringKDistinct(self, s: str, k: int) -> int:
if not k or not s: return 0
from collections import deque
n = len(s)
l, r = 0, 0
win = deque()
self.freqHash = {}
# maxStr = ''
maxLen = -float('inf')
def isValidRlt():
return len(self.freqHash) <= k
def removeEle(ele):
if self.freqHash[ele] == 1:
del self.freqHash[ele]
else:
self.freqHash[ele] -= 1
def addEle(ele):
self.freqHash.setdefault(ele, 0)
self.freqHash[ele] += 1
while r < n:
if not isValidRlt():
eleL = win.popleft()
removeEle(eleL)
l += 1
else:
if len(win) > maxLen:
maxLen = len(win)
# maxStr = ''.join(list(win))
eleR = s[r]
win.append(eleR)
addEle(eleR)
r += 1
# while not maxStr and l < n:
while maxLen >= 0 and l < n:
if isValidRlt():
if len(win) > maxLen:
maxLen = len(win)
eleL = win.popleft()
removeEle(eleL)
l += 1
return maxLen
# endFunc
# endClass
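# Added complexity note: each index enters and leaves the deque at most once,
# so the scan is O(n) time, with at most k+1 keys alive in freqHash.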
def func():
# !! step2: change function name
s = Solution()
myFuncLis = [
s.lengthOfLongestSubstringKDistinct,
# optional: add another function for comparison
]
onlyDisplayError = True
enableInput = [True] * testCaseCnt
input = [None] * testCaseCnt
expectedRlt = [None] * testCaseCnt
# enableInput[0] = False
# enableInput[1] = False
# enableInput[2] = False
# enableInput[3] = False
# enableInput[4] = False
# enableInput[5] = False
# !! step3: change input para, input para can be found in "run code" - "test case"
# ! para1
input[0] = (
"eceba",
2,
# binaryTree.buildTree(None)
# singleLinkedList.buildSingleList(None)
# nTree.buildTree(None)
)
expectedRlt[0] = 3
# ! para2
input[1] = (
None
# binaryTree.buildTree(None),
# singleLinkedList.buildSingleList(None),
# nTree.buildTree(None),
)
expectedRlt[1] = None
# ! para3
input[2] = (
None
# singleLinkedList.buildSingleList(None),
# binaryTree.buildTree(None),
# nTree.buildTree(None),
)
expectedRlt[2] = None
# ! para4
input[3] = (
None
# singleLinkedList.buildSingleList(None),
# binaryTree.buildTree(None),
# nTree.buildTree(None),
)
expectedRlt[3] = None
# ! para5
input[4] = (
None
# singleLinkedList.buildSingleList(None),
# binaryTree.buildTree(None),
# nTree.buildTree(None),
)
expectedRlt[4] = None
# ! para6
input[5] = (
None
# singleLinkedList.buildSingleList(None),
# binaryTree.buildTree(None),
# nTree.buildTree(None),
)
expectedRlt[5] = None
# !! ====================================
# function and parameters count
allInput = [(input[i], enableInput[i], expectedRlt[i]) for i in range(testCaseCnt)]
if not input[0]:
print("ERROR: please assign at least one input for input[0]!")
exit()
funcParaCnt = 1 if not isinstance(input[0], tuple) else len(input[0])
funcCnt = len(myFuncLis)
# for each test case
for inputPara, enableInput, expectedRlt in allInput:
if not enableInput or not inputPara: continue
inputParaList = [None] * funcParaCnt
if not isinstance(inputPara, tuple):
inputPara = [inputPara]
for j in range(funcParaCnt):
inputParaList[j] = inputPara[j]
# for each function
for j in range(funcCnt):
print('==' * 20)
myFunc = myFuncLis[j]
# ! manually call function, max para count: 8
rlt = None
if funcParaCnt == 1:
rlt = myFunc(inputPara[0])
if funcParaCnt == 2:
rlt = myFunc(inputPara[0], inputPara[1])
if funcParaCnt == 3:
rlt = myFunc(inputPara[0], inputPara[1], inputPara[2])
if funcParaCnt == 4:
rlt = myFunc(inputPara[0], inputPara[1], inputPara[2], inputPara[3])
if funcParaCnt == 5:
rlt = myFunc(inputPara[0], inputPara[1], inputPara[2], inputPara[3], inputPara[4])
if funcParaCnt == 6:
rlt = myFunc(inputPara[0], inputPara[1], inputPara[2], inputPara[3], inputPara[4], inputPara[5])
if funcParaCnt == 7:
rlt = myFunc(inputPara[0], inputPara[1], inputPara[2], inputPara[3], inputPara[4], inputPara[5], inputPara[6])
if funcParaCnt == 8:
rlt = myFunc(inputPara[0], inputPara[1], inputPara[2], inputPara[3], inputPara[4], inputPara[5], inputPara[6], inputPara[7])
# only output when the result is not expected
if onlyDisplayError and expectedRlt is not None and expectedRlt == rlt: continue
# output function name
if funcCnt > 1:
print('func: \t%s' % myFunc.__name__)
# output para
for k in range(funcParaCnt):
para = inputParaList[k]
formatPrint('input %s:' % (k + 1), para)
# output result
print()
if not rlt:
print('rlt:\t', rlt)
else:
formatPrint('rlt:', rlt)
if expectedRlt is not None:
if not expectedRlt:
print('expRlt:\t', expectedRlt)
else:
formatPrint('expRlt:', expectedRlt)
print('==' * 20)
# endFunc
def isSpecialInstance(myInstance):
for curType in [TreeNode, Node]:
if isinstance(myInstance, curType):
return True
return False
# endFunc
def formatPrint(prefix, data):
if isMatrix(data):
print('%s' % prefix)
printMatrix(data)
else:
splitter = '\n' if isSpecialInstance(data) else '\t'
print('%s%s%s' % (prefix, splitter, data))
# endFunc
def main():
func()
# endMain
if __name__ == "__main__":
main()
# endIf
| [
"[email protected]"
]
| |
ea693066e5c2cfa3a129e92b9162b3156c200ed6 | 60598454222bc1e6d352993f9c4cd164cd6cc9cd | /core/migrations/0014_auto_20200723_1127.py | f07013abc7d877cba2d16a2195b83a8886e01144 | []
| no_license | nicksonlangat/mychurch | 12be8911ce1497d7c6a595d06275f21ecf58b185 | e503828cab165c9edcde89b3ef6d7c06b5eb7fdb | refs/heads/master | 2023-08-10T15:36:06.208376 | 2020-07-23T09:52:19 | 2020-07-23T09:52:19 | 281,030,716 | 0 | 1 | null | 2021-09-22T19:35:09 | 2020-07-20T06:15:58 | Python | UTF-8 | Python | false | false | 498 | py | # Generated by Django 3.0.8 on 2020-07-23 08:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0013_attendance_status'),
]
operations = [
migrations.RemoveField(
model_name='service',
name='seat_capacity',
),
migrations.AddField(
model_name='service',
name='seats',
field=models.ManyToManyField(to='core.Seat'),
),
]
| [
"[email protected]"
]
| |
78f3b9f5927206d15c77dd073f490b9202ab0fc2 | cac93d697f9b3a75f059d725dee0251a8a81bf61 | /robot/install/lib/python2.7/dist-packages/ur_dashboard_msgs/msg/_SetModeGoal.py | 7628590a2f33e2c657df2d3e8743b53b989e0882 | [
"BSD-3-Clause"
]
| permissive | satvu/TeachBot | c1394f2833649fdd72aa5b32719fef4c04bc4f70 | 5888aea544fea952afa36c097a597c5d575c8d6d | refs/heads/master | 2020-07-25T12:21:34.240127 | 2020-03-09T20:51:54 | 2020-03-09T20:51:54 | 208,287,475 | 0 | 0 | BSD-3-Clause | 2019-09-13T15:00:35 | 2019-09-13T15:00:35 | null | UTF-8 | Python | false | false | 5,203 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from ur_dashboard_msgs/SetModeGoal.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SetModeGoal(genpy.Message):
_md5sum = "6832df07338535cc06b3835f89ba9555"
_type = "ur_dashboard_msgs/SetModeGoal"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# This action is for setting the robot into a desired mode (e.g. RUNNING) and safety mode into a
# non-critical state (e.g. NORMAL or REDUCED), for example after a safety incident happened.
# goal
int8 target_robot_mode
# Stop program execution before restoring the target mode. Can be used together with 'play_program'.
bool stop_program
# Play the currently loaded program after target mode is reached.#
# NOTE: Requesting mode RUNNING in combination with this will make the robot continue the motion it
# was doing before. This might probably lead into the same problem (protective stop, EM-Stop due to
# faulty motion, etc.) If you want to be safe, set the 'stop_program' flag below and manually play
# the program after robot state is returned to normal.
# This flag will only be used when requesting mode RUNNING
bool play_program
"""
__slots__ = ['target_robot_mode','stop_program','play_program']
_slot_types = ['int8','bool','bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
target_robot_mode,stop_program,play_program
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SetModeGoal, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.target_robot_mode is None:
self.target_robot_mode = 0
if self.stop_program is None:
self.stop_program = False
if self.play_program is None:
self.play_program = False
else:
self.target_robot_mode = 0
self.stop_program = False
self.play_program = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_b2B().pack(_x.target_robot_mode, _x.stop_program, _x.play_program))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 3
(_x.target_robot_mode, _x.stop_program, _x.play_program,) = _get_struct_b2B().unpack(str[start:end])
self.stop_program = bool(self.stop_program)
self.play_program = bool(self.play_program)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_b2B().pack(_x.target_robot_mode, _x.stop_program, _x.play_program))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 3
(_x.target_robot_mode, _x.stop_program, _x.play_program,) = _get_struct_b2B().unpack(str[start:end])
self.stop_program = bool(self.stop_program)
self.play_program = bool(self.play_program)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_b2B = None
def _get_struct_b2B():
global _struct_b2B
if _struct_b2B is None:
_struct_b2B = struct.Struct("<b2B")
return _struct_b2B
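# --- Illustrative usage (editor's sketch; not part of the generated file) ---
# Assumes genpy is installed; the mode value 7 is an arbitrary placeholder,
# real robot-mode constants live elsewhere in ur_dashboard_msgs.
if __name__ == '__main__':
    from io import BytesIO
    goal = SetModeGoal(target_robot_mode=7, stop_program=True, play_program=False)
    buff = BytesIO()
    goal.serialize(buff)
    decoded = SetModeGoal().deserialize(buff.getvalue())
    print(decoded.target_robot_mode, decoded.stop_program, decoded.play_program)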
| [
"[email protected]"
]
| |
e77b9bf7ab6d5437d6b040caef3e6915f04fffca | a71582e89e84a4fae2595f034d06af6d8ad2d43a | /tensorflow/python/data/experimental/kernel_tests/optimization/make_numa_aware_test.py | d79ae4387c868d4821ac65787ba0bc04d47cc7d3 | [
"Apache-2.0"
]
| permissive | tfboyd/tensorflow | 5328b1cabb3e24cb9534480fe6a8d18c4beeffb8 | 865004e8aa9ba630864ecab18381354827efe217 | refs/heads/master | 2021-07-06T09:41:36.700837 | 2019-04-01T20:21:03 | 2019-04-01T20:26:09 | 91,494,603 | 3 | 0 | Apache-2.0 | 2018-07-17T22:45:10 | 2017-05-16T19:06:01 | C++ | UTF-8 | Python | false | false | 1,813 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MakeNumaAware` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MakeNumaAwareTest(test_base.DatasetTestBase):
def testMakeNumaAware(self):
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(["NumaMapAndBatch"])).apply(
batching.map_and_batch(lambda x: x * x, 10))
options = dataset_ops.Options()
options.experimental_numa_aware = True
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
self.assertDatasetProduces(
dataset, expected_output=[[x * x for x in range(10)]])
if __name__ == "__main__":
test.main()
| [
"[email protected]"
]
| |
0430b585c6f5da83bef5507cb158267ac18d89c4 | 63b1a78452cb4204e501e023bd9f3c8a364b723c | /test_nbdev/_nbdev.py | 358f0ff246827f6c9ce7115b0bbb8ec347081e0d | [
"Apache-2.0"
]
| permissive | teddyxiong53/test_nbdev | 03e22ef361a1768bc14f83cf617b8ab5fd172663 | 11d4ca82eedb45f4a3f687bd3e3d06336ebcbe9c | refs/heads/master | 2023-07-17T21:19:41.518320 | 2021-09-12T01:52:50 | 2021-09-12T01:52:50 | 405,512,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"say_hello": "00_core.ipynb"}
modules = ["core.py"]
doc_url = "https://teddyxiong53.github.io/test_nbdev/"
git_url = "https://github.com/teddyxiong53/test_nbdev/tree/master/"
def custom_doc_links(name): return None
| [
"[email protected]"
]
| |
241cafabc1786d18738a3dbb2c5762712ff8cf93 | 98ca37f5dd2751efaa060cca19e0b83f871d7765 | /sdk/translation/azure-ai-translation-document/tests/test_all_document_statuses.py | 57e1a2f437a4cfaf8be9034e89d309b760822451 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
]
| permissive | jayhebe/azure-sdk-for-python | 5ea99732ebb9929d3f6f77c08cc640d5915970b1 | f4455f85d9fe747fa4de2fdc691b975c07bfeea5 | refs/heads/main | 2023-06-24T01:22:06.602194 | 2021-07-28T02:12:25 | 2021-07-28T02:12:25 | 390,290,984 | 1 | 0 | MIT | 2021-07-28T09:23:46 | 2021-07-28T09:23:46 | null | UTF-8 | Python | false | false | 8,190 | py | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from datetime import datetime
import functools
from testcase import DocumentTranslationTest
from preparer import DocumentTranslationPreparer, DocumentTranslationClientPreparer as _DocumentTranslationClientPreparer
from azure.ai.translation.document import DocumentTranslationClient
import pytest
DocumentTranslationClientPreparer = functools.partial(_DocumentTranslationClientPreparer, DocumentTranslationClient)
class TestAllDocumentStatuses(DocumentTranslationTest):
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
def test_list_document_statuses(self, client):
docs_count = 5
target_language = "es"
# submit and validate operation
poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
# list docs statuses
doc_statuses = list(client.list_all_document_statuses(poller.id)) # convert from generic iterator to list
self.assertEqual(len(doc_statuses), docs_count)
for document in doc_statuses:
self._validate_doc_status(document, target_language)
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
def test_list_document_statuses_with_pagination(self, client):
docs_count = 10
results_per_page = 2
no_of_pages = docs_count // results_per_page
target_language = "es"
# submit and validate operation
poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
# check doc statuses
doc_statuses_pages = list(client.list_all_document_statuses(translation_id=poller.id, results_per_page=results_per_page).by_page())
self.assertEqual(len(doc_statuses_pages), no_of_pages)
# iterate by page
for page in doc_statuses_pages:
page_items = list(page)
self.assertLessEqual(len(page_items), results_per_page)
for document in page_items:
self._validate_doc_status(document, target_language)
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
def test_list_document_statuses_with_skip(self, client):
docs_count = 10
skip = 2
target_language = "es"
# submit and validate operation
poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
# check doc statuses
doc_statuses = list(client.list_all_document_statuses(translation_id=poller.id, skip=skip))
self.assertEqual(len(doc_statuses), docs_count - skip)
# iterate over docs
for document in doc_statuses:
self._validate_doc_status(document, target_language)
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
def test_list_document_statuses_filter_by_status(self, client):
docs_count = 10
target_language = "es"
# submit and validate operation
poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
# list operations
statuses = ["NotStarted"]
doc_statuses = list(client.list_all_document_statuses(poller.id, statuses=statuses))
assert(len(doc_statuses) == 0)
statuses = ["Succeeded"]
doc_statuses = list(client.list_all_document_statuses(poller.id, statuses=statuses))
assert(len(doc_statuses) == docs_count)
statuses = ["Failed"]
doc_statuses = list(client.list_all_document_statuses(poller.id, statuses=statuses))
assert(len(doc_statuses) == 0)
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
def test_list_document_statuses_filter_by_ids(self, client):
docs_count = 5
target_language = "es"
# submit and validate operation
poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
# filter ids
doc_statuses = list(client.list_all_document_statuses(poller.id)) # convert from generic iterator to list
self.assertEqual(len(doc_statuses), docs_count)
ids = [doc.id for doc in doc_statuses]
ids = ids[:docs_count//2]
# do the testing
doc_statuses = list(client.list_all_document_statuses(poller.id, document_ids=ids))
self.assertEqual(len(doc_statuses), len(ids))
for document in doc_statuses:
self._validate_doc_status(document, target_language, ids=ids)
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
def test_list_document_statuses_order_by_creation_time_asc(self, client):
docs_count = 5
target_language = "es"
# submit and validate operation
poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
# check doc statuses
doc_statuses = list(client.list_all_document_statuses(poller.id, order_by=["created_on asc"])) # convert from generic iterator to list
self.assertEqual(len(doc_statuses), docs_count)
curr = datetime.min
for document in doc_statuses:
assert(document.created_on.replace(tzinfo=None) >= curr.replace(tzinfo=None))
curr = document.created_on
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
def test_list_document_statuses_order_by_creation_time_desc(self, client):
docs_count = 5
target_language = "es"
# submit and validate operation
poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
# check doc statuses
doc_statuses = list(client.list_all_document_statuses(poller.id, order_by=["created_on desc"])) # convert from generic iterator to list
self.assertEqual(len(doc_statuses), docs_count)
curr = datetime.max
for document in doc_statuses:
assert(document.created_on.replace(tzinfo=None) <= curr.replace(tzinfo=None))
curr = document.created_on
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
def test_list_document_statuses_mixed_filters(self, client):
docs_count = 10
target_language = "es"
skip = 1
results_per_page = 2
statuses = ["Succeeded"]
# submit and validate operation
poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
# get ids
doc_statuses = list(client.list_all_document_statuses(poller.id)) # convert from generic iterator to list
self.assertEqual(len(doc_statuses), docs_count)
ids = [doc.id for doc in doc_statuses]
ids = ids[:docs_count//2]
filtered_docs = client.list_all_document_statuses(
poller.id,
# filters
document_ids=ids,
statuses=statuses,
# ordering
order_by=["created_on asc"],
# paging
skip=skip,
results_per_page=results_per_page
).by_page()
self.assertIsNotNone(filtered_docs)
# check statuses
counter = 0
curr_time = datetime.min
for page in filtered_docs:
page_docs = list(page)
self.assertLessEqual(len(page_docs), results_per_page) # assert paging
for doc in page_docs:
counter += 1
# assert ordering
assert(doc.created_on.replace(tzinfo=None) >= curr_time.replace(tzinfo=None))
curr_time = doc.created_on
# assert filters
self.assertIn(doc.status, statuses)
self.assertIn(doc.id, ids)
assert(counter == len(ids) - skip)
| [
"[email protected]"
]
| |
f06015cdef49de3c91fb4a6212eece1e0d38b437 | c0bc042e73825a89949c1df1daefc41796903ae1 | /youtube/urls.py | aac35fa9fa3fba7ce66c1b936fd40c853849efae | []
| no_license | Pennylele/pennyfang_portfolio | 389aa93f392701ef5fa4f1a129d98c0ddd373dbc | b29706123b860d2378d89f0daa462b33e4609a68 | refs/heads/master | 2023-09-05T11:33:29.847845 | 2021-11-24T19:32:15 | 2021-11-24T19:32:15 | 325,154,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | from .views import VideoListView, SearchResultsView, FilterByViews, VideoDetailView, Sync
from django.urls import path
app_name='youtube'
urlpatterns = [
path('', VideoListView.as_view(), name='home'),
path('search/', SearchResultsView.as_view(), name='search_results'),
path('sort-by-views/', FilterByViews.as_view(), name='sort_views'),
path('video-detail/<slug>/', VideoDetailView.as_view(), name='video_detail'),
path('sync/', Sync, name='sync'),
] | [
"[email protected]"
]
| |
5bd4b629e2c1439c220548e9247835c48992f28e | fac96b4c97150e02f1405f7430c89b115e4c27f7 | /ch08/ex8-20.printing_models.py | bfef927128761d040af4e85f8ced90253a583a32 | []
| no_license | gustavonvp/PYTHON-CRASH-COURSE | 37478990ff3c3c368da505eb9e5a35dee5d1960b | 8033e2eb84cf6d85fd4ff42ae0550f38dcd23f62 | refs/heads/master | 2023-04-03T00:42:20.333183 | 2017-10-24T05:47:01 | 2017-10-24T05:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | # Start with some designs that need to be printed.
unprinted_designs = ['iphone case', 'robot pendant', 'dodecahedron']
completed_models = []
# Simulate printing each design, until none are left.
# Move each design to completed_models after printing.
while unprinted_designs:
current_design = unprinted_designs.pop()
    # Simulate creating a 3D print from the design.
print("Printing model: " + current_design)
completed_models.append(current_design)
# Display all completed models.
print("\nThe following models have been printed:")
for completed_model in completed_models:
print(completed_model)
| [
"[email protected]"
]
| |
0a7e4ac3a6aa381a2be9b21e6ff39af814db7972 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/joblib/parallel.py | dff07a7420ad4d2662baa74a296be91ffc236c13 | [
"MIT"
]
| permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:10afb49d59c3778a8fd053bbec2d63b85f3b24f63a308df37f309126a62f3571
size 46534
| [
"[email protected]"
]
| |
439bbcad449efc5d0400e7a0ddcae109c2ef7bc2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02682/s885339008.py | 44ace31b98dd71329482c26565d401a8c6238465 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | a,b,c,k=list(map(int,input().split()))
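# Greedy: take all A "+1" cards first, then the B "0" cards; any picks beyond
# a + b are forced "-1" cards, giving a score of 2*a + b - k.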
if k<=a+b:
print(min(a,k))
else:
print(2*a-k+b) | [
"[email protected]"
]
| |
5d8c2c21c425289bf070970045fc338486eb2e08 | 66f383fec502102bfec58ed8cb9c43a71e599c55 | /apps/accounts/events.py | f34b3398d34130125064588eeea3a67c4b10f9ab | [
"MIT"
]
| permissive | hacktoolkit/django-htk | 0a984a28f7fbc7eed8e2b1975d210792ddbee829 | 935c4913e33d959f8c29583825f72b238f85b380 | refs/heads/master | 2023-08-08T11:52:54.298160 | 2023-07-21T19:08:37 | 2023-07-21T19:08:37 | 15,924,904 | 210 | 65 | MIT | 2023-09-08T23:59:28 | 2014-01-15T04:23:40 | Python | UTF-8 | Python | false | false | 1,082 | py | # Python Standard Library Imports
# Third Party (PyPI) Imports
import rollbar
# HTK Imports
from htk.utils import htk_setting
from htk.utils.notifications import slack_notify
def failed_recaptcha_on_login(user, request=None):
extra_data = {
'user' : {
'id': user.id,
'username': user.username,
'email': user.email,
},
}
message = 'Failed reCAPTCHA. Suspicious login detected.'
rollbar.report_message(
message,
request=request,
extra_data=extra_data
)
if htk_setting('HTK_SLACK_NOTIFICATIONS_ENABLED'):
slack_message = '%s User: %s <%s>' % (
message,
user.username,
user.email,
)
slack_notify(slack_message, level='warning')
def failed_recaptcha_on_account_register(request=None):
message = 'Failed reCAPTCHA. Suspicious account registration detected.'
rollbar.report_message(message, request=request)
if htk_setting('HTK_SLACK_NOTIFICATIONS_ENABLED'):
slack_notify(message, level='warning')
| [
"[email protected]"
]
| |
a70d21ed2a48720e2b9c9a258bcb1b746dda2e1a | 73e277935ef28fd05935c93a3f155c9cc6dc6de7 | /ctf/crypto/source/rbtree/rb2.py | 81ec357f0f8e932791d02d3999f89d0fbf068e95 | []
| no_license | ohmygodlin/snippet | 5ffe6b8fec99abd67dd5d7f819520e28112eae4b | 21d02015492fb441b2ad93b4a455dc4a145f9913 | refs/heads/master | 2023-01-08T14:59:38.618791 | 2022-12-28T11:23:23 | 2022-12-28T11:23:23 | 190,989,347 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,501 | py | # -*- coding: utf-8 -*-
BLACK = 0
RED = 1
#graphic elements of rbtree for printing
VC = '│'
HC = '─'
SIZE = 3
RIG = '┌' + HC * SIZE
LEF = '└' + HC * SIZE
SP = chr(32)
IND1 = SP * (SIZE + 1)
IND2 = VC + SP * SIZE
class rbnode(object):
def __init__(self, key=None, value=None, color=BLACK,left=None,right=None,p=None):
self.key = key
self.value = value
self.color = color
self.left = left
self.right = right
self.p = p
def __repr__(self):
return '%s%s%s' % (self.key,'◆' if self.color is BLACK else '◇',self.value )
_NONE=rbnode()
class rbtree(object):
def __init__(self, data=False,default_value=0, nodes=None):
if nodes:
self.root = nodes[28]
self.default_value = default_value #for method: force_search
self.nil = _NONE
else:
self.nil = _NONE
self.root = self.nil
self.default_value = default_value #for method: force_search
if hasattr(data, '__iter__'):
for key, value in data:
self.insert(rbnode(key,value))
def __repr__(self):
return '\n'.join(self.graph())
def graph(self, x=False, prefix=''):
"beautifully print rbtree, big key node first"
if x is False:
x = self.root
if x is not self.nil:
p = x.p
last_prefix = ''
if p is not self.nil:
pp = p.p
last_prefix = LEF if p.left is x else RIG
if pp is not self.nil:
if (pp.left is p) is (p.left is x):
prefix = prefix + IND1
else:
prefix = prefix + IND2
yield from self.graph(x.right, prefix)
yield '%s%s%s' % (prefix, last_prefix, x)
yield from self.graph(x.left, prefix)
def search(self, key, x=False):
"find node according to key, return self.nil if not found"
if x is False:
x = self.root
while (x is not self.nil) and (key != x.key):
if key < x.key:
x = x.left
else:
x = x.right
return x
def insert(self, z):
"insert z node with key and value"
y = self.nil
x = self.root
while x is not self.nil:
y = x
if z.key < x.key:
x = x.left
else:
x = x.right
z.p = y
if y is self.nil:
self.root = z
elif z.key < y.key:
y.left = z
else:
y.right = z
z.left = self.nil
z.right = self.nil
z.color = RED
self.insert_fixup(z)
def delete(self, z):
y = z
y_original_color = y.color
if z.left is self.nil:
x = z.right
self.transplant(z, x)
elif z.right is self.nil:
x = z.left
self.transplant(z, x)
else:
y = self.minimum(z.right)
y_original_color = y.color
x = y.right
if y.p is z:
x.p = y
else:
self.transplant(y, x)
y.right = z.right
y.right.p = y
self.transplant(z, y)
y.left = z.left
y.left.p = y
y.color = z.color
if y_original_color is BLACK:
self.delete_fixup(x)
def is_empty(self):
return self.root is self.nil
def right_walk(self, x=False):
if x is False:
x = self.root
if x is not self.nil:
yield from self.right_walk(x.right)
yield x
yield from self.right_walk(x.left)
def left_walk(self, x=False):
if x is False:
x = self.root
if x is not self.nil:
yield from self.left_walk(x.left)
yield x
yield from self.left_walk(x.right)
def force_search(self,key):
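        "find node with the given key; if absent, insert a new node with self.default_value and return it"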
y = self.nil
x = self.root
while x is not self.nil:
if key == x.key:
return x
y = x
if key < x.key:
x = x.left
else:
x = x.right
z = rbnode()
original_z = z
z.key = key
z.value = self.default_value
z.p = y
if y is self.nil:
self.root = z
elif z.key < y.key:
y.left = z
else:
y.right = z
z.left = self.nil
z.right = self.nil
z.color = RED
self.insert_fixup(z)
return original_z
def maximum(self, x=False):
if x is False:
x = self.root
while x.right is not self.nil:
x = x.right
return x
def minimum(self, x=False):
if x is False:
x = self.root
while x.left is not self.nil:
x = x.left
return x
def successor(self, x):
"return node with smallest key greater than x.key"
if x.right is not self.nil:
return self.minimum(x.right)
y = x.p
while (y is not self.nil) and (x is y.right):
x = y
y = y.p
return y
def predecessor(self, x):
"return node with biggest key lower than x.key"
if x.left is not self.nil:
return self.maximum(x.left)
y = x.p
while (y is not self.nil) and (x is y.left):
x = y
y = y.p
return y
def left_rotate(self, x):
y = x.right
x.right = y.left
if y.left is not self.nil:
y.left.p = x
y.p = x.p
if x.p is self.nil:
self.root = y
else:
if x is x.p.left:
x.p.left = y
else:
x.p.right = y
y.left = x
x.p = y
def right_rotate(self, x):
y = x.left
x.left = y.right
if y.right is not self.nil:
y.right.p = x
y.p = x.p
if x.p is self.nil:
self.root = y
else:
if x is x.p.right:
x.p.right = y
else:
x.p.left = y
y.right = x
x.p = y
def insert_fixup(self, z):
while z.p.color is RED:
if z.p is z.p.p.left:
y = z.p.p.right
if y.color is RED:
z.p.color = BLACK
y.color = BLACK
z.p.p.color = RED
z = z.p.p
else:
if z is z.p.right:
z = z.p
self.left_rotate(z)
z.p.color = BLACK
z.p.p.color = RED
self.right_rotate(z.p.p)
else:
y = z.p.p.left
if y.color is RED:
z.p.color = BLACK
y.color = BLACK
z.p.p.color = RED
z = z.p.p
else:
if z is z.p.left:
z = z.p
self.right_rotate(z)
z.p.color = BLACK
z.p.p.color = RED
self.left_rotate(z.p.p)
self.root.color = BLACK
def delete_fixup(self, x):
while (x is not self.root) and (x.color is BLACK):
if x is x.p.left:
w = x.p.right
if w.color is RED:
w.color = BLACK
x.p.color = RED
self.left_rotate(x.p)
w = x.p.right
if (w.left.color is BLACK) and (w.right.color is BLACK):
w.color = RED
x = x.p
else:
if w.right.color is BLACK:
w.left.color = BLACK
w.color = RED
self.right_rotate(w)
w = x.p.right
w.color = x.p.color
x.p.color = BLACK
w.right.color = BLACK
self.left_rotate(x.p)
x = self.root
else:
w = x.p.left
if w.color is RED:
w.color = BLACK
x.p.color = RED
self.right_rotate(x.p)
w = x.p.left
if (w.right.color is BLACK) and (w.left.color is BLACK):
w.color = RED
x = x.p
else:
if w.left.color is BLACK:
w.right.color = BLACK
w.color = RED
self.left_rotate(w)
w = x.p.left
w.color = x.p.color
x.p.color = BLACK
w.left.color = BLACK
self.right_rotate(x.p)
x = self.root
x.color = BLACK
def transplant(self, u, v):
if u.p is self.nil:
self.root = v
elif u is u.p.left:
u.p.left = v
else:
u.p.right = v
v.p = u.p
if __name__ == '__main__':
_str=" ek`~3c:gf017b744/b38fd~abm7g5489e2{lf6z8d16hae`98}b|-21m.e:"
nodes=[_NONE]
for i in range(1,60):
nodes.append( rbnode(key=i,value=_str[i]) )
# node, color, l,r,p
tree=[
[1,BLACK,0,2,3],
[2,RED,0,0,1],
[3,RED,1,4,6],
[4,BLACK,0,5,3],
[5,RED,0,0,4],
[6,BLACK,3,8,10],
[7,RED,0,0,8],
[8,BLACK,7,9,6],
[9,RED,0,0,8],
[10,RED,6,18,23],
[11,RED,0,0,12],
[12,BLACK,11,13,14],
[13,RED,0,0,12],
[14,RED,12,16,18],
[15,RED,0,0,16],
[16,BLACK,15,17,14],
[17,RED,0,0,16],
[18,BLACK,14,20,10],
[19,BLACK,0,0,20],
[20,RED,19,21,18],
[21,BLACK,0,22,20],
[22,RED,0,0,21],
[23,BLACK,10,26,28],
[24,RED,0,0,25],
[25,BLACK,24,0,26],
[26,BLACK,25,27,23],
[27,BLACK,0,0,26],
[28,BLACK,23,43,0],
[29,RED,0,0,30],
[30,BLACK,29,31,32],
[31,RED,0,0,30],
[32,BLACK,30,34,35],
[33,RED,0,0,34],
[34,BLACK,33,0,32],
[35,RED,32,37,43],
[36,BLACK,0,0,37],
[37,BLACK,36,40,35],
[38,BLACK,0,39,40],
[39,RED,0,0,38],
[40,RED,38,41,37],
[41,BLACK,0,42,40],
[42,RED,0,0,41],
[43,BLACK,35,53,28],
[44,BLACK,0,0,45],
[45,RED,44,46,48],
[46,BLACK,0,47,45],
[47,RED,0,0,46],
[48,BLACK,45,50,53],
[49,BLACK,0,0,50],
[50,RED,49,51,48],
[51,BLACK,0,52,50],
[52,RED,0,0,51],
[53,RED,48,57,43],
[54,RED,0,0,55],
[55,BLACK,54,56,57],
[56,RED,0,0,55],
[57,BLACK,55,59,53],
[58,RED,0,0,59],
[59,BLACK,58,0,57],
]
for i in range(len(tree)):
nodes[tree[i][0]].color=tree[i][1]
nodes[tree[i][0]].left=nodes[tree[i][2]]
nodes[tree[i][0]].right=nodes[tree[i][3]]
nodes[tree[i][0]].p=nodes[tree[i][4]]
tr=rbtree(nodes=nodes)
# print(tr.nil is _NONE)
# print("after build from dict:")
# print(tr)
# tr=rbtree(data={'1':'1','2':'2'}.items())
# null=tr.root.p
print(tr)
for i in [18,35,53,50,14,28,19,6,54,36]:
tr.delete(tr.force_search(i))
s=""
for i in [8,56,47,37,52,34,17,8,8,29,7,47,40,57,46,24,34,34,57,29,22,5,16,57,24,29,8,12,57,12,12,21,33,34,55,51,22,45,34,31,1,23]:
        node = tr.force_search(i)
        # In Python 3 a trailing comma after print() does not suppress the
        # newline, so use end='' to emit the decoded characters contiguously.
        if node.color == BLACK:
            print(chr(ord(node.value) - 1), end='')
        else:
            print(chr(ord(node.value) + 1), end='')
# s=s+node.value
# print(s)
# print(.value)
# for c in 'fghij':
# nd = tr.force_search(c)
# nd.value += 1
# print("\nafter insert from a string:")
# print(tr)
# while tr.root!=tr.nil:
# tr.delete(tr.root)
# print("\nafter delete all node:")
# print(tr)
| [
"[email protected]"
]
| |
cb662f3f1ec68283494e7dd6ab627411ce475ddd | 4bd818bc9bd83ed39c9d48b0e4e4821a2b8c45d9 | /src/etherollapp/etheroll/customtoolbar.py | d31f906fb38e24b9465769aeaa3b7894757e17a9 | [
"MIT"
]
| permissive | AndreMiras/EtherollApp | 8ef158e9e5886922bb56a42d836daa392e5d5f2e | 2ccc30fad736a6fee0cba8b99c521bee6ad13087 | refs/heads/develop | 2021-09-11T14:41:04.753290 | 2021-09-01T07:28:44 | 2021-09-01T07:28:44 | 125,426,260 | 59 | 29 | MIT | 2021-06-08T20:29:42 | 2018-03-15T21:07:28 | Python | UTF-8 | Python | false | false | 846 | py | from kivy.app import App
from kivy.clock import Clock
from kivymd.toolbar import Toolbar
from etherollapp.etheroll.ui_utils import load_kv_from_py
load_kv_from_py(__file__)
class CustomToolbar(Toolbar):
"""Toolbar with helper method for loading default/back buttons."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
Clock.schedule_once(self.load_default_buttons)
def load_default_buttons(self, dt=None):
app = App.get_running_app()
self.left_action_items = [
['menu', lambda x: app.root.navigation.toggle_nav_drawer()]]
self.right_action_items = [[
'dots-vertical',
lambda x: app.root.navigation.toggle_nav_drawer()]]
def load_back_button(self, function):
self.left_action_items = [['arrow-left', lambda x: function()]]
| [
"[email protected]"
]
| |
b0fe4b6f20442f6ba76f885dbb49a445c8df729a | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/d59101416366fa8d50bd2d8218e772b2c6a8bd7f-<remove_from_device>-fix.py | 58894645341f61f607af0fa3f9de1bd569631b46 | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | def remove_from_device(self):
name = self.want.name
if self.want.parent_policy:
uri = 'https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}'.format(self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.parent_policy), name.replace('/', '_'))
else:
uri = 'https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}'.format(self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.parent_rule_list), name.replace('/', '_'))
resp = self.client.api.delete(uri)
if (resp.status == 200):
return True | [
"[email protected]"
]
| |
ac528187a330a1170469d7253b64bf4680d05ce3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_evaporating.py | c26673a9d07507032d353945f4ce1d7da9ede1af | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py |
from xai.brain.wordbase.verbs._evaporate import _EVAPORATE
#calss header
class _EVAPORATING(_EVAPORATE, ):
def __init__(self,):
_EVAPORATE.__init__(self)
self.name = "EVAPORATING"
self.specie = 'verbs'
self.basic = "evaporate"
self.jsondata = {}
| [
"[email protected]"
]
| |
04a50dc0e33be89b7e0b3dcf0a41fb02d629f963 | b0cdbad299f6174bfdb0fba173dbcf3889b82209 | /Object Oriented Programming/oops/exercise_4.py | 57620e40b0f7418b80594400b89402e36f343253 | []
| no_license | deesaw/PythonD-06 | a33e676f1e0cfc13b4ea645c8b60547b198239ac | 3c6f065d7be2e3e10cafb6cef79d6cae9d55a7fa | refs/heads/master | 2023-03-18T08:24:42.030935 | 2021-03-02T14:15:09 | 2021-03-02T14:15:09 | 343,797,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | class Dog(object):
def speak(self):
print( "bhou..bhou" )
def guard(self):
print( "I am guarding your home" )
class Cat(object):
def speak(self):
print( "meau..meau" )
def hunt(self):
print( "I am hunting mice" )
class Dd(Dog):
def hobby(self):
print( "Biting" )
def guard(self):
print( "Guarding house" )
def oldguard(self):
super(Dd,self).guard()
ginger=Dd()
ginger.guard()
ginger.speak()
ginger.hobby()
ginger.oldguard()
print("*******************************************************")
class Doat(Cat,Dog):
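    # MRO is Doat -> Cat -> Dog, so super().speak() resolves to Cat.speak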
def hobby(self):
print( "programming in python" )
    def speak(self):
        print("bhou..meau")
def oldspeak(self):
super(Doat,self).speak()
ginger1=Doat()
ginger1.speak()
ginger1.guard()
ginger1.hunt()
ginger1.hobby()
ginger1.oldspeak()
| [
"[email protected]"
]
| |
29a1a2c46aa99f941385f809339cfe85914cf4d6 | 9d8acc20d2ee1d1957849dfb71c22e0dae2d8c5c | /baomoicrawl/venv/Lib/site-packages/scrapy/utils/sitemap.py | c9f5b4ef42d0d29efc71b43d2c9e9ba8ded9a1a6 | []
| no_license | thuy4tbn99/TranTruongThuy_17021178_Nhom4_Crawler | b0fdedee2942a12d9f64dfed93f43802dc5ab340 | 87c8c07433466bbc43a24ea089f75baeb467c356 | refs/heads/master | 2022-11-27T21:36:33.917491 | 2020-08-10T23:24:42 | 2020-08-10T23:24:42 | 286,583,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py | """
Module for processing Sitemaps.
Note: The main purpose of this module is to provide support for the
SitemapSpider, its API is subject to change without notice.
"""
from urllib.parse import urljoin
import lxml.etree
class Sitemap:
"""Class to parse Sitemap (type=urlset) and Sitemap Index
(type=sitemapindex) files"""
def __init__(self, xmltext):
xmlp = lxml.etree.XMLParser(recover=True, remove_comments=True, resolve_entities=False)
self._root = lxml.etree.fromstring(xmltext, parser=xmlp)
rt = self._root.tag
self.type = self._root.tag.split('}', 1)[1] if '}' in rt else rt
def __iter__(self):
for elem in self._root.getchildren():
d = {}
for el in elem.getchildren():
tag = el.tag
name = tag.split('}', 1)[1] if '}' in tag else tag
if name == 'link':
if 'href' in el.attrib:
d.setdefault('alternate', []).append(el.get('href'))
else:
d[name] = el.text.strip() if el.text else ''
if 'loc' in d:
yield d
def sitemap_urls_from_robots(robots_text, base_url=None):
"""Return an iterator over all sitemap urls contained in the given
robots.txt file
"""
for line in robots_text.splitlines():
if line.lstrip().lower().startswith('sitemap:'):
url = line.split(':', 1)[1].strip()
yield urljoin(base_url, url)
| [
"[email protected]"
]
| |
653e892c9b0e8d7676e7419a4cd8223861cf33d8 | 4912cbd47c19c58d142e6833911d70f5ea037357 | /question_bank/reverse-string/reverse-string.py | c47284c0fefc2d681bb7e99d7485ae06dcaf5e97 | [
"Apache-2.0"
]
| permissive | yatengLG/leetcode-python | a09a17cd9e60cafd9ff8ca9c068f5b70719c436f | 5d48aecb578c86d69835368fad3d9cc21961c226 | refs/heads/master | 2023-07-13T16:10:01.920716 | 2021-09-06T02:51:46 | 2021-09-06T02:51:46 | 286,969,109 | 13 | 6 | null | 2021-02-16T10:19:44 | 2020-08-12T09:13:02 | Python | UTF-8 | Python | false | false | 643 | py | # -*- coding: utf-8 -*-
# @Author : LG
"""
Runtime: 36 ms, faster than 98.98% of all Python3 submissions
Memory: 14.4 MB, less than 19.40% of all Python3 submissions
Approach:
    Two pointers: point them at the head and tail of the list, then swap the elements they point to and move inward.
"""
class Solution:
def reverseString(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
n = len(s)
        l, r = 0, n-1 # left and right pointers, starting at the two ends
while r > l:
            s[l], s[r] = s[r], s[l] # swap the elements
            l += 1 # move the pointers inward
r -= 1 | [
"[email protected]"
]
| |
d28f93833de104995b112c67f309aeca3665e1a5 | 8e75f2ba056e5bd75647f1e3f9773e1015c0dd0e | /628_maximum_product_of_three_numbers.py | 9df4c2022f676107d77fa0ca3142287a66e826c7 | []
| no_license | eazow/leetcode | 96cbcba143ce04c6e83c5c985e19320f48c60b0d | c1c5ee72b8fe608b278ca20a58bc240fdc62b599 | refs/heads/master | 2022-12-10T00:06:06.676066 | 2022-11-29T09:02:04 | 2022-11-29T09:02:04 | 46,109,860 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | class Solution(object):
def maximumProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums = sorted(nums)
return max(nums[-3] * nums[-2] * nums[-1], nums[0] * nums[1] * nums[-1])
assert Solution().maximumProduct([1,2,3,4]) == 24
assert Solution().maximumProduct([-4,-3,-2,-1,60]) == 720 | [
"[email protected]"
]
| |
9848cbcc79703b08c9c0e9ee9bbbd69fb4c86624 | b7add0d1b1effc50b27d3316fa5889a5227e5b19 | /Atlasbuggy/atlasbuggy/files/videofile.py | 205f824be3d6e10d3c1e8934bf447a376387c7fd | []
| no_license | Woz4tetra/Atlas | efb83a7c7b2698bf8b36b023f7aa573cc38284f6 | c7380868a9efef9d1594ed7aa87187f03a7e4612 | refs/heads/master | 2020-04-04T06:25:50.657631 | 2017-04-05T01:53:15 | 2017-04-05T01:53:15 | 50,269,756 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,588 | py | import cv2
import time
from atlasbuggy.files.atlasbuggyfile import AtlasWriteFile, AtlasReadFile
from atlasbuggy.files.logfile import default_log_dir_name, default_log_file_name
class VideoPlayer:
def __init__(self, video_name, video_dir, window_name, capture, width=None, height=None, frame_skip=0,
loop_video=False, start_frame=0, slider_callback=None):
video_name, video_dir = AtlasReadFile.format_path_as_time(video_name, video_dir, default_log_dir_name,
default_log_file_name)
self.read_file = AtlasReadFile(video_name, video_dir, False, ("avi", "mov"), "videos")
self.window_name = window_name
self.frame = None
self.current_frame_num = 0
self.current_time = 0.0
self.capture = capture
        self.cv_capture = cv2.VideoCapture(self.read_file.full_path)
cv2.namedWindow(self.window_name)
self.fps = self.cv_capture.get(cv2.CAP_PROP_FPS)
self.num_frames = int(self.cv_capture.get(cv2.CAP_PROP_FRAME_COUNT))
if self.num_frames <= 0:
raise FileNotFoundError("Video failed to load!")
self.length_sec = self.num_frames / self.fps
self.length_msec = int(self.length_sec * 1000)
self.slider_pos = 0
self.slider_ticks = int(self.cv_capture.get(cv2.CAP_PROP_FRAME_WIDTH) // 3)
if self.slider_ticks > self.num_frames:
self.slider_ticks = self.num_frames
self.track_bar_name = "frame:"
cv2.createTrackbar(self.track_bar_name, self.window_name, 0, self.slider_ticks,
self.on_slider)
self.slider_callback = slider_callback
self.width = int(self.cv_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
self.height = int(self.cv_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.resize_frame = False
if width is None:
self.resize_width = self.width
else:
self.resize_width = width
self.resize_frame = True
if height is None:
self.resize_height = self.height
else:
self.resize_height = height
self.resize_frame = True
self.frame_skip = frame_skip
self.loop_video = loop_video
if start_frame > 0:
self.set_frame(start_frame)
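        # Maximum drift (seconds) tolerated between the requested and actual
        # playback time before get_frame reseeks the capture.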
self.sync_up_error = 0.01
def video_len(self):
return self.num_frames
def current_pos(self):
return int(self.cv_capture.get(cv2.CAP_PROP_POS_FRAMES))
def on_slider(self, slider_index):
slider_pos = int(slider_index * self.video_len() / self.slider_ticks)
if abs(slider_pos - self.current_pos()) > 1:
self.set_frame(slider_pos)
self.capture.show_frame(self.get_frame())
self.current_frame_num = self.current_pos()
self.slider_pos = slider_index
if self.slider_callback is not None:
self.slider_callback()
def set_frame(self, position):
if position >= self.video_len():
position = self.video_len()
if position >= 0:
self.cv_capture.set(cv2.CAP_PROP_POS_FRAMES, int(position))
def get_frame(self, current_time=None, advance_frame=True):
if current_time is not None:
self.current_time = self.current_pos() * self.length_sec / self.num_frames
if abs(current_time - self.current_time) > self.sync_up_error:
goal_frame = int(current_time * self.num_frames / self.length_sec)
self.set_frame(goal_frame)
return self.get_frame()
if self.frame_skip > 0:
self.set_frame(self.current_pos() + self.frame_skip)
success, self.frame = self.cv_capture.read()
if not advance_frame:
self.set_frame(self.current_pos() - 1)
if not success or self.frame is None:
if self.loop_video:
self.set_frame(0)
while success is False or self.frame is None:
success, self.frame = self.cv_capture.read()
else:
self.close()
return None
if self.resize_frame:
self.frame = cv2.resize(self.frame,
(self.resize_width, self.resize_height),
interpolation=cv2.INTER_NEAREST)
if self.current_pos() != self.current_frame_num:
self.current_frame_num = self.current_pos()
self.slider_pos = int(self.current_frame_num * self.slider_ticks / self.video_len())
cv2.setTrackbarPos(self.track_bar_name, self.window_name, self.slider_pos)
return self.frame
def close(self):
cv2.destroyWindow(self.window_name)
class VideoRecorder(AtlasWriteFile):
def __init__(self, video_name, video_dir, width, height, enable_recording, capture, cam_number, cv_capture):
super(VideoRecorder, self).__init__(video_name, video_dir, False, "avi", "videos")
if cv_capture is not None:
self.cv_capture = cv_capture
elif cam_number is not None:
self.cv_capture = cv2.VideoCapture(cam_number)
else:
raise ValueError("Capture number or capture instance not supplied!")
print("Sampling for FPS...", end="")
time0 = time.time()
samples = 15
for frame_num in range(samples):
success, self.frame = self.cv_capture.read()
if not success:
raise FileNotFoundError("Failed to retrieve from camera")
capture.show_frame(self.frame)
fps = samples / (time.time() - time0)
print("done: ", fps)
self.enable_recording = enable_recording
self.width = width
self.height = height
if width is not None:
self.recorder_width = width
self.width = width
else:
self.recorder_width = self.frame.shape[1]
self.width = self.frame.shape[1]
if height is not None:
self.recorder_height = height
self.height = height
else:
self.recorder_height = self.frame.shape[0]
self.height = self.frame.shape[0]
self.resize_frame = self.frame.shape[0:2] != (self.height, self.width)
if self.enable_recording:
codec = 'MJPG'
fourcc = cv2.VideoWriter_fourcc(*codec)
self.video = cv2.VideoWriter()
self.video.open(self.full_path, fourcc, fps, (self.recorder_width, self.recorder_height), True)
self._is_open = True
print("Writing video to:", self.full_path)
else:
self.video = None
def write(self, frame):
if frame.shape[0:2] != (self.recorder_height, self.recorder_width):
            # cv2.resize expects dsize as (width, height)
            frame = cv2.resize(frame, (self.recorder_width, self.recorder_height))
if len(frame.shape) == 2:
self.video.write(cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR))
else:
self.video.write(frame)
def get_frame(self):
success, self.frame = self.cv_capture.read()
if self.resize_frame and self.frame.shape[0:2] != (self.height, self.width):
self.frame = cv2.resize(self.frame, (self.width, self.height))
if self.enable_recording:
self.write(self.frame)
return self.frame
def close(self):
if self._is_open:
self.video.release()
self._is_open = False
print("Wrote video to:", self.full_path)
| [
"[email protected]"
]
| |
08b3051adaf303a2d19d7736a97fbe771d06b6ae | 80760d4c8a6b2c45b4b529bdd98d33c9c5509438 | /Practice/atcoder/ABC/130/src/d2.py | 82230a1e0cc7c786640ababca689faaaffcd8866 | []
| no_license | prrn-pg/Shojin | f1f46f8df932df0be90082b475ec02b52ddd882e | 3a20f1122d8bf7d95d9ecd205a62fc36168953d2 | refs/heads/master | 2022-12-30T22:26:41.020473 | 2020-10-17T13:53:52 | 2020-10-17T13:53:52 | 93,830,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | # Two-pointer ("shakutori") practice.
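# Both pointers only move forward, so the whole scan is O(n).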
n, k = map(int, input().split())
arr = list(map(int, input().split()))
ans = 0
r = 0
tmp_sum = 0
for l in range(n):
    # extend the right end until the window sum reaches k
while r < n and tmp_sum < k:
tmp_sum += arr[r]
r += 1
    # once the while loop exits, the window sum satisfies the condition (unless r ran off the end)
if r == n:
while tmp_sum >= k and l <= n:
tmp_sum -= arr[l]
ans += 1
l += 1
break
else:
ans += n - r + 1
tmp_sum -= arr[l]
print(ans)
| [
"[email protected]"
]
| |
c112d5ce3c5633ee755fd9211c360485d7c5e38f | 82a9077bcb5a90d88e0a8be7f8627af4f0844434 | /google-cloud-sdk/lib/tests/unit/surface/apigee/operations_describe_test.py | e7433df43a3aae0f564d4c7c695e60939f77816c | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | piotradamczyk5/gcloud_cli | 1ae2553595e569fad6ce84af62b91a7ee5489017 | 384ece11040caadcd64d51da74e0b8491dd22ca3 | refs/heads/master | 2023-01-01T23:00:27.858583 | 2020-10-21T04:21:23 | 2020-10-21T04:21:23 | 290,238,061 | 0 | 0 | null | 2020-10-19T16:43:36 | 2020-08-25T14:31:00 | Python | UTF-8 | Python | false | false | 1,920 | py | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that exercise the 'gcloud apigee operations describe' command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import json
from tests.lib.surface.apigee import base
class OperationsDescribeTest(base.ApigeeSurfaceTest):
def testSimpleDescribe(self):
canned_response = {
"metadata": {
"@type":
"type.googleapis.com/google.cloud.apigee.v1.OperationMetadata",
"operationType":
"INSERT",
"state":
"IN_PROGRESS",
"targetResourceName":
"organizations/cwajh-test-project"
},
"name":
"organizations/test-org/operations/20b4ba00-0806-0000-997a-522a4adf027f"
}
self.AddHTTPResponse(
"https://apigee.googleapis.com/v1/organizations/test-org/operations/20b4ba00-0806-0000-997a-522a4adf027f",
status=200,
body=json.dumps(canned_response))
self.RunApigee("operations describe 20b4ba00-0806-0000-997a-522a4adf027f "
"--organization=test-org --format=json")
canned_response["uuid"] = "20b4ba00-0806-0000-997a-522a4adf027f"
canned_response["organization"] = "test-org"
self.AssertJsonOutputMatches(canned_response)
| [
"[email protected]"
]
| |
7ac3108667a2fc73e496511aca4aa994b5413c18 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/bps_cs22950-10/sdB_bps_cs22950-10_coadd.py | e98ff4ffa0393f2b27e60d204c16b04f0ab364d4 | []
| no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[304.840292,-15.674492], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_bps_cs22950-10/sdB_bps_cs22950-10_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_bps_cs22950-10/sdB_bps_cs22950-10_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
9d96d467699e2db6bbc9dacc1e91761aab92a6dc | 2d921bb03eade0763ddb3a9cc5cb637730ecbde1 | /python/plot/PlotStyle.py | 12d2ecb4732ab7424ed515bca8ebc8ce4145f6a4 | []
| no_license | rmanzoni/WTau3Mu | 10c57971b80f9769578284abd69009008901eea7 | 5ad336df976d5a1b39e4b516641661921b06ba20 | refs/heads/92X | 2021-01-18T15:10:41.887147 | 2019-05-09T12:48:00 | 2019-05-09T12:48:00 | 84,342,825 | 0 | 7 | null | 2018-07-19T09:08:19 | 2017-03-08T16:35:42 | Python | UTF-8 | Python | false | false | 5,537 | py | from ROOT import gROOT, gStyle, TFile, TH1F, TH2F, TCanvas, TLegend, TF1, TGraph, TVectorF, TGraphErrors, TObjArray, THStack, TStyle, TGaxis, kWhite
""" Initialises default ROOT plot style.
In order to support old instances of the PlotStyle that
    depended on a given ntuple, it is set up as a class.
"""
class PlotStyle(object):
""" Main class for creating ROOT objects from PyRootObjects.
"""
@staticmethod
def initStyle():
gROOT.SetStyle("Plain")
# For the canvas:
gStyle.SetCanvasBorderMode(0)
gStyle.SetCanvasColor(kWhite)
gStyle.SetCanvasDefH(700) #Height of canvas
gStyle.SetCanvasDefW(700) #Width of canvas
gStyle.SetCanvasDefX(0) #Position on screen
gStyle.SetCanvasDefY(0)
# For the line:
gStyle.SetLineWidth(2)
# For the Pad:
gStyle.SetPadBorderMode(0)
# gStyle.SetPadBorderSize(Width_t size = 1)
gStyle.SetPadColor(kWhite)
gStyle.SetPadGridX(True)
gStyle.SetPadGridY(True)
gStyle.SetGridColor(0)
gStyle.SetGridStyle(3)
gStyle.SetGridWidth(1)
# For the frame:
gStyle.SetFrameBorderMode(0)
gStyle.SetFrameBorderSize(1)
gStyle.SetFrameFillColor(0)
gStyle.SetFrameFillStyle(0)
gStyle.SetFrameLineColor(1)
gStyle.SetFrameLineStyle(1)
gStyle.SetFrameLineWidth(1)
# For the histo:
# gStyle.SetHistFillColor(1)
# gStyle.SetHistFillStyle(0)
gStyle.SetHistLineColor(1)
gStyle.SetHistLineStyle(0)
gStyle.SetHistLineWidth(2)
# gStyle.SetLegoInnerR(Float_t rad = 0.5)
# gStyle.SetNumberContours(Int_t number = 20)
gStyle.SetEndErrorSize(2)
#gStyle.SetErrorMarker(20)
gStyle.SetErrorX(0.)
gStyle.SetMarkerStyle(8)
gStyle.SetMarkerSize(1)
#For the fit/function:
gStyle.SetOptFit(0)
gStyle.SetFitFormat("5.4g")
gStyle.SetFuncColor(2)
gStyle.SetFuncStyle(1)
gStyle.SetFuncWidth(1)
#For the date:
gStyle.SetOptDate(0)
# gStyle.SetDateX(Float_t x = 0.01)
# gStyle.SetDateY(Float_t y = 0.01)
# For the statistics box:
gStyle.SetOptFile(0)
gStyle.SetOptStat(0) # To display the mean and RMS: SetOptStat("mr")
gStyle.SetStatColor(kWhite)
gStyle.SetStatFont(42)
gStyle.SetStatFontSize(0.025)
gStyle.SetStatTextColor(1)
gStyle.SetStatFormat("6.4g")
gStyle.SetStatBorderSize(1)
gStyle.SetStatH(0.1)
gStyle.SetStatW(0.15)
# gStyle.SetStatStyle(Style_t style = 1001)
# gStyle.SetStatX(Float_t x = 0)
# gStyle.SetStatY(Float_t y = 0)
# Margins:
gStyle.SetPadTopMargin(0.11)
gStyle.SetPadBottomMargin(0.13)
gStyle.SetPadLeftMargin(0.17)
gStyle.SetPadRightMargin(0.07)
# For the Global title:
gStyle.SetOptTitle(0)
gStyle.SetTitleFont(42)
gStyle.SetTitleColor(1)
gStyle.SetTitleTextColor(1)
gStyle.SetTitleFillColor(10)
gStyle.SetTitleFontSize(0.04)
# gStyle.SetTitleH(0) # Set the height of the title box
# gStyle.SetTitleW(0) # Set the width of the title box
#gStyle.SetTitleX(0.35) # Set the position of the title box
#gStyle.SetTitleY(0.986) # Set the position of the title box
# gStyle.SetTitleStyle(Style_t style = 1001)
#gStyle.SetTitleBorderSize(0)
# For the axis titles:
gStyle.SetTitleColor(1, "XYZ")
gStyle.SetTitleFont(42, "XYZ")
gStyle.SetTitleSize(0.05, "XYZ")
# gStyle.SetTitleXSize(Float_t size = 0.02) # Another way to set the size?
# gStyle.SetTitleYSize(Float_t size = 0.02)
gStyle.SetTitleXOffset(1.)
gStyle.SetTitleYOffset(1.3)
#gStyle.SetTitleOffset(1.1, "Y") # Another way to set the Offset
# For the axis labels:
gStyle.SetLabelColor(1, "XYZ")
gStyle.SetLabelFont(42, "XYZ")
gStyle.SetLabelOffset(0.007, "XYZ")
gStyle.SetLabelSize(0.035, "XYZ")
# For the axis:
gStyle.SetAxisColor(1, "XYZ")
gStyle.SetStripDecimals(True)
gStyle.SetTickLength(0.03, "XYZ")
gStyle.SetNdivisions(510, "XYZ")
gStyle.SetPadTickX(1) # To get tick marks on the opposite side of the frame
gStyle.SetPadTickY(1)
# Change for log plots:
gStyle.SetOptLogx(0)
gStyle.SetOptLogy(0)
gStyle.SetOptLogz(0)
gStyle.SetPalette(1) #(1,0)
# another top group addition
gStyle.SetHatchesSpacing(1.0)
# Postscript options:
gStyle.SetPaperSize(20., 20.)
#gStyle.SetPaperSize(TStyle.kA4)
#gStyle.SetPaperSize(27., 29.7)
#TGaxis.SetMaxDigits(3)
# gStyle.SetLineScalePS(Float_t scale = 3)
# gStyle.SetLineStyleString(Int_t i, const char* text)
# gStyle.SetHeaderPS(const char* header)
# gStyle.SetTitlePS(const char* pstitle)
#gStyle.SetColorModelPS(1)
# gStyle.SetBarOffset(Float_t baroff = 0.5)
# gStyle.SetBarWidth(Float_t barwidth = 0.5)
# gStyle.SetPaintTextFormat(const char* format = "g")
# gStyle.SetPalette(Int_t ncolors = 0, Int_t* colors = 0)
# gStyle.SetTimeOffset(Double_t toffset)
# gStyle.SetHistMinimumZero(kTRUE)
#gStyle.cd()
print "TDR Style initialized" | [
"[email protected]"
]
| |
635bd4085a4fdd6fef954d62dc513a0220d56cfd | 04b494a2286e7d0ec3bbe8d25c15d575486a0f91 | /_exercises/exercise113/exercise113.py | b555ca05f49c798a2c6370f04eec96053da588c3 | []
| no_license | ViniciusGranado/_studies_Python | ea6adc35edccfbd81a67a613e8cd468fd8485856 | af645fa777a408a8ff1b8ed89911971f5b537ac7 | refs/heads/master | 2023-02-01T19:57:04.117047 | 2020-12-19T00:56:10 | 2020-12-19T00:56:10 | 258,855,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | def read_int(msg):
while True:
try:
number = int(input(msg))
except (ValueError, TypeError):
            print('[ERROR] Enter a valid integer.')
print()
except KeyboardInterrupt:
            print('[ERROR] Data entry interrupted.')
            print('Assuming value 0')
return 0
else:
return number
def read_float(msg):
while True:
try:
number = float(input(msg).replace(',', '.'))
except (ValueError, TypeError):
            print('[ERROR] Enter a valid real number.')
print()
except KeyboardInterrupt:
            print('[ERROR] Data entry interrupted.')
            print('Assuming value 0')
return 0
else:
if number.is_integer():
return int(number)
return number
int_number = read_int('Enter an integer value: ')
float_number = read_float('Enter a real value: ')
print(f'You entered {int_number} and {float_number}')
| [
"[email protected]"
]
| |
de89526204340fed105a0efb1a4cfd7137b26f44 | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/tenth/rank_2i26_I.py | 4c80ed792391aea378782cf42541231cd379011d | []
| no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define it as a function and call it in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop over the testFileList with a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus, instead of a function to import in other scripts, the calculations will be parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '2i26.csv'
identifier = 'I'
thresholdCoef = 0.1
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/tenth/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/tenth/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Drop features with coefficients below threshold
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs = coefs[np.abs(coefs['coefficients']) < thresholdCoef]
dropList = list(coefs.index)
del coefs
df1.drop(dropList, axis = 1, inplace = True)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
    # Subtract each row of the dataframe from row i, then drop the trivial difference row[[i]] - row[[i]]. Some input files also carry an erroneous 'class' column, which is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize the input DF using the standard scaler fit on the training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
| [
"[email protected]"
]
| |
a65a90b47466d6936a96eccf140c06ee21b57225 | ddf1267a1a7cb01e70e3b12ad4a7bfaf291edb3e | /src/user/migrations/0023_action_read_date.py | d87e85c86569ba17531d435462e2b08d515436fd | [
"MIT"
]
| permissive | Garinmckayl/researchhub-backend | 46a17513c2c9928e51db4b2ce5a5b62df453f066 | cd135076d9a3b49a08456f7ca3bb18ff35a78b95 | refs/heads/master | 2023-06-17T04:37:23.041787 | 2021-05-18T01:26:46 | 2021-05-18T01:26:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | # Generated by Django 2.2.9 on 2020-01-10 20:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0022_action'),
]
operations = [
migrations.AddField(
model_name='action',
name='read_date',
field=models.DateTimeField(default=None, null=True),
),
]
| [
"[email protected]"
]
| |
6b8fd04745c1d69c819cc2f7e2ca46f5d6b25e51 | de23310ac55a7e72e853ca43ebdbce28358a4bb9 | /models/residual_viz.py | 35bfe4f16c0edf9e74ad6977b2482c5ad2f72ede | []
| no_license | mehdidc/zoo | ea5f97b1402e9501db53cd418271614afe378dc0 | 194efb0098679c065de51b0f4d4864cb415b17f7 | refs/heads/master | 2020-04-28T02:28:37.576822 | 2016-10-31T06:06:21 | 2016-10-31T06:06:21 | 174,899,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,157 | py | #!/usr/bin/env python
"""
Lasagne implementation of CIFAR-10 examples from "Deep Residual Learning for Image Recognition" (http://arxiv.org/abs/1512.03385)
With n=5, i.e. 32-layer network from the paper, this achieves a validation error of 6.88% (vs 7.51% in the paper).
The accuracy has not yet been tested for the other values of n.
"""
from __future__ import print_function
import sys
import os
import time
import string
import random
import pickle
import numpy as np
import theano
import theano.tensor as T
import lasagne
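# Depth bookkeeping for the docstring above (a sketch, assuming the paper's
# 6n+2 rule): each residual block stacks two 3x3 convolutions, there are three
# stacks of n blocks, plus the first convolution and the final dense layer.
def expected_depth(n):
    # hypothetical helper, not used by the rest of this script
    return 6 * n + 2  # expected_depth(5) == 32, the 32-layer network cited above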
# ##################### Load data from CIFAR-10 dataset #######################
# this code assumes the cifar dataset from 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
# has been extracted in current working directory
def unpickle(file):
import cPickle
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
def load_data():
xs = []
ys = []
for j in range(5):
        d = unpickle('cifar-10-batches-py/data_batch_' + str(j+1))
x = d['data']
y = d['labels']
xs.append(x)
ys.append(y)
d = unpickle('cifar-10-batches-py/test_batch')
xs.append(d['data'])
ys.append(d['labels'])
x = np.concatenate(xs)/np.float32(255)
y = np.concatenate(ys)
x = np.dstack((x[:, :1024], x[:, 1024:2048], x[:, 2048:]))
x = x.reshape((x.shape[0], 32, 32, 3)).transpose(0,3,1,2)
# subtract per-pixel mean
pixel_mean = np.mean(x[0:50000],axis=0)
#pickle.dump(pixel_mean, open("cifar10-pixel_mean.pkl","wb"))
x -= pixel_mean
# create mirrored images
X_train = x[0:50000,:,:,:]
Y_train = y[0:50000]
X_train_flip = X_train[:,:,:,::-1]
Y_train_flip = Y_train
X_train = np.concatenate((X_train,X_train_flip),axis=0)
Y_train = np.concatenate((Y_train,Y_train_flip),axis=0)
# shuffle arrays
train_index = [i for i in range(100000)]
test_index = [i for i in range(10000)]
random.shuffle(train_index)
random.shuffle(test_index)
train_index = np.array(train_index)
test_index = np.array(test_index)
X_train = X_train[train_index,:,:,:]
Y_train = Y_train[train_index]
X_test = x[test_index+50000,:,:,:]
Y_test = y[test_index+50000]
return dict(
X_train=lasagne.utils.floatX(X_train),
Y_train=Y_train.astype('int32'),
X_test = lasagne.utils.floatX(X_test),
Y_test = Y_test.astype('int32'),)
# ##################### Build the neural network model #######################
#from lasagne.layers import Conv2DLayer as ConvLayer
from lasagne.layers.conv import Conv2DLayer as ConvLayer
from lasagne.layers import ElemwiseSumLayer
from lasagne.layers import InputLayer
from lasagne.layers import DenseLayer
from lasagne.layers import GlobalPoolLayer
from lasagne.layers import PadLayer
from lasagne.layers import Pool2DLayer
from lasagne.layers import NonlinearityLayer
from lasagne.nonlinearities import softmax, rectify
# NB! from pull request #461 : https://github.com/f0k/Lasagne/blob/98b5581fa830cda3d3f838506ef14e5811a35ef7/lasagne/layers/normalization.py
from normalization import batch_norm
def build_cnn(input_var=None, n=5):
# create a residual learning building block with two stacked 3x3 convlayers as in paper
def residual_block(l, increase_dim=False, projection=False):
input_num_filters = l.output_shape[1]
if increase_dim:
first_stride = (2,2)
out_num_filters = input_num_filters*2
else:
first_stride = (1,1)
out_num_filters = input_num_filters
stack_1 = batch_norm(ConvLayer(l, num_filters=out_num_filters, filter_size=(3,3), stride=first_stride, nonlinearity=rectify, pad='same', W=lasagne.init.HeNormal(gain='relu')))
stack_2 = batch_norm(ConvLayer(stack_1, num_filters=out_num_filters, filter_size=(3,3), stride=(1,1), nonlinearity=None, pad='same', W=lasagne.init.HeNormal(gain='relu')))
# add shortcut connections
if increase_dim:
if projection:
# projection shortcut, as option B in paper
projection = ConvLayer(l, num_filters=out_num_filters, filter_size=(1,1), stride=(2,2), nonlinearity=None, pad='same', b=None)
block = NonlinearityLayer(batch_norm(ElemwiseSumLayer([stack_2, projection])),nonlinearity=rectify)
else:
# identity shortcut, as option A in paper
# we use a pooling layer to get identity with strides, since identity layers with stride don't exist in Lasagne
identity = Pool2DLayer(l, pool_size=1, stride=(2,2), mode='average_exc_pad')
padding = PadLayer(identity, [out_num_filters/4,0,0], batch_ndim=1)
block = NonlinearityLayer(batch_norm(ElemwiseSumLayer([stack_2, padding])),nonlinearity=rectify)
else:
block = NonlinearityLayer(batch_norm(ElemwiseSumLayer([stack_2, l])),nonlinearity=rectify)
return block
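    # In equation form, each block above computes (roughly)
    #   y = relu(BN(F(x) + shortcut(x))), F(x) = conv3x3(relu(BN(conv3x3(x))))
    # where shortcut(x) is x itself, a strided 1x1 projection (option B), or
    # the strided average-pool plus zero-padding identity (option A).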
# Building the network
l_in = InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
# first layer, output is 16 x 32 x 32
l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3,3), stride=(1,1), nonlinearity=rectify, pad='same', W=lasagne.init.HeNormal(gain='relu')))
# first stack of residual blocks, output is 16 x 32 x 32
for _ in range(n):
l = residual_block(l)
# second stack of residual blocks, output is 32 x 16 x 16
l = residual_block(l, increase_dim=True)
for _ in range(1,n):
l = residual_block(l)
# third stack of residual blocks, output is 64 x 8 x 8
l = residual_block(l, increase_dim=True)
for _ in range(1,n):
l = residual_block(l)
# average pooling
l = GlobalPoolLayer(l)
# fully connected layer
network = DenseLayer(
l, num_units=10,
nonlinearity=softmax)
return network
# ############################# Batch iterator ###############################
def iterate_minibatches(inputs, targets, batchsize, shuffle=False, augment=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
if augment:
# as in paper :
# pad feature arrays with 4 pixels on each side
# and do random cropping of 32x32
padded = np.pad(inputs[excerpt],((0,0),(0,0),(4,4),(4,4)),mode='constant')
random_cropped = np.zeros(inputs[excerpt].shape, dtype=np.float32)
crops = np.random.random_integers(0,high=8,size=(batchsize,2))
for r in range(batchsize):
random_cropped[r,:,:,:] = padded[r,:,crops[r,0]:(crops[r,0]+32),crops[r,1]:(crops[r,1]+32)]
inp_exc = random_cropped
else:
inp_exc = inputs[excerpt]
yield inp_exc, targets[excerpt]
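# Note on the augmentation above: padding the 32x32 images to 40x40 and
# cropping at offsets drawn from 0..8 reproduces the paper's 4-pixel random
# translation jitter; horizontal flips were already baked into the training
# set by load_data().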
# ############################## Main program ################################
def main(n=5, num_epochs=82):
# Load the dataset
print("Loading data...")
data = load_data()
X_train = data['X_train']
Y_train = data['Y_train']
X_test = data['X_test']
Y_test = data['Y_test']
# Prepare Theano variables for inputs and targets
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
# Create neural network model
print("Building model and compiling functions...")
network = build_cnn(input_var, n)
print("number of parameters in model: %d" % lasagne.layers.count_params(network))
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# add weight decay
all_layers = lasagne.layers.get_all_layers(network)
l2_penalty = lasagne.regularization.regularize_layer_params(all_layers, lasagne.regularization.l2) * 0.0001
loss = loss + l2_penalty
# Create update expressions for training
# Stochastic Gradient Descent (SGD) with momentum
params = lasagne.layers.get_all_params(network, trainable=True)
lr = 0.1
sh_lr = theano.shared(lasagne.utils.floatX(lr))
updates = lasagne.updates.momentum(
loss, params, learning_rate=sh_lr, momentum=0.9)
# Create a loss expression for validation/testing
test_prediction = lasagne.layers.get_output(network)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
target_var)
test_loss = test_loss.mean()
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function([input_var, target_var], loss, updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
# Finally, launch the training loop.
print("Starting training...")
# We iterate over epochs:
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(X_train, Y_train, 128, shuffle=True, augment=True):
inputs, targets = batch
train_err += train_fn(inputs, targets)
train_batches += 1
# And a full pass over the validation data:
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_test, Y_test, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
# adjust learning rate as in paper
# 32k and 48k iterations should be roughly equivalent to 41 and 61 epochs
if (epoch+1) == 41 or (epoch+1) == 61:
new_lr = sh_lr.get_value() * 0.1
print("New LR:"+str(new_lr))
sh_lr.set_value(lasagne.utils.floatX(new_lr))
# After training, we compute and print the test error:
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, Y_test, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(
test_acc / test_batches * 100))
# dump the network weights to a file :
np.savez('cifar10_deep_residual_model.npz', *lasagne.layers.get_all_param_values(network))
#
# And load them again later on like this:
# with np.load('cifar10_deep_residual_model.npz') as f:
# param_values = [f['arr_%d' % i] for i in range(len(f.files))]
# lasagne.layers.set_all_param_values(network, param_values)
if __name__ == '__main__':
from lasagnekit.misc.draw_net import draw_to_file
from lasagne.layers import get_all_layers
import residual
import residualv2
import residualv3
import residualv4
from hp_toolkit.hp import instantiate_default
cnn = build_cnn(input_var=None, n=5)
layers = get_all_layers(cnn)
draw_to_file(layers, "residual_other.svg")
hp = instantiate_default(residual.params)
cnn = residual.build_model(**hp)
cnn = cnn.output_layers[0]
layers = get_all_layers(cnn)
draw_to_file(layers, "residual.svg")
hp = instantiate_default(residualv2.params)
cnn = residualv2.build_model(**hp)
cnn = cnn.output_layers[0]
layers = get_all_layers(cnn)
draw_to_file(layers, "residualv2.svg")
hp = instantiate_default(residualv3.params)
cnn = residualv3.build_model(**hp)
cnn = cnn.output_layers[0]
layers = get_all_layers(cnn)
draw_to_file(layers, "residualv3.svg")
hp = instantiate_default(residualv4.params)
cnn = residualv4.build_model(**hp)
cnn = cnn.output_layers[0]
layers = get_all_layers(cnn)
draw_to_file(layers, "residualv4.svg")
| [
"[email protected]"
]
| |
03600cc8214045434b642323a45c09a881382679 | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /syn_mem_corruption_3switch_fuzzer_mcs/intermcs_5_/replay_config.py | 7aef30971cd0c4b996594cb00d2313e431ebf28b | []
| no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import Replayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pox.py --verbose openflow.of_01 --address=__address__ --port=__port__ openflow.discovery forwarding.l2_multi_syn_mem_corruption', label='c1', address='127.0.0.1', cwd='pox')],
topology_class=MeshTopology,
topology_params="num_switches=4",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=False,
kill_controllers_on_exit=True)
control_flow = Replayer(simulation_config, "experiments/syn_mem_corruption_3switch_fuzzer_mcs/intermcs_5_/mcs.trace.notimeouts",
input_logger=InputLogger(),
wait_on_deterministic_values=False,
allow_unexpected_messages=False,
delay_flow_mods=False,
default_dp_permit=False,
pass_through_whitelisted_messages=False,
invariant_check_name='InvariantChecker.check_liveness',
bug_signature="c1")
| [
"[email protected]"
]
| |
1abd82cd32e985e35728a631c81c33ef0fe62b70 | 481ce69bd3611715fef0be99c655c95d67f16d5f | /riopy/tests/test_symops.py | a44f6bd76716d46e50bf17f299cbedb403e45b81 | [
"BSD-3-Clause"
]
| permissive | fsimkovic/riopy | 0ffed18c72573e824affa97d5c17ca462c5f2031 | 5dc4083d1c0919d94ceeac802d3fb40748b947f3 | refs/heads/master | 2021-03-24T10:14:25.904758 | 2018-08-30T13:16:02 | 2018-08-30T13:16:02 | 117,836,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py |
import unittest
from riopy.symops import SymmetryOperator
class SymmetryOperatorTest(unittest.TestCase):
def test___init___1(self):
symops = SymmetryOperator.ops("P1")
self.assertTrue(len(symops) == 1)
self.assertTupleEqual((0.0, 0.0, 0.0), symops[0].t().as_double())
self.assertTupleEqual((1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0), symops[0].r().as_double())
if __name__ == "__main__":
unittest.main(verbosity=2)
| [
"[email protected]"
]
| |
83a1bb3a2cdd1a52239b03c71eef467737b35324 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-4096.py | 8f4fbd00afb059796231d50db43e5910e4bdb267 | []
| no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,757 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
    def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
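# Rough cost sketch for the doubling strategy above: filling a DoublingVector
# with n items copies at most 1 + 2 + 4 + ... + n < 2*n elements in total, so
# append stays O(1) amortized until the capacity passes doubling_limit, after
# which growth falls back to one element per resize as in the base Vector.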
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
]
| |
59e1363d026e1cf5c641f40106aba606a342065e | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_337/ch169_2020_06_21_16_48_03_433219.py | d50363959fd13d06ed505512e563e82d36dc80ab | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | login = input('Login?')
lista = []
while login != 'fim':    # 'fim' ("end" in Portuguese) terminates the input loop
    if login not in lista:
        lista.append(login)
    else:
        # login already taken: append the first integer suffix that is free
        i = 1
        k = True
        while k:
            login2 = login + str(i)
            if login2 not in lista:
                lista.append(login2)
                k = False
            i += 1
    login = input('Login?')
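# For example (hypothetical input): entering 'ana', 'ana', 'ana' and then
# 'fim' produces the logins ana, ana1, ana2.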
for nome in lista:
print(nome) | [
"[email protected]"
]
| |
c71e156f811307de345da807ee15cfe276b92a55 | f23c1741a63acd9d431077c4b2068e4072a72d51 | /permutation.py | a92b11d13ab718087d9f9ce651ba2472f6a711a6 | []
| no_license | Martin9527/LeetCodeTest | b188c997ab01a38201bd5ba792cdc104ca79d1d4 | 5f860c8fd2d7d7ff94eca6065d643cc4ea204abf | refs/heads/master | 2020-05-23T11:21:54.543063 | 2019-12-08T10:37:42 | 2019-12-08T10:37:42 | 186,735,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,301 | py | class Solution(object):
def permute(self,nums):
size = len(nums)
        if not size:
return []
result = []
curAns = []
usedNums = set()
self.backTrack(nums,size,curAns,usedNums,result)
return result
def backTrack(self,nums,size,curAns,usedNums,result):
if size == len(curAns):
import copy
ans = copy.deepcopy(curAns)
result.append(ans)
return
for j in range(size):
if nums[j] not in usedNums:
usedNums.add(nums[j])
curAns.append(nums[j])
self.backTrack(nums,size,curAns,usedNums,result)
usedNums.remove(nums[j])
curAns.pop()
    def permuteUnique(self,nums):
        size = len(nums)
        if size < 1:
            return []
        res = []
        used = [False] * size    # track indices, not values, so duplicate values are handled
        def backTrack(curAns):
            if len(curAns) == size:
                res.append(curAns[:])
                return
            seen = set()    # values already placed at this depth; skips duplicate branches
            for j in xrange(size):
                if used[j] or nums[j] in seen:
                    continue
                seen.add(nums[j])
                used[j] = True
                curAns.append(nums[j])
                backTrack(curAns)
                used[j] = False
                curAns.pop()
        nums.sort()
        backTrack([])
        print 'length: ',len(res)
        return res
if __name__ == '__main__':
s = Solution()
nums = [1,1,2]
    ans = s.permuteUnique(nums)    # [1,1,2] contains duplicates, so exercise the de-duplicating variant
print 'AA: ',len(ans),ans | [
"="
]
| = |
96aac0b4b4bb06d1a1361336110a66ef306f8784 | cbda89443b351bb2047180dad4e300c13dc3df7f | /Crystals/Morpurgo_sp_outer/Jobs/TIPS_Pc/TIPS_Pc_cation_neut_inner0_outer2/TIPS_Pc_cation_neut_inner0_outer2.py | a0c28b5d437cb4a23e82114742f6ee0128900f05 | []
| no_license | sheridanfew/pythonpolarisation | 080f52979f98d26360a46412a10c8e3f51ee4549 | 178e2684e9a239a8e60af5f7b1eb414ac5f31e92 | refs/heads/master | 2021-07-10T01:07:40.978790 | 2021-03-11T16:56:37 | 2021-03-11T16:56:37 | 96,101,351 | 0 | 0 | null | 2017-07-03T13:37:06 | 2017-07-03T10:54:52 | null | UTF-8 | Python | false | false | 6,693 | py | import sys
sys.path.append('../../../../../')
from BasicElements import *
from BasicElements.Register import GetRegister
from BasicElements.MoleculeFactory import ReadMoleculeType
from BasicElements.MoleculeFactory import GetMolecule
from BasicElements.Crystal import *
from Polarizability.GetDipoles import get_dipoles,split_dipoles_onto_atoms
from Polarizability import *
from Polarizability.GetEnergyFromDips import *
from Polarizability.JMatrix import JMatrix
import numpy as np
from math import *
from time import gmtime, strftime
import os
print strftime("%a, %d %b %Y %X +0000", gmtime())
name='TIPS_Pc_cation_neut_inner0_outer2'
#For crystals here, all cubic and centred at centre
insize=0
#number of TVs in each direction from the central molecule to the edge of the inner region
outsize=2
mols_cen=['TIPS_Pc_cation_aniso_cifstruct_chelpg.xyz']
mols_sur=['TIPS_Pc_neut_aniso_cifstruct_chelpg.xyz']
mols_outer=['sp_TIPS_Pc_neut.xyz']
#From cif:
'''
TIPS
data_k01029
_cell_length_a 7.5650(15)
_cell_length_b 7.7500(15)
_cell_length_c 16.835(3)
_cell_angle_alpha 89.15(3)
_cell_angle_beta 78.42(3)
_cell_angle_gamma 83.63(3)
_cell_volume 960.9(3)
'''
#Get translation vectors:
a=7.5650/0.5291772109217    # cell lengths from the cif above, converted angstrom -> bohr
b=7.7500/0.5291772109217
c=16.835/0.5291772109217
alpha=89.15*(pi/180)    # cell angles, degrees -> radians
beta=78.42*(pi/180)
gamma=83.63*(pi/180)
cif_unit_cell_volume=960.9/(a*b*c*(0.5291772109217**3))
cell_volume=sqrt(1 - (cos(alpha)**2) - (cos(beta)**2) - (cos(gamma)**2) + (2*cos(alpha)*cos(beta)*cos(gamma)))    # dimensionless volume factor V/(a*b*c)
#Converts frac coords to carts
matrix_to_cartesian=np.matrix( [[a, b*cos(gamma), c*cos(beta)],
[0, b*sin(gamma), c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)],
[0, 0, c*cell_volume/sin(gamma)]])
#carts to frac
matrix_to_fractional=matrix_to_cartesian.I
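# Quick self-check of the two matrices above (illustrative; uses only names
# already defined here): fractional (1,0,0) must map onto the first lattice
# vector, and matrix_to_fractional must invert matrix_to_cartesian.
_frac_corner = np.matrix([[1.0], [0.0], [0.0]])
assert np.allclose(matrix_to_fractional * (matrix_to_cartesian * _frac_corner), _frac_corner)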
#TVs, TV[0,1,2] are the three translation vectors.
TV=matrix_to_cartesian.T
cut=8.0
totsize=insize+outsize
#number of TVs in each direction from the nearest inner molecule to the edge of the outer region
cenpos=[totsize,totsize,totsize]
length=[2*totsize+1,2*totsize+1,2*totsize+1]
maxTVs=insize
outer_maxTVs=insize+outsize
#for a diamond-shaped outer region; leave unspecified for a cube, which fills to the cube edges.
print 'name: ',name,'mols_cen: ', mols_cen,' mols_sur: ',mols_sur,' TVs: ', TV
# Place Molecules
prot_neut_cry=Crystal(name=name,mols_cen=mols_cen,mols_sur=mols_sur,cenpos=cenpos,length=length,TVs=TV,maxTVs=maxTVs,mols_outer=mols_outer,outer_maxTVs=outer_maxTVs)
#prot_neut_cry._mols contains all molecules.
#mols[0] contains a list of all molecules in position a, mols[1] all mols in pos'n b, etc.
#mols[0][x,y,z] contains molecule a in position x,y,z
#mols may as such be iterated over in a number of ways to consider different molecules.
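#For example, the molecule of type 0 at x,y,z = cenpos is the central cation
#here; all other positions hold the neutral surrounding/outer molecules
#(an illustrative reading of the indexing described above).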
prot_neut_cry().print_posns()
#Calculate Properties:
print strftime("%a, %d %b %Y %X +0000", gmtime())
E0 = np.matrix([0.,0.,0.])
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc jm'
jm = JMatrix(cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc dips:'
d = get_dipoles(E0=E0,jm=jm._m,cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
Efield = get_electric_field(E0)
potential = get_potential()
print strftime("%a, %d %b %Y %X +0000", gmtime())
#print 'dips', d
print 'splitting dips onto atoms'
split_d = split_dipoles_onto_atoms(d)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'summing dips:'
tot = np.matrix([0.,0.,0.])
for dd in split_d:
tot += dd
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'total dip moment', tot
Uqq = np.multiply(get_U_qq(potential=potential),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqq', Uqq
Uqd = np.multiply(get_U_qdip(dips=d,Efield=Efield),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqd', Uqd
Udd = np.multiply(get_U_dipdip(jm=jm._m,dips=d.T),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Udd', Udd
energyev = Udd+Uqd+Uqq
print 'energyev', energyev
energy=energyev/27.211
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Making .dat cross sections for gnuplot'
# print TVs
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_TVs.dat' % name, 'w')
TVstr=str(str(TV[0,0]) + ' ' + str(TV[0,1]) + ' ' + str(TV[0,2]) + '\n' + str(TV[1,0]) + ' ' + str(TV[1,1]) + ' ' + str(TV[1,2]) + '\n' + str(TV[2,0]) + ' ' + str(TV[2,1]) + ' ' + str(TV[2,2])+ '\n')
f.write(TVstr)
f.flush()
f.close()
# print dipoles
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_dipoles.dat' % name, 'w')
for dd in split_d:
dstr=str(dd)
f.write(dstr)
f.write('\n')
f.flush()
f.close()
# print properties for charge in centrepos
time=strftime("%a, %d %b %Y %X +0000", gmtime())
f = open('%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\tenergyev\tUqq\tUqd\tUdd\tTotdip_x\tTotdip_y\tTotdip_z')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,energyev,Uqq,Uqd,Udd,tot[0,0],tot[0,1],tot[0,2]))
f.flush()
f.close()
# print header for reorgs
f = open('reorg_energies_%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\ta\tb\tc\tmolincell\tReorg(eV)')
f.flush()
f.close()
# REORGANISATION ENERGIES
#Note that this assumes a cube, and values for which
for dist in range(0,(length[0]/2)+1,1):
print '\n\nDIST: ', dist, '\n'
for a in range(prot_neut_cry()._cenpos[0]-dist,prot_neut_cry()._cenpos[0]+dist+1,1):
for b in range(prot_neut_cry()._cenpos[1]-dist,prot_neut_cry()._cenpos[1]+dist+1,1):
for c in range(prot_neut_cry()._cenpos[2]-dist,prot_neut_cry()._cenpos[2]+dist+1,1):
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'a,b,c',a,b,c
for molincell in range(0,len(prot_neut_cry()._mols),1):
prot_neut_cry().calc_reorg(a1=prot_neut_cry()._cenpos[0],b1=prot_neut_cry()._cenpos[1],c1=prot_neut_cry()._cenpos[2],molincell1=0,a2=a,b2=b,c2=c,molincell2=molincell,dips=d,oldUqd=Uqd)
print 'Reorg: ', prot_neut_cry()._reorgs[molincell][a][b][c]
f = open('reorg_energies_%s_properties.csv' % name, 'a')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,a,b,c,molincell,prot_neut_cry()._reorgs[molincell][a][b][c]))
f.flush()
f.close()
# Redo this and overwrite after each set to ensure we have some even if not all reorgs complete
prot_neut_cry().print_reorgs()
print 'Job Completed Successfully.'
| [
"[email protected]"
]
| |
e5af3a05af1c55f4de514b9b82f99141101c9200 | 8aa0d1d407bb1c66d01261f7e2c4e9832e856a2d | /experiments/experiments_gdsc/hyperparameter/plots/plot_nmtf_gibbs_hyperparameter.py | dd3218e5fb59f547aca48d1125e82075eea0af28 | []
| no_license | garedaba/BNMTF_ARD | 59e3ec1dbfd2a9ab9f4ec61368ec06e3783c3ee4 | 0a89e4b4971ff66c25010bd53ee2622aeaf69ae9 | refs/heads/master | 2022-01-16T06:57:12.581285 | 2018-06-10T10:22:12 | 2018-06-10T10:22:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | """
Plot the performances of NMTF Gibbs for different hyperparameter values, for
three different sparsity levels.
"""
import matplotlib.pyplot as plt
import numpy
''' Plot settings. '''
MSE_min, MSE_max = 600, 1400
values_lambda = [0.0001, 0.001, 0.01, 0.1, 1., 10., 100.]
fractions_unknown = [0.2, 0.5, 0.8]
folder_plots = "./"
folder_results = "./../results/"
plot_file = folder_plots+"nmtf_gibbs_hyperparameter.png"
''' Load in the performances. '''
performances = eval(open(folder_results+'nmtf_gibbs.txt','r').read())
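# The results file holds a plain dict literal (hence the eval() above) shaped
# like {fraction: {lambda: [MSE per repeat, ...]}}; e.g.
# {0.2: {0.0001: [812.3, 805.1], ...}, 0.5: {...}, 0.8: {...}} (illustrative numbers).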
average_performances = {
fraction: [
numpy.mean(performances[fraction][lamb])
for lamb in values_lambda
]
for fraction in fractions_unknown
}
''' Plot the performances - one line per fraction. '''
fig = plt.figure(figsize=(2.5,1.9))
fig.subplots_adjust(left=0.17, right=0.98, bottom=0.17, top=0.98)
plt.xlabel('lambdaF, lambdaS, lambdaG', fontsize=8, labelpad=1)
plt.xscale("log")
plt.xticks(fontsize=6)
plt.ylabel('MSE', fontsize=8, labelpad=1)
plt.yticks(range(0,MSE_max+1,200),fontsize=6)
plt.ylim(MSE_min, MSE_max)
for fraction in fractions_unknown:
x = values_lambda
y = average_performances[fraction]
plt.plot(x, y, label='Fraction %s' % fraction)
plt.savefig(plot_file, dpi=600) | [
"[email protected]"
]
| |
94e70bf6deabed67dd9378651c4c5af909762b47 | 0d8486c1d55c40bebea7c5428930f18165d2d0e9 | /tests/asp/AllAnswerSets/tight/7-queens.asp.test.py | 58599c3c79d3455d9b87dcc51342b0dc08b3fe6f | [
"Apache-2.0"
]
| permissive | bernardocuteri/wasp | 6f81bf6aa8fb273c91bbf68ecce4ecb195a55953 | 05c8f961776dbdbf7afbf905ee00fc262eba51ad | refs/heads/master | 2021-06-08T11:58:25.080818 | 2020-10-05T16:57:37 | 2020-10-05T16:57:37 | 124,245,808 | 0 | 0 | Apache-2.0 | 2018-03-07T14:13:16 | 2018-03-07T14:13:16 | null | UTF-8 | Python | false | false | 11,082 | py | input = """
1 2 0 0
1 3 0 0
1 4 0 0
1 5 0 0
1 6 0 0
1 7 0 0
1 8 0 0
1 9 0 0
1 10 7 6 11 12 13 14 15 16 17
1 11 7 6 10 12 13 14 15 16 17
1 12 7 6 10 11 13 14 15 16 17
1 13 7 6 10 11 12 14 15 16 17
1 14 7 6 10 11 12 13 15 16 17
1 15 7 6 10 11 12 13 14 16 17
1 16 7 6 10 11 12 13 14 15 17
1 17 0 0
1 18 7 6 19 20 21 22 23 24 25
1 19 7 6 18 20 21 22 23 24 25
1 20 7 6 18 19 21 22 23 24 25
1 21 7 6 18 19 20 22 23 24 25
1 22 7 6 18 19 20 21 23 24 25
1 23 7 6 18 19 20 21 22 24 25
1 24 7 6 18 19 20 21 22 23 25
1 25 0 0
1 26 7 6 27 28 29 30 31 32 33
1 27 7 6 26 28 29 30 31 32 33
1 28 7 6 26 27 29 30 31 32 33
1 29 7 6 26 27 28 30 31 32 33
1 30 7 6 26 27 28 29 31 32 33
1 31 7 6 26 27 28 29 30 32 33
1 32 7 6 26 27 28 29 30 31 33
1 33 0 0
1 34 7 6 35 36 37 38 39 40 41
1 35 7 6 34 36 37 38 39 40 41
1 36 7 6 34 35 37 38 39 40 41
1 37 7 6 34 35 36 38 39 40 41
1 38 7 6 34 35 36 37 39 40 41
1 39 7 6 34 35 36 37 38 40 41
1 40 7 6 34 35 36 37 38 39 41
1 41 0 0
1 42 7 6 43 44 45 46 47 48 49
1 43 7 6 42 44 45 46 47 48 49
1 44 7 6 42 43 45 46 47 48 49
1 45 7 6 42 43 44 46 47 48 49
1 46 7 6 42 43 44 45 47 48 49
1 47 7 6 42 43 44 45 46 48 49
1 48 7 6 42 43 44 45 46 47 49
1 49 0 0
1 50 7 6 51 52 53 54 55 56 57
1 51 7 6 50 52 53 54 55 56 57
1 52 7 6 50 51 53 54 55 56 57
1 53 7 6 50 51 52 54 55 56 57
1 54 7 6 50 51 52 53 55 56 57
1 55 7 6 50 51 52 53 54 56 57
1 56 7 6 50 51 52 53 54 55 57
1 57 0 0
1 58 7 6 59 60 61 62 63 64 65
1 59 7 6 58 60 61 62 63 64 65
1 60 7 6 58 59 61 62 63 64 65
1 61 7 6 58 59 60 62 63 64 65
1 62 7 6 58 59 60 61 63 64 65
1 63 7 6 58 59 60 61 62 64 65
1 64 7 6 58 59 60 61 62 63 65
1 65 0 0
1 1 2 0 64 56
1 1 2 0 64 48
1 1 2 0 64 40
1 1 2 0 64 32
1 1 2 0 64 24
1 1 2 0 64 16
1 1 2 0 63 55
1 1 2 0 63 47
1 1 2 0 63 39
1 1 2 0 63 31
1 1 2 0 63 23
1 1 2 0 63 15
1 1 2 0 62 54
1 1 2 0 62 46
1 1 2 0 62 38
1 1 2 0 62 30
1 1 2 0 62 22
1 1 2 0 62 14
1 1 2 0 61 53
1 1 2 0 61 45
1 1 2 0 61 37
1 1 2 0 61 29
1 1 2 0 61 21
1 1 2 0 61 13
1 1 2 0 60 52
1 1 2 0 60 44
1 1 2 0 60 36
1 1 2 0 60 28
1 1 2 0 60 20
1 1 2 0 60 12
1 1 2 0 59 51
1 1 2 0 59 43
1 1 2 0 59 35
1 1 2 0 59 27
1 1 2 0 59 19
1 1 2 0 59 11
1 1 2 0 58 50
1 1 2 0 58 42
1 1 2 0 58 34
1 1 2 0 58 26
1 1 2 0 58 18
1 1 2 0 58 10
1 1 2 0 56 64
1 1 2 0 56 48
1 1 2 0 56 40
1 1 2 0 56 32
1 1 2 0 56 24
1 1 2 0 56 16
1 1 2 0 55 63
1 1 2 0 55 47
1 1 2 0 55 39
1 1 2 0 55 31
1 1 2 0 55 23
1 1 2 0 55 15
1 1 2 0 54 62
1 1 2 0 54 46
1 1 2 0 54 38
1 1 2 0 54 30
1 1 2 0 54 22
1 1 2 0 54 14
1 1 2 0 53 61
1 1 2 0 53 45
1 1 2 0 53 37
1 1 2 0 53 29
1 1 2 0 53 21
1 1 2 0 53 13
1 1 2 0 52 60
1 1 2 0 52 44
1 1 2 0 52 36
1 1 2 0 52 28
1 1 2 0 52 20
1 1 2 0 52 12
1 1 2 0 51 59
1 1 2 0 51 43
1 1 2 0 51 35
1 1 2 0 51 27
1 1 2 0 51 19
1 1 2 0 51 11
1 1 2 0 50 58
1 1 2 0 50 42
1 1 2 0 50 34
1 1 2 0 50 26
1 1 2 0 50 18
1 1 2 0 50 10
1 1 2 0 48 64
1 1 2 0 48 56
1 1 2 0 48 40
1 1 2 0 48 32
1 1 2 0 48 24
1 1 2 0 48 16
1 1 2 0 47 63
1 1 2 0 47 55
1 1 2 0 47 39
1 1 2 0 47 31
1 1 2 0 47 23
1 1 2 0 47 15
1 1 2 0 46 62
1 1 2 0 46 54
1 1 2 0 46 38
1 1 2 0 46 30
1 1 2 0 46 22
1 1 2 0 46 14
1 1 2 0 45 61
1 1 2 0 45 53
1 1 2 0 45 37
1 1 2 0 45 29
1 1 2 0 45 21
1 1 2 0 45 13
1 1 2 0 44 60
1 1 2 0 44 52
1 1 2 0 44 36
1 1 2 0 44 28
1 1 2 0 44 20
1 1 2 0 44 12
1 1 2 0 43 59
1 1 2 0 43 51
1 1 2 0 43 35
1 1 2 0 43 27
1 1 2 0 43 19
1 1 2 0 43 11
1 1 2 0 42 58
1 1 2 0 42 50
1 1 2 0 42 34
1 1 2 0 42 26
1 1 2 0 42 18
1 1 2 0 42 10
1 1 2 0 40 64
1 1 2 0 40 56
1 1 2 0 40 48
1 1 2 0 40 32
1 1 2 0 40 24
1 1 2 0 40 16
1 1 2 0 39 63
1 1 2 0 39 55
1 1 2 0 39 47
1 1 2 0 39 31
1 1 2 0 39 23
1 1 2 0 39 15
1 1 2 0 38 62
1 1 2 0 38 54
1 1 2 0 38 46
1 1 2 0 38 30
1 1 2 0 38 22
1 1 2 0 38 14
1 1 2 0 37 61
1 1 2 0 37 53
1 1 2 0 37 45
1 1 2 0 37 29
1 1 2 0 37 21
1 1 2 0 37 13
1 1 2 0 36 60
1 1 2 0 36 52
1 1 2 0 36 44
1 1 2 0 36 28
1 1 2 0 36 20
1 1 2 0 36 12
1 1 2 0 35 59
1 1 2 0 35 51
1 1 2 0 35 43
1 1 2 0 35 27
1 1 2 0 35 19
1 1 2 0 35 11
1 1 2 0 34 58
1 1 2 0 34 50
1 1 2 0 34 42
1 1 2 0 34 26
1 1 2 0 34 18
1 1 2 0 34 10
1 1 2 0 32 64
1 1 2 0 32 56
1 1 2 0 32 48
1 1 2 0 32 40
1 1 2 0 32 24
1 1 2 0 32 16
1 1 2 0 31 63
1 1 2 0 31 55
1 1 2 0 31 47
1 1 2 0 31 39
1 1 2 0 31 23
1 1 2 0 31 15
1 1 2 0 30 62
1 1 2 0 30 54
1 1 2 0 30 46
1 1 2 0 30 38
1 1 2 0 30 22
1 1 2 0 30 14
1 1 2 0 29 61
1 1 2 0 29 53
1 1 2 0 29 45
1 1 2 0 29 37
1 1 2 0 29 21
1 1 2 0 29 13
1 1 2 0 28 60
1 1 2 0 28 52
1 1 2 0 28 44
1 1 2 0 28 36
1 1 2 0 28 20
1 1 2 0 28 12
1 1 2 0 27 59
1 1 2 0 27 51
1 1 2 0 27 43
1 1 2 0 27 35
1 1 2 0 27 19
1 1 2 0 27 11
1 1 2 0 26 58
1 1 2 0 26 50
1 1 2 0 26 42
1 1 2 0 26 34
1 1 2 0 26 18
1 1 2 0 26 10
1 1 2 0 24 64
1 1 2 0 24 56
1 1 2 0 24 48
1 1 2 0 24 40
1 1 2 0 24 32
1 1 2 0 24 16
1 1 2 0 23 63
1 1 2 0 23 55
1 1 2 0 23 47
1 1 2 0 23 39
1 1 2 0 23 31
1 1 2 0 23 15
1 1 2 0 22 62
1 1 2 0 22 54
1 1 2 0 22 46
1 1 2 0 22 38
1 1 2 0 22 30
1 1 2 0 22 14
1 1 2 0 21 61
1 1 2 0 21 53
1 1 2 0 21 45
1 1 2 0 21 37
1 1 2 0 21 29
1 1 2 0 21 13
1 1 2 0 20 60
1 1 2 0 20 52
1 1 2 0 20 44
1 1 2 0 20 36
1 1 2 0 20 28
1 1 2 0 20 12
1 1 2 0 19 59
1 1 2 0 19 51
1 1 2 0 19 43
1 1 2 0 19 35
1 1 2 0 19 27
1 1 2 0 19 11
1 1 2 0 18 58
1 1 2 0 18 50
1 1 2 0 18 42
1 1 2 0 18 34
1 1 2 0 18 26
1 1 2 0 18 10
1 1 2 0 16 64
1 1 2 0 16 56
1 1 2 0 16 48
1 1 2 0 16 40
1 1 2 0 16 32
1 1 2 0 16 24
1 1 2 0 15 63
1 1 2 0 15 55
1 1 2 0 15 47
1 1 2 0 15 39
1 1 2 0 15 31
1 1 2 0 15 23
1 1 2 0 14 62
1 1 2 0 14 54
1 1 2 0 14 46
1 1 2 0 14 38
1 1 2 0 14 30
1 1 2 0 14 22
1 1 2 0 13 61
1 1 2 0 13 53
1 1 2 0 13 45
1 1 2 0 13 37
1 1 2 0 13 29
1 1 2 0 13 21
1 1 2 0 12 60
1 1 2 0 12 52
1 1 2 0 12 44
1 1 2 0 12 36
1 1 2 0 12 28
1 1 2 0 12 20
1 1 2 0 11 59
1 1 2 0 11 51
1 1 2 0 11 43
1 1 2 0 11 35
1 1 2 0 11 27
1 1 2 0 11 19
1 1 2 0 10 58
1 1 2 0 10 50
1 1 2 0 10 42
1 1 2 0 10 34
1 1 2 0 10 26
1 1 2 0 10 18
1 1 2 0 63 56
1 1 2 0 62 55
1 1 2 0 62 48
1 1 2 0 61 54
1 1 2 0 61 47
1 1 2 0 61 40
1 1 2 0 60 53
1 1 2 0 60 46
1 1 2 0 60 39
1 1 2 0 60 32
1 1 2 0 59 52
1 1 2 0 59 45
1 1 2 0 59 38
1 1 2 0 59 31
1 1 2 0 59 24
1 1 2 0 58 51
1 1 2 0 58 44
1 1 2 0 58 37
1 1 2 0 58 30
1 1 2 0 58 23
1 1 2 0 58 16
1 1 2 0 55 48
1 1 2 0 54 47
1 1 2 0 54 40
1 1 2 0 53 46
1 1 2 0 53 39
1 1 2 0 53 32
1 1 2 0 52 45
1 1 2 0 52 38
1 1 2 0 52 31
1 1 2 0 52 24
1 1 2 0 51 44
1 1 2 0 51 37
1 1 2 0 51 30
1 1 2 0 51 23
1 1 2 0 51 16
1 1 2 0 50 43
1 1 2 0 50 36
1 1 2 0 50 29
1 1 2 0 50 22
1 1 2 0 50 15
1 1 2 0 47 40
1 1 2 0 46 39
1 1 2 0 46 32
1 1 2 0 45 38
1 1 2 0 45 31
1 1 2 0 45 24
1 1 2 0 44 37
1 1 2 0 44 30
1 1 2 0 44 23
1 1 2 0 44 16
1 1 2 0 43 36
1 1 2 0 43 29
1 1 2 0 43 22
1 1 2 0 43 15
1 1 2 0 42 35
1 1 2 0 42 28
1 1 2 0 42 21
1 1 2 0 42 14
1 1 2 0 39 32
1 1 2 0 38 31
1 1 2 0 38 24
1 1 2 0 37 30
1 1 2 0 37 23
1 1 2 0 37 16
1 1 2 0 36 29
1 1 2 0 36 22
1 1 2 0 36 15
1 1 2 0 35 28
1 1 2 0 35 21
1 1 2 0 35 14
1 1 2 0 34 27
1 1 2 0 34 20
1 1 2 0 34 13
1 1 2 0 31 24
1 1 2 0 30 23
1 1 2 0 30 16
1 1 2 0 29 22
1 1 2 0 29 15
1 1 2 0 28 21
1 1 2 0 28 14
1 1 2 0 27 20
1 1 2 0 27 13
1 1 2 0 26 19
1 1 2 0 26 12
1 1 2 0 23 16
1 1 2 0 22 15
1 1 2 0 21 14
1 1 2 0 20 13
1 1 2 0 19 12
1 1 2 0 18 11
1 1 2 0 64 55
1 1 2 0 64 46
1 1 2 0 64 37
1 1 2 0 64 28
1 1 2 0 64 19
1 1 2 0 64 10
1 1 2 0 63 54
1 1 2 0 63 45
1 1 2 0 63 36
1 1 2 0 63 27
1 1 2 0 63 18
1 1 2 0 62 53
1 1 2 0 62 44
1 1 2 0 62 35
1 1 2 0 62 26
1 1 2 0 61 52
1 1 2 0 61 43
1 1 2 0 61 34
1 1 2 0 60 51
1 1 2 0 60 42
1 1 2 0 59 50
1 1 2 0 56 47
1 1 2 0 56 38
1 1 2 0 56 29
1 1 2 0 56 20
1 1 2 0 56 11
1 1 2 0 55 46
1 1 2 0 55 37
1 1 2 0 55 28
1 1 2 0 55 19
1 1 2 0 55 10
1 1 2 0 54 45
1 1 2 0 54 36
1 1 2 0 54 27
1 1 2 0 54 18
1 1 2 0 53 44
1 1 2 0 53 35
1 1 2 0 53 26
1 1 2 0 52 43
1 1 2 0 52 34
1 1 2 0 51 42
1 1 2 0 48 39
1 1 2 0 48 30
1 1 2 0 48 21
1 1 2 0 48 12
1 1 2 0 47 38
1 1 2 0 47 29
1 1 2 0 47 20
1 1 2 0 47 11
1 1 2 0 46 37
1 1 2 0 46 28
1 1 2 0 46 19
1 1 2 0 46 10
1 1 2 0 45 36
1 1 2 0 45 27
1 1 2 0 45 18
1 1 2 0 44 35
1 1 2 0 44 26
1 1 2 0 43 34
1 1 2 0 40 31
1 1 2 0 40 22
1 1 2 0 40 13
1 1 2 0 39 30
1 1 2 0 39 21
1 1 2 0 39 12
1 1 2 0 38 29
1 1 2 0 38 20
1 1 2 0 38 11
1 1 2 0 37 28
1 1 2 0 37 19
1 1 2 0 37 10
1 1 2 0 36 27
1 1 2 0 36 18
1 1 2 0 35 26
1 1 2 0 32 23
1 1 2 0 32 14
1 1 2 0 31 22
1 1 2 0 31 13
1 1 2 0 30 21
1 1 2 0 30 12
1 1 2 0 29 20
1 1 2 0 29 11
1 1 2 0 28 19
1 1 2 0 28 10
1 1 2 0 27 18
1 1 2 0 24 15
1 1 2 0 23 14
1 1 2 0 22 13
1 1 2 0 21 12
1 1 2 0 20 11
1 1 2 0 19 10
0
10 q(6,1)
11 q(6,2)
12 q(6,3)
13 q(6,4)
14 q(6,5)
15 q(6,6)
16 q(6,7)
18 q(5,1)
19 q(5,2)
20 q(5,3)
21 q(5,4)
22 q(5,5)
23 q(5,6)
24 q(5,7)
26 q(4,1)
27 q(4,2)
28 q(4,3)
29 q(4,4)
30 q(4,5)
31 q(4,6)
32 q(4,7)
34 q(3,1)
35 q(3,2)
36 q(3,3)
37 q(3,4)
38 q(3,5)
39 q(3,6)
40 q(3,7)
42 q(2,1)
43 q(2,2)
44 q(2,3)
45 q(2,4)
46 q(2,5)
47 q(2,6)
48 q(2,7)
50 q(1,1)
51 q(1,2)
52 q(1,3)
53 q(1,4)
54 q(1,5)
55 q(1,6)
56 q(1,7)
58 q(0,1)
59 q(0,2)
60 q(0,3)
61 q(0,4)
62 q(0,5)
63 q(0,6)
64 q(0,7)
0
B+
0
B-
1
0
1
"""
output = """
{q(6,6), q(5,3), q(4,7), q(3,4), q(2,1), q(1,5), q(0,2)}
{q(6,6), q(5,3), q(4,1), q(3,4), q(2,7), q(1,5), q(0,2)}
{q(6,5), q(5,1), q(4,6), q(3,4), q(2,2), q(1,7), q(0,3)}
{q(6,3), q(5,1), q(4,6), q(3,4), q(2,2), q(1,7), q(0,5)}
{q(6,5), q(5,7), q(4,2), q(3,4), q(2,6), q(1,1), q(0,3)}
{q(6,2), q(5,5), q(4,7), q(3,4), q(2,1), q(1,3), q(0,6)}
{q(6,2), q(5,5), q(4,1), q(3,4), q(2,7), q(1,3), q(0,6)}
{q(6,3), q(5,7), q(4,2), q(3,4), q(2,6), q(1,1), q(0,5)}
{q(6,5), q(5,1), q(4,4), q(3,7), q(2,3), q(1,6), q(0,2)}
{q(6,5), q(5,2), q(4,6), q(3,3), q(2,7), q(1,4), q(0,1)}
{q(6,5), q(5,7), q(4,2), q(3,6), q(2,3), q(1,1), q(0,4)}
{q(6,5), q(5,3), q(4,1), q(3,6), q(2,4), q(1,2), q(0,7)}
{q(6,3), q(5,1), q(4,6), q(3,2), q(2,5), q(1,7), q(0,4)}
{q(6,6), q(5,1), q(4,3), q(3,5), q(2,7), q(1,2), q(0,4)}
{q(6,4), q(5,1), q(4,3), q(3,6), q(2,2), q(1,7), q(0,5)}
{q(6,4), q(5,1), q(4,5), q(3,2), q(2,6), q(1,3), q(0,7)}
{q(6,1), q(5,6), q(4,4), q(3,2), q(2,7), q(1,5), q(0,3)}
{q(6,7), q(5,2), q(4,4), q(3,6), q(2,1), q(1,3), q(0,5)}
{q(6,3), q(5,7), q(4,4), q(3,1), q(2,5), q(1,2), q(0,6)}
{q(6,2), q(5,4), q(4,1), q(3,7), q(2,5), q(1,3), q(0,6)}
{q(6,6), q(5,4), q(4,2), q(3,7), q(2,5), q(1,3), q(0,1)}
{q(6,3), q(5,6), q(4,2), q(3,5), q(2,1), q(1,4), q(0,7)}
{q(6,3), q(5,5), q(4,7), q(3,2), q(2,4), q(1,6), q(0,1)}
{q(6,4), q(5,7), q(4,3), q(3,6), q(2,2), q(1,5), q(0,1)}
{q(6,7), q(5,4), q(4,1), q(3,5), q(2,2), q(1,6), q(0,3)}
{q(6,6), q(5,2), q(4,5), q(3,1), q(2,4), q(1,7), q(0,3)}
{q(6,2), q(5,6), q(4,3), q(3,7), q(2,4), q(1,1), q(0,5)}
{q(6,2), q(5,4), q(4,6), q(3,1), q(2,3), q(1,5), q(0,7)}
{q(6,7), q(5,3), q(4,6), q(3,2), q(2,5), q(1,1), q(0,4)}
{q(6,2), q(5,5), q(4,3), q(3,1), q(2,7), q(1,4), q(0,6)}
{q(6,4), q(5,7), q(4,5), q(3,2), q(2,6), q(1,1), q(0,3)}
{q(6,7), q(5,5), q(4,3), q(3,1), q(2,6), q(1,4), q(0,2)}
{q(6,2), q(5,7), q(4,5), q(3,3), q(2,1), q(1,6), q(0,4)}
{q(6,1), q(5,3), q(4,5), q(3,7), q(2,2), q(1,4), q(0,6)}
{q(6,6), q(5,3), q(4,5), q(3,7), q(2,1), q(1,4), q(0,2)}
{q(6,4), q(5,6), q(4,1), q(3,3), q(2,5), q(1,7), q(0,2)}
{q(6,1), q(5,5), q(4,2), q(3,6), q(2,3), q(1,7), q(0,4)}
{q(6,4), q(5,2), q(4,7), q(3,5), q(2,3), q(1,1), q(0,6)}
{q(6,6), q(5,4), q(4,7), q(3,1), q(2,3), q(1,5), q(0,2)}
{q(6,1), q(5,4), q(4,7), q(3,3), q(2,6), q(1,2), q(0,5)}
"""
| [
"[email protected]"
]
| |
285b5d35eb6f94c89715ad4fe68307437cf9ffc0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/302/92006/submittedfiles/testes.py | 8d4dc26344d08e3707ea45e11e79240ce3625d53 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,405 | py | lista1 = [1, 3, 4,]
print(lista1[len(lista1)-1])  # last element of the list
print(len(lista1))
'''a = [8.0 , 5.0 , 10.0 , 5.0]
print(a)
print(len(a))
a.append(0.0)
print(len(a))
for i in range(len(a)-1, 0 , -1):
if i ==1:
a[1] = 2.0
else:
a[i] = a[i-1]
print(a)
print(len(a))
'''
'''
a = []
for i in range(1,5,1):
a.append(float(input('Digite o elemento: ')))
print(a)
print(sum(a))
print(len(a))
del a[1]
print(' a é igual: ', a)
print(len(a))
'''
'''
a = []
for i in range(1,11,1):
a.append(float(input('Digite o elemento: ')))
print(a)
for i in range(9, -1, -1):
print(a[i])
'''
'''
while(True):
n = int(input('Digite o número de notas: '))
if n > 0:
break
notas = []
for i in range(0,n,1):
notas.append(float(input('Digite a nota%d: ' %(i+1))))
media = 0
for i in range(0,n,1):
media += notas[i]/n
print(notas)
print(media)
'''
'''
from minha_bib import primo
n = int(input('Digite n: '))
if primo(n):
print('Primo')
else:
print('Não é primo ')
'''
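# Sketch of what minha_bib.primo presumably implements (trial division;
# the actual module is not included in this file):
'''
def primo(n):
    if n < 2:
        return False
    for d in range(2, int(n**0.5) + 1):
        if n % d == 0:
            return False
    return True
'''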
# Exercise 15
'''
n = int(input('Digite o valor de n: '))
if n > 9999999 and n <=99999999:
soma = 0
while(n!=0):
resto = n%10
n = (n-resto)//10
soma = soma + resto
print(soma)
else:
print('Não Sei')
'''
# Exercise 16
'''
while(True):
t1 = int(input('Digite o número de tomadas da T1: '))
t2 = int(input('Digite o número de tomadas da T2: '))
t3 = int(input('Digite o número de tomadas da T3: '))
t4 = int(input('Digite o número de tomadas da T4: '))
if t1 > 0 and t2 > 0 and t3 > 0 and t4 > 0:
n = t1 + (t2-1) + (t3-1) + (t4-1)
print(n)
break
else:
print("O NÚMERO DE TOMADAS TEM QUE SER MAIOR QUE 0, DIGITE NOVAMENTE\n")
'''
# Exercise 17
'''
a = int(input('Digite o primeiro número: '))
b = int(input('Digite o segundo número: '))
c = int(input('Digite o terceiro número: '))
d = int(input('Digite o quarto número: '))
if a > b and b < c and c > d:
print('S')
elif a < b and b > c and c > d:
print('S')
elif c > b and c > d and a < b:
print('S')
elif d > c and c > b and b > a:
print('S')
elif a > b and b == c and c == d:
print('S')
elif a > b and b < c and c == d:
print('S')
elif b > a and b > c and c == d:
print('S')
elif c > b and c > d and a == b:
print('S')
elif d > c and b == c and b == a:
print('S')
elif d > c and c < b and a == b:
print('S')
else:
print('N')
'''
# Exercise 20
'''
a = int(input('Digite o primeiro número: '))
b = int(input('Digite o segundo número: '))
for i in range(1000000,0,-1):
if a%i == 0 and b%i == 0:
print(i)
break
'''
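# A shorter alternative for Exercise 20 (sketch, assuming Python >= 3.5,
# where math.gcd is available in the standard library):
'''
import math
a = int(input('Digite o primeiro número: '))
b = int(input('Digite o segundo número: '))
print(math.gcd(a, b))
'''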
# Exercise 21
'''
n = int(input('Digite n: '))
a = int(input('Digite a: '))
b = int(input('Digite b: '))
i = 2
while i <= n+1:
if i%a!=0 and i%b!=0:
n = n+1
if i%a == 0 or i%b == 0:
print(i)
i = i +1
'''
# Exercise 22
'''
while(True):
p = int(input(' Digite p: '))
q = int(input(' Digite q: '))
if q >= p :
break
if str(p) in str(q):
print('S')
else:
print('N')
'''
# Factorial
'''
while(True):
while(True):
n = int(input('Digite um numero positivo: '))
if n >=0:
break
f = 1
for i in range(2,n+1,1):
f = f*i
print('%d!=%d' %(n,f))
opt = input('deseja continuar? [S ou N]')
if opt == 'N':
print('\n\nATE BREVE!')
break
'''
| [
"[email protected]"
]
| |
7e74abaeb0078b3ee92242a7cc866c13d76bc37f | 81982a278946fab96d74e3f711c937647faec036 | /Trabalhos/a1.py | 32584fb6bf8a53c7a44f632933f6fc2cdb41d8aa | []
| no_license | juanengml/Prog1UTFPR | 3f1b71888a0883a4e12922a0c09cce622ca27458 | aca289ffece71b4ca4339fa8779a1d2a9076aecc | refs/heads/master | 2021-06-23T09:58:37.167188 | 2019-06-14T01:21:51 | 2019-06-14T01:21:51 | 145,451,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | # Write a program that reads two 3x3 matrices and prints the result of multiplying these matrices.
import numpy as np
a = np.matrix('1 2 3 ; 4 5 6; 7 8 9')
b = np.matrix('1 2 3 ; 4 5 6; 7 8 9')
print(np.dot(a, b))
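# Sketch of the input-reading variant the exercise statement asks for
# (assumption: each matrix is typed on one line, rows separated by ';',
# e.g. "1 2 3; 4 5 6; 7 8 9"):
# a = np.matrix(input('Matrix A: '))
# b = np.matrix(input('Matrix B: '))
# print(np.dot(a, b))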
| [
"[email protected]"
]
| |
4d56eaa289df490614282d92d296eda8adb8a58a | c43c88015f9498aed5f3b5a339d245c31781444e | /Free/plm/report/component_report.py | cbebbbcac4378084e142a272266da61b6cff36e8 | []
| no_license | mulaudzicalvin/perpul | 65106d41d5197fea17628ac1a7fa7e581d29d75e | 00e3a5ee1771d2e09a48460ca23c2e9c2ef507d6 | refs/heads/master | 2020-03-09T18:39:33.131420 | 2018-02-05T05:17:36 | 2018-02-05T05:17:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,346 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OmniaSolutions, Your own solutions
# Copyright (C) 2010 OmniaSolutions (<http://omniasolutions.eu>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from .book_collector import BookCollector
from .book_collector import packDocuments
from datetime import datetime
from dateutil import tz
import base64
from flectra import _
from flectra import api
from flectra import models
from flectra.exceptions import UserError
from flectra.addons.plm.report.book_collector import getBottomMessage
def getEmptyDocument():
return base64.b64decode(b"""JVBERi0xLjQKJcOkw7zDtsOfCjIgMCBvYmoKPDwvTGVuZ3RoIDMgMCBSL0ZpbHRlci9GbGF0ZURl
Y29kZT4+CnN0cmVhbQp4nG2NTQvCMBBE7/kVexYSZ2M2aSEErG0P3goBD+LNj5tgL/59t/QgiCzM
Djx4A8f0Ni8CQZu04jw1gV1D882cNvRcmd78MF01EhWlGFyieqXtyOQ91fs5gwtneOyK1b9mgCAi
lks9mqGa6a+Lgw7/uJKKBM1ibIv1GfulShHJ6EpKGQf0GDCiLzZkhmLm785EH25LLk8KZW5kc3Ry
ZWFtCmVuZG9iagoKMyAwIG9iagoxNTAKZW5kb2JqCgo1IDAgb2JqCjw8L0xlbmd0aCA2IDAgUi9G
aWx0ZXIvRmxhdGVEZWNvZGUvTGVuZ3RoMSAxMDU5Mj4+CnN0cmVhbQp4nOV5f1hb15XgvffptwR6
EkIICdCTH49fkhBG/LBjMM8gQAQMGIwjsAHJSPxwDMKS7MROGpM28Q9S1+40zSSTr60nm2nTjSd+
OJ4Jme7GpHG6O9O69X7pzkybeuLOZOfrbEOdzib90sSGPfdJYDuTdubb2e/bP/YhvXfOueece+65
55x7HkolDsaQAc0hBolj05FZsVzgEEI/QAibxw6luLPi/TzA1xEiqvHZieky/9u/Qoj5BUJq5cT+
w+M/P1z+EkK69xEyPz8Zi0SbXvrreoQKnwQddZNACK0cVgMO+lDx5HTqwT7Vww2AAz8K7o+PRa5X
/NdyhIo2AV4yHXlwdrfyUQbwMODcTGQ69j9KfgXzF80hpJmdjSdTb6DiVWCl8txsIja7f+FUFKFS
FuzbDjQMf/QyAKiiOGEUSpVao9XpDVnZRtaE/j+7lKdQLoorG5Exc7/rYs6hfHQBodX3KHb7vtK1
+vH/TSs08h2bsQu9iX6L/Zigh3EOGkJRFEcPo3nsv5Mbb8FdMPY59DMYn0GnsPqztWIXLsFZoGFI
5vscuoL+/jMZD6DX0Pt3zwG0p9Dz6Byl4zbQ9SR+A3fhKOigmrvgtuezVJF9cDsN3wfhPk1whnoD
MuZv0R7yGnkXnUEvZezLRu/hADw7wcJXMgo6Uf+/ULoIVujQBDqMjoG0fCkbb/4UaVf/F+i6F70O
hA70EDq1LvERludgdGh1nXbfuo1R8gTOwSXo6+gjFFCa8EWExNbB0MDO/r4dvT3d27s67+0Itre1
Blqat4lNWxsbttyzeVN9Xe3GKl+l11NWWiIU8xtcTpvFxBqzs/Q6rUatUioYWK2nlW8Lc1JJWFKU
8MGgl+J8BAiROwhhiQNS2908EheW2bi7OUXgHP8Up5jmFNc5Mcs1oAavh2vlOelKgOcW8dCOEMCn
AvwgJy3L8HYZVpTISBYgLhdIcK22yQAn4TDXKrUdmpxvDQdA34Je18K3xHReD1rQ6QHUAySV8bML
uGwrlgFS1nrPAkGaLDqtxAitkajUuyPUGnC4XINeT4eUzQfkIdQiq5RULZJaVslNUdPRE9yCZ2n+
i4ss2ht2G6J8NLInJDERkJ1nWufnj0smt1TOB6TyI+/aYOUxycMHWiU31drZtz5P5+0psaQUWJ6b
/xDBcvjl9+6mRDIUlcB+iCjYBu6dn2/jubb58HxkcXVuL8+x/PyCwTA/2woeRr0hkFpc/YsnHFLb
FwclNjyJ78kstq2vU8rZsTskEaGNm4wABT5NvGuTw2UaXOPp/V3DCBwB7gCfulx04U8simgvINLc
jlAa59BexwUk+tyDEgnTkaW1kdwBOjK3NrIuHuZhNzv7Q/OSQuiI8q3g4yci0txeiKd9dCt4Vsr+
jcPFz5tN3GbfoMzLgVUd0SlOUpaAW0DqTgGIFCoyz8pI9m/Sj2UHTFBiMnObeVBD9bTyreHM59Ck
DRRwXo8UdKe3fmdIEgMAiJHMHrUuVPlAIhKGLZoKyNsn+fhZycI3r+8nNat1qj8ki2TEJEuLhMJj
GSnJ1xqgM3Ot8+FA2gSqi98RehX5V68v1HCOl/2oBg0GKLO1BeKqpHU+FB2XnGFHFDJtnAs5XJI4
CBs8yIdigzTQwEPl12E6lzyjRFp2hjr7+c4dQ6FNGUPSA1SdQmj9lBo+5EirgZCTNIKGCxEHMwiM
LBC4NgD45ga4S2pBA18WHC5Taag2N3Ah7EBr3GCGVM61xgIZPorfpVRJw6kluKZNRVHQ0xJ0uAZd
6cvrITDMZSYGCQ11anBtiBGgEgCNgBqZRH1pozHPhfgYP8hPcpLYG6Jro+6RvZxxhuzzzF7tvAu7
w1ngJuSC4TWEOlNqczvudK7ULuPraPBTwx1rw9y8hu/sn6fK+YxCBJZ3SIiGsLjJ5JCzn+Yz3xaB
JIaMlvN5fkEUaS5P0rSd5zui83x/qEHmhgryOccROpcZdeLOnc1eDxSz5gUen9ixIOIT/UOhV1lo
oU7sDF0gmLSEmwcXimEs9CoHZ4VMJZRKiRThKEI19QGikfkdr4oIzcmjCpkg42OLGMk0zRoNo7FF
kqaxazQCNEWaJso0esEu2SbBx1C/W7ko3Z+HByfnw4M0xpEVPAIfLGF+K3iH37qAicog6fhYs6Tn
mym9idKb0nQVpashMrAVez1H5tlW/kObVz4cUYCekMoB6HjVqHIBI1/DBbUCLVcvqJQ/a7jAEADR
AkPJSkq+oFbhmw0XMKX7TS6T4DK5AoRbKcZPr0wqBz5+MaCgPQNG06vvKQqUXWgLekEsU/gsPlLj
xX4Ptniw0oPtZqw14yF2H0sYZ5FoMAaLijaOGBZXl8R2vSmIDKyBEIOhMdfX2NTY08hAZ1E2gnJx
7qBhykACZamyx8o+KFMYymzq0c9jnMK4FeN6jHFdDj96nxM7bQrU5B9ehi9cy+bNm4eHsW942e+7
Mmwyb/aNDruXq33uKxur0OgwHk5fOf68IsZfvZXU1lSqamvq/NVFJM/El1ZifkM2ybUUqXIt2YTf
UElKp0PeXLF3t697JuBsjM0dn4s1/voXG2PhUAlsXKI3ENlasDX66PFHo1s3HXnt+La55N4N+Nm/
sZVzZn7rfTUN3ZvcVVtHT4wtvKo2sJqV1xe5CkdVoKIuWFOxsWn0RGTv1+MNBos9C/ane/U95gLz
JjIhAZ0UK0+y+LgBM8c1+ASDFciCiBa1K8wWc7GZ0ZvNpYrSx0ovlzJNl0t/XEpKwZ8vb24M0qdY
UeYOXi+FjSsVS8OlS6VXS5XfKMViKS5oF3W9uqs6RpfXY2RdPUoralpuWqbuGh4+MHrA7U6MDIPP
RoaXR4Y3Vg0Pj4IvM46ordlK/NVWZU0lyfiIUAcyFzbu+9ah+HNT1dX7/iT5zn9bedvAbfJ66gp1
usI6j3cTZ8BvP7j4yDbxkVcffOCVh8Xf/jryB3urqvb+QWTvV6LV1dGvyA0jgncnJsL8FbKhr4k5
+VasseL8HKzJwUo2FwJHs7j6vpilNQQ1j6l/rCZqtd1OF1q8sSYYtmPC2kV7r50J22ftZ+ySfcl+
1a5CxnZkYS3EQj1TuCFIn6LNbA1acrttRqOl25ybjVa1WCuq51QsarJfsV8Zrq5uWq6mDnG7h5er
ZZeYNvuG/Rur3KNy4OCtzBYMgQIeUJvycl219abSWhe+Yt++e6K2btMWV3dXu/0Pb/3wyBH8BPmn
wu62qpVvP8I6XOytN23NzTbyka1ZXq9lpY15D95CeOISi/KN2JaFS3R1OlKirdOS+9QTaqJU56pJ
QImVsPQ/h5UHFFhBvdACsBXjIMJWNVarVfCOAYnap9oOSjFG2uz8bJKtBirWIqxBWWwW0WUJWnu+
vdzOGBh7fr4KU1esQiZilVAitAnHhO8JSqsM7hLGAf0mEP5R0KqA8NcC89Yu4Sn61ArlAvmtgH8p
4FeFvxTIiwI+LJwUyP0C7hAGBeIWsFbIFwiM/6XwE4E8K7woEJljt3C/QMqFzUKHwDgoF37un4Tf
CuRtAX9boLqYZwU8LhyCqRlB/MpTQb2AfyJQFuaKgPGLwncEclrAMFGPgIlR8AnyrUeIC6eF88KP
hBuCJuEUmoRR4ajwDeGS8I6gvg2uCipBjM0GkcAKosDUzwkYCZyYRxEAhLAwJ5wVloTrwvuCRi1Q
51gLy4LgxOKiXntOQZZGhbv1OgVjhELj9y9XQ5T4l00AYvfI8AFIGffwgREaLHDBLeFO0GskU2uG
D6xdlHYg4XP7/T7/6Mgw+72R4WrT5pFhUDV8nHW72cvscc0SFCpap9zwl76UGEIsV+1IP3A2I9el
WihhmGZgPf7hyolm/PPZa2/OYH9g5RTXvGOytaBcEKwN7mw+v7yx2m0rZOIQf/ZbHxEdPC03431f
nLgHYkz5z4/oDRXto7UQk0Or/4C/iX0oBzWJBYdMx0zkUNaxLHJYfVJNDjMnGWJ8alY5pyRK6iG9
LiuoVOZadM9S/mWTGW/2Qc7QogEWr9eLIkILKUZ8g9du9zbwGxrps7HwDngDHaM5UQpvf+fgXT0H
JcSWCTOO5WB1Tl5OaQ5zTIO1Gpxjxn0adY5aM2TOsZhzNGa1chih3N5cXGfGZu1wthIZRzXEzGiz
R3PUBjPdLchmecewD9KZlrZlP/v6cYWbxfL9MnU2TevhtHNNPL4jqxVfuIivnTt4882LK/y5c/j7
5MuKn9o7OuyflCjqblY6OjocikP2jpvfgtptX+lScFC7nciLnhPLbYXYZseKPEseOabHCvioHlPB
SxUsox1BI8Nxs9wcpzRz1JVecCXH+ZAPjmTWx/mYLb2+qz5S5RN9vb5Z31mf5FvyqcuRtf2GAzuo
RJ42O+go6C4yOqzdNoe7l82yCr1ICVXMv0w/mRoGJax6mMYcrNu9XtMzZQxqefqMKyktYgrx7VXL
BV6lNln91XX4GWttU1fl5Uv+fc/F6yY3YQbj+Vs3xifw53C0oKqJ99/n2hEa2sU8bHTkGP7xo8R3
Hu/IytaXuMuNV2i9U5yzNa/UTH95gLeyt7bkfk/uQ/qhX9ijPIUKkBudEQvy7FirP6kn8yqshU9+
uwXRTJ0VGLuci+3arKAgeJEXIy/r5bzgHu9VL6nyit5e76z3rFfyLnnVqBAXths1WNT0aq5qGE1+
d0FubnceKu8xZrEbwDnWdefIB94BOPFuuwZiZGOVnLACTa+SdPDmVTKNNC5oSGD5vKurN9EWQUWu
5XnrWyuv/qB66o/j03FCMA7jRydWnlo5VuRv5v3bS8qD3qFoLfXKjY8Si59vc+jLvB7jr2zNH4Nj
8NszXxksseWQy3rdm/J5YATHvKZ8DvH4nLjavgG3u7DSiTUqm4poGaiGq58I+KJwWSAO4QnhWYEx
Cvhd4QOBPCTQKsvsFPAWIV0nn6Rgp0AUgkUgP/y+8IlAnhcuCkQPguRdActVeacQpapoKb8s/JiW
8rQklHAFsHxfeFv4pcA8KTxPq/pu4SGQVcgKb0A1BVX3p0lumOhZ4P1EUDoFDGZOfZCefTet4+Fo
0CdgitwvG60UxJ2hYJNcwZ1QquNQoc8Lqs03BCyIrR3BqwK+REvzy2cEIu98s7sy+CM64VkBHxUw
rdPv06U7wXYHa7Qbi9qZqzzm+eIivtuViwp7GLvR0isaWSfrYxl2TovhXP/BsJ3uOz3ZTdjvG66G
rPD54VSnhZmWatr0wEEv1+0RuV7fUb7TdTuNZeAEzSOo34Dn8LX1a4UuL3etfyzE/lw+XUe+9qUv
1URODVp93pJssVDrzLMKduOlS8/cuj7OBFpLY2NfHatmlGrF9Wmt0dEYaTs5ectKE4fmCeSBclEZ
hNgwo0vikaA6pCb3qXCJqk1FFCqLipiyjazRqFCy5izookMGrFLjPgOrYg1DapUFeoNOFiOWZQlS
s2qigBurYBiNBtoiziJaei1zljMWyaKusswCeNZy1XLdomKyssNmkwmzSoXRoFaM6jAtpOlDD5IF
IJo9Jr/PnLc532fzgR/p4cWi148roaDiYfb40hI42uS3+QDaWCX33AJ1BnalywzjYjDjwudWJqP4
Gt6A3x6/9eKzc7duPYQfvYYvdtC6+u4nBbS+4t0rf6Kw3zov5wiCukH/c5uNVsStmwx4kx7L7ZKg
rqUtYZ6aCKpaFVGr8lQkRPAkfhATBmtwH2HIdq3GotVqdMiq/aaWaBdXr7+cDS8fNMqC+uwgg7RI
yzBKRfblbKLLZp9lv8O+zTIKtpgNsFH2MXBGBngeDul3WXV9DcCXKYcFyCnKwVJlJXxp8BMWUy4S
ZqEFZzlWZHtZhVqp0yCm26A0ajGhtcifdiqGZpu2lm7aRxw4wF6GdiBvcyN1LPSgJj+49rj7MoYO
dHStnxjWYv7OloD5TysnDq88QFuAX/8EWgBm182nmHH5uP8FscMTYqkZ3i1+zryOylAtelE8XG7F
OqvdSu6FY9uMK8xYb3aYSR3TxhAFY2GKGcauU7XP1s7VElTL1p6pvVqr0NbWWupRPe7srYd+Xazv
rQ/Xz9Vfr1eJMsBs4Hl6fhFfu2WDCDV7w4ai8m67HVXv0Bmtqm5tblE3YuX3Ddo9wUlMSy+cy8PQ
DcldlEl+8ziw7PevHVNwUNEOgqZSabrtocnWhGtrSqAIGzFfu5XJUWczuRZ6WNWTDds/P1IzsvJN
a46/ua+6Lx4oCiSf2XUk0F6/u6KkubJ/1+jhnR7RndtQVRXgmNftjdGOW1+zNQezuIKcis6Jhkiq
yUKYk/33OXMP/oPaoFOt5DHE4uu6Z6AXXhZoTsKrNHHA2aVFhSgl7rDc26s7oyNXdVinVtigQ8+D
F1PUDkex8qqSUSqLnKKz10nCzrNOyck4nVhyLjmvOxmfs8l52skY8535JL/fiMA9RqTsZnKRfHY3
LdMOEIoNdQl9EfFDJPz4QAIOKeGOg/p2g0Vdgy9OrJjH4Rha+e+2ik0ct6ncZiunzwrb0Nq7Bw7e
SQY2mldlsCarvKZRkX+G4GcwzrtrFTq9qO/Vk7D+rF7SM3oa54Xw5uXUY0m/pL+uZ3z6Jv1pPaPq
1qA7VkGX4F5bA2xpInGH8eczxsqmKX4ivxbRPgpynANbWFSB/kgsfbAYH+PwY06c5yx1EjgRH2cw
2oC1G9oRsHBoFs0hpVlO443QRSHkQR6MPKyH80Cb4LnqIVUe0dPrmfWc9UieJY86V9turMA3KlYr
SAWVMkEnVWHoKbXalL2FrKk7B8kR6k/3TweWf1f/tNYkpVtwuUmwqNS0q1rbkJJS8om9uqOyZKiQ
Gyiru9dnuXUU1qtUWhvb7+Wnnhz1bE78aSr84eP41+Mn+3mT6dZGjaZu6o+Yb+VtW/mWMFllLsjV
b05JDyb+88mu0kLwTdvqMhNi3oDIK0PTYvcD+cfzySH9MT0xFjuLCcueUWFVG1dcVSwWny2WipeK
VcXFFb6KporRinjF0YrzFZcqflRxo0LLq9uvQiOp6ikuRqasHVZrUU86MW9doQ0yliNO3rThap+8
ZJqFhdiVm+mBctdysS6P/ifAD87Av+o/NuJfydFvjA1umfXntfXv8R4+N1391l8VVzp0P1XmlDFv
lEX++HAf2/LQ6Cazfnt2QW6W+Mjig7/551hF10xLy0xXhVzjETb9kv8bVDZqbPgQOdO/n731Pwdm
bv9otNIGp+JzwKsBn2R+cEJI7Vppvf1LUObX0NuXmbyHAookmlYg1M2cQvXwtBA55NCQ8r+gUhiz
w7ef/EdkBNwC4wi+zWQz8sK3jI6DXBuiMXYVd+PvMdBhML3MS4qA4ofKv1WFVSvq5zQGTZ/murYv
Y4EZNWRshIMA+dAQQsyq4j3EyNQCvGvdzvC6zXBiA4YzUgoI8TTMQLinMrAC5aInM7ASzsNvZ2AV
9AyvZGA1OoK+n4E1yIJbM7AWZePBDKwHGybWfzmuxF/IwFkojhcycDbaSjiYHSu0gC2RkQyMEccU
ZWB4s2G2ZmAGiUxbBlagCuZYBlaiAkbKwCpUxvwgA6vRB8wHGVgDfv5FBtaiAnihScN6tEnpycAG
tEcZzcBZ6O+USxk4Gz2s+npLfPZwYmpiMsWVjZVz1VVV9VxfLMoFIykP1zEzVslt27+fkxmSXCKW
jCUOxaKVXFdHc2vftp0dPd3cVJKLcKlEJBqbjiTu5+Ljd8t3Te2NJSKpqfgM1x9LTI03x/dHtyXH
YjPRWILzcp8e5uj4ZxJ3xRJJStlYWVVfWXObRebwfkrsXzEKVjIxlUzFEkCcmuEGKvsrud5IKjaT
4iIzUW7numDP+PjUWEwmjsUSqQgwx1OTYPm+g4mpZHRqjM6WrFxfUEs8MRvP2JWKHYpx2yOpVCwZ
n5lMpWbv8fkeeOCBykiGeQx4K8fi077fN5Y6PBuLxpJTEzOw/MrJ1PT+LjBoJgmGH5RnBGvudGJb
fAY2an+ax8MlYzGOqk+C/vFYFEybTcT3xcZSlfHEhO+BqfunfGl9UzMTvttqqJbMPP8+adSC4pCP
h1ECTaEJNAn5yEEtHkPl8KxGVfBXD1AfiqEoPIMoAhwegDrQDHBVArQN7Yc/7g4NSRmLwTMGz0Oy
LOXsAqlm1AratqGdAPegbqBOyfwRuRYk4BkF/ml4JtD9QIuj8d87fxfI75XnoSNTwD8Do/0yZQpk
m4GyH2S3wSxjQJ2R9SeAxytb9PuluXX5fzvnLpmWXOfZCFZSL1aims/UcluH91+Z7d/nqfSeTMha
UrLuNOeUrHsAOPplrl5ZknoqJc82I3Pt/IwZe2DGcZCnfr3NOSbrTgGe1hwHeDLj833ooLzWJHBS
ubW1JWHmf7lDNDYTEJ3xT/mLWndInnO7TE/JsUbHJmVsFt0DJ5MPPSD/VQLP3ZrHMnorZWgaOP9P
5VKQObOyH2Pyjk8Ab3r3K2Wd07CbXRkPzcj5QD108I41pn3zuyKxTX6mM2r/XXroztInlV2zPpmx
f1yeJ+21WbjHwe8x2duVMnVCXuMU7OEUQHfaR3dsIkP7tDVrtty9nv+XczPpRmPVhd5An3GJG7V/
f73W+Y7/2sDf+X82UHWt99rcNema4hpmBn7GWJ3xt/DoWzfeIj1v4abvYud33/kuof3zf1jSZbX1
Xgpfmr3EvNZe4USL2PfK6CunXzn/yjuvKOOfYOfHNz4m8Y+PfkzEj3H8z7DxovMiiV/Ezpd7Xl59
mXnpXLPT+MLRF8j5F/DsC7jpBcw+zT1d9TQz+zT+w6cKnL6vNn2VfPnxqPP8l/AXe5xO9Hj4cXLm
cXzmC/jzgLIHuYMkFV51JkdXnbMwfxy+M+2rzny/bUDtZwZUzKqT2nl+pdLftrQXX4/g8GiNcxRk
nTd9N79xkzl/E6MRLI5os9qO7jm95xt7mN1DbqdvCKOh8BA5M/T+EHEO4Ry/eUAJrlCATiPjZJqY
HibOnGZUmv57Xc5eUBfvPtp9upvZ3s47723nnMYgFoN6Y1sbGGRsd7aTgqBjwOrPHTBh4wDrNw4Q
jAawHw34jKtGYjSOGo8a6Q8MiMxZsRIv4jMLO/vd7s5F9Wpfp6Tu3S3hE5LQT+/ijiFJdUJCA0O7
QwsYf2nw8VOnUHNhp1TdH5LChYOdUhQAkQJzALCFC1bUPJhMptzyhZNud8qN4OseScp4MnUQsFQy
hdzuZFLmgS8gKQw4UJPuJECQWVRJEidTFEiiJIyjJP2mgHaQSlNR2wjE0/8GPq6LbQplbmRzdHJl
YW0KZW5kb2JqCgo2IDAgb2JqCjY5MDUKZW5kb2JqCgo3IDAgb2JqCjw8L1R5cGUvRm9udERlc2Ny
aXB0b3IvRm9udE5hbWUvQkFBQUFBK0xpYmVyYXRpb25TZXJpZi1Cb2xkCi9GbGFncyA0Ci9Gb250
QkJveFstMTgyIC0zMDMgMTA4MyAxMDA3XS9JdGFsaWNBbmdsZSAwCi9Bc2NlbnQgODkxCi9EZXNj
ZW50IC0yMTYKL0NhcEhlaWdodCAxMDA3Ci9TdGVtViA4MAovRm9udEZpbGUyIDUgMCBSCj4+CmVu
ZG9iagoKOCAwIG9iago8PC9MZW5ndGggMzAxL0ZpbHRlci9GbGF0ZURlY29kZT4+CnN0cmVhbQp4
nF2Ry26DMBBF9/4KL9tFhE0S0kgIKSVBYtGHSvsBxB5SS8VYxlnw97Vn0lbqAnTGc689j6xuj601
IXv1k+og8MFY7WGerl4BP8PFWCZzro0Ktwj/auwdy6K3W+YAY2uHqSxZ9hZzc/ALvzvo6Qz3LHvx
GryxF373UXcx7q7OfcEINnDBqoprGOI9T7177kfI0LVqdUybsKyi5U/wvjjgOcaSSlGThtn1Cnxv
L8BKISpeNk3FwOp/Obkjy3lQn72PUhmlQmzXVeQcuWgSr5F3eeIN8T7xFjkXiQviU+Id8gb5gfSo
2dOdm8QH4iLxI+ll4pr0eH4kDZ6fiPHdhriOLAVxqk1S/cUWm711ldpOe/kZJ1dX7+MocXk4wzQ9
Y4H/LthNLtnw+wb8aJMuCmVuZHN0cmVhbQplbmRvYmoKCjkgMCBvYmoKPDwvVHlwZS9Gb250L1N1
YnR5cGUvVHJ1ZVR5cGUvQmFzZUZvbnQvQkFBQUFBK0xpYmVyYXRpb25TZXJpZi1Cb2xkCi9GaXJz
dENoYXIgMAovTGFzdENoYXIgMTcKL1dpZHRoc1szNjUgNTU2IDUwMCA0NDMgNTAwIDI1MCAyNTAg
NzIyIDU1NiA1NTYgMzMzIDcyMiA1MDAgNTAwIDI3NyAyNzcKNTU2IDQ0MyBdCi9Gb250RGVzY3Jp
cHRvciA3IDAgUgovVG9Vbmljb2RlIDggMCBSCj4+CmVuZG9iagoKMTAgMCBvYmoKPDwvRjEgOSAw
IFIKPj4KZW5kb2JqCgoxMSAwIG9iago8PC9Gb250IDEwIDAgUgovUHJvY1NldFsvUERGL1RleHRd
Cj4+CmVuZG9iagoKMSAwIG9iago8PC9UeXBlL1BhZ2UvUGFyZW50IDQgMCBSL1Jlc291cmNlcyAx
MSAwIFIvTWVkaWFCb3hbMCAwIDU5NSA4NDJdL0dyb3VwPDwvUy9UcmFuc3BhcmVuY3kvQ1MvRGV2
aWNlUkdCL0kgdHJ1ZT4+L0NvbnRlbnRzIDIgMCBSPj4KZW5kb2JqCgo0IDAgb2JqCjw8L1R5cGUv
UGFnZXMKL1Jlc291cmNlcyAxMSAwIFIKL01lZGlhQm94WyAwIDAgNTk1IDg0MiBdCi9LaWRzWyAx
IDAgUiBdCi9Db3VudCAxPj4KZW5kb2JqCgoxMiAwIG9iago8PC9UeXBlL0NhdGFsb2cvUGFnZXMg
NCAwIFIKL09wZW5BY3Rpb25bMSAwIFIgL1hZWiBudWxsIG51bGwgMF0KL0xhbmcoZW4tR0IpCj4+
CmVuZG9iagoKMTMgMCBvYmoKPDwvQXV0aG9yPEZFRkYwMDREMDA2MTAwNzQwMDc0MDA2NTAwNkYw
MDIwMDA0MjAwNkYwMDczMDA2MzAwNkYwMDZDMDA2Rj4KL0NyZWF0b3I8RkVGRjAwNTcwMDcyMDA2
OTAwNzQwMDY1MDA3Mj4KL1Byb2R1Y2VyPEZFRkYwMDRDMDA2OTAwNjIwMDcyMDA2NTAwNEYwMDY2
MDA2NjAwNjkwMDYzMDA2NTAwMjAwMDM1MDAyRTAwMzI+Ci9DcmVhdGlvbkRhdGUoRDoyMDE3MTAy
NDE3MTgwNiswMicwMCcpPj4KZW5kb2JqCgp4cmVmCjAgMTQKMDAwMDAwMDAwMCA2NTUzNSBmIAow
MDAwMDA4MTYzIDAwMDAwIG4gCjAwMDAwMDAwMTkgMDAwMDAgbiAKMDAwMDAwMDI0MCAwMDAwMCBu
IAowMDAwMDA4MzA2IDAwMDAwIG4gCjAwMDAwMDAyNjAgMDAwMDAgbiAKMDAwMDAwNzI1MCAwMDAw
MCBuIAowMDAwMDA3MjcxIDAwMDAwIG4gCjAwMDAwMDc0NzMgMDAwMDAgbiAKMDAwMDAwNzg0MyAw
MDAwMCBuIAowMDAwMDA4MDc2IDAwMDAwIG4gCjAwMDAwMDgxMDggMDAwMDAgbiAKMDAwMDAwODQw
NSAwMDAwMCBuIAowMDAwMDA4NTAyIDAwMDAwIG4gCnRyYWlsZXIKPDwvU2l6ZSAxNC9Sb290IDEy
IDAgUgovSW5mbyAxMyAwIFIKL0lEIFsgPEMzRDZBMzFBMTcxNkU1QjAyMjkxN0Y4QzkxQUM1MDk3
Pgo8QzNENkEzMUExNzE2RTVCMDIyOTE3RjhDOTFBQzUwOTc+IF0KL0RvY0NoZWNrc3VtIC8wQjMy
RjYxNzJGNDFCNzYwNjRBM0NDQjFEMTgxOTFCQgo+PgpzdGFydHhyZWYKODc0NwolJUVPRgo=""")
def commonInfos(env):
    # Shared report setup: the documents' filestore location plus a
    # BookCollector whose page footer carries the current user's message.
    docRepository = env['plm.document']._get_filestore()
user = env['res.users'].browse(env.uid)
msg = getBottomMessage(user, env.context)
mainBookCollector = BookCollector(jumpFirst=False,
customTest=(False, msg),
bottomHeight=10)
return docRepository, mainBookCollector
class ReportProductPdf(models.AbstractModel):
_name = 'report.plm.product_pdf'
@api.model
def render_qweb_pdf(self, products=None, level=0, checkState=False):
docRepository, mainBookCollector = commonInfos(self.env)
documents = []
        def getDocument(products, check):
            # Collect the documents linked to the given products; when
            # ``check`` is true, keep only 'released'/'undermodify' revisions.
            out = []
for product in products:
for doc in product.linkeddocuments:
if check:
if doc.state in ['released', 'undermodify']:
out.append(doc)
continue
out.append(doc)
return out
for product in products:
documents.extend(getDocument(product, checkState))
if level > -1:
for childProduct in product._getChildrenBom(product, level):
documents.extend(getDocument(childProduct, checkState))
if len(documents) == 0:
content = getEmptyDocument()
else:
documentContent = packDocuments(docRepository,
documents,
mainBookCollector)
content = documentContent[0]
        # Return the merged PDF as a base64 data-URI string.
        byteString = b"data:application/pdf;base64," + base64.b64encode(content)
return byteString.decode('UTF-8')
@api.model
def get_report_values(self, docids, data=None):
products = self.env['product.product'].browse(docids)
return {'docs': products,
'get_content': self.render_qweb_pdf}
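# Illustrative usage sketch (hypothetical product ids, not part of the
# module): the dict returned by get_report_values feeds the QWeb template,
# which can call back into render_qweb_pdf through 'get_content', e.g.
#     vals = env['report.plm.product_pdf'].get_report_values(product_ids)
#     pdf_data_uri = vals['get_content'](vals['docs'])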
class ReportOneLevelProductPdf(ReportProductPdf):
_name = 'report.plm.one_product_pdf'
class ReportAllLevelProductPdf(ReportProductPdf):
_name = 'report.plm.all_product_pdf'
class ReportProductionProductPdf(ReportProductPdf):
_name = 'report.plm.product_production_pdf_latest'
class ReportProductionOneProductPdf(ReportProductPdf):
_name = 'report.plm.product_production_one_pdf_latest'
class ReportProductionAllProductPdf(ReportProductPdf):
_name = 'report.plm.product_production_all_pdf_latest'
| [
"[email protected]"
]
| |
7235263640152d56990e710466dad45c4b7f817f | 162e0e4791188bd44f6ce5225ff3b1f0b1aa0b0d | /mrex/metrics/tests/test_common.py | 27be6f705fd1d384f9a902c6518ff891fc0eb1e9 | []
| no_license | testsleeekGithub/trex | 2af21fa95f9372f153dbe91941a93937480f4e2f | 9d27a9b44d814ede3996a37365d63814214260ae | refs/heads/master | 2020-08-01T11:47:43.926750 | 2019-11-06T06:47:19 | 2019-11-06T06:47:19 | 210,987,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,155 | py |
from functools import partial
from itertools import product
from itertools import chain
from itertools import permutations
import numpy as np
import scipy.sparse as sp
import pytest
from mrex.datasets import make_multilabel_classification
from mrex.preprocessing import LabelBinarizer
from mrex.utils.multiclass import type_of_target
from mrex.utils.validation import _num_samples
from mrex.utils.validation import check_random_state
from mrex.utils import shuffle
from mrex.utils.testing import assert_allclose
from mrex.utils.testing import assert_almost_equal
from mrex.utils.testing import assert_array_equal
from mrex.utils.testing import assert_array_less
from mrex.utils.testing import assert_raise_message
from mrex.utils.testing import assert_raises
from mrex.utils.testing import ignore_warnings
from mrex.metrics import accuracy_score
from mrex.metrics import average_precision_score
from mrex.metrics import balanced_accuracy_score
from mrex.metrics import brier_score_loss
from mrex.metrics import cohen_kappa_score
from mrex.metrics import confusion_matrix
from mrex.metrics import coverage_error
from mrex.metrics import explained_variance_score
from mrex.metrics import f1_score
from mrex.metrics import fbeta_score
from mrex.metrics import hamming_loss
from mrex.metrics import hinge_loss
from mrex.metrics import jaccard_score
from mrex.metrics import label_ranking_average_precision_score
from mrex.metrics import label_ranking_loss
from mrex.metrics import log_loss
from mrex.metrics import max_error
from mrex.metrics import matthews_corrcoef
from mrex.metrics import mean_absolute_error
from mrex.metrics import mean_squared_error
from mrex.metrics import mean_tweedie_deviance
from mrex.metrics import mean_poisson_deviance
from mrex.metrics import mean_gamma_deviance
from mrex.metrics import median_absolute_error
from mrex.metrics import multilabel_confusion_matrix
from mrex.metrics import precision_recall_curve
from mrex.metrics import precision_score
from mrex.metrics import r2_score
from mrex.metrics import recall_score
from mrex.metrics import roc_auc_score
from mrex.metrics import roc_curve
from mrex.metrics import zero_one_loss
from mrex.metrics import ndcg_score
from mrex.metrics import dcg_score
from mrex.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of data structures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance towards several input layouts.
#
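# For instance (illustrative sketch, not one of the test definitions): every
# entry maps a readable name to a callable with a (y_true, y_pred)-style
# signature, so a generic test can simply do
#     metric = ALL_METRICS["accuracy_score"]
#     score = metric([0, 1, 1], [0, 1, 0])   # -> 2/3
#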
REGRESSION_METRICS = {
"max_error": max_error,
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": partial(r2_score, multioutput='variance_weighted'),
"mean_normal_deviance": partial(mean_tweedie_deviance, p=0),
"mean_poisson_deviance": mean_poisson_deviance,
"mean_gamma_deviance": mean_gamma_deviance,
"mean_compound_poisson_deviance":
partial(mean_tweedie_deviance, p=1.4),
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"balanced_accuracy_score": balanced_accuracy_score,
"adjusted_balanced_accuracy_score": partial(balanced_accuracy_score,
adjusted=True),
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
    # `confusion_matrix` returns absolute values and hence behaves
    # unnormalized. Naming it with an unnormalized_ prefix is necessary for
    # this module to skip sample_weight scaling checks which will fail for
    # unnormalized metrics.
"unnormalized_confusion_matrix": confusion_matrix,
"normalized_confusion_matrix": lambda *args, **kwargs: (
confusion_matrix(*args, **kwargs).astype('float') / confusion_matrix(
*args, **kwargs).sum(axis=1)[:, np.newaxis]
),
"unnormalized_multilabel_confusion_matrix": multilabel_confusion_matrix,
"unnormalized_multilabel_confusion_matrix_sample":
partial(multilabel_confusion_matrix, samplewise=True),
"hamming_loss": hamming_loss,
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"jaccard_score": jaccard_score,
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"weighted_jaccard_score": partial(jaccard_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"micro_jaccard_score": partial(jaccard_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"macro_jaccard_score": partial(jaccard_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"samples_jaccard_score": partial(jaccard_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
def precision_recall_curve_padded_thresholds(*args, **kwargs):
"""
The dimensions of precision-recall pairs and the threshold array as
returned by the precision_recall_curve do not match. See
    :func:`mrex.metrics.precision_recall_curve`.
    This prevents implicit conversion of the return value triple to a
    higher-dimensional np.array of dtype('float64') (it would be of
    dtype('object') instead). This again is needed for assert_array_equal
    to work correctly.
As a workaround we pad the threshold array with NaN values to match
the dimension of precision and recall arrays respectively.
"""
precision, recall, thresholds = precision_recall_curve(*args, **kwargs)
    pad_thresholds = len(precision) - len(thresholds)
return np.array([
precision,
recall,
np.pad(thresholds,
               pad_width=(0, pad_thresholds),
mode='constant',
constant_values=[np.nan])
])
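# Sketch of the invariant restored above: per the precision_recall_curve
# contract, len(precision) == len(recall) == len(thresholds) + 1, so a
# single trailing NaN pad lines the three rows up, e.g.
#     p, r, t = precision_recall_curve([0, 1], [0.1, 0.9])
#     assert len(p) == len(r) == len(t) + 1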
CURVE_METRICS = {
"roc_curve": roc_curve,
"precision_recall_curve": precision_recall_curve_padded_thresholds,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score, # default: average="macro"
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"ovr_roc_auc": partial(roc_auc_score, average="macro", multi_class='ovr'),
"weighted_ovr_roc_auc": partial(roc_auc_score, average="weighted",
multi_class='ovr'),
"ovo_roc_auc": partial(roc_auc_score, average="macro", multi_class='ovo'),
"weighted_ovo_roc_auc": partial(roc_auc_score, average="weighted",
multi_class='ovo'),
"partial_roc_auc": partial(roc_auc_score, max_fpr=0.5),
"average_precision_score":
average_precision_score, # default: average="macro"
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
"ndcg_score": ndcg_score,
"dcg_score": dcg_score
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
ALL_METRICS.update(CURVE_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input arguments y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = {
"samples_f0.5_score",
"samples_f1_score",
"samples_f2_score",
"samples_precision_score",
"samples_recall_score",
"samples_jaccard_score",
"coverage_error",
"unnormalized_multilabel_confusion_matrix_sample",
"label_ranking_loss",
"label_ranking_average_precision_score",
"dcg_score",
"ndcg_score"
}
# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = {
"brier_score_loss",
"micro_roc_auc",
"samples_roc_auc",
"partial_roc_auc",
"roc_auc_score",
"weighted_roc_auc",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"samples_average_precision_score",
"jaccard_score",
# with default average='binary', multiclass is prohibited
"precision_score",
"recall_score",
"f1_score",
"f2_score",
"f0.5_score",
# curves
"roc_curve",
"precision_recall_curve",
}
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_BINARY_MULTICLASS = METRIC_UNDEFINED_BINARY.union(
METRIC_UNDEFINED_MULTICLASS)
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = {
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"jaccard_score"
}
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = {
"roc_auc_score", "average_precision_score", "partial_roc_auc",
}
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = {
"roc_curve",
"precision_recall_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"jaccard_score",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"samples_average_precision_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
}
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that has a labels argument as well as a
# decision function argument. e.g hinge_loss
METRICS_WITH_LABELS = {
"unnormalized_confusion_matrix",
"normalized_confusion_matrix",
"roc_curve",
"precision_recall_curve",
"hamming_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"jaccard_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"weighted_jaccard_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"micro_jaccard_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"macro_jaccard_score",
"unnormalized_multilabel_confusion_matrix",
"unnormalized_multilabel_confusion_matrix_sample",
"cohen_kappa_score",
}
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = {
"accuracy_score",
"zero_one_loss",
}
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = {
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "partial_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"coverage_error", "label_ranking_loss",
"ndcg_score",
"dcg_score",
"label_ranking_average_precision_score",
}
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = {
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"zero_one_loss", "unnormalized_zero_one_loss",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"weighted_jaccard_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"macro_jaccard_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"micro_jaccard_score",
"unnormalized_multilabel_confusion_matrix",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
"samples_jaccard_score",
}
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = {
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
}
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = {
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"zero_one_loss", "unnormalized_zero_one_loss",
"micro_jaccard_score", "macro_jaccard_score",
"jaccard_score",
"samples_jaccard_score",
"f1_score", "micro_f1_score", "macro_f1_score",
"weighted_recall_score",
# P = R = F = accuracy in multiclass case
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error", "max_error",
"cohen_kappa_score", "mean_normal_deviance"
}
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = {
"balanced_accuracy_score",
"adjusted_balanced_accuracy_score",
"explained_variance_score",
"r2_score",
"unnormalized_confusion_matrix",
"normalized_confusion_matrix",
"roc_curve",
"precision_recall_curve",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_jaccard_score",
"unnormalized_multilabel_confusion_matrix",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss",
"mean_gamma_deviance", "mean_poisson_deviance",
"mean_compound_poisson_deviance"
}
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = {
"median_absolute_error",
"max_error",
"ovo_roc_auc",
"weighted_ovo_roc_auc"
}
METRICS_REQUIRE_POSITIVE_Y = {
"mean_poisson_deviance",
"mean_gamma_deviance",
"mean_compound_poisson_deviance",
}
def _require_positive_targets(y1, y2):
"""Make targets strictly positive"""
offset = abs(min(y1.min(), y2.min())) + 1
y1 += offset
y2 += offset
return y1, y2
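# Worked example of the shift above: y1 = [-2, 0, 3], y2 = [1, 4, -1] gives
# offset = |-2| + 1 = 3, hence y1 -> [1, 3, 6] and y2 -> [4, 7, 2], i.e.
# both strictly positive.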
def test_symmetry_consistency():
# We shouldn't forget any metrics
assert (SYMMETRIC_METRICS.union(
NOT_SYMMETRIC_METRICS, set(THRESHOLDED_METRICS),
METRIC_UNDEFINED_BINARY_MULTICLASS) ==
set(ALL_METRICS))
assert (
SYMMETRIC_METRICS.intersection(NOT_SYMMETRIC_METRICS) ==
set())
@pytest.mark.parametrize("name", sorted(SYMMETRIC_METRICS))
def test_symmetric_metric(name):
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
if name in METRICS_REQUIRE_POSITIVE_Y:
y_true, y_pred = _require_positive_targets(y_true, y_pred)
y_true_bin = random_state.randint(0, 2, size=(20, 25))
y_pred_bin = random_state.randint(0, 2, size=(20, 25))
metric = ALL_METRICS[name]
if name in METRIC_UNDEFINED_BINARY:
if name in MULTILABELS_METRICS:
assert_allclose(metric(y_true_bin, y_pred_bin),
metric(y_pred_bin, y_true_bin),
err_msg="%s is not symmetric" % name)
else:
assert False, "This case is currently unhandled"
else:
assert_allclose(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
@pytest.mark.parametrize("name", sorted(NOT_SYMMETRIC_METRICS))
def test_not_symmetric_metric(name):
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
if name in METRICS_REQUIRE_POSITIVE_Y:
y_true, y_pred = _require_positive_targets(y_true, y_pred)
metric = ALL_METRICS[name]
# use context manager to supply custom error message
with assert_raises(AssertionError) as cm:
assert_array_equal(metric(y_true, y_pred), metric(y_pred, y_true))
cm.msg = ("%s seems to be symmetric" % name)
@pytest.mark.parametrize(
'name',
sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_sample_order_invariance(name):
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
if name in METRICS_REQUIRE_POSITIVE_Y:
y_true, y_pred = _require_positive_targets(y_true, y_pred)
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
with ignore_warnings():
metric = ALL_METRICS[name]
assert_allclose(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant" % name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_allclose(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant" % name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_allclose(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant" % name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_allclose(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant" % name)
assert_allclose(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant" % name)
@pytest.mark.parametrize(
'name',
sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_format_invariance_with_1d_vectors(name):
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
if name in METRICS_REQUIRE_POSITIVE_Y:
y1, y2 = _require_positive_targets(y1, y2)
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_array_equal(y1_1d.ndim, 1)
assert_array_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
with ignore_warnings():
metric = ALL_METRICS[name]
measure = metric(y1, y2)
assert_allclose(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant with list"
"" % name)
assert_allclose(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant with "
"np-array-1d" % name)
assert_allclose(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant with "
"np-array-column" % name)
# Mix format support
assert_allclose(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant with mix "
"np-array-1d and list" % name)
assert_allclose(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant with mix "
"np-array-1d and list" % name)
assert_allclose(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant with mix "
"np-array-1d and np-array-column" % name)
assert_allclose(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant with mix "
"np-array-1d and np-array-column" % name)
assert_allclose(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant with mix "
"list and np-array-column" % name)
assert_allclose(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant with mix "
"list and np-array-column" % name)
# These mix representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS | THRESHOLDED_MULTILABEL_METRICS |
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@pytest.mark.parametrize(
'name',
sorted(set(CLASSIFICATION_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_classification_invariance_string_vs_numbers_labels(name):
# Ensure that classification metrics with string labels are invariant
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
with ignore_warnings():
metric = CLASSIFICATION_METRICS[name]
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
@pytest.mark.parametrize('name', THRESHOLDED_METRICS)
def test_thresholded_invariance_string_vs_numbers_labels(name):
# Ensure that thresholded metrics with string labels are invariant
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
pos_label_str = "spam"
with ignore_warnings():
metric = THRESHOLDED_METRICS[name]
if name not in METRIC_UNDEFINED_BINARY:
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
# TODO those metrics doesn't support string label yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
invalids = [([0, 1], [np.inf, np.inf]),
([0, 1], [np.nan, np.nan]),
([0, 1], [np.nan, np.inf])]
@pytest.mark.parametrize(
'metric',
chain(THRESHOLDED_METRICS.values(), REGRESSION_METRICS.values()))
def test_regression_thresholded_inf_nan_input(metric):
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"contains NaN, infinity",
metric, y_true, y_score)
@pytest.mark.parametrize('metric', CLASSIFICATION_METRICS.values())
def test_classification_inf_nan_input(metric):
# Classification metrics all raise a mixed input exception
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"Input contains NaN, infinity or a "
"value too large",
metric, y_true, y_score)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
if name in METRICS_REQUIRE_POSITIVE_Y:
values = [1, 2]
else:
values = [0, 1]
for i, j in product(values, repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
@pytest.mark.parametrize(
'name',
sorted(
set(ALL_METRICS)
# Those metrics are not always defined with one sample
# or in multiclass classification
- METRIC_UNDEFINED_BINARY_MULTICLASS - set(THRESHOLDED_METRICS)))
def test_single_sample(name):
check_single_sample(name)
@pytest.mark.parametrize('name',
sorted(MULTIOUTPUT_METRICS | MULTILABELS_METRICS))
def test_single_sample_multioutput(name):
check_single_sample_multioutput(name)
@pytest.mark.parametrize('name', sorted(MULTIOUTPUT_METRICS))
def test_multioutput_number_of_output_differ(name):
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
@pytest.mark.parametrize('name', sorted(MULTIOUTPUT_METRICS))
def test_multioutput_regression_invariance_to_dimension_shuffling(name):
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_allclose(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling invariant" % (
name))
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 = np.vstack([y1, [[0] * n_classes]])
y2 = np.vstack([y2, [[0] * n_classes]])
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_allclose(metric(y1_sparse_indicator, y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance between "
"dense and sparse indicator formats." % name)
@pytest.mark.parametrize('name', sorted(MULTILABELS_METRICS))
def test_raise_value_error_multilabel_sequences(name):
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[0, 1]],
[[1], [2], [0, 1]],
        [(), (2,), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
assert_raises(ValueError, metric, seq, seq)
@pytest.mark.parametrize('name', sorted(METRICS_WITH_NORMALIZE_OPTION))
def test_normalize_option_binary_classification(name):
# Test in the binary case
n_samples = 20
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_array_less(-1.0 * measure, 0,
err_msg="We failed to test correctly the normalize "
"option")
assert_allclose(metrics(y_true, y_pred, normalize=False) / n_samples,
measure)
@pytest.mark.parametrize('name', sorted(METRICS_WITH_NORMALIZE_OPTION))
def test_normalize_option_multiclass_classification(name):
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_array_less(-1.0 * measure, 0,
err_msg="We failed to test correctly the normalize "
"option")
assert_allclose(metrics(y_true, y_pred, normalize=False) / n_samples,
measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
    # for both random_state 0 and 1, y_true and y_pred have at least one
# unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
    # To make sure at least one empty label is present
    y_true = np.vstack([y_true, [[0] * n_classes]])
    y_pred = np.vstack([y_pred, [[0] * n_classes]])
    n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_array_less(-1.0 * measure, 0,
err_msg="We failed to test correctly the normalize "
"option")
assert_allclose(metrics(y_true, y_pred, normalize=False) / n_samples,
measure, err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_allclose(label_measure,
[metric(y_true_binarize[:, i], y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_allclose(micro_measure,
metric(y_true_binarize.ravel(), y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_allclose(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_allclose(weighted_measure,
np.average(label_measure, weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_allclose(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_allclose(sample_measure,
np.mean([metric(y_true_binarize[i], y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
@pytest.mark.parametrize('name', sorted(METRICS_WITH_AVERAGING))
def test_averaging_multiclass(name):
n_samples, n_classes = 50, 3
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
check_averaging(name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
@pytest.mark.parametrize(
'name',
sorted(METRICS_WITH_AVERAGING | THRESHOLDED_METRICS_WITH_AVERAGING))
def test_averaging_multilabel(name):
n_samples, n_classes = 40, 5
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
check_averaging(name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
@pytest.mark.parametrize('name', sorted(METRICS_WITH_AVERAGING))
def test_averaging_multilabel_all_zeroes(name):
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
check_averaging(name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
def test_averaging_binary_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
@pytest.mark.parametrize('name', sorted(METRICS_WITH_AVERAGING))
def test_averaging_multilabel_all_ones(name):
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
check_averaging(name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
# check that unit weights gives the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_allclose(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
# use context manager to supply custom error message
    with assert_raises(AssertionError) as cm:
        cm.msg = ("Unweighted and weighted scores are unexpectedly almost "
                  "equal (%s) and (%s) for %s" % (unweighted_score,
                                                  weighted_score, name))
        assert_allclose(unweighted_score, weighted_score)
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_allclose(
weighted_score, weighted_score_list,
err_msg=("Weighted scores for array and list "
"sample_weight input are not equal (%s != %s) for %s") % (
weighted_score, weighted_score_list, name))
# check that integer weights is the same as repeated samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_allclose(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_allclose(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%s != %s) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_allclose(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
# Check that if number of samples in y_true and sample_weight are not
# equal, meaningful error is raised.
error_message = ("Found input variables with inconsistent numbers of "
"samples: [{}, {}, {}]".format(
_num_samples(y1), _num_samples(y2),
_num_samples(sample_weight) * 2))
assert_raise_message(ValueError, error_message, metric, y1, y2,
sample_weight=np.hstack([sample_weight,
sample_weight]))
@pytest.mark.parametrize(
'name',
sorted(
set(ALL_METRICS).intersection(set(REGRESSION_METRICS)) -
METRICS_WITHOUT_SAMPLE_WEIGHT))
def test_regression_sample_weight_invariance(name):
n_samples = 50
random_state = check_random_state(0)
# regression
y_true = random_state.random_sample(size=(n_samples,))
y_pred = random_state.random_sample(size=(n_samples,))
metric = ALL_METRICS[name]
check_sample_weight_invariance(name, metric, y_true, y_pred)
@pytest.mark.parametrize(
'name',
sorted(
set(ALL_METRICS) - set(REGRESSION_METRICS) -
METRICS_WITHOUT_SAMPLE_WEIGHT - METRIC_UNDEFINED_BINARY))
def test_binary_sample_weight_invariance(name):
# binary
n_samples = 50
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
check_sample_weight_invariance(name, metric, y_true, y_score)
else:
check_sample_weight_invariance(name, metric, y_true, y_pred)
@pytest.mark.parametrize(
'name',
sorted(
set(ALL_METRICS) - set(REGRESSION_METRICS) -
METRICS_WITHOUT_SAMPLE_WEIGHT - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_multiclass_sample_weight_invariance(name):
# multiclass
n_samples = 50
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
        # softmax: map each row of scores onto the probability simplex, since
        # metrics such as log_loss expect rows summing to one
temp = np.exp(-y_score)
y_score_norm = temp / temp.sum(axis=-1).reshape(-1, 1)
check_sample_weight_invariance(name, metric, y_true, y_score_norm)
else:
check_sample_weight_invariance(name, metric, y_true, y_pred)
@pytest.mark.parametrize(
'name',
sorted((MULTILABELS_METRICS | THRESHOLDED_MULTILABEL_METRICS
| MULTIOUTPUT_METRICS) - METRICS_WITHOUT_SAMPLE_WEIGHT))
def test_multilabel_sample_weight_invariance(name):
# multilabel indicator
random_state = check_random_state(0)
_, ya = make_multilabel_classification(n_features=1, n_classes=10,
random_state=0, n_samples=50,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=10,
random_state=1, n_samples=50,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
check_sample_weight_invariance(name, metric, y_true, y_score)
else:
check_sample_weight_invariance(name, metric, y_true, y_pred)
@ignore_warnings
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
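    # np.unique sorts labels to [0, 1, 2, 3], so inverse_labels is [3, 0, 1, 2]
    # (the position of each entry of `labels` in sorted order); it is used
    # below to realign the per-class scores with `labels`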
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
@pytest.mark.parametrize(
'name',
sorted(MULTILABELS_METRICS - {"unnormalized_multilabel_confusion_matrix"}))
def test_multilabel_label_permutations_invariance(name):
random_state = check_random_state(0)
n_samples, n_classes = 20, 4
y_true = random_state.randint(0, 2, size=(n_samples, n_classes))
y_score = random_state.randint(0, 2, size=(n_samples, n_classes))
metric = ALL_METRICS[name]
score = metric(y_true, y_score)
for perm in permutations(range(n_classes), n_classes):
y_score_perm = y_score[:, perm]
y_true_perm = y_true[:, perm]
current_score = metric(y_true_perm, y_score_perm)
assert_almost_equal(score, current_score)
@pytest.mark.parametrize(
'name', sorted(THRESHOLDED_MULTILABEL_METRICS | MULTIOUTPUT_METRICS))
def test_thresholded_multilabel_multioutput_permutations_invariance(name):
random_state = check_random_state(0)
n_samples, n_classes = 20, 4
y_true = random_state.randint(0, 2, size=(n_samples, n_classes))
y_score = random_state.normal(size=y_true.shape)
# Makes sure all samples have at least one label. This works around errors
    # when running metrics with average="samples"
y_true[y_true.sum(1) == 4, 0] = 0
y_true[y_true.sum(1) == 0, 0] = 1
metric = ALL_METRICS[name]
score = metric(y_true, y_score)
for perm in permutations(range(n_classes), n_classes):
y_score_perm = y_score[:, perm]
y_true_perm = y_true[:, perm]
current_score = metric(y_true_perm, y_score_perm)
assert_almost_equal(score, current_score)
@pytest.mark.parametrize(
'name',
sorted(set(THRESHOLDED_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_thresholded_metric_permutation_invariance(name):
n_samples, n_classes = 100, 3
random_state = check_random_state(0)
y_score = random_state.rand(n_samples, n_classes)
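    # normalize each row to a probability vector (softmax of the negated
    # scores) so metrics such as log_loss receive valid probabilities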
temp = np.exp(-y_score)
y_score = temp / temp.sum(axis=-1).reshape(-1, 1)
y_true = random_state.randint(0, n_classes, size=n_samples)
metric = ALL_METRICS[name]
score = metric(y_true, y_score)
for perm in permutations(range(n_classes), n_classes):
inverse_perm = np.zeros(n_classes, dtype=int)
inverse_perm[list(perm)] = np.arange(n_classes)
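        # inverse_perm is the inverse mapping of perm, keeping each sample's
        # score column aligned with its relabelled class below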
y_score_perm = y_score[:, inverse_perm]
y_true_perm = np.take(perm, y_true)
current_score = metric(y_true_perm, y_score_perm)
assert_almost_equal(score, current_score)
| [
"[email protected]"
]
| |
a5ea641c931a8768c01f47ceb5f09ed009af4204 | 633944f913050debf0764c2a29cf3e88f912670e | /v8/depot_tools/bootstrap-3.8.0b1.chromium.1_bin/python3/lib/python3.8/encodings/mbcs.py | 5f56e6e4b8e55e8e1447875202fe15cbdaa2a26d | [
"BSD-3-Clause",
"bzip2-1.0.6",
"SunPro",
"Apache-2.0"
]
| permissive | bopopescu/V8-lgtm | 0474c2ff39baf754f556ef57619ceae93e7320fd | da307e2f7abfca5fa0e860a809de6cd07fd1b72b | refs/heads/master | 2022-02-16T19:10:54.008520 | 2019-09-25T07:51:13 | 2019-09-25T07:51:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | ../../../../.cipd/pkgs/2/_current/lib/python3.8/encodings/mbcs.py | [
"[email protected]"
]
| |
e4d38da92d86aa517c776e552be806858ea7e31e | 948d84d2e3fc04e353a11384d8570308174242f5 | /17-Numpy/numpy-indexing.py | 11653d3652d5b8b607738f0216cf7655bc401292 | []
| no_license | omerfarukcelenk/PythonMaster | a0084a800b8a41cd2ad538a7ca3687c26dc679ec | 0db8f8b0ea2e1c2d810c542068cfcf1a3615f581 | refs/heads/main | 2023-04-16T17:42:05.501904 | 2021-04-26T21:19:27 | 2021-04-26T21:19:27 | 361,896,109 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | import numpy as np
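# Demonstrates 1-D and 2-D NumPy indexing and slicing; each assignment below
# overwrites `result`, so uncomment print(result) after the line of interest.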
numbers = np.array([0,5,10,15,20,25,50,75])
result = numbers[5]
result = numbers[-1]
result = numbers[0:3]
result = numbers[:3]
result = numbers[3:]
result = numbers[::]
result = numbers[::-1]
numbers2 = np.array([[0,5,10],[15,20,25],[50,75,85]])
result = numbers2[0]
result = numbers2[2]
result = numbers2[0,2]
result = numbers2[2,1]
result = numbers2[:,2]
result = numbers2[:,0]
result = numbers2[:,0:2]
result = numbers2[-1,:]
result = numbers2[:2,:2]
# print(result)
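# Copy vs. view: plain assignment (arr2 = arr1) only aliases the same array,
# while .copy() allocates an independent one, as the prints below show.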
arr1 = np.arange(0,10)
# arr2 = arr1  # reference (alias): both names would point to the same array
arr2 = arr1.copy()
arr2[0] = 20
print(arr1)
print(arr2)
| [
"[email protected]"
]
| |
68d8399c5199cd6c1ac9a2c275edb439b8a5ab47 | c66955c6fc178955c2024e0318ec7a91a8386c2d | /programQuick/chapterEleven/mapIt.py | f66811ddbe725a952063e3f0d855d57f0bd18aa5 | []
| no_license | duheng18/python-study | a98642d6ee1b0043837c3e7c5b91bf1e28dfa588 | 13c0571ac5d1690bb9e615340482bdb2134ecf0e | refs/heads/master | 2022-11-30T17:36:57.060130 | 2019-11-18T07:31:40 | 2019-11-18T07:31:40 | 147,268,053 | 1 | 0 | null | 2022-11-22T03:36:51 | 2018-09-04T00:49:42 | Python | UTF-8 | Python | false | false | 353 | py | import webbrowser, sys, pyperclip
if len(sys.argv) > 1:
# Get address from command line.
address = ' '.join(sys.argv[1:])
else:
# Get address from clipboard.
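    # The copy() call below seeds the clipboard with a sample address so the
    # demo works even when nothing was copied beforehand.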
pyperclip.copy('mapit 870 Valencia St, San Francisco, CA 94110')
address = pyperclip.paste()
print(address)
webbrowser.open('https://www.google.com/maps/place/'+address)
| [
"[email protected]"
]
| |
a41fbaec0c7870b206597745a26e289cb91943e7 | 4c9c2940ef3a07e2756fcceddf01acd384ebde01 | /Python/[5 kyu] emirps.py | 4550d94ea211e128c3446713211ba9db63e83b25 | [
"MIT"
]
| permissive | KonstantinosAng/CodeWars | 7d3501a605f7ffecb7f0b761b5ffe414e2f1983a | 157818ece648454e882c171a71b4c81245ab0214 | refs/heads/master | 2023-04-11T09:44:27.480064 | 2023-03-26T21:37:07 | 2023-03-26T21:37:07 | 245,296,762 | 6 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | # see https://www.codewars.com/kata/55a29405bc7d2efaff00007c/train/python
from TestFunction import Test
def is_prime(num):
if num % 2 == 0: return False
for i in range(3, int(num**0.5+1), 2):
if (num % i) == 0:
return False
else:
return True
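# Note: is_prime() returns False for 2 (the num % 2 == 0 fast path) and True
# for 1; both cases are harmless here, since primes() only tests odd numbers
# >= 3 and the reversed values passed back into is_prime() are never 1 or 2.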
def is_emirp(num):
    reversed_num = int(str(num)[::-1])
    if reversed_num == num: return False
    return is_prime(reversed_num)
def primes(n):
return [x for x in range(3, n, 2) if is_prime(x)]
def find_emirp(n):
    prime_set = set(primes(10**6))
    primes_below_n = [num for num in prime_set if num < n]
    emirps = [num for num in primes_below_n if is_emirp(num)]
    return [len(emirps), max(emirps) if emirps else 0, sum(emirps)]
test = Test(None)
test.assert_equals(find_emirp(10), [0, 0, 0])
test.assert_equals(find_emirp(50), [4, 37, 98])
test.assert_equals(find_emirp(100), [8, 97, 418])
test.assert_equals(find_emirp(200), [15, 199, 1489])
test.assert_equals(find_emirp(500), [20, 389, 3232])
test.assert_equals(find_emirp(750), [25, 743, 6857])
test.assert_equals(find_emirp(915505), [9278, 915283, 3303565930])
test.assert_equals(find_emirp(530492), [6700, 399941, 1317845448])
| [
"[email protected]"
]
| |
5d9ddcd5643b7d3a09a7a2df7d052784a9a314f5 | 30302d215a012a079edf05a4e14e932888385def | /ddht/v5_1/alexandria/typing.py | e73bd07c5b075b4ed6d6eac7f99be2677b7a8cae | [
"MIT"
]
| permissive | NhlanhlaHasane/ddht | e54975a7fcf4e9bfa29771ee6b78c5e9a5991aff | 142911d134ff839f3f79ff8fe9e45d3fe5a58cd0 | refs/heads/master | 2023-05-31T05:09:06.371320 | 2021-06-03T22:31:22 | 2021-06-03T22:31:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | from typing import NewType
ContentID = NewType("ContentID", bytes)
ContentKey = NewType("ContentKey", bytes)
| [
"[email protected]"
]
| |
da327466a9c5966169ed0e73790c57d204126c2b | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_74/244.py | 1aad3f70f9fbf37a7323274a79680d37008e458c | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | #!/usr/bin/env python
# encoding: utf-8
import os
def parse_sequence(fileDescriptor):
# Returns a sequence to complete
f = fileDescriptor
sequence = f.readline()
sequence = sequence.split(' ')
n = int(sequence[0])
sequence_a = []
sequence_b = []
sequence_r = []
have_color = False
last_color = 'O'
for i in xrange(1,len(sequence)):
if not have_color and (sequence[i] == 'O' or sequence[i] == 'B'):
have_color = True
last_color = sequence[i]
elif have_color and (sequence[i] != 'O' and sequence[i] != 'B'):
t = (int(sequence[i]), last_color)
if t[1] == 'O':
sequence_a.append(t)
else:
sequence_b.append(t)
sequence_r.append(t)
have_color = False
else:
print "Badformed Input"
exit()
return n, sequence_r, sequence_a, sequence_b
def min_time(n, sequence, seqO, seqB):
posO = 1
posB = 1
cTime = 0
for step in sequence:
if step[1] == 'O':
toComplete = timeToComplete(posO, step[0])
cTime += toComplete
posO = step[0]
seqO.pop(0)
if seqB:
# Is not empty
posB = newPosition(posB, seqB[0][0], toComplete)
else:
toComplete = timeToComplete(posB, step[0])
cTime += toComplete
posB = step[0]
seqB.pop(0)
if seqO:
# Is not empty
posO = newPosition(posO, seqO[0][0], toComplete)
return cTime
def timeToComplete(currPos, destPos):
return (max(currPos, destPos) - min(currPos, destPos) + 1)
def newPosition(currPos, destPos, time):
result = 0
advance = min(timeToComplete(currPos, destPos) -1, time)
if currPos < destPos:
result = currPos + advance
else:
result = currPos - advance
return result
def solve(fileName):
try:
f = open(fileName, "r")
    except IOError:
exit()
test_cases = int(f.readline())
for i in xrange(test_cases):
args = parse_sequence(f)
result = min_time(*args)
print "Case #%d: %d" %(i+1, result)
| [
"[email protected]"
]
| |
200a0c214acff2cccff7133ae68f381b0699de4b | d6265afea582ef9d0b282d0dbaf582ef2015a6f4 | /tests/satosa/metadata_creation/test_saml_metadata.py | 49cff97a4cadfb8c3cca7baeb70e08e9ac3e0e73 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | peppelinux/SATOSA | c94b0d2f7fa07b3b8a751f548b8166452e9e084f | 12d9f2532e334978e9a614946d77cc5b217b4383 | refs/heads/master | 2023-08-10T08:08:22.199322 | 2020-04-13T17:26:27 | 2020-04-13T17:26:27 | 180,346,947 | 3 | 0 | Apache-2.0 | 2021-08-24T08:23:33 | 2019-04-09T10:56:02 | Python | UTF-8 | Python | false | false | 18,113 | py | import copy
from base64 import urlsafe_b64encode
import pytest
from saml2.config import SPConfig, Config
from saml2.mdstore import InMemoryMetaData
from saml2.metadata import entity_descriptor
from saml2.sigver import security_context
from saml2.time_util import in_a_while
from satosa.metadata_creation.saml_metadata import create_entity_descriptors, create_signed_entities_descriptor, \
create_signed_entity_descriptor
from satosa.satosa_config import SATOSAConfig
from tests.conftest import BASE_URL
from tests.util import create_metadata_from_config_dict
class TestCreateEntityDescriptors:
def assert_single_sign_on_endpoints_for_saml_frontend(self, entity_descriptor, saml_frontend_config, backend_names):
metadata = InMemoryMetaData(None, str(entity_descriptor))
metadata.load()
sso = metadata.service(saml_frontend_config["config"]["idp_config"]["entityid"], "idpsso_descriptor",
"single_sign_on_service")
for backend_name in backend_names:
for binding, path in saml_frontend_config["config"]["endpoints"]["single_sign_on_service"].items():
sso_urls_for_binding = [endpoint["location"] for endpoint in sso[binding]]
expected_url = "{}/{}/{}".format(BASE_URL, backend_name, path)
assert expected_url in sso_urls_for_binding
def assert_single_sign_on_endpoints_for_saml_mirror_frontend(self, entity_descriptors, encoded_target_entity_id,
saml_mirror_frontend_config, backend_names):
expected_entity_id = saml_mirror_frontend_config["config"]["idp_config"][
"entityid"] + "/" + encoded_target_entity_id
metadata = InMemoryMetaData(None, None)
for ed in entity_descriptors:
metadata.parse(str(ed))
sso = metadata.service(expected_entity_id, "idpsso_descriptor", "single_sign_on_service")
for backend_name in backend_names:
for binding, path in saml_mirror_frontend_config["config"]["endpoints"]["single_sign_on_service"].items():
sso_urls_for_binding = [endpoint["location"] for endpoint in sso[binding]]
expected_url = "{}/{}/{}/{}".format(BASE_URL, backend_name, encoded_target_entity_id, path)
assert expected_url in sso_urls_for_binding
def assert_assertion_consumer_service_endpoints_for_saml_backend(self, entity_descriptor, saml_backend_config):
metadata = InMemoryMetaData(None, str(entity_descriptor))
metadata.load()
acs = metadata.service(saml_backend_config["config"]["sp_config"]["entityid"], "spsso_descriptor",
"assertion_consumer_service")
for url, binding in saml_backend_config["config"]["sp_config"]["service"]["sp"]["endpoints"][
"assertion_consumer_service"]:
assert acs[binding][0]["location"] == url
def test_saml_frontend_with_saml_backend(self, satosa_config_dict, saml_frontend_config, saml_backend_config):
satosa_config_dict["FRONTEND_MODULES"] = [saml_frontend_config]
satosa_config_dict["BACKEND_MODULES"] = [saml_backend_config]
satosa_config = SATOSAConfig(satosa_config_dict)
frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
assert len(frontend_metadata) == 1
assert len(frontend_metadata[saml_frontend_config["name"]]) == 1
entity_descriptor = frontend_metadata[saml_frontend_config["name"]][0]
self.assert_single_sign_on_endpoints_for_saml_frontend(entity_descriptor, saml_frontend_config,
[saml_backend_config["name"]])
assert len(backend_metadata) == 1
self.assert_assertion_consumer_service_endpoints_for_saml_backend(
backend_metadata[saml_backend_config["name"]][0],
saml_backend_config)
def test_saml_frontend_with_oidc_backend(self, satosa_config_dict, saml_frontend_config, oidc_backend_config):
satosa_config_dict["FRONTEND_MODULES"] = [saml_frontend_config]
satosa_config_dict["BACKEND_MODULES"] = [oidc_backend_config]
satosa_config = SATOSAConfig(satosa_config_dict)
frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
assert len(frontend_metadata) == 1
assert len(frontend_metadata[saml_frontend_config["name"]]) == 1
entity_descriptor = frontend_metadata[saml_frontend_config["name"]][0]
self.assert_single_sign_on_endpoints_for_saml_frontend(entity_descriptor, saml_frontend_config,
[oidc_backend_config["name"]])
# OIDC backend does not produce any SAML metadata
assert not backend_metadata
def test_saml_frontend_with_multiple_backends(self, satosa_config_dict, saml_frontend_config, saml_backend_config,
oidc_backend_config):
satosa_config_dict["FRONTEND_MODULES"] = [saml_frontend_config]
satosa_config_dict["BACKEND_MODULES"] = [saml_backend_config, oidc_backend_config]
satosa_config = SATOSAConfig(satosa_config_dict)
frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
assert len(frontend_metadata) == 1
assert len(frontend_metadata[saml_frontend_config["name"]]) == 1
entity_descriptor = frontend_metadata[saml_frontend_config["name"]][0]
self.assert_single_sign_on_endpoints_for_saml_frontend(entity_descriptor, saml_frontend_config,
[saml_backend_config["name"],
oidc_backend_config["name"]])
# only the SAML backend produces SAML metadata
assert len(backend_metadata) == 1
self.assert_assertion_consumer_service_endpoints_for_saml_backend(
backend_metadata[saml_backend_config["name"]][0],
saml_backend_config)
def test_saml_mirror_frontend_with_saml_backend_with_multiple_target_providers(self, satosa_config_dict, idp_conf,
saml_mirror_frontend_config,
saml_backend_config):
idp_conf2 = copy.deepcopy(idp_conf)
idp_conf2["entityid"] = "https://idp2.example.com"
satosa_config_dict["FRONTEND_MODULES"] = [saml_mirror_frontend_config]
saml_backend_config["config"]["sp_config"]["metadata"] = {"inline": [create_metadata_from_config_dict(idp_conf),
create_metadata_from_config_dict(
idp_conf2)]}
satosa_config_dict["BACKEND_MODULES"] = [saml_backend_config]
satosa_config = SATOSAConfig(satosa_config_dict)
frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
assert len(frontend_metadata) == 1
assert len(frontend_metadata[saml_mirror_frontend_config["name"]]) == 2
entity_descriptors = frontend_metadata[saml_mirror_frontend_config["name"]]
for target_entity_id in [idp_conf["entityid"], idp_conf2["entityid"]]:
encoded_target_entity_id = urlsafe_b64encode(target_entity_id.encode("utf-8")).decode("utf-8")
self.assert_single_sign_on_endpoints_for_saml_mirror_frontend(entity_descriptors, encoded_target_entity_id,
saml_mirror_frontend_config,
[saml_backend_config["name"]])
assert len(backend_metadata) == 1
self.assert_assertion_consumer_service_endpoints_for_saml_backend(
backend_metadata[saml_backend_config["name"]][0],
saml_backend_config)
def test_saml_mirror_frontend_with_oidc_backend(self, satosa_config_dict, saml_mirror_frontend_config,
oidc_backend_config):
satosa_config_dict["FRONTEND_MODULES"] = [saml_mirror_frontend_config]
satosa_config_dict["BACKEND_MODULES"] = [oidc_backend_config]
satosa_config = SATOSAConfig(satosa_config_dict)
frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
assert len(frontend_metadata) == 1
assert len(frontend_metadata[saml_mirror_frontend_config["name"]]) == 1
entity_descriptors = frontend_metadata[saml_mirror_frontend_config["name"]]
target_entity_id = oidc_backend_config["config"]["provider_metadata"]["issuer"]
encoded_target_entity_id = urlsafe_b64encode(target_entity_id.encode("utf-8")).decode("utf-8")
self.assert_single_sign_on_endpoints_for_saml_mirror_frontend(entity_descriptors, encoded_target_entity_id,
saml_mirror_frontend_config,
[oidc_backend_config["name"]])
# OIDC backend does not produce any SAML metadata
assert not backend_metadata
def test_saml_mirror_frontend_with_multiple_backends(self, satosa_config_dict, idp_conf,
saml_mirror_frontend_config,
saml_backend_config, oidc_backend_config):
satosa_config_dict["FRONTEND_MODULES"] = [saml_mirror_frontend_config]
saml_backend_config["config"]["sp_config"]["metadata"] = {
"inline": [create_metadata_from_config_dict(idp_conf)]}
satosa_config_dict["BACKEND_MODULES"] = [saml_backend_config, oidc_backend_config]
satosa_config = SATOSAConfig(satosa_config_dict)
frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
assert len(frontend_metadata) == 1
assert len(frontend_metadata[saml_mirror_frontend_config["name"]]) == 2
params = zip([idp_conf["entityid"], oidc_backend_config["config"]["provider_metadata"]["issuer"]],
[saml_backend_config["name"], oidc_backend_config["name"]])
entity_descriptors = frontend_metadata[saml_mirror_frontend_config["name"]]
for target_entity_id, backend_name in params:
encoded_target_entity_id = urlsafe_b64encode(target_entity_id.encode("utf-8")).decode("utf-8")
self.assert_single_sign_on_endpoints_for_saml_mirror_frontend(entity_descriptors, encoded_target_entity_id,
saml_mirror_frontend_config,
[backend_name])
# only the SAML backend produces SAML metadata
assert len(backend_metadata)
self.assert_assertion_consumer_service_endpoints_for_saml_backend(
backend_metadata[saml_backend_config["name"]][0],
saml_backend_config)
def test_two_saml_frontends(self, satosa_config_dict, saml_frontend_config, saml_mirror_frontend_config,
oidc_backend_config):
satosa_config_dict["FRONTEND_MODULES"] = [saml_frontend_config, saml_mirror_frontend_config]
satosa_config_dict["BACKEND_MODULES"] = [oidc_backend_config]
satosa_config = SATOSAConfig(satosa_config_dict)
frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
assert len(frontend_metadata) == 2
saml_entities = frontend_metadata[saml_frontend_config["name"]]
assert len(saml_entities) == 1
entity_descriptor = saml_entities[0]
self.assert_single_sign_on_endpoints_for_saml_frontend(entity_descriptor, saml_frontend_config,
[oidc_backend_config["name"]])
mirrored_saml_entities = frontend_metadata[saml_mirror_frontend_config["name"]]
assert len(mirrored_saml_entities) == 1
target_entity_id = oidc_backend_config["config"]["provider_metadata"]["issuer"]
encoded_target_entity_id = urlsafe_b64encode(target_entity_id.encode("utf-8")).decode("utf-8")
self.assert_single_sign_on_endpoints_for_saml_mirror_frontend(mirrored_saml_entities, encoded_target_entity_id,
saml_mirror_frontend_config,
[oidc_backend_config["name"]])
# OIDC backend does not produce any SAML metadata
assert not backend_metadata
def test_create_mirrored_metadata_does_not_contain_target_contact_info(self, satosa_config_dict, idp_conf,
saml_mirror_frontend_config,
saml_backend_config):
satosa_config_dict["FRONTEND_MODULES"] = [saml_mirror_frontend_config]
saml_backend_config["config"]["sp_config"]["metadata"] = {
"inline": [create_metadata_from_config_dict(idp_conf)]}
satosa_config_dict["BACKEND_MODULES"] = [saml_backend_config]
satosa_config = SATOSAConfig(satosa_config_dict)
frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
assert len(frontend_metadata) == 1
entity_descriptors = frontend_metadata[saml_mirror_frontend_config["name"]]
metadata = InMemoryMetaData(None, str(entity_descriptors[0]))
metadata.load()
entity_info = list(metadata.values())[0]
expected_entity_info = saml_mirror_frontend_config["config"]["idp_config"]
assert len(entity_info["contact_person"]) == len(expected_entity_info["contact_person"])
for i, contact in enumerate(expected_entity_info["contact_person"]):
assert entity_info["contact_person"][i]["contact_type"] == contact["contact_type"]
assert entity_info["contact_person"][i]["email_address"][0]["text"] == contact["email_address"][0]
assert entity_info["contact_person"][i]["given_name"]["text"] == contact["given_name"]
assert entity_info["contact_person"][i]["sur_name"]["text"] == contact["sur_name"]
expected_org_info = expected_entity_info["organization"]
assert entity_info["organization"]["organization_display_name"][0]["text"] == \
expected_org_info["display_name"][0][0]
assert entity_info["organization"]["organization_name"][0]["text"] == expected_org_info["name"][0][0]
assert entity_info["organization"]["organization_url"][0]["text"] == expected_org_info["url"][0][0]
class TestCreateSignedEntitiesDescriptor:
@pytest.fixture
def entity_desc(self, sp_conf):
return entity_descriptor(SPConfig().load(sp_conf, metadata_construction=True))
@pytest.fixture
def verification_security_context(self, cert_and_key):
conf = Config()
conf.cert_file = cert_and_key[0]
return security_context(conf)
@pytest.fixture
def signature_security_context(self, cert_and_key):
conf = Config()
conf.cert_file = cert_and_key[0]
conf.key_file = cert_and_key[1]
return security_context(conf)
def test_signed_metadata(self, entity_desc, signature_security_context, verification_security_context):
signed_metadata = create_signed_entities_descriptor([entity_desc, entity_desc], signature_security_context)
md = InMemoryMetaData(None, security=verification_security_context)
md.parse(signed_metadata)
assert md.signed() is True
assert md.parse_and_check_signature(signed_metadata) is True
assert not md.entities_descr.valid_until
def test_valid_for(self, entity_desc, signature_security_context):
valid_for = 4 # metadata valid for 4 hours
expected_validity = in_a_while(hours=valid_for)
signed_metadata = create_signed_entities_descriptor([entity_desc], signature_security_context,
valid_for=valid_for)
md = InMemoryMetaData(None)
md.parse(signed_metadata)
assert md.entities_descr.valid_until == expected_validity
class TestCreateSignedEntityDescriptor:
@pytest.fixture
def entity_desc(self, sp_conf):
return entity_descriptor(SPConfig().load(sp_conf, metadata_construction=True))
@pytest.fixture
def verification_security_context(self, cert_and_key):
conf = Config()
conf.cert_file = cert_and_key[0]
return security_context(conf)
@pytest.fixture
def signature_security_context(self, cert_and_key):
conf = Config()
conf.cert_file = cert_and_key[0]
conf.key_file = cert_and_key[1]
return security_context(conf)
def test_signed_metadata(self, entity_desc, signature_security_context, verification_security_context):
signed_metadata = create_signed_entity_descriptor(entity_desc, signature_security_context)
md = InMemoryMetaData(None, security=verification_security_context)
md.parse(signed_metadata)
assert md.signed() is True
assert md.parse_and_check_signature(signed_metadata) is True
assert not md.entity_descr.valid_until
def test_valid_for(self, entity_desc, signature_security_context):
valid_for = 4 # metadata valid for 4 hours
expected_validity = in_a_while(hours=valid_for)
signed_metadata = create_signed_entity_descriptor(entity_desc, signature_security_context,
valid_for=valid_for)
md = InMemoryMetaData(None)
md.parse(signed_metadata)
assert md.entity_descr.valid_until == expected_validity
| [
"[email protected]"
]
| |
58bbcb0b913a6f6d65e9cc3f765cf80b1e6d8d8d | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/fabric/rssnmppol.py | 4738ca78412d8a6382b312d0f46b6ee434811e5c | []
| no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 8,086 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsSnmpPol(Mo):
"""
A source relation to the SNMP policy.
"""
meta = NamedSourceRelationMeta("cobra.model.fabric.RsSnmpPol", "cobra.model.snmp.Pol")
meta.targetNameProps["name"] = "tnSnmpPolName"
meta.cardinality = SourceRelationMeta.N_TO_ONE
meta.moClassName = "fabricRsSnmpPol"
meta.rnFormat = "rssnmpPol"
meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
meta.label = "SNMP Policy"
meta.writeAccessMask = 0x8e700000001
meta.readAccessMask = 0x8e700000001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Inst")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.parentClasses.add("cobra.model.fabric.PodPGrp")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.reln.To")
meta.superClasses.add("cobra.model.pol.NToRef")
meta.rnPrefixes = [
('rssnmpPol', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = True
prop.defaultValueStr = "yes"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("forceResolve", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 13999, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("local", "local", 3)
prop._addConstant("mo", "mo", 1)
prop._addConstant("service", "service", 2)
meta.props.add("rType", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
prop.label = "State"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unformed"
prop._addConstant("cardinality-violation", "cardinality-violation", 5)
prop._addConstant("formed", "formed", 1)
prop._addConstant("invalid-target", "invalid-target", 4)
prop._addConstant("missing-target", "missing-target", 2)
prop._addConstant("unformed", "unformed", 0)
meta.props.add("state", prop)
prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
prop.label = "State Qualifier"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("default-target", "default-target", 2)
prop._addConstant("mismatch-target", "mismatch-target", 1)
prop._addConstant("none", "none", 0)
meta.props.add("stateQual", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 11558, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 4571
prop.defaultValueStr = "snmpPol"
prop._addConstant("snmpPol", None, 4571)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tContextDn", "tContextDn", 4990, PropCategory.REGULAR)
prop.label = "Target-context"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tContextDn", prop)
prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tDn", prop)
prop = PropMeta("str", "tRn", "tRn", 4989, PropCategory.REGULAR)
prop.label = "Target-rn"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("tRn", prop)
prop = PropMeta("str", "tType", "tType", 4988, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "name"
prop._addConstant("all", "all", 2)
prop._addConstant("mo", "mo", 1)
prop._addConstant("name", "name", 0)
meta.props.add("tType", prop)
prop = PropMeta("str", "tnSnmpPolName", "tnSnmpPolName", 11557, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("tnSnmpPolName", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
]
| |
4a7f9b779862e39bed7fde83a238b96e4b69f2f1 | fe4c3905ec0e2d8fa5100454c49a863bda3d05ab | /Code/Mantid/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectResolution.py | 3fe3e42c49c3011afbab8d24a9adf8e2cf6fcb2b | []
| no_license | mkoennecke/mantid | 11f16fe573056d70c119c4d6fb6984b7008cb8e6 | c0a8e5d97cde6cc28abb8c7b1b5c056986a81fec | refs/heads/master | 2021-01-18T11:51:28.997458 | 2015-02-13T10:48:51 | 2015-02-13T10:48:51 | 11,472,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,587 | py | from mantid.simpleapi import *
from mantid.api import *
from mantid.kernel import *
from mantid import config, logger
class IndirectResolution(DataProcessorAlgorithm):
def category(self):
return 'Workflow\\Inelastic;PythonAlgorithms;Inelastic'
def summary(self):
return 'Creates a resolution workspace'
def PyInit(self):
self.declareProperty(StringArrayProperty(name='InputFiles'),
doc='Comma seperated list if input files')
self.declareProperty(WorkspaceProperty('OutputWorkspace', '',
optional=PropertyMode.Optional,
direction=Direction.Output),
doc='Output resolution workspace (if left blank a name will be gernerated automatically)')
self.declareProperty(name='Instrument', defaultValue='',
validator=StringListValidator(['IRIS', 'OSIRIS', 'TOSCA']),
doc='Instrument used during run')
self.declareProperty(name='Analyser', defaultValue='',
validator=StringListValidator(['graphite', 'mica', 'fmica']),
doc='Analyser used during run')
self.declareProperty(name='Reflection', defaultValue='',
validator=StringListValidator(['002', '004', '006']),
doc='Reflection used during run')
self.declareProperty(IntArrayProperty(name='DetectorRange', values=[0, 1]),
doc='Range of detetcors to use in resolution calculation')
self.declareProperty(FloatArrayProperty(name='BackgroundRange', values=[0.0, 0.0]),
doc='Energy range to use as background')
self.declareProperty(name='RebinParam', defaultValue='', doc='Rebinning parameters (min,width,max)')
self.declareProperty(name='ScaleFactor', defaultValue=1.0, doc='Factor to scale resolution curve by')
self.declareProperty(name='Smooth', defaultValue=False, doc='Apply WienerSmooth to resolution')
self.declareProperty(name='Plot', defaultValue=False, doc='Plot resolution curve')
self.declareProperty(name='Save', defaultValue=False, doc='Save resolution workspace as a Nexus file')
def PyExec(self):
from IndirectCommon import StartTime, EndTime, getWSprefix
import inelastic_indirect_reducer
StartTime('IndirectResolution')
self._setup()
InelasticIndirectReduction(Instrument=self._instrument,
Analyser=self._analyser,
Reflection=self._reflection,
Grouping='All',
SumFiles=True,
InputFiles=self._input_files,
DetectorRange=self._detector_range,
OutputWorkspace='__icon_ws_group')
icon_ws = mtd['__icon_ws_group'].getItem(0).getName()
if self._out_ws == "":
self._out_ws = getWSprefix(icon_ws) + 'res'
if self._scale_factor != 1.0:
Scale(InputWorkspace=icon_ws, OutputWorkspace=icon_ws, Factor=self._scale_factor)
CalculateFlatBackground(InputWorkspace=icon_ws, OutputWorkspace=self._out_ws,
StartX=self._background[0], EndX=self._background[1],
Mode='Mean', OutputMode='Subtract Background')
Rebin(InputWorkspace=self._out_ws, OutputWorkspace=self._out_ws, Params=self._rebin_string)
if self._smooth:
WienerSmooth(InputWorkspace=self._out_ws, OutputWorkspace='__smooth_temp')
CopyLogs(InputWorkspace=self._out_ws, OutputWorkspace='__smooth_temp')
RenameWorkspace(InputWorkspace='__smooth_temp', OutputWorkspace=self._out_ws)
self._post_process()
self.setProperty('OutputWorkspace', self._out_ws)
EndTime('IndirectResolution')
def _setup(self):
"""
Gets algorithm properties.
"""
self._input_files = self.getProperty('InputFiles').value
self._out_ws = self.getPropertyValue('OutputWorkspace')
self._instrument = self.getProperty('Instrument').value
self._analyser = self.getProperty('Analyser').value
self._reflection = self.getProperty('Reflection').value
self._detector_range = self.getProperty('DetectorRange').value
self._background = self.getProperty('BackgroundRange').value
self._rebin_string = self.getProperty('RebinParam').value
self._scale_factor = self.getProperty('ScaleFactor').value
self._smooth = self.getProperty('Smooth').value
self._plot = self.getProperty('Plot').value
self._save = self.getProperty('Save').value
def _post_process(self):
"""
Handles adding logs, saving and plotting.
"""
        use_scale_factor = self._scale_factor != 1.0
AddSampleLog(Workspace=self._out_ws, LogName='scale', LogType='String', LogText=str(use_scale_factor))
if use_scale_factor:
AddSampleLog(Workspace=self._out_ws, LogName='scale_factor', LogType='Number', LogText=str(self._scale_factor))
AddSampleLog(Workspace=self._out_ws, LogName='res_smoothing_applied', LogType='String', LogText=str(self._smooth))
AddSampleLog(Workspace=self._out_ws, LogName='back_start', LogType='Number', LogText=str(self._background[0]))
AddSampleLog(Workspace=self._out_ws, LogName='back_end', LogType='Number', LogText=str(self._background[1]))
rebin_params = self._rebin_string.split(',')
if len(rebin_params) == 3:
AddSampleLog(Workspace=self._out_ws, LogName='rebin_low', LogType='Number', LogText=rebin_params[0])
AddSampleLog(Workspace=self._out_ws, LogName='rebin_width', LogType='Number', LogText=rebin_params[1])
AddSampleLog(Workspace=self._out_ws, LogName='rebin_high', LogType='Number', LogText=rebin_params[2])
self.setProperty('OutputWorkspace', self._out_ws)
if self._save:
logger.information("Resolution file saved to default save directory.")
SaveNexusProcessed(InputWorkspace=self._out_ws, Filename=self._out_ws + '.nxs')
if self._plot:
from IndirectImport import import_mantidplot
mtd_plot = import_mantidplot()
mtd_plot.plotSpectrum(self._out_ws, 0)
AlgorithmFactory.subscribe(IndirectResolution)
| [
"[email protected]"
]
| |
435e90d2b0debc710dd2aca553b76e51ea39e688 | f4434c85e3814b6347f8f8099c081ed4af5678a5 | /sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/operations/_big_data_pools_operations.py | 2b2366e730881713afa1086c0e769bf1a8d28656 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
]
| permissive | yunhaoling/azure-sdk-for-python | 5da12a174a37672ac6ed8e3c1f863cb77010a506 | c4eb0ca1aadb76ad892114230473034830116362 | refs/heads/master | 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 | MIT | 2020-03-31T20:35:17 | 2019-03-25T22:43:40 | Python | UTF-8 | Python | false | false | 6,664 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class BigDataPoolsOperations(object):
"""BigDataPoolsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.synapse.artifacts.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> "_models.BigDataPoolResourceInfoListResult"
"""List Big Data Pools.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BigDataPoolResourceInfoListResult, or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.BigDataPoolResourceInfoListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BigDataPoolResourceInfoListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorContract, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('BigDataPoolResourceInfoListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/bigDataPools'} # type: ignore
def get(
self,
big_data_pool_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.BigDataPoolResourceInfo"
"""Get Big Data Pool.
:param big_data_pool_name: The Big Data Pool name.
:type big_data_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BigDataPoolResourceInfo, or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.BigDataPoolResourceInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BigDataPoolResourceInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'bigDataPoolName': self._serialize.url("big_data_pool_name", big_data_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorContract, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('BigDataPoolResourceInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/bigDataPools/{bigDataPoolName}'} # type: ignore
| [
"[email protected]"
]
|