content (string, 7-928k chars) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104 chars) | path (string, 4-230 chars) | size (int64, 7-928k) | lang (string, 1 class: Python)
---|---|---|---|---|---|---|---|---|
"""
Django settings for toDoList project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r=cr&4z(#t-&vbyp_71-sy&edioe73mt48%)1ur^g1&@p$m69e'
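# Illustrative hardening sketch (not part of the original project): the key could be read
# from the environment instead, e.g. SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY),
# where 'DJANGO_SECRET_KEY' is a hypothetical variable name.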
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.todo',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'toDoList.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'builtins': ['django.templatetags.static']
},
},
]
WSGI_APPLICATION = 'toDoList.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'things_to_do',
'USER': 'root',
'PASSWORD': '123456',
'PORT': 3306,
'HOST': '127.0.0.1'
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
| 27.046875 | 92 | 0.659445 | ["MIT"] | ruoyunruyan/toDoList | toDoList/toDoList/settings.py | 3,462 | Python |
from .spark_cluster import SparkCluster
from staroid import Staroid
import requests
import os, stat, time
from pathlib import Path
class Ods:
def __init__(self, staroid=None, ske=None, cache_dir=None):
self.__ske = None
if staroid == None:
self._staroid = Staroid()
else:
self._staroid = staroid
if cache_dir == None:
self.__cache_dir = "{}/.ods".format(str(Path.home()))
else:
self.__cache_dir = cache_dir
# configure from env var
if "STAROID_SKE" in os.environ:
self.__ske = os.environ["STAROID_SKE"]
# configure from args
if ske != None:
self.__ske = ske
def create_or_get_cache_dir(self, module = ""):
"create (if not exists) or return cache dir path for module"
cache_dir = "{}/{}".format(self.__cache_dir, module)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
return cache_dir
def download_chisel_if_not_exists(self):
self._staroid.get_chisel_path()
def _start_instance_on_staroid(self, instance_name, commit_url):
cluster = self._staroid.cluster().get(self.__ske)
if cluster == None:
raise Exception("Can't get ske cluster")
ns_api = self._staroid.namespace(cluster)
ns = ns_api.create(instance_name, commit_url)
if ns == None:
raise Exception("Can't create instance")
        # if instance is stopped, restart
if ns.status() == "PAUSE":
ns_api.start(instance_name)
# wait for phase to become RUNNING
return self.__wait_for_ns_phase(ns_api, ns, "RUNNING", 600)
def _start_tunnel(self, instance_name, tunnels):
cluster = self._staroid.cluster().get(self.__ske)
if cluster == None:
raise Exception("Can't get ske cluster")
ns_api = self._staroid.namespace(cluster)
ns = ns_api.get(instance_name)
ns_api.shell_start(instance_name)
ns_api.start_tunnel(instance_name, tunnels)
def _stop_tunnel(self, instance_name):
cluster = self._staroid.cluster().get(self.__ske)
if cluster == None:
raise Exception("Can't get ske cluster")
ns_api = self._staroid.namespace(cluster)
ns_api.stop_tunnel(instance_name)
ns_api.shell_stop(instance_name)
def _stop_instance_on_staroid(self, instance_name):
cluster = self._staroid.cluster().get(self.__ske)
if cluster == None:
raise Exception("Can't get ske cluster")
ns_api = self._staroid.namespace(cluster)
ns = ns_api.stop(instance_name)
ns = self.__wait_for_ns_phase(ns_api, ns, "PAUSED", 600)
return ns
def _delete_instance_on_staroid(self, instance_name):
cluster = self._staroid.cluster().get(self.__ske)
if cluster == None:
raise Exception("Can't get ske cluster")
ns_api = self._staroid.namespace(cluster)
ns = ns_api.delete(instance_name)
ns = self.__wait_for_ns_phase(ns_api, ns, "REMOVED", 600)
def __wait_for_ns_phase(self, ns_api, ns, phase, timeout):
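        # Poll the namespace until it reaches `phase`, lengthening the sleep between
        # checks by one second per attempt (capped at max_sleep_time) and raising
        # an exception once `timeout` seconds have elapsed.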
start_time = time.time()
sleep_time = 1
max_sleep_time = 7
while ns.phase() != phase:
if time.time() - start_time > timeout:
raise Exception("Timeout")
# sleep
time.sleep(sleep_time)
if sleep_time < max_sleep_time:
sleep_time += 1
# check
ns = ns_api.get_by_id(ns.id())
return ns
__singleton = {}
def init(ske=None, reinit=True):
if "instance" not in __singleton or reinit:
__singleton["instance"] = Ods(ske=ske)
return __singleton["instance"]
def spark(
name,
spark_conf=None,
spark_version="3.0.1",
spark_home=None,
worker_num=1,
worker_type="standard-4",
worker_isolation="dedicated",
delta=False,
aws=True):
init(reinit=False)
cluster = SparkCluster(
__singleton["instance"],
name,
spark_conf=spark_conf,
spark_version=spark_version,
spark_home=spark_home,
worker_num=worker_num,
worker_type=worker_type,
worker_isolation=worker_isolation,
delta=delta,
aws=aws)
return cluster
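# Illustrative usage sketch (not part of the original file; assumes staroid credentials
# and an SKE cluster name are already configured via environment or arguments):
#
#   import ods
#   cluster = ods.spark("my-cluster", worker_num=2)   # "my-cluster" is a placeholder name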
| 31.542857 | 68 | 0.611413 | ["MIT"] | open-datastudio/ods | ods/ods.py | 4,416 | Python |
# -*- coding:utf-8 -*-
from sqlalchemy.sql import text
def getAttributesTaxon(
connection, cd_ref, attrDesc, attrComment, attrMilieu, attrChoro
):
sql = """
SELECT *
FROM atlas.vm_cor_taxon_attribut
WHERE id_attribut IN (:thisattrDesc, :thisattrComment, :thisattrMilieu, :thisattrChoro)
AND cd_ref = :thiscdref
"""
req = connection.execute(
text(sql),
thiscdref=cd_ref,
thisattrDesc=attrDesc,
thisattrComment=attrComment,
thisattrMilieu=attrMilieu,
thisattrChoro=attrChoro
)
descTaxon = {
'description': None,
'commentaire': None,
'milieu': None,
'chorologie': None
}
for r in req:
if r.id_attribut == attrDesc:
descTaxon['description'] = r.valeur_attribut
elif r.id_attribut == attrComment:
descTaxon['commentaire'] = r.valeur_attribut
elif r.id_attribut == attrMilieu:
descTaxon['milieu'] = r.valeur_attribut.replace("&" , " | ")
elif r.id_attribut == attrChoro:
descTaxon['chorologie'] = r.valeur_attribut
return descTaxon
| 28.268293 | 95 | 0.610009 | ["BSD-2-Clause"] | Splendens/atlas_biodiv_pdl | main/modeles/repositories/vmCorTaxonAttribut.py | 1,159 | Python |
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example that infers a shared embedding for three groups.
Fake data is generated from a ground truth model for three different
groups. In this example, these groups represent groups of agents with
varying levels of skill: novices, intermediates, and experts. Each group
has a different set of attention weights. An embedding model is
inferred from the simulated data and compared to the ground truth
model.
Example output:
Attention weights:
Novice | [3.38 3.32 0.49 0.43]
Intermediate | [2.06 2.18 2.04 2.18]
Expert | [0.55 0.50 3.40 3.32]
Model Comparison (R^2)
================================
True | Inferred
| Novice Interm Expert
--------+-----------------------
Novice | 0.95 0.68 0.16
Interm | 0.64 0.96 0.54
Expert | 0.16 0.61 0.96
"""
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # noqa
import numpy as np
from scipy.stats import pearsonr
import tensorflow as tf
import psiz
# Uncomment the following line to force eager execution.
# tf.config.run_functions_eagerly(True)
# Uncomment and edit the following to control GPU visibility.
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def main():
"""Run the simulation that infers an embedding for three groups."""
# Settings.
n_stimuli = 30
n_dim = 4
n_group = 3
n_restart = 1
epochs = 1000
n_trial = 2000
batch_size = 128
model_true = ground_truth(n_stimuli, n_dim, n_group)
# Generate a random docket of trials to show each group.
generator = psiz.trials.RandomRank(
n_stimuli, n_reference=8, n_select=2
)
docket = generator.generate(n_trial)
# Create virtual agents for each group.
agent_novice = psiz.agents.RankAgent(model_true, groups=[0])
agent_interm = psiz.agents.RankAgent(model_true, groups=[1])
agent_expert = psiz.agents.RankAgent(model_true, groups=[2])
# Simulate similarity judgments for each group.
obs_novice = agent_novice.simulate(docket)
obs_interm = agent_interm.simulate(docket)
obs_expert = agent_expert.simulate(docket)
obs = psiz.trials.stack((obs_novice, obs_interm, obs_expert))
# Partition observations into 80% train, 10% validation and 10% test set.
obs_train, obs_val, obs_test = psiz.utils.standard_split(obs)
# Convert to TF dataset.
ds_obs_train = obs_train.as_dataset().shuffle(
buffer_size=obs_train.n_trial, reshuffle_each_iteration=True
).batch(batch_size, drop_remainder=False)
ds_obs_val = obs_val.as_dataset().batch(
batch_size, drop_remainder=False
)
ds_obs_test = obs_test.as_dataset().batch(
batch_size, drop_remainder=False
)
# Use early stopping.
early_stop = psiz.keras.callbacks.EarlyStoppingRe(
'val_cce', patience=15, mode='min', restore_best_weights=True
)
callbacks = [early_stop]
compile_kwargs = {
'loss': tf.keras.losses.CategoricalCrossentropy(),
'optimizer': tf.keras.optimizers.Adam(lr=.001),
'weighted_metrics': [
tf.keras.metrics.CategoricalCrossentropy(name='cce')
]
}
model_inferred = build_model(n_stimuli, n_dim, n_group)
# Infer embedding with restarts.
restarter = psiz.keras.Restarter(
model_inferred, compile_kwargs=compile_kwargs, monitor='val_loss',
n_restart=n_restart
)
restart_record = restarter.fit(
x=ds_obs_train, validation_data=ds_obs_val, epochs=epochs,
callbacks=callbacks, verbose=0
)
model_inferred = restarter.model
# Compare the inferred model with ground truth by comparing the
# similarity matrices implied by each model.
simmat_truth = (
model_similarity(model_true, groups=[0]),
model_similarity(model_true, groups=[1]),
model_similarity(model_true, groups=[2])
)
simmat_inferred = (
model_similarity(model_inferred, groups=[0]),
model_similarity(model_inferred, groups=[1]),
model_similarity(model_inferred, groups=[2])
)
r_squared = np.empty((n_group, n_group))
for i_truth in range(n_group):
for j_infer in range(n_group):
rho, _ = pearsonr(simmat_truth[i_truth], simmat_inferred[j_infer])
r_squared[i_truth, j_infer] = rho**2
# Display attention weights.
# Permute inferred dimensions to best match ground truth.
attention_weight = tf.stack(
[
model_inferred.kernel.subnets[0].distance.w,
model_inferred.kernel.subnets[1].distance.w,
model_inferred.kernel.subnets[2].distance.w
],
axis=0
).numpy()
idx_sorted = np.argsort(-attention_weight[0, :])
attention_weight = attention_weight[:, idx_sorted]
group_labels = ["Novice", "Intermediate", "Expert"]
print("\n Attention weights:")
for i_group in range(attention_weight.shape[0]):
print(" {0:>12} | {1}".format(
group_labels[i_group],
np.array2string(
attention_weight[i_group, :],
formatter={'float_kind': lambda x: "%.2f" % x})
)
)
# Display comparison results. A good inferred model will have a high
# R^2 value on the diagonal elements (max is 1) and relatively low R^2
# values on the off-diagonal elements.
print('\n Model Comparison (R^2)')
print(' ================================')
print(' True | Inferred')
print(' | Novice Interm Expert')
print(' --------+-----------------------')
print(' Novice | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(
r_squared[0, 0], r_squared[0, 1], r_squared[0, 2]))
print(' Interm | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(
r_squared[1, 0], r_squared[1, 1], r_squared[1, 2]))
print(' Expert | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(
r_squared[2, 0], r_squared[2, 1], r_squared[2, 2]))
print('\n')
def ground_truth(n_stimuli, n_dim, n_group):
"""Return a ground truth embedding."""
stimuli = tf.keras.layers.Embedding(
n_stimuli+1, n_dim, mask_zero=True,
embeddings_initializer=tf.keras.initializers.RandomNormal(
stddev=.17
)
)
shared_similarity = psiz.keras.layers.ExponentialSimilarity(
trainable=False,
beta_initializer=tf.keras.initializers.Constant(10.),
tau_initializer=tf.keras.initializers.Constant(1.),
gamma_initializer=tf.keras.initializers.Constant(0.)
)
# Define group-specific kernels.
kernel_0 = psiz.keras.layers.DistanceBased(
distance=psiz.keras.layers.Minkowski(
rho_trainable=False,
rho_initializer=tf.keras.initializers.Constant(2.),
w_initializer=tf.keras.initializers.Constant(
[1.8, 1.8, .2, .2]
),
w_constraint=psiz.keras.constraints.NonNegNorm(
scale=n_dim, p=1.
),
),
similarity=shared_similarity
)
kernel_1 = psiz.keras.layers.DistanceBased(
distance=psiz.keras.layers.Minkowski(
rho_trainable=False,
rho_initializer=tf.keras.initializers.Constant(2.),
w_initializer=tf.keras.initializers.Constant(
[1., 1., 1., 1.]
),
w_constraint=psiz.keras.constraints.NonNegNorm(
scale=n_dim, p=1.
),
),
similarity=shared_similarity
)
kernel_2 = psiz.keras.layers.DistanceBased(
distance=psiz.keras.layers.Minkowski(
rho_trainable=False,
rho_initializer=tf.keras.initializers.Constant(2.),
w_initializer=tf.keras.initializers.Constant(
[.2, .2, 1.8, 1.8]
),
w_constraint=psiz.keras.constraints.NonNegNorm(
scale=n_dim, p=1.
),
),
similarity=shared_similarity
)
kernel_group = psiz.keras.layers.GateMulti(
subnets=[kernel_0, kernel_1, kernel_2], group_col=0
)
model = psiz.keras.models.Rank(
stimuli=stimuli, kernel=kernel_group, use_group_kernel=True
)
return model
def build_model(n_stimuli, n_dim, n_group):
"""Build model.
Arguments:
n_stimuli: Integer indicating the number of stimuli in the
embedding.
n_dim: Integer indicating the dimensionality of the embedding.
Returns:
model: A TensorFlow Keras model.
"""
stimuli = tf.keras.layers.Embedding(
n_stimuli+1, n_dim, mask_zero=True,
)
shared_similarity = psiz.keras.layers.ExponentialSimilarity(
trainable=False,
beta_initializer=tf.keras.initializers.Constant(10.),
tau_initializer=tf.keras.initializers.Constant(1.),
gamma_initializer=tf.keras.initializers.Constant(0.)
)
kernel_0 = build_kernel(shared_similarity, n_dim)
kernel_1 = build_kernel(shared_similarity, n_dim)
kernel_2 = build_kernel(shared_similarity, n_dim)
kernel_group = psiz.keras.layers.GateMulti(
subnets=[kernel_0, kernel_1, kernel_2], group_col=0
)
model = psiz.keras.models.Rank(
stimuli=stimuli, kernel=kernel_group, use_group_kernel=True
)
return model
def build_kernel(similarity, n_dim):
"""Build kernel for single group."""
mink = psiz.keras.layers.Minkowski(
rho_trainable=False,
rho_initializer=tf.keras.initializers.Constant(2.),
w_constraint=psiz.keras.constraints.NonNegNorm(
scale=n_dim, p=1.
),
)
kernel = psiz.keras.layers.DistanceBased(
distance=mink,
similarity=similarity
)
return kernel
def model_similarity(model, groups=[]):
ds_pairs, ds_info = psiz.utils.pairwise_index_dataset(
model.n_stimuli, mask_zero=True, groups=groups
)
simmat = psiz.utils.pairwise_similarity(
model.stimuli, model.kernel, ds_pairs, use_group_kernel=True
).numpy()
return simmat
if __name__ == "__main__":
main()
| 33.266871 | 78 | 0.636146 | ["Apache-2.0"] | rgerkin/psiz | examples/rank/mle_3g.py | 10,845 | Python |
#Reverse the input array
# input : {5,4,3,2,1}
# output : {1,2,3,4,5}
arr = list(map(int,input().split()))
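# Note: arr[::-1] (or list(reversed(arr))) reverses in one step; the loop below does it manually.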
reversed_arr = []
for i in range(len(arr)):
    reversed_arr.append(arr[-1])  # take the current last element
    arr.pop()                     # and drop it from the original list
print(reversed_arr)
| 18.7 | 36 | 0.582888 | ["MIT"] | montukv/Coding-problem-solutions | Python/test.py | 187 | Python |
import logging
import random
import string
import sys
import time
import unittest
import apiritif
def setup():
target = apiritif.http.target('http://localhost:8000/')
target.keep_alive(True)
target.auto_assert_ok(True)
target.use_cookies(True)
target.allow_redirects(True)
vars = {
'an': 'av',
}
apiritif.put_into_thread_store(vars, target)
class TestAPI(unittest.TestCase):
def setUp(self):
(self.vars, self.target) = apiritif.get_from_thread_store()
def test_1_an(self):
with apiritif.transaction(self.vars['an']):
response = self.target.get(self.vars['an'])
def test_2_set_variables(self):
self.vars['an'] = 'another_path1'
self.vars['bn'] = 'another_path2'
def test_3_an(self):
with apiritif.transaction(self.vars['an']):
response = self.target.get(self.vars['an'])
| 21.666667 | 67 | 0.648352 | ["Apache-2.0"] | adrianantonypillai/taurus | tests/resources/apiritif/test_vars.py | 910 | Python |
import pafy
url = "https://www.youtube.com/watch?v=OE7wUUpJw6I&list=PL2_aWCzGMAwLPEZrZIcNEq9ukGWPfLT4A"
video = pafy.new(url)
print(video.title)
stream = video.streams  # reuse the pafy object created above instead of fetching the URL again
best=video.getbest()
for i in stream:
print(i)
print(best.resolution,best.extension)
print(best.url)
best.download(quiet=False)
| 22 | 91 | 0.775974 | ["MIT"] | bgoonz/bash-commands-walkthrough | steps/3-clean-up-fluf/DS-ALGO-OFFICIAL-master/CONTENT/DS-n-Algos/ALGO/__PYTHON/YT_DOWN.py | 308 | Python |
import pandas as pd
import time
from google import google
import sys
from A00_File_name import file_name
file_df = pd.read_csv(file_name, sep=';', encoding='latin-1')
print(file_df.head())
brand_names_list = file_df['Official Chain Name'].tolist()
'''
create a column with Official Brand WWWs
'''
# https://github.com/abenassi/Google-Search-API
WWW = []
for index in range(len(brand_names_list)):
search_results = google.search(str(brand_names_list[index]) +
' ' + str(file_df.iloc[index]['Category']) + " official website")
time.sleep(3)
result_nb = 0
try:
for i in range(len(search_results)):
if "wiki" in str(search_results[i].link) or 'facebook' in str(search_results[i].link).lower() \
or'stackoverflow' in str(search_results[i].link).lower():
print(str(index), 'wiki or facebook or stackoverflow')
pass
else:
print(search_results[i].link)
WWW.append("/".join(search_results[i].link.split("/", 3)[:3]))
print(index, i)
result_nb += 1
break
if result_nb == 0:
WWW.append('[]')
except OSError:
WWW.append('Permission denial ' + str(sys.exc_info()[0]))
except:
WWW.append(sys.exc_info()[0])
print(len(brand_names_list))
print(len(WWW))
'''
create a column with .com domain
'''
def create_www_brand_COM(brand_name):
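    # Illustrative example of the normalization below: "Levi's & Co."  ->  "levisco.com"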
newstr = brand_name.replace("'", "")
newstr = newstr.replace(" ", "")
newstr = newstr.replace(".", "")
newstr = newstr.replace("&", "")
newstr = newstr.replace("-", "")
newstr = newstr + '.com'
newstr = newstr.lower()
print(newstr)
return newstr
brands_wwws = []
for name in file_df['Official Chain Name']:
brands_wwws.append(create_www_brand_COM(name))
print(brands_wwws)
file_df['Official Web Page'] = WWW
file_df['.com Web Page'] = brands_wwws
print(file_df.head())
file_df.to_csv(file_name[:-4] + '_URLs_from_WB.csv', sep=';')
| 27.202532 | 108 | 0.585389 | ["MIT"] | ancago/search-download-favicons-from-web | A01_WEB_BROWSER_get_Official_WWWs_create_COM_domain.py | 2,149 | Python |
"""Representation of a WeMo Motion device."""
from .api.long_press import LongPressMixin
from .switch import Switch
class LightSwitch(Switch, LongPressMixin):
"""Representation of a WeMo Motion device."""
def __repr__(self):
"""Return a string representation of the device."""
return '<WeMo LightSwitch "{name}">'.format(name=self.name)
@property
def device_type(self):
"""Return what kind of WeMo this device is."""
return "LightSwitch"
| 28.823529 | 67 | 0.677551 | ["MIT"] | GarlicToum/pywemo | pywemo/ouimeaux_device/lightswitch.py | 490 | Python |
import requests
import json
class BuddyAPI():
'''
An API of buddymojo.com
:returns: An API
'''
def __init__(self):
self.payload = {'type': 'friend',
'action': 'finish'}
self.payloadf = {'userQuizId': 1,
'type': 'friend',
'stats': '1'}
self.url = 'https://cn.buddymojo.com/api/v1/quiz/18'
self.match = 'https://cn.buddymojo.com/match/'
def send_single_ans(self, ID, name: str):
'''
Send a single message to specific id with a specific name.
:params ID: User quiz id.
:type ID: int
:params name: Name you want on the message.
:type name: str
'''
self.data = {'userFullName': name,
'userQuizId': 1}
self.data.update(userQuizId=ID)
self.payloadf.update(userQuizId=ID)
try:
req = requests.request('GET', self.url, params=self.payloadf)
questions = json.loads(req.text).get('data').get('questions')
# d = text.get('data')
# questions = d.get('questions')
for j, q in enumerate(questions):
qval = q.get('choosenOption')
self.data.update(
{'questions['+str(j)+'][choosenOption]': qval})
reqi = requests.post(self.url, params=self.payload, data=self.data)
print('sending post to userQuizId: '+str(ID))
except:
print('User not found')
def send_range_ans(self, start, end, name: str):
'''
        Send messages to a range of user quiz ids.
:params start: The start user id.
:type start: int
:params end: The end user id.
:type end: int
:params name: The name you want.
:type name: str
'''
for i in range(start, end):
data = {'userFullName': name,
'userQuizId': 1}
data.update(userQuizId=i)
self.payloadf.update(userQuizId=i)
try:
req = requests.request('GET', self.url, params=self.payloadf)
questions = json.loads(req.text).get('data').get('questions')
# d = text.get('data')
# questions = d.get('questions')
for j, q in enumerate(questions):
qval = q.get('choosenOption')
data.update({'questions['+str(j)+'][choosenOption]': qval})
reqi = requests.post(self.url, params=self.payload, data=data)
print('sending post to userQuizId: '+str(i))
except:
continue
# Still working out
def get_userQuizId(self, encUserQuizId):
'''
Returns a user id string of the encUserQuizId.
'''
try:
            req = requests.request('GET', self.match + encUserQuizId)
data = json.loads(req.text)
print(data)
except:
return 'User not found'
def get_link(self, ID):
'''
Returns a url string of the id.
:params ID: The id to get the url from.
:type ID: int
:returns: A url string.
:rtype: String
'''
self.payloadf.update(userQuizId=ID)
try:
req = requests.request('GET', self.url, params=self.payloadf)
data = json.loads(req.text).get('data').get('encUserQuizId')
return self.match + data
except:
return 'User not found'
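# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# The quiz id (12345) and name ('Alice') are hypothetical placeholder values, and the
# calls below perform real HTTP requests against buddymojo.com.
if __name__ == '__main__':
    api = BuddyAPI()
    api.send_single_ans(12345, 'Alice')   # answer quiz 12345 once, signed as "Alice"
    print(api.get_link(12345))            # print the share link for that quiz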
| 31.04386 | 79 | 0.514552 | ["MIT"] | jasonjustin/BuddymojoAPI | buddymojoAPI/BuddyMojoAPI.py | 3,539 | Python |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for factory.py."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.beta.configs import backbones
from official.vision.beta.configs import backbones_3d
from official.vision.beta.configs import image_classification as classification_cfg
from official.vision.beta.configs import maskrcnn as maskrcnn_cfg
from official.vision.beta.configs import retinanet as retinanet_cfg
from official.vision.beta.configs import video_classification as video_classification_cfg
from official.vision.beta.modeling import factory
from official.vision.beta.modeling import factory_3d
class ClassificationModelBuilderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('resnet', (224, 224), 5e-5),
('resnet', (224, 224), None),
('resnet', (None, None), 5e-5),
('resnet', (None, None), None),
)
def test_builder(self, backbone_type, input_size, weight_decay):
num_classes = 2
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], 3])
model_config = classification_cfg.ImageClassificationModel(
num_classes=num_classes,
backbone=backbones.Backbone(type=backbone_type))
l2_regularizer = (
tf.keras.regularizers.l2(weight_decay) if weight_decay else None)
_ = factory.build_classification_model(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
class MaskRCNNBuilderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('resnet', (640, 640)),
('resnet', (None, None)),
)
def test_builder(self, backbone_type, input_size):
num_classes = 2
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], 3])
model_config = maskrcnn_cfg.MaskRCNN(
num_classes=num_classes,
backbone=backbones.Backbone(type=backbone_type))
l2_regularizer = tf.keras.regularizers.l2(5e-5)
_ = factory.build_maskrcnn(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
class RetinaNetBuilderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('resnet', (640, 640), False),
('resnet', (None, None), True),
)
def test_builder(self, backbone_type, input_size, has_att_heads):
num_classes = 2
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], 3])
if has_att_heads:
attribute_heads_config = [
retinanet_cfg.AttributeHead(name='att1'),
retinanet_cfg.AttributeHead(
name='att2', type='classification', size=2),
]
else:
attribute_heads_config = None
model_config = retinanet_cfg.RetinaNet(
num_classes=num_classes,
backbone=backbones.Backbone(type=backbone_type),
head=retinanet_cfg.RetinaNetHead(
attribute_heads=attribute_heads_config))
l2_regularizer = tf.keras.regularizers.l2(5e-5)
_ = factory.build_retinanet(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
if has_att_heads:
self.assertEqual(model_config.head.attribute_heads[0].as_dict(),
dict(name='att1', type='regression', size=1))
self.assertEqual(model_config.head.attribute_heads[1].as_dict(),
dict(name='att2', type='classification', size=2))
class VideoClassificationModelBuilderTest(parameterized.TestCase,
tf.test.TestCase):
@parameterized.parameters(
('resnet_3d', (8, 224, 224), 5e-5),
('resnet_3d', (None, None, None), 5e-5),
)
def test_builder(self, backbone_type, input_size, weight_decay):
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], input_size[2], 3])
model_config = video_classification_cfg.VideoClassificationModel(
backbone=backbones_3d.Backbone3D(type=backbone_type))
l2_regularizer = (
tf.keras.regularizers.l2(weight_decay) if weight_decay else None)
_ = factory_3d.build_video_classification_model(
input_specs=input_specs,
model_config=model_config,
num_classes=2,
l2_regularizer=l2_regularizer)
if __name__ == '__main__':
tf.test.main()
| 37.736842 | 89 | 0.709305 | ["Apache-2.0"] | 1-punchMan/models | official/vision/beta/modeling/factory_test.py | 5,019 | Python |
from model import *
from data import *
from keras.preprocessing.image import ImageDataGenerator
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
data_gen_args = dict(rotation_range=0.2,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.05,
zoom_range=0.05,
horizontal_flip=True,
fill_mode='nearest')
myGene = trainGenerator(2,'data/membrane/train','image','label',data_gen_args,save_to_dir = None)
model = unet()
model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss',verbose=1, save_best_only=True)
model.fit_generator(myGene,steps_per_epoch=300,epochs=1,callbacks=[model_checkpoint])
# test_dir = "data/membrane/test"
# test_datagen = ImageDataGenerator(rescale=1./255)
# test_generator = test_datagen.flow_from_directory(
# test_dir,
# target_size=(256, 256),
# color_mode="grayscale",
# batch_size=1)
# test_path = "data/membrane/test"
# image_datagen = ImageDataGenerator(**data_gen_args)
# image_generator = image_datagen.flow_from_directory(
# test_path,
# class_mode = None,
# color_mode = "grayscale",
# target_size = (256,256),
# batch_size = 1,
# save_to_dir = None,
# seed = 2)
# filenames = test_generator.filenames
# nb_samples = len(filenames)
# print(nb_samples)
# predict = model.predict_generator(test_generator,steps = nb_samples)
# testGene = testGenerator("data/membrane/test")
# filenames = testGene.filenames
# nb_samples = len(filenames)
# results = model.predict_generator(testGene,30,verbose=1)
# saveResult("data/membrane/test",results)
test_path = "data/membrane/test"
target_size = (256,256)
flag_multi_class = False
img = io.imread(os.path.join(test_path,"%d.png"%30),as_gray = True)
img = img / 255
img = trans.resize(img,target_size)
img = np.reshape(img,img.shape+(1,)) if (not flag_multi_class) else img
img = np.reshape(img,(1,)+img.shape)
results = model.predict(img)
print(results)
COLOR_DICT = np.array([Sky, Building, Pole, Road, Pavement,
Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])
saveResult("data/membrane/test",results)
#io.imsave(os.path.join(save_path,"%d_predict.png"%31),results)
# testGene = testGenerator("data/membrane/test")
# results = model.predict_generator(testGene,31)
# saveResult("data/membrane/test",results) | 33.283784 | 103 | 0.688185 | [
"MIT"
] | twinkle0331/unet | main.py | 2,463 | Python |
# © Copyright IBM Corporation 2020.
#
# LICENSE: Apache License 2.0 (Apache-2.0)
# http://www.apache.org/licenses/LICENSE-2.0
"""
...
"""
# init file
# import cython created shared object files
import sib.c_package # cython with cpp version
# import core functionality
from .sib_main import *
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 17.818182 | 47 | 0.734694 | ["Apache-2.0"] | IBM/sib | src/sib/__init__.py | 393 | Python |
#!/usr/bin/python3
from sys import version_info
from setuptools import setup
if version_info < (3, 5, 3):
raise RuntimeError("aiopm requires Python 3.5.3+")
setup(
name='aiopm',
version='1.1',
description='Async Postmark client (asyncio)',
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: POSIX',
# 'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
],
author='Vitold Sedyshev',
author_email='[email protected]',
maintainer=', '.join([
'Vitold Sedyshev <[email protected]>',
]),
maintainer_email='[email protected]',
url='https://github.com/vit1251/aiopm',
project_urls={
# 'CI: Travis': '...',
# 'Coverage: codecov': '...',
# 'GitHub: issues': '',
# 'GitHub: repo': '',
},
license='MIT',
packages=['aiopm'],
python_requires='>=3.5.3',
install_requires=['aiohttp'],
include_package_data=True,
)
| 30.086957 | 55 | 0.584538 | ["MIT"] | vit1251/aiopm | setup.py | 1,384 | Python |
from .basic import models as basic_models
models = {}
| 18 | 41 | 0.759259 | ["MIT"] | R-N/sistem_gaji_vue_thrift | backend/models/manager.py | 54 | Python |
import halide as hl
import simple_stub
import complex_stub
def _realize_and_check(f, offset = 0):
b = hl.Buffer(hl.Float(32), [2, 2])
f.realize(b)
assert b[0, 0] == 3.5 + offset + 123
assert b[0, 1] == 4.5 + offset + 123
assert b[1, 0] == 4.5 + offset + 123
assert b[1, 1] == 5.5 + offset + 123
def test_simple(gen):
x, y = hl.Var(), hl.Var()
target = hl.get_jit_target_from_environment()
b_in = hl.Buffer(hl.UInt(8), [2, 2])
b_in.fill(123)
f_in = hl.Func("f")
f_in[x, y] = x + y
# ----------- Inputs by-position
f = gen(target, b_in, f_in, 3.5)
_realize_and_check(f)
# ----------- Inputs by-name
f = gen(target, buffer_input=b_in, func_input=f_in, float_arg=3.5)
_realize_and_check(f)
f = gen(target, float_arg=3.5, buffer_input=b_in, func_input=f_in)
_realize_and_check(f)
# ----------- Above set again, w/ GeneratorParam mixed in
k = 42
# (positional)
f = gen(target, b_in, f_in, 3.5, offset=k)
_realize_and_check(f, k)
# (keyword)
f = gen(target, offset=k, buffer_input=b_in, func_input=f_in, float_arg=3.5)
_realize_and_check(f, k)
f = gen(target, buffer_input=b_in, offset=k, func_input=f_in, float_arg=3.5)
_realize_and_check(f, k)
f = gen(target, buffer_input=b_in, func_input=f_in, offset=k, float_arg=3.5)
_realize_and_check(f, k)
f = gen(target, buffer_input=b_in, float_arg=3.5, func_input=f_in, offset=k)
_realize_and_check(f, k)
# ----------- Test various failure modes
try:
# Inputs w/ mixed by-position and by-name
f = gen(target, b_in, f_in, float_arg=3.5)
except RuntimeError as e:
assert 'Cannot use both positional and keyword arguments for inputs.' in str(e)
else:
assert False, 'Did not see expected exception!'
try:
# too many positional args
f = gen(target, b_in, f_in, 3.5, 4)
except RuntimeError as e:
assert 'Expected exactly 3 positional args for inputs, but saw 4.' in str(e)
else:
assert False, 'Did not see expected exception!'
try:
# too few positional args
f = gen(target, b_in, f_in)
except RuntimeError as e:
assert 'Expected exactly 3 positional args for inputs, but saw 2.' in str(e)
else:
assert False, 'Did not see expected exception!'
try:
# Inputs that can't be converted to what the receiver needs (positional)
f = gen(target, hl.f32(3.141592), "happy", k)
except RuntimeError as e:
assert 'Unable to cast Python instance' in str(e)
else:
assert False, 'Did not see expected exception!'
try:
# Inputs that can't be converted to what the receiver needs (named)
f = gen(target, b_in, f_in, float_arg="bogus")
except RuntimeError as e:
assert 'Unable to cast Python instance' in str(e)
else:
assert False, 'Did not see expected exception!'
try:
# Input specified by both pos and kwarg
f = gen(target, b_in, f_in, 3.5, float_arg=4.5)
except RuntimeError as e:
assert "Cannot use both positional and keyword arguments for inputs." in str(e)
else:
assert False, 'Did not see expected exception!'
try:
# Bad input name
f = gen(target, buffer_input=b_in, float_arg=3.5, offset=k, funk_input=f_in)
except RuntimeError as e:
assert "Expected exactly 3 keyword args for inputs, but saw 2." in str(e)
else:
assert False, 'Did not see expected exception!'
try:
# Bad gp name
f = gen(target, buffer_input=b_in, float_arg=3.5, offset=k, func_input=f_in, nonexistent_generator_param="wat")
except RuntimeError as e:
assert "has no GeneratorParam named: nonexistent_generator_param" in str(e)
else:
assert False, 'Did not see expected exception!'
def test_looplevel(gen):
x, y = hl.Var('x'), hl.Var('y')
target = hl.get_jit_target_from_environment()
buffer_input = hl.Buffer(hl.UInt(8), [4, 4])
buffer_input.fill(123)
func_input = hl.Func("func_input")
func_input[x, y] = x + y
simple_compute_at = hl.LoopLevel()
simple = gen(target, buffer_input, func_input, 3.5,
compute_level=simple_compute_at)
computed_output = hl.Func('computed_output')
computed_output[x, y] = simple[x, y] + 3
simple_compute_at.set(hl.LoopLevel(computed_output, x))
_realize_and_check(computed_output, 3)
def _make_constant_image():
constant_image = hl.Buffer(hl.UInt(8), [32, 32, 3], 'constant_image')
for x in range(32):
for y in range(32):
for c in range(3):
constant_image[x, y, c] = x + y + c
return constant_image
def test_complex(gen):
constant_image = _make_constant_image()
input = hl.ImageParam(hl.UInt(8), 3, 'input')
input.set(constant_image)
x, y, c = hl.Var(), hl.Var(), hl.Var()
target = hl.get_jit_target_from_environment()
float_arg = 1.25
int_arg = 33
func_input = hl.Func("func_input")
func_input[x, y, c] = hl.u16(x + y + c)
r = gen(target,
typed_buffer_input=constant_image,
untyped_buffer_input=constant_image,
simple_input=input,
array_input=[ input, input ],
float_arg=float_arg,
int_arg=[ int_arg, int_arg ],
untyped_buffer_output_type="uint8",
extra_func_input=func_input,
vectorize=True)
# return value is a tuple; unpack separately to avoid
# making the callsite above unreadable
(simple_output,
tuple_output,
array_output,
typed_buffer_output,
untyped_buffer_output,
static_compiled_buffer_output,
scalar_output,
extra_func_output) = r
b = simple_output.realize([32, 32, 3], target)
assert b.type() == hl.Float(32)
for x in range(32):
for y in range(32):
for c in range(3):
expected = constant_image[x, y, c]
actual = b[x, y, c]
assert expected == actual, "Expected %s Actual %s" % (expected, actual)
b = tuple_output.realize([32, 32, 3], target)
assert b[0].type() == hl.Float(32)
assert b[1].type() == hl.Float(32)
assert len(b) == 2
for x in range(32):
for y in range(32):
for c in range(3):
expected1 = constant_image[x, y, c] * float_arg
expected2 = expected1 + int_arg
actual1, actual2 = b[0][x, y, c], b[1][x, y, c]
assert expected1 == actual1, "Expected1 %s Actual1 %s" % (expected1, actual1)
assert expected2 == actual2, "Expected2 %s Actual1 %s" % (expected2, actual2)
assert len(array_output) == 2
for a in array_output:
b = a.realize([32, 32], target)
assert b.type() == hl.Int(16)
for x in range(32):
for y in range(32):
expected = constant_image[x, y, 0] + int_arg
actual = b[x, y]
assert expected == actual, "Expected %s Actual %s" % (expected, actual)
# TODO: Output<Buffer<>> has additional behaviors useful when a Stub
# is used within another Generator; this isn't yet implemented since there
# isn't yet Python bindings for Generator authoring. This section
# of the test may need revision at that point.
b = typed_buffer_output.realize([32, 32, 3], target)
assert b.type() == hl.Float(32)
for x in range(32):
for y in range(32):
for c in range(3):
expected = constant_image[x, y, c]
actual = b[x, y, c]
assert expected == actual, "Expected %s Actual %s" % (expected, actual)
b = untyped_buffer_output.realize([32, 32, 3], target)
assert b.type() == hl.UInt(8)
for x in range(32):
for y in range(32):
for c in range(3):
expected = constant_image[x, y, c]
actual = b[x, y, c]
assert expected == actual, "Expected %s Actual %s" % (expected, actual)
b = static_compiled_buffer_output.realize([4, 4, 1], target)
assert b.type() == hl.UInt(8)
for x in range(4):
for y in range(4):
for c in range(1):
expected = constant_image[x, y, c] + 42
actual = b[x, y, c]
assert expected == actual, "Expected %s Actual %s" % (expected, actual)
b = scalar_output.realize([], target)
assert b.type() == hl.Float(32)
assert b[()] == 34.25
b = extra_func_output.realize([32, 32], target)
assert b.type() == hl.Float(64)
for x in range(32):
for y in range(32):
expected = x + y + 1
actual = b[x, y]
assert expected == actual, "Expected %s Actual %s" % (expected, actual)
if __name__ == "__main__":
test_simple(simple_stub.generate)
test_looplevel(simple_stub.generate)
test_complex(complex_stub.generate)
| 33.656716 | 119 | 0.600665 | ["Apache-2.0"] | derek-gerstmann/Halide | python_bindings/correctness/pystub.py | 9,020 | Python |
from logger import elog, mlog, alog
from db_engine import mysql_connect, mysql_reconnect, get_qs, \
estr, valid_pass, SQLParamError, sql_selectall, \
sql_insertinto, do_param_error, sq, sql_update
import random, time, json, os, os.path, sys, math, types
from utils import *
from math import *
from auth import do_auth, gen_token, toktypes, rot_userid, unrot_userid
import datetime
from config import *
from db_engine import *
import db_engine
import base64
import os, os.path, sys, stat
from fileapi import file_restricted_fields, FOLDER_MIME, EMPTY_TAG, ROOT_PARENT_ID
import urllib.parse
#stupid unicode!
def jsondumps(obj):
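    # Minimal hand-rolled serializer; unlike json.dumps it does not escape quotes or
    # backslashes inside strings, so callers must ensure values are safe to embed.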
    if type(obj) in [int, float]:  # note: Python 3 has no "long" type
return str(obj);
elif type(obj) in [list, tuple, set]:
s = "["
for i, item in enumerate(obj):
if i > 0: s += ", "
s += jsondumps(item)
s += "]"
return s
elif type(obj) == dict:
s = "{"
for i, k in enumerate(obj):
if i > 0: s += ", "
s += '"' + k + '" : '
s += jsondumps(obj[k])
s += "}"
return s;
else: #XXX type(obj) == str:
return '"' + str(obj) + '"'
#else:
# raise RuntimeError("unknown object " + str(type(obj)));
WIN32 = sys.platform.startswith("win")
if not WIN32: #unix functions; need to test these!
def unixnorm(path):
#strip out '.', so ./path works
while path[0] == ".":
path = path[1:]
return path
def listdir(path):
path = unixnorm(path)
return os.listdir(path)
def exists(path):
path = unixnorm(path)
return os.path.exists(path)
def dostat(path):
path = unixnorm(path)
return os.stat(path)
def local_to_real(path):
path = unixnorm(path)
if path == "/.settings.bin":
print("APPDATA", get_appdata()) #os.environ["APPDATA"])
dir = get_appdata() + os.path.sep + ".fairmotion" #os.path.join(get_appdata(), "/.fairmotion")
if not os.path.exists(dir):
print("make dirs", dir)
os.makedirs(dir)
path = os.path.join(dir, ".settings.bin")
print("DIRPATH", dir)
print("PATH", path)
if not os.path.exists(path):
templ = config.server_root + "/default_settings_bin"
f = open(templ, "rb")
buf = f.read()
f.close()
f = open(path, "wb")
f.write(buf)
f.close()
return os.path.abspath(os.path.normpath(path))
if not serv_all_local:
path = files_root + os.path.sep + path
return os.path.abspath(os.path.normpath(path))
def real_to_local(path):
path = unixnorm(path)
if os.path.abspath(os.path.normpath(path)) == unixnorm(local_to_real("/.settings.bin")):
return "/.settings.bin"
path = os.path.abspath(os.path.normpath(path))
froot = os.path.abspath(os.path.normpath(files_root))
path = path[len(froot):].replace(os.path.sep, "/")
return path
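    # Illustrative mapping (assuming files_root="/srv/files" and serv_all_local=False):
    #   local_to_real("/proj/scene.bin")           -> "/srv/files/proj/scene.bin"
    #   real_to_local("/srv/files/proj/scene.bin") -> "/proj/scene.bin"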
if WIN32:
import win_util
local_to_real = win_util.local_to_real
real_to_local = win_util.real_to_local
listdir = win_util.listdir
dostat = win_util.dostat
exists = win_util.exists
get_appdata = win_util.get_appdata
else:
def get_appdata():
return os.environ["HOME"]
FOLDER_MIME = "application/vnd.google-apps.folder"
import fileapi_db
ROOT_PARENT_ID = fileapi_db.ROOT_PARENT_ID
def is_folder(file):
return file.mimeType == FOLDER_MIME or file.id == ROOT_PARENT_ID
def is_valid_file(file):
return file["realpath"] != EMPTY_TAG
try:
a = FileNotFoundError
except:
FileNotFoundError = OSError
class FileClass (dict):
def __init__(self, path, userid):
print(" FCLS PATH", path, userid)
path = os.path.normpath(path).replace(os.path.sep, "/")
diskpath = local_to_real(path)
froot = local_to_real("/")
if not os.path.exists(diskpath):
self.bad = True
return
else:
try:
nstat = dostat(diskpath)
except:
self.bad = True
return
rootid = fileapi_db.fileid_to_publicid(userid, ROOT_PARENT_ID)
if stat.S_ISDIR(nstat.st_mode):
mime = FOLDER_MIME
self.is_dir = True
else:
mime = "application/x-javascript"
self.is_dir = False
self.name = ""
self.bad = False
if not serv_all_local and not diskpath.startswith(froot):
elog("Error! " + diskpath)
print("Error!", diskpath, froot)
self.bad = True
return
self.diskpath = diskpath
self.mimeType = mime
self.id = fileid_to_publicid(path, userid)
#print("Final relative path:", path, len(froot));
oname = path
while len(oname) > 0 and oname[0] in ["\\", "/"]:
oname = oname[1:]
name = oname[oname.rfind("/")+1:].strip()
name = name.replace("/", "")
if name == "":
name = oname
self.name = name
#print("Final name:", self.name)
parentpath = path[:path.rfind("/")].strip()
if "/" not in path:
parentpath = "/"
#print("PARENT PATH", "'"+parentpath+"'", fileid_to_publicid(parentpath, userid))
if name == "/" or parentpath == "/" or parentpath == "":
self.parentid = rootid
else:
self.parentid = fileid_to_publicid(parentpath, userid)
def File(path, userid):
f = FileClass(path, userid)
if f.bad: return None
return f
#for local serving, encode file path as the id
def fileid_to_publicid(path, userid):
if ".." in path: return "-1"
path = bytes(path, "latin-1")
path = str(base64.b64encode(path), "latin-1")
return path
def publicid_to_fileid(publicid):
if len(publicid) == 17:
userid, fileid = fileapi_db.publicid_to_fileid(publicid)
if fileid == ROOT_PARENT_ID:
return "/"
if publicid == "/":
return publicid
#print(":::", publicid)
path = base64.b64decode(bytes(publicid, "latin-1"));
path = str(path, "latin-1")
if ".." in path: return "-1"
return path
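# Illustrative round trip in local mode (the "public id" is simply the base64 of the path):
#   fileid_to_publicid("/proj/scene.bin", userid) -> "L3Byb2ovc2NlbmUuYmlu"
#   publicid_to_fileid("L3Byb2ovc2NlbmUuYmlu")    -> "/proj/scene.bin"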
class FileAPI_DirList:
basepath = "/api/files/dir/list"
def __init__(self):
pass
def do_GET(self, serv):
qs = get_qs(serv.path)
if "accessToken" not in qs or ("path" not in qs and "id" not in qs):
serv.send_error(400)
return
tok = qs["accessToken"][0]
userid = do_auth(tok)
if userid == None:
elog("Invalid access in file api")
serv.send_error(401)
return
if "id" in qs:
path = publicid_to_fileid(qs["id"][0])
else:
path = qs["path"][0]
        path = urllib.parse.unquote(path).strip()
print("PATHPATH", path);
dir = File(path, userid)
if ".." in path:
serv.send_error(401)
return
if not serv_all_local:
prefix = files_root#+rot_userid(userid)
try:
os.makedirs(prefix)
except FileExistsError:
pass
dirpath = local_to_real(path)
files = []
for f in listdir(dirpath):
path2 = path + os.path.sep + f
file = File(path2, userid)
f = {}
if file == None:
continue
print("error!", dirpath)
#if file == None: continue
f["name"] = file.name
f["id"] = file.id
f["mimeType"] = file.mimeType
f["is_dir"] = 1 if file.is_dir else 0
f["parentid"] = file.parentid
files.append(f)
body = jsondumps({"items": files})
body = bstr(body)
serv.gen_headers("GET", len(body), json_mimetype)
serv.wfile.write(body)
class FileAPI_MakeFolder:
basepath = "/api/files/dir/new"
def __init__(self):
pass
def do_GET(self, serv):
qs = get_qs(serv.path)
if "name" not in qs or "accessToken" not in qs or ("path" not in qs and "id" not in qs):
serv.send_error(400)
return
if ".." in qs["name"][0]:
serv.send_error(403)
return
tok = qs["accessToken"][0]
userid = do_auth(tok)
if userid == None:
serv.send_error(401)
return
if "id" in qs:
folderid = publicid_to_fileid(qs["id"][0])
else:
folderid = qs["path"][0]
if folderid == None:
serv.send_error(400)
return
path = local_to_real(folderid + "/" + qs["name"][0])
print("PATH", path, exists(path))
#see if folder (or a file) already exists
if exists(path):
serv.send_error(400)
return
os.makedirs(path)
body = json.dumps({"success": True})
body = bstr(body)
serv.gen_headers("GET", len(body), json_mimetype)
serv.wfile.write(body)
class FileAPI_GetMeta:
basepath = "/api/files/get/meta"
def __init__(self):
pass
def do_POST(self, serv):
buf = serv.rfile.read()
try:
obj = json.loads(buf)
except:
            serv.send_error(401)
return
def do_GET(self, serv):
qs = get_qs(serv.path)
if "accessToken" not in qs or ("path" not in qs and "id" not in qs):
serv.send_error(400)
return
tok = qs["accessToken"][0]
userid = do_auth(tok)
if userid == None:
serv.send_error(401)
return
if "path" in qs:
fileid = qs["path"][0]
            fileid = urllib.parse.unquote(fileid)
else:
fileid = qs["id"][0]
path = local_to_real(fileid);
if not os.path.exists(path):
serv.send_error(404);
return
st = os.stat(path)
fname = fileid.replace("\\", "/").strip()
dir = ""
if "/" in fname and fname[-1] != "/":
dir = fname[:fname.rfind("/")].strip()
fname = fname[len(dir):]
while fname[0] == "/":
fname = fname[1:]
#ROOT_PARENT_ID
mime = "unknown"
if stat.S_ISDIR(st.st_mode):
mime = FOLDER_MIME
else:
pass #deal with later
#stupid quoting
#id = urllib.quote(fileid, "").strip()
id = fileid_to_publicid(fileid, userid).strip()
#if id[0] == "'" or id[0] == "\"" and id[0] == id[-1]:
f = {
'name' : fname,
'id' : id,
'parentid' : dir,
'mimeType' : mime,
'modified' : st.st_mtime,
'is_dir' : stat.S_ISDIR(st.st_mode)
};
f2 = {}
for k in f:
if k in file_restricted_fields: continue
f2[k] = f[k]
body = json.dumps(f2)
body = bstr(body)
serv.gen_headers("GET", len(body), json_mimetype)
serv.wfile.write(body)
class UploadStatus:
def __init__(self, uploadToken=None):
self.invalid = False
if uploadToken != None:
self.from_sql(uploadToken)
def from_sql(self, utoken):
cur, con = mysql_connect()
try:
qstr = sql_selectall("uploadtokens", ["tokenid"], [utoken], [sq.token])
except SQLParamError:
do_param_error("UploadToken.from_sql")
raise SQLParamError()
#qstr = "SELECT * FROM uploadtokens WHERE tokenid="+estr(utoken)
cur.execute(qstr)
ret = cur.fetchone()
if ret == None:
self.invalid = True
return
self.token = ret["tokenid"]
self.path = ret["path"]
self.time = ret["time"]
self.name = ret["name"]
self.fileid = ret["fileid"]
self.realpath = ret["realpath"]
self.userid = ret["userid"]
self.permissions = ret["permissions"]
self.expiration = ret["expiration"]
self.size = ret["size"]
self.cur = ret["cur"]
def toJSON(self):
obj = {}
        for k in self.__dict__:
val = getattr(self, k)
if type(val) in [types.MethodType, types.FunctionType]: continue
obj[k] = getattr(self, k)
return obj
def commit(self):
cur, con = mysql_connect()
dnow = datetime.datetime.now()
dend = datetime.datetime.now()+datetime.timedelta(days=1)
types = [sq.token, sq.path, sq.datetime, sq.int ]
cols = ["tokenid", "path", "time", "fileid" ]
values = [self.token, self.path, dnow, 32423423] #we don't use database fileids in local mode
types += [sq.str(100), sq.path, sq.int, sq.int ]
cols += ["name", "realpath", "userid", "permissions"]
values += [self.name, self.realpath, self.userid, 0 ]
types += [sq.datetime, sq.int, sq.int ]
cols += ["expiration", "size", "cur" ]
values += [dend, self.size, self.cur]
try:
qstr = sql_insertinto("uploadtokens", cols, values, types)
except SQLParamError:
#do_param_error(json.dumps(self));
raise SQLParamError("upload token error; see error.log for details")
print("QSTR", qstr)
cur.execute(qstr)
con.commit()
def create(self, token, path, userid, fileid, parentid=ROOT_PARENT_ID):
self.token = token
self.path = path
cs = os.path.split(path)
self.dir = cs[0];
self.time = time.time();
self.size = -1
self.cur = 0
self.file = None
self.file_init = False
self.fileid = fileid
self.userid = userid;
self.parentid = parentid; #note: not cached in database
if len(cs) == 1 or cs[1] == "" or cs[1] == None:
self.name = cs[0]
else:
self.name = cs[1]
self.gen_realpath()
def gen_realpath(self):
f = File(self.fileid, self.userid)
fpath = os.path.split(f.diskpath)[0]
if not os.path.exists(fpath):
os.makedirs(fpath)
self.realpath = f.diskpath
return f.diskpath
class FileAPI_UploadStart:
basepath = "/api/files/upload/start"
def __init__(self):
pass
def do_GET(self, serv):
elog("fileapi access" + serv.path)
qs = get_qs(serv.path)
if "accessToken" not in qs or ("path" not in qs and "id" not in qs):
serv.send_error(400)
return
tok = qs["accessToken"][0]
userid = do_auth(tok)
if userid == None:
elog("Need user id")
print("Bad auth")
serv.send_error(401)
return
path = qs["path"][0]
if "id" in qs:
fileid = publicid_to_fileid(qs["id"][0])
else:
            fileid = urllib.parse.unquote(path)
meta = File(fileid, userid)
if meta != None:
print("DISKPATH", meta.diskpath)
if meta == None or not os.path.exists(meta.diskpath):
elog("creating new file")
cs = os.path.split(path)
folderid = cs[0]
f = File(folderid, userid)
if not os.path.exists(f.diskpath):
elog("invalid folder " + f.diskpath)
print("invalid folder " + f.diskpath)
serv.send_error(401);
return
if len(cs) == 1 or cs[1] == "":
fname = cs[0]
else:
fname = cs[1]
mime = "application/octet-stream"
#create empty file
f = open(f.diskpath+"/"+fname, "w")
f.close()
meta = File(fileid, userid)
if meta == None:
elog("Invalid file id")
serv.send_error(400)
return
print("\n\nFILE", meta, "\n\n")
if is_folder(meta):
elog("target file is a folder" + meta["name"])
serv.send_error(401)
return
utoken = gen_token("U", userid);
ustatus = UploadStatus()
#ignore fileid/parentid in upload status token
ustatus.create(utoken, path, userid, fileid, -1)
try:
ustatus.commit()
except:
import traceback
elog("USTATUS.COMMIT failed!")
traceback.print_exc()
f = open(ustatus.realpath, "w");
f.close();
realpath = ustatus.realpath
body = json.dumps({"uploadToken" : utoken});
body = bstr(body)
print("\nupload start result:", body, "\n\n\n")
serv.gen_headers("GET", len(body), json_mimetype)
serv.wfile.write(body)
cur_uploads = {}
class FileAPI_UploadChunk:
basepath = "/api/files/upload"
def __init__(self):
pass
def do_PUT(self, serv):
alog("fileapi access" + serv.path)
qs = get_qs(serv.path)
if "accessToken" not in qs or "uploadToken" not in qs:
elog("fileapi: invalid tokens")
serv.send_error(400)
return
tok = qs["accessToken"][0]
utoken = qs["uploadToken"][0]
userid = do_auth(tok)
if userid == None:
elog("invalid authorization")
serv.send_error(401)
return
status = UploadStatus(utoken)
if status.invalid:
elog("invalid upload token ", utoken)
serv.send_error(401)
return
if "Content-Range" not in serv.headers:
elog("missing header " + json.dumps(serv.headers))
serv.send_error(400)
return
r = serv.headers["Content-Range"].strip()
if not r.startswith("bytes"):
elog("malformed request 1")
serv.send_error(400)
return
r = r[len("bytes"):].strip()
r = r.split("/")
if r == None or len(r) != 2:
elog("malformed request 2")
serv.send_error(400)
return
try:
max_size = int(r[1])
except ValueError:
elog("malformed request 3")
serv.send_error(400)
return
r = r[0].split("-")
if r == None or len(r) != 2:
elog("malformed request 4")
serv.send_error(400)
return
try:
r = [int(r[0]), int(r[1])]
except ValueError:
elog("malformed request 4")
serv.send_error(400)
return
if r[0] < 0 or r[1] < 0 or r[0] >= max_size or r[1] >= max_size \
or r[0] > r[1]:
elog("malformed request 5")
serv.send_error(400)
return
if status.size == -1:
status.size = max_size
buflen = r[1]-r[0]+1
if serv.rfile == None:
elog("serv.rfile was None! eek! " + str(buflen));
serv.send_error(500)
return;
buf = serv.rfile.read(buflen)
if len(buf) != buflen:
elog("malformed request 6")
serv.send_error(400)
return
if r[0] == 0:
mode = "wb"
else:
mode = "ab"
status.file = open(status.realpath, mode);
status.file.seek(r[0]);
status.file.write(buf);
status.file.flush()
status.file.close()
status.commit()
body = json.dumps({"success" : True});
body = bstr(body)
serv.gen_headers("PUT", len(body), json_mimetype)
serv.wfile.write(body)
class FileAPI_GetFile:
basepath = "/api/files/get"
def __init__(self):
pass
def do_GET(self, serv):
qs = get_qs(serv.path)
if "accessToken" not in qs or ("path" not in qs and "id" not in qs):
serv.send_error(400)
return
tok = qs["accessToken"][0]
userid = do_auth(tok)
if userid == None:
serv.send_error(401)
return
if "path" in qs:
path = qs["path"][0]
else:
path = publicid_to_fileid(qs["id"][0])
if path == None:
serv.send_error(404)
return
alog("fetching file %s" % path);
f = File(path, userid)
if f == None:
serv.send_error(400)
return
if is_folder(f):
serv.send_error(401)
return
print("diskpath:", f.diskpath)
try:
file = open(f.diskpath, "rb")
except OSError:
serv.send_error(404)
return
body = file.read()
file.close()
serv.gen_headers("GET", len(body), "application/octet-stream")
serv.send_header("Content-Disposition", "attachment; filename=\"%s\"" % f.name)
#Content-Disposition: attachment; filename=FILENAME
serv.wfile.write(body)
| 23.996333 | 109 | 0.564777 | ["MIT"] | joeedh/fairmotion | pyserver/fileapi_local.py | 19,629 | Python |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext as _, ugettext_lazy as _t
from desktop.lib.conf import Config, validate_path
SPARK_MASTER = Config(
key="spark_master",
help=_t("Address of the Spark master, e.g spark://localhost:7077. If empty use the current configuration. "
"Can be overriden in the script too."),
default=""
)
SPARK_HOME = Config(
key="spark_home",
help=_t("Local path to Spark Home on all the nodes of the cluster."),
default="/usr/lib/spark"
)
def config_validator(user):
res = []
res.extend(validate_path(SPARK_HOME, is_dir=True))
return res
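# Example (illustrative): the values defined above are typically read with Config.get(), e.g.
#   master = SPARK_MASTER.get() or "local[*]"
#   home = SPARK_HOME.get()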
| 31.613636 | 109 | 0.744788 | [
"Apache-2.0"
] | jesman/hue | apps/spark/src/spark/conf.py | 1,391 | Python |
"""
Module holds JMX handlers implementations
Copyright 2017 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
from distutils.version import LooseVersion
from lxml import etree
from bzt import TaurusInternalException, TaurusConfigError
from bzt.engine import Scenario
from bzt.jmx import JMX
from bzt.jmx.base import cond_int
from bzt.jmx.threadgroups import ThreadGroup, ConcurrencyThreadGroup, ThreadGroupHandler
from bzt.requests_model import RequestVisitor, has_variable_pattern, HierarchicRequestParser
from bzt.utils import iteritems, numeric_types
from bzt.utils import BetterDict, dehumanize_time, ensure_is_dict, load_class, guess_delimiter
class RequestCompiler(RequestVisitor):
def __init__(self, jmx_builder):
super(RequestCompiler, self).__init__()
self.jmx_builder = jmx_builder
def visit_mqttrequest(self, request):
return self.jmx_builder.compile_request(request)
def visit_hierarchichttprequest(self, request):
return self.jmx_builder.compile_request(request)
def visit_ifblock(self, block):
return self.jmx_builder.compile_if_block(block)
def visit_onceblock(self, block):
return self.jmx_builder.compile_once_block(block)
def visit_loopblock(self, block):
return self.jmx_builder.compile_loop_block(block)
def visit_whileblock(self, block):
return self.jmx_builder.compile_while_block(block)
def visit_foreachblock(self, block):
return self.jmx_builder.compile_foreach_block(block)
def visit_transactionblock(self, block):
return self.jmx_builder.compile_transaction_block(block)
def visit_includescenarioblock(self, block):
scenario_name = block.scenario_name
if scenario_name in self.path:
msg = "Mutual recursion detected in include-scenario blocks (scenario %s)"
raise TaurusConfigError(msg % scenario_name)
self.record_path(scenario_name)
return self.jmx_builder.compile_include_scenario_block(block)
def visit_actionblock(self, block):
return self.jmx_builder.compile_action_block(block)
def visit_setvariables(self, block):
return self.jmx_builder.compile_set_variables_block(block)
class LoadSettingsProcessor(object):
TG = ThreadGroup.__name__
CTG = ConcurrencyThreadGroup.__name__
def __init__(self, executor):
self.log = executor.log.getChild(self.__class__.__name__)
self.load = executor.get_specific_load()
self.raw_load = executor.get_raw_load()
self.log.debug("Load: %s", self.load)
self.force_ctg = executor.settings.get("force-ctg", True)
self.tg = self._detect_thread_group(executor)
self.tg_handler = ThreadGroupHandler(self.log)
def _detect_thread_group(self, executor):
"""
Detect preferred thread group
:param executor:
:return:
"""
tg = self.TG
if not self.force_ctg:
return tg
msg = 'Thread group detection: %s, regular ThreadGroup will be used'
if not self.load.duration:
self.log.debug(msg, 'duration not found')
elif self.load.iterations:
self.log.debug(msg, 'iterations are found')
elif not executor.tool:
            msg = 'You must set the executor tool (%s) in order to use ConcurrencyThreadGroup'
raise TaurusInternalException(msg % executor.tool_name)
elif not executor.tool.ctg_plugin_installed():
            self.log.warning(msg % 'plugin for ConcurrencyThreadGroup not found')
else:
tg = self.CTG
return tg
def modify(self, jmx, is_jmx_generated=False):
if not (self.raw_load.iterations or self.raw_load.concurrency or self.load.duration):
self.log.debug('No iterations/concurrency/duration found, thread group modification is skipped')
return
        # IMPORTANT: materialize the group list up front, because converting an element's type
        # changes the order in which groups are retrieved afterwards
groups = list(self.tg_handler.groups(jmx))
        # user concurrency is a JMeter variable, write it to the thread group as-is
if isinstance(self.load.concurrency, str):
target_list = [(group, self.load.concurrency) for group in groups]
else: # concurrency is numeric or empty
            raw = self.load.concurrency is None  # keep existing concurrency if self.load.concurrency is omitted
concurrency_list = []
for group in groups:
concurrency = group.get_concurrency(raw=raw)
if concurrency is None:
concurrency = 1
concurrency_list.append(concurrency)
if not raw: # divide numeric concurrency
self._divide_concurrency(concurrency_list)
target_list = zip(groups, concurrency_list)
for group, concurrency in target_list:
iterations = None
existed_tg = (not is_jmx_generated) and (group.gtype == self.TG)
if not self.force_ctg and existed_tg:
iterations = group.get_iterations()
self.tg_handler.convert(source=group, target_gtype=self.tg, load=self.load,
concurrency=concurrency, iterations=iterations)
if self.load.throughput:
self._add_shaper(jmx)
if self.tg == self.TG and self.load.steps:
self.log.warning("Stepping ramp-up isn't supported for regular ThreadGroup")
def _divide_concurrency(self, concurrency_list):
"""
calculate target concurrency for every thread group
"""
total_old_concurrency = sum(concurrency_list)
for idx, concurrency in enumerate(concurrency_list):
if total_old_concurrency and concurrency_list[idx] != 0:
part_of_load = 1.0 * self.load.concurrency * concurrency / total_old_concurrency
concurrency_list[idx] = int(round(part_of_load))
if concurrency_list[idx] == 0:
concurrency_list[idx] = 1
else:
concurrency_list[idx] = 0
total_new_concurrency = sum(concurrency_list)
leftover = self.load.concurrency - total_new_concurrency
if leftover < 0:
msg = "Had to add %s more threads to maintain thread group proportion"
self.log.warning(msg, -leftover)
elif leftover > 0:
msg = "%s threads left undistributed due to thread group proportion"
self.log.warning(msg, leftover)
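        # Worked example for the division above (illustrative): with load.concurrency=10 and
        # existing group concurrencies [2, 3], the proportional split yields [4, 6] and leftover=0.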
def _add_shaper(self, jmx):
"""
Add shaper
:param jmx: JMX
:return:
"""
if not self.load.duration:
self.log.warning("You must set 'ramp-up' and/or 'hold-for' when using 'throughput' option")
return
etree_shaper = jmx.get_rps_shaper()
if self.load.ramp_up:
if isinstance(self.load.throughput, numeric_types) and self.load.duration:
start_rps = self.load.throughput / float(self.load.duration)
start_rps = max(start_rps, 0.001) # avoid zeroing
start_rps = min(start_rps, 1.0) # avoid starting too fast
else:
start_rps = 1
if not self.load.steps:
jmx.add_rps_shaper_schedule(etree_shaper, start_rps, self.load.throughput, self.load.ramp_up)
else:
step_h = self.load.throughput / self.load.steps
step_w = float(self.load.ramp_up) / self.load.steps
accum_time = 0
for step in range(1, self.load.steps + 1):
jmx.add_rps_shaper_schedule(etree_shaper, step_h * step, step_h * step,
step_w * step - accum_time)
accum_time += cond_int(step_w * step - accum_time)
if self.load.hold:
jmx.add_rps_shaper_schedule(etree_shaper, self.load.throughput, self.load.throughput, self.load.hold)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree_shaper)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
class ProtocolHandler(object):
def __init__(self, sys_props, engine):
super(ProtocolHandler, self).__init__()
self.system_props = sys_props
self.engine = engine
def get_toplevel_elements(self, scenario):
return []
def get_sampler_pair(self, request):
return None, None
@staticmethod
def safe_time(any_time):
try:
smart_time = int(1000 * dehumanize_time(any_time))
except TaurusInternalException:
smart_time = any_time
return smart_time
class JMeterScenarioBuilder(JMX):
"""
Helper to build JMeter test plan from Scenario
:type protocol_handlers: dict[str,ProtocolHandler]
"""
def __init__(self, executor, original=None):
"""
:type executor: ScenarioExecutor
:type original: JMX
"""
super(JMeterScenarioBuilder, self).__init__(original)
self.executor = executor
self.scenario = executor.get_scenario()
self.engine = executor.engine
self.system_props = BetterDict()
self.request_compiler = None
self.default_protocol = self.executor.settings.get('default-protocol', 'http')
self.protocol_handlers = {}
for protocol, cls_name in iteritems(self.executor.settings.get("protocol-handlers")):
cls_obj = load_class(cls_name)
instance = cls_obj(self.system_props, self.engine)
self.protocol_handlers[protocol] = instance
self.FIELD_KEYSTORE_CONFIG = 'keystore-config'
@staticmethod
def _get_timer(req):
think_time = req.get_think_time(full=True)
if not think_time:
return []
if not isinstance(think_time, list): # constant
return JMX.get_constant_timer(delay=ProtocolHandler.safe_time(think_time))
mean = ProtocolHandler.safe_time(think_time[1])
dev = ProtocolHandler.safe_time(think_time[2])
if think_time[0] == "uniform":
return JMX.get_uniform_timer(maximum=dev * 2, offset=mean - dev)
elif think_time[0] == "gaussian":
return JMX.get_gaussian_timer(dev=dev, offset=mean)
elif think_time[0] == "poisson":
return JMX.get_poisson_timer(lam=mean - dev, delay=dev)
else:
raise TaurusConfigError("Wrong timer type: %s" % think_time[0])
def __add_extractors(self, children, req):
self.__add_boundary_ext(children, req)
self.__add_regexp_ext(children, req)
self.__add_json_ext(children, req)
self.__add_jquery_ext(children, req)
self.__add_xpath_ext(children, req)
def __add_boundary_ext(self, children, req):
extractors = req.config.get("extract-boundary")
for varname, cfg in iteritems(extractors):
subj = cfg.get('subject', 'body')
left = cfg.get('left', TaurusConfigError("Left boundary is missing for boundary extractor %s" % varname))
right = cfg.get('right', TaurusConfigError("Right boundary is missing for boundary extractor %s" % varname))
match_no = cfg.get('match-no', 1)
defvalue = cfg.get('default', 'NOT_FOUND')
scope = cfg.get("scope", None)
from_var = cfg.get("from-variable", None)
extractor = JMX._get_boundary_extractor(varname, subj, left, right, match_no, defvalue, scope, from_var)
children.append(extractor)
children.append(etree.Element("hashTree"))
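        # Example "extract-boundary" scenario entry (keys mirror the lookups above; values illustrative):
        #   extract-boundary:
        #     token: {subject: body, left: "<token>", right: "</token>", match-no: 1, default: NOT_FOUND}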
def __add_regexp_ext(self, children, req):
extractors = req.config.get("extract-regexp")
for varname in extractors:
cfg = ensure_is_dict(extractors, varname, "regexp")
scope = cfg.get("scope", None)
from_var = cfg.get("from-variable", None)
extractor = JMX._get_extractor(varname, cfg.get('subject', 'body'), cfg['regexp'], cfg.get('template', 1),
cfg.get('match-no', 1), cfg.get('default', 'NOT_FOUND'), scope, from_var)
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_json_ext(self, children, req):
jextractors = req.config.get("extract-jsonpath")
for varname in jextractors:
cfg = ensure_is_dict(jextractors, varname, "jsonpath")
if LooseVersion(str(self.executor.settings.get("version"))) < LooseVersion("3.0"):
extractor = JMX._get_json_extractor(varname,
cfg["jsonpath"],
cfg.get("default", "NOT_FOUND"),
cfg.get("from-variable", None))
else:
extractor = JMX._get_internal_json_extractor(varname,
cfg["jsonpath"],
cfg.get("default", "NOT_FOUND"),
cfg.get("scope", None),
cfg.get("from-variable", None),
cfg.get("match-no", "0"),
cfg.get("concat", False))
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_jquery_ext(self, children, req):
css_jquery_extors = req.config.get("extract-css-jquery")
for varname in css_jquery_extors:
cfg = ensure_is_dict(css_jquery_extors, varname, "expression")
extractor = self._get_jquerycss_extractor(varname,
cfg['expression'],
cfg.get('attribute', ""),
cfg.get('match-no', 0),
cfg.get('default', 'NOT_FOUND'),
cfg.get("scope", None),
cfg.get("from-variable", None))
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_xpath_ext(self, children, req):
xpath_extractors = req.config.get("extract-xpath")
for varname in xpath_extractors:
cfg = ensure_is_dict(xpath_extractors, varname, "xpath")
children.append(JMX._get_xpath_extractor(varname,
cfg['xpath'],
cfg.get('default', 'NOT_FOUND'),
cfg.get('validate-xml', False),
cfg.get('ignore-whitespace', True),
cfg.get("match-no", "-1"),
cfg.get('use-namespaces', False),
cfg.get('use-tolerant-parser', False),
cfg.get("scope", None),
cfg.get("from-variable", None)))
children.append(etree.Element("hashTree"))
@staticmethod
def __add_assertions(children, req):
assertions = req.config.get("assert", [])
for idx, assertion in enumerate(assertions):
assertion = ensure_is_dict(assertions, idx, "contains")
if not isinstance(assertion['contains'], list):
assertion['contains'] = [assertion['contains']]
children.append(JMX._get_resp_assertion(assertion.get("subject", Scenario.FIELD_BODY),
assertion['contains'],
assertion.get('regexp', True),
assertion.get('not', False),
assertion.get('assume-success', False)))
children.append(etree.Element("hashTree"))
jpath_assertions = req.config.get("assert-jsonpath", [])
for idx, assertion in enumerate(jpath_assertions):
assertion = ensure_is_dict(jpath_assertions, idx, "jsonpath")
exc = TaurusConfigError('JSON Path not found in assertion: %s' % assertion)
component = JMX._get_json_path_assertion(assertion.get('jsonpath', exc),
assertion.get('expected-value', ''),
assertion.get('validate', False),
assertion.get('expect-null', False),
assertion.get('invert', False),
assertion.get('regexp', True))
children.append(component)
children.append(etree.Element("hashTree"))
xpath_assertions = req.config.get("assert-xpath", [])
for idx, assertion in enumerate(xpath_assertions):
assertion = ensure_is_dict(xpath_assertions, idx, "xpath")
exc = TaurusConfigError('XPath not found in assertion: %s' % assertion)
component = JMX._get_xpath_assertion(assertion.get('xpath', exc),
assertion.get('validate-xml', False),
assertion.get('ignore-whitespace', True),
assertion.get('use-tolerant-parser', False),
assertion.get('invert', False))
children.append(component)
children.append(etree.Element("hashTree"))
@staticmethod
def __add_jsr_elements(children, req, get_from_config=True):
"""
:type children: etree.Element
:type req: Request
"""
        if get_from_config:
            jsrs = req.config.get("jsr223", [])
        else:
            jsrs = req.get("jsr223", [])
if not isinstance(jsrs, list):
jsrs = [jsrs]
for idx, _ in enumerate(jsrs):
jsr = ensure_is_dict(jsrs, idx, sub_key='script-text')
lang = jsr.get("language", "groovy")
script_file = jsr.get("script-file", None)
script_text = jsr.get("script-text", None)
if not script_file and not script_text:
raise TaurusConfigError("jsr223 element must specify one of 'script-file' or 'script-text'")
parameters = jsr.get("parameters", "")
execute = jsr.get("execute", "after")
cache_key = str(jsr.get("compile-cache", True)).lower()
children.append(JMX._get_jsr223_element(lang, script_file, parameters, execute, script_text, cache_key))
children.append(etree.Element("hashTree"))
def __gen_requests(self, scenario):
http_protocol = scenario.data.get('protocol', 'http') == 'http'
requests = scenario.get_requests(parser=HierarchicRequestParser, require_url=http_protocol)
elements = []
for compiled in self.compile_requests(requests):
elements.extend(compiled)
return elements
def compile_scenario(self, scenario):
elements = []
for _, protocol in iteritems(self.protocol_handlers):
elements.extend(protocol.get_toplevel_elements(scenario))
elements.extend(self.__gen_authorization(scenario))
elements.extend(self.__gen_keystore_config(scenario))
elements.extend(self.__gen_data_sources(scenario))
elements.extend(self.__gen_requests(scenario))
self.__add_jsr_elements(elements, scenario, False)
return elements
def compile_request(self, request):
"""
:type request: HierarchicHTTPRequest
:return:
"""
sampler = children = None
protocol_name = request.priority_option('protocol', default=self.default_protocol)
if protocol_name in self.protocol_handlers:
protocol = self.protocol_handlers[protocol_name]
sampler, children = protocol.get_sampler_pair(request)
if sampler is None:
self.log.warning("Problematic request: %s", request.config)
raise TaurusInternalException("Unable to handle request, please review missing options")
children.extend(self._get_timer(request))
self.__add_assertions(children, request)
self.__add_extractors(children, request)
self.__add_jsr_elements(children, request)
return [sampler, children]
def compile_if_block(self, block):
elements = []
if_controller = JMX._get_if_controller(block.condition)
then_children = etree.Element("hashTree")
for compiled in self.compile_requests(block.then_clause):
for element in compiled:
then_children.append(element)
elements.extend([if_controller, then_children])
if block.else_clause:
inverted_condition = "!(" + block.condition + ")"
else_controller = JMX._get_if_controller(inverted_condition)
else_children = etree.Element("hashTree")
for compiled in self.compile_requests(block.else_clause):
for element in compiled:
else_children.append(element)
elements.extend([else_controller, else_children])
return elements
def compile_once_block(self, block):
elements = []
once_controller = JMX._get_once_controller()
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([once_controller, children])
return elements
def compile_loop_block(self, block):
elements = []
loop_controller = JMX._get_loop_controller(block.loops)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([loop_controller, children])
return elements
def compile_while_block(self, block):
elements = []
controller = JMX._get_while_controller(block.condition)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
def compile_foreach_block(self, block):
"""
:type block: ForEachBlock
"""
elements = []
controller = JMX._get_foreach_controller(block.input_var, block.loop_var)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
def compile_transaction_block(self, block):
elements = []
controller = JMX._get_transaction_controller(block.label,
block.priority_option('force-parent-sample', False),
block.include_timers)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
def compile_include_scenario_block(self, block):
elements = []
controller = JMX._get_simple_controller(block.scenario_name)
children = etree.Element("hashTree")
scenario = self.executor.get_scenario(name=block.scenario_name)
for element in self.compile_scenario(scenario):
children.append(element)
elements.extend([controller, children])
return elements
def compile_action_block(self, block):
"""
:type block: ActionBlock
:return:
"""
actions = {
'stop': 0,
'pause': 1,
'stop-now': 2,
'continue': 3,
}
targets = {'current-thread': 0, 'all-threads': 2}
action = actions[block.action]
target = targets[block.target]
duration = 0
if block.duration is not None:
duration = int(block.duration * 1000)
test_action = JMX._get_action_block(action, target, duration)
children = etree.Element("hashTree")
self.__add_jsr_elements(children, block)
return [test_action, children]
@staticmethod
def compile_set_variables_block(block):
set_var_action = JMX.get_set_var_action(block.mapping)
hashtree = etree.Element("hashTree")
return [set_var_action, hashtree]
def compile_requests(self, requests):
if self.request_compiler is None:
self.request_compiler = RequestCompiler(self)
compiled = []
for request in requests:
compiled.append(self.request_compiler.visit(request))
self.request_compiler.clear_path_cache()
return compiled
def __generate(self):
"""
Generate the test plan
"""
thread_group = JMX.get_thread_group(testname=self.executor.label)
thread_group_ht = etree.Element("hashTree", type="tg")
self.request_compiler = RequestCompiler(self)
for element in self.compile_scenario(self.scenario):
thread_group_ht.append(element)
results_tree = self._get_results_tree()
results_tree_ht = etree.Element("hashTree")
self.append(self.TEST_PLAN_SEL, thread_group)
self.append(self.TEST_PLAN_SEL, thread_group_ht)
self.append(self.TEST_PLAN_SEL, results_tree)
self.append(self.TEST_PLAN_SEL, results_tree_ht)
def save(self, filename):
"""
Generate test plan and save
:type filename: str
"""
self.__generate()
super(JMeterScenarioBuilder, self).save(filename)
@staticmethod
def __gen_authorization(scenario):
"""
Generates HTTP Authorization Manager
"""
elements = []
authorizations = scenario.get("authorization")
if authorizations:
clear_flag = False
if isinstance(authorizations, dict):
if "clear" in authorizations or "list" in authorizations: # full form
clear_flag = authorizations.get("clear", False)
authorizations = authorizations.get("list", [])
else:
authorizations = [authorizations] # short form
if not isinstance(authorizations, list):
raise TaurusConfigError("Wrong authorization format: %s" % authorizations)
auth_manager = JMX.get_auth_manager(authorizations, clear_flag)
elements.append(auth_manager)
elements.append(etree.Element("hashTree"))
return elements
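        # Example "authorization" scenario entries (field names illustrative, not taken from this module):
        #   authorization: {url: "http://blazedemo.com", name: "user", password: "secret"}   # short form
        #   authorization:                                                                    # full form
        #     clear: true
        #     list:
        #     - {url: "http://blazedemo.com", name: "user", password: "secret"}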
def __gen_data_sources(self, scenario):
elements = []
for source in scenario.get_data_sources():
source_path = source["path"]
delimiter = source.get("delimiter")
if has_variable_pattern(source_path):
msg = "Path to CSV contains JMeter variable/function, can't check for file existence: %s"
self.log.warning(msg, source_path)
if not delimiter:
delimiter = ','
self.log.warning("Can't detect CSV dialect, default delimiter will be '%s'", delimiter)
else:
source_path = self.executor.engine.find_file(source_path)
if not os.path.isfile(source_path):
raise TaurusConfigError("data-sources path not found: %s" % source_path)
if not delimiter:
delimiter = guess_delimiter(source_path)
if source.get("random-order"):
config = JMX._get_csv_config_random(source_path, delimiter, source.get("loop", True),
source.get("variable-names", ""))
else:
config = JMX._get_csv_config(source_path, delimiter, source.get("loop", True),
source.get("variable-names", ""), source.get("quoted", False))
elements.append(config)
elements.append(etree.Element("hashTree"))
return elements
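        # Example "data-sources" scenario entry (keys mirror the lookups above; values illustrative):
        #   data-sources:
        #   - path: data.csv
        #     delimiter: ","
        #     loop: true
        #     variable-names: "id,name"
        #     quoted: false
        #     random-order: false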
def __gen_keystore_config(self, scenario):
elements = []
keystore_config = scenario.get(self.FIELD_KEYSTORE_CONFIG)
if keystore_config:
variable_name = keystore_config["variable-name"]
start_index = keystore_config["start-index"]
end_index = keystore_config["end-index"]
preload = keystore_config["preload"]
config = JMX.get_keystore_config_elements(variable_name, start_index, end_index, preload)
elements.append(config)
elements.append(etree.Element("hashTree"))
return elements
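        # Example "keystore-config" scenario entry (keys mirror the lookups above; values illustrative):
        #   keystore-config: {variable-name: cert_alias, start-index: 0, end-index: 99, preload: true}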
| 42.322129 | 120 | 0.596863 | [
"Apache-2.0"
] | greyfenrir/taurus | bzt/jmx/tools.py | 30,218 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import socket
import struct
import asyncio
@asyncio.coroutine
def proxy_data(reader, writer):
try:
while 1:
buf = yield from reader.read(4096)
if not buf:
break
writer.write(buf)
yield from writer.drain()
writer.close()
except Exception:
pass
class SOCKS5Server:
def __init__(self, host, port, username=None, password=None, *, loop=None):
self.username = self.password = None
if username and password:
self.username = username.encode('utf-8')
self.password = password.encode('utf-8')
self.loop = loop or asyncio.get_event_loop()
coro = asyncio.start_server(self.handle_socks, host, port, loop=self.loop)
self.server = self.loop.run_until_complete(coro)
# for random ports
self.host, self.port = self.server.sockets[0].getsockname()[:2]
def handle_socks(self, reader, writer):
version, authnum = yield from reader.read(2)
if version != 0x05:
writer.close()
return
methods = yield from reader.read(authnum)
if self.username and 0x02 in methods:
# Username/password
writer.write(b'\x05\x02')
version, ulen = yield from reader.read(2)
username = yield from reader.read(ulen)
ulen = (yield from reader.read(1))[0]
password = yield from reader.read(ulen)
if version == 0x01 and (
username == self.username and password == self.password):
writer.write(b'\x01\x00')
else:
writer.write(b'\x01\xFF')
writer.close()
return
elif self.username is None and 0x00 in methods:
# No authentication
writer.write(b'\x05\x00')
else:
writer.write(b'\x05\xFF')
writer.close()
return
try:
version, command, reserved, addrtype = yield from reader.read(4)
except ValueError:
writer.close()
return
if version != 0x05:
writer.close()
return
if addrtype == 0x01:
host = yield from reader.read(4)
hostname = socket.inet_ntop(socket.AF_INET, host)
elif addrtype == 0x03:
length = (yield from reader.read(1))[0]
hostname = (yield from reader.read(length)).decode('utf-8')
        elif addrtype == 0x04:
            host = yield from reader.read(16)
            hostname = socket.inet_ntop(socket.AF_INET6, host)
        else:
            # Unsupported address type: reply "address type not supported" and close,
            # otherwise 'hostname' would be undefined below.
            writer.write(b'\x05\x08\x00\x01' + b'\x00' * 6)
            writer.close()
            return
port = struct.unpack('!H', (yield from reader.read(2)))[0]
sockname = writer.get_extra_info('sockname')
# a (address, port) 2-tuple for AF_INET,
# a (address, port, flow info, scope id) 4-tuple for AF_INET6
if len(sockname) == 2:
bndinfo = b'\x01' + socket.inet_pton(socket.AF_INET, sockname[0])
else:
bndinfo = b'\x04' + socket.inet_pton(socket.AF_INET6, sockname[0])
bndinfo += struct.pack('!H', sockname[1])
if command == 0x01:
writer.write(b'\x05\x00\x00' + bndinfo)
else:
writer.write(b'\x05\x07\x00' + bndinfo)
writer.close()
return
r_reader, r_writer = yield from asyncio.open_connection(hostname, port)
asyncio.ensure_future(proxy_data(reader, r_writer), loop=self.loop)
asyncio.ensure_future(proxy_data(r_reader, writer), loop=self.loop)
def run_forever(self):
self.loop.run_forever()
def close(self):
self.server.close()
self.loop.run_until_complete(self.server.wait_closed())
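    # Quick manual check (illustrative): with the server listening on port 1080,
    #   curl --socks5 127.0.0.1:1080 http://example.com/
    # or, if username/password auth is enabled:
    #   curl --socks5 user:pass@127.0.0.1:1080 http://example.com/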
if __name__ == '__main__':
try:
host = '0.0.0.0'
port = 1080
if len(sys.argv) == 1:
pass
elif len(sys.argv) == 2:
if sys.argv[1] in ('-h', '--help'):
print('usage: python3 %s [port|listen port]' % __file__)
sys.exit(0)
else:
port = int(sys.argv[1])
elif len(sys.argv) == 3:
host = sys.argv[1]
port = int(sys.argv[2])
except Exception as ex:
print(ex)
print('usage: python3 %s [port|listen port]' % sys.argv[0])
sys.exit(1)
srv = SOCKS5Server(host, port)
print('Listening on %s:%d' % (host, port))
try:
srv.run_forever()
except KeyboardInterrupt:
pass
finally:
srv.close()
| 33.386861 | 82 | 0.554438 | [
"MIT"
] | gumblex/ptproxy | socksserver.py | 4,574 | Python |
# coding: utf-8
"""
Properties
All HubSpot objects store data in default and custom properties. These endpoints provide access to read and modify object properties in HubSpot. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.properties.configuration import Configuration
class PropertyGroupUpdate(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'label': 'str',
'display_order': 'int'
}
attribute_map = {
'label': 'label',
'display_order': 'displayOrder'
}
def __init__(self, label=None, display_order=None, local_vars_configuration=None): # noqa: E501
"""PropertyGroupUpdate - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._label = None
self._display_order = None
self.discriminator = None
if label is not None:
self.label = label
if display_order is not None:
self.display_order = display_order
@property
def label(self):
"""Gets the label of this PropertyGroupUpdate. # noqa: E501
A human-readable label that will be shown in HubSpot. # noqa: E501
:return: The label of this PropertyGroupUpdate. # noqa: E501
:rtype: str
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this PropertyGroupUpdate.
A human-readable label that will be shown in HubSpot. # noqa: E501
:param label: The label of this PropertyGroupUpdate. # noqa: E501
:type: str
"""
self._label = label
@property
def display_order(self):
"""Gets the display_order of this PropertyGroupUpdate. # noqa: E501
Property groups are displayed in order starting with the lowest positive integer value. Values of -1 will cause the property group to be displayed after any positive values. # noqa: E501
:return: The display_order of this PropertyGroupUpdate. # noqa: E501
:rtype: int
"""
return self._display_order
@display_order.setter
def display_order(self, display_order):
"""Sets the display_order of this PropertyGroupUpdate.
Property groups are displayed in order starting with the lowest positive integer value. Values of -1 will cause the property group to be displayed after any positive values. # noqa: E501
:param display_order: The display_order of this PropertyGroupUpdate. # noqa: E501
:type: int
"""
self._display_order = display_order
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PropertyGroupUpdate):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PropertyGroupUpdate):
return True
return self.to_dict() != other.to_dict()
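# Example usage (illustrative; not part of the generated model):
#   update = PropertyGroupUpdate(label="Contact details", display_order=1)
#   update.to_dict()  # -> {'label': 'Contact details', 'display_order': 1}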
| 31.15894 | 195 | 0.609564 | [
"Apache-2.0"
] | cclauss/hubspot-api-python | hubspot/crm/properties/models/property_group_update.py | 4,705 | Python |
from __future__ import print_function
import sys
import logging
import os
os.environ['ENABLE_CNNL_TRYCATCH'] = 'OFF' # pylint: disable=C0413
from itertools import product
import unittest
import torch
import torch_mlu.core.mlu_model as ct
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir + "/../../")
from common_utils import testinfo, TestCase # pylint: disable=C0413,C0411
logging.basicConfig(level=logging.DEBUG)
class TestTypeOp(TestCase):
# @unittest.skip("not test")
@testinfo()
def test_type_param_empty(self):
shape_list = [(512, 1024, 2, 2, 4), (2, 3, 4),
(254, 254, 112, 1, 1, 3), (1000,), ()]
dtype_list = [torch.half, torch.float,
torch.uint8, torch.int8, torch.short,
torch.int, torch.long, torch.bool]
for shape, src_type in product(shape_list, dtype_list):
if src_type in [torch.half, torch.float]:
x = torch.randn(shape, dtype=src_type)
elif src_type == torch.uint8:
x = torch.randint(0, 255, shape).to(src_type)
else:
x = torch.randint(-128, 128, shape).to(src_type)
out_cpu_type = x.type()
out_mlu_type = x.to(ct.mlu_device()).type()
l_tmp = out_cpu_type.split('.')
l_tmp.insert(1, 'mlu')
self.assertEqual('.'.join(l_tmp), out_mlu_type)
# @unittest.skip("not test")
@testinfo()
def test_type_param_empty_channels_last(self):
shape_list = [(512, 1024, 2, 2), (2, 3, 4, 5),
(254, 254, 112, 1), (2, 3, 24, 30), (1, 1, 1, 30)]
dtype_list = [torch.half, torch.float,
torch.uint8, torch.int8, torch.short,
torch.int, torch.long, torch.bool]
for shape, src_type in product(shape_list, dtype_list):
if src_type in [torch.half, torch.float]:
x = torch.randn(shape, dtype=src_type).to(memory_format = torch.channels_last)
elif src_type == torch.uint8:
x = torch.randint(0, 255, shape).to(src_type).to(
memory_format = torch.channels_last)
else:
x = torch.randint(-128, 128, shape).to(src_type).to(
memory_format = torch.channels_last)
out_cpu_type = x.type()
out_mlu_type = x.to(ct.mlu_device()).type()
l_tmp = out_cpu_type.split('.')
l_tmp.insert(1, 'mlu')
self.assertEqual('.'.join(l_tmp), out_mlu_type)
# @unittest.skip("not test")
@testinfo()
def test_type_param_empty_not_dense(self):
shape_list = [(16, 32, 2, 30), (2, 3, 4, 32),
(24, 26, 112, 64), (2, 3, 24, 30), (1, 1, 1, 30)]
dtype_list = [torch.half, torch.float,
torch.uint8, torch.int8, torch.short,
torch.int, torch.long, torch.bool]
for shape, src_type in product(shape_list, dtype_list):
if src_type in [torch.half, torch.float]:
x = torch.randn(shape, dtype=src_type)[:, :, :, :15]
elif src_type == torch.uint8:
x = torch.randint(0, 255, shape).to(src_type)[:, :, :, :15]
else:
x = torch.randint(-128, 128, shape).to(src_type)[:, :, :, :15]
out_cpu_type = x.type()
out_mlu_type = x.to(ct.mlu_device()).type()
l_tmp = out_cpu_type.split('.')
l_tmp.insert(1, 'mlu')
self.assertEqual('.'.join(l_tmp), out_mlu_type)
# @unittest.skip("not test")
@testinfo()
def test_type_param_dtype(self):
shape_list = [(512, 1024, 2, 2, 4), (2, 3, 4),
(254, 254, 112, 1, 1, 3), (1000,), ()]
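        # cast_map maps each source dtype to the destination dtypes exercised via Tensor.type() on MLU.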
cast_map = {torch.float: {torch.half, torch.int, torch.short, torch.int8, torch.bool},
torch.half: {torch.float, torch.int, torch.short, torch.int8, torch.bool},
torch.long: {torch.float, torch.half, torch.short, torch.int8},
torch.int: {torch.float, torch.half, torch.short, torch.int8},
torch.short: {torch.float, torch.half, torch.int},
torch.int8: {torch.float, torch.half, torch.int},
torch.uint8: {torch.float, torch.half},
torch.bool: {torch.float, torch.half, torch.int},
}
for shape, src_type in product(shape_list, cast_map.keys()):
for dst_type in cast_map[src_type]:
if src_type in [torch.half, torch.float]:
x = torch.randn(shape, dtype=src_type)
elif src_type == torch.uint8:
x = torch.randint(0, 255, shape).to(src_type)
else:
x = torch.randint(-128, 128, shape).to(src_type)
for is_async in [False, True]:
out_cpu = x.type(dst_type, non_blocking=is_async)
out_mlu = x.to(ct.mlu_device()).type(dst_type, non_blocking=is_async)
self.assertEqual(out_mlu.dtype, dst_type)
self.assertEqual(out_cpu, out_mlu.cpu())
# @unittest.skip("not test")
@testinfo()
def test_type_param_dtype_channels_last(self):
shape_list = [(512, 1024, 2, 2), (2, 3, 4, 16),
(254, 254, 112, 1), (2, 3, 24, 30), (1, 1, 1, 30)]
cast_map = {torch.float: {torch.half, torch.int, torch.short, torch.int8, torch.bool},
torch.half: {torch.float, torch.int, torch.short, torch.int8, torch.bool},
torch.long: {torch.float, torch.half, torch.short, torch.int8},
torch.int: {torch.float, torch.half, torch.short, torch.int8},
torch.short: {torch.float, torch.half, torch.int},
torch.int8: {torch.float, torch.half, torch.int},
torch.uint8: {torch.float, torch.half},
torch.bool: {torch.float, torch.half, torch.int},
}
for shape, src_type in product(shape_list, cast_map.keys()):
for dst_type in cast_map[src_type]:
if src_type in [torch.half, torch.float]:
x = torch.randn(shape, dtype=src_type).to(memory_format = torch.channels_last)
elif src_type == torch.uint8:
x = torch.randint(0, 255, shape).to(src_type).to(
memory_format = torch.channels_last)
else:
x = torch.randint(-128, 128, shape).to(src_type).to(
memory_format = torch.channels_last)
for is_async in [False, True]:
out_cpu = x.type(dst_type, non_blocking=is_async)
out_mlu = x.to(ct.mlu_device()).type(dst_type, non_blocking=is_async)
self.assertEqual(out_mlu.dtype, dst_type)
self.assertEqual(out_cpu, out_mlu.cpu())
# @unittest.skip("not test")
@testinfo()
def test_type_param_dtype_not_dense(self):
shape_list = [(16, 32, 2, 30), (2, 3, 4, 32),
(24, 26, 112, 64), (2, 3, 24, 30), (1, 1, 1, 30)]
cast_map = {torch.float: {torch.half, torch.int, torch.short, torch.int8, torch.bool},
torch.half: {torch.float, torch.int, torch.short, torch.int8, torch.bool},
torch.long: {torch.float, torch.half, torch.short, torch.int8},
torch.int: {torch.float, torch.half, torch.short, torch.int8},
torch.short: {torch.float, torch.half, torch.int},
torch.int8: {torch.float, torch.half, torch.int},
torch.uint8: {torch.float, torch.half},
torch.bool: {torch.float, torch.half, torch.int},
}
for shape, src_type in product(shape_list, cast_map.keys()):
for dst_type in cast_map[src_type]:
if src_type in [torch.half, torch.float]:
x = torch.randn(shape, dtype=src_type)[:, :, :, :15]
elif src_type == torch.uint8:
x = torch.randint(0, 255, shape).to(src_type)[:, :, :, :15]
else:
x = torch.randint(-128, 128, shape).to(src_type)[:, :, :, :15]
for is_async in [False, True]:
out_cpu = x.type(dst_type, non_blocking=is_async)
out_mlu = x.to(ct.mlu_device()).type(dst_type, non_blocking=is_async)
self.assertEqual(out_mlu.dtype, dst_type)
self.assertEqual(out_cpu, out_mlu.cpu())
if __name__ == '__main__':
unittest.main()
| 51.25731 | 98 | 0.539875 | [
"BSD-2-Clause"
] | Cambricon/catch | test/cnnl/op_test/test_type.py | 8,765 | Python |
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""email package exception classes."""
class MessageError(Exception):
"""Base class for errors in the email package."""
class MessageParseError(MessageError):
"""Base class for message parsing errors."""
class HeaderParseError(MessageParseError):
"""Error while parsing headers."""
class BoundaryError(MessageParseError):
"""Couldn't find terminating boundary."""
class MultipartConversionError(MessageError, TypeError):
"""Conversion to a multipart is prohibited."""
class CharsetError(MessageError):
"""An illegal charset was given."""
# These are parsing defects which the parser was able to work around.
class MessageDefect(ValueError):
"""Base class for a message defect."""
def __init__(self, line=None):
if line is not None:
super().__init__(line)
self.line = line
class NoBoundaryInMultipartDefect(MessageDefect):
"""A message claimed to be a multipart but had no boundary parameter."""
class StartBoundaryNotFoundDefect(MessageDefect):
"""The claimed start boundary was never found."""
class CloseBoundaryNotFoundDefect(MessageDefect):
"""A start boundary was found, but not the corresponding close boundary."""
class FirstHeaderLineIsContinuationDefect(MessageDefect):
"""A message had a continuation line as its first header line."""
class MisplacedEnvelopeHeaderDefect(MessageDefect):
"""A 'Unix-from' header was found in the middle of a header block."""
class MissingHeaderBodySeparatorDefect(MessageDefect):
"""Found line with no leading whitespace and no colon before blank line."""
# XXX: backward compatibility, just in case (it was never emitted).
MalformedHeaderDefect = MissingHeaderBodySeparatorDefect
class MultipartInvariantViolationDefect(MessageDefect):
"""A message claimed to be a multipart but no subparts were found."""
class InvalidMultipartContentTransferEncodingDefect(MessageDefect):
"""An invalid content transfer encoding was set on the multipart itself."""
class UndecodableBytesDefect(MessageDefect):
"""Header contained bytes that could not be decoded"""
class InvalidBase64PaddingDefect(MessageDefect):
"""base64 encoded sequence had an incorrect length"""
class InvalidBase64CharactersDefect(MessageDefect):
"""base64 encoded sequence had characters not in base64 alphabet"""
# These errors are specific to header parsing.
class HeaderDefect(MessageDefect):
"""Base class for a header defect."""
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
class InvalidHeaderDefect(HeaderDefect):
"""Header is not valid, message gives details."""
class HeaderMissingRequiredValue(HeaderDefect):
"""A header that must have a value had none"""
class NonPrintableDefect(HeaderDefect):
"""ASCII characters outside the ascii-printable range found"""
def __init__(self, non_printables):
super().__init__(non_printables)
self.non_printables = non_printables
def __str__(self):
return ("the following ASCII non-printables found in header: "
"{}".format(self.non_printables))
class ObsoleteHeaderDefect(HeaderDefect):
"""Header uses syntax declared obsolete by RFC 5322"""
class NonASCIILocalPartDefect(HeaderDefect):
"""local_part contains non-ASCII characters"""
# This defect only occurs during unicode parsing, not when
# parsing messages decoded from binary.
| 32.731481 | 79 | 0.742857 | [
"Apache-2.0"
] | 0xFireball/exascript2 | Src/StdLib/Lib/email/errors.py | 3,535 | Python |
from unittest.mock import ANY, AsyncMock, MagicMock, create_autospec, patch
import aioredis
import pytest
from tests.utils import Keys
from aiocache.backends.redis import RedisBackend, RedisCache, conn
from aiocache.base import BaseCache
from aiocache.serializers import JsonSerializer
pytest.skip("aioredis code is broken", allow_module_level=True)
@pytest.fixture # type: ignore[unreachable]
def redis_connection():
return create_autospec(aioredis.RedisConnection)
@pytest.fixture
def redis_pool(redis_connection):
class FakePool:
def __await__(self):
yield
return redis_connection
pool = FakePool()
pool._conn = redis_connection
pool.release = AsyncMock()
pool.clear = AsyncMock()
pool.acquire = AsyncMock(return_value=redis_connection)
pool.__call__ = MagicMock(return_value=pool)
return pool
@pytest.fixture
def redis(redis_pool):
redis = RedisBackend()
redis._pool = redis_pool
yield redis
@pytest.fixture
def create_pool():
with patch("aiocache.backends.redis.aioredis.create_pool") as create_pool:
yield create_pool
@pytest.fixture(autouse=True)
def mock_redis_v1(mocker, redis_connection):
mocker.patch("aiocache.backends.redis.aioredis.Redis", return_value=redis_connection)
class TestRedisBackend:
def test_setup(self):
redis_backend = RedisBackend()
assert redis_backend.endpoint == "127.0.0.1"
assert redis_backend.port == 6379
assert redis_backend.db == 0
assert redis_backend.password is None
assert redis_backend.pool_min_size == 1
assert redis_backend.pool_max_size == 10
def test_setup_override(self):
redis_backend = RedisBackend(db=2, password="pass")
assert redis_backend.endpoint == "127.0.0.1"
assert redis_backend.port == 6379
assert redis_backend.db == 2
assert redis_backend.password == "pass"
def test_setup_casts(self):
redis_backend = RedisBackend(
db="2",
port="6379",
pool_min_size="1",
pool_max_size="10",
create_connection_timeout="1.5",
)
assert redis_backend.db == 2
assert redis_backend.port == 6379
assert redis_backend.pool_min_size == 1
assert redis_backend.pool_max_size == 10
assert redis_backend.create_connection_timeout == 1.5
@pytest.mark.asyncio
async def test_acquire_conn(self, redis, redis_connection):
assert await redis.acquire_conn() == redis_connection
@pytest.mark.asyncio
async def test_release_conn(self, redis):
conn = await redis.acquire_conn()
await redis.release_conn(conn)
redis._pool.release.assert_called_with(conn)
@pytest.mark.asyncio
async def test_get_pool_sets_pool(self, redis, redis_pool, create_pool):
redis._pool = None
await redis._get_pool()
assert redis._pool == create_pool.return_value
@pytest.mark.asyncio
async def test_get_pool_reuses_existing_pool(self, redis):
redis._pool = "pool"
await redis._get_pool()
assert redis._pool == "pool"
@pytest.mark.asyncio
async def test_get_pool_locked(self, mocker, redis, create_pool):
redis._pool = None
mocker.spy(redis._pool_lock, "acquire")
mocker.spy(redis._pool_lock, "release")
assert await redis._get_pool() == create_pool.return_value
assert redis._pool_lock.acquire.call_count == 1
assert redis._pool_lock.release.call_count == 1
@pytest.mark.asyncio
async def test_get_pool_calls_create_pool(self, redis, create_pool):
redis._pool = None
await redis._get_pool()
create_pool.assert_called_with(
(redis.endpoint, redis.port),
db=redis.db,
password=redis.password,
loop=redis._loop,
encoding="utf-8",
minsize=redis.pool_min_size,
maxsize=redis.pool_max_size,
create_connection_timeout=redis.create_connection_timeout,
)
@pytest.mark.asyncio
async def test_get(self, redis, redis_connection):
await redis._get(Keys.KEY)
redis_connection.get.assert_called_with(Keys.KEY, encoding="utf-8")
@pytest.mark.asyncio
async def test_gets(self, mocker, redis, redis_connection):
mocker.spy(redis, "_get")
await redis._gets(Keys.KEY)
redis._get.assert_called_with(Keys.KEY, encoding="utf-8", _conn=ANY)
@pytest.mark.asyncio
async def test_set(self, redis, redis_connection):
await redis._set(Keys.KEY, "value")
redis_connection.set.assert_called_with(Keys.KEY, "value")
await redis._set(Keys.KEY, "value", ttl=1)
redis_connection.setex.assert_called_with(Keys.KEY, 1, "value")
@pytest.mark.asyncio
async def test_set_cas_token(self, mocker, redis, redis_connection):
mocker.spy(redis, "_cas")
await redis._set(Keys.KEY, "value", _cas_token="old_value", _conn=redis_connection)
redis._cas.assert_called_with(
Keys.KEY, "value", "old_value", ttl=None, _conn=redis_connection
)
@pytest.mark.asyncio
async def test_cas(self, mocker, redis, redis_connection):
mocker.spy(redis, "_raw")
await redis._cas(Keys.KEY, "value", "old_value", ttl=10, _conn=redis_connection)
redis._raw.assert_called_with(
"eval",
redis.CAS_SCRIPT,
[Keys.KEY],
["value", "old_value", "EX", 10],
_conn=redis_connection,
)
@pytest.mark.asyncio
async def test_cas_float_ttl(self, mocker, redis, redis_connection):
mocker.spy(redis, "_raw")
await redis._cas(Keys.KEY, "value", "old_value", ttl=0.1, _conn=redis_connection)
redis._raw.assert_called_with(
"eval",
redis.CAS_SCRIPT,
[Keys.KEY],
["value", "old_value", "PX", 100],
_conn=redis_connection,
)
@pytest.mark.asyncio
async def test_multi_get(self, redis, redis_connection):
await redis._multi_get([Keys.KEY, Keys.KEY_1])
redis_connection.mget.assert_called_with(Keys.KEY, Keys.KEY_1, encoding="utf-8")
@pytest.mark.asyncio
async def test_multi_set(self, redis, redis_connection):
await redis._multi_set([(Keys.KEY, "value"), (Keys.KEY_1, "random")])
redis_connection.mset.assert_called_with(Keys.KEY, "value", Keys.KEY_1, "random")
@pytest.mark.asyncio
async def test_multi_set_with_ttl(self, redis, redis_connection):
await redis._multi_set([(Keys.KEY, "value"), (Keys.KEY_1, "random")], ttl=1)
assert redis_connection.multi_exec.call_count == 1
redis_connection.mset.assert_called_with(Keys.KEY, "value", Keys.KEY_1, "random")
redis_connection.expire.assert_any_call(Keys.KEY, timeout=1)
redis_connection.expire.assert_any_call(Keys.KEY_1, timeout=1)
assert redis_connection.execute.call_count == 1
@pytest.mark.asyncio
async def test_add(self, redis, redis_connection):
await redis._add(Keys.KEY, "value")
redis_connection.set.assert_called_with(Keys.KEY, "value", exist=ANY, expire=None)
await redis._add(Keys.KEY, "value", 1)
redis_connection.set.assert_called_with(Keys.KEY, "value", exist=ANY, expire=1)
@pytest.mark.asyncio
async def test_add_existing(self, redis, redis_connection):
redis_connection.set.return_value = False
with pytest.raises(ValueError):
await redis._add(Keys.KEY, "value")
@pytest.mark.asyncio
async def test_add_float_ttl(self, redis, redis_connection):
await redis._add(Keys.KEY, "value", 0.1)
redis_connection.set.assert_called_with(Keys.KEY, "value", exist=ANY, pexpire=100)
@pytest.mark.asyncio
async def test_exists(self, redis, redis_connection):
redis_connection.exists.return_value = 1
await redis._exists(Keys.KEY)
redis_connection.exists.assert_called_with(Keys.KEY)
@pytest.mark.asyncio
async def test_expire(self, redis, redis_connection):
await redis._expire(Keys.KEY, ttl=1)
redis_connection.expire.assert_called_with(Keys.KEY, 1)
@pytest.mark.asyncio
async def test_increment(self, redis, redis_connection):
await redis._increment(Keys.KEY, delta=2)
redis_connection.incrby.assert_called_with(Keys.KEY, 2)
@pytest.mark.asyncio
async def test_increment_typerror(self, redis, redis_connection):
redis_connection.incrby.side_effect = aioredis.errors.ReplyError("msg")
with pytest.raises(TypeError):
await redis._increment(Keys.KEY, 2)
@pytest.mark.asyncio
async def test_expire_0_ttl(self, redis, redis_connection):
await redis._expire(Keys.KEY, ttl=0)
redis_connection.persist.assert_called_with(Keys.KEY)
@pytest.mark.asyncio
async def test_delete(self, redis, redis_connection):
await redis._delete(Keys.KEY)
redis_connection.delete.assert_called_with(Keys.KEY)
@pytest.mark.asyncio
async def test_clear(self, redis, redis_connection):
redis_connection.keys.return_value = ["nm:a", "nm:b"]
await redis._clear("nm")
redis_connection.delete.assert_called_with("nm:a", "nm:b")
@pytest.mark.asyncio
async def test_clear_no_keys(self, redis, redis_connection):
redis_connection.keys.return_value = []
await redis._clear("nm")
redis_connection.delete.assert_not_called()
@pytest.mark.asyncio
async def test_clear_no_namespace(self, redis, redis_connection):
await redis._clear()
assert redis_connection.flushdb.call_count == 1
@pytest.mark.asyncio
async def test_raw(self, redis, redis_connection):
await redis._raw("get", Keys.KEY)
await redis._raw("set", Keys.KEY, 1)
redis_connection.get.assert_called_with(Keys.KEY, encoding=ANY)
redis_connection.set.assert_called_with(Keys.KEY, 1)
@pytest.mark.asyncio
async def test_redlock_release(self, mocker, redis):
mocker.spy(redis, "_raw")
await redis._redlock_release(Keys.KEY, "random")
redis._raw.assert_called_with("eval", redis.RELEASE_SCRIPT, [Keys.KEY], ["random"])
@pytest.mark.asyncio
async def test_close_when_connected(self, redis):
await redis._raw("set", Keys.KEY, 1)
await redis._close()
assert redis._pool.clear.call_count == 1
@pytest.mark.asyncio
async def test_close_when_not_connected(self, redis, redis_pool):
redis._pool = None
await redis._close()
assert redis_pool.clear.call_count == 0
class TestConn:
async def dummy(self, *args, _conn=None, **kwargs):
pass
@pytest.mark.asyncio
async def test_conn(self, redis, redis_connection, mocker):
mocker.spy(self, "dummy")
d = conn(self.dummy)
await d(redis, "a", _conn=None)
self.dummy.assert_called_with(redis, "a", _conn=redis_connection)
@pytest.mark.asyncio
async def test_conn_reuses(self, redis, redis_connection, mocker):
mocker.spy(self, "dummy")
d = conn(self.dummy)
await d(redis, "a", _conn=redis_connection)
self.dummy.assert_called_with(redis, "a", _conn=redis_connection)
await d(redis, "a", _conn=redis_connection)
self.dummy.assert_called_with(redis, "a", _conn=redis_connection)
class TestRedisCache:
@pytest.fixture
def set_test_namespace(self, redis_cache):
redis_cache.namespace = "test"
yield
redis_cache.namespace = None
def test_name(self):
assert RedisCache.NAME == "redis"
def test_inheritance(self):
assert isinstance(RedisCache(), BaseCache)
def test_default_serializer(self):
assert isinstance(RedisCache().serializer, JsonSerializer)
@pytest.mark.parametrize(
"path,expected", [("", {}), ("/", {}), ("/1", {"db": "1"}), ("/1/2/3", {"db": "1"})]
)
def test_parse_uri_path(self, path, expected):
assert RedisCache().parse_uri_path(path) == expected
@pytest.mark.parametrize(
"namespace, expected",
([None, "test:" + Keys.KEY], ["", Keys.KEY], ["my_ns", "my_ns:" + Keys.KEY]),
)
def test_build_key_double_dot(self, set_test_namespace, redis_cache, namespace, expected):
assert redis_cache.build_key(Keys.KEY, namespace=namespace) == expected
def test_build_key_no_namespace(self, redis_cache):
assert redis_cache.build_key(Keys.KEY, namespace=None) == Keys.KEY
| 36.177143 | 94 | 0.675407 | [
"BSD-3-Clause"
] | argaen/aiocache | tests/ut/backends/test_redis.py | 12,662 | Python |
from __future__ import absolute_import
from mock import patch
from kombu import Connection, Exchange, Queue
from kombu import compat
from .mocks import Transport, Channel
from .utils import TestCase
from .utils import Mock
class test_misc(TestCase):
def test_iterconsume(self):
class MyConnection(object):
drained = 0
def drain_events(self, *args, **kwargs):
self.drained += 1
return self.drained
class Consumer(object):
active = False
def consume(self, *args, **kwargs):
self.active = True
conn = MyConnection()
consumer = Consumer()
it = compat._iterconsume(conn, consumer)
self.assertEqual(next(it), 1)
self.assertTrue(consumer.active)
it2 = compat._iterconsume(conn, consumer, limit=10)
self.assertEqual(list(it2), [2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
def test_Queue_from_dict(self):
defs = {'binding_key': 'foo.#',
'exchange': 'fooex',
'exchange_type': 'topic',
'durable': True,
'auto_delete': False}
q1 = Queue.from_dict('foo', **dict(defs))
self.assertEqual(q1.name, 'foo')
self.assertEqual(q1.routing_key, 'foo.#')
self.assertEqual(q1.exchange.name, 'fooex')
self.assertEqual(q1.exchange.type, 'topic')
self.assertTrue(q1.durable)
self.assertTrue(q1.exchange.durable)
self.assertFalse(q1.auto_delete)
self.assertFalse(q1.exchange.auto_delete)
q2 = Queue.from_dict('foo', **dict(defs,
exchange_durable=False))
self.assertTrue(q2.durable)
self.assertFalse(q2.exchange.durable)
q3 = Queue.from_dict('foo', **dict(defs,
exchange_auto_delete=True))
self.assertFalse(q3.auto_delete)
self.assertTrue(q3.exchange.auto_delete)
q4 = Queue.from_dict('foo', **dict(defs,
queue_durable=False))
self.assertFalse(q4.durable)
self.assertTrue(q4.exchange.durable)
q5 = Queue.from_dict('foo', **dict(defs,
queue_auto_delete=True))
self.assertTrue(q5.auto_delete)
self.assertFalse(q5.exchange.auto_delete)
self.assertEqual(Queue.from_dict('foo', **dict(defs)),
Queue.from_dict('foo', **dict(defs)))
class test_Publisher(TestCase):
def setUp(self):
self.connection = Connection(transport=Transport)
def test_constructor(self):
pub = compat.Publisher(self.connection,
exchange='test_Publisher_constructor',
routing_key='rkey')
self.assertIsInstance(pub.backend, Channel)
self.assertEqual(pub.exchange.name, 'test_Publisher_constructor')
self.assertTrue(pub.exchange.durable)
self.assertFalse(pub.exchange.auto_delete)
self.assertEqual(pub.exchange.type, 'direct')
pub2 = compat.Publisher(self.connection,
exchange='test_Publisher_constructor2',
routing_key='rkey',
auto_delete=True,
durable=False)
self.assertTrue(pub2.exchange.auto_delete)
self.assertFalse(pub2.exchange.durable)
explicit = Exchange('test_Publisher_constructor_explicit',
type='topic')
pub3 = compat.Publisher(self.connection,
exchange=explicit)
self.assertEqual(pub3.exchange, explicit)
compat.Publisher(self.connection,
exchange='test_Publisher_constructor3',
channel=self.connection.default_channel)
def test_send(self):
pub = compat.Publisher(self.connection,
exchange='test_Publisher_send',
routing_key='rkey')
pub.send({'foo': 'bar'})
self.assertIn('basic_publish', pub.backend)
pub.close()
def test__enter__exit__(self):
pub = compat.Publisher(self.connection,
exchange='test_Publisher_send',
routing_key='rkey')
x = pub.__enter__()
self.assertIs(x, pub)
x.__exit__()
self.assertTrue(pub._closed)
class test_Consumer(TestCase):
def setUp(self):
self.connection = Connection(transport=Transport)
@patch('kombu.compat._iterconsume')
def test_iterconsume_calls__iterconsume(self, it, n='test_iterconsume'):
c = compat.Consumer(self.connection, queue=n, exchange=n)
c.iterconsume(limit=10, no_ack=True)
it.assert_called_with(c.connection, c, True, 10)
def test_constructor(self, n='test_Consumer_constructor'):
c = compat.Consumer(self.connection, queue=n, exchange=n,
routing_key='rkey')
self.assertIsInstance(c.backend, Channel)
q = c.queues[0]
self.assertTrue(q.durable)
self.assertTrue(q.exchange.durable)
self.assertFalse(q.auto_delete)
self.assertFalse(q.exchange.auto_delete)
self.assertEqual(q.name, n)
self.assertEqual(q.exchange.name, n)
c2 = compat.Consumer(self.connection, queue=n + '2',
exchange=n + '2',
routing_key='rkey', durable=False,
auto_delete=True, exclusive=True)
q2 = c2.queues[0]
self.assertFalse(q2.durable)
self.assertFalse(q2.exchange.durable)
self.assertTrue(q2.auto_delete)
self.assertTrue(q2.exchange.auto_delete)
def test__enter__exit__(self, n='test__enter__exit__'):
c = compat.Consumer(self.connection, queue=n, exchange=n,
routing_key='rkey')
x = c.__enter__()
self.assertIs(x, c)
x.__exit__()
self.assertTrue(c._closed)
def test_revive(self, n='test_revive'):
c = compat.Consumer(self.connection, queue=n, exchange=n)
with self.connection.channel() as c2:
c.revive(c2)
self.assertIs(c.backend, c2)
def test__iter__(self, n='test__iter__'):
c = compat.Consumer(self.connection, queue=n, exchange=n)
c.iterqueue = Mock()
c.__iter__()
c.iterqueue.assert_called_with(infinite=True)
def test_iter(self, n='test_iterqueue'):
c = compat.Consumer(self.connection, queue=n, exchange=n,
routing_key='rkey')
c.close()
def test_process_next(self, n='test_process_next'):
c = compat.Consumer(self.connection, queue=n, exchange=n,
routing_key='rkey')
with self.assertRaises(NotImplementedError):
c.process_next()
c.close()
def test_iterconsume(self, n='test_iterconsume'):
c = compat.Consumer(self.connection, queue=n, exchange=n,
routing_key='rkey')
c.close()
def test_discard_all(self, n='test_discard_all'):
c = compat.Consumer(self.connection, queue=n, exchange=n,
routing_key='rkey')
c.discard_all()
self.assertIn('queue_purge', c.backend)
def test_fetch(self, n='test_fetch'):
c = compat.Consumer(self.connection, queue=n, exchange=n,
routing_key='rkey')
self.assertIsNone(c.fetch())
self.assertIsNone(c.fetch(no_ack=True))
self.assertIn('basic_get', c.backend)
callback_called = [False]
def receive(payload, message):
callback_called[0] = True
c.backend.to_deliver.append('42')
self.assertEqual(c.fetch().payload, '42')
c.backend.to_deliver.append('46')
c.register_callback(receive)
self.assertEqual(c.fetch(enable_callbacks=True).payload, '46')
self.assertTrue(callback_called[0])
def test_discard_all_filterfunc_not_supported(self, n='xjf21j21'):
c = compat.Consumer(self.connection, queue=n, exchange=n,
routing_key='rkey')
with self.assertRaises(NotImplementedError):
c.discard_all(filterfunc=lambda x: x)
c.close()
def test_wait(self, n='test_wait'):
class C(compat.Consumer):
def iterconsume(self, limit=None):
for i in range(limit):
yield i
c = C(self.connection,
queue=n, exchange=n, routing_key='rkey')
self.assertEqual(c.wait(10), list(range(10)))
c.close()
def test_iterqueue(self, n='test_iterqueue'):
i = [0]
class C(compat.Consumer):
def fetch(self, limit=None):
z = i[0]
i[0] += 1
return z
c = C(self.connection,
queue=n, exchange=n, routing_key='rkey')
self.assertEqual(list(c.iterqueue(limit=10)), list(range(10)))
c.close()
class test_ConsumerSet(TestCase):
def setUp(self):
self.connection = Connection(transport=Transport)
@patch('kombu.compat._iterconsume')
def test_iterconsume(self, _iterconsume, n='test_iterconsume'):
c = compat.Consumer(self.connection, queue=n, exchange=n)
cs = compat.ConsumerSet(self.connection, consumers=[c])
cs.iterconsume(limit=10, no_ack=True)
_iterconsume.assert_called_with(c.connection, cs, True, 10)
def test_revive(self, n='test_revive'):
c = compat.Consumer(self.connection, queue=n, exchange=n)
cs = compat.ConsumerSet(self.connection, consumers=[c])
with self.connection.channel() as c2:
cs.revive(c2)
self.assertIs(cs.backend, c2)
def test_constructor(self, prefix='0daf8h21'):
dcon = {'%s.xyx' % prefix: {'exchange': '%s.xyx' % prefix,
'routing_key': 'xyx'},
'%s.xyz' % prefix: {'exchange': '%s.xyz' % prefix,
'routing_key': 'xyz'}}
consumers = [compat.Consumer(self.connection, queue=prefix + str(i),
exchange=prefix + str(i))
for i in range(3)]
c = compat.ConsumerSet(self.connection, consumers=consumers)
c2 = compat.ConsumerSet(self.connection, from_dict=dcon)
self.assertEqual(len(c.queues), 3)
self.assertEqual(len(c2.queues), 2)
c.add_consumer(compat.Consumer(self.connection,
queue=prefix + 'xaxxxa',
exchange=prefix + 'xaxxxa'))
self.assertEqual(len(c.queues), 4)
for cq in c.queues:
self.assertIs(cq.channel, c.channel)
c2.add_consumer_from_dict({
'%s.xxx' % prefix: {
'exchange': '%s.xxx' % prefix,
'routing_key': 'xxx',
},
})
self.assertEqual(len(c2.queues), 3)
for c2q in c2.queues:
self.assertIs(c2q.channel, c2.channel)
c.discard_all()
self.assertEqual(c.channel.called.count('queue_purge'), 4)
c.consume()
c.close()
c2.close()
self.assertIn('basic_cancel', c.channel)
self.assertIn('close', c.channel)
self.assertIn('close', c2.channel)
| 35.716049 | 76 | 0.571638 | [
"BSD-3-Clause"
] | chartbeat/kombu | kombu/tests/test_compat.py | 11,572 | Python |
from django.db.models import Max
from datahub.company.models import Company as DBCompany, CompanyPermission
from datahub.core.query_utils import get_aggregate_subquery
from datahub.search.apps import SearchApp
from datahub.search.company.models import Company
class CompanySearchApp(SearchApp):
"""SearchApp for company."""
name = 'company'
es_model = Company
view_permissions = (f'company.{CompanyPermission.view_company}',)
export_permission = f'company.{CompanyPermission.export_company}'
queryset = DBCompany.objects.select_related(
'archived_by',
'business_type',
'employee_range',
'export_experience_category',
'headquarter_type',
'one_list_account_owner',
'global_headquarters__one_list_account_owner',
'global_headquarters',
'address_country',
'registered_address_country',
'sector',
'sector__parent',
'sector__parent__parent',
'turnover_range',
'uk_region',
).prefetch_related(
'export_countries__country',
).annotate(
latest_interaction_date=get_aggregate_subquery(
DBCompany,
Max('interactions__date'),
),
)
| 30.675 | 74 | 0.685412 | [
"MIT"
] | reupen/data-hub-api | datahub/search/company/apps.py | 1,227 | Python |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import create_app
app, db = create_app(minimal=True)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
| 21.875 | 49 | 0.788571 | [
"Apache-2.0"
] | Info-ag/labplaner | manage.py | 350 | Python |
import Gramatica.Gramatica as g
import graphviz
import sys
import threading
import Errores.Nodo_Error as error
import Errores.ListaErrores as lista_err
from tkinter import *
from tkinter import filedialog
from tkinter import font
from tkinter import ttk
#------------------------------------ Interface ----------------------------------------------------------
root = Tk()
root.title('TytusDB - Team 19')
root.geometry("1000x750")
def ejecutar():
reporteg=[]
errores=lista_err.ListaErrores()
entrada = my_text.get("1.0",END)
    SQLparser = g.parse(entrada, errores)
    print(SQLparser)
    Output.delete(1.0,"end")
    Output.insert("1.0","Salida")
def open_File():
text_file = filedialog.askopenfilename(initialdir="C:/gui/", title="Text File", filetypes=(("Text Files", "*.txt"), ))
text_file = open(text_file, 'r')
stuff = text_file.read()
my_text.insert(END, stuff)
text_file.close()
def get_line_numbers():
output = ''
if show_line_number.get():
row, col = my_text.index("end").split('.')
for i in range(1, int(row)):
output += str(i) + '\n'
return output
def on_content_changed(event=None):
update_line_numbers()
def update_line_numbers(event=None):
line_numbers = get_line_numbers()
line_number_bar.config(state='normal')
line_number_bar.delete('1.0', 'end')
line_number_bar.insert('1.0', line_numbers)
line_number_bar.config(state='disabled')
menu_bar = Menu(root)
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label='Open', compound='left', underline=0, command=open_File)
file_menu.add_command(label='Ejecutar', compound='left', underline=0, command=ejecutar)
menu_bar.add_cascade(label='File', menu=file_menu)
reportes_menu = Menu(menu_bar, tearoff=0)
reportes_menu.add_command(label='Errores', compound='left', underline=0)
reportes_menu.add_separator()
reportes_menu.add_command(label='Gramaticas',compound='left', underline=0)
reportes_menu.add_separator()
reportes_menu.add_command(label='AST', compound='left', underline=0)
reportes_menu.add_separator()
reportes_menu.add_command(label='TS',compound='left', underline=0)
menu_bar.add_cascade(label='Reportes', menu=reportes_menu)
show_line_number=IntVar()
show_line_number.set(1)
root.config(menu=menu_bar)
my_frame = Frame(root)
my_frame.pack(pady=10)
text_scroll = Scrollbar(my_frame)
text_scroll.pack(side=RIGHT, fill=Y)
line_number_bar = Text(my_frame, width=4, padx=3, takefocus=0, fg='white', border=0, background='#282828',state='disabled', wrap='none')
line_number_bar.pack(side='left', fill='y')
my_text = Text(my_frame, width=110, height=30, selectforeground="black", yscrollcommand=text_scroll.set)
text_scroll.config(command=my_text.yview)
separator = ttk.Separator(root, orient='horizontal')
separator.place(relx=0, rely=0.47, relwidth=1, relheight=1)
Output = Text(root, height = 10,width = 115,bg = "light cyan")
my_text.bind('<Any-KeyPress>', on_content_changed)
entrada = my_text.get("1.0",END)
my_text.pack()
separator.pack()
Output.pack()
root.mainloop() | 31.649485 | 137 | 0.713355 | [
"MIT"
] | 3liezerSong/tytus | parser/team19/BDTytus/main.py | 3,070 | Python |
import random
# averaging the embeddings between 2 words
# return the averaged embeddings
def average_two_embeddings_vectors(a, b):
avg_embeddings = []
i = 0
for embed in a:
z = (embed + b[i]) / 2.0
avg_embeddings.append(z)
i += 1
return avg_embeddings
# helper func; updates tokens and embeddings with the new combined tokens and averaged embeddings
# return the updated tokens string and embeddings vector
def update_tok_and_embed(tokens, embeddings, index, embed2_index, averaged_embeddings):
# update tokens
if embed2_index > index:
tokens[index] = tokens[index] + " " + tokens[embed2_index]
else:
tokens[index] = tokens[embed2_index] + " " + tokens[index]
# update embeddings
embeddings[index] = averaged_embeddings
# delete old tokens and embeddings
del tokens[embed2_index]
del embeddings[embed2_index]
return tokens, embeddings
# helper func
def preprocessing_helper(tokens, embeddings, e, combine_with):
    # locate the token that will be combined with one of its neighbours
    index = tokens.index(e)
first, last = False, False
if (index - 1) == -1:
first = True
if (index + 1) == len(tokens):
last = True
embed1 = embeddings[index]
embed2 = []
embed2_index = 0
    # the words following these types of words usually have some syntactic and semantic relation to them
if combine_with == "after":
if last: # check if element is the last element
return tokens, embeddings
embed2_index = index + 1
embed2 = embeddings[embed2_index]
else: # the words before
if first: # check if first element
return tokens, embeddings
embed2_index = index - 1
embed2 = embeddings[embed2_index]
averaged_embeddings = average_two_embeddings_vectors(embed1, embed2)
return update_tok_and_embed(tokens, embeddings, index, embed2_index, averaged_embeddings)
# common tokens that might fit well with other tokens based on syntactic rules of english
# therefore, standardize with these before running the default algorithm
# return updated tokens and embeddings
def syntactic_rules_for_preprocessing(tokens, embeddings, std_length):
# not comprehensive but a start.
combined_after_set = {"a", "an", "the", "some", "each", "all", "to", "for", "in", "on", "of", "about", "with",
"from", "at", "have", "has", "is", "are", "was", "were", "be", "been", "being", "should",
"would", "will", "do", "don't", "did", "no", "not", "my", "his", "her", "your", "their",
"our", "its", "whose", "go", "going", "went", "come", "came", "coming"}
combined_before_set = {"him", "her", "them", "us", ",", ".", "!", "?", "...", ";", "-", "~"}
if len(tokens) > std_length:
for e in tokens:
# average embeddings with the token that follows the current token
if e in combined_after_set:
tokens, embeddings = preprocessing_helper(tokens, embeddings, e, "after")
if len(tokens) == std_length:
break
continue
# avg embedding with the token that precedes the current token
elif e in combined_before_set:
tokens, embeddings = preprocessing_helper(tokens, embeddings, e, "before")
if len(tokens) == std_length:
break
continue
return tokens, embeddings
# takes in tokens list and corresponding embeddings
# shortens the list until the specified length(default 10)
# shortens by averaging the embedding vectors and combining the corresponding tokens
# combined tokens separated by a space even if it's punctuation. e.g. 'end' + '.' -> "end ."
# returns the standardized tokens and embeddings lists
# implementation: averaging some words that might go together first (e.g. "the cat", "to her")
# then, just randomly select tokens and their adjacent token and average those embedding vectors
def standardize_by_averaging(tokens, embeddings, std_length=10):
flag = True
# so as to not change the original lists
tokens = tokens.copy()
embeddings = embeddings.copy()
while len(tokens) > std_length:
# attempt to standardize with some regards to syntactical knowledge first
if flag:
flag = False
tokens, embeddings = syntactic_rules_for_preprocessing(tokens, embeddings, std_length)
continue
length = len(tokens)
index = random.randint(1, length - 1) # uses randomizer so to vary the averaging place
embed1 = embeddings[index]
embed2 = embeddings[index - 1]
averaged_embeddings = average_two_embeddings_vectors(embed1, embed2)
        tokens, embeddings = update_tok_and_embed(tokens, embeddings, index, index - 1, averaged_embeddings)
return tokens, embeddings
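# Illustrative example (values hypothetical, not taken from this file): averaging the
# adjacent pair ("the", "cat") with embeddings [1.0, 2.0] and [3.0, 4.0] produces the
# single combined token "the cat" with the element-wise mean embedding [2.0, 3.0].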
def standardize_by_duplicating(tokens, embeddings, std_length=10):
token_copy, embeddings_copy = tokens[:], embeddings[:]
while len(tokens) < std_length:
# duplicate the whole message once
tokens += token_copy
embeddings += embeddings_copy
return standardize_by_averaging(tokens, embeddings, std_length)
def main():
# fill
long_tokens = ["this", "is", "a", "sentence", "that", "is", "over", "ten", "embeddings",
"long", "and", "that", "there", "are", "punctuations", ".",
"this", "is", "a", "sentence", "that", "is", "over", "ten", "embeddings",
"long", "and", "that", "there", "are", "punctuations", "."]
long_tokens2 = [".", ".", "gonna", "be", "a", "long", "in", "order", "for", "the",
"testing", "of", "the", "code", ".", "there", "will", "be", "some", "weird",
"tokens", "hello", "this", "spellings", "to", "see", "how", "that's", "this", "will", "be", "the"]
long_embeddings = [[1.2, 3.34], [2.3, 3.5], [5.6, 6.6], [5.1, 2.3], [2.3, 4.4], [3.3, 5.8], [8.8, 7.7], [1.1, 2.3],
[9.9, 1.2], [2.1, 2.1], [1.0, 1.0], [1.1, 3.4], [1.2, 3.2], [3.4, 4.0], [1.1, 2.3], [1.1, 1.1],
[1.2, 3.34], [2.3, 3.5], [5.6, 6.6], [5.1, 2.3], [2.3, 4.4], [3.3, 5.8], [8.8, 7.7], [1.1, 2.3],
[9.9, 1.2], [2.1, 2.1], [1.0, 1.0], [1.1, 3.4], [1.2, 3.2], [3.4, 4.0], [1.1, 2.3], [1.1, 1.1]]
# for testing purposes
print("test standardize_by_averaging")
print("before; tokens:\n", long_tokens) # before standardizing
print("before; embeddings:\n", long_embeddings, "\n\n")
tokens, embeddings = standardize_by_averaging(long_tokens, long_embeddings)
print("after; tokens:\n", tokens) # after standardizing
print("after; embeddings:\n", embeddings, "\n\n")
# test standardize_by_averaging #2, uses the same embeddings as test #1
print("test standardize_by_averaging#2")
print("before; tokens:\n", long_tokens2) # before standardizing
print("before; embeddings:\n", long_embeddings, "\n\n")
tokens, embeddings = standardize_by_averaging(long_tokens2, long_embeddings)
print("after; tokens:\n", tokens) # after standardizing
print("after; embeddings:\n", embeddings, "\n\n")
# standardize by duplicating
short_tokens = ["This", "is", "looking", "Bullish"]
short_embeddings = [[1.2, 3.34], [2.3, 3.5], [5.6, 6.6], [5.1, 2.3]]
# for testing purposes
print("test standardize_by_duplicating")
print("before; tokens:\n", short_tokens) # before standardizing
print("before embeddings:\n", short_embeddings, "\n\n")
tokens, embeddings = standardize_by_duplicating(short_tokens, short_embeddings)
print("after; tokens:\n", tokens) # after standardizing
print("after; embeddings:\n", embeddings, "\n\n")
return
if __name__ == "__main__":
# execute only if run as a script
main()
| 40.423469 | 119 | 0.616686 | [
"MIT"
] | AMOOOMA/stocktwits-svm-nlp | support/standardize.py | 7,923 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualMachine(Resource):
"""Describes a Virtual Machine.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Required. Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:param plan: Specifies information about the marketplace image used to
create the virtual machine. This element is only used for marketplace
images. Before you can use a marketplace image from an API, you must
enable the image for programmatic use. In the Azure portal, find the
marketplace image that you want to use and then click **Want to deploy
programmatically, Get Started ->**. Enter any required information and
then click **Save**.
:type plan: ~azure.mgmt.compute.v2017_03_30.models.Plan
:param hardware_profile: Specifies the hardware settings for the virtual
machine.
:type hardware_profile:
~azure.mgmt.compute.v2017_03_30.models.HardwareProfile
:param storage_profile: Specifies the storage settings for the virtual
machine disks.
:type storage_profile:
~azure.mgmt.compute.v2017_03_30.models.StorageProfile
:param os_profile: Specifies the operating system settings for the virtual
machine.
:type os_profile: ~azure.mgmt.compute.v2017_03_30.models.OSProfile
:param network_profile: Specifies the network interfaces of the virtual
machine.
:type network_profile:
~azure.mgmt.compute.v2017_03_30.models.NetworkProfile
:param diagnostics_profile: Specifies the boot diagnostic settings state.
<br><br>Minimum api-version: 2015-06-15.
:type diagnostics_profile:
~azure.mgmt.compute.v2017_03_30.models.DiagnosticsProfile
:param availability_set: Specifies information about the availability set
that the virtual machine should be assigned to. Virtual machines specified
in the same availability set are allocated to different nodes to maximize
availability. For more information about availability sets, see [Manage
the availability of virtual
machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
    <br><br> For more information on Azure planned maintenance, see [Planned
maintenance for virtual machines in
Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
<br><br> Currently, a VM can only be added to availability set at creation
time. An existing VM cannot be added to an availability set.
:type availability_set: ~azure.mgmt.compute.v2017_03_30.models.SubResource
:ivar provisioning_state: The provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:ivar instance_view: The virtual machine instance view.
:vartype instance_view:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineInstanceView
:param license_type: Specifies that the image or disk that is being used
was licensed on-premises. This element is only used for images that
contain the Windows Server operating system. <br><br> Possible values are:
<br><br> Windows_Client <br><br> Windows_Server <br><br> If this element
is included in a request for an update, the value must match the initial
value. This value cannot be updated. <br><br> For more information, see
[Azure Hybrid Use Benefit for Windows
Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
<br><br> Minimum api-version: 2015-06-15
:type license_type: str
:ivar vm_id: Specifies the VM unique ID which is a 128-bits identifier
that is encoded and stored in all Azure IaaS VMs SMBIOS and can be read
using platform BIOS commands.
:vartype vm_id: str
:ivar resources: The virtual machine child extension resources.
:vartype resources:
list[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtension]
:param identity: The identity of the virtual machine, if configured.
:type identity:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineIdentity
:param zones: The virtual machine zones.
:type zones: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'instance_view': {'readonly': True},
'vm_id': {'readonly': True},
'resources': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'plan': {'key': 'plan', 'type': 'Plan'},
'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'},
'license_type': {'key': 'properties.licenseType', 'type': 'str'},
'vm_id': {'key': 'properties.vmId', 'type': 'str'},
'resources': {'key': 'resources', 'type': '[VirtualMachineExtension]'},
'identity': {'key': 'identity', 'type': 'VirtualMachineIdentity'},
'zones': {'key': 'zones', 'type': '[str]'},
}
def __init__(self, *, location: str, tags=None, plan=None, hardware_profile=None, storage_profile=None, os_profile=None, network_profile=None, diagnostics_profile=None, availability_set=None, license_type: str=None, identity=None, zones=None, **kwargs) -> None:
super(VirtualMachine, self).__init__(location=location, tags=tags, **kwargs)
self.plan = plan
self.hardware_profile = hardware_profile
self.storage_profile = storage_profile
self.os_profile = os_profile
self.network_profile = network_profile
self.diagnostics_profile = diagnostics_profile
self.availability_set = availability_set
self.provisioning_state = None
self.instance_view = None
self.license_type = license_type
self.vm_id = None
self.resources = None
self.identity = identity
self.zones = zones
| 51.655629 | 265 | 0.686026 | [
"MIT"
] | AlexanderYukhanov/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/virtual_machine_py3.py | 7,800 | Python |
# a = 15*15 + 14*14 + 13*13 + 12*12 + 11*11
# print(a)
# a = 3**33333333 % 100
# b = 7**77777777 % 10
# print(a * b)
# x = 6
# y = 4
# z = 0
# for i in range(1,14):
# z += x*y
# y += 1
# x +=
# print(x, y, z)
# print(z)
# idx = 0
# a = [5,10,20,50,100]
# for i in a:
ax = 6
a = 6
b = 4
while b <= 210:
c = a*b
print(a,b,c)
    a  # no-op: bare expression, apparently left over from an unfinished statement
b += 1 | 13.433333 | 44 | 0.37469 | [
"Unlicense"
] | Mhmdaris15/free-python-project | Code Politan/KSI POSI INFORMATIKA.py | 403 | Python |
import dj_database_url
SECRET_KEY = 'django-migration-docs'
# Install the tests as an app so that we can make test models
INSTALLED_APPS = ['migration_docs', 'migration_docs.tests']
# Database url comes from the DATABASE_URL env var
DATABASES = {'default': dj_database_url.config()}
| 31.666667 | 61 | 0.778947 | [
"BSD-3-Clause"
] | Opus10/django-migration-docs | settings.py | 285 | Python |
# -*- coding: utf-8 -*-
# Copyright (C) 2018 by
# Marta Grobelna <[email protected]>
# Petre Petrov <[email protected]>
# Rudi Floren <[email protected]>
# Tobias Winkler <[email protected]>
# All rights reserved.
# BSD license.
#
# Authors: Marta Grobelna <[email protected]>
# Petre Petrov <[email protected]>
# Rudi Floren <[email protected]>
# Tobias Winkler <[email protected]>
import random as rnd
from collections import deque
import networkx as nx
from planar_graph_sampler.combinatorial_classes.half_edge_graph import HalfEdgeGraph
class IrreducibleDissection(HalfEdgeGraph):
"""
Represents the class 'I' of irreducible dissections from the paper.
It is however also used for rooted and derived dissections (sizes are incorrect then).
Parameters
----------
half_edge: ClosureHalfEdge
A half-edge on the hexagonal boundary of a closed binary tree.
"""
def __init__(self, half_edge):
assert half_edge.is_hexagonal
        if half_edge.color != 'black':
            half_edge = half_edge.opposite.next
        assert half_edge.color == 'black'
super(IrreducibleDissection, self).__init__(half_edge)
@property
def is_consistent(self):
super_ok = super(IrreducibleDissection, self).is_consistent
        root_is_black = self.half_edge.color == 'black'
root_is_hex = self.half_edge.is_hexagonal
twelve_hex_he = len([he for he in self.half_edge.get_all_half_edges() if he.is_hexagonal]) == 12
return all([super_ok, root_is_black, root_is_hex, twelve_hex_he, self.is_admissible])
@property
def hexagonal_edges(self):
"""Gets the three half-edges on the hexagonal boundary incident to a black node and point in ccw direction."""
first = self.half_edge
res = [first]
second = first.opposite.next.opposite.next
res.append(second)
third = second.opposite.next.opposite.next
res.append(third)
for he in res:
            assert he.is_hexagonal and he.color == 'black'
return res
def root_at_random_hexagonal_edge(self):
"""Selects a random hexagonal half-edge and makes it the root."""
self._half_edge = rnd.choice(self.hexagonal_edges)
@property
def is_admissible_slow(self):
"""Checks if there is a path of length 3 with an inner edge from the root to the opposite outer vertex."""
start_node = self.half_edge
        assert start_node.color == 'black'
        end_node = self.half_edge.opposite.next.opposite.next.opposite
        assert end_node.color == 'white'
start_node = start_node.node_nr
end_node = end_node.node_nr
g = self.to_networkx_graph()
        # There are always 2 paths of length 4 (meaning 4 nodes) from start to end (on the hexagon boundary).
# If there is one more, then this is a forbidden path!
paths = nx.shortest_simple_paths(g, start_node, end_node)
path_1 = next(paths)
assert len(path_1) == 4
path_2 = next(paths)
assert len(path_2) == 4
path_3 = next(paths)
return len(path_3) > 4
@property
def is_admissible(self):
"""Checks if there is a path of length 3 with an inner edge from the root to the opposite outer vertex."""
start_node = self.half_edge
        assert start_node.color == 'black'
        end_node = self.half_edge.opposite.next.opposite.next.opposite
        assert end_node.color == 'white'
# Creates the queue for the BFS.
queue = deque(list())
# Put the init half edge into the queue.
queue.append((self.half_edge, 0, False, set()))
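        # Each queue entry is (half-edge, distance from the root, whether a
        # non-hexagonal inner edge has been used so far, set of visited node numbers).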
while len(queue) != 0:
# Pop the _first element from the FIFO queue.
top_element = queue.popleft()
# Extract the components from the top element.
top_half_edge = top_element[0]
distance = top_element[1]
has_been_inner_edge_included = top_element[2]
visited_nodes = top_element[3]
# Updated the visited_nodes_set.
visited_nodes.add(top_half_edge.node_nr)
# Start BFS for the half edges connected with the specific node.
incident_half_edges = top_half_edge.incident_half_edges()
for walker_half_edge in incident_half_edges:
opposite = walker_half_edge.opposite
# Skip the vertex if it was already visited.
                if opposite.node_nr in visited_nodes: continue  # visited_nodes stores node numbers
# Prepare the new components of the element.
updated_distance = distance + 1
new_visited_nodes = set()
new_visited_nodes.update(visited_nodes)
inner_edge_included = has_been_inner_edge_included or (opposite.is_hexagonal is False)
# If the distance is smaller than 3 then the element is added into the queue.
if updated_distance < 3:
queue.append((opposite, updated_distance, inner_edge_included, new_visited_nodes))
else:
# If the distance is equal to 3 than we check whether the new vertex is the outer one and
# does an inner edge have been included in the path. If both conditions are True, then a path
# has been found which means that the dissection is not irreducible. -> Return false.
if opposite.node_nr == end_node.node_nr and inner_edge_included:
return False
# A path has not been found, therefore the dissection is irreducible and we return True.
return True
# CombinatorialClass interface.
@property
def u_size(self):
"""The u-size is the number of inner faces."""
return (self.number_of_half_edges - 6) / 4
@property
def l_size(self):
"""The l-size is the number of black inner vertices."""
node_dict = self.half_edge.node_dict()
        black_vertices = len([node_nr for node_nr in node_dict if node_dict[node_nr][0].color == 'black'])
# There are always 3 hexagonal outer black vertices.
return black_vertices - 3
# Networkx related functionality.
def to_networkx_graph(self, include_unpaired=None):
"""Converts to networkx graph, encodes hexagonal nodes with colors."""
from planar_graph_sampler.combinatorial_classes.half_edge_graph import color_scale
# Get dict of nodes.
nodes = self.half_edge.node_dict()
# Include the leaves as well.
G = super(IrreducibleDissection, self).to_networkx_graph(include_unpaired=False)
for v in G:
if any([he.is_hexagonal for he in nodes[v]]):
G.nodes[v]['color'] = '#e8f442'
else:
G.nodes[v]['color'] = '#aaaaaa'
            if nodes[v][0].color == 'black':
# Make black nodes darker.
G.nodes[v]['color'] = color_scale(G.nodes[v]['color'], 0.5)
return G
| 40.293785 | 118 | 0.642316 | [
"BSD-3-Clause"
] | petrovp/networkx-related | planar_graph_sampler/combinatorial_classes/dissection.py | 7,132 | Python |
from flask import Flask, render_template, request, flash, url_for
from flask_mail import Message, Mail
import json
from typing import Dict, List
from pathlib import Path
from forms import ContactForm
from development_config import Config
"""
This file launches the application.
"""
# init application
app = Flask(__name__)
# add secretkey, mail and debug configurations
app.config.from_object(Config)
# attaching mail to the flask app
mail = Mail(app)
def read_json(json_file: str, debug=False) -> List[Dict]:
"""
reads the json files, and formats the description that
is associated with each of the json dictionaries that are read in.
:param json_file: json file to parse from
:param debug: if set to true, will print the json dictionaries as
they are read in
:return: list of all of the json dictionaries
"""
# parsing json file
with open(json_file, "r") as json_desc:
# read json file
project_list: List[Dict] = json.load(json_desc)
# formats the description data which I stored in a json list
for project in project_list:
project['description'] = " ".join(project['description'])
if debug:
print(project)
return project_list
@app.route("/")
@app.route("/home")
def home_page():
return render_template("home.html", title="home")
@app.route("/portfolio")
def portfolio():
# json file to parse
json_file = "static/json/projects.json"
project_list = read_json(json_file)
# grouping portfolio into two's
project_groups = [[project_list[i*2], project_list[i*2+1]]
for i in range(len(project_list) // 2)]
# getting the last project
project_singles = False
if len(project_list) % 2 != 0:
project_singles = project_list[-1:]
return render_template("portfolio.html",
title="portfolio",
project_groups=project_groups,
project_singles=project_singles)
@app.route("/talks")
def talks():
# json file to parse
json_file = "static/json/talks.json"
# parsed json results
project_list = read_json(json_file)
return render_template("talks.html",
project_list=project_list,
title="talks")
@app.route("/contact", methods=['GET', 'POST'])
def contact():
# although I am recreating this form object for every call
    # - its state seems to persist...
form = ContactForm()
if request.method == 'POST':
if form.validate() is False:
flash("All fields are required", "flash")
return render_template("contact.html", form=form)
else:
msg = Message(form.subject.data,
sender='[email protected]',
recipients=['[email protected]'])
msg.body = """
From: {} <{}>
{}
""".format(form.name.data, form.email.data, form.message.data)
mail.send(msg)
return render_template('contact.html', success=True)
elif request.method == 'GET':
return render_template("contact.html", form=form, title="email")
if __name__ == "__main__":
app.run()
| 27.898305 | 80 | 0.620899 | [
"MIT"
] | Jim-Shaddix/Personal-Website | app.py | 3,292 | Python |
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
from caffe2.proto import caffe2_pb2
class TestPrependDim(TestCase):
def _test_fwd_bwd(self):
old_shape = (128, 2, 4)
new_shape = (8, 16, 2, 4)
X = np.random.rand(*old_shape).astype(np.float32)
Y = np.random.rand(*new_shape).astype(np.float32)
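        # PrependDim with dim_size=8 splits the leading dimension of X (128) into
        # (8, 16), so X_out is expected to take Y's shape (8, 16, 2, 4).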
net = core.Net('net')
net.GivenTensorFill([], 'X', shape=old_shape, values=X.flatten())
net.GivenTensorFill([], 'Y', shape=new_shape, values=Y.flatten())
net.PrependDim(['X'], ['X_out'], dim_size=8)
net.DotProduct(['X_out', 'Y'], 'Z')
net.AddGradientOperators(['Z'])
workspace.RunNetOnce(net)
X_out = workspace.FetchBlob('X_out')
X_grad = workspace.FetchBlob('X_grad')
Y_grad = workspace.FetchBlob('Y_grad')
# Check the shape of the gradient
np.testing.assert_array_equal(X_out.shape, Y.shape)
np.testing.assert_array_equal(X_grad.shape, X.shape)
np.testing.assert_array_equal(Y_grad.shape, Y.shape)
def test_prepend_dim(self):
devices = [core.DeviceOption(caffe2_pb2.CPU, 0)]
if workspace.NumGpuDevices() > 0:
devices.append(core.DeviceOption(workspace.GpuDeviceType, 0))
for device_opt in devices:
with core.DeviceScope(device_opt):
self._test_fwd_bwd()
if __name__ == "__main__":
import unittest
unittest.main()
| 29.923077 | 74 | 0.619537 | [
"MIT"
] | Westlanderz/AI-Plat1 | venv/Lib/site-packages/caffe2/python/operator_test/prepend_dim_test.py | 1,556 | Python |
# coding: utf-8
import attr
from ..util.log import sanitize_dictionary
@attr.s(slots=True)
class BoxRequest:
"""Represents a Box API request.
:param url: The URL being requested.
:type url: `unicode`
:param method: The HTTP method to use for the request.
:type method: `unicode` or None
:param headers: HTTP headers to include with the request.
:type headers: `dict` or None
:param auto_session_renewal: Whether or not the session can be automatically renewed if the request fails.
:type auto_session_renewal: `bool` or None
:param expect_json_response: Whether or not the API response must be JSON.
:type expect_json_response: `bool` or None
"""
url = attr.ib()
method = attr.ib(default='GET')
headers = attr.ib(default=attr.Factory(dict))
auto_session_renewal = attr.ib(default=True)
expect_json_response = attr.ib(default=True)
def __repr__(self):
return '<BoxRequest for {self.method} {self.url} with headers {headers}'.format(
self=self,
headers=sanitize_dictionary(self.headers),
)
| 36.147059 | 113 | 0.619203 | [
"Apache-2.0"
] | DaveSawyer/box-python-sdk | boxsdk/session/box_request.py | 1,229 | Python |
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
setup(
author="Faris A Chugthai",
author_email='[email protected]',
description="Python Boilerplate contains all the boilerplate you need to create a Python package.",
entry_points={
'console_scripts': [
'fatal_police_shootings=fatal_police_shootings.core:main',
],
},
license="MIT license",
include_package_data=True,
keywords='fatal_police_shootings',
name='fatal_police_shootings',
packages=find_packages(
include=[
'fatal_police_shootings', 'fatal_police_shootings.*'
]),
test_suite='tests',
url='https://github.com/farisachugthai/fatal_police_shootings',
version='0.1.0',
zip_safe=False,
)
| 25.676471 | 103 | 0.683849 | [
"MIT"
] | farisachugthai/fatal_police_shootings | setup.py | 873 | Python |
import tensorflow as tf
import numpy as np
import os
from tqdm import tqdm
import argparse
from utils.utils import create_tfr_files, prob_to_secondary_structure
from utils.FastaMLtoSL import FastaMLtoSL
import time
start = time.time()
from argparse import RawTextHelpFormatter
parser = argparse.ArgumentParser()
parser.add_argument('--inputs', default='sample_inputs/2zzm-B.fasta', type=str, help='Path to input file in fasta format; accepts multiple sequences in fasta format as well; default = ''sample_inputs/single_seq.fasta''\n', metavar='')
parser.add_argument('--outputs',default='outputs/', type=str, help='Path to output files; SPOT-RNA outputs at least three files: .ct, .bpseq, and .prob; default = ''outputs/\n', metavar='')
parser.add_argument('--gpu', default=-1, type=int, help='To run on GPU, specify the GPU number. If the computer has only one GPU, specify 0; default = -1 (no GPU)\n', metavar='')
parser.add_argument('--plots',default=False, type=bool, help='Set this to "True" to get the 2D plots of predicted secondary structure by SPOT-RNA; default = False\n', metavar='')
parser.add_argument('--motifs',default=False, type=bool, help='Set this to "True" to get the motifs of predicted secondary structure by SPOT-RNA; default = False\n', metavar='')
parser.add_argument('--cpu',default=16, type=int, help='Specify number of cpu threads that SPOT-RNA can use; default = 16\n', metavar='')
#parser.add_argument('--NC',default=True, type=bool, help='Set this to "False" to predict only canonical pairs; default = True\n', metavar='')
args = parser.parse_args()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
FastaMLtoSL(args.inputs)
base_path = os.path.dirname(os.path.realpath(__file__))
input_file = os.path.basename(args.inputs)
create_tfr_files(args.inputs, base_path, input_file)
with open(args.inputs) as file:
input_data = [line.strip() for line in file.read().splitlines() if line.strip()]
count = int(len(input_data)/2)
ids = [input_data[2*i].replace(">", "") for i in range(count)]
sequences = {}
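# Build the id -> sequence map: sequences are upper-cased, spaces removed, and
# DNA-style 'T' bases rewritten as RNA 'U'.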
for i,I in enumerate(ids):
sequences[I] = input_data[2*i+1].replace(" ", "").upper().replace("T", "U")
os.environ["CUDA_VISIBLE_DEVICES"]= str(args.gpu)
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
NUM_MODELS = 5
test_loc = [os.path.join(base_path, 'input_tfr_files', input_file+'.tfrecords')]
outputs = {}
mask = {}
def sigmoid(x):
return 1/(1+np.exp(-np.array(x, dtype=np.float128)))
for MODEL in range(NUM_MODELS):
if args.gpu==-1:
config = tf.ConfigProto(intra_op_parallelism_threads=args.cpu, inter_op_parallelism_threads=args.cpu)
else:
config = tf.compat.v1.ConfigProto()
config.allow_soft_placement=True
config.log_device_placement=False
print('\nPredicting for SPOT-RNA model '+str(MODEL))
with tf.compat.v1.Session(config=config) as sess:
saver = tf.compat.v1.train.import_meta_graph(os.path.join(base_path, 'SPOT-RNA-models', 'model' + str(MODEL) + '.meta'))
saver.restore(sess,os.path.join(base_path, 'SPOT-RNA-models', 'model' + str(MODEL)))
graph = tf.compat.v1.get_default_graph()
init_test = graph.get_operation_by_name('make_initializer_2')
tmp_out = graph.get_tensor_by_name('output_FC/fully_connected/BiasAdd:0')
name_tensor = graph.get_tensor_by_name('tensors_2/component_0:0')
RNA_name = graph.get_tensor_by_name('IteratorGetNext:0')
label_mask = graph.get_tensor_by_name('IteratorGetNext:4')
sess.run(init_test,feed_dict={name_tensor:test_loc})
pbar = tqdm(total = count)
while True:
try:
out = sess.run([tmp_out,RNA_name,label_mask],feed_dict={'dropout:0':1})
out[1] = out[1].decode()
mask[out[1]] = out[2]
if MODEL == 0:
outputs[out[1]] = [sigmoid(out[0])]
else:
outputs[out[1]].append(sigmoid(out[0]))
#print('RNA name: %s'%(out[1]))
pbar.update(1)
            except:  # the TFRecord iterator raises tf.errors.OutOfRangeError when exhausted, ending the loop
break
pbar.close()
tf.compat.v1.reset_default_graph()
RNA_ids = [i for i in list(outputs.keys())]
ensemble_outputs = {}
print('\nPost Processing and Saving Output')
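# Ensemble step: average the per-model sigmoid outputs for each sequence before
# converting the base-pair probabilities to a secondary structure.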
for i in RNA_ids:
ensemble_outputs[i] = np.mean(outputs[i],0)
prob_to_secondary_structure(ensemble_outputs[i], mask[i], sequences[i], i, args, base_path)
print('\nFinished!')
end = time.time()
print('\nProcesssing Time {} seconds'.format(end - start))
| 44.163462 | 233 | 0.679948 | [
"MPL-2.0"
] | MuhammedHasan/SPOT-RNA | SPOT-RNA.py | 4,593 | Python |
import os,sys
import cytnx as cy
class Hising(cy.LinOp):
def __init__(self,L,J,Hx):
cy.LinOp.__init__(self,"mv_elem",2**L,cy.Type.Double,cy.Device.cpu)
## custom members:
self.J = J
self.Hx = Hx
self.L = L
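    # Convention used by the operators below: each basis state is an integer in
    # [0, 2**L) whose bit i stores the spin on site i. SzSz returns +1 when bits i
    # and j agree and -1 when they differ; Sx flips bit i of the input index.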
def SzSz(self,i,j,ipt_id):
return ipt_id,(1. - 2.*(((ipt_id>>i)&0x1)^((ipt_id>>j)&0x1)))
def Sx(self,i,ipt_id):
out_id = ipt_id^((0x1)<<i)
return out_id,1.0
## let's overload this with custom operation:
def pre_construct(self):
for a in range(self.nx()):
tmp = [[],[]]
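            # tmp[0] collects the output basis indices reachable from column `a`,
            # tmp[1] the accumulated amplitudes, so repeated entries are merged
            # before being written with set_elem.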
for i in range(self.L):
oid,amp = self.SzSz(i,(i+1)%self.L,a)
if not oid in tmp[0]:
tmp[0].append(oid)
tmp[1].append(amp*self.J)
else:
idx = tmp[0].index(oid)
tmp[1][idx] += amp*self.J
#self.set_elem(oid,a,amp*self.J)
oid,amp = self.Sx(i,a)
if not oid in tmp[0]:
tmp[0].append(oid)
tmp[1].append(amp*(-self.Hx))
else:
idx = tmp[0].index(oid)
tmp[1][idx]+=amp*(-self.Hx)
for i in range(len(tmp[0])):
self.set_elem(tmp[0][i],a,tmp[1][i])
#def matvec(self,v):
# out = cy.zeros(v.shape()[0],v.dtype(),v.device());
# return out
L = 4
J = 1
Hx = 0.3
H = Hising(L,J,Hx)
H.pre_construct()
v = cy.ones(16)
print(cy.linalg.Lanczos_ER(H,3))
| 25.31746 | 75 | 0.450784 | [
"Apache-2.0"
] | j9263178/Cytnx | example/ED/ed_ising_mve.py | 1,595 | Python |
#!/usr/bin/env python
""" Translator Class and builder """
from __future__ import print_function
import codecs
import os
import math
import torch
from tensorboardX import SummaryWriter
from others.utils import rouge_results_to_str, test_rouge, tile
from translate.beam import GNMTGlobalScorer
def build_predictor(args, tokenizer, symbols, model, logger=None):
scorer = GNMTGlobalScorer(args.alpha,length_penalty='wu')
translator = Translator(args, model, tokenizer, symbols, global_scorer=scorer, logger=logger)
return translator
class Translator(object):
"""
Uses a model to translate a batch of sentences.
Args:
model (:obj:`onmt.modules.NMTModel`):
NMT model to use for translation
fields (dict of Fields): data fields
beam_size (int): size of beam to use
n_best (int): number of translations produced
max_length (int): maximum length output to produce
global_scores (:obj:`GlobalScorer`):
object to rescore final translations
copy_attn (bool): use copy attention during translation
cuda (bool): use cuda
beam_trace (bool): trace beam search for debugging
logger(logging.Logger): logger.
"""
def __init__(self,
args,
model,
vocab,
symbols,
global_scorer=None,
logger=None,
dump_beam=""):
self.logger = logger
self.cuda = args.visible_gpus != '-1'
self.args = args
self.model = model
self.generator = self.model.generator
self.vocab = vocab
self.symbols = symbols
self.start_token = symbols['BOS']
self.end_token = symbols['EOS']
self.global_scorer = global_scorer
self.beam_size = args.beam_size
self.min_length = args.min_length
self.max_length = args.max_length
self.dump_beam = dump_beam
# for debugging
self.beam_trace = self.dump_beam != ""
self.beam_accum = None
tensorboard_log_dir = args.model_path
self.tensorboard_writer = SummaryWriter(tensorboard_log_dir, comment="Unmt")
if self.beam_trace:
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
def _build_target_tokens(self, pred):
# vocab = self.fields["tgt"].vocab
tokens = []
for tok in pred:
tok = int(tok)
tokens.append(tok)
if tokens[-1] == self.end_token:
tokens = tokens[:-1]
break
tokens = [t for t in tokens if t < len(self.vocab)]
tokens = self.vocab.DecodeIds(tokens).split(' ')
return tokens
def from_batch(self, translation_batch):
batch = translation_batch["batch"]
assert (len(translation_batch["gold_score"]) ==
len(translation_batch["predictions"]))
batch_size = batch.batch_size
preds, pred_score, gold_score, tgt_str, src = translation_batch["predictions"],translation_batch["scores"],translation_batch["gold_score"],batch.tgt_str, batch.src
translations = []
for b in range(batch_size):
pred_sents = self.vocab.convert_ids_to_tokens([int(n) for n in preds[b][0]])
pred_sents = ' '.join(pred_sents).replace(' ##','')
gold_sent = ' '.join(tgt_str[b].split())
# translation = Translation(fname[b],src[:, b] if src is not None else None,
# src_raw, pred_sents,
# attn[b], pred_score[b], gold_sent,
# gold_score[b])
# src = self.spm.DecodeIds([int(t) for t in translation_batch['batch'].src[0][5] if int(t) != len(self.spm)])
raw_src = [self.vocab.ids_to_tokens[int(t)] for t in src[b]][:500]
raw_src = ' '.join(raw_src)
translation = (pred_sents, gold_sent, raw_src)
# translation = (pred_sents[0], gold_sent)
translations.append(translation)
return translations
def translate(self,
data_iter, step,
attn_debug=False):
self.model.eval()
gold_path = self.args.result_path + '.%d.gold' % step
can_path = self.args.result_path + '.%d.candidate' % step
self.gold_out_file = codecs.open(gold_path, 'w', 'utf-8')
self.can_out_file = codecs.open(can_path, 'w', 'utf-8')
# raw_gold_path = self.args.result_path + '.%d.raw_gold' % step
# raw_can_path = self.args.result_path + '.%d.raw_candidate' % step
raw_src_path = self.args.result_path + '.%d.raw_src' % step
self.src_out_file = codecs.open(raw_src_path, 'w', 'utf-8')
# pred_results, gold_results = [], []
ct = 0
with torch.no_grad():
for batch in data_iter:
if(self.args.recall_eval):
gold_tgt_len = batch.tgt.size(1)
self.min_length = gold_tgt_len + 20
self.max_length = gold_tgt_len + 60
batch_data = self.translate_batch(batch)
translations = self.from_batch(batch_data)
for trans in translations:
pred, gold, src = trans
pred_str = pred.replace('[unused1]', '').replace('[unused4]', '').replace('[PAD]', '').replace('[unused2]', '').replace(r' +', ' ').replace(' [unused3] ', '<q>').replace('[unused3]', '').strip()
gold_str = gold.strip()
if(self.args.recall_eval):
_pred_str = ''
gap = 1e3
for sent in pred_str.split('<q>'):
can_pred_str = _pred_str+ '<q>'+sent.strip()
can_gap = math.fabs(len(_pred_str.split())-len(gold_str.split()))
# if(can_gap>=gap):
if(len(can_pred_str.split())>=len(gold_str.split())+10):
pred_str = _pred_str
break
else:
gap = can_gap
_pred_str = can_pred_str
# pred_str = ' '.join(pred_str.split()[:len(gold_str.split())])
# self.raw_can_out_file.write(' '.join(pred).strip() + '\n')
# self.raw_gold_out_file.write(' '.join(gold).strip() + '\n')
self.can_out_file.write(pred_str + '\n')
self.gold_out_file.write(gold_str + '\n')
self.src_out_file.write(src.strip() + '\n')
ct += 1
self.can_out_file.flush()
self.gold_out_file.flush()
self.src_out_file.flush()
self.can_out_file.close()
self.gold_out_file.close()
self.src_out_file.close()
if (step != -1):
rouges = self._report_rouge(gold_path, can_path)
self.logger.info('Rouges at step %d \n%s' % (step, rouge_results_to_str(rouges)))
if self.tensorboard_writer is not None:
self.tensorboard_writer.add_scalar('test/rouge1-F', rouges['rouge_1_f_score'], step)
self.tensorboard_writer.add_scalar('test/rouge2-F', rouges['rouge_2_f_score'], step)
self.tensorboard_writer.add_scalar('test/rougeL-F', rouges['rouge_l_f_score'], step)
def _report_rouge(self, gold_path, can_path):
self.logger.info("Calculating Rouge")
results_dict = test_rouge(self.args.temp_dir, can_path, gold_path)
return results_dict
def translate_batch(self, batch, fast=False):
"""
Translate a batch of sentences.
Mostly a wrapper around :obj:`Beam`.
Args:
batch (:obj:`Batch`): a batch from a dataset object
data (:obj:`Dataset`): the dataset object
fast (bool): enables fast beam search (may not support all features)
Todo:
Shouldn't need the original dataset.
"""
with torch.no_grad():
return self._fast_translate_batch(
batch,
self.max_length,
min_length=self.min_length)
def _fast_translate_batch(self,
batch,
max_length,
min_length=0):
# TODO: faster code path for beam_size == 1.
# TODO: support these blacklisted features.
assert not self.dump_beam
beam_size = self.beam_size
batch_size = batch.batch_size
src = batch.src
segs = batch.segs
mask_src = batch.mask_src
src_features = self.model.bert(src, segs, mask_src)
dec_states = self.model.decoder.init_decoder_state(src, src_features, with_cache=True)
device = src_features.device
# Tile states and memory beam_size times.
dec_states.map_batch_fn(
lambda state, dim: tile(state, beam_size, dim=dim))
src_features = tile(src_features, beam_size, dim=0)
batch_offset = torch.arange(
batch_size, dtype=torch.long, device=device)
beam_offset = torch.arange(
0,
batch_size * beam_size,
step=beam_size,
dtype=torch.long,
device=device)
alive_seq = torch.full(
[batch_size * beam_size, 1],
self.start_token,
dtype=torch.long,
device=device)
# Give full probability to the first beam on the first step.
topk_log_probs = (
torch.tensor([0.0] + [float("-inf")] * (beam_size - 1),
device=device).repeat(batch_size))
# Structure that holds finished hypotheses.
hypotheses = [[] for _ in range(batch_size)] # noqa: F812
results = {}
results["predictions"] = [[] for _ in range(batch_size)] # noqa: F812
results["scores"] = [[] for _ in range(batch_size)] # noqa: F812
results["gold_score"] = [0] * batch_size
results["batch"] = batch
for step in range(max_length):
decoder_input = alive_seq[:, -1].view(1, -1)
# Decoder forward.
decoder_input = decoder_input.transpose(0,1)
dec_out, dec_states = self.model.decoder(decoder_input, src_features, dec_states,
step=step)
# Generator forward.
log_probs = self.generator.forward(dec_out.transpose(0,1).squeeze(0))
vocab_size = log_probs.size(-1)
if step < min_length:
log_probs[:, self.end_token] = -1e20
# Multiply probs by the beam probability.
log_probs += topk_log_probs.view(-1).unsqueeze(1)
alpha = self.global_scorer.alpha
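            # Length penalty from Wu et al. (GNMT), matching the 'wu' penalty used by
            # GNMTGlobalScorer in build_predictor: ((5 + length) / 6) ** alpha.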
length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha
# Flatten probs into a list of possibilities.
curr_scores = log_probs / length_penalty
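            # Trigram blocking: hypotheses whose newest trigram already appeared earlier
            # in the same hypothesis get their scores pushed to -10e20 below.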
if(self.args.block_trigram):
cur_len = alive_seq.size(1)
if(cur_len>3):
for i in range(alive_seq.size(0)):
fail = False
words = [int(w) for w in alive_seq[i]]
words = [self.vocab.ids_to_tokens[w] for w in words]
words = ' '.join(words).replace(' ##','').split()
if(len(words)<=3):
continue
trigrams = [(words[i-1],words[i],words[i+1]) for i in range(1,len(words)-1)]
trigram = tuple(trigrams[-1])
if trigram in trigrams[:-1]:
fail = True
if fail:
curr_scores[i] = -10e20
curr_scores = curr_scores.reshape(-1, beam_size * vocab_size)
topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1)
# Recover log probs.
topk_log_probs = topk_scores * length_penalty
# Resolve beam origin and true word ids.
topk_beam_index = topk_ids.div(vocab_size)
topk_ids = topk_ids.fmod(vocab_size)
# Map beam_index to batch_index in the flat representation.
batch_index = (
topk_beam_index
+ beam_offset[:topk_beam_index.size(0)].unsqueeze(1))
select_indices = batch_index.view(-1)
# Append last prediction.
alive_seq = torch.cat(
[alive_seq.index_select(0, select_indices),
topk_ids.view(-1, 1)], -1)
is_finished = topk_ids.eq(self.end_token)
if step + 1 == max_length:
is_finished.fill_(1)
# End condition is top beam is finished.
end_condition = is_finished[:, 0].eq(1)
# Save finished hypotheses.
if is_finished.any():
predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1))
for i in range(is_finished.size(0)):
b = batch_offset[i]
if end_condition[i]:
is_finished[i].fill_(1)
finished_hyp = is_finished[i].nonzero().view(-1)
# Store finished hypotheses for this batch.
for j in finished_hyp:
hypotheses[b].append((
topk_scores[i, j],
predictions[i, j, 1:]))
# If the batch reached the end, save the n_best hypotheses.
if end_condition[i]:
best_hyp = sorted(
hypotheses[b], key=lambda x: x[0], reverse=True)
score, pred = best_hyp[0]
results["scores"][b].append(score)
results["predictions"][b].append(pred)
non_finished = end_condition.eq(0).nonzero().view(-1)
# If all sentences are translated, no need to go further.
if len(non_finished) == 0:
break
# Remove finished batches for the next step.
topk_log_probs = topk_log_probs.index_select(0, non_finished)
batch_index = batch_index.index_select(0, non_finished)
batch_offset = batch_offset.index_select(0, non_finished)
alive_seq = predictions.index_select(0, non_finished) \
.view(-1, alive_seq.size(-1))
# Reorder states.
select_indices = batch_index.view(-1)
src_features = src_features.index_select(0, select_indices)
dec_states.map_batch_fn(
lambda state, dim: state.index_select(dim, select_indices))
return results
class Translation(object):
"""
Container for a translated sentence.
Attributes:
src (`LongTensor`): src word ids
src_raw ([str]): raw src words
pred_sents ([[str]]): words from the n-best translations
pred_scores ([[float]]): log-probs of n-best translations
attns ([`FloatTensor`]) : attention dist for each translation
gold_sent ([str]): words from gold translation
gold_score ([float]): log-prob of gold translation
"""
def __init__(self, fname, src, src_raw, pred_sents,
attn, pred_scores, tgt_sent, gold_score):
self.fname = fname
self.src = src
self.src_raw = src_raw
self.pred_sents = pred_sents
self.attns = attn
self.pred_scores = pred_scores
self.gold_sent = tgt_sent
self.gold_score = gold_score
def log(self, sent_number):
"""
Log translation.
"""
output = '\nSENT {}: {}\n'.format(sent_number, self.src_raw)
best_pred = self.pred_sents[0]
best_score = self.pred_scores[0]
pred_sent = ' '.join(best_pred)
output += 'PRED {}: {}\n'.format(sent_number, pred_sent)
output += "PRED SCORE: {:.4f}\n".format(best_score)
if self.gold_sent is not None:
tgt_sent = ' '.join(self.gold_sent)
output += 'GOLD {}: {}\n'.format(sent_number, tgt_sent)
output += ("GOLD SCORE: {:.4f}\n".format(self.gold_score))
if len(self.pred_sents) > 1:
output += '\nBEST HYP:\n'
for score, sent in zip(self.pred_scores, self.pred_sents):
output += "[{:.4f}] {}\n".format(score, sent)
return output
| 39.641026 | 214 | 0.545337 | [
"MIT"
] | SebastianVeile/PreSumm | src/models/predictor.py | 17,006 | Python |
# -*- coding: utf-8 -*-
__version__ = "3.0.0.dev0"
try:
__EMCEE3_SETUP__
except NameError:
__EMCEE3_SETUP__ = False
if not __EMCEE3_SETUP__:
__all__ = [
"moves",
"pools",
"autocorr",
"Model",
"SimpleModel",
"Sampler",
"Ensemble",
"State",
]
from . import moves, pools, autocorr
from .model import Model, SimpleModel
from .ensemble import Ensemble
from .samplers import Sampler
from .state import State
| 18.740741 | 41 | 0.583004 | [
"MIT"
] | dfm/emcee3 | emcee3/__init__.py | 506 | Python |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorboard.uploader.exporter."""
from unittest import mock
import numpy as np
import pandas
from tensorboard import test as tb_test
from tensorboard.data.experimental import experiment_from_dev
from tensorboard.uploader import test_util
from tensorboard.uploader.proto import export_service_pb2
from tensorboard.util import grpc_util
class ExperimentFromDevTest(tb_test.TestCase):
def test_get_scalars_works(self):
mock_api_client = mock.Mock()
def stream_experiment_data(request, **kwargs):
self.assertEqual(request.experiment_id, "789")
self.assertEqual(kwargs["metadata"], grpc_util.version_metadata())
for run in ("train", "test"):
for tag in ("accuracy", "loss"):
response = export_service_pb2.StreamExperimentDataResponse()
response.run_name = run
response.tag_name = tag
display_name = "%s:%s" % (request.experiment_id, tag)
response.tag_metadata.CopyFrom(
test_util.scalar_metadata(display_name)
)
for step in range(10):
response.points.steps.append(step)
if tag == "loss":
if run == "train":
value = 1.0 / (step + 1)
seconds = step
else:
value = -1.0 / (step + 1)
seconds = 600 + step
else: # "accuracy"
if run == "train":
value = 1.0 / (10 - step)
seconds = step * 2
else:
value = -1.0 / (10 - step)
seconds = 600 + step * 2
response.points.values.append(value)
response.points.wall_times.add(seconds=seconds, nanos=0)
yield response
mock_api_client.StreamExperimentData = mock.Mock(
wraps=stream_experiment_data
)
with mock.patch.object(
experiment_from_dev,
"get_api_client",
lambda api_endpoint: mock_api_client,
):
experiment = experiment_from_dev.ExperimentFromDev("789")
for pivot in (False, True):
for include_wall_time in (False, True):
with self.subTest(
"pivot=%s; include_wall_time=%s"
% (pivot, include_wall_time)
):
dataframe = experiment.get_scalars(
pivot=pivot, include_wall_time=include_wall_time
)
if pivot:
run_key = (
("run", "") if include_wall_time else "run"
)
step_key = (
("step", "") if include_wall_time else "step"
)
accuracy_value_key = (
("value", "accuracy")
if include_wall_time
else "accuracy"
)
loss_value_key = (
("value", "loss")
if include_wall_time
else "loss"
)
data = {
run_key: ["test"] * 10 + ["train"] * 10,
step_key: np.concatenate(
[np.arange(0, 10), np.arange(0, 10)]
),
accuracy_value_key: np.concatenate(
[
-1.0 / (10.0 - np.arange(0, 10)),
1.0 / (10.0 - np.arange(0, 10)),
],
),
loss_value_key: np.concatenate(
[
-1.0 / (1.0 + np.arange(0, 10)),
1.0 / (1.0 + np.arange(0, 10)),
],
),
}
if include_wall_time:
data[
("wall_time", "accuracy")
] = np.concatenate(
[
600.0 + 2.0 * np.arange(0, 10),
2.0 * np.arange(0, 10),
]
)
data[("wall_time", "loss")] = np.concatenate(
[
600.0 + np.arange(0, 10),
1.0 * np.arange(0, 10),
]
)
expected = pandas.DataFrame(data)
else: # No pivot_table.
data = {
"run": ["train"] * 20 + ["test"] * 20,
"tag": (["accuracy"] * 10 + ["loss"] * 10) * 2,
"step": list(np.arange(0, 10)) * 4,
"value": np.concatenate(
[
1.0 / (10.0 - np.arange(0, 10)),
1.0 / (1.0 + np.arange(0, 10)),
-1.0 / (10.0 - np.arange(0, 10)),
-1.0 / (1.0 + np.arange(0, 10)),
]
),
}
if include_wall_time:
data["wall_time"] = np.concatenate(
[
2.0 * np.arange(0, 10),
1.0 * np.arange(0, 10),
600.0 + 2.0 * np.arange(0, 10),
600.0 + np.arange(0, 10),
]
)
expected = pandas.DataFrame(data)
pandas.testing.assert_frame_equal(
dataframe,
expected,
check_names=True,
)
def test_get_scalars_with_pivot_table_with_missing_value(self):
mock_api_client = mock.Mock()
def stream_experiment_data(request, **kwargs):
self.assertEqual(request.experiment_id, "789")
self.assertEqual(kwargs["metadata"], grpc_util.version_metadata())
response = export_service_pb2.StreamExperimentDataResponse()
response.run_name = "train"
response.tag_name = "batch_loss"
response.points.steps.append(0)
response.points.values.append(0.5)
response.points.wall_times.add(seconds=0, nanos=0)
response.points.steps.append(1)
response.points.values.append(0.25)
response.points.wall_times.add(seconds=1, nanos=0)
yield response
response = export_service_pb2.StreamExperimentDataResponse()
response.run_name = "train"
response.tag_name = "epoch_loss"
response.points.steps.append(0)
response.points.values.append(0.375)
response.points.wall_times.add(seconds=2, nanos=0)
yield response
mock_api_client.StreamExperimentData = mock.Mock(
wraps=stream_experiment_data
)
with mock.patch.object(
experiment_from_dev,
"get_api_client",
lambda api_endpoint: mock_api_client,
):
experiment = experiment_from_dev.ExperimentFromDev("789")
with self.assertRaisesRegexp(
ValueError,
r"contains missing value\(s\).*different sets of "
r"steps.*pivot=False",
):
experiment.get_scalars(pivot=True)
def test_get_scalars_with_actual_inf_and_nan(self):
"""Test for get_scalars() call that involve inf and nan in user data."""
mock_api_client = mock.Mock()
def stream_experiment_data(request, **kwargs):
self.assertEqual(request.experiment_id, "789")
self.assertEqual(kwargs["metadata"], grpc_util.version_metadata())
response = export_service_pb2.StreamExperimentDataResponse()
response.run_name = "train"
response.tag_name = "batch_loss"
response.points.steps.append(0)
response.points.values.append(np.nan)
response.points.wall_times.add(seconds=0, nanos=0)
response.points.steps.append(1)
response.points.values.append(np.inf)
response.points.wall_times.add(seconds=10, nanos=0)
yield response
mock_api_client.StreamExperimentData = mock.Mock(
wraps=stream_experiment_data
)
with mock.patch.object(
experiment_from_dev,
"get_api_client",
lambda api_endpoint: mock_api_client,
):
experiment = experiment_from_dev.ExperimentFromDev("789")
dataframe = experiment.get_scalars(pivot=True)
expected = pandas.DataFrame(
{
"run": ["train"] * 2,
"step": [0, 1],
"batch_loss": [np.nan, np.inf],
}
)
pandas.testing.assert_frame_equal(dataframe, expected, check_names=True)
if __name__ == "__main__":
tb_test.main()
| 43.836653 | 80 | 0.436154 | ["Apache-2.0"] | AseiSugiyama/tensorboard | tensorboard/data/experimental/experiment_from_dev_test.py | 11,003 | Python |
import asyncio
import functools
import importlib
import inspect
import logging
from typing import Text, Dict, Optional, Any, List, Callable, Collection, Type
from rasa.shared.exceptions import RasaException
logger = logging.getLogger(__name__)
def class_from_module_path(
module_path: Text, lookup_path: Optional[Text] = None
) -> Type:
"""Given the module name and path of a class, tries to retrieve the class.
The loaded class can be used to instantiate new objects.
Args:
module_path: either an absolute path to a Python class,
or the name of the class in the local / global scope.
lookup_path: a path where to load the class from, if it cannot
be found in the local / global scope.
Returns:
a Python class
Raises:
ImportError, in case the Python class cannot be found.
RasaException, in case the imported result is something other than a class
"""
klass = None
if "." in module_path:
module_name, _, class_name = module_path.rpartition(".")
m = importlib.import_module(module_name)
klass = getattr(m, class_name, None)
elif lookup_path:
# try to import the class from the lookup path
m = importlib.import_module(lookup_path)
klass = getattr(m, module_path, None)
if klass is None:
raise ImportError(f"Cannot retrieve class from path {module_path}.")
if not inspect.isclass(klass):
raise RasaException(
f"`class_from_module_path()` is expected to return a class, "
f"but for {module_path} we got a {type(klass)}."
)
return klass
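# Added example: `class_from_module_path("collections.OrderedDict")` imports the
# `collections` module and returns the `OrderedDict` class object.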
def all_subclasses(cls: Any) -> List[Any]:
"""Returns all known (imported) subclasses of a class."""
classes = cls.__subclasses__() + [
g for s in cls.__subclasses__() for g in all_subclasses(s)
]
return [subclass for subclass in classes if not inspect.isabstract(subclass)]
def module_path_from_instance(inst: Any) -> Text:
"""Return the module path of an instance's class."""
return inst.__module__ + "." + inst.__class__.__name__
def sort_list_of_dicts_by_first_key(dicts: List[Dict]) -> List[Dict]:
"""Sorts a list of dictionaries by their first key."""
return sorted(dicts, key=lambda d: list(d.keys())[0])
def lazy_property(function: Callable) -> Any:
"""Allows to avoid recomputing a property over and over.
The result gets stored in a local var. Computation of the property
will happen once, on the first call of the property. All
succeeding calls will use the value stored in the private property."""
attr_name = "_lazy_" + function.__name__
@property
def _lazyprop(self: Any) -> Any:
if not hasattr(self, attr_name):
setattr(self, attr_name, function(self))
return getattr(self, attr_name)
return _lazyprop
def cached_method(f: Callable[..., Any]) -> Callable[..., Any]:
"""Caches method calls based on the call's `args` and `kwargs`.
Works for `async` and `sync` methods. Don't apply this to functions.
Args:
f: The decorated method whose return value should be cached.
Returns:
The return value which the method gives for the first call with the given
arguments.
"""
assert "self" in arguments_of(f), "This decorator can only be used with methods."
class Cache:
"""Helper class to abstract the caching details."""
def __init__(self, caching_object: object, args: Any, kwargs: Any) -> None:
self.caching_object = caching_object
self.cache = getattr(caching_object, self._cache_name(), {})
# noinspection PyUnresolvedReferences
self.cache_key = functools._make_key(args, kwargs, typed=False)
def _cache_name(self) -> Text:
return f"_cached_{self.caching_object.__class__.__name__}_{f.__name__}"
def is_cached(self) -> bool:
return self.cache_key in self.cache
def cache_result(self, result: Any) -> None:
self.cache[self.cache_key] = result
setattr(self.caching_object, self._cache_name(), self.cache)
def cached_result(self) -> Any:
return self.cache[self.cache_key]
if asyncio.iscoroutinefunction(f):
@functools.wraps(f)
async def decorated(self: object, *args: Any, **kwargs: Any) -> Any:
cache = Cache(self, args, kwargs)
if not cache.is_cached():
# Store the task immediately so that other concurrent calls of the
# method can re-use the same task and don't schedule a second execution.
to_cache = asyncio.ensure_future(f(self, *args, **kwargs))
cache.cache_result(to_cache)
return await cache.cached_result()
return decorated
else:
@functools.wraps(f)
def decorated(self: object, *args: Any, **kwargs: Any) -> Any:
cache = Cache(self, args, kwargs)
if not cache.is_cached():
to_cache = f(self, *args, **kwargs)
cache.cache_result(to_cache)
return cache.cached_result()
return decorated
def transform_collection_to_sentence(collection: Collection[Text]) -> Text:
"""Transforms e.g. a list like ['A', 'B', 'C'] into a sentence 'A, B and C'."""
x = list(collection)
if len(x) >= 2:
return ", ".join(map(str, x[:-1])) + " and " + x[-1]
return "".join(collection)
def minimal_kwargs(
kwargs: Dict[Text, Any], func: Callable, excluded_keys: Optional[List] = None
) -> Dict[Text, Any]:
"""Returns only the kwargs which are required by a function. Keys, contained in
the exception list, are not included.
Args:
kwargs: All available kwargs.
func: The function which should be called.
excluded_keys: Keys to exclude from the result.
Returns:
Subset of kwargs which are accepted by `func`.
"""
excluded_keys = excluded_keys or []
possible_arguments = arguments_of(func)
return {
k: v
for k, v in kwargs.items()
if k in possible_arguments and k not in excluded_keys
}
def mark_as_experimental_feature(feature_name: Text) -> None:
"""Warns users that they are using an experimental feature."""
logger.warning(
f"The {feature_name} is currently experimental and might change or be "
"removed in the future 🔬 Please share your feedback on it in the "
"forum (https://forum.rasa.com) to help us make this feature "
"ready for production."
)
def arguments_of(func: Callable) -> List[Text]:
"""Return the parameters of the function `func` as a list of names."""
import inspect
return list(inspect.signature(func).parameters.keys())
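# Illustrative usage (added example, not part of the original module). `_Demo` and
# `_greet` are hypothetical names introduced only to sketch how `lazy_property`,
# `cached_method`, `minimal_kwargs` and `transform_collection_to_sentence` behave.
if __name__ == "__main__":
    class _Demo:
        def __init__(self) -> None:
            self.calls = 0
        @lazy_property
        def expensive(self) -> int:
            # Computed once; later accesses reuse the stored value.
            self.calls += 1
            return 42
        @cached_method
        def add(self, a: int, b: int) -> int:
            # Cached per (args, kwargs) combination on this instance.
            self.calls += 1
            return a + b
    demo = _Demo()
    assert demo.expensive == 42 and demo.expensive == 42 and demo.calls == 1
    assert demo.add(1, 2) == 3 and demo.add(1, 2) == 3 and demo.calls == 2
    def _greet(name: Text, punctuation: Text = "!") -> Text:
        return f"Hello, {name}{punctuation}"
    # Only the kwargs accepted by `_greet` survive the filtering.
    print(minimal_kwargs({"name": "Rasa", "unused": 1}, _greet))  # {'name': 'Rasa'}
    print(transform_collection_to_sentence(["A", "B", "C"]))  # A, B and C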
| 33.607843 | 88 | 0.647025 | ["Apache-2.0"] | GCES-2021-1/rasa | rasa/shared/utils/common.py | 6,859 | Python |
from datetime import datetime
class Worklog:
def __init__(self, name: str, date: datetime, issue_id: str, duration: float):
self.issue_id = issue_id
self.name = name
self.date = date
self.duration = duration
| 24.6 | 82 | 0.650407 | ["Apache-2.0"] | dimterex/outlook2tracker | modules/models/worklog.py | 246 | Python |
# Generated by Django 3.1 on 2020-08-08 05:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Battle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time_created', models.DateTimeField(auto_now_add=True)),
('archived', models.BooleanField(default=False)),
('blue_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='battles_as_blue', to=settings.AUTH_USER_MODEL)),
('red_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='battles_as_red', to=settings.AUTH_USER_MODEL)),
],
),
]
| 36.392857 | 164 | 0.663395 | ["MIT"] | LaudateCorpus1/summer-code-jam-2020 | prickly-pufferfish/arena/battle/migrations/0001_initial.py | 1,019 | Python |
from django.contrib import admin
from leaflet.admin import LeafletGeoAdmin
from .models import ProblemLabel, ProblemStatus
# Register your models here.
admin.site.register(ProblemLabel, LeafletGeoAdmin)
admin.site.register(ProblemStatus)
| 22 | 50 | 0.834711 | ["MIT"] | 3ANov/spbauto_map | app/problem_register/admin.py | 242 | Python |
from pygments.lexers import Python3Lexer
from pygments.token import Comment, Keyword, Name, Number, Operator, Punctuation, String
def is_comment(token):
return token in Comment
def is_decorator(token):
return token in Name.Decorator
def is_function(token):
return token in Name.Function
def is_builtin(token):
return token in Name.Builtin
def is_classname(token):
return token in Name.Class
def is_keyword(token):
return token in Keyword
def is_number(token):
return token in Number
def is_operator(token):
return token in Operator
def is_punctuation(token):
return token in Punctuation
def is_string(token):
return token in String
tokenizer_map = {
"keyword": is_keyword,
"builtin": is_builtin,
"class-name": is_classname,
"punctuation": is_punctuation,
"decorator": is_decorator,
"function": is_function,
"operator": is_operator,
"comment": is_comment,
"string": is_string,
"number": is_number,
}
def format_code(code):
pp = Python3Lexer()
tokens = pp.get_tokens(code)
formatted = ""
for token, string in tokens:
updated = False
for span_class, checker in tokenizer_map.items():
if checker(token):
formatted += f'<span class="token {span_class}">{string}</span>'
updated = True
break
if not updated:
formatted += string
return formatted
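# Illustrative usage (added example, not part of the original file): running the module
# directly prints a short Python snippet wrapped in the Prism-style
# <span class="token ..."> tags produced by `format_code`.
if __name__ == "__main__":
    sample = 'def greet(name):\n    # say hello\n    return "Hello, " + name\n'
    print(format_code(sample))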
| 19.986301 | 88 | 0.663468 | ["Apache-2.0"] | 4bhishek10/prefect | docs/tokenizer.py | 1,459 | Python |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Transform a roidb into a trainable roidb by adding a bunch of metadata."""
import numpy as np
from lib.fast_rcnn.config import cfg
import lib.utils.cython_bbox
def prepare_roidb(imdb):
"""Enrich the imdb's roidb by adding some derived quantities that
are useful for training. This function precomputes the maximum
overlap, taken over ground-truth boxes, between each ROI and
each ground-truth box. The class with maximum overlap is also
recorded.
"""
roidb = imdb.roidb
for i in xrange(len(imdb.image_index)):
roidb[i]['image'] = imdb.image_path_at(i)
# need gt_overlaps as a dense array for argmax
gt_overlaps = roidb[i]['gt_overlaps'].toarray()
# max overlap with gt over classes (columns)
max_overlaps = gt_overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = gt_overlaps.argmax(axis=1)
roidb[i]['max_classes'] = max_classes
roidb[i]['max_overlaps'] = max_overlaps
# sanity checks
# max overlap of 0 => class should be zero (background)
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
# max overlap > 0 => class should not be zero (must be a fg class)
nonzero_inds = np.where(max_overlaps > 0)[0]
assert all(max_classes[nonzero_inds] != 0)
def add_bbox_regression_targets(roidb):
"""Add information needed to train bounding-box regressors."""
assert len(roidb) > 0
assert 'max_classes' in roidb[0], 'Did you call prepare_roidb first?'
num_images = len(roidb)
# Infer number of classes from the number of columns in gt_overlaps
num_classes = roidb[0]['gt_overlaps'].shape[1]
for im_i in xrange(num_images):
rois = roidb[im_i]['boxes']
max_overlaps = roidb[im_i]['max_overlaps']
max_classes = roidb[im_i]['max_classes']
roidb[im_i]['bbox_targets'] = \
_compute_targets(rois, max_overlaps, max_classes)
# Compute values needed for means and stds
# var(x) = E(x^2) - E(x)^2
class_counts = np.zeros((num_classes, 1)) + cfg.EPS
sums = np.zeros((num_classes, 4))
squared_sums = np.zeros((num_classes, 4))
for im_i in xrange(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in xrange(1, num_classes):
cls_inds = np.where(targets[:, 0] == cls)[0]
if cls_inds.size > 0:
class_counts[cls] += cls_inds.size
sums[cls, :] += targets[cls_inds, 1:].sum(axis=0)
squared_sums[cls, :] += (targets[cls_inds, 1:] ** 2).sum(axis=0)
means = sums / class_counts
stds = np.sqrt(squared_sums / class_counts - means ** 2)
# Normalize targets
for im_i in xrange(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in xrange(1, num_classes):
cls_inds = np.where(targets[:, 0] == cls)[0]
roidb[im_i]['bbox_targets'][cls_inds, 1:] -= means[cls, :]
roidb[im_i]['bbox_targets'][cls_inds, 1:] /= stds[cls, :]
# These values will be needed for making predictions
# (the predicts will need to be unnormalized and uncentered)
return means.ravel(), stds.ravel()
def _compute_targets(rois, overlaps, labels):
"""Compute bounding-box regression targets for an image."""
# Ensure ROIs are floats
rois = rois.astype(np.float, copy=False)
# Indices of ground-truth ROIs
gt_inds = np.where(overlaps == 1)[0]
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]
# Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = lib.utils.cython_bbox.bbox_overlaps(rois[ex_inds, :],
                                                         rois[gt_inds, :])
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + cfg.EPS
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + cfg.EPS
ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + cfg.EPS
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + cfg.EPS
gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights
targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = np.log(gt_widths / ex_widths)
targets_dh = np.log(gt_heights / ex_heights)
targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
targets[ex_inds, 0] = labels[ex_inds]
targets[ex_inds, 1] = targets_dx
targets[ex_inds, 2] = targets_dy
targets[ex_inds, 3] = targets_dw
targets[ex_inds, 4] = targets_dh
return targets
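# Added note (not part of the original Fast R-CNN source): the targets above follow the
# standard R-CNN bounding-box parameterization between an example box (ex) and its
# assigned ground-truth box (gt):
#     dx = (gt_ctr_x - ex_ctr_x) / ex_width
#     dy = (gt_ctr_y - ex_ctr_y) / ex_height
#     dw = log(gt_width / ex_width)
#     dh = log(gt_height / ex_height)
# Column 0 of `targets` holds the class label; columns 1-4 hold (dx, dy, dw, dh).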
| 41.142857 | 80 | 0.626157 | ["MIT"] | sx14/hierarchical-relationship | lib/roi_data_layer/roidb.py | 5,184 | Python |
"""This module contains the general information for AdaptorFruCapRef ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class AdaptorFruCapRefConsts():
IS_SUPPORTED_NO = "no"
IS_SUPPORTED_YES = "yes"
class AdaptorFruCapRef(ManagedObject):
"""This is AdaptorFruCapRef class."""
consts = AdaptorFruCapRefConsts()
naming_props = set([u'vendor', u'model', u'revision'])
mo_meta = MoMeta("AdaptorFruCapRef", "adaptorFruCapRef", "manufacturer-[vendor]-model-[model]-revision-[revision]", VersionMeta.Version141i, "InputOutput", 0xff, [], [""], [u'equipmentBladeCapProvider', u'equipmentRackUnitCapProvider'], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"is_supported": MoPropertyMeta("is_supported", "isSupported", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, ["no", "yes"], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version141i, MoPropertyMeta.NAMING, 0x8, 1, 510, None, [], []),
"revision": MoPropertyMeta("revision", "revision", "string", VersionMeta.Version141i, MoPropertyMeta.NAMING, 0x10, 1, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version141i, MoPropertyMeta.NAMING, 0x80, 1, 510, None, [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"isSupported": "is_supported",
"model": "model",
"revision": "revision",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"vendor": "vendor",
}
def __init__(self, parent_mo_or_dn, vendor, model, revision, **kwargs):
self._dirty_mask = 0
self.vendor = vendor
self.model = model
self.revision = revision
self.child_action = None
self.is_supported = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "AdaptorFruCapRef", parent_mo_or_dn, **kwargs)
| 52.413793 | 253 | 0.652632 | ["Apache-2.0"] | anoop1984/python_sdk | ucsmsdk/mometa/adaptor/AdaptorFruCapRef.py | 3,040 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This example provides an end-to-end pipeline for a common Kaggle competition.
# The entire pipeline includes common utilities such as k-fold cross validation
# and data pre-processing.
#
# Specifically, the example studies the `House Prices: Advanced Regression
# Techniques` challenge as a case study.
#
# The link to the problem on Kaggle:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques
import numpy as np
import pandas as pd
from mxnet import autograd
from mxnet import gluon
from mxnet import ndarray as nd
# After logging in www.kaggle.com, the training and testing data sets can be downloaded at:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/train.csv
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/test.csv
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
all_X = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'],
test.loc[:, 'MSSubClass':'SaleCondition']))
# Get all the numerical features and apply standardization.
numeric_feas = all_X.dtypes[all_X.dtypes != "object"].index
all_X[numeric_feas] = all_X[numeric_feas].apply(lambda x:
(x - x.mean()) / (x.std()))
# Convert categorical feature values to numerical (including N/A).
all_X = pd.get_dummies(all_X, dummy_na=True)
# Approximate N/A feature value by the mean value of the current feature.
all_X = all_X.fillna(all_X.mean())
num_train = train.shape[0]
# Convert data formats to NDArrays to feed into gluon.
X_train = all_X[:num_train].as_matrix()
X_test = all_X[num_train:].as_matrix()
y_train = train.SalePrice.as_matrix()
X_train = nd.array(X_train)
y_train = nd.array(y_train)
# NDArray.reshape returns a new array, so the result must be assigned back.
y_train = y_train.reshape((num_train, 1))
X_test = nd.array(X_test)
square_loss = gluon.loss.L2Loss()
def get_rmse_log(net, X_train, y_train):
"""Gets root mse between the logarithms of the prediction and the truth."""
num_train = X_train.shape[0]
clipped_preds = nd.clip(net(X_train), 1, float('inf'))
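    # Added note: gluon's L2Loss computes 0.5 * (pred - label) ** 2, so the factor of 2
    # below restores the plain squared error before averaging and taking the square root.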
return np.sqrt(2 * nd.sum(square_loss(
nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
def get_net():
"""Gets a neural network. Better results are obtained with modifications."""
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(50, activation="relu"))
net.add(gluon.nn.Dense(1))
net.initialize()
return net
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size):
"""Trains the model."""
dataset_train = gluon.data.ArrayDataset(X_train, y_train)
data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
shuffle=True)
trainer = gluon.Trainer(net.collect_params(), 'adam',
{'learning_rate': learning_rate,
'wd': weight_decay})
net.collect_params().initialize(force_reinit=True)
for epoch in range(epochs):
for data, label in data_iter_train:
with autograd.record():
output = net(data)
loss = square_loss(output, label)
loss.backward()
trainer.step(batch_size)
avg_loss = get_rmse_log(net, X_train, y_train)
if epoch > verbose_epoch:
print("Epoch %d, train loss: %f" % (epoch, avg_loss))
return avg_loss
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size):
"""Conducts k-fold cross validation for the model."""
assert k > 1
fold_size = X_train.shape[0] // k
train_loss_sum = 0.0
test_loss_sum = 0.0
for test_idx in range(k):
X_val_test = X_train[test_idx * fold_size: (test_idx + 1) *
fold_size, :]
y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
val_train_defined = False
for i in range(k):
if i != test_idx:
X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
if not val_train_defined:
X_val_train = X_cur_fold
y_val_train = y_cur_fold
val_train_defined = True
else:
X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
net = get_net()
train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,
learning_rate, weight_decay, batch_size)
train_loss_sum += train_loss
test_loss = get_rmse_log(net, X_val_test, y_val_test)
print("Test loss: %f" % test_loss)
test_loss_sum += test_loss
return train_loss_sum / k, test_loss_sum / k
# The sets of parameters. Better results are obtained with modifications.
# These parameters can be fine-tuned with k-fold cross-validation.
k = 5
epochs = 100
verbose_epoch = 95
learning_rate = 0.3
weight_decay = 100
batch_size = 100
train_loss, test_loss = \
k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size)
print("%d-fold validation: Avg train loss: %f, Avg test loss: %f" %
(k, train_loss, test_loss))
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size):
"""Trains the model and predicts on the test data set."""
net = get_net()
_ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size)
preds = net(X_test).asnumpy()
test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
submission.to_csv('submission.csv', index=False)
learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size)
| 41.642424 | 91 | 0.665842 | ["Apache-2.0"] | ABAPPLO/Mxnetonspark | example/gluon/kaggle_k_fold_cross_validation.py | 6,871 | Python |
#!/usr/bin/env python3
class Solution:
    def removeDuplicates(self, nums):
        # Keep each value at most twice: `i` marks the index where the current run of
        # equal values starts, and `ret` counts how many elements are kept.
        i, ret = 0, 0
        for j, n in enumerate(nums):
            if nums[i] == n and j - i < 2:
                # Still within the first two copies of the value whose run starts at `i`.
                ret += 1
            elif nums[i] != n:
                # A different value begins a new run at position `j`.
                i = j
                ret += 1
        return ret
sol = Solution()
nums = [0,0,1,1,1,1,2,3,3]
nums = [1,1,1,2,2,3]
nums = []
nums = [1]
nums = [1,1]
print(sol.removeDuplicates(nums))
| 20.666667 | 40 | 0.465438 | ["MIT"] | eroicaleo/LearningPython | interview/leet/80_Remove_Duplicates_from_Sorted_Array_II_v2.py | 434 | Python |
#!/usr/bin/env python3
from string import ascii_uppercase
from re import fullmatch
from time import sleep
from random import Random
# Default game presets.
testing_preset = {'height': 10, 'width': 10, '5_ships': 0, '4_ships': 0, '3_ships': 0, '2_ships': 2, '1_ships': 0, 'allow_mines': True, 'allow_moves': True, 'mine_turns': 5, 'p_type': 'Player', 'player_timer': 0}
normal_mode_preset = {'height': 10, 'width': 10, '5_ships': 1, '4_ships': 1, '3_ships': 2, '2_ships': 1, '1_ships': 0, 'allow_mines': False, 'allow_moves': False, 'mine_turns': None, 'p_type': 'CPU', 'player_timer': 5}
advanced_mode_preset = {'height': 15, 'width': 15, '5_ships': 2, '4_ships': 2, '3_ships': 2, '2_ships': 1, '1_ships': 0, 'allow_mines': True, 'allow_moves': True, 'mine_turns': 5, 'p_type': 'CPU', 'player_timer': 5}
# Miscellaneous global values.
letters = ascii_uppercase
# Global user-variables.
PAD_AMOUNT = 50
class Utils(object):
"""
Utility class used for getting input and other common functions.
Contains many functions to save space by condensing input and custom string formatting methods into one place.
All methods are static, and do not modify parameters in-place.
"""
@staticmethod
def box_string(string, min_width=-1, print_string=False):
"""
Place a string into an ASCII box.
The result is placed inside of a ASCII box consisting of '+' characters for the corners and '-' characters for the edges.
Parameters
----------
string : str
String to be boxed.
min_width : int, optional
Specifies that the box be of a certain minimum width. Defaults to input string width.
print_string : bool, optional
If True, prints the string after building it. Defaults to False.
Returns
-------
str
Input string with a box around it.
"""
# Parameters.
split_string = string.split('\n')
height = len(split_string)
length = max(min_width, *[len(x) for x in split_string])
# String builder.
result = '+' + '-' * (length + 2) + '+\n'
for i in range(height):
result += '| %s |\n' % split_string[i].center(length)
result += '+' + '-' * (length + 2) + '+'
# Print and return result.
if print_string:
print(result)
return result
@staticmethod
def num_input(question, *choices):
"""
Take user input based on several different options.
The input question will be repeated until valid input is given.
The choices will be displayed in order with a number next to them indicating their id.
Responses can be given as the choice id or the full choice name.
Parameters
----------
question : str
String to be displayed as the input question. Will be boxed with Utils#box_string before printing.
*choices : *str
Options for the user to choose from.
Returns
-------
int
Number of the answer choice, corresponding to the index of the choice in *choices.
"""
error = ''
while True:
# Print question and ask for input.
Utils.box_string((error + '\n' + question).strip(), print_string=True)
for i in range(len(choices)):
print('%d: %s' % (i, choices[i]))
response = input('Response: ')
# Test whether input is an integer or string.
if fullmatch(r'\d+', response.strip()):
to_int = int(response.strip())
# Determine if input integer corresponds to one of the answer choices.
if to_int < len(choices):
return to_int
else:
error = 'ERROR: Invalid input! Input integer is not one of the available choices! Please try again.'
continue
else:
# Determine if input string is one of the answer choices.
for i in range(len(choices)):
if response.strip().lower() == choices[i].strip().lower():
return i
error = 'ERROR: Invalid input! Input string is not one of the available choices! Please try again.'
continue
@staticmethod
def string_input(question, condition=r'.+'):
"""
Take string-based user input.
The input question will be repeated until valid input is given, determined by the condition regex.
Parameters
----------
question : str
String to be displayed as the input question. Will be boxed with Utils#box_string before printing.
condition : r-string, optional
Regex to test input string off of.
Returns
-------
str
Input string.
"""
error = ''
while True:
# Print question and ask for input.
Utils.box_string((error + '\n' + question).strip(), print_string=True)
response = input()
# Test if input is valid.
if fullmatch(condition, response):
return response
else:
error = 'ERROR: Invalid input! Please try again.'
continue
@staticmethod
def print_settings(settings):
"""
Pretty-print a settings dictionary.
Parameters
----------
settings : dict
The settings dictionary to pretty-print.
Returns
-------
None
"""
Utils.box_string('Current Settings', print_string=True)
print('Grid Size:')
print('\tWidth: %d' % settings['width'])
print('\tHeight: %d' % settings['height'])
print('Ship Amount:')
print('\t5-Long Ships: %d' % settings['5_ships'])
print('\t4-Long Ships: %d' % settings['4_ships'])
print('\t3-Long Ships: %d' % settings['3_ships'])
print('\t2-Long Ships: %d' % settings['2_ships'])
print('\t1-Long Ships: %d' % settings['1_ships'])
print('Special Abilities:')
print('\tShip Moving: %s' % str(settings['allow_moves']))
print('\tMines: %s' % str(settings['allow_mines']))
if settings['allow_mines']:
print('\tTurns Between Mines: %d' % settings['mine_turns'])
print('Game Type: Player vs. %s' % settings['p_type'])
@staticmethod
def grid_pos_input(height, width, question='Enter a Position:'):
"""
Take user-input in coordinate form.
The input question will be repeated until valid input is given.
The input must be a valid coordinate in battleship form (r'[A-Z]\d+').
The input coordinate must be inside of the grid defined by height and width.
Parameters
----------
height : int
Specifies the height of the grid.
width : int
Specifies the width of the grid.
question : str, optional
String to be displayed as the input question. Will be boxed with Utils#box_string before printing. Defaults to 'Enter a Position'.
Returns
-------
tuple
Contains the following:
int
Height-aligned position (y-position) of input.
int
Width-aligned position (x-position) of input.
"""
error = ''
while True:
# Print the question and ask for input.
Utils.box_string((error + '\n' + question).strip(), print_string=True)
loc = input().upper()
# Test if input is a valid coordinate and is in the grid.
if not fullmatch(r'[A-Z][1-2]?[0-9]', loc):
error = 'ERROR: Invalid input! Input string is not a valid coordinate! Please try again.'
continue
elif loc[0] in letters[:height] and 0 < int(loc[1:]) <= width:
return letters.index(loc[0]), int(loc[1:]) - 1
else:
error = 'ERROR: Invalid input! Input string is not in the grid! Please try again.'
continue
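# Illustrative examples (added commentary, not part of the original script):
#   Utils.box_string('Hello\nWorld') returns
#       +-------+
#       | Hello |
#       | World |
#       +-------+
#   Utils.grid_pos_input(10, 10) maps an answer such as 'B7' to the tuple (1, 6),
#   i.e. (row index of 'B', column 7 converted to a zero-based index).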
class BattleshipGame(object):
"""
Class that handles game execution and running.
Controls game setup based off of a certain settings preset.
Handles all input and output for the game.
Attributes
----------
settings : dict
Settings that the game is running based off of.
height : int
Height of the grids used for the game.
width : int
Width of the grids used for the game.
p1_grid : list
Two dimensional list of ints containing player 1's board.
p1_grid_2 : list
Two dimensional list of ints containing player 1's guesses.
p1_ships : list
List of player 1's ship dicts with position, direction, and size data.
p2_grid : list
Two dimensional list of ints containing player 2's board.
p2_grid_2 : list
Two dimensional list of ints containing player 2's guesses.
p2_ships : list
List of player 2's ship dicts with position, direction, and size data.
p2_cpu : bool
True if player 2 is not a human player, False otherwise.
turn : int
Current turn number.
p1_mines : int
Current amount of mines available to Player 1.
p2_mines : int
Current amount of mines available to Player 2.
p1_move : str
Return message to display to Player 2 on their turn.
p2_move : str
Return message to display to Player 1 on their turn.
"""
def __init__(self, settings):
"""
Constructor for the BattleshipGame class.
Parameters
----------
settings : dict
Settings to create the game based off of.
"""
# Grid attributes.
self.settings = settings
self.height = settings['height']
self.width = settings['width']
# Player 1 grids.
self.p1_grid = [[0 for _ in range(self.width)] for _ in range(self.height)]
self.p1_grid_2 = [[0 for _ in range(self.width)] for _ in range(self.height)]
self.p1_ships = []
# Player 2 grids.
self.p2_grid = [[0 for _ in range(self.width)] for _ in range(self.height)]
self.p2_grid_2 = [[0 for _ in range(self.width)] for _ in range(self.height)]
self.p2_ships = []
# Miscellaneous attributes.
self.p2_cpu = settings['p_type'] == 'CPU'
self.turn = 0
self.p1_mines = 0
self.p2_mines = 0
self.p1_move = ''
self.p2_move = ''
# CPU attributes.
self.cpu_data = {'shots': [], 'misses': [], 'hits': [], 'p1_ships': None}
def update_board(self, player):
"""
Update both grids for a player.
Adds new ships and puts them into the right locations.
Parameters
----------
player : int
Determines which player's grids to print. Zero-indexed.
"""
# Place ships into grid, if not already.
if player == 0: # Player 1
board = self.p1_grid
for ship in self.p1_ships:
if not ship['setup']:
if ship['direction'] == 0:
for i in range(ship['size']):
if not (1 <= board[ship['y_pos']][ship['x_pos'] + i] <= 26 or board[ship['y_pos']][ship['x_pos'] + i] == 26):
board[ship['y_pos']][ship['x_pos'] + i] = ship['num'] + 1
else:
for j in range(ship['size']):
if not (1 <= board[ship['y_pos'] + j][ship['x_pos']] <= 26 or board[ship['y_pos'] + j][ship['x_pos']] == 26):
board[ship['y_pos'] + j][ship['x_pos']] = ship['num'] + 1
ship['setup'] = True
else: # Player 2
board = self.p2_grid
for ship in self.p2_ships:
if not ship['setup']:
if ship['direction'] == 0:
for i in range(ship['size']):
if not (1 <= board[ship['y_pos']][ship['x_pos'] + i] <= 26 or board[ship['y_pos']][ship['x_pos'] + i] == 26):
board[ship['y_pos']][ship['x_pos'] + i] = ship['num'] + 1
else:
for j in range(ship['size']):
if not (1 <= board[ship['y_pos'] + j][ship['x_pos']] <= 26 or board[ship['y_pos'] + j][ship['x_pos']] == 26):
board[ship['y_pos'] + j][ship['x_pos']] = ship['num'] + 1
ship['setup'] = True
def print_board(self, player):
"""
Pretty-print the current boards of a player.
Prints both boards for a player, along with coordinate references, titles, and boxes around the grids.
Parameters
----------
player : int
Determines which player's grids to print. Zero-indexed.
Returns
-------
str
Same as the string that is printed.
"""
# Characters to use while printing.
characters = '.' + letters + '*0#' # 0:Null, 1-26:Ships, 27:Hit, 28:Miss, 29:Mine
# Update board.
self.update_board(player)
# Get boards to print.
if player == 0: # Player 1
board = self.p1_grid
board_2 = self.p1_grid_2
else: # Player 2
board = self.p2_grid
board_2 = self.p2_grid_2
# Build header.
result = ' +' + '-' * (self.width * 2 + 1) + '+' + '-' * (self.width * 2 + 1) + '+\n'
result += ' |' + 'Your Board'.center(self.width * 2 + 1) + '|' + 'Enemy Board'.center(self.width * 2 + 1) + '|\n'
result += ' +' + '-' * (self.width * 2 + 1) + '+' + '-' * (self.width * 2 + 1) + '+\n'
# Build x-coordinate reference.
if self.width > 9:
result += ' | ' + ' '.join([str(x + 1).rjust(2)[0] for x in range(self.width)]) + ' | ' + ' '.join([str(x + 1).rjust(2)[0] for x in range(self.width)]) + ' |\n'
result += ' | ' + ' '.join([str(x + 1).rjust(2)[1] for x in range(self.width)]) + ' | ' + ' '.join([str(x + 1).rjust(2)[1] for x in range(self.width)]) + ' |\n'
result += '+---+' + '-' * (self.width * 2 + 1) + '+' + '-' * (self.width * 2 + 1) + '+\n'
# Build y-coordinate reference and grid.
for i in range(self.height):
result += '| ' + letters[i] + ' | ' + ' '.join([characters[x] for x in board[i]]) + ' | ' + ' '.join([characters[x] for x in board_2[i]]) + ' |\n'
result += '+---+' + '-' * (self.width * 2 + 1) + '+' + '-' * (self.width * 2 + 1) + '+'
# Print and return result.
print(result)
return result
def setup_ship(self, pos, direction, player, count, size):
"""
Create a ship.
Creates a ship dictionary based on positional, directional, player, and size data and tests if placement is legal.
Parameters
----------
pos : tuple
(y,x) coordinate pair of top-left corner of the ship.
direction : int
Determines the direction of the ship:
0: Horizontal.
1: Vertical.
player : int
Determines which player to assign the ship to. Zero-indexed.
count : int
Current ship count for internal tracking use.
size : int
Length of the ship.
Returns
-------
str
Error string if an error occurred, None otherwise.
"""
try:
# Test if the ship does not overlap another ship.
if player == 0: # Player 1
board = self.p1_grid
if direction == 0:
for i in range(size):
if board[pos[0]][pos[1] + i] != 0:
return 'ERROR: You cannot place a ship on top of another!'
else:
for j in range(size):
if board[pos[0] + j][pos[1]] != 0:
return 'ERROR: You cannot place a ship on top of another!'
else: # Player 2
board = self.p2_grid
if direction == 0:
for i in range(size):
if board[pos[0]][pos[1] + i] != 0:
return 'ERROR: You cannot place a ship on top of another!'
else:
for j in range(size):
if board[pos[0] + j][pos[1]] != 0:
return 'ERROR: You cannot place a ship on top of another!'
except IndexError:
# Catch if ship would be placed out-of-bounds.
return 'ERROR: You must place a ship inside the grid boundaries!'
# Create the ship's dictionary and append it to the player's ship list.
if player == 0:
self.p1_ships.append({'num': count, 'size': size, 'x_pos': pos[1], 'y_pos': pos[0], 'direction': direction, 'setup': False, 'health': size, 'hits': []})
else:
self.p2_ships.append({'num': count, 'size': size, 'x_pos': pos[1], 'y_pos': pos[0], 'direction': direction, 'setup': False, 'health': size, 'hits': []})
return None
def setup_ships(self, size, player, count):
"""
Setup all the ships of a particular size for a certain player.
Sets up all of the length-n size ships for a player.
Count is not updated in-place.
Parameters
----------
size : int
Length of the ships.
player : int
Determines which player to assign the ships to. Zero-indexed.
count : int
Current ship count for internal tracking use.
Returns
-------
int
The updated cumulative ship count.
"""
# Setup number of ships based on value defined in game settings.
for i in range(self.settings['%d_ships' % size]):
error = ''
while True:
# Print current board for player reference.
self.print_board(player)
# Take ship details from player.
pos = Utils.grid_pos_input(self.height, self.width, question=(error + '\nWhere do you want to place ship \'%s\' (%d-long)?' % (letters[count], size)).strip())
direction = Utils.num_input('Which direction?', 'Horizontal', 'Vertical')
# Determine if the ship needs to be inputted again.
error = self.setup_ship(pos, direction, player, count, size)
if error is None:
break
count += 1
# Return updated cumulative ship total.
return count
def p1_turn(self):
"""
Execute a turn for Player 1.
Handles input and output for the turn and updates both player's grids.
Returns
-------
bool
True if game ends after the move, False otherwise
"""
print('\n' * PAD_AMOUNT) # Pad previous output.
Utils.box_string('Player 1\'s Turn', min_width=self.width * 4 + 5, print_string=True)
self.p1_move = ''
# Test if Player 2 is a human.
if not self.p2_cpu:
# Alert Player 2 to look away.
Utils.box_string('Player 2, please look away.', min_width=self.width * 4 + 5, print_string=True)
sleep(self.settings['player_timer'])
self.print_board(0)
# Notify player if a ship moved.
if self.p2_move != '':
Utils.box_string('Note: ' + self.p2_move, min_width=self.width * 4 + 5, print_string=True)
# Determine input method based on possible actions.
if self.settings['allow_moves']:
if self.settings['allow_mines'] and self.p1_mines > 0:
action = Utils.num_input('What do you want to do?', 'Fire Missile', 'Move a Ship', 'Clear Misses', 'Clear Hits', 'Place a Mine')
else:
action = Utils.num_input('What do you want to do?', 'Fire Missile', 'Move a Ship', 'Clear Misses', 'Clear Hits')
if action == 0: # Fire Missile
error = ''
while True:
y_pos, x_pos = Utils.grid_pos_input(self.height, self.width, question=(error+'\nWhere do you want to fire?').strip())
if True in [(y_pos, x_pos) in self.p2_ships[x]['hits'] for x in range(len(self.p2_ships))] or self.p1_grid_2[y_pos][x_pos] > 26:
error = 'ERROR: You already guessed there!'
continue
if self.p2_grid[y_pos][x_pos] > 26:
error = 'ERROR: You already guessed there!'
continue
if self.p2_grid[y_pos][x_pos] != 0:
Utils.box_string('Direct Hit!', min_width=self.width * 4 + 5, print_string=True)
# Update ship.
self.p2_ships[self.p2_grid[y_pos][x_pos] - 1]['health'] -= 1
self.p2_ships[self.p2_grid[y_pos][x_pos] - 1]['hits'].append((y_pos, x_pos))
# Test if ship still stands.
if self.p2_ships[self.p2_grid[y_pos][x_pos] - 1]['health'] == 0:
Utils.box_string('You sunk a ship!', min_width=self.width * 4 + 5, print_string=True)
# Update grid.
self.p1_grid_2[y_pos][x_pos] = 27
self.p2_grid[y_pos][x_pos] = 27
else:
Utils.box_string('Miss!', min_width=self.width * 4 + 5, print_string=True)
# Update grid.
self.p1_grid_2[y_pos][x_pos] = 28
self.p2_grid[y_pos][x_pos] = 28
break
elif action == 1: # Move Ship
error = ''
ship_num = -1
while True:
ship_num = letters.index(Utils.string_input((error + '\nWhich ship do you want to move?').strip(), condition=('[A-%sa-%s]' % (letters[len(self.p1_ships) - 1], letters[len(self.p1_ships) - 1].lower()))).upper())
ship = self.p1_ships[ship_num]
if ship['health'] == 0:
error = 'ERROR: That ship is sunk!'
continue
move_direction = Utils.num_input('Which direction do you want to move it?', 'Up', 'Down', 'Left', 'Right')
error = ''
try:
if move_direction < 2: # Up or down.
true_dir = -1 if move_direction == 0 else 1
board = self.p1_grid
if ship['direction'] == 0:
for i in range(ship['size']):
# Check if another ship is there.
for ship2 in self.p1_ships:
if ship2['direction'] == 0:
for k in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] + true_dir and ship2['x_pos'] + k == ship['x_pos'] + i:
error = 'ERROR: You cannot move your ship there!'
continue
else:
for l in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] + true_dir and ship2['x_pos'] == ship['x_pos'] + i:
error = 'ERROR: You cannot move your ship there!'
continue
if (1 <= board[ship['y_pos'] + true_dir][ship['x_pos'] + i] <= 26 or board[ship['y_pos'] + true_dir][ship['x_pos'] + i] == 29) and (board[ship['y_pos'] + true_dir][ship['x_pos'] + i] != ship_num + 1) or ship['y_pos'] + true_dir < 0 or ship['y_pos'] >= self.height:
error = 'ERROR: You cannot move your ship there!'
else:
for j in range(ship['size']):
# Check if another ship is there.
for ship2 in self.p1_ships:
if ship2['direction'] == 0:
for k in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] + j + true_dir and ship2['x_pos'] + k == ship['x_pos']:
error = 'ERROR: You cannot move your ship there!'
continue
else:
for l in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] + j + true_dir and ship2['x_pos'] == ship['x_pos']:
error = 'ERROR: You cannot move your ship there!'
continue
if (1 <= board[ship['y_pos'] + j + true_dir][ship['x_pos']] <= 26 or board[ship['y_pos'] + j + true_dir][ship['x_pos']] == 29) and (board[ship['y_pos'] + j + true_dir][ship['x_pos']] != ship_num + 1) or ship['y_pos'] + j + true_dir < 0 or ship['y_pos'] >= self.height:
error = 'ERROR: You cannot move your ship there!'
if error == '':
self.p1_ships[ship_num]['setup'] = False
self.p1_ships[ship_num]['y_pos'] += true_dir
self.p1_move = 'Player 1 just moved a ship ' + ('up!' if move_direction == 0 else 'down!')
# Update board positions
if ship['direction'] == 0:
for i in range(ship['size'] - 1):
board[ship['y_pos'] + true_dir][ship['x_pos'] + i] = 0
else:
for j in range(ship['size'] - 1):
board[ship['y_pos'] + j + true_dir][ship['x_pos']] = 0
break
else: # Left or right.
true_dir = -1 if move_direction == 2 else 1
board = self.p1_grid
if ship['direction'] == 0:
for i in range(ship['size']):
# Check if another ship is there.
for ship2 in self.p1_ships:
if ship2['direction'] == 0:
for k in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] and ship2['x_pos'] + k == ship['x_pos'] + i + true_dir:
error = 'ERROR: You cannot move your ship there!'
continue
else:
for l in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] and ship2['x_pos'] == ship['x_pos'] + i + true_dir:
error = 'ERROR: You cannot move your ship there!'
continue
if (1 <= board[ship['y_pos']][ship['x_pos'] + i + true_dir] <= 26 or board[ship['y_pos']][ship['x_pos'] + i + true_dir] == 29) and (board[ship['y_pos']][ship['x_pos'] + i + true_dir] != ship_num + 1) or ship['x_pos'] + i + true_dir < 0 or ship['x_pos'] >= self.width:
error = 'ERROR: You cannot move your ship there!'
else:
for j in range(ship['size']):
# Check if another ship is there.
for ship2 in self.p1_ships:
if ship2['direction'] == 0:
for k in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] + j and ship2['x_pos'] + k == ship['x_pos'] + true_dir:
error = 'ERROR: You cannot move your ship there!'
continue
else:
for l in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] + j and ship2['x_pos'] == ship['x_pos'] + true_dir:
error = 'ERROR: You cannot move your ship there!'
continue
if (1 <= board[ship['y_pos'] + j][ship['x_pos'] + true_dir] <= 26 or board[ship['y_pos'] + j][ship['x_pos'] + true_dir] == 29) and (board[ship['y_pos'] + j][ship['x_pos'] + true_dir] != ship_num + 1) or ship['x_pos'] + true_dir < 0 or ship['x_pos'] >= self.width:
error = 'ERROR: You cannot move your ship there!'
if error == '':
self.p1_ships[ship_num]['setup'] = False
self.p1_ships[ship_num]['x_pos'] += true_dir
self.p1_move = 'Player 1 just moved a ship to the ' + ('left!' if move_direction == 2 else 'right!')
# Update board positions.
if ship['direction'] == 0:
for i in range(ship['size'] - 1):
board[ship['y_pos']][ship['x_pos'] + i + true_dir] = 0
else:
for j in range(ship['size'] - 1):
board[ship['y_pos'] + j][ship['x_pos'] + true_dir] = 0
break
except IndexError:
error = 'ERROR: You cannot move your ship there!'
# Update board positions again, just in case.
for i in range(self.height):
for j in range(self.width):
if board[i][j] == ship_num + 1:
board[i][j] = 0
self.p1_ships[ship_num]['hits'] = []
self.update_board(0)
elif action == 2: # Clear Misses
for i in range(self.height):
for j in range(self.width):
if self.p1_grid_2[i][j] == 28:
self.p1_grid_2[i][j] = 0
return self.p1_turn()
elif action == 3: # Clear Hits
for i in range(self.height):
for j in range(self.width):
if self.p1_grid_2[i][j] == 27:
self.p1_grid_2[i][j] = 0
return self.p1_turn()
else: # Place Mine
error = ''
while True:
y_pos, x_pos = Utils.grid_pos_input(self.height, self.width, question=(error + '\nWhere do you want to place the mine?').strip())
if self.p2_grid[y_pos][x_pos] == 29:
error = 'ERROR: You already placed a mine there!'
continue
if 1 <= self.p2_grid[y_pos][x_pos] <= 26:
ship_num = self.p2_grid[y_pos][x_pos] - 1
self.p2_ships[ship_num]['health'] = 0
for i in range(self.height):
for j in range(self.width):
if self.p2_grid[i][j] == ship_num + 1:
self.p2_grid[i][j] = 27
Utils.box_string('You sunk a ship!', min_width=self.width * 4 + 5, print_string=True)
self.p2_grid[y_pos][x_pos] = 29
self.p1_grid_2[y_pos][x_pos] = 29
self.p1_mines -= 1
break
else:
error = ''
while True:
y_pos, x_pos = Utils.grid_pos_input(self.height, self.width, question=(error + '\nWhere do you want to fire?').strip())
if self.p1_grid_2[y_pos][x_pos] != 0:
error = 'ERROR: You already guessed there!'
continue
if self.p2_grid[y_pos][x_pos] > 26:
error = 'ERROR: You already guessed there!'
continue
if self.p2_grid[y_pos][x_pos] != 0:
Utils.box_string('Direct Hit!', min_width=self.width * 4 + 5, print_string=True)
# Update ship.
self.p2_ships[self.p2_grid[y_pos][x_pos] - 1]['health'] -= 1
self.p2_ships[self.p2_grid[y_pos][x_pos] - 1]['hits'].append((y_pos, x_pos))
# Test if ship still stands.
if self.p2_ships[self.p2_grid[y_pos][x_pos] - 1]['health'] == 0:
Utils.box_string('You sunk a ship!', min_width=self.width * 4 + 5, print_string=True)
# Update grid.
self.p1_grid_2[y_pos][x_pos] = 27
self.p2_grid[y_pos][x_pos] = 27
else:
Utils.box_string('Miss!', min_width=self.width * 4 + 5, print_string=True)
# Update grid.
self.p1_grid_2[y_pos][x_pos] = 28
self.p2_grid[y_pos][x_pos] = 28
break
# End turn.
Utils.box_string('Your turn is now over.', print_string=True)
sleep(self.settings['player_timer'])
# Detect if game is over.
return sum([x['health'] for x in self.p2_ships]) == 0
def p2_turn(self):
"""
Execute a turn for Player 2.
Handles input and output for the turn and updates both player's grids.
Returns
-------
bool
True if game ends after the move, False otherwise
"""
print('\n' * PAD_AMOUNT) # Pad previous output.
Utils.box_string('Player 2\'s Turn', min_width=self.width * 4 + 5, print_string=True)
self.p2_move = ''
# Test if Player 2 is a human.
if not self.p2_cpu: # Player is a human
# Alert Player 1 to look away.
Utils.box_string('Player 1, please look away.', min_width=self.width * 4 + 5, print_string=True)
sleep(self.settings['player_timer'])
self.print_board(1)
if self.p1_move != '':
Utils.box_string('Note: ' + self.p1_move, min_width=self.width * 4 + 5, print_string=True)
# Determine input method based on possible actions.
if self.settings['allow_moves']:
if self.settings['allow_mines'] and self.p2_mines > 0:
action = Utils.num_input('What do you want to do?', 'Fire Missile', 'Move a Ship', 'Clear Misses', 'Clear Hits', 'Place a Mine')
else:
action = Utils.num_input('What do you want to do?', 'Fire Missile', 'Move a Ship', 'Clear Misses', 'Clear Hits')
if action == 0: # Fire Missile
error = ''
while True:
y_pos, x_pos = Utils.grid_pos_input(self.height, self.width, question=(error+'\nWhere do you want to fire?').strip())
if True in [(y_pos, x_pos) in self.p1_ships[x]['hits'] for x in range(len(self.p1_ships))] or self.p2_grid_2[y_pos][x_pos] > 26:
error = 'ERROR: You already guessed there!'
continue
if self.p1_grid[y_pos][x_pos] > 26:
error = 'ERROR: You already guessed there!'
continue
if self.p1_grid[y_pos][x_pos] != 0:
Utils.box_string('Direct Hit!', min_width=self.width * 4 + 5, print_string=True)
# Update ship.
self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['health'] -= 1
self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['hits'].append((y_pos, x_pos))
# Test if ship still stands.
if self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['health'] == 0:
Utils.box_string('You sunk a ship!', min_width=self.width * 4 + 5, print_string=True)
# Update grid.
self.p2_grid_2[y_pos][x_pos] = 27
self.p1_grid[y_pos][x_pos] = 27
else:
Utils.box_string('Miss!', min_width=self.width * 4 + 5, print_string=True)
# Update grid.
self.p2_grid_2[y_pos][x_pos] = 28
self.p1_grid[y_pos][x_pos] = 28
break
elif action == 1: # Move Ship
error = ''
ship_num = -1
while True:
                        ship_num = letters.index(Utils.string_input((error + '\nWhich ship do you want to move?').strip(), condition=('[A-%sa-%s]' % (letters[len(self.p2_ships) - 1], letters[len(self.p2_ships) - 1].lower()))).upper())
ship = self.p2_ships[ship_num]
if ship['health'] == 0:
error = 'ERROR: That ship is sunk!'
continue
move_direction = Utils.num_input('Which direction do you want to move it?', 'Up', 'Down', 'Left', 'Right')
error = ''
try:
if move_direction < 2: # Up or down.
true_dir = -1 if move_direction == 0 else 1
board = self.p2_grid
if ship['direction'] == 0:
for i in range(ship['size']):
# Check if another ship is there.
for ship2 in self.p2_ships:
if ship2['direction'] == 0:
for k in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] + true_dir and ship2['x_pos'] + k == ship['x_pos'] + i:
error = 'ERROR: You cannot move your ship there!'
continue
else:
for l in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] + true_dir and ship2['x_pos'] == ship['x_pos'] + i:
error = 'ERROR: You cannot move your ship there!'
continue
if (1 <= board[ship['y_pos'] + true_dir][ship['x_pos'] + i] <= 26 or board[ship['y_pos'] + true_dir][ship['x_pos'] + i] == 29) and (board[ship['y_pos'] + true_dir][ship['x_pos'] + i] != ship_num + 1) or ship['y_pos'] + true_dir < 0 or ship['y_pos'] >= self.height:
error = 'ERROR: You cannot move your ship there!'
else:
for j in range(ship['size']):
# Check if another ship is there.
for ship2 in self.p2_ships:
if ship2['direction'] == 0:
for k in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] + j + true_dir and ship2['x_pos'] + k == ship['x_pos']:
error = 'ERROR: You cannot move your ship there!'
continue
else:
for l in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] + j + true_dir and ship2['x_pos'] == ship['x_pos']:
error = 'ERROR: You cannot move your ship there!'
continue
if (1 <= board[ship['y_pos'] + j + true_dir][ship['x_pos']] <= 26 or board[ship['y_pos'] + j + true_dir][ship['x_pos']] == 29) and (board[ship['y_pos'] + j + true_dir][ship['x_pos']] != ship_num + 1) or ship['y_pos'] + j + true_dir < 0 or ship['y_pos'] >= self.height:
error = 'ERROR: You cannot move your ship there!'
if error == '':
self.p2_ships[ship_num]['setup'] = False
self.p2_ships[ship_num]['y_pos'] += true_dir
self.p2_move = 'Player 2 just moved a ship ' + ('up!' if move_direction == 0 else 'down!')
# Update board positions
if ship['direction'] == 0:
for i in range(ship['size'] - 1):
board[ship['y_pos'] + true_dir][ship['x_pos'] + i] = 0
else:
for j in range(ship['size'] - 1):
board[ship['y_pos'] + j + true_dir][ship['x_pos']] = 0
break
else: # Left or right.
true_dir = -1 if move_direction == 2 else 1
board = self.p2_grid
if ship['direction'] == 0:
for i in range(ship['size']):
# Check if another ship is there.
for ship2 in self.p2_ships:
if ship2['direction'] == 0:
for k in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] and ship2['x_pos'] + k == ship['x_pos'] + i + true_dir:
error = 'ERROR: You cannot move your ship there!'
continue
else:
for l in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] and ship2['x_pos'] == ship['x_pos'] + i + true_dir:
error = 'ERROR: You cannot move your ship there!'
continue
if (1 <= board[ship['y_pos']][ship['x_pos'] + i + true_dir] <= 26 or board[ship['y_pos']][ship['x_pos'] + i + true_dir] == 29) and (board[ship['y_pos']][ship['x_pos'] + i + true_dir] != ship_num + 1) or ship['x_pos'] + i + true_dir < 0 or ship['x_pos'] >= self.width:
error = 'ERROR: You cannot move your ship there!'
else:
for j in range(ship['size']):
# Check if another ship is there.
for ship2 in self.p2_ships:
if ship2['direction'] == 0:
for k in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] + j and ship2['x_pos'] + k == ship['x_pos'] + true_dir:
error = 'ERROR: You cannot move your ship there!'
continue
else:
for l in range(ship2['size']):
if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] + j and ship2['x_pos'] == ship['x_pos'] + true_dir:
error = 'ERROR: You cannot move your ship there!'
continue
if (1 <= board[ship['y_pos'] + j][ship['x_pos'] + true_dir] <= 26 or board[ship['y_pos'] + j][ship['x_pos'] + true_dir] == 29) and (board[ship['y_pos'] + j][ship['x_pos'] + true_dir] != ship_num + 1) or ship['x_pos'] + true_dir < 0 or ship['x_pos'] >= self.width:
error = 'ERROR: You cannot move your ship there!'
if error == '':
self.p2_ships[ship_num]['setup'] = False
self.p2_ships[ship_num]['x_pos'] += true_dir
self.p2_move = 'Player 2 just moved a ship to the ' + ('left!' if move_direction == 2 else 'right!')
# Update board positions
if ship['direction'] == 0:
for i in range(ship['size'] - 1):
board[ship['y_pos']][ship['x_pos'] + i + true_dir] = 0
else:
for j in range(ship['size'] - 1):
board[ship['y_pos'] + j][ship['x_pos'] + true_dir] = 0
break
except IndexError:
error = 'ERROR: You cannot move your ship there! (INDEX ERROR)'
# Update board positions again, just in case.
for i in range(self.height):
for j in range(self.width):
if board[i][j] == ship_num + 1:
board[i][j] = 0
self.p2_ships[ship_num]['hits'] = []
self.update_board(1)
elif action == 2: # Clear Misses
for i in range(self.height):
for j in range(self.width):
if self.p2_grid_2[i][j] == 28:
self.p2_grid_2[i][j] = 0
return self.p2_turn()
elif action == 3: # Clear Hits
for i in range(self.height):
for j in range(self.width):
if self.p2_grid_2[i][j] == 27:
self.p2_grid_2[i][j] = 0
return self.p2_turn()
else: # Place Mine
error = ''
while True:
y_pos, x_pos = Utils.grid_pos_input(self.height, self.width, question=(error + '\nWhere do you want to place the mine?').strip())
if self.p1_grid[y_pos][x_pos] == 29:
error = 'ERROR: You already placed a mine there!'
continue
if 1 <= self.p1_grid[y_pos][x_pos] <= 26:
ship_num = self.p1_grid[y_pos][x_pos] - 1
self.p1_ships[ship_num]['health'] = 0
for i in range(self.height):
for j in range(self.width):
if self.p1_grid[i][j] == ship_num + 1:
self.p1_grid[i][j] = 27
Utils.box_string('You sunk a ship!', min_width=self.width * 4 + 5, print_string=True)
self.p1_grid[y_pos][x_pos] = 29
self.p2_grid_2[y_pos][x_pos] = 29
self.p2_mines -= 1
break
else:
error = ''
while True:
y_pos, x_pos = Utils.grid_pos_input(self.height, self.width, question=(error + '\nWhere do you want to fire?').strip())
if self.p2_grid_2[y_pos][x_pos] != 0:
error = 'ERROR: You already guessed there!'
continue
if self.p1_grid[y_pos][x_pos] > 26:
error = 'ERROR: You already guessed there!'
continue
if self.p1_grid[y_pos][x_pos] != 0:
Utils.box_string('Direct Hit!', min_width=self.width * 4 + 5, print_string=True)
# Update ship.
self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['health'] -= 1
self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['hits'].append((y_pos, x_pos))
# Test if ship still stands.
if self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['health'] == 0:
Utils.box_string('You sunk a ship!', min_width=self.width * 4 + 5, print_string=True)
# Update grid.
self.p2_grid_2[y_pos][x_pos] = 27
self.p1_grid[y_pos][x_pos] = 27
else:
Utils.box_string('Miss!', min_width=self.width * 4 + 5, print_string=True)
# Update grid.
self.p2_grid_2[y_pos][x_pos] = 28
self.p1_grid[y_pos][x_pos] = 28
break
else: # Player is CPU
# Alert Player 1 of CPU turn.
Utils.box_string('CPU is deciding...', min_width=self.width * 4 + 5, print_string=True)
sleep(2)
rng = Random()
while True:
pos = (rng.randrange(self.height), rng.randrange(self.width))
y_pos, x_pos = pos
if self.p1_grid[y_pos][x_pos] != 0:
# Update ship.
self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['health'] -= 1
self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['hits'].append((y_pos, x_pos))
# Test if ship still stands.
if self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['health'] == 0:
self.cpu_data['p1_ships']['%d_ships' % self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['size']] -= 1
# Update grid.
self.p2_grid_2[y_pos][x_pos] = 27
self.p1_grid[y_pos][x_pos] = 27
else:
# Update grid.
self.p2_grid_2[y_pos][x_pos] = 28
self.p1_grid[y_pos][x_pos] = 28
break
# End turn.
Utils.box_string('Your turn is now over.', print_string=True)
sleep(self.settings['player_timer'])
# Detect if game is over.
return sum([x['health'] for x in self.p1_ships]) == 0
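    # Note on the grid encoding used by the turn logic above (inferred from the
    # code, not stated explicitly by the original author): cell values 1-26
    # identify a ship (ship index + 1), 27 marks a hit, 28 marks a miss, and
    # 29 marks a mine.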
def start_game(self):
"""
Start a new game.
Starts a game with the settings provided in the constructor.
All game code is contained here, with relevant helper methods also called here.
Every game has two stages: Setup and Play.
Returns
-------
int
            Winning player's number (1 or 2).
"""
# Setup Phase:
# In this stage, both players choose where to place their ships.
print('\n' * PAD_AMOUNT) # Pad previous output.
Utils.box_string('Setup Phase', min_width=self.width * 4 + 5, print_string=True)
Utils.box_string('Player 1\'s Turn', min_width=self.width * 4 + 5, print_string=True)
# Test if Player 2 is a human.
if not self.p2_cpu:
# Alert Player 2 to look away.
Utils.box_string('Player 2, please look away.', min_width=self.width * 4 + 5, print_string=True)
sleep(self.settings['player_timer'])
# Player 1
Utils.box_string('Player 1 Setup', min_width=self.width * 4 + 5, print_string=True)
p1_ship_count = 0
for i in range(5):
p1_ship_count = self.setup_ships(i + 1, 0, p1_ship_count)
# Test if Player 2 is a human.
if self.p2_cpu: # Player 2 is CPU
# Setup CPU data.
self.cpu_data['p1_ships'] = {}
for size in range(1, 6):
self.cpu_data['p1_ships']['%d_ships' % size] = self.settings['%d_ships' % size]
# Setup ships.
p2_ship_count = 0
rng = Random()
for size in range(1, 6):
count = 0
# Setup number of ships based on value defined in game settings.
for i in range(self.settings['%d_ships' % size]):
while True:
# Generate ship details.
                        pos = (rng.randrange(self.height), rng.randrange(self.width))
direction = rng.randrange(2)
# Determine if the ship needs to be randomized again.
error = self.setup_ship(pos, direction, 1, p2_ship_count + count, size)
if error is None:
print('Placed ship ' + str(p2_ship_count + count) + ' at ' + str(pos) + ' with direction ' + str(direction) + ' with size ' + str(size))
break
count += 1
# Update cumulative ship total.
p2_ship_count += count
else: # Player 2 is a human
print('\n' * PAD_AMOUNT) # Pad previous output.
Utils.box_string('Player 2\'s Turn', min_width=self.width * 4 + 5, print_string=True)
# Alert Player 1 to look away.
Utils.box_string('Player 1, please look away.', min_width=self.width * 4 + 5, print_string=True)
sleep(self.settings['player_timer'])
# Player 2
Utils.box_string('Player 2 Setup', min_width=self.width * 4 + 5, print_string=True)
p2_ship_count = 0
for i in range(5):
p2_ship_count = self.setup_ships(i + 1, 1, p2_ship_count)
# Update both boards.
self.update_board(0)
self.update_board(1)
# Play Phase:
# In this stage, the game itself is played.
Utils.box_string('Play Phase', min_width=self.width * 4 + 5, print_string=True)
# Main game loop.
winner = None
while True:
if self.settings['mine_turns'] is not None and self.turn % (self.settings['mine_turns'] * 2) == 0:
self.p1_mines += 1
self.p2_mines += 1
if self.turn % 2 == 0:
if self.p1_turn():
winner = 1
break
else:
if self.p2_turn():
winner = 2
break
self.turn += 1
# Print winner.
Utils.box_string('Player %d won!' % winner, min_width=self.width * 4 + 5, print_string=True)
return winner
def create_game(gm):
"""
Configure and create a game.
Creates a game with base settings equivalent to one of the default presets.
Allows user to customize the settings before starting the game.
Parameters
----------
gm : int
Game type to replicate:
0: Normal mode.
1: Advanced mode.
Returns
-------
BattleshipGame
Game instance with user-chosen settings.
"""
print('\n' * PAD_AMOUNT) # Pad previous output.
# Choose and print default settings.
if gm == 0:
Utils.box_string('Normal Mode', print_string=True)
settings = normal_mode_preset
elif gm == 1:
Utils.box_string('Advanced Mode', print_string=True)
settings = advanced_mode_preset
else: # TODO: REMOVE TESTING MODE
Utils.box_string('Testing Mode', print_string=True)
settings = testing_preset
# Print current settings.
Utils.print_settings(settings)
# Change settings, if applicable.
if Utils.num_input('Would you like to change the settings?', 'No', 'Yes') == 1:
while True:
# Determine which setting group to modify.
setting = Utils.num_input('Settings', 'Grid Size', 'Ship Amount', 'Special Abilities', 'Game Type', 'Exit')
# Modify setting groups.
if setting == 0: # Grid Size
# Take grid dimensions.
settings['width'] = int(Utils.string_input('Grid Width (5-26)', condition=r'^[5-9]$|^1[0-9]$|^2[0-6]$'))
settings['height'] = int(Utils.string_input('Grid Height (5-26)', condition=r'^[5-9]$|^1[0-9]$|^2[0-6]$'))
elif setting == 1: # Ship Amount
while True:
# Take ship amounts.
settings['5_ships'] = int(Utils.string_input('5-Long Ships (0-9)', condition=r'[0-9]'))
settings['4_ships'] = int(Utils.string_input('4-Long Ships (0-9)', condition=r'[0-9]'))
settings['3_ships'] = int(Utils.string_input('3-Long Ships (0-9)', condition=r'[0-9]'))
settings['2_ships'] = int(Utils.string_input('2-Long Ships (0-9)', condition=r'[0-9]'))
settings['1_ships'] = int(Utils.string_input('1-Long Ships (0-9)', condition=r'[0-9]'))
# Test if ship amounts are valid.
count = settings['5_ships'] + settings['4_ships'] + settings['3_ships'] + settings['2_ships'] + settings['1_ships']
if count == 0:
Utils.box_string('You must have at least one ship!', print_string=True)
elif count > 26:
Utils.box_string('You have put in too many ships! (max 26)', print_string=True)
elif settings['5_ships'] * 5 + settings['4_ships'] * 4 + settings['3_ships'] * 3 + settings['2_ships'] * 2 + settings['1_ships'] > settings['width'] * settings['height']:
Utils.box_string('Your ships will not fit inside of the board!', print_string=True)
else:
break
elif setting == 2: # Special Abilities
# Take abilities.
settings['allow_moves'] = Utils.num_input('Ship Moving', 'Enable', 'Disable') == 0
if settings['allow_moves']:
settings['allow_mines'] = Utils.num_input('Mines', 'Enable', 'Disable') == 0
settings['mine_turns'] = int(Utils.string_input('Turns Between Mines', condition=r'\d+')) if settings['allow_mines'] else None
elif setting == 3: # Game Type
# Take game type.
settings['p_type'] = ['CPU', 'Player'][Utils.num_input('Game Type', 'CPU', 'Player')]
# Print updated settings.
Utils.print_settings(settings)
if setting == 4: # Exit
break
return BattleshipGame(settings)
# Test if code is run independently or on repl.it.
if __name__ == '__main__' or __name__ == 'builtins':
print('\n' * PAD_AMOUNT) # Pad previous output.
Utils.box_string('Welcome to Battleship!', print_string=True)
passed_settings = None
while True:
# Create game.
gamemode = Utils.num_input('Which gamemode do you want to play?', 'Normal', 'Advanced', 'testing') # TODO: REMOVE TESTING MODE
if passed_settings is not None:
bs = BattleshipGame(passed_settings)
else:
bs = create_game(gamemode)
passed_settings = bs.settings
# Play game.
bs.start_game()
# Determine if the game should be played again.
again = Utils.num_input('Do you want to play again?', 'Yes [Same Settings]', 'Yes [Different Settings]', 'No')
if again == 0:
pass
elif again == 1:
passed_settings = None
else:
break
| 49.176285 | 308 | 0.466789 | [
"MIT"
] | GamrCorps/STEMExpoBattleship | battleship.py | 62,208 | Python |
from aiocloudflare.commons.auth import Auth
class Domains(Auth):
_endpoint1 = "accounts"
_endpoint2 = "registrar/domains"
_endpoint3 = None
| 19.25 | 43 | 0.727273 | [
"MIT"
] | Stewart86/aioCloudflare | aiocloudflare/api/accounts/registrar/domains/domains.py | 154 | Python |
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from marshmallow import fields, validates_schema
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
from polyaxon.schemas.fields.docker_image import validate_image
from polyaxon.schemas.fields.ref_or_obj import RefOrObject
POLYAXON_DOCKERFILE_NAME = "Dockerfile"
POLYAXON_DOCKER_WORKDIR = "/code"
POLYAXON_DOCKER_SHELL = "/bin/bash"
class DockerfileTypeSchema(BaseCamelSchema):
image = RefOrObject(fields.Str(), required=True)
env = RefOrObject(fields.Dict(keys=fields.Str(), allow_none=True))
path = RefOrObject(fields.List(fields.Str(), allow_none=True))
copy = RefOrObject(fields.List(fields.Str(), allow_none=True))
run = RefOrObject(fields.List(fields.Str(), allow_none=True))
lang_env = RefOrObject(fields.Str(allow_none=True))
uid = RefOrObject(fields.Int(allow_none=True))
gid = RefOrObject(fields.Int(allow_none=True))
filename = RefOrObject(fields.Str(allow_none=True))
workdir = RefOrObject(fields.Str(allow_none=True))
workdir_path = RefOrObject(fields.Str(allow_none=True))
shell = RefOrObject(fields.Str(allow_none=True))
@staticmethod
def schema_config():
return V1DockerfileType
@validates_schema
def validate_dockerfile(self, data, **kwargs):
validate_image(data.get("image"))
class V1DockerfileType(BaseConfig, polyaxon_sdk.V1DockerfileType):
IDENTIFIER = "dockerfile"
SCHEMA = DockerfileTypeSchema
REDUCED_ATTRIBUTES = [
"image",
"env",
"path",
"copy",
"run",
"langEnv",
"uid",
"gid",
"filename",
"workdir",
"workdirPath",
"shell",
]
@property
def filename(self):
return (
self._filename if self._filename is not None else POLYAXON_DOCKERFILE_NAME
)
@filename.setter
def filename(self, filename):
self._filename = filename
@property
def workdir(self):
return self._workdir if self._workdir is not None else POLYAXON_DOCKER_WORKDIR
@workdir.setter
def workdir(self, workdir):
self._workdir = workdir
@property
def shell(self):
return self._shell if self._shell is not None else POLYAXON_DOCKER_SHELL
@shell.setter
def shell(self, shell):
self._shell = shell
@property
def image_tag(self):
if not self.image:
return None
tagged_image = self.image.split(":")
if len(tagged_image) == 1:
return "latest"
if len(tagged_image) == 2:
return "latest" if "/" in tagged_image[-1] else tagged_image[-1]
if len(tagged_image) == 3:
return tagged_image[-1]
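# Illustrative sketch of the tag parsing above (image names are hypothetical,
# not part of the original module):
#   "tensorflow"                     -> image_tag == "latest"
#   "tensorflow:2.3"                 -> image_tag == "2.3"
#   "registry.local:5000/tensorflow" -> image_tag == "latest"  (port, no tag)
#   "registry.local:5000/tf:2.3"     -> image_tag == "2.3"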
| 30.712963 | 86 | 0.682243 | [
"Apache-2.0"
] | gregmbi/polyaxon | core/polyaxon/schemas/types/dockerfile.py | 3,317 | Python |
from typing import Any, Callable
import dask
import dask.dataframe as dd
def map_on_partition_index(
df: dd.DataFrame, f: Callable, *args: Any, **kwargs: Any
) -> dd.DataFrame:
meta = kwargs.pop("meta", None)
return dd.from_delayed(
[
dask.delayed(f)(partition, partition_number, *args, **kwargs)
for partition_number, partition in enumerate(df.partitions)
],
meta=meta,
)
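# Minimal usage sketch (hypothetical data; assumes pandas is available alongside dask):
#   import pandas as pd
#   ddf = dd.from_pandas(pd.DataFrame({"x": range(6)}), npartitions=3)
#   def tag(partition, partition_number):
#       return partition.assign(part=partition_number)
#   map_on_partition_index(ddf, tag).compute()
# returns the original frame with an added `part` column holding each row's partition index.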
| 24.444444 | 73 | 0.638636 | [
"MIT"
] | galipremsagar/dask-sql | dask_sql/physical/utils/map.py | 440 | Python |
import os
# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
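# Example (illustrative, not part of the original BSP): the toolchain can be
# selected without editing this file by setting the variables read above,
# e.g. `set RTT_CC=keil` / `set RTT_EXEC_PATH=C:/Keil_v5` on Windows, or
# `export RTT_CC=gcc` on a POSIX shell, before invoking the build (typically scons).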
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M3 '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
    LFLAGS = DEVICE + r' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' -std=c99'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M3'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M3'
AFLAGS += ' --fpu None'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
def dist_handle(BSP_ROOT):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT)
| 26.342105 | 152 | 0.578172 | [
"Apache-2.0"
] | Ackleys/rt-thread | bsp/stm32/stm32f103-mini-system/rtconfig.py | 4,004 | Python |
"""
WSGI config for thirdproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'thirdproject.settings')
application = get_wsgi_application()
| 23.588235 | 78 | 0.790524 | [
"MIT"
] | LikeLionSCH/Django_Example | 3_thirdproject/thirdproject/wsgi.py | 401 | Python |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# tar command:
# tar czvf mugshots.tar.gz -T mugshot_files.txt
# where the txt is generated by this script
import django
django.setup()
from django.conf import settings
from djforms.scholars.models import Presentation
from djtools.fields import TODAY
YEAR = int(TODAY.year)
presentations = Presentation.objects.filter(date_updated__year=YEAR)
# list for failed uploads
#bunk = [ ]
#if s.mugshot in bunk:
# print s.first_name, s.last_name
for prez in presentations:
for presenter in prez.presenters.all():
if presenter.mugshot:
print(presenter.mugshot)
| 21.896552 | 68 | 0.729134 | [
"Unlicense"
] | carthage-college/django-djforms | djforms/scholars/tar_mugshots.py | 635 | Python |
from ibis.sql.compiler import DDL, DML
from .compiler import quote_identifier, _type_to_sql_string
import re
fully_qualified_re = re.compile(r"(.*)\.(?:`(.*)`|(.*))")
def _is_fully_qualified(x):
return bool(fully_qualified_re.search(x))
def _is_quoted(x):
regex = re.compile(r"(?:`(.*)`|(.*))")
quoted, _ = regex.match(x).groups()
return quoted is not None
class MapDQualifiedSQLStatement:
def _get_scoped_name(self, obj_name, database): # noqa: F401
return obj_name
class MapDDDL(DDL, MapDQualifiedSQLStatement):
pass
class MapDDML(DML, MapDQualifiedSQLStatement):
pass
class CreateDDL(MapDDDL):
"""Create DDL"""
class DropObject(MapDDDL):
def __init__(self, must_exist=True):
self.must_exist = must_exist
def compile(self):
if_exists = '' if self.must_exist else 'IF EXISTS '
object_name = self._object_name()
return 'DROP {} {}{}'.format(self._object_type, if_exists, object_name)
class DropTable(DropObject):
_object_type = 'TABLE'
def __init__(self, table_name, database=None, must_exist=True):
super().__init__(must_exist=must_exist)
self.table_name = table_name
self.database = database
def _object_name(self):
return self._get_scoped_name(self.table_name, self.database)
def _format_properties(props):
tokens = []
for k, v in sorted(props.items()):
tokens.append(" '{}'='{}'".format(k, v))
return '(\n{}\n)'.format(',\n'.join(tokens))
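# Illustrative output sketch (hypothetical properties, not part of the module):
#   _format_properties({'max_rows': 1000, 'owner': 'bob'})
# renders a parenthesised, one-property-per-line list, roughly:
#   (
#     'max_rows'='1000',
#     'owner'='bob'
#   )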
class CreateTable(CreateDDL):
"""
Parameters
----------
table_name : str
database : str
"""
def __init__(
self, table_name, database=None
):
self.table_name = table_name
self.database = database
@property
def _prefix(self):
return 'CREATE TABLE'
def _create_line(self):
return '{} {}'.format(
self._prefix, self.table_name
)
@property
def pieces(self):
yield self._create_line()
for piece in filter(None, self._pieces):
yield piece
def compile(self):
return '\n'.join(self.pieces)
class CreateTableWithSchema(CreateTable):
def __init__(
self, table_name, schema, database=None, max_rows=None
):
self.table_name = table_name
self.database = database
self.schema = schema
self.max_rows = max_rows
@property
def with_params(self):
return dict(max_rows=self.max_rows)
@property
def _pieces(self):
yield format_schema(self.schema)
with_stmt = ','.join([
'{}={}'.format(i, "'{}'".format(v) if isinstance(v, str) else v)
for i, v in self.with_params.items() if v is not None
])
if with_stmt:
yield ' WITH ({})'.format(with_stmt)
class CTAS(CreateTable):
"""
Create Table As Select
"""
def __init__(self, table_name, select, database=None):
self.table_name = table_name
self.database = database
self.select = select
@property
def _prefix(self):
return 'CREATE TABLE'
@property
def _pieces(self):
yield 'AS ('
yield self.select.compile()
yield ')'
# VIEW
class CreateView(CTAS):
"""Create a view"""
def __init__(self, table_name, select, database=None):
super().__init__(table_name, select, database=database)
@property
def _pieces(self):
yield 'AS'
yield self.select.compile()
@property
def _prefix(self):
return 'CREATE VIEW'
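# Rough shape of the DDL produced by CreateView (view name is a hypothetical
# example; the body is whatever the wrapped select expression compiles to):
#   CREATE VIEW my_view
#   AS
#   SELECT ...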
class DropView(DropTable):
_object_type = 'VIEW'
# USER
class AlterUser(MapDDDL):
"""Create user"""
def __init__(
self, name, password=None, database=None, is_super=False,
insert_access=None
):
self.name = name
self.password = password
self.database = database
self.is_super = is_super
self.insert_access = insert_access
@property
def _params(self):
if self.password is not None:
yield " password='{}'".format(self.password)
if self.is_super is not None:
yield " is_super='{}'".format(
'true' if self.is_super else 'false'
)
if self.insert_access:
yield " INSERTACCESS='{}'".format(self.insert_access)
@property
def pieces(self):
yield 'ALTER USER {} ('.format(self.name)
yield ','.join(self._params)
yield ')'
def compile(self):
return '\n'.join(self.pieces)
class CreateUser(MapDDDL):
"""Create user"""
def __init__(self, name, password, database=None, is_super=False):
self.name = name
self.password = password
self.database = database
self.is_super = is_super
@property
def pieces(self):
yield 'CREATE USER {} ('.format(self.name)
yield " password='{}',".format(self.password)
yield " is_super='{}'".format('true' if self.is_super else 'false')
yield ')'
def compile(self):
return '\n'.join(self.pieces)
class DropUser(MapDDDL):
"""Create user"""
def __init__(self, name, database=None):
self.name = name
self.database = database
@property
def pieces(self):
yield 'DROP USER {}'.format(self.name)
def compile(self):
return '\n'.join(self.pieces)
class AlterTable(MapDDDL):
def __init__(self, table, tbl_properties=None):
self.table = table
self.tbl_properties = tbl_properties
def _wrap_command(self, cmd):
return 'ALTER TABLE {}'.format(cmd)
def _format_properties(self, prefix=''):
tokens = []
if self.tbl_properties is not None:
# tokens.append(format_tblproperties(self.tbl_properties))
pass
if len(tokens) > 0:
return '\n{}{}'.format(prefix, '\n'.join(tokens))
else:
return ''
def compile(self):
props = self._format_properties()
action = '{} SET {}'.format(self.table, props)
return self._wrap_command(action)
class RenameTable(AlterTable):
def __init__(self, old_name, new_name, old_database=None,
new_database=None):
# if either database is None, the name is assumed to be fully scoped
self.old_name = old_name
self.old_database = old_database
self.new_name = new_name
self.new_database = new_database
new_qualified_name = new_name
if new_database is not None:
new_qualified_name = self._get_scoped_name(new_name, new_database)
old_qualified_name = old_name
if old_database is not None:
old_qualified_name = self._get_scoped_name(old_name, old_database)
self.old_qualified_name = old_qualified_name
self.new_qualified_name = new_qualified_name
def compile(self):
cmd = '{} RENAME TO {}'.format(self.old_qualified_name,
self.new_qualified_name)
return self._wrap_command(cmd)
class TruncateTable(MapDDDL):
_object_type = 'TABLE'
def __init__(self, table_name, database=None):
self.table_name = table_name
self.database = database
def compile(self):
name = self._get_scoped_name(self.table_name, self.database)
return 'TRUNCATE TABLE {}'.format(name)
class CacheTable(MapDDDL):
def __init__(self, table_name, database=None, pool='default'):
self.table_name = table_name
self.database = database
self.pool = pool
def compile(self):
scoped_name = self._get_scoped_name(self.table_name, self.database)
return "ALTER TABLE {} SET CACHED IN '{}'" .format(
scoped_name, self.pool
)
class CreateDatabase(CreateDDL):
def __init__(self, name, owner=None):
self.name = name
self.owner = owner
def compile(self):
name = quote_identifier(self.name)
cmd = 'CREATE DATABASE'
properties = ''
if self.owner:
properties = '(owner=\'{}\')'.format(self.owner)
return '{} {} {}'.format(cmd, name, properties)
class DropDatabase(DropObject):
_object_type = 'DATABASE'
def __init__(self, name):
super().__init__(must_exist=True)
self.name = name
def _object_name(self):
return self.name
def format_schema(schema):
elements = [
_format_schema_element(name, t)
for name, t in zip(schema.names, schema.types)
]
return '({})'.format(',\n '.join(elements))
def _format_schema_element(name, t):
return '{} {}'.format(
quote_identifier(name, force=False), _type_to_sql_string(t)
)
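# Illustrative sketch (assumes an ibis schema object exposing .names and .types
# as used above): for a two-column schema the helpers render roughly
#   (col_a <SQL type of col_a>,
#    col_b <SQL type of col_b>)
# with the SQL type names supplied by _type_to_sql_string.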
class InsertPandas(MapDDML):
def __init__(self, table_name, df, insert_index=False, database=None):
self.table_name = table_name
self.database = database
self.df = df.copy()
if insert_index:
self.df.reset_index(inplace=True)
def _get_field_names(self):
return ','.join(self.df.columns)
def _get_value(self, v):
if isinstance(v, str):
return "'{}'".format(v)
elif v is None:
return 'NULL'
else:
return '{}'.format(v)
def _get_field_values(self):
for i, row in self.df[self.df.columns].iterrows():
yield [self._get_value(v) for v in row]
@property
def pieces(self):
cmd = 'INSERT INTO'
fields = self._get_field_names()
stmt = '{0} {1} ({2}) VALUES '.format(
cmd, self.table_name, fields
)
for values in self._get_field_values():
yield '{} ({});'.format(stmt, ','.join(values))
def compile(self):
return '\n'.join(self.pieces)
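# Illustrative sketch (hypothetical frame, not part of the original module):
#   df = pandas.DataFrame({'a': [1, 2], 'b': ['x', None]})
#   InsertPandas('t', df).compile()
# emits one statement per row, roughly:
#   INSERT INTO t (a,b) VALUES (1,'x');
#   INSERT INTO t (a,b) VALUES (2,NULL);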
def _mapd_input_signature(inputs):
# TODO: varargs '{}...'.format(val)
return ', '.join(map(_type_to_sql_string, inputs))
| 24.225728 | 79 | 0.604949 | [
"Apache-2.0"
] | andrewseidl/ibis | ibis/mapd/ddl.py | 9,981 | Python |
# this script finds all the intersecting tiles for a given input AOI, and then downloads corresponding
# 0.5 meter AHN3 DSM and DTM tiles
from shapely.geometry import Polygon
import geopandas as gpd
import pandas as pd
from tqdm import tqdm
from multiprocessing import Pool
import urllib.request
import zipfile
import os
import argparse
def get_intersecting_tile_names(bounds_csv_path, aoi_path):
print("Finding all the intersecting tile names")
# all the tile bounds are in EPSG 28992
# reproject the aoi bounds to EPSG 28992
# define aoi bounds
aoi_df = gpd.read_file(aoi_path)
if aoi_df.crs != 28992:
aoi_df = aoi_df.to_crs(epsg=28992)
tile_names_list = []
# read csv into dataframe
tiles_bounds_df = pd.read_csv(bounds_csv_path)
for i in tqdm(range(len(tiles_bounds_df))):
tile_name = tiles_bounds_df["tile_name"].iloc[i]
tile_left = tiles_bounds_df["left"].iloc[i]
tile_right = tiles_bounds_df["right"].iloc[i]
tile_bottom = tiles_bounds_df["bottom"].iloc[i]
tile_top = tiles_bounds_df["top"].iloc[i]
# generate shapely geometry
tile_poly = gpd.GeoSeries(
[
Polygon(
[
(tile_left, tile_bottom),
(tile_right, tile_bottom),
(tile_right, tile_top),
(tile_left, tile_top),
]
)
]
)
tile_df = gpd.GeoDataFrame(
{"geometry": tile_poly, "df1": [1]}, crs="EPSG:28992"
)
if aoi_df.intersects(tile_df).any():
tile_names_list.append(tile_name)
print("the intersecting tiles are ", tile_names_list)
return tile_names_list
def download_data(download_url, out_path):
urllib.request.urlretrieve(download_url, out_path)
def extract_zip(src_zip_file, out_dir):
    zip_name = src_zip_file.split("/")[-1].replace(".zip", "")
    zip_data = zipfile.ZipFile(src_zip_file)
    zipinfos = zip_data.infolist()
    # iterate through each file, extracting directly into out_dir
    # (no chdir, so relative paths stay valid across pool workers)
    for zipinfo in zipinfos:
        # Rename the zip content
        zipinfo.filename = "{}.tif".format(zip_name)
        zip_data.extract(zipinfo, path=out_dir)
    zip_data.close()
    os.remove(src_zip_file)
    return os.path.join(out_dir, "{}.tif".format(zip_name))
def download_and_extract(tile_name, out_dir, download_url):
try:
out_path = os.path.join(out_dir, "{}.zip".format(tile_name))
download_data(download_url, out_path)
tif_path = extract_zip(out_path, out_dir)
# return tif_path
except Exception as e:
print("some error in ", tile_name)
print("error ", e)
def download_tiles_multiprocess(tile_names_list, out_dir, num_processes):
download_task_list = []
dsm_dir = os.path.join(out_dir, "dsm")
os.makedirs(dsm_dir, exist_ok=True)
dtm_dir = os.path.join(out_dir, "dtm")
os.makedirs(dtm_dir, exist_ok=True)
for tile_name in tile_names_list:
dsm_url = "https://download.pdok.nl/rws/ahn3/v1_0/05m_dsm/R_{}.ZIP".format(
tile_name
)
dtm_url = "https://download.pdok.nl/rws/ahn3/v1_0/05m_dtm/M_{}.ZIP".format(
tile_name
)
download_task_list.append([tile_name, dsm_dir, dsm_url])
download_task_list.append([tile_name, dtm_dir, dtm_url])
print("Dowloding {} tiles".format(len(download_task_list)))
p = Pool(num_processes)
p.starmap(download_and_extract, download_task_list)
p.close()
p.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Download AHN3 DSM and DTM tiles for input AOI"
)
parser.add_argument("--aoi", help="aoi geojson/shpefile path string")
parser.add_argument(
"--out_dir",
help="path to out directory where files will be downloaded",
type=str,
default="downloaded_tiles",
)
parser.add_argument(
"--num_processes",
help="Number of processes to run in parallel, to speed up downloading",
type=int,
default=10,
)
args = parser.parse_args()
aoi_path = args.aoi
out_dir = args.out_dir
num_processes = args.num_processes
os.makedirs(out_dir, exist_ok=True)
bounds_csv_path = "resources/ahn3_tile_bounds.csv"
target_tile_names = get_intersecting_tile_names(bounds_csv_path, aoi_path)
download_tiles_multiprocess(target_tile_names, out_dir, num_processes)
print("Data downloaded at ", os.path.join(os.getcwd(), out_dir))
| 34.007353 | 102 | 0.651892 | [
"MIT"
] | seedlit/ahn3-downloader | download_ahn3_elevation_data.py | 4,625 | Python |
from typing import Optional, List
from discord import Member
from config import PERMITTED_ROLE_IDS
from src.util.includes import includes
def is_authorized_member(member: Member, authorized_ids: Optional[List[int]] = None) -> bool:
if not authorized_ids:
authorized_ids = PERMITTED_ROLE_IDS
member_role_ids: List[int] = [
role.id for role in member.roles
]
if includes(member_role_ids, authorized_ids):
return True
return False
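# Usage sketch (hypothetical discord.py command handler, not part of this module):
#   async def handle_admin_command(ctx):
#       if not is_authorized_member(ctx.author):
#           await ctx.send("You are not permitted to use this command.")
#           return
#       ...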
| 22.809524 | 93 | 0.728601 | [
"MIT"
] | Colk-tech/discoplug | src/discord/operations/util/authorized.py | 479 | Python |
import boto3
from queuing_hub.conn.base import BasePub, BaseSub
class AwsBase():
def __init__(self, profile_name=None):
session = boto3.Session(profile_name=profile_name)
self._client = session.client('sqs')
self._queue_list = self._client.list_queues()['QueueUrls']
class AwsPub(AwsBase, BasePub):
def __init__(self, profile_name=None):
AwsBase.__init__(self, profile_name=profile_name)
BasePub.__init__(self)
@property
def topic_list(self) -> list:
return self._queue_list
def push(self, topic: str, body: str) -> dict:
response = self._client.send_message(
QueueUrl=topic,
MessageBody=body
)
return response['MessageId']
class AwsSub(AwsBase, BaseSub):
ATTRIBUTE_NAMES = [
'ApproximateNumberOfMessages',
# 'ApproximateNumberOfMessagesDelayed',
# 'ApproximateNumberOfMessagesNotVisible',
# 'DelaySeconds',
# 'MessageRetentionPeriod',
# 'ReceiveMessageWaitTimeSeconds',
# 'VisibilityTimeout'
]
def __init__(self, profile_name=None):
AwsBase.__init__(self, profile_name=profile_name)
BaseSub.__init__(self)
@property
def sub_list(self) -> list:
return self._queue_list
def qsize(self, sub_list: list = None) -> dict:
response = {'aws': {}}
if not sub_list:
sub_list = self._queue_list
for sub in sub_list:
response['aws'][sub] = self._get_message_count(sub)
return response
def is_empty(self, sub: str) -> bool:
return self._get_message_count(sub) == 0
def purge(self, sub: str) -> None:
self._client.purge_queue(QueueUrl=sub)
def pull(self, sub: str, max_num: int = 1, ack: bool = False) -> list:
response = self._client.receive_message(
QueueUrl=sub,
MaxNumberOfMessages=max_num
)
        messages = response.get('Messages', [])
        if ack and messages:
            self._ack(sub, messages)
        return [message.get('Body') for message in messages]
def _ack(self, sub: str, messages: list) -> None:
receipt_handle_list = \
[message['ReceiptHandle'] for message in messages]
for receipt_handle in receipt_handle_list:
self._client.delete_message(
QueueUrl=sub,
ReceiptHandle=receipt_handle
)
def _get_message_count(self, sub: str) -> int:
attributes = self._get_attributes(sub, self.ATTRIBUTE_NAMES)
return int(attributes[self.ATTRIBUTE_NAMES[0]])
def _get_attributes(self, sub: str, attribute_names: str) -> dict:
response = self._client.get_queue_attributes(
QueueUrl=sub,
AttributeNames=attribute_names
)
return response['Attributes']
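# Usage sketch (queue URL and profile name are placeholders):
#   pub = AwsPub(profile_name='default')
#   pub.push('https://sqs.us-east-1.amazonaws.com/123456789012/my-queue', 'hello')
#   sub = AwsSub(profile_name='default')
#   bodies = sub.pull('https://sqs.us-east-1.amazonaws.com/123456789012/my-queue', max_num=5, ack=True)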
| 28.72 | 74 | 0.626393 | [
"MIT"
] | tosh223/pmltq | queuing_hub/conn/aws.py | 2,872 | Python |
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
from core.models import Recipe, Ingredient
RECIPE_URL = reverse('recipe:recipe-list')
def recipe_url(id):
"""Construct URL for a single recipe based on its ID"""
return reverse('recipe:recipe-detail', args=[id])
def create_sample_recipe(**params):
"""Helper function to create a user"""
return Recipe.objects.create(**params)
class RecipeAPITests(TestCase):
def setUp(self):
self.client = APIClient()
def test_create_recipe_with_ingredients(self):
"""Test creating a recipe including ingredients"""
payload = {
'name': 'Vegan Roast Dinner',
'description': 'Roasted potatoes and mushroom wellington'
' with vegetables and gravy.',
'ingredients': [
{'name': 'carrots'},
{'name': 'potatoes'},
{'name': 'mushrooms'},
]
}
response = self.client.post(RECIPE_URL, payload, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
payload['name'],
Recipe.objects.get(id=response.data['id']).name
)
        self.assertEqual(
len(response.data['ingredients']),
len(payload['ingredients'])
)
def test_get_recipes(self):
"""Test retrieving a recipe"""
create_sample_recipe(
name='Roast Dinner',
description='Roasted potatoes and chicken'
' with vegetables and gravy.'
)
create_sample_recipe(
name='Beans on Toast',
description='Just the best.'
)
response = self.client.get(RECIPE_URL)
recipes = Recipe.objects.all().order_by('-name')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), len(recipes))
def test_get_recipe(self):
"""Test retrieving a single recipe using name as filter"""
test_recipe_name = 'Beans on Toast'
create_sample_recipe(
name='Roast Dinner',
description='Roasted potatoes and chicken'
' with vegetables and gravy.'
)
create_sample_recipe(
name=test_recipe_name,
description='Just the best recipe.'
)
response = self.client.get(RECIPE_URL, {'name': test_recipe_name})
recipes = Recipe.objects.all().order_by('-name')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotEqual(len(response.data), len(recipes))
self.assertEqual(response.data[0]['name'], test_recipe_name)
def test_update_recipe(self):
"""Test updating a recipe"""
self.recipe = create_sample_recipe(
name='Roast Dinner',
description='Roasted potatoes and chicken'
' with vegetables and gravy.'
)
payload = {
'name': 'Vegan Roast Dinner',
'description': 'Roasted potatoes and mushroom wellington'
' with vegetables and gravy.'
}
response = self.client.patch(
recipe_url(self.recipe.id),
payload, format='json'
)
self.recipe.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.recipe.name, response.data['name'])
self.assertEqual(self.recipe.description, response.data['description'])
def test_delete_recipe(self):
"""Test deleting a recipe"""
self.recipe = create_sample_recipe(
name='Carrot Cake',
description='Sponge cake with hella carrots.'
)
response = self.client.delete(
recipe_url(self.recipe.id),
format='json'
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(Recipe.objects.all())
def test_get_recipes_with_ingredients(self):
"""Test retrieving a recipe including ingredients"""
self.recipe = create_sample_recipe(
name='Carrot Cake',
description='Sponge cake with hella carrots.'
)
Ingredient.objects.create(name='Carrots', recipe=self.recipe)
Ingredient.objects.create(name='Icing Sugar', recipe=self.recipe)
response = self.client.get(RECIPE_URL)
ingredients = Ingredient.objects.all()
self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
len(response.data[0]['ingredients']),
len(ingredients)
)
def test_update_recipe_ingredients(self):
"""Test updating a recipe with ingredients included"""
self.recipe = create_sample_recipe(
name='Roast Dinner',
description='Roasted potatoes and chicken'
' with vegetables and gravy.'
)
payload = {
'name': 'Vegan Roast Dinner',
'description': 'Roasted potatoes and mushroom wellington'
' with vegetables and gravy.',
'ingredients': [
{'name': 'carrots'},
{'name': 'potatoes'},
{'name': 'mushrooms'},
]
}
response = self.client.patch(
recipe_url(self.recipe.id),
payload, format='json'
)
self.recipe.refresh_from_db()
ingredients = Ingredient.objects.all()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(ingredients), len(payload['ingredients']))
self.assertEqual(ingredients[0].recipe.name, payload['name'])
def test_delete_recipe_with_ingredients(self):
"""Test deleting a recipe with ingredients included"""
self.recipe = create_sample_recipe(
name='Carrot Cake',
description='Sponge cake with hella carrots.'
)
Ingredient.objects.create(name='Carrots', recipe=self.recipe)
Ingredient.objects.create(name='Icing Sugar', recipe=self.recipe)
response = self.client.delete(
recipe_url(self.recipe.id),
format='json'
)
ingredients = Ingredient.objects.all()
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(Recipe.objects.all())
self.assertFalse(len(ingredients), 0)
| 31.889423 | 79 | 0.599276 | [
"MIT"
] | jamie-chapman/django-exercise-recipe-app | app/recipe/tests/test_recipe_api.py | 6,633 | Python |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyignite import Client
from pyignite.datatypes.cache_config import CacheMode
from pyignite.datatypes.prop_codes import *
from pyignite.exceptions import SocketError
nodes = [
('127.0.0.1', 10800),
('127.0.0.1', 10801),
('127.0.0.1', 10802),
]
client = Client(timeout=4.0)
client.connect(nodes)
print('Connected')
my_cache = client.get_or_create_cache({
PROP_NAME: 'my_cache',
PROP_CACHE_MODE: CacheMode.PARTITIONED,
PROP_BACKUPS_NUMBER: 2,
})
my_cache.put('test_key', 0)
test_value = 0
# abstract main loop
while True:
try:
# do the work
test_value = my_cache.get('test_key') or 0
my_cache.put('test_key', test_value + 1)
except (OSError, SocketError) as e:
# recover from error (repeat last command, check data
        # consistency or just continue - depends on the task)
print('Error: {}'.format(e))
print('Last value: {}'.format(test_value))
print('Reconnecting')
# Connected
# Error: Connection broken.
# Last value: 2650
# Reconnecting
# Error: Connection broken.
# Last value: 10204
# Reconnecting
# Error: Connection broken.
# Last value: 18932
# Reconnecting
# Traceback (most recent call last):
# ...
# pyignite.exceptions.ReconnectError: Can not reconnect: out of nodes.
| 31.30303 | 74 | 0.720232 | [
"Apache-2.0"
] | sberdevices/ignite-python-thin-client | examples/failover.py | 2,068 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetApplicationGatewayResult',
'AwaitableGetApplicationGatewayResult',
'get_application_gateway',
]
@pulumi.output_type
class GetApplicationGatewayResult:
"""
Application gateway resource.
"""
def __init__(__self__, authentication_certificates=None, autoscale_configuration=None, backend_address_pools=None, backend_http_settings_collection=None, custom_error_configurations=None, enable_fips=None, enable_http2=None, etag=None, firewall_policy=None, force_firewall_policy_association=None, frontend_ip_configurations=None, frontend_ports=None, gateway_ip_configurations=None, http_listeners=None, id=None, identity=None, location=None, name=None, operational_state=None, private_endpoint_connections=None, private_link_configurations=None, probes=None, provisioning_state=None, redirect_configurations=None, request_routing_rules=None, resource_guid=None, rewrite_rule_sets=None, sku=None, ssl_certificates=None, ssl_policy=None, ssl_profiles=None, tags=None, trusted_client_certificates=None, trusted_root_certificates=None, type=None, url_path_maps=None, web_application_firewall_configuration=None, zones=None):
if authentication_certificates and not isinstance(authentication_certificates, list):
raise TypeError("Expected argument 'authentication_certificates' to be a list")
pulumi.set(__self__, "authentication_certificates", authentication_certificates)
if autoscale_configuration and not isinstance(autoscale_configuration, dict):
raise TypeError("Expected argument 'autoscale_configuration' to be a dict")
pulumi.set(__self__, "autoscale_configuration", autoscale_configuration)
if backend_address_pools and not isinstance(backend_address_pools, list):
raise TypeError("Expected argument 'backend_address_pools' to be a list")
pulumi.set(__self__, "backend_address_pools", backend_address_pools)
if backend_http_settings_collection and not isinstance(backend_http_settings_collection, list):
raise TypeError("Expected argument 'backend_http_settings_collection' to be a list")
pulumi.set(__self__, "backend_http_settings_collection", backend_http_settings_collection)
if custom_error_configurations and not isinstance(custom_error_configurations, list):
raise TypeError("Expected argument 'custom_error_configurations' to be a list")
pulumi.set(__self__, "custom_error_configurations", custom_error_configurations)
if enable_fips and not isinstance(enable_fips, bool):
raise TypeError("Expected argument 'enable_fips' to be a bool")
pulumi.set(__self__, "enable_fips", enable_fips)
if enable_http2 and not isinstance(enable_http2, bool):
raise TypeError("Expected argument 'enable_http2' to be a bool")
pulumi.set(__self__, "enable_http2", enable_http2)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if firewall_policy and not isinstance(firewall_policy, dict):
raise TypeError("Expected argument 'firewall_policy' to be a dict")
pulumi.set(__self__, "firewall_policy", firewall_policy)
if force_firewall_policy_association and not isinstance(force_firewall_policy_association, bool):
raise TypeError("Expected argument 'force_firewall_policy_association' to be a bool")
pulumi.set(__self__, "force_firewall_policy_association", force_firewall_policy_association)
if frontend_ip_configurations and not isinstance(frontend_ip_configurations, list):
raise TypeError("Expected argument 'frontend_ip_configurations' to be a list")
pulumi.set(__self__, "frontend_ip_configurations", frontend_ip_configurations)
if frontend_ports and not isinstance(frontend_ports, list):
raise TypeError("Expected argument 'frontend_ports' to be a list")
pulumi.set(__self__, "frontend_ports", frontend_ports)
if gateway_ip_configurations and not isinstance(gateway_ip_configurations, list):
raise TypeError("Expected argument 'gateway_ip_configurations' to be a list")
pulumi.set(__self__, "gateway_ip_configurations", gateway_ip_configurations)
if http_listeners and not isinstance(http_listeners, list):
raise TypeError("Expected argument 'http_listeners' to be a list")
pulumi.set(__self__, "http_listeners", http_listeners)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if operational_state and not isinstance(operational_state, str):
raise TypeError("Expected argument 'operational_state' to be a str")
pulumi.set(__self__, "operational_state", operational_state)
if private_endpoint_connections and not isinstance(private_endpoint_connections, list):
raise TypeError("Expected argument 'private_endpoint_connections' to be a list")
pulumi.set(__self__, "private_endpoint_connections", private_endpoint_connections)
if private_link_configurations and not isinstance(private_link_configurations, list):
raise TypeError("Expected argument 'private_link_configurations' to be a list")
pulumi.set(__self__, "private_link_configurations", private_link_configurations)
if probes and not isinstance(probes, list):
raise TypeError("Expected argument 'probes' to be a list")
pulumi.set(__self__, "probes", probes)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if redirect_configurations and not isinstance(redirect_configurations, list):
raise TypeError("Expected argument 'redirect_configurations' to be a list")
pulumi.set(__self__, "redirect_configurations", redirect_configurations)
if request_routing_rules and not isinstance(request_routing_rules, list):
raise TypeError("Expected argument 'request_routing_rules' to be a list")
pulumi.set(__self__, "request_routing_rules", request_routing_rules)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if rewrite_rule_sets and not isinstance(rewrite_rule_sets, list):
raise TypeError("Expected argument 'rewrite_rule_sets' to be a list")
pulumi.set(__self__, "rewrite_rule_sets", rewrite_rule_sets)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if ssl_certificates and not isinstance(ssl_certificates, list):
raise TypeError("Expected argument 'ssl_certificates' to be a list")
pulumi.set(__self__, "ssl_certificates", ssl_certificates)
if ssl_policy and not isinstance(ssl_policy, dict):
raise TypeError("Expected argument 'ssl_policy' to be a dict")
pulumi.set(__self__, "ssl_policy", ssl_policy)
if ssl_profiles and not isinstance(ssl_profiles, list):
raise TypeError("Expected argument 'ssl_profiles' to be a list")
pulumi.set(__self__, "ssl_profiles", ssl_profiles)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if trusted_client_certificates and not isinstance(trusted_client_certificates, list):
raise TypeError("Expected argument 'trusted_client_certificates' to be a list")
pulumi.set(__self__, "trusted_client_certificates", trusted_client_certificates)
if trusted_root_certificates and not isinstance(trusted_root_certificates, list):
raise TypeError("Expected argument 'trusted_root_certificates' to be a list")
pulumi.set(__self__, "trusted_root_certificates", trusted_root_certificates)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if url_path_maps and not isinstance(url_path_maps, list):
raise TypeError("Expected argument 'url_path_maps' to be a list")
pulumi.set(__self__, "url_path_maps", url_path_maps)
if web_application_firewall_configuration and not isinstance(web_application_firewall_configuration, dict):
raise TypeError("Expected argument 'web_application_firewall_configuration' to be a dict")
pulumi.set(__self__, "web_application_firewall_configuration", web_application_firewall_configuration)
if zones and not isinstance(zones, list):
raise TypeError("Expected argument 'zones' to be a list")
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="authenticationCertificates")
def authentication_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewayAuthenticationCertificateResponse']]:
"""
Authentication certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "authentication_certificates")
@property
@pulumi.getter(name="autoscaleConfiguration")
def autoscale_configuration(self) -> Optional['outputs.ApplicationGatewayAutoscaleConfigurationResponse']:
"""
Autoscale Configuration.
"""
return pulumi.get(self, "autoscale_configuration")
@property
@pulumi.getter(name="backendAddressPools")
def backend_address_pools(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']]:
"""
Backend address pool of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "backend_address_pools")
@property
@pulumi.getter(name="backendHttpSettingsCollection")
def backend_http_settings_collection(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendHttpSettingsResponse']]:
"""
Backend http settings of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "backend_http_settings_collection")
@property
@pulumi.getter(name="customErrorConfigurations")
def custom_error_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayCustomErrorResponse']]:
"""
Custom error configurations of the application gateway resource.
"""
return pulumi.get(self, "custom_error_configurations")
@property
@pulumi.getter(name="enableFips")
def enable_fips(self) -> Optional[bool]:
"""
Whether FIPS is enabled on the application gateway resource.
"""
return pulumi.get(self, "enable_fips")
@property
@pulumi.getter(name="enableHttp2")
def enable_http2(self) -> Optional[bool]:
"""
Whether HTTP2 is enabled on the application gateway resource.
"""
return pulumi.get(self, "enable_http2")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="firewallPolicy")
def firewall_policy(self) -> Optional['outputs.SubResourceResponse']:
"""
Reference to the FirewallPolicy resource.
"""
return pulumi.get(self, "firewall_policy")
@property
@pulumi.getter(name="forceFirewallPolicyAssociation")
def force_firewall_policy_association(self) -> Optional[bool]:
"""
If true, associates a firewall policy with an application gateway regardless whether the policy differs from the WAF Config.
"""
return pulumi.get(self, "force_firewall_policy_association")
@property
@pulumi.getter(name="frontendIPConfigurations")
def frontend_ip_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayFrontendIPConfigurationResponse']]:
"""
Frontend IP addresses of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter(name="frontendPorts")
def frontend_ports(self) -> Optional[Sequence['outputs.ApplicationGatewayFrontendPortResponse']]:
"""
Frontend ports of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "frontend_ports")
@property
@pulumi.getter(name="gatewayIPConfigurations")
def gateway_ip_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayIPConfigurationResponse']]:
"""
Subnets of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "gateway_ip_configurations")
@property
@pulumi.getter(name="httpListeners")
def http_listeners(self) -> Optional[Sequence['outputs.ApplicationGatewayHttpListenerResponse']]:
"""
Http listeners of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "http_listeners")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
"""
The identity of the application gateway, if configured.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="operationalState")
def operational_state(self) -> str:
"""
Operational state of the application gateway resource.
"""
return pulumi.get(self, "operational_state")
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> Sequence['outputs.ApplicationGatewayPrivateEndpointConnectionResponse']:
"""
Private Endpoint connections on application gateway.
"""
return pulumi.get(self, "private_endpoint_connections")
@property
@pulumi.getter(name="privateLinkConfigurations")
def private_link_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayPrivateLinkConfigurationResponse']]:
"""
PrivateLink configurations on application gateway.
"""
return pulumi.get(self, "private_link_configurations")
@property
@pulumi.getter
def probes(self) -> Optional[Sequence['outputs.ApplicationGatewayProbeResponse']]:
"""
Probes of the application gateway resource.
"""
return pulumi.get(self, "probes")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the application gateway resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="redirectConfigurations")
def redirect_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayRedirectConfigurationResponse']]:
"""
Redirect configurations of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "redirect_configurations")
@property
@pulumi.getter(name="requestRoutingRules")
def request_routing_rules(self) -> Optional[Sequence['outputs.ApplicationGatewayRequestRoutingRuleResponse']]:
"""
Request routing rules of the application gateway resource.
"""
return pulumi.get(self, "request_routing_rules")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resource GUID property of the application gateway resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter(name="rewriteRuleSets")
def rewrite_rule_sets(self) -> Optional[Sequence['outputs.ApplicationGatewayRewriteRuleSetResponse']]:
"""
Rewrite rules for the application gateway resource.
"""
return pulumi.get(self, "rewrite_rule_sets")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.ApplicationGatewaySkuResponse']:
"""
SKU of the application gateway resource.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="sslCertificates")
def ssl_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewaySslCertificateResponse']]:
"""
SSL certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "ssl_certificates")
@property
@pulumi.getter(name="sslPolicy")
def ssl_policy(self) -> Optional['outputs.ApplicationGatewaySslPolicyResponse']:
"""
SSL policy of the application gateway resource.
"""
return pulumi.get(self, "ssl_policy")
@property
@pulumi.getter(name="sslProfiles")
def ssl_profiles(self) -> Optional[Sequence['outputs.ApplicationGatewaySslProfileResponse']]:
"""
SSL profiles of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "ssl_profiles")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="trustedClientCertificates")
def trusted_client_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewayTrustedClientCertificateResponse']]:
"""
Trusted client certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "trusted_client_certificates")
@property
@pulumi.getter(name="trustedRootCertificates")
def trusted_root_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewayTrustedRootCertificateResponse']]:
"""
Trusted Root certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "trusted_root_certificates")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="urlPathMaps")
def url_path_maps(self) -> Optional[Sequence['outputs.ApplicationGatewayUrlPathMapResponse']]:
"""
URL path map of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "url_path_maps")
@property
@pulumi.getter(name="webApplicationFirewallConfiguration")
def web_application_firewall_configuration(self) -> Optional['outputs.ApplicationGatewayWebApplicationFirewallConfigurationResponse']:
"""
Web application firewall configuration.
"""
return pulumi.get(self, "web_application_firewall_configuration")
@property
@pulumi.getter
def zones(self) -> Optional[Sequence[str]]:
"""
A list of availability zones denoting where the resource needs to come from.
"""
return pulumi.get(self, "zones")
class AwaitableGetApplicationGatewayResult(GetApplicationGatewayResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApplicationGatewayResult(
authentication_certificates=self.authentication_certificates,
autoscale_configuration=self.autoscale_configuration,
backend_address_pools=self.backend_address_pools,
backend_http_settings_collection=self.backend_http_settings_collection,
custom_error_configurations=self.custom_error_configurations,
enable_fips=self.enable_fips,
enable_http2=self.enable_http2,
etag=self.etag,
firewall_policy=self.firewall_policy,
force_firewall_policy_association=self.force_firewall_policy_association,
frontend_ip_configurations=self.frontend_ip_configurations,
frontend_ports=self.frontend_ports,
gateway_ip_configurations=self.gateway_ip_configurations,
http_listeners=self.http_listeners,
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
operational_state=self.operational_state,
private_endpoint_connections=self.private_endpoint_connections,
private_link_configurations=self.private_link_configurations,
probes=self.probes,
provisioning_state=self.provisioning_state,
redirect_configurations=self.redirect_configurations,
request_routing_rules=self.request_routing_rules,
resource_guid=self.resource_guid,
rewrite_rule_sets=self.rewrite_rule_sets,
sku=self.sku,
ssl_certificates=self.ssl_certificates,
ssl_policy=self.ssl_policy,
ssl_profiles=self.ssl_profiles,
tags=self.tags,
trusted_client_certificates=self.trusted_client_certificates,
trusted_root_certificates=self.trusted_root_certificates,
type=self.type,
url_path_maps=self.url_path_maps,
web_application_firewall_configuration=self.web_application_firewall_configuration,
zones=self.zones)
def get_application_gateway(application_gateway_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApplicationGatewayResult:
"""
Application gateway resource.
:param str application_gateway_name: The name of the application gateway.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['applicationGatewayName'] = application_gateway_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200701:getApplicationGateway', __args__, opts=opts, typ=GetApplicationGatewayResult).value
return AwaitableGetApplicationGatewayResult(
authentication_certificates=__ret__.authentication_certificates,
autoscale_configuration=__ret__.autoscale_configuration,
backend_address_pools=__ret__.backend_address_pools,
backend_http_settings_collection=__ret__.backend_http_settings_collection,
custom_error_configurations=__ret__.custom_error_configurations,
enable_fips=__ret__.enable_fips,
enable_http2=__ret__.enable_http2,
etag=__ret__.etag,
firewall_policy=__ret__.firewall_policy,
force_firewall_policy_association=__ret__.force_firewall_policy_association,
frontend_ip_configurations=__ret__.frontend_ip_configurations,
frontend_ports=__ret__.frontend_ports,
gateway_ip_configurations=__ret__.gateway_ip_configurations,
http_listeners=__ret__.http_listeners,
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
operational_state=__ret__.operational_state,
private_endpoint_connections=__ret__.private_endpoint_connections,
private_link_configurations=__ret__.private_link_configurations,
probes=__ret__.probes,
provisioning_state=__ret__.provisioning_state,
redirect_configurations=__ret__.redirect_configurations,
request_routing_rules=__ret__.request_routing_rules,
resource_guid=__ret__.resource_guid,
rewrite_rule_sets=__ret__.rewrite_rule_sets,
sku=__ret__.sku,
ssl_certificates=__ret__.ssl_certificates,
ssl_policy=__ret__.ssl_policy,
ssl_profiles=__ret__.ssl_profiles,
tags=__ret__.tags,
trusted_client_certificates=__ret__.trusted_client_certificates,
trusted_root_certificates=__ret__.trusted_root_certificates,
type=__ret__.type,
url_path_maps=__ret__.url_path_maps,
web_application_firewall_configuration=__ret__.web_application_firewall_configuration,
zones=__ret__.zones)
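# Usage sketch (resource names below are hypothetical, not part of this module):
# inside a Pulumi program the returned result's fields can be read directly, e.g.
#
#   import pulumi
#   gw = get_application_gateway(application_gateway_name="example-appgw",
#                                resource_group_name="example-rg")
#   pulumi.export("appgw_operational_state", gw.operational_state)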
| 50.361314 | 926 | 0.714545 | [
"Apache-2.0"
] | pulumi/pulumi-azure-nextgen | sdk/python/pulumi_azure_nextgen/network/v20200701/get_application_gateway.py | 27,598 | Python |
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
import numpy as np
import cv2
import SimpleITK as sitk
from scipy.spatial import distance
import sys
import time
############### FUNCTIONS ##########################
def imcomplement(im):
if np.max(im)>1:
imout=255-im
else:
imout=1-im
return imout
def mat2gray(img):
max_img=np.max(img)
min_img=np.min(img)
imgout=(img-min_img)/(max_img-min_img)
return imgout
def im2double(img):
imgout=img.astype('float32')
imgout= mat2gray(imgout)
return imgout
def imreconstruct(marker,mask):
markeritk=sitk.GetImageFromArray(marker)
maskitk=sitk.GetImageFromArray(mask)
recfilt=sitk.ReconstructionByDilationImageFilter()
rectoutitk=recfilt.Execute(markeritk,maskitk)
rectout=sitk.GetArrayFromImage(rectoutitk)
return rectout
def eigen_cov(x,y):
mx=np.mean(x)
my=np.mean(y)
x=x-mx
y=y-my
cxx=np.var(x)
cxy=0
cyy=np.var(y);
nx=len(x)
for ct in range(nx):
cxy=cxy+x[ct]*y[ct];
cxy=cxy/nx;
C=np.zeros((2,2))
C[0,0]=cxx
C[0,1]=cxy
C[1,0]=cxy
C[1,1]=cyy
D,V=np.linalg.eig(C)
return V,D
def improfile(img,x,y,n):
xm=x[0]
x0=x[1]
ym=y[0]
y0=y[1]
a = np.arctan((y0 - ym) / (x0 - xm))
i=range(0,100,int(100/n))
cx=np.squeeze(np.zeros((1,len(i))))
cy=np.squeeze(np.zeros((1,len(i))))
c=np.squeeze(np.zeros((1,len(i))))
ct=0
    for t in i:  # step derived from n (was hard-coded to 30)
tf=t/100.0
cx[ct] = int(xm + (x0 - xm)*tf)
cy[ct] = int(ym + (y0 - ym)*tf)
c[ct]=img[int(cy[ct]), int(cx[ct])]
ct=ct+1
return c,cx,cy
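# Quick illustration of improfile (hypothetical data, kept commented out so the
# script itself is unchanged): sample ~34 points along the diagonal of a
# synthetic gradient image, the same way filter_result3 probes across a crack.
#   demo = np.tile(np.linspace(0, 1, 100), (100, 1))
#   prof, px, py = improfile(demo, np.array([0, 99]), np.array([0, 99]), 30)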
def filter_result3(img,bw_result,ths,thm):
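    # Reading of this filter (interpretation of the code below): for each detected
    # crack pixel, estimate the local crack direction from neighbouring detections
    # (eigen_cov), sample the HSV saturation along the perpendicular (improfile),
    # and keep the surrounding window only if the saturation step across the line
    # is below ths (in percent) and its level stays below thm; strong steps or
    # high saturation are treated as colour edges rather than cracks.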
bw_result_orig=np.copy(bw_result);
points=np.where(bw_result>0)
points=np.reshape(points,np.shape(points))
points=np.transpose(points)
npoints=np.shape(points)[0]
k=20
step=5
hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
sat=hsv[:,:,1]/255
bw_result_filter=np.zeros(np.shape(bw_result))
xc=points[:,1]
yc=points[:,0]
for ct in range(0,npoints,step):
#print(ct/npoints)
ystart=max(0,yc[ct]-k);
xstart=max(0,xc[ct]-k);
yend=min(np.shape(img)[0],yc[ct]+k);
xend=min(np.shape(img)[1],xc[ct]+k);
p=points[ct,:]
p=np.reshape(p,(1,2))
Dpoints=distance.cdist(p,points)
Dpoints=np.squeeze(Dpoints)
ipoints=np.squeeze(np.where(Dpoints<40))
xneigh=points[ipoints,1];
yneigh=points[ipoints,0];
V,D=eigen_cov(xneigh,yneigh)
vmin=V[:,0];
if D[1]<D[0]:
vmin=V[:,1];
x1=xc[ct]-k*vmin[0];
y1=yc[ct]-k*vmin[1];
x2=xc[ct]+k*vmin[0];
y2=yc[ct]+k*vmin[1];
p,px,py=improfile(sat,np.array([x1,x2]),np.array([y1,y2]),30);
s=np.abs(np.mean(p[0:5])-np.mean(p[len(p)-5:len(p)]));
s=round(s*100);
m=np.max([p[0:5],p[len(p)-5:len(p)]]);
if(s<ths and m<thm):
bw_result_filter[ystart:yend,xstart:xend]=bw_result_orig[ystart:yend,xstart:xend];
return bw_result_filter
def min_openings(im,LEN,DEG_NUM):
imo=[];
for i in range(DEG_NUM):
#DEG=(i)*((360/DEG_NUM)/2)
filtername=str(i+1)+'se.txt'
se=np.loadtxt('filters/images/filters/'+filtername)
if(i==0):
se=np.reshape(se,(1,len(se)))
if(i==6):
se=np.reshape(se,(len(se),1))
se=se.astype('uint8')
imoi=cv2.erode(im,se)
imoi=cv2.dilate(imoi,se)
imo.append(imoi)
imB=imo[0]
for i in range(DEG_NUM-1):
k=i+1
imB=np.minimum(imB,imo[k])
return imB
def smooth_cross_section(imV,LEN_diff,DEG_NUM):
imV_c=imcomplement(imV)
imd=[]
for i in range(12):
k=i+1
se1=np.loadtxt('filters/images/filters/'+str(k)+'linekernel1.txt')
se2=np.loadtxt('filters/images/filters/'+str(k)+'linekernel2.txt')
if(i==0):
se1=np.reshape(se1,(1,len(se1)))
se2=np.reshape(se2,(len(se2),1))
if(i==6):
se1=np.reshape(se1,(len(se1),1))
se2=np.reshape(se2,(1,len(se2)))
temp=cv2.filter2D(imV_c.astype('float32'),-1,se1)
imdi=cv2.filter2D(temp,-1,se2)
imdi[imdi<0]=0
imd.append(imdi)
imDiff=imd[0]
for i in range(11):
k=i+1
imDiff=np.maximum(imDiff,imd[k])
imDiff=mat2gray(imDiff)
return imDiff
def reconstruction_by_dilation(im,LEN,DEG_NUM):
imo=[];
for i in range(DEG_NUM):
#DEG=(i)*((360/DEG_NUM)/2)
filtername=str(i+1)+'se.txt'
se=np.loadtxt('filters/images/filters/'+filtername)
if(i==0):
se=np.reshape(se,(1,len(se)))
if(i==6):
se=np.reshape(se,(len(se),1))
se=se.astype('uint8')
imoi=cv2.erode(im,se)
imoi=cv2.dilate(imoi,se)
imo.append(imoi)
imC=imo[0]
for i in range(DEG_NUM-1):
k=i+1
imC=np.maximum(imC,imo[k])
imC2=imreconstruct(imC,im)
imC2=mat2gray(imC2)
return imC2
def reconstruction_by_erosion(im,LEN,DEG_NUM):
im_close=[];
for i in range(DEG_NUM):
#DEG=(i)*((360/DEG_NUM)/2)
filtername=str(i+1)+'se.txt'
se=np.loadtxt('filters/images/filters/'+filtername)
if(i==0):
se=np.reshape(se,(1,len(se)))
if(i==6):
se=np.reshape(se,(len(se),1))
se=se.astype('uint8')
im_closei=cv2.dilate(im,se)
im_closei=cv2.erode(im_closei,se)
im_close.append(im_closei);
imTemp39=im_close[0]
for i in range(DEG_NUM-1):
k=i+1
imTemp39=np.minimum(imTemp39,im_close[k])
marker=imcomplement(imTemp39)
mask=imcomplement(im)
imF=imreconstruct(marker,mask)
imF=mat2gray(imF)
imF=imcomplement(imF)
return imF
############ MAIN ##############
if len(sys.argv)<2:
print('missing input file')
sys.exit(-1)
if len(sys.argv)==4:
img_file_out=sys.argv[2]
img_file_out_bin=sys.argv[3]
else:
img_file_out='output.png'
img_file_out_bin='output.png'
img_file=sys.argv[1]
print('processing '+img_file)
imgorig=cv2.imread(img_file)
start_time = time.time()
size_orig=np.shape(imgorig)
print(size_orig)
## resize if the original size is different from dataset images
## so we can keep the same parameters for the filters
scale=2
rows_dataset=int(2448/scale)
cols_dataset=int(3264/scale)
img_blur = cv2.bilateralFilter(cv2.resize(imgorig,(cols_dataset,rows_dataset)) ,int(51/scale),int(201),int(201/scale))
img_blur=cv2.resize(img_blur,(size_orig[1],size_orig[0]))
##
print("bilateral filter --- %s seconds ---" % (time.time() - start_time))
img=cv2.resize(img_blur,(653,490))
hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
im=hsv[:,:,2]
bw_mask=np.zeros(np.shape(im))
bw_mask_offr=round(np.shape(im)[0]/20)
bw_mask_offc=round(np.shape(im)[1]/20)
bw_mask[bw_mask_offr:np.shape(im)[0]-bw_mask_offr, bw_mask_offc:np.shape(im)[1]-bw_mask_offc]=1;
im=mat2gray(im)*mat2gray(bw_mask)
im=imcomplement(im)
im=im2double(im)
DEG_NUM=12;
LEN_c=11;
LEN_o=11;
LEN_diff=7;
ic1=reconstruction_by_dilation(im,LEN_c,DEG_NUM)
io1=min_openings(im,LEN_o,DEG_NUM)
iv=mat2gray(ic1-io1)
imDiff=smooth_cross_section(iv,LEN_diff,LEN_c)
imL=reconstruction_by_dilation(imDiff,LEN_c,DEG_NUM)
imF=reconstruction_by_erosion(imL,LEN_c,DEG_NUM)
TH_LOW=0.12;
TH_HIGH=0.2;
min_obj=20;
min_hole=10;
mask=np.zeros(np.shape(imF))
marker=np.zeros(np.shape(imF))
mask[imF>TH_LOW]=1
marker[imF>TH_HIGH]=1
bw_result=imreconstruct(marker,mask)
print("bw result --- %s seconds ---" % (time.time() - start_time))
bw_result=filter_result3(img,bw_result,4,0.2)
print("filter result --- %s seconds ---" % (time.time() - start_time))
bw_result=cv2.resize(bw_result,(size_orig[1],size_orig[0]))
imgr=imgorig[:,:,2];
imgr[bw_result>0]=255;
imgorig[:,:,2]=imgr;
print('saving output file: '+img_file_out)
cv2.imwrite(img_file_out,imgorig)
cv2.imwrite(img_file_out_bin,bw_result*255)
print('done ')
| 29.243056 | 119 | 0.594039 | [
"MIT"
] | ficusoftdeveloper/bluedome | scripts/image/crack_detection_fast.py | 8,422 | Python |
#%% First
import numpy as np
import json
import os
import pandas as pd
import requests
from contextlib import closing
import time
from datetime import datetime
from requests.models import HTTPBasicAuth
import seaborn as sns
from matplotlib import pyplot as plt
from requests import get
from requests_futures.sessions import FuturesSession
from bs4 import BeautifulSoup
from dotenv import load_dotenv, dotenv_values
from requests_oauthlib import OAuth2, OAuth2Session
#%%
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
env_vars = dotenv_values('config.env')
client_id = env_vars['id']
client_secret = env_vars['secret']
code = env_vars['code']
callback_uri = "http://localhost:8080"
authorize_url = "https://www.warcraftlogs.com/oauth/authorize"
token_url = "https://www.warcraftlogs.com/oauth/token"
# warcraftlogs = OAuth2Session(client_id, redirect_uri=callback_uri)
# authorization_url, state = warcraftlogs.authorization_url(authorize_url,
# access_type="offline")
# token = warcraftlogs.fetch_token(token_url = token_url,
# auth = HTTPBasicAuth(client_id, client_secret),
# code = code)
# access_token = token['access_token']
# refresh_token = token['refresh_token']
# with open('refresh_token.env', 'w') as f:
# f.write('refresh_token = '+str(refresh_token)+'\nacces_token = '+str(access_token))
if os.path.isfile('refresh_token.env'):
env_vars = dotenv_values('refresh_token.env')
refresh_token = env_vars['refresh_token']
access_token = env_vars['access_token']
else:
    raise RuntimeError('No refresh_token.env found - run the commented-out authorization flow above to get a fresh token')
# print(refresh_token)
try:
warcraftlogs = OAuth2Session(client_id = client_id)
graphql_endpoint = "https://www.warcraftlogs.com/api/v2/client"
headers = {"Authorization": f"Bearer {access_token}"}
query = """{
reportData{
reports(guildID: 95321, endTime: 1622872800000.0, startTime: 1605855600000.0){
data{
fights(difficulty: 5){
name
averageItemLevel
# friendlyPlayers
id
}
}
}
}
}"""
r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
except:
token = warcraftlogs.refresh_token(token_url = token_url,
auth = HTTPBasicAuth(client_id, client_secret),
refresh_token = refresh_token)
access_token = token['access_token']
refresh_token = token['refresh_token']
with open('refresh_token.env', 'w') as f:
f.write('refresh_token = '+str(refresh_token)+'\naccess_token = '+str(access_token))
warcraftlogs = OAuth2Session(client_id = client_id)
graphql_endpoint = "https://www.warcraftlogs.com/api/v2/client"
headers = {"Authorization": f"Bearer {access_token}"}
query = """{
reportData{
reports(guildID: 95321, endTime: 1622872800000.0, startTime: 1605855600000.0){
data{
fights(difficulty: 5){
name
averageItemLevel
# friendlyPlayers
id
}
}
}
}
}"""
r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
with open('..//get_guild_list/guild_list_hungering.json', encoding='utf-8') as f:
guilds = json.load(f)
#%%
def is_good_response_json(resp):
"""
Returns True if the response seems to be HTML, False otherwise.
"""
content_type = resp.headers['Content-Type'].lower()
return (resp.status_code == 200
and content_type is not None
and content_type.find('json') > -1)
def get_guild_id(guild):
try:
guild_id = int(guild['id'])
except:
query = """
{
guildData{
guild(name: "%s", serverSlug: "%s", serverRegion: "%s"){
id
}
}
}
""" % (guild['name'], guild['realm'].replace(' ', '-'), guild['region'])
r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
guild_id = r.json()['data']['guildData']['guild']['id']
return guild_id
def get_log_list(guild):
guild['id'] = get_guild_id(guild)
query = ("{"
f"reportData{{"
f" reports(guildID: {guild['id']}, zoneID: 26){{"
f" data{{"
f" code"
f" startTime"
f" endTime"
f" }}"
f" }}"
f"}}"
f"}}")
r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
log_list = r.json()['data']['reportData']['reports']['data']
return log_list
def get_log_list_apiv1(guild):
with open('..//..//Warcraftlogs//api_key.txt.') as f:
api_key = f.readlines()[0]
link = "https://www.warcraftlogs.com:443/v1/reports/guild/" + \
guild['name'] + "/" + guild['realm'].replace(' ', '-').replace("'","")+ "/" + \
guild['region'] + "?api_key=" + api_key
guild_logs = requests.get(link)
log_list = guild_logs.json()
log_list_new = []
for item in log_list:
if item['zone'] == 26:
log_list_new.append({'code': item['id'],
'startTime': item['start'],
'endTime': item['end']})
return log_list_new
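# Each entry returned above has the shape (values hypothetical; start/end appear
# to be epoch milliseconds from the v1 API):
#   {'code': 'a1B2c3D4', 'startTime': 1609459200000, 'endTime': 1609473600000}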
def get_pulls(log, guild):
log_id = log['code']
query = """
{
reportData{
report(code: "%s"){
fights(difficulty: 5){
name
id
averageItemLevel
bossPercentage
kill
startTime
endTime
}
}
}
}
""" % (log_id)
r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
fight_list = r.json()['data']['reportData']['report']['fights']
for k in range(len(fight_list)):
fight_list[k].update({'log_code': log_id})
return fight_list
def get_fight_info(fight, guild, unique_id):
code = fight['log_code']
fight_ID = fight['id']
start_time = fight['start_time']
end_time = fight['end_time']
query = """
{
reportData{
report(code: "%s"){
table(fightIDs: %s, startTime: %s, endTime: %s)
}
}
}
""" % (code, fight_ID, str(start_time), str(end_time))
r = requests.post(graphql_endpoint, json={"query": query}, headers=headers)
table = r.json()['data']['reportData']['report']['table']['data']
comp = table['composition']
roles = table['playerDetails']
player_list = []
for role in roles:
players = roles[role]
for player in players:
try:
gear_ilvl = [piece['itemLevel'] for piece in player['combatantInfo']['gear']]
ilvl = np.mean(gear_ilvl)
except:
try:
ilvl = player['minItemLevel']
except:
ilvl = np.NaN
try:
covenant = player['combatantInfo']['covenantID']
except:
covenant = np.NaN
try:
spec = player['specs'][0]
except:
spec = np.NaN
try:
stats = player['combatantInfo']['stats']
primaries = ['Agility','Intellect','Strength']
for primary in primaries:
if primary in stats.keys():
break
primary= stats[primary]['min']
mastery= stats['Mastery']['min']
crit= stats['Crit']['min']
haste= stats['Haste']['min']
vers= stats['Versatility']['min']
stamina= stats['Stamina']['min']
except:
primary = np.NaN
mastery = np.NaN
crit = np.NaN
haste = np.NaN
vers = np.NaN
stamina = np.NaN
player_info= {'unique_id': unique_id,
'class': player['type'],
'spec': spec,
'role': role,
'ilvl': ilvl,
'covenant': covenant,
'primary': primary,
'mastery': mastery,
'crit': crit,
'haste': haste,
'vers': vers,
'stamina': stamina,
'boss_name': fight['name']}
player_list.append(player_info)
return player_list
# %% Setup the SQL Stuff
from sqlalchemy import create_engine
import psycopg2
server = 'localhost'
database = 'nathria_prog'
username = 'postgres'
password = 'postgres'
if 'conn' in locals():
conn.close()
engine = create_engine('postgresql://postgres:postgres@localhost:5432/nathria_prog')
conn = psycopg2.connect('host='+server+' dbname='+database+' user='+username+' password='+password)
curs = conn.cursor()
curs.execute("select exists(select * from information_schema.tables where table_name=%s)",\
('nathria_prog_v2',))
if curs.fetchone()[0]:
curs.execute('select distinct guild_name from nathria_prog_v2')
already_added_guilds = [item[0] for item in curs.fetchall()]
already_added_length = len(already_added_guilds)
else:
already_added_guilds = []
already_added_length = 0
def check_in_sql(fight):
unique_id = fight['unique_id']
curs.execute("select * from nathria_prog_v2 where unique_id = '%s'" % (unique_id))
if curs.fetchone() is None:
check_one = False
else:
check_one = True
curs.execute("select * from nathria_prog_v2 where start_time > %s and end_time < %s and guild_name = '%s';" \
% (fight['start_time']-60, fight['end_time']+60, fight['guild_name']))
if curs.fetchone() is None:
check_two = False
else:
check_two = True
check = check_one or check_two
return check
def add_to_sql(curs, table, info):
placeholders = ', '.join(['%s'] * len(info))
columns = ', '.join(info.keys())
sql = "INSERT INTO %s ( %s ) VALUES ( %s )" % (str(table), columns, placeholders)
curs.execute(sql, list(info.values()))
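# Usage sketch (table and values are hypothetical): the placeholders let psycopg2
# escape the values instead of formatting them into the SQL string, e.g.
#   add_to_sql(curs, 'nathria_prog_v2', {'unique_id': 'abc_1', 'kill': True})
#   conn.commit()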
#%% This is for futures use
def make_logs_query(log):
log_id = log['code']
query = """
{
reportData{
report(code: "%s"){
fights(difficulty: 5){
name
id
averageItemLevel
bossPercentage
kill
startTime
endTime
}
}
}
}
""" % (log_id)
return query
def get_log_args(log, graphql_endpoint, headers):
args = {'url': graphql_endpoint,
'json': {'query': make_logs_query(log)},
'headers': headers}
return args
def get_fight_list(log_list, graphql_endpoint, headers):
session = FuturesSession(max_workers = 2)
futures = [session.post(**get_log_args(log, graphql_endpoint, headers)) for log in log_list]
fights_list = []
for q, item in enumerate(futures):
result = item.result()
if result.status_code!=200:
print(result.status_code)
fights = result.json()['data']['reportData']['report']['fights']
for k, fight in enumerate(fights):
fight['log_code'] = log_list[q]['code']
fight['log_start'] = log_list[q]['startTime']
fight['log_end'] = log_list[q]['endTime']
fight['unique_id'] = log_list[q]['code'] + '_' + str(fight['id'])
fights_list.extend([fight])
return fights_list
def get_prog_pulls(df, boss_name):
    if not isinstance(df.iloc[0]['start_time'], (int, np.integer)):  # only convert Timestamps to epoch seconds
df['start_time'] = [time.mktime(x.to_pydatetime().timetuple()) for x in df['start_time']]
df['end_time'] = [time.mktime(x.to_pydatetime().timetuple()) for x in df['end_time']]
kills_df = df.query('name == "'+boss_name+'"').query('zoneDifficulty == 5').query('kill == True')
first_kill_time = min(kills_df['start_time'])
return df.query('name == "'+boss_name+'"').query('zoneDifficulty == 5').query('start_time <= '+str(first_kill_time))
def add_pull_num(df):
df = df.sort_values(by = ['start_time'])
df.insert(loc = 0, column = 'pull_num', value = np.arange(len(df))+1)
return df
def combine_boss_df(df):
boss_names = [
'Shriekwing', \
'Huntsman Altimor',
'Hungering Destroyer', \
"Sun King's Salvation",
"Artificer Xy'mox", \
'Lady Inerva Darkvein', \
'The Council of Blood', \
'Sludgefist', \
'Stone Legion Generals', \
'Sire Denathrius']
combine_df = pd.DataFrame()
for k, boss_name in enumerate(np.unique(df['name'])):
if boss_name in boss_names and boss_name in np.unique(df['name']):
combine_df = combine_df.append(add_pull_num(df.copy(deep = True).query('name == "'+boss_name+'"')))
combine_df = combine_df.reset_index().drop(columns = 'index')
return combine_df
n_start = 3500
for gnum, guild in enumerate(guilds[n_start:]):
if guild['name'] in already_added_guilds:
continue
# log_list = get_log_list(guild)
try:
log_list = get_log_list_apiv1(guild)
        if len(log_list) == 0:
            print(f'Log list empty for {guild["name"]}')
            continue
fightdf = pd.DataFrame()
playerdf = pd.DataFrame()
print(f'Parsing guild {guild["name"]} (#{gnum+1+n_start} of {len(guilds)})')
fight_list = get_fight_list(log_list, graphql_endpoint, headers)
fightdf = pd.DataFrame()
for q, fight in enumerate(fight_list):
fight['boss_perc'] = fight.pop('bossPercentage')
fight['average_item_level'] = fight.pop('averageItemLevel')
fight['unique_id'] = fight['log_code'] + '_' + str(fight['id'])
fight['start_time'] = fight.pop('startTime')
fight['end_time'] = fight.pop('endTime')
fight['guild_name'] = guild['name']
fight['guild_realm'] = guild['realm']
fight['guild_region'] = guild['region']
fightdf = fightdf.append(pd.DataFrame(fight, index=['i',]))
fightdf = combine_boss_df(fightdf.copy(deep = True))
fightdf.to_sql('nathria_prog_v2', engine, if_exists='append')
if len(fightdf)>1:
print(f'Adding to SQL guild {guild["name"]}')
time.sleep(3)
except:
continue
#%%
# Intentional stop after the guild-pull pass above; the player-table pass below
# is meant to be run separately (comment this out to let it run).
raise SystemExit('stopping after the guild-pull pass')
from sqlalchemy import create_engine
import psycopg2
server = 'localhost'
database = 'nathria_prog'
username = 'postgres'
password = 'postgres'
if 'conn' in locals():
conn.close()
engine = create_engine('postgresql://postgres:postgres@localhost:5432/nathria_prog')
conn = psycopg2.connect('host='+server+' dbname='+database+' user='+username+' password='+password)
curs = conn.cursor()
curs.execute("select exists(select * from information_schema.tables where table_name=%s)",\
('nathria_prog_v2',))
if curs.fetchone()[0]:
curs.execute('select distinct guild_name from nathria_prog_v2')
logged_guilds = [item[0] for item in curs.fetchall()]
else:
logged_guilds = []
def make_fights_query(fight):
code = fight['log_code']
fight_ID = fight['id']
start_time = fight['start_time']
end_time = fight['end_time']
query = """
{
reportData{
report(code: "%s"){
table(fightIDs: %s, startTime: %s, endTime: %s)
}
}
}
""" % (code, fight_ID, str(start_time), str(end_time))
return query
def get_fight_args(log, graphql_endpoint, headers):
args = {'url': graphql_endpoint,
'json': {'query': make_fights_query(log)},
'headers': headers}
return args
def get_fight_table(fights_list, graphql_endpoint, headers):
session = FuturesSession(max_workers = 2)
futures = [session.post(**get_fight_args(fight, graphql_endpoint, headers)) for fight in fights_list]
fights_tables = []
for k, item in enumerate(futures):
result = item.result()
if result.status_code!=200:
print(result.status_code)
# if is_good_response_json(item.result()):
try:
fights_tables.append(result.json()['data']['reportData']['report']['table']['data'])
except:
pass
return fights_tables
def parse_fight_table(table, boss_name, unique_id, guild_name):
comp = table['composition']
roles = table['playerDetails']
player_list = []
for role in roles:
players = roles[role]
for player in players:
try:
gear_ilvl = [piece['itemLevel'] for piece in player['combatantInfo']['gear']]
ilvl = np.mean(gear_ilvl)
except:
try:
ilvl = player['minItemLevel']
except:
ilvl = np.NaN
try:
covenant = player['combatantInfo']['covenantID']
except:
covenant = np.NaN
try:
spec = player['specs'][0]
except:
spec = np.NaN
try:
stats = player['combatantInfo']['stats']
primaries = ['Agility','Intellect','Strength']
for primary in primaries:
if primary in stats.keys():
break
primary= stats[primary]['min']
mastery= stats['Mastery']['min']
crit= stats['Crit']['min']
haste= stats['Haste']['min']
vers= stats['Versatility']['min']
stamina= stats['Stamina']['min']
except:
primary = np.NaN
mastery = np.NaN
crit = np.NaN
haste = np.NaN
vers = np.NaN
stamina = np.NaN
player_info= {'unique_id': unique_id,
'name': player['name'],
'guild_name': guild_name,
'server': player['server'],
'class': player['type'],
'spec': spec,
'role': role,
'ilvl': ilvl,
'covenant': covenant,
'primary': primary,
'mastery': mastery,
'crit': crit,
'haste': haste,
'vers': vers,
'stamina': stamina,
'boss_name': boss_name}
player_list.append(player_info)
return player_list
for guild_name in logged_guilds:
curs.execute(f"select * from nathria_prog_v2 where guild_name = '{guild_name}'")
pulls = pd.DataFrame(curs.fetchall())
pulls.columns = [desc[0] for desc in curs.description]
fights_list = pulls.to_dict('records')
curs.execute(f"select distinct unique_id from nathria_prog_v2_players where guild_name = '{guild_name}'")
added_fights = [item[0] for item in curs.fetchall()]
fight_list = [fight for fight in fights_list if fight['unique_id'] not in added_fights]
if len(fight_list)>1:
fights_tables = get_fight_table(fights_list, graphql_endpoint, headers)
playerdf = pd.DataFrame()
for q, table in enumerate(fights_tables):
unique_id = fights_list[q]['unique_id']
guild_name = guild_name
player_info = parse_fight_table(table, fights_list[q]['name'], unique_id, guild_name)
            for player in player_info:
                playerdf = playerdf.append(pd.DataFrame(player, index=['i',]))
if len(playerdf)>1:
print(f'Adding to SQL guild player info {guild["name"]}')
playerdf.to_sql('nathria_prog_v2_players', engine, if_exists='append') | 33.901528 | 120 | 0.565254 | [
"MIT"
] | GBruening/succes_predictor | Pulling data/apiv2_pull.py | 19,968 | Python |
from Tkinter import *
from Tkinter import Text as textcontrol


class StyledTextControl(textcontrol):
    def spaces(self, val):
        # Tk expresses tab stops in screen units; 8 units per space is a rough default.
        return str(val * 8)

    def __screen(self, width, height):
        # Resize hook registered with the app in pack(); not implemented yet.
        pass

    def __init__(self, parent, width, height, fontf, fontsize):
        # Predefining Variables
        self.POS = RIGHT
        self.app = parent
        self.widget = textcontrol(parent.mainframe)
        self.widget.config(tabs=self.spaces(4), background="#ffffff", foreground='#000000', highlightthickness=0, borderwidth=0)
        if fontf is not None:
            if fontsize is not None and fontsize != "":
                self.widget.config(font=(fontf, fontsize))
            else:
                self.widget.config(font=fontf)

    def setMargins(self, top, left, right):
        # Not implemented yet.
        pass

    def pack(self):
        self.app.configs.append(self.__screen)
        self.widget.pack(side=self.POS)
| 36.76 | 129 | 0.607182 | [
"Apache-2.0"
] | CofeePy/Cofee | lib/stc.py | 919 | Python |
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from unittest import mock
sys.modules["dnf"] = mock.Mock()
| 35.263158 | 77 | 0.731343 | [
"Apache-2.0"
] | openstack/tripleo-repos | tests/unit/yum_config/mock_modules.py | 670 | Python |
from litex.soc.cores import uart
from litex.soc.cores.uart import UARTWishboneBridge
from litedram.frontend.bist import LiteDRAMBISTGenerator, LiteDRAMBISTChecker
from litescope import LiteScopeAnalyzer
from litescope import LiteScopeIO
from gateware.memtest import LiteDRAMBISTCheckerScope
from targets.utils import csr_map_update
from targets.mimasv2.base import BaseSoC
class MemTestSoC(BaseSoC):
csr_peripherals = (
"analyzer",
"io",
)
csr_map_update(BaseSoC.csr_map, csr_peripherals)
def __init__(self, platform, *args, **kwargs):
kwargs['cpu_type'] = None
BaseSoC.__init__(self, platform, *args, with_uart=False, **kwargs)
self.add_cpu_or_bridge(UARTWishboneBridge(platform.request("serial"), self.clk_freq, baudrate=19200))
self.add_wb_master(self.cpu_or_bridge.wishbone)
# Litescope for analyzing the BIST output
# --------------------
self.submodules.io = LiteScopeIO(8)
for i in range(8):
try:
self.comb += platform.request("user_led", i).eq(self.io.output[i])
except:
pass
analyzer_signals = [
self.spiflash.bus,
# self.spiflash.cs_n,
# self.spiflash.clk,
# self.spiflash.dq_oe,
# self.spiflash.dqi,
# self.spiflash.sr,
]
self.submodules.analyzer = LiteScopeAnalyzer(analyzer_signals, 1024)
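        # The analyzer samples the SPI flash bus signals above into a 1024-entry
        # capture buffer; together with the UART Wishbone bridge added earlier it
        # can be armed and read back from a host PC, using the CSV written by
        # do_exit() below for the signal name mapping.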
def do_exit(self, vns, filename="test/analyzer.csv"):
self.analyzer.export_csv(vns, filename)
SoC = MemTestSoC
| 29.830189 | 109 | 0.654016 | [
"BSD-2-Clause"
] | CarlFK/HDMI2USB-litex-firmware | targets/mimasv2/scope.py | 1,581 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function
from distutils.spawn import find_executable
from distutils.version import LooseVersion
import json
import os
import distro
import shutil
import subprocess
import six
import six.moves.urllib as urllib
from six.moves import input
from subprocess import PIPE
from zipfile import BadZipfile
import servo.packages as packages
from servo.util import extract, download_file, host_triple
def check_gstreamer_lib():
return subprocess.call(["pkg-config", "--atleast-version=1.16", "gstreamer-1.0"],
stdout=PIPE, stderr=PIPE) == 0
def run_as_root(command, force=False):
if os.geteuid() != 0:
command.insert(0, 'sudo')
if force:
command.append('-y')
return subprocess.call(command)
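# For example (package list is illustrative), run_as_root(['apt-get', 'install', 'git'], force=True)
# runs "sudo apt-get install git -y" when the current user is not root.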
def install_linux_deps(context, pkgs_ubuntu, pkgs_fedora, pkgs_void, force):
install = False
pkgs = []
if context.distro in ['Ubuntu', 'Debian GNU/Linux']:
command = ['apt-get', 'install']
pkgs = pkgs_ubuntu
if subprocess.call(['dpkg', '-s'] + pkgs, stdout=PIPE, stderr=PIPE) != 0:
install = True
elif context.distro in ['CentOS', 'CentOS Linux', 'Fedora']:
installed_pkgs = str(subprocess.check_output(['rpm', '-qa'])).replace('\n', '|')
pkgs = pkgs_fedora
for p in pkgs:
command = ['dnf', 'install']
if "|{}".format(p) not in installed_pkgs:
install = True
break
elif context.distro == 'void':
installed_pkgs = str(subprocess.check_output(['xbps-query', '-l']))
pkgs = pkgs_void
for p in pkgs:
command = ['xbps-install', '-A']
if "ii {}-".format(p) not in installed_pkgs:
install = force = True
break
if install:
print("Installing missing dependencies...")
run_as_root(command + pkgs, force)
return install
def install_salt_dependencies(context, force):
pkgs_apt = ['build-essential', 'libssl-dev', 'libffi-dev', 'python-dev']
pkgs_dnf = ['gcc', 'libffi-devel', 'python-devel', 'openssl-devel']
pkgs_xbps = ['gcc', 'libffi-devel', 'python-devel']
if not install_linux_deps(context, pkgs_apt, pkgs_dnf, pkgs_xbps, force):
print("Dependencies are already installed")
def gstreamer(context, force=False):
cur = os.curdir
gstdir = os.path.join(cur, "support", "linux", "gstreamer")
if not os.path.isdir(os.path.join(gstdir, "gst", "lib")):
subprocess.check_call(["bash", "gstreamer.sh"], cwd=gstdir)
return True
return False
def bootstrap_gstreamer(context, force=False):
if not gstreamer(context, force):
print("gstreamer is already set up")
return 0
def linux(context, force=False):
# Please keep these in sync with the packages in README.md
pkgs_apt = ['git', 'curl', 'autoconf', 'libx11-dev', 'libfreetype6-dev',
'libgl1-mesa-dri', 'libglib2.0-dev', 'xorg-dev', 'gperf', 'g++',
'build-essential', 'cmake', 'libssl-dev',
'liblzma-dev', 'libxmu6', 'libxmu-dev',
"libxcb-render0-dev", "libxcb-shape0-dev", "libxcb-xfixes0-dev",
'libgles2-mesa-dev', 'libegl1-mesa-dev', 'libdbus-1-dev',
'libharfbuzz-dev', 'ccache', 'clang', 'libunwind-dev',
'libgstreamer1.0-dev', 'libgstreamer-plugins-base1.0-dev',
'libgstreamer-plugins-bad1.0-dev', 'autoconf2.13',
'libunwind-dev', 'llvm-dev']
pkgs_dnf = ['libtool', 'gcc-c++', 'libXi-devel', 'freetype-devel',
'libunwind-devel', 'mesa-libGL-devel', 'mesa-libEGL-devel',
'glib2-devel', 'libX11-devel', 'libXrandr-devel', 'gperf',
'fontconfig-devel', 'cabextract', 'ttmkfdir', 'expat-devel',
'rpm-build', 'openssl-devel', 'cmake',
'libXcursor-devel', 'libXmu-devel',
'dbus-devel', 'ncurses-devel', 'harfbuzz-devel', 'ccache',
'clang', 'clang-libs', 'llvm', 'autoconf213', 'python3-devel',
'gstreamer1-devel', 'gstreamer1-plugins-base-devel',
'gstreamer1-plugins-bad-free-devel']
pkgs_xbps = ['libtool', 'gcc', 'libXi-devel', 'freetype-devel',
'libunwind-devel', 'MesaLib-devel', 'glib-devel', 'pkg-config',
'libX11-devel', 'libXrandr-devel', 'gperf', 'bzip2-devel',
'fontconfig-devel', 'cabextract', 'expat-devel', 'cmake',
'cmake', 'libXcursor-devel', 'libXmu-devel', 'dbus-devel',
'ncurses-devel', 'harfbuzz-devel', 'ccache', 'glu-devel',
'clang', 'gstreamer1-devel', 'autoconf213',
'gst-plugins-base1-devel', 'gst-plugins-bad1-devel']
installed_something = install_linux_deps(context, pkgs_apt, pkgs_dnf,
pkgs_xbps, force)
if not check_gstreamer_lib():
installed_something |= gstreamer(context, force)
if not installed_something:
print("Dependencies were already installed!")
return 0
def salt(context, force=False):
# Ensure Salt dependencies are installed
install_salt_dependencies(context, force)
# Ensure Salt is installed in the virtualenv
# It's not installed globally because it's a large, non-required dependency,
# and the installation fails on Windows
print("Checking Salt installation...", end='')
reqs_path = os.path.join(context.topdir, 'python', 'requirements-salt.txt')
process = subprocess.Popen(
["pip", "install", "-q", "-I", "-r", reqs_path],
stdout=PIPE,
stderr=PIPE
)
process.wait()
if process.returncode:
out, err = process.communicate()
print('failed to install Salt via pip:')
print('Output: {}\nError: {}'.format(out, err))
return 1
print("done")
salt_root = os.path.join(context.sharedir, 'salt')
config_dir = os.path.join(salt_root, 'etc', 'salt')
pillar_dir = os.path.join(config_dir, 'pillars')
# In order to allow `mach bootstrap` to work from any CWD,
# the `root_dir` must be an absolute path.
# We place it under `context.sharedir` because
# Salt caches data (e.g. gitfs files) in its `var` subdirectory.
# Hence, dynamically generate the config with an appropriate `root_dir`
# and serialize it as JSON (which is valid YAML).
config = {
'hash_type': 'sha384',
'master': 'localhost',
'root_dir': salt_root,
'state_output': 'changes',
'state_tabular': True,
}
if 'SERVO_SALTFS_ROOT' in os.environ:
config.update({
'fileserver_backend': ['roots'],
'file_roots': {
'base': [os.path.abspath(os.environ['SERVO_SALTFS_ROOT'])],
},
})
else:
config.update({
'fileserver_backend': ['git'],
'gitfs_env_whitelist': 'base',
'gitfs_provider': 'gitpython',
'gitfs_remotes': [
'https://github.com/servo/saltfs.git',
],
})
if not os.path.exists(config_dir):
os.makedirs(config_dir, mode=0o700)
with open(os.path.join(config_dir, 'minion'), 'w') as config_file:
config_file.write(json.dumps(config) + '\n')
# Similarly, the pillar data is created dynamically
# and temporarily serialized to disk.
# This dynamism is not yet used, but will be in the future
# to enable Android bootstrapping by using
# context.sharedir as a location for Android packages.
pillar = {
'top.sls': {
'base': {
'*': ['bootstrap'],
},
},
'bootstrap.sls': {
'fully_managed': False,
},
}
if os.path.exists(pillar_dir):
shutil.rmtree(pillar_dir)
os.makedirs(pillar_dir, mode=0o700)
for filename in pillar:
with open(os.path.join(pillar_dir, filename), 'w') as pillar_file:
pillar_file.write(json.dumps(pillar[filename]) + '\n')
cmd = [
# sudo escapes from the venv, need to use full path
find_executable('salt-call'),
'--local',
'--config-dir={}'.format(config_dir),
'--pillar-root={}'.format(pillar_dir),
'state.apply',
'servo-build-dependencies',
]
if not force:
print('Running bootstrap in dry-run mode to show changes')
# Because `test=True` mode runs each state individually without
# considering how required/previous states affect the system,
# it will often report states with requisites as failing due
# to the requisites not actually being run,
# even though these are spurious and will succeed during
# the actual highstate.
# Hence `--retcode-passthrough` is not helpful in dry-run mode,
# so only detect failures of the actual salt-call binary itself.
retcode = run_as_root(cmd + ['test=True'])
if retcode != 0:
print('Something went wrong while bootstrapping')
return retcode
proceed = input(
'Proposed changes are above, proceed with bootstrap? [y/N]: '
)
if proceed.lower() not in ['y', 'yes']:
return 0
print('')
print('Running Salt bootstrap')
retcode = run_as_root(cmd + ['--retcode-passthrough'])
if retcode == 0:
print('Salt bootstrapping complete')
else:
print('Salt bootstrapping encountered errors')
return retcode
def windows_msvc(context, force=False):
'''Bootstrapper for MSVC building on Windows.'''
deps_dir = os.path.join(context.sharedir, "msvc-dependencies")
deps_url = "https://servo-deps-2.s3.amazonaws.com/msvc-deps/"
def version(package):
return packages.WINDOWS_MSVC[package]
def package_dir(package):
return os.path.join(deps_dir, package, version(package))
def check_cmake(version):
cmake_path = find_executable("cmake")
if cmake_path:
cmake = subprocess.Popen([cmake_path, "--version"], stdout=PIPE)
cmake_version_output = six.ensure_str(cmake.stdout.read()).splitlines()[0]
cmake_version = cmake_version_output.replace("cmake version ", "")
if LooseVersion(cmake_version) >= LooseVersion(version):
return True
return False
def prepare_file(zip_path, full_spec):
if not os.path.isfile(zip_path):
zip_url = "{}{}.zip".format(deps_url, urllib.parse.quote(full_spec))
download_file(full_spec, zip_url, zip_path)
print("Extracting {}...".format(full_spec), end='')
try:
extract(zip_path, deps_dir)
except BadZipfile:
print("\nError: %s.zip is not a valid zip file, redownload..." % full_spec)
os.remove(zip_path)
prepare_file(zip_path, full_spec)
else:
print("done")
to_install = {}
for package in packages.WINDOWS_MSVC:
# Don't install CMake if it already exists in PATH
if package == "cmake" and check_cmake(version("cmake")):
continue
if not os.path.isdir(package_dir(package)):
to_install[package] = version(package)
if not to_install:
return 0
print("Installing missing MSVC dependencies...")
for package in to_install:
full_spec = '{}-{}'.format(package, version(package))
parent_dir = os.path.dirname(package_dir(package))
if not os.path.isdir(parent_dir):
os.makedirs(parent_dir)
zip_path = package_dir(package) + ".zip"
prepare_file(zip_path, full_spec)
extracted_path = os.path.join(deps_dir, full_spec)
os.rename(extracted_path, package_dir(package))
return 0
LINUX_SPECIFIC_BOOTSTRAPPERS = {
"salt": salt,
"gstreamer": bootstrap_gstreamer,
}
def get_linux_distribution():
distrib, version, _ = distro.linux_distribution()
distrib = six.ensure_str(distrib)
version = six.ensure_str(version)
if distrib in ['LinuxMint', 'Linux Mint', 'KDE neon']:
if '.' in version:
major, _ = version.split('.', 1)
else:
major = version
if major == '20':
base_version = '20.04'
elif major == '19':
base_version = '18.04'
elif major == '18':
base_version = '16.04'
else:
raise Exception('unsupported version of %s: %s' % (distrib, version))
distrib, version = 'Ubuntu', base_version
elif distrib == 'Pop!_OS':
if '.' in version:
major, _ = version.split('.', 1)
else:
major = version
if major == '20':
base_version = '20.04'
elif major == '19':
base_version = '18.04'
elif major == '18':
base_version = '16.04'
else:
raise Exception('unsupported version of %s: %s' % (distrib, version))
distrib, version = 'Ubuntu', base_version
elif distrib.lower() == 'elementary':
if version == '5.0':
base_version = '18.04'
elif version[0:3] == '0.4':
base_version = '16.04'
else:
raise Exception('unsupported version of %s: %s' % (distrib, version))
distrib, version = 'Ubuntu', base_version
elif distrib.lower() == 'ubuntu':
if version > '21.04':
raise Exception('unsupported version of %s: %s' % (distrib, version))
# Fixme: we should allow checked/supported versions only
elif distrib.lower() not in [
'centos',
'centos linux',
'debian gnu/linux',
'fedora',
'void',
'nixos',
]:
raise Exception('mach bootstrap does not support %s, please file a bug' % distrib)
return distrib, version
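# For instance, Linux Mint 20.x reports ('Ubuntu', '20.04') here, so derivative
# distributions fall through to the Ubuntu package list in linux() above.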
def bootstrap(context, force=False, specific=None):
'''Dispatches to the right bootstrapping function for the OS.'''
bootstrapper = None
if "windows-msvc" in host_triple():
bootstrapper = windows_msvc
elif "linux-gnu" in host_triple():
distrib, version = get_linux_distribution()
if distrib.lower() == 'nixos':
print('NixOS does not need bootstrap, it will automatically enter a nix-shell')
print('Just run ./mach build')
print('')
print('You will need to run a nix-shell if you are trying to run any of the built binaries')
print('To enter the nix-shell manually use:')
print(' $ nix-shell etc/shell.nix')
return
context.distro = distrib
context.distro_version = version
bootstrapper = LINUX_SPECIFIC_BOOTSTRAPPERS.get(specific, linux)
if bootstrapper is None:
print('Bootstrap support is not yet available for your OS.')
return 1
return bootstrapper(context, force=force)
| 36.192857 | 104 | 0.600289 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | Florian-Schoenherr/servo | python/servo/bootstrap.py | 15,201 | Python |
#!/usr/bin/env python
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pystadel',
version='1.0.0',
description='Class for sending SMSes using Stadel SMS gateway',
long_description=long_description,
url='https://github.com/luttermann/pystadel',
author='Lasse Luttermann Poulsen',
author_email='[email protected]',
license='BSD-2-Clause',
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
# It might work in other versions, but these are not testet.
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='sms stadel',
py_modules=["stadel"],
)
| 27.066667 | 68 | 0.644499 | [
"BSD-2-Clause"
] | luttermann/pystadel | setup.py | 1,218 | Python |
"""
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import unittest
import s1ap_types
from integ_tests.s1aptests import s1ap_wrapper
class TestSctpAbortAfterSmc(unittest.TestCase):
def setUp(self):
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
def tearDown(self):
self._s1ap_wrapper.cleanup()
def test_sctp_abort_after_smc(self):
""" testing Sctp Abort after Security Mode Command for a single UE """
self._s1ap_wrapper.configUEDevice(1)
req = self._s1ap_wrapper.ue_req
print(
"************************* Running Sctp Abort after Security"
" Mode Command for a single UE for UE id ",
req.ue_id,
)
attach_req = s1ap_types.ueAttachRequest_t()
attach_req.ue_Id = req.ue_id
sec_ctxt = s1ap_types.TFW_CREATE_NEW_SECURITY_CONTEXT
id_type = s1ap_types.TFW_MID_TYPE_IMSI
eps_type = s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH
attach_req.mIdType = id_type
attach_req.epsAttachType = eps_type
attach_req.useOldSecCtxt = sec_ctxt
print("Sending Attach Request ue-id", req.ue_id)
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_ATTACH_REQUEST, attach_req
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_AUTH_REQ_IND.value
)
print("Received auth req ind ue-id", req.ue_id)
auth_res = s1ap_types.ueAuthResp_t()
auth_res.ue_Id = req.ue_id
sqn_recvd = s1ap_types.ueSqnRcvd_t()
sqn_recvd.pres = 0
auth_res.sqnRcvd = sqn_recvd
print("Sending Auth Response ue-id", req.ue_id)
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_AUTH_RESP, auth_res
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_SEC_MOD_CMD_IND.value
)
print("Received Security Mode Command ue-id", req.ue_id)
print("send SCTP ABORT")
sctp_abort = s1ap_types.FwSctpAbortReq_t()
sctp_abort.cause = 3
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.SCTP_ABORT_REQ, sctp_abort
)
if __name__ == "__main__":
unittest.main()
| 32.55 | 78 | 0.667819 | [
"BSD-3-Clause"
] | 119Vik/magma-1 | lte/gateway/python/integ_tests/s1aptests/test_sctp_abort_after_smc.py | 2,604 | Python |
import pyopencl as cl
def print_device_info() :
print('\n' + '=' * 60 + '\nOpenCL Platforms and Devices')
for platform in cl.get_platforms():
print('=' * 60)
print('Platform - Name: ' + platform.name)
print('Platform - Vendor: ' + platform.vendor)
print('Platform - Version: ' + platform.version)
print('Platform - Profile: ' + platform.profile)
for device in platform.get_devices():
print(' ' + '-' * 56)
print(' Device - Name: ' \
+ device.name)
print(' Device - Type: ' \
+ cl.device_type.to_string(device.type))
print(' Device - Max Clock Speed: {0} Mhz'\
.format(device.max_clock_frequency))
print(' Device - Compute Units: {0}'\
.format(device.max_compute_units))
print(' Device - Local Memory: {0:.0f} KB'\
.format(device.local_mem_size/1024.0))
print(' Device - Constant Memory: {0:.0f} KB'\
.format(device.max_constant_buffer_size/1024.0))
print(' Device - Global Memory: {0:.0f} GB'\
.format(device.global_mem_size/1073741824.0))
print(' Device - Max Buffer/Image Size: {0:.0f} MB'\
.format(device.max_mem_alloc_size/1048576.0))
print(' Device - Max Work Group Size: {0:.0f}'\
.format(device.max_work_group_size))
print('\n')
if __name__ == "__main__":
print_device_info()
| 42.513514 | 67 | 0.527018 | [
"MIT"
] | jsdnhk/python-parallel-programming-cookbook-code | Chapter 6/PyOPENCL/PyOpenCLDeviceInfo.py | 1,573 | Python |
'''
Borrowed from Asteroid.py and Ship.py which was created by Lukas Peraza
url: https://github.com/LBPeraza/Pygame-Asteroids
Subzero sprite borrowed from: https://www.spriters-resource.com/playstation/mkmsz/sheet/37161/
'''
import pygame
import os
from CollegiateObjectFile import CollegiateObject
# right in variable means facing right, left means facing left
class Character(CollegiateObject):
@staticmethod
def init(character):
# Create a list of every image of a character
images = []
path = "images/%s/ordered images" %character
# Upload each image in order, and resize accordingly
maxDim = 70
        for imageName in sorted(os.listdir(path)):  # listdir order is arbitrary; sort to keep the intended image order
maxDim = 70
image = pygame.image.load(path + os.sep + imageName)
if "effect" in imageName:
# Resize special move effects images with static attribute maxDim
if character == "goku" or character == "raizen" or character == "naruto" or character == "sasuke":
maxDim = 120
else:
maxDim = 70
w, h = image.get_size()
factor = 1
if w != maxDim:
factor = maxDim / w
if h != maxDim:
factor = maxDim / h
image = pygame.transform.scale( image, ( int(w * factor), int(h * factor) ) )
elif "jump" in imageName:
# Resize special move effects images with static attribute maxDim
w, h = image.get_size()
factor = 1
if w != Character.maxWidth:
factor = Character.maxWidth / w
image = pygame.transform.scale( image, ( int(w * factor), int(h * factor) ) )
else:
# Resize character images with static attribute maxWidth and maxHeight
w, h = image.get_size()
factor = 1
if w != Character.maxWidth:
factor = Character.maxWidth / w
if h != Character.maxHeight:
factor = Character.maxHeight / h
image = pygame.transform.scale( image, ( int(w * factor), int(h * factor) ) )
images.append(image)
Character.charactersDict[character] = Character.charactersDict.get(character, images)
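    # Usage sketch (character name and screen size are hypothetical): init() must
    # run once per fighter so the sprite cache below is filled before construction:
    #   Character.init("subzero")
    #   player_one = Character("subzero", 1000, 600, True, 1)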
# Create a dictionary of the images of a character mapped to the character
charactersDict = {}
maxWidth = 100
maxHeight = 170
maxDim = 70
gravity = .75
runVel = 10
maxHealth = 300
maxEnergy = 100
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
orange = (255, 128, 0)
def __init__(self, character, screenWidth, screenHeight, isRight, player):
self.character = character
self.player = player
self.isRight = isRight
if self.character == "subzero": self.specialName = "freeze"
elif self.character == "scorpion": self.specialName = "spear"
elif self.character == "raizen": self.specialName = "spirit shotgun"
elif self.character == "goku": self.specialName = "kamehameha"
elif self.character == "naruto": self.specialName = "rasengan"
elif self.character == "sasuke": self.specialName = "chidori"
Character.maxHeight = screenHeight
Character.maxWidth = screenWidth
# Initiate health and energy bars
margin = 5
barMargin = margin + 45
self.barHeight = 10
self.healthY = 10
self.healthWidth = 300
self.health = Character.maxHealth
self.healthColor = Character.green
labeledge = 20
if self.player == 1:
self.healthX = barMargin
elif self.player == 2:
self.healthX = screenWidth - barMargin - self.healthWidth - labeledge
self.energyY = 30
self.energy = Character.maxEnergy
self.energyColor = Character.red
if self.player == 1:
self.energyX = barMargin
elif self.player == 2:
self.energyX = screenWidth - barMargin - self.healthWidth - labeledge
self.images = Character.charactersDict[character]
# All imported images are uploaded in the following order: icon, idle, jump, block, run, punch, kick, special, effect
characterInstances = ["icon", "idle", "jump", "block", "damage1", "run", "punch", "kick", "special1", "effect1"]# added "damage", after block
# Create a dictionary mapping the character instance to it's respective image
self.spriteRightDict = {}
i = 0
for instance in characterInstances:
self.spriteRightDict[instance] = self.spriteRightDict.get(instance, self.images[i])
i += 1
# Flip all pictures to face left for left disctionary
self.spriteLeftDict = {}
j = 0
for sprite in characterInstances:
# Don't want to flip run image yet
if sprite == "run":
self.spriteLeftDict[sprite] = self.images[j]
image = pygame.transform.flip(self.images[j], True, False)
self.spriteLeftDict[sprite] = self.spriteLeftDict.get(sprite, image)
j += 1
# Pass information to parent CollegiateObject class to initialize character
self.spriteDict = {}
# Get the starting image, and x location
if self.isRight:
self.spriteDict = self.spriteRightDict
idleImage = self.spriteRightDict["idle"]
w, h = idleImage.get_size()
x = margin + (w // 2)
elif not self.isRight:
self.spriteDict = self.spriteLeftDict
idleImage = self.spriteLeftDict["idle"]
w, h = idleImage.get_size()
x = screenWidth - margin - (w // 2)
r = max(w,h) // 2
y = screenHeight - margin - (h // 2)
super(Character, self).__init__(x, y, idleImage, r)
# Get dictionary of sounds (actually set in run game, but initiated here)
self.sounds = {}
# Set other attributes
self.isDead = False
self.isFlipped = False
self.isIdle = True
self.idleCount = 0
self.isAttack = False
self.isDamage = False
# Keep damage image for 1 second
self.damageCount = 1
self.isRunLeft = False
self.isRunRight = False
self.isJump = False
self.jumpVel = 10
self.peakJump = screenWidth // 4
self.idleY = self.y
self.isBlock = False
self.isPunch = False
# Keep punch image for 1 second
self.punchCount = 1
self.punchDamage = 20
self.isKick = False
self.kickCount = 20
self.kickDamage = 25
self.isSpecial = False
self.specialCount = 30
self.specialDamage = 50
#print("Loaded Character")
def loseHealth(self, damage):
if self.isBlock: self.sounds["block"].play()
if not self.isBlock: self.sounds["damage1"].play()
if self.isDamage and self.health > 0:
self.health -= damage
if self.health <= 0:
if self.healthColor == Character.green:
self.health = Character.maxHealth
self.healthColor = Character.orange
elif self.healthColor == Character.orange:
self.health = Character.maxHealth
self.healthColor = Character.red
else:
self.health = 0
self.isDead = True
if not self.isBlock:
self.baseImage = self.spriteDict["damage1"]
def getEnergy(self):
increment = 10
maxEnergy = 100
if self.energy <= (maxEnergy - increment) and self.isAttack:
self.energy += increment
if self.energy >= Character.maxEnergy:
self.energy = Character.maxEnergy
def update(self, dt, keysDown, screenWidth, screenHeight):
# Change facing direction when characters switch sides
if self.isRight:
self.spriteDict = self.spriteRightDict
elif not self.isRight:
self.spriteDict = self.spriteLeftDict
player1Moves = {"Left": keysDown(pygame.K_a), "Right": keysDown(pygame.K_d),
"Down": keysDown(pygame.K_s), "Up": keysDown(pygame.K_w),
"Punch": keysDown(pygame.K_v), "Kick": keysDown(pygame.K_c),
"Special1": keysDown(pygame.K_SPACE) }
player2Moves = {"Left": keysDown(pygame.K_LEFT), "Right": keysDown(pygame.K_RIGHT),
"Down": keysDown(pygame.K_DOWN), "Up": keysDown(pygame.K_UP),
"Punch": keysDown(pygame.K_l), "Kick": keysDown(pygame.K_k),
"Special1": keysDown(pygame.K_j) }
if self.player == 1:
self.moves = player1Moves
elif self.player == 2:
self.moves = player2Moves
self.idleCount += 1
margin = 5
boarderLeft = 0 + margin + (self.width // 2)
boarderRight = screenWidth - margin - (self.width // 2)
boarderBottom = screenHeight - margin - (self.height // 2)
if self.moves["Left"] and self.x > boarderLeft and not self.isJump and not self.isBlock and not self.isDamage:
self.x -= Character.runVel
self.baseImage = pygame.transform.flip(self.spriteDict["run"], True, False)
self.isRunLeft = True
self.isIdle = False
if self.isRunLeft and not self.isJump and not self.moves["Left"]:
self.isRunLeft = False
self.isIdle = True
self.baseImage = self.spriteDict["idle"]
if self.moves["Right"] and self.x < boarderRight and not self.isJump and not self.isBlock and not self.isDamage:
# not elif! if we're holding left and right, don't turn
self.x += Character.runVel
self.baseImage = self.spriteDict["run"]
self.isRunRight = True
self.isIdle = False
if self.isRunRight and not self.isJump and not self.moves["Right"]:
self.isRunRight = False
self.isIdle = True
self.baseImage = self.spriteDict["idle"]
if self.moves["Down"] and not self.isJump and not self.isDamage:
self.baseImage = self.spriteDict["block"]
self.isBlock = True
self.isIdle = False
if self.isBlock and not self.moves["Down"]:
self.isBlock = False
self.isIdle = True
self.baseImage = self.spriteDict["idle"]
if self.moves["Up"] and self.y >= boarderBottom and not self.isJump and not self.isBlock and not self.isDamage:# and self.isIdle:
self.sounds["jump"].play()
self.baseImage = self.spriteDict["jump"]
self.isJump = True
self.isIdle = False
elif self.isJump:
if self.jumpVel >= 0:
self.y -= (self.jumpVel** 2) // 2
if self.isRunLeft and (self.x - Character.runVel) >= boarderLeft:
self.x -= Character.runVel
elif self.isRunRight and (self.x + Character.runVel) <= boarderRight:
self.x += Character.runVel
self.jumpVel -= Character.gravity
else:
self.y += (self.jumpVel** 2) // 2
if self.isRunLeft and (self.x - Character.runVel) >= boarderLeft:
self.x -= Character.runVel
elif self.isRunRight and (self.x + Character.runVel) <= boarderRight:
self.x += Character.runVel
self.jumpVel -= Character.gravity
if self.y > self.idleY:
self.baseImage = self.spriteDict["idle"]
self.y = self.idleY
self.isJump = False
self.isRunLeft = False
self.isRunRight = False
self.isIdle = True
self.jumpVel = 10
if self.moves["Punch"] and self.isIdle and self.idleCount >= 20 and not self.isPunch and not self.isDamage:
self.sounds["punch"].play()
self.baseImage = self.spriteDict["punch"]
self.isPunch = True
self.isIdle = False
elif self.isPunch:
if self.punchCount >= 0:
self.punchCount -= 1
else:
self.isPunch = False
self.isIdle = True
self.idleCount = 0
self.punchCount = 20
self.baseImage = self.spriteDict["idle"]
if self.moves["Kick"] and self.isIdle and self.idleCount >= 20 and not self.isDamage:
self.sounds["kick"].play()
self.baseImage = self.spriteDict["kick"]
self.isKick = True
self.isIdle = False
elif self.isKick:
if self.kickCount >= 0:
self.kickCount -= 1
else:
self.isKick = False
self.isIdle = True
self.idleCount = 0
self.kickCount = 20
self.baseImage = self.spriteDict["idle"]
if self.moves["Special1"] and self.isIdle and self.idleCount >= 20 and (self.energy >= self.specialDamage) and not self.isJump and not self.isBlock and not self.isDamage:
self.sounds["special1"].play()
self.baseImage = self.spriteDict["special1"]
self.isSpecial = True
self.isIdle = False
self.energy -= self.specialDamage
elif self.isSpecial:
if self.specialCount >= 0:
self.specialCount -= 1
else:
self.isSpecial = False
self.isIdle = True
self.idleCount = 0
self.specialCount = 30
self.baseImage = self.spriteDict["idle"]
super(Character, self).update(screenWidth, screenHeight)
| 36.684729 | 178 | 0.530348 | [
"MIT"
] | lbw798/collegiate-combat | CharacterFile.py | 14,894 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .advanced_filter import AdvancedFilter
class NumberNotInAdvancedFilter(AdvancedFilter):
"""NumberNotIn Filter.
All required parameters must be populated in order to send to Azure.
    :param key: The filter key. Represents an event property with up to two
levels of nesting.
:type key: str
:param operator_type: Required. Constant filled by server.
:type operator_type: str
:param values: The set of filter values
:type values: list[float]
"""
_validation = {
'operator_type': {'required': True},
}
_attribute_map = {
'key': {'key': 'key', 'type': 'str'},
'operator_type': {'key': 'operatorType', 'type': 'str'},
'values': {'key': 'values', 'type': '[float]'},
}
def __init__(self, **kwargs):
super(NumberNotInAdvancedFilter, self).__init__(**kwargs)
self.values = kwargs.get('values', None)
self.operator_type = 'NumberNotIn'
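# Illustrative usage (not part of the generated SDK; key and values are made up):
# a filter passing events whose 'data.size' property is neither 5 nor 10 could be
# built as NumberNotInAdvancedFilter(key='data.size', values=[5.0, 10.0]); it then
# serializes with operatorType 'NumberNotIn' as described by the attribute map above.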
| 33.139535 | 76 | 0.597895 | [
"MIT"
] | NMijat1024/azure-sdk-for-python | azure-mgmt-eventgrid/azure/mgmt/eventgrid/models/number_not_in_advanced_filter.py | 1,425 | Python |
import os
import sys
import socket
import struct
import SocketServer
import threadpool
# fake ip list
FAKE_IPLIST = {}
# dns server config
TIMEOUT = 2 # socket timeout in seconds
TRY_TIMES = 5 # number of attempts to receive a valid reply
DNS_SERVER = '8.8.8.8' # remote dns server
# currently not used
def bytetodomain(s):
domain = ''
i = 0
length = struct.unpack('!B', s[0:1])[0]
while length != 0:
i += 1
domain += s[i:i + length]
i += length
length = struct.unpack('!B', s[i:i+1])[0]
if length != 0:
domain += '.'
return (domain, i + 1)
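# Worked example of the length-prefixed label format parsed above: the bytes
# '\x03www\x07example\x03com\x00' decode to 'www.example.com', so bytetodomain
# would return ('www.example.com', 17) - the name plus the number of bytes consumed.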
def skip_query(query):
step = 0
length = struct.unpack('!B', query[0:1])[0]
while length != 0:
step = step + length + 1
length = struct.unpack('!B', query[step:step+1])[0]
return step + 1
def is_valid_pkt(response):
try:
(flag, qdcount, ancount) = struct.unpack('!HHH', response[2:8])
if flag != 0x8180 and flag != 0x8580:
return True
if 1 != qdcount or 1 != ancount:
return True
dlen = skip_query(response[12:])
pos = 12 + dlen
(qtype, qclass) = struct.unpack('!HH', response[pos:pos+4])
# qtype is 1 (mean query HOST ADDRESS), qclass is 1 (mean INTERNET)
if 1 != qtype or 1 != qclass:
return True
pos = pos + 4 # position for response
if ord(response[pos:pos+1]) & 0xc0:
pos = pos + 12
else:
pos = pos + dlen + 10
if response[pos:pos+4] in FAKE_IPLIST:
print('Match: ' + socket.inet_ntoa(response[pos:pos+4]))
return False
except Exception, e:
print(e)
return True
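# Note on the fake-IP match above: FAKE_IPLIST is keyed by packed 4-byte addresses
# (socket.inet_aton('1.2.3.4') == '\x01\x02\x03\x04'), so the answer's address bytes
# response[pos:pos+4] can be looked up directly without converting back to text.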
class ThreadPoolMixIn:
def process_request_thread(self, request, client_address):
try:
self.finish_request(request, client_address)
self.shutdown_request(request)
except:
self.handle_error(request, client_address)
self.shutdown_request(request)
def process_request(self, request, client_address):
self.tp.add_task(self.process_request_thread, request, client_address)
def serve_forever(self, poll_interval=0.5):
try:
SocketServer.UDPServer.serve_forever(self, poll_interval)
finally:
self.tp.stop()
class DNSFilter(ThreadPoolMixIn, SocketServer.UDPServer):
# much faster rebinding
allow_reuse_address = True
def __init__(self, s, t):
self.tp = threadpool.ThreadPool(20)
SocketServer.UDPServer.__init__(self, s, t)
class ThreadedUDPRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
query_data = self.request[0]
udp_sock = self.request[1]
addr = self.client_address
response = self.dns_query(DNS_SERVER, 53, query_data)
if response:
# udp dns packet no length
udp_sock.sendto(response, addr)
def dns_query(self, dns_ip, dns_port, query_data):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.settimeout(TIMEOUT) # set socket timeout (TIMEOUT seconds)
s.sendto(query_data, (dns_ip, dns_port))
for i in xrange(TRY_TIMES):
data, addr = s.recvfrom(1024)
if is_valid_pkt(data):
return data
else:
data = None
except:
return None
finally:
if s: s.close()
return data
if __name__ == '__main__':
print '---------------------------------------------------------------'
print '| To Use this tool, you must set your dns server to 127.0.0.1 |'
print '---------------------------------------------------------------'
# load config file, iplist.txt from https://github.com/clowwindy/ChinaDNS
with open('iplist.txt', 'rb') as f:
while 1:
ip = f.readline()
if ip:
FAKE_IPLIST[socket.inet_aton(ip[:-1])] = None
else:
break
dns_server = DNSFilter(('0.0.0.0', 53), ThreadedUDPRequestHandler)
try:
dns_server.serve_forever()
except:
pass
finally:
pass
| 27.796178 | 78 | 0.539872 | [
"MIT"
] | isayme/DNSFilter | DNSFilter.py | 4,364 | Python |
"""Djinni manager tool"""
import os
import ezored.functions as fn
import ezored.logging as log
from ezored import constants as const
# -----------------------------------------------------------------------------
def run(params={}):
args = params['args']
if len(args) > 0:
action = args[0]
if action:
if action == 'generate':
generate(params)
else:
help(params)
else:
help(params)
else:
help(params)
# -----------------------------------------------------------------------------
def generate(params={}):
dirs = fn.find_dirs_simple(os.path.join(
fn.root_dir(),
const.DIR_NAME_FILES,
const.DIR_NAME_DJINNI),
'*'
)
if dirs:
log.info('Generating files for all modules...')
dirs.sort()
for item in dirs:
if fn.file_exists(os.path.join(item, 'generate.py')):
dir_name = os.path.basename(item)
log.info('Generating djinni files for "{0}"...'.format(dir_name))
fn.run_simple(['python', 'generate.py'], item)
log.ok()
else:
log.error('No djinni modules to generate')
# -----------------------------------------------------------------------------
def help(params={}):
log.colored('Available actions:\n', log.PURPLE)
log.normal(' - generate')
# -----------------------------------------------------------------------------
def get_description(params={}):
return 'Djinni manager tool'
| 25.622951 | 81 | 0.442099 | [
"MIT"
] | uilianries/ezored | files/commands/djinni/djinni.py | 1,563 | Python |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
import datetime
from scrapy.conf import settings
# List of education levels (values kept in Chinese to match the scraped data)
educations = ("不限","大专","本科","硕士","博士")
# Fix the education level: some postings carry a level that is clearly inconsistent,
# so fall back to scanning the job body for a known level, defaulting to "不限".
def clean_education(edu, body):
    if edu not in educations:
        edu = '不限'
        for i in educations:
            if i in body:
                edu = i
    return edu
def clear_salary(salary):
res = salary.split("-")
temp = []
for x in res:
temp.append(int(x.upper().replace("K"," "))*1000)
result = {
"min":temp[0],
"max":temp[1],
"avg":int((temp[0]+temp[1])/2)
}
return result
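# Worked example (tracing clear_salary above): "10k-15k" splits into ["10k", "15k"],
# the "K" is stripped and the numbers scaled by 1000, returning
# {"min": 10000, "max": 15000, "avg": 12500}.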
def clear_time(time):
now_year = datetime.datetime.now().year
if '发布于' in time:
time = time.replace("发布于", str(now_year)+"-")
time = time.replace("月", "-")
time = time.replace("日", "")
if time.find("昨天") > 0:
time = str(datetime.date.today() - datetime.timedelta(days=1))
elif time.find(":") > 0:
time = str(datetime.date.today())
return time
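# Example for clear_time above (assuming a raw value such as "发布于03月05日"): the
# "发布于" prefix is replaced by the current year, "月" becomes "-" and "日" is dropped,
# giving e.g. "2019-03-05"; values containing "昨天" or a clock time map to yesterday/today.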
def clear_position(name):
data = name.split(" ")
name = data[0]
work_year = data[-2]
educational = data[-1]
return name,work_year,educational
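# Example for clear_position above (assuming a space-separated title such as
# "PHP工程师 深圳 3-5年 本科"): the first token is the name and the last two tokens are
# the required experience and education, i.e. ("PHP工程师", "3-5年", "本科").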
# Check whether "PHP" appears in the position name; if not, the item is filtered out.
# The JD body is not used for this check because many JDs mention PHP indiscriminately.
def clean_name(name):
if "PHP" not in name.upper():
return False
return True
class TutorialPipeline(object):
def process_item(self, item, spider):
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['job']
collection = db['position2']
collection.insert(dict(item))
client.close()
return item
# Pipeline for Boss Zhipin (直聘网) data
class ZhipinPipeline(object):
def process_item(self, item, spider):
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['job']
collection = db['position']
item['salary'] = clear_salary(item['salary'])
item['create_time'] = clear_time(item['create_time'])
item['educational'] = clean_education(item['educational'],item['body'])
is_php = clean_name(item['position_name'])
if is_php is True:
collection.insert(dict(item))
client.close()
return item
# Pipeline for 51job data
class FiveJobPipeline(object):
def clear_salary(self,salary):
lists = salary.split("/")[0].split('-')
min,max = lists
unit = 10000
if "千" in max:
unit = 1000
max = max.replace("千","")
else:
max = max.replace("万","")
result = {}
result['min'] = float(min)*unit
result['max'] = float(max)*unit
result['avg'] = (result['max']+result['min'])/2
return result
def clear_address(self,address):
if "上班地址" in address:
address = address.replace("上班地址 :"," ")
return address
    def clear_workyear(self, work_year):
        if "经验" in work_year:
            # strip both the "工作经验" and the bare "经验" suffixes
            work_year = work_year.replace("工作经验", " ").replace("经验", " ")
        return work_year
def process_item(self, item, spider):
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['job']
collection = db['51job']
item['salary'] = self.clear_salary(salary=item['salary'])
item['address'] = self.clear_address(address=item['address'])
item['work_year'] = self.clear_workyear(work_year=item['work_year'])
collection.insert(dict(item))
client.close()
return item
| 29.44 | 84 | 0.580435 | [
"MIT"
] | Annihilater/spider_job | spider/python/tutorial/pipelines.py | 3,902 | Python |
# BSD 2-Clause License
# Copyright (c) 2018, Stan Sakl
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
if __name__ == '__main__':
print("Running kwic.py")
inputFile = open("input.txt", "r", 1)
outputFile = open("output.txt", "w", 1)
lines = inputFile.readlines()
for line in lines:
substring = line.split()
substring.sort()
sortedString = []
for string in substring:
#print(string)
sortedString.append(string)
outputFile.write(string)
outputFile.write(" ")
outputFile.write("\n")
print(sortedString)
inputFile.close()
outputFile.close()
| 38.36 | 80 | 0.721064 | [
"BSD-2-Clause"
] | stansakl/kwic | kwic_python/kwic.py | 1,918 | Python |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadResponse
from ccxt.base.errors import DDoSProtection
from ccxt.base.precise import Precise
class bytetrade(Exchange):
def describe(self):
return self.deep_extend(super(bytetrade, self).describe(), {
'id': 'bytetrade',
'name': 'ByteTrade',
'countries': ['HK'],
'rateLimit': 500,
'requiresWeb3': True,
'certified': False,
# new metainfo interface
'has': {
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'fetchBalance': True,
'fetchBidsAsks': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchWithdrawals': True,
'withdraw': False,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'4h': '4h',
'1d': '1d',
'5d': '5d',
'1w': '1w',
'1M': '1M',
},
'urls': {
'test': {
'market': 'https://api-v2-test.byte-trade.com',
'public': 'https://api-v2-test.byte-trade.com',
},
'logo': 'https://user-images.githubusercontent.com/1294454/67288762-2f04a600-f4e6-11e9-9fd6-c60641919491.jpg',
'api': {
'market': 'https://api-v2.byte-trade.com',
'public': 'https://api-v2.byte-trade.com',
},
'www': 'https://www.byte-trade.com',
'doc': 'https://docs.byte-trade.com/#description',
},
'api': {
'market': {
'get': [
'klines', # Kline of a symbol
'depth', # Market Depth of a symbol
'trades', # Trade records of a symbol
'tickers',
],
},
'public': {
'get': [
'symbols', # Reference information of trading instrument, including base currency, quote precision, etc.
'currencies', # The list of currencies available
'balance', # Get the balance of an account
'orders/open', # Get the open orders of an account
'orders/closed', # Get the closed orders of an account
'orders/all', # Get the open and closed orders of an account
'orders', # Get the details of an order of an account
'orders/trades', # Get detail match results
'depositaddress', # Get deposit address
'withdrawals', # Get withdrawals info
'deposits', # Get deposit info
'transfers', # Get transfer info
],
'post': [
'transaction/createorder', # Post create order transaction to blockchain
'transaction/cancelorder', # Post cancel order transaction to blockchain
'transaction/withdraw', # Post withdraw transaction to blockchain
'transaction/transfer', # Post transfer transaction to blockchain
],
},
},
'fees': {
'trading': {
'taker': 0.0008,
'maker': 0.0008,
},
},
'commonCurrencies': {
'1': 'ByteTrade',
'44': 'ByteHub',
'48': 'Blocktonic',
'133': 'TerraCredit',
},
'exceptions': {
'vertify error': AuthenticationError, # typo on the exchange side, 'vertify'
'verify error': AuthenticationError, # private key signature is incorrect
'transaction already in network': BadRequest, # same transaction submited
'invalid argument': BadRequest,
},
'options': {
'orderExpiration': 31536000000, # one year
},
})
async def fetch_currencies(self, params={}):
currencies = await self.publicGetCurrencies(params)
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_string(currency, 'code')
code = None
if id in self.commonCurrencies:
code = self.commonCurrencies[id]
else:
code = self.safe_string(currency, 'name')
name = self.safe_string(currency, 'fullname')
# in byte-trade.com DEX, request https://api-v2.byte-trade.com/currencies will return currencies,
# the api doc is https://github.com/Bytetrade/bytetrade-official-api-docs/wiki/rest-api#get-currencies-get-currencys-supported-in-bytetradecom
# we can see the coin name is none-unique in the result, the coin which code is 18 is the CyberMiles ERC20, and the coin which code is 35 is the CyberMiles main chain, but their name is same.
# that is because bytetrade is a DEX, supports people create coin with the same name, but the id(code) of coin is unique, so we should use the id or name and id as the identity of coin.
# For coin name and symbol is same with CCXT, I use name@id as the key of commonCurrencies dict.
# [{
# "name": "CMT", # currency name, non-unique
# "code": "18", # currency id, unique
# "type": "crypto",
# "fullname": "CyberMiles",
# "active": True,
# "chainType": "ethereum",
# "basePrecision": 18,
# "transferPrecision": 10,
# "externalPrecision": 18,
# "chainContractAddress": "0xf85feea2fdd81d51177f6b8f35f0e6734ce45f5f",
# "limits": {
# "deposit": {
# "min": "0",
# "max": "-1"
# },
# "withdraw": {
# "min": "0",
# "max": "-1"
# }
# }
# },
# {
# "name": "CMT",
# "code": "35",
# "type": "crypto",
# "fullname": "CyberMiles",
# "active": True,
# "chainType": "cmt",
# "basePrecision": 18,
# "transferPrecision": 10,
# "externalPrecision": 18,
# "chainContractAddress": "0x0000000000000000000000000000000000000000",
# "limits": {
# "deposit": {
# "min": "1",
# "max": "-1"
# },
# "withdraw": {
# "min": "10",
# "max": "-1"
# }
# }
# }
# ]
active = self.safe_value(currency, 'active')
limits = self.safe_value(currency, 'limits')
deposit = self.safe_value(limits, 'deposit')
amountPrecision = self.safe_integer(currency, 'basePrecision')
maxDeposit = self.safe_number(deposit, 'max')
if maxDeposit == -1.0:
maxDeposit = None
withdraw = self.safe_value(limits, 'withdraw')
maxWithdraw = self.safe_number(withdraw, 'max')
if maxWithdraw == -1.0:
maxWithdraw = None
result[code] = {
'id': id,
'code': code,
'name': name,
'active': active,
'precision': amountPrecision,
'fee': None,
'limits': {
'amount': {'min': None, 'max': None},
'deposit': {
'min': self.safe_number(deposit, 'min'),
'max': maxDeposit,
},
'withdraw': {
'min': self.safe_number(withdraw, 'min'),
'max': maxWithdraw,
},
},
'info': currency,
}
return result
async def fetch_markets(self, params={}):
markets = await self.publicGetSymbols(params)
result = []
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string(market, 'symbol')
base = self.safe_string(market, 'baseName')
quote = self.safe_string(market, 'quoteName')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
normalBase = base.split('@' + baseId)[0]
normalQuote = quote.split('@' + quoteId)[0]
if quoteId == '126':
normalQuote = 'ZAR' # The id 126 coin is a special coin whose name on the chain is actually ZAR, but it is changed to ZCN after creation, so it must be changed to ZAR when placing the transaction in the chain
normalSymbol = normalBase + '/' + normalQuote
if baseId in self.commonCurrencies:
base = self.commonCurrencies[baseId]
if quoteId in self.commonCurrencies:
quote = self.commonCurrencies[quoteId]
symbol = base + '/' + quote
limits = self.safe_value(market, 'limits', {})
amount = self.safe_value(limits, 'amount', {})
price = self.safe_value(limits, 'price', {})
precision = self.safe_value(market, 'precision', {})
active = self.safe_string(market, 'active')
entry = {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'active': active,
'precision': {
'amount': self.safe_integer(precision, 'amount'),
'price': self.safe_integer(precision, 'price'),
},
'normalSymbol': normalSymbol,
'limits': {
'amount': {
'min': self.safe_number(amount, 'min'),
'max': self.safe_number(amount, 'max'),
},
'price': {
'min': self.safe_number(price, 'min'),
'max': self.safe_number(price, 'max'),
},
'cost': {
'min': None,
'max': None,
},
},
}
result.append(entry)
return result
async def fetch_balance(self, params={}):
if not ('userid' in params) and (self.apiKey is None):
raise ArgumentsRequired(self.id + ' fetchDeposits() requires self.apiKey or userid argument')
await self.load_markets()
request = {
'userid': self.apiKey,
}
balances = await self.publicGetBalance(self.extend(request, params))
result = {'info': balances}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'code')
code = self.safe_currency_code(currencyId, None)
account = self.account()
account['free'] = self.safe_string(balance, 'free')
account['used'] = self.safe_string(balance, 'used')
result[code] = account
return self.parse_balance(result, False)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # default = maximum = 100
response = await self.marketGetDepth(self.extend(request, params))
timestamp = self.safe_value(response, 'timestamp')
orderbook = self.parse_order_book(response, symbol, timestamp)
return orderbook
def parse_ticker(self, ticker, market=None):
timestamp = self.safe_integer(ticker, 'timestamp')
#
# [
# {
# "symbol":"68719476706",
# "name":"ETH/BTC",
# "base":"2",
# "quote":"32",
# "timestamp":1575905991933,
# "datetime":"2019-12-09T15:39:51.933Z",
# "high":"0",
# "low":"0",
# "open":"0",
# "close":"0",
# "last":"0",
# "change":"0",
# "percentage":"0",
# "baseVolume":"0",
# "quoteVolume":"0"
# }
# ]
#
symbol = None
marketId = self.safe_string(ticker, 'symbol')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
baseId = self.safe_string(ticker, 'base')
quoteId = self.safe_string(ticker, 'quote')
if (baseId is not None) and (quoteId is not None):
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if (symbol is None) and (market is not None):
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': self.safe_number(ticker, 'weightedAvgPrice'),
'open': self.safe_number(ticker, 'open'),
'close': self.safe_number(ticker, 'close'),
'last': self.safe_number(ticker, 'last'),
'previousClose': None, # previous day close
'change': self.safe_number(ticker, 'change'),
'percentage': self.safe_number(ticker, 'percentage'),
'average': None,
'baseVolume': self.safe_number(ticker, 'baseVolume'),
'quoteVolume': self.safe_number(ticker, 'quoteVolume'),
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.marketGetTickers(self.extend(request, params))
#
# [
# {
# "symbol":"68719476706",
# "name":"ETH/BTC",
# "base":"2",
# "quote":"32",
# "timestamp":1575905991933,
# "datetime":"2019-12-09T15:39:51.933Z",
# "high":"0",
# "low":"0",
# "open":"0",
# "close":"0",
# "last":"0",
# "change":"0",
# "percentage":"0",
# "baseVolume":"0",
# "quoteVolume":"0"
# }
# ]
#
if isinstance(response, list):
ticker = self.safe_value(response, 0)
if ticker is None:
raise BadResponse(self.id + ' fetchTicker() returned an empty response')
return self.parse_ticker(ticker, market)
return self.parse_ticker(response, market)
async def fetch_bids_asks(self, symbols=None, params={}):
await self.load_markets()
response = await self.marketGetDepth(params)
return self.parse_tickers(response, symbols)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.marketGetTickers(params)
return self.parse_tickers(response, symbols)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1591505760000,
# "242.7",
# "242.76",
# "242.69",
# "242.76",
# "0.1892"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'timeframe': self.timeframes[timeframe],
}
if since is not None:
request['since'] = since
if limit is not None:
request['limit'] = limit
response = await self.marketGetKlines(self.extend(request, params))
#
# [
# [1591505760000,"242.7","242.76","242.69","242.76","0.1892"],
# [1591505820000,"242.77","242.83","242.7","242.72","0.6378"],
# [1591505880000,"242.72","242.73","242.61","242.72","0.4141"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
timestamp = self.safe_integer(trade, 'timestamp')
price = self.safe_number(trade, 'price')
amount = self.safe_number(trade, 'amount')
cost = self.safe_number(trade, 'cost')
id = self.safe_string(trade, 'id')
type = self.safe_string(trade, 'type')
takerOrMaker = self.safe_string(trade, 'takerOrMaker')
side = self.safe_string(trade, 'side')
datetime = self.iso8601(timestamp) # self.safe_string(trade, 'datetime')
order = self.safe_string(trade, 'order')
symbol = None
if market is None:
marketId = self.safe_string(trade, 'symbol')
market = self.safe_value(self.markets_by_id, marketId)
if market is not None:
symbol = market['symbol']
feeData = self.safe_value(trade, 'fee')
feeCost = self.safe_number(feeData, 'cost')
feeRate = self.safe_number(feeData, 'rate')
feeCode = self.safe_string(feeData, 'code')
feeCurrency = self.safe_currency_code(feeCode)
fee = {
'currency': feeCurrency,
'cost': feeCost,
'rate': feeRate,
}
return {
'info': trade,
'timestamp': timestamp,
'datetime': datetime,
'symbol': symbol,
'id': id,
'order': order,
'type': type,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if since is not None:
request['since'] = since
if limit is not None:
request['limit'] = limit # default = 100, maximum = 500
response = await self.marketGetTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_order(self, order, market=None):
status = self.safe_string(order, 'status')
symbol = None
marketId = self.safe_string(order, 'symbol')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
baseId = self.safe_string(order, 'base')
quoteId = self.safe_string(order, 'quote')
if (baseId is not None) and (quoteId is not None):
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if (symbol is None) and (market is not None):
symbol = market['symbol']
timestamp = self.safe_integer(order, 'timestamp')
datetime = self.safe_string(order, 'datetime')
lastTradeTimestamp = self.safe_integer(order, 'lastTradeTimestamp')
price = self.safe_number(order, 'price')
amount = self.safe_number(order, 'amount')
filled = self.safe_number(order, 'filled')
remaining = self.safe_number(order, 'remaining')
cost = self.safe_number(order, 'cost')
average = self.safe_number(order, 'average')
id = self.safe_string(order, 'id')
type = self.safe_string(order, 'type')
side = self.safe_string(order, 'side')
feeData = self.safe_value(order, 'fee')
feeCost = self.safe_number(feeData, 'cost')
feeRate = self.safe_number(feeData, 'rate')
feeCode = self.safe_string(feeData, 'code')
feeCurrency = self.safe_currency_code(feeCode)
fee = {
'currency': feeCurrency,
'cost': feeCost,
'rate': feeRate,
}
return {
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': datetime,
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
self.check_required_dependencies()
if self.apiKey is None:
raise ArgumentsRequired('createOrder() requires self.apiKey or userid in params')
await self.load_markets()
market = self.market(symbol)
sideNum = None
typeNum = None
if side == 'sell':
sideNum = 1
else:
sideNum = 2
if type == 'limit':
typeNum = 1
else:
typeNum = 2
price = 0
normalSymbol = market['normalSymbol']
baseId = market['baseId']
baseCurrency = self.currency(market['base'])
amountTruncated = self.amount_to_precision(symbol, amount)
amountChain = self.toWei(amountTruncated, baseCurrency['precision'])
amountChainString = self.number_to_string(amountChain)
quoteId = market['quoteId']
quoteCurrency = self.currency(market['quote'])
priceRounded = self.price_to_precision(symbol, price)
priceChain = self.toWei(priceRounded, quoteCurrency['precision'])
priceChainString = self.number_to_string(priceChain)
now = self.milliseconds()
expiryDelta = self.safe_integer(self.options, 'orderExpiration', 31536000000)
expiration = self.milliseconds() + expiryDelta
datetime = self.iso8601(now)
datetime = datetime.split('.')[0]
expirationDatetime = self.iso8601(expiration)
expirationDatetime = expirationDatetime.split('.')[0]
defaultDappId = 'Sagittarius'
dappId = self.safe_string(params, 'dappId', defaultDappId)
defaultFee = self.safe_string(self.options, 'fee', '300000000000000')
totalFeeRate = self.safe_string(params, 'totalFeeRate', 8)
chainFeeRate = self.safe_string(params, 'chainFeeRate', 1)
fee = self.safe_string(params, 'fee', defaultFee)
eightBytes = Precise.stringPow('2', '64')
allByteStringArray = [
self.number_to_be(1, 32),
self.number_to_le(int(math.floor(now / 1000)), 4),
self.number_to_le(1, 1),
self.number_to_le(int(math.floor(expiration / 1000)), 4),
self.number_to_le(1, 1),
self.number_to_le(32, 1),
self.number_to_le(0, 8),
self.number_to_le(fee, 8), # string for 32 bit php
self.number_to_le(len(self.apiKey), 1),
self.encode(self.apiKey),
self.number_to_le(sideNum, 1),
self.number_to_le(typeNum, 1),
self.number_to_le(len(normalSymbol), 1),
self.encode(normalSymbol),
self.number_to_le(Precise.string_div(amountChainString, eightBytes, 0), 8),
self.number_to_le(Precise.string_mod(amountChainString, eightBytes), 8),
self.number_to_le(Precise.string_div(priceChainString, eightBytes, 0), 8),
self.number_to_le(Precise.string_mod(priceChainString, eightBytes), 8),
self.number_to_le(0, 2),
self.number_to_le(int(math.floor(now / 1000)), 4),
self.number_to_le(int(math.floor(expiration / 1000)), 4),
self.number_to_le(1, 1),
self.number_to_le(int(chainFeeRate), 2),
self.number_to_le(1, 1),
self.number_to_le(int(totalFeeRate), 2),
self.number_to_le(int(quoteId), 4),
self.number_to_le(int(baseId), 4),
self.number_to_le(0, 1),
self.number_to_le(1, 1),
self.number_to_le(len(dappId), 1),
self.encode(dappId),
self.number_to_le(0, 1),
]
txByteStringArray = [
self.number_to_le(int(math.floor(now / 1000)), 4),
self.number_to_le(1, 1),
self.number_to_le(int(math.floor(expiration / 1000)), 4),
self.number_to_le(1, 1),
self.number_to_le(32, 1),
self.number_to_le(0, 8),
self.number_to_le(fee, 8), # string for 32 bit php
self.number_to_le(len(self.apiKey), 1),
self.encode(self.apiKey),
self.number_to_le(sideNum, 1),
self.number_to_le(typeNum, 1),
self.number_to_le(len(normalSymbol), 1),
self.encode(normalSymbol),
self.number_to_le(Precise.string_div(amountChainString, eightBytes, 0), 8),
self.number_to_le(Precise.string_mod(amountChainString, eightBytes), 8),
self.number_to_le(Precise.string_div(priceChainString, eightBytes, 0), 8),
self.number_to_le(Precise.string_mod(priceChainString, eightBytes), 8),
self.number_to_le(0, 2),
self.number_to_le(int(math.floor(now / 1000)), 4),
self.number_to_le(int(math.floor(expiration / 1000)), 4),
self.number_to_le(1, 1),
self.number_to_le(int(chainFeeRate), 2),
self.number_to_le(1, 1),
self.number_to_le(int(totalFeeRate), 2),
self.number_to_le(int(quoteId), 4),
self.number_to_le(int(baseId), 4),
self.number_to_le(0, 1),
self.number_to_le(1, 1),
self.number_to_le(len(dappId), 1),
self.encode(dappId),
self.number_to_le(0, 1),
]
txbytestring = self.binary_concat_array(txByteStringArray)
txidhash = self.hash(txbytestring, 'sha256', 'hex')
txid = txidhash[0:40]
orderidByteStringArray = [
self.number_to_le(len(txid), 1),
self.encode(txid),
self.number_to_be(0, 4),
]
orderidbytestring = self.binary_concat_array(orderidByteStringArray)
orderidhash = self.hash(orderidbytestring, 'sha256', 'hex')
orderid = orderidhash[0:40]
bytestring = self.binary_concat_array(allByteStringArray)
hash = self.hash(bytestring, 'sha256', 'hex')
signature = self.ecdsa(hash, self.secret, 'secp256k1', None, True)
recoveryParam = self.binary_to_base16(self.number_to_le(self.sum(signature['v'], 31), 1))
mySignature = recoveryParam + signature['r'] + signature['s']
operation = {
'now': datetime,
'expiration': expirationDatetime,
'fee': fee,
'creator': self.apiKey,
'side': sideNum,
'order_type': typeNum,
'market_name': normalSymbol,
'amount': amountChain,
'price': priceChain,
'use_btt_as_fee': False,
'money_id': int(quoteId),
'stock_id': int(baseId),
'custom_no_btt_fee_rate': int(totalFeeRate),
'custom_btt_fee_rate': int(chainFeeRate),
}
fatty = {
'timestamp': datetime,
'expiration': expirationDatetime,
'operations': [
[
32,
operation,
],
],
'validate_type': 0,
'dapp': dappId,
'signatures': [
mySignature,
],
}
request = {
'trObj': self.json(fatty),
}
response = await self.publicPostTransactionCreateorder(request)
timestamp = self.milliseconds()
statusCode = self.safe_string(response, 'code')
status = 'open' if (statusCode == '0') else 'failed'
return {
'info': response,
'id': orderid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': None,
'type': None,
'side': None,
'price': None,
'amount': None,
'filled': None,
'remaining': None,
'cost': None,
'trades': None,
'fee': None,
'clientOrderId': None,
'average': None,
}
async def fetch_order(self, id, symbol=None, params={}):
if not ('userid' in params) and (self.apiKey is None):
raise ArgumentsRequired('fetchOrder() requires self.apiKey or userid argument')
await self.load_markets()
request = {
'userid': self.apiKey,
}
market = None
if symbol is not None:
market = self.markets[symbol]
request['symbol'] = market['id']
request['id'] = id
response = await self.publicGetOrders(self.extend(request, params))
return self.parse_order(response, market)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
if not ('userid' in params) and (self.apiKey is None):
raise ArgumentsRequired('fetchOpenOrders() requires self.apiKey or userid argument')
await self.load_markets()
request = {
'userid': self.apiKey,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['limit'] = limit
if since is not None:
request['since'] = since
response = await self.publicGetOrdersOpen(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
if not ('userid' in params) and (self.apiKey is None):
raise ArgumentsRequired('fetchClosedOrders() requires self.apiKey or userid argument')
await self.load_markets()
market = None
request = {
'userid': self.apiKey,
}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['limit'] = limit
if since is not None:
request['since'] = since
response = await self.publicGetOrdersClosed(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if not ('userid' in params) and (self.apiKey is None):
raise ArgumentsRequired('fetchOrders() requires self.apiKey or userid argument')
await self.load_markets()
market = None
request = {
'userid': self.apiKey,
}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['limit'] = limit
if since is not None:
request['since'] = since
response = await self.publicGetOrdersAll(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def cancel_order(self, id, symbol=None, params={}):
if self.apiKey is None:
raise ArgumentsRequired('cancelOrder() requires hasAlreadyAuthenticatedSuccessfully')
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
baseId = market['baseId']
quoteId = market['quoteId']
normalSymbol = market['normalSymbol']
feeAmount = '300000000000000'
now = self.milliseconds()
expiration = 0
datetime = self.iso8601(now)
datetime = datetime.split('.')[0]
expirationDatetime = self.iso8601(expiration)
expirationDatetime = expirationDatetime.split('.')[0]
defaultDappId = 'Sagittarius'
dappId = self.safe_string(params, 'dappId', defaultDappId)
byteStringArray = [
self.number_to_be(1, 32),
self.number_to_le(int(math.floor(now / 1000)), 4),
self.number_to_le(1, 1),
self.number_to_le(expiration, 4),
self.number_to_le(1, 1),
self.number_to_le(33, 1),
self.number_to_le(0, 8),
self.number_to_le(feeAmount, 8), # string for 32 bit php
self.number_to_le(len(self.apiKey), 1),
self.encode(self.apiKey),
self.number_to_le(len(normalSymbol), 1),
self.encode(normalSymbol),
self.base16_to_binary(id),
self.number_to_le(int(quoteId), 4),
self.number_to_le(int(baseId), 4),
self.number_to_le(0, 1),
self.number_to_le(1, 1),
self.number_to_le(len(dappId), 1),
self.encode(dappId),
self.number_to_le(0, 1),
]
bytestring = self.binary_concat_array(byteStringArray)
hash = self.hash(bytestring, 'sha256', 'hex')
signature = self.ecdsa(hash, self.secret, 'secp256k1', None, True)
recoveryParam = self.binary_to_base16(self.number_to_le(self.sum(signature['v'], 31), 1))
mySignature = recoveryParam + signature['r'] + signature['s']
operation = {
'fee': feeAmount,
'creator': self.apiKey,
'order_id': id,
'market_name': normalSymbol,
'money_id': int(quoteId),
'stock_id': int(baseId),
}
fatty = {
'timestamp': datetime,
'expiration': expirationDatetime,
'operations': [
[
33,
operation,
],
],
'validate_type': 0,
'dapp': dappId,
'signatures': [
mySignature,
],
}
request = {
'trObj': self.json(fatty),
}
response = await self.publicPostTransactionCancelorder(request)
timestamp = self.milliseconds()
statusCode = self.safe_string(response, 'code')
status = 'canceled' if (statusCode == '0') else 'failed'
return {
'info': response,
'id': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': None,
'type': None,
'side': None,
'price': None,
'amount': None,
'filled': None,
'remaining': None,
'cost': None,
'trades': None,
'fee': None,
'clientOrderId': None,
'average': None,
}
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if not ('userid' in params) and (self.apiKey is None):
raise ArgumentsRequired('fetchMyTrades() requires self.apiKey or userid argument')
await self.load_markets()
market = self.market(symbol)
request = {
'userid': self.apiKey,
}
if symbol is not None:
request['symbol'] = market['id']
if limit is not None:
request['limit'] = limit
if since is not None:
request['since'] = since
response = await self.publicGetOrdersTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
if not ('userid' in params) and (self.apiKey is None):
raise ArgumentsRequired('fetchDeposits() requires self.apiKey or userid argument')
currency = None
request = {
'userid': self.apiKey,
}
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if since is not None:
request['since'] = since
if limit is not None:
request['limit'] = limit
response = await self.publicGetDeposits(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
if not ('userid' in params) and (self.apiKey is None):
raise ArgumentsRequired('fetchWithdrawals() requires self.apiKey or userid argument')
currency = None
request = {
'userid': self.apiKey,
}
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if since is not None:
request['since'] = since
if limit is not None:
request['limit'] = limit
response = await self.publicGetWithdrawals(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit)
def parse_transaction_status(self, status):
statuses = {
'DEPOSIT_FAILED': 'failed',
'FEE_SEND_FAILED': 'failed',
'FEE_FAILED': 'failed',
'PAY_SEND_FAILED': 'failed',
'PAY_FAILED': 'failed',
'BTT_FAILED': 'failed',
'WITHDDRAW_FAILED': 'failed',
'USER_FAILED': 'failed',
'FEE_EXECUED': 'pending',
'PAY_EXECUED': 'pending',
'WITHDDRAW_EXECUTED': 'pending',
'USER_EXECUED': 'pending',
'BTT_SUCCED': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
id = self.safe_string(transaction, 'id')
address = self.safe_string(transaction, 'address')
tag = self.safe_string(transaction, 'tag')
if tag is not None:
if len(tag) < 1:
tag = None
txid = self.safe_value(transaction, 'txid')
currencyId = self.safe_string(transaction, 'code')
code = self.safe_currency_code(currencyId, currency)
timestamp = self.safe_integer(transaction, 'timestamp')
datetime = self.safe_string(transaction, 'datetime')
type = self.safe_string(transaction, 'type')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
feeInfo = self.safe_value(transaction, 'fee')
feeCost = self.safe_number(feeInfo, 'cost')
feeCurrencyId = self.safe_string(feeInfo, 'code')
feeCode = self.safe_currency_code(feeCurrencyId, currency)
fee = {
'cost': feeCost,
'currency': feeCode,
}
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': datetime,
'address': address,
'tag': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
if not ('userid' in params) and (self.apiKey is None):
raise ArgumentsRequired('fetchDepositAddress() requires self.apiKey or userid argument')
currency = self.currency(code)
request = {
'userid': self.apiKey,
'code': currency['id'],
}
response = await self.publicGetDepositaddress(request)
address = self.safe_string(response[0], 'address')
tag = self.safe_string(response[0], 'tag')
chainType = self.safe_string(response[0], 'chainType')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'chainType': chainType,
'info': response,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api]
url += '/' + path
if params:
url += '?' + self.urlencode(params)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if code == 503:
raise DDoSProtection(self.id + ' ' + str(code) + ' ' + reason + ' ' + body)
if response is None:
return # fallback to default error handler
if 'code' in response:
status = self.safe_string(response, 'code')
if status == '1':
message = self.safe_string(response, 'msg')
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions, message, feedback)
raise ExchangeError(feedback)
| 41.087199 | 225 | 0.521584 | [
"MIT"
] | Abadiq313/ccxt | python/ccxt/async_support/bytetrade.py | 44,292 | Python |
import pytest
import numpy as np
from ebbef2p.structure import Structure
L = 2
E = 1
I = 1
def test_center_load():
P = 100
M_max = P * L / 4 # maximum moment
S_max = P/2 # max shearing force
w_max = -P * L ** 3 / (48 * E * I) # max displacement
tolerance = 1e-6 #set a tolerance of 0.0001%
s = Structure('test')
s.add_beam(coord=[0, L], E=E, I=I)
s.add_nodal_load(P, L/2, 'fz')
s.add_nodal_support({'uz': 0, 'ur': "NaN"}, 0)
s.add_nodal_support({'uz': 0, 'ur': "NaN"}, L)
s.add_nodes(25)
s.add_elements(s.nodes)
s.solve(s.build_global_matrix(), s.build_load_vector(), s.get_boudary_conditions())
assert min(s.get_displacements()['vertical_displacements']) == pytest.approx(w_max, rel=tolerance)
assert max(s.get_bending_moments()['values']) == pytest.approx(M_max, rel=tolerance)
assert max(s.get_shear_forces()['values']) == pytest.approx(S_max, rel=tolerance)
def test_uniformly_distributed_load():
q = 10
M_max = q * L ** 2 / 8 # maximum moment
S_max = q * L/2 # max shearing force
w_max = -5 * q * L ** 4 / (384 * E * I) # max displacement
tolerance = 1e-4 #set a tolerance of 0.01%
s = Structure('test')
s.add_beam(coord=[0, L], E=E, I=I)
s.add_distributed_load((q, q), (0, L))
s.add_nodal_support({'uz': 0, 'ur': "NaN"}, 0)
s.add_nodal_support({'uz': 0, 'ur': "NaN"}, L)
s.add_nodes(200)
s.add_elements(s.nodes)
s.solve(s.build_global_matrix(), s.build_load_vector(), s.get_boudary_conditions())
assert min(s.get_displacements()['vertical_displacements']) == pytest.approx(w_max, rel=tolerance)
assert max(s.get_bending_moments()['values']) == pytest.approx(M_max, rel=tolerance)
assert max(s.get_shear_forces()['values']) == pytest.approx(S_max, rel=1e-2) | 34.509434 | 102 | 0.630946 | [
"MIT"
] | bteodoru/ebbef2p-python | tests/test_simple supported_beam.py | 1,829 | Python |
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import sys
class Hwloc(AutotoolsPackage):
"""The Hardware Locality (hwloc) software project.
The Portable Hardware Locality (hwloc) software package
provides a portable abstraction (across OS, versions,
architectures, ...) of the hierarchical topology of modern
architectures, including NUMA memory nodes, sockets, shared
caches, cores and simultaneous multithreading. It also gathers
various system attributes such as cache and memory information
as well as the locality of I/O devices such as network
interfaces, InfiniBand HCAs or GPUs. It primarily aims at
helping applications with gathering information about modern
computing hardware so as to exploit it accordingly and
efficiently.
"""
homepage = "http://www.open-mpi.org/projects/hwloc/"
url = "https://download.open-mpi.org/release/hwloc/v2.0/hwloc-2.0.2.tar.gz"
list_url = "http://www.open-mpi.org/software/hwloc/"
list_depth = 2
git = 'https://github.com/open-mpi/hwloc.git'
version('master', branch='master')
version('2.0.2', sha256='27dcfe42e3fb3422b72ce48b48bf601c0a3e46e850ee72d9bdd17b5863b6e42c')
version('2.0.1', sha256='f1156df22fc2365a31a3dc5f752c53aad49e34a5e22d75ed231cd97eaa437f9d')
version('2.0.0', sha256='a0d425a0fc7c7e3f2c92a272ffaffbd913005556b4443e1887d2e1718d902887')
version('1.11.11', sha256='74329da3be1b25de8e98a712adb28b14e561889244bf3a8138afe91ab18e0b3a')
version('1.11.10', sha256='0a2530b739d9ebf60c4c1e86adb5451a20d9e78f7798cf78d0147cc6df328aac')
version('1.11.9', sha256='85b978995b67db0b1a12dd1a73b09ef3d39f8e3cb09f8b9c60cf04633acce46c')
version('1.11.8', sha256='8af89b1164a330e36d18210360ea9bb305e19f9773d1c882855d261a13054ea8')
version('1.11.7', sha256='ac16bed9cdd3c63bca1fe1ac3de522a1376b1487c4fc85b7b19592e28fd98e26')
version('1.11.6', sha256='67963f15197e6b551539c4ed95a4f8882be9a16cf336300902004361cf89bdee')
version('1.11.5', sha256='da2c780fce9b5440a1a7d1caf78f637feff9181a9d1ca090278cae4bea71b3df')
version('1.11.4', sha256='1b6a58049c31ce36aff162cf4332998fd468486bd08fdfe0249a47437311512d')
version('1.11.3', sha256='03a1cc63f23fed7e17e4d4369a75dc77d5c145111b8578b70e0964a12712dea0')
version('1.11.2', sha256='d11f091ed54c56c325ffca1083113a405fcd8a25d5888af64f5cd6cf587b7b0a')
version('1.11.1', sha256='b41f877d79b6026640943d57ef25311299378450f2995d507a5e633da711be61')
version('1.9', sha256='9fb572daef35a1c8608d1a6232a4a9f56846bab2854c50562dfb9a7be294f4e8')
variant('nvml', default=False, description="Support NVML device discovery")
variant('gl', default=False, description="Support GL device discovery")
variant('cuda', default=False, description="Support CUDA devices")
variant('libxml2', default=True, description="Build with libxml2")
variant('pci', default=(sys.platform != 'darwin'),
description="Support analyzing devices on PCI bus")
variant('shared', default=True, description="Build shared libraries")
variant(
'cairo',
default=False,
description='Enable the Cairo back-end of hwloc\'s lstopo command'
)
depends_on('pkgconfig', type='build')
depends_on('m4', type='build', when='@master')
depends_on('autoconf', type='build', when='@master')
depends_on('automake', type='build', when='@master')
depends_on('libtool', type='build', when='@master')
depends_on('cuda', when='+nvml')
depends_on('cuda', when='+cuda')
depends_on('gl', when='+gl')
depends_on('libpciaccess', when='+pci')
depends_on('libxml2', when='+libxml2')
depends_on('cairo', when='+cairo')
depends_on('numactl', when='@:1.11.11 platform=linux')
def url_for_version(self, version):
return "http://www.open-mpi.org/software/hwloc/v%s/downloads/hwloc-%s.tar.gz" % (version.up_to(2), version)
def configure_args(self):
args = [
# Disable OpenCL, since hwloc might pick up an OpenCL
# library at build time that is then not found at run time
# (Alternatively, we could require OpenCL as dependency.)
"--disable-opencl",
]
if '@2.0.0:' in self.spec:
args.append('--enable-netloc')
args.extend(self.enable_or_disable('cairo'))
args.extend(self.enable_or_disable('nvml'))
args.extend(self.enable_or_disable('gl'))
args.extend(self.enable_or_disable('cuda'))
args.extend(self.enable_or_disable('libxml2'))
args.extend(self.enable_or_disable('pci'))
args.extend(self.enable_or_disable('shared'))
return args
| 50.684211 | 115 | 0.723572 | ["ECL-2.0", "Apache-2.0", "MIT"] | CSCfi/spack | var/spack/repos/builtin/packages/hwloc/package.py | 4,815 | Python |
# System Imports
import cv2
import json
from typing import Optional
# Library imports
import numpy
# Twisted Import
from twisted.internet import reactor, defer, threads, protocol
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.interfaces import IAddress
# Package Imports
from .data import Image, ColorSpace
class cv_webcam (object):
def __init__ (self, device, img_width, img_height):
self.device_index = device
self.img_width = img_width
self.img_height = img_height
self.name = "cv_webcam(%s)" % device
self.camera = None
@defer.inlineCallbacks
def connect (self, _protocolFactory):
if self.camera is None:
self.camera = yield threads.deferToThread(cv2.VideoCapture, self.device_index)
# Set picture capture dimensions
self.camera.set(3, self.img_width)
self.camera.set(4, self.img_height)
defer.returnValue(self)
@defer.inlineCallbacks
def image (self):
"""
Get an image from the camera.
Returns an Image object.
"""
try:
flag, img_array = yield threads.deferToThread(self.camera.read)
except SystemError:
return
if flag is False:
print ("No image")
return
defer.returnValue(Image(img_array, ColorSpace.BGR))
def disconnect (self):
threads.deferToThread(self.camera.release)
class _camera_proxy_protocol (protocol.Protocol):
_state: str
_buffer: bytes = b''
_image_callback: Optional[defer.Deferred] = None
_camera_id: Optional[bytes] = None
def setCameraId(self, camera_id: int):
self._camera_id = str(camera_id).encode()
self.requestFormat()
# def connectionMade(self):
# if self._camera_id is not None:
# self.requestFormat()
def dataReceived(self, data: bytes):
"""
Byte 1: command
Byte 2-5: length
Byte 6+: data
"""
self._buffer += data
if len(self._buffer) > 5:
command = chr(self._buffer[0])
length = int.from_bytes(self._buffer[1:5], byteorder = 'big')
if len(self._buffer) >= length + 5:
data = self._buffer[5 : 5 + length]
self._buffer = self._buffer[5 + length : ]
if command == 'F':
self.formatReceived(data)
elif command == 'I':
self.imageReceived(data)
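    # Illustrative (assumed) frame from the camera server, matching the parsing
    # above -- one command byte, a 4-byte big-endian length, then the payload:
    #   payload = b'{"width": 640, "height": 480, "channels": 3, "colorspace": "BGR"}'
    #   frame = b'F' + len(payload).to_bytes(4, byteorder='big') + payload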
def formatReceived (self, data: bytes):
image_format = json.loads(data.decode())
if image_format['channels'] == 1:
self._image_shape = (image_format['height'], image_format['width'])
else:
self._image_shape = (
image_format['height'],
image_format['width'],
image_format['channels']
)
self._image_colorspace = image_format['colorspace']
def imageReceived (self, data: bytes):
try:
img_data = numpy.reshape(
numpy.frombuffer(data, dtype = numpy.uint8),
newshape = self._image_shape
)
self._image_callback.callback(img_data)
except (AttributeError, defer.AlreadyCalledError) as e:
# No callback, or callback already done. (Unexpected image data).
pass
except Exception as e:
try:
self._image_callback.errback(e)
except defer.AlreadyCalledError:
pass
def requestFormat (self):
self.transport.write(b'F' + self._camera_id + b'\n')
def requestImage (self):
self._image_callback = defer.Deferred()
self.transport.write(b'I' + self._camera_id + b'\n')
return self._image_callback
class camera_proxy (object):
def __init__ (self, host, port, camera_id):
self.point = TCP4ClientEndpoint(reactor, host, port)
self.name = f"camera_proxy({host!s}, {port!s})"
self.camera_id = camera_id
@defer.inlineCallbacks
def connect (self, _protocolFactory):
self._protocol = yield self.point.connect(
protocol.Factory.forProtocol(_camera_proxy_protocol)
)
self._protocol.setCameraId(self.camera_id)
# yield self._protocol._get_format_information()
defer.returnValue(self)
@defer.inlineCallbacks
def image (self):
"""
Get an image from the camera.
Returns a SimpleCV Image.
"""
try:
img_array = yield self._protocol.requestImage()
except Exception as e:
print('Exception fetching image', e)
return
defer.returnValue(Image(img_array, ColorSpace.BGR))
def disconnect (self):
        threads.deferToThread(self.camera.release)
| 29.011834 | 90 | 0.598409 | ["MIT"] | gar-syn/congo-lab | src/octopus/image/source.py | 4,903 | Python |
import numpy as np
import logging
import unittest
import os
import scipy.linalg as LA
import time
from sklearn.utils import safe_sqr, check_array
from scipy import stats
from pysnptools.snpreader import Bed,Pheno
from pysnptools.snpreader import SnpData,SnpReader
from pysnptools.kernelreader import KernelNpz
from pysnptools.kernelreader import SnpKernel
from pysnptools.kernelreader import KernelReader
from pysnptools.kernelreader import Identity as KernelIdentity
import pysnptools.util as pstutil
from pysnptools.standardizer import DiagKtoN,UnitTrained
from pysnptools.standardizer import Unit
from pysnptools.util import intersect_apply
from pysnptools.standardizer import Standardizer
from fastlmm.inference.lmm import LMM
from fastlmm.inference.fastlmm_predictor import _pheno_fixup
from fastlmm.inference import FastLMM
from pysnptools.standardizer import Identity as StandardizerIdentity
from scipy.stats import multivariate_normal
from fastlmm.util.pickle_io import load, save
# make FastLmm use this when there are no SNPs or K is Identity?
class LinearRegression(object):
'''
    A linear regression predictor that works like the FastLMM predictor in fastlmm_predictor.py, but expects all similarity matrices to be identity.
**Constructor:**
:Parameters: * **covariate_standardizer** (:class:`Standardizer`) -- The PySnpTools standardizer to be apply to X, the covariate data. Some choices include :class:`Standardizer.Unit` (Default. Fills missing with zero) and :class:`Standardizer.Identity` (do nothing)
:Example:
>>> import numpy as np
>>> import logging
>>> from pysnptools.snpreader import Pheno
>>> from fastlmm.inference import LinearRegression
>>> logging.basicConfig(level=logging.INFO)
>>> cov = Pheno("../feature_selection/examples/toydata.cov")
>>> pheno_fn = "../feature_selection/examples/toydata.phe"
>>> train_idx = np.r_[10:cov.iid_count] # iids 10 and on
>>> test_idx = np.r_[0:10] # the first 10 iids
>>> linreg = LinearRegression()
>>> #We give it phenotype information for extra examples, but it reorders and intersects the examples, so only training examples are used.
>>> _ = linreg.fit(X=cov[train_idx,:],y=pheno_fn)
>>> mean, covariance = linreg.predict(X=cov[test_idx,:])
>>> print mean.iid[0], round(mean.val[0],7), round(covariance.val[0,0],7)
['per0' 'per0'] 0.1518764 0.9043703
>>> nll = linreg.score(X=cov[test_idx,:],y=pheno_fn)
>>> print round(nll,7)
13.6688448
'''
def __init__(self,covariate_standardizer=Unit()):
self.covariate_standardizer = covariate_standardizer
self.is_fitted = False
def fit(self, X=None, y=None, K0_train=None, K1_train=None, h2=None, mixing=None,count_A1=None):
"""
Method for training a :class:`FastLMM` predictor. If the examples in X, y, K0_train, K1_train are not the same, they will be reordered and intersected.
:param X: training covariate information, optional:
If you give a string, it should be the file name of a PLINK phenotype-formatted file.
:type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.
:param y: training phenotype:
If you give a string, it should be the file name of a PLINK phenotype-formatted file.
:type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.
:param K0_train: Must be None. Represents the identity similarity matrix.
:type K0_train: None
:param K1_train: Must be None. Represents the identity similarity matrix.
:type K1_train: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__
:param h2: Ignored. Optional.
:type h2: number
:param mixing: Ignored. Optional.
:type mixing: number
:param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1
alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.
:type count_A1: bool
:rtype: self, the fitted Linear Regression predictor
"""
self.is_fitted = True
assert K0_train is None # could also accept that ID or no snps
assert K1_train is None # could also accept that ID or no snps
assert y is not None, "y must be given"
y = _pheno_fixup(y,count_A1=count_A1)
assert y.sid_count == 1, "Expect y to be just one variable"
X = _pheno_fixup(X, iid_if_none=y.iid,count_A1=count_A1)
X, y = intersect_apply([X, y])
y = y.read()
X, covar_unit_trained = X.read().standardize(self.covariate_standardizer,return_trained=True)
# add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offset
X = SnpData(iid=X.iid,
sid=FastLMM._new_snp_name(X),
val=np.c_[X.val,np.ones((X.iid_count,1))])
lsqSol = np.linalg.lstsq(X.val, y.val[:,0],rcond=-1)
bs=lsqSol[0] #weights
r2=lsqSol[1] #squared residuals
D=lsqSol[2] #rank of design matrix
N=y.iid_count
self.beta = bs
self.ssres = float(r2)
self.sstot = ((y.val-y.val.mean())**2).sum()
self.covar_unit_trained = covar_unit_trained
self.iid_count = X.iid_count
self.covar_sid = X.sid
self.pheno_sid = y.sid
return self
def predict(self,X=None,K0_whole_test=None,K1_whole_test=None,iid_if_none=None,count_A1=None):
"""
Method for predicting from a fitted :class:`FastLMM` predictor.
If the examples in X, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected.
:param X: testing covariate information, optional:
If you give a string, it should be the file name of a PLINK phenotype-formatted file.
:type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.
:param K0_whole_test: Must be None. Represents the identity similarity matrix.
:type K0_whole_test: None
:param K1_whole_test: Must be None. Represents the identity similarity matrix.
:type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__
:param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided.
:type iid_if_none: an ndarray of two strings
:param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1
alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.
:type count_A1: bool
:rtype: A `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__ of the means and a :class:`KernelData` of the covariance
"""
assert self.is_fitted, "Can only predict after predictor has been fitted"
assert K0_whole_test is None or isinstance(K0_whole_test,KernelIdentity) # could also accept no snps
assert K1_whole_test is None or isinstance(K1_whole_test,KernelIdentity) # could also accept no snps
X = _pheno_fixup(X,iid_if_none=iid_if_none,count_A1=count_A1)
X = X.read().standardize(self.covar_unit_trained)
# add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offset
X = SnpData(iid=X.iid,
sid=FastLMM._new_snp_name(X),
val=np.c_[X.read().val,np.ones((X.iid_count,1))])
assert np.array_equal(X.sid,self.covar_sid), "Expect covar sids to be the same in train and test."
pheno_predicted = X.val.dot(self.beta).reshape(-1,1)
ret0 = SnpData(iid = X.iid, sid=self.pheno_sid,val=pheno_predicted,pos=np.array([[np.nan,np.nan,np.nan]]),name="linear regression Prediction") #!!!replace 'parent_string' with 'name'
from pysnptools.kernelreader import KernelData
ret1 = KernelData(iid=X.iid,val=np.eye(X.iid_count)* self.ssres / self.iid_count)
return ret0, ret1
def score(self, X=None, y=None, K0_whole_test=None, K1_whole_test=None, iid_if_none=None, return_mse_too=False, count_A1=None):
"""
Method for calculating the negative log likelihood of testing examples.
If the examples in X,y, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected.
:param X: testing covariate information, optional:
If you give a string, it should be the file name of a PLINK phenotype-formatted file.
:type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.
:param y: testing phenotype:
If you give a string, it should be the file name of a PLINK phenotype-formatted file.
:type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.
:param K0_whole_test: Must be None. Represents the identity similarity matrix.
:type K0_whole_test: None
:param K1_whole_test: Must be None. Represents the identity similarity matrix.
:type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__
:param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided.
:type iid_if_none: an ndarray of two strings
:param return_mse_too: If true, will also return the mean squared error.
:type return_mse_too: bool
:param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1
alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.
:type count_A1: bool
:rtype: a float of the negative log likelihood and, optionally, a float of the mean squared error.
"""
mean0, covar0 = self.predict(K0_whole_test=K0_whole_test,K1_whole_test=K1_whole_test,X=X,iid_if_none=iid_if_none,count_A1=count_A1)
y = _pheno_fixup(y, iid_if_none=covar0.iid,count_A1=count_A1)
mean, covar, y = intersect_apply([mean0, covar0, y])
var = multivariate_normal(mean=mean.read(order='A',view_ok=True).val.reshape(-1), cov=covar.read(order='A',view_ok=True).val)
y_actual = y.read().val
nll = -np.log(var.pdf(y_actual.reshape(-1)))
if not return_mse_too:
return nll
else:
mse = ((y_actual-mean)**2).sum()
return nll, mse
"""
Created on 2013-08-02
@author: Christian Widmer <[email protected]>
@summary: Module for univariate feature selection in the presence of covariates
Motivated by sklearn's linear regression method for feature
selection, we've come up with an extended version that takes
care of covariates
based on sklearn code (f_regression):
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/univariate_selection.py
"""
#def get_example_data():
# """
# load plink files
# """
# import fastlmm.pyplink.plink as plink
# import pysnptools.snpreader.bed as Bed
# import fastlmm.util.util as util
# ipheno = 0
# foldIter = 0
# """
# import dataset
# dat = dataset.importDataset("pheno4")
# fn_bed = dat["bedFile"]
# fn_pheno = dat["phenoFile"]
# """
# fn_bed = "../featureSelection/examples/toydata"
# fn_pheno = "../feature_selection/examples/toydata.phe"
# import pysnptools.util.pheno as pstpheno
# pheno = pstpheno.loadPhen(fn_pheno)
# # load data
# bed = plink.Bed(fn_bed)
# indarr = util.intersect_ids([pheno['iid'],bed.iid])
# pheno['iid'] = pheno['iid'][indarr[:,0]]
# pheno['vals'] = pheno['vals'][indarr[:,0]]
# bed = bed[indarr[:,1],:]
# N = pheno['vals'].shape[0]
# y = pheno['vals'][:,ipheno]
# iid = pheno['iid']
# snps = bed.read().standardize()
# return snps, y
def f_regression_block(fun,X,y,blocksize=None,**args):
"""
runs f_regression for each block separately (saves memory).
-------------------------
fun : method that returns statistics,pval
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will tested sequentially.
y : array of shape(n_samples).
The data matrix
blocksize : number of SNPs per block
"""
    if blocksize is None:
return fun(X,y,**args)
idx_start = 0
idx_stop = int(blocksize)
pval = np.zeros(X.shape[1])
stats = np.zeros(X.shape[1])
while idx_start<X.shape[1]:
stats[idx_start:idx_stop], pval[idx_start:idx_stop] = fun(X[:,idx_start:idx_stop],y,**args)
idx_start = idx_stop
idx_stop += blocksize
if idx_stop>X.shape[1]:
idx_stop = X.shape[1]
return stats,pval
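# Usage sketch (hypothetical shapes): run the covariate-aware test in blocks of
# 10000 SNPs to bound peak memory, forwarding the covariates through **args:
#   stats, pval = f_regression_block(f_regression_cov, X, y, blocksize=10000, C=C)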
def f_regression_cov_alt(X, y, C):
"""
Implementation as derived in tex document
See pg 12 of following document for definition of F-statistic
http://www-stat.stanford.edu/~jtaylo/courses/stats191/notes/simple_diagnostics.pdf
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will tested sequentially.
y : array of shape(n_samples).
The data matrix
c : {array-like, sparse matrix} shape = (n_samples, n_covariates)
The set of covariates.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
"""
# make sure we don't overwrite input data
old_flag_X = X.flags.writeable
old_flag_C = C.flags.writeable
old_flag_y = y.flags.writeable
X.flags.writeable = False
C.flags.writeable = False
y.flags.writeable = False
#X, C, y = check_array(X, C, y, dtype=np.float)
y = y.ravel()
# make copy of input data
X = X.copy(order="F")
y = y.copy()
assert C.shape[1] < C.shape[0]
cpinv = np.linalg.pinv(C)
X -= np.dot(C,(np.dot(cpinv, X))) #most expensive line (runtime)
y -= np.dot(C,(np.dot(cpinv, y)))
yS = safe_sqr(y.T.dot(X)) # will create a copy
# Note: (X*X).sum(0) = X.T.dot(X).diagonal(), computed efficiently
# see e.g.: http://stackoverflow.com/questions/14758283/is-there-a-numpy-scipy-dot-product-calculating-only-the-diagonal-entries-of-the
# TODO: make this smarter using either stride tricks or cython
X *= X
denom = X.sum(0) * y.T.dot(y) - yS
F = yS / denom
# degrees of freedom
dof = (X.shape[0] - 1 - C.shape[1]) / (1) #(df_fm / (df_rm - df_fm))
F *= dof
# convert to p-values
pv = stats.f.sf(F, 1, dof)
# restore old state
X.flags.writeable = old_flag_X
C.flags.writeable = old_flag_C
y.flags.writeable = old_flag_y
return F, pv
def f_regression_cov(X, y, C):
"""Univariate linear regression tests
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 3 steps:
1. the regressor of interest and the data are orthogonalized
wrt constant regressors
2. the cross correlation between data and regressors is computed
3. it is converted to an F score then to a p-value
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will tested sequentially.
y : array of shape(n_samples).
The data matrix
c : {array-like, sparse matrix} shape = (n_samples, n_covariates)
The set of covariates.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
"""
X = check_array(X, dtype=np.float)
C = check_array(C, dtype=np.float)
y = check_array(y, dtype=np.float)
y = y.ravel()
assert C.shape[1] < C.shape[0]
cpinv = np.linalg.pinv(C)
X -= np.dot(C,(np.dot(cpinv, X)))
y -= np.dot(C,(np.dot(cpinv, y)))
# compute the correlation
corr = np.dot(y, X)
corr /= np.asarray(np.sqrt(safe_sqr(X).sum(axis=0))).ravel()
corr /= np.asarray(np.sqrt(safe_sqr(y).sum())).ravel()
# convert to p-value
dof = (X.shape[0] - 1 - C.shape[1]) / (1) #(df_fm / (df_rm - df_fm))
F = corr ** 2 / (1 - corr ** 2) * dof
pv = stats.f.sf(F, 1, dof)
return F, pv
def test_bias():
"""
make sure we get the same result for setting C=unitvec
"""
S, y = get_example_data()
C = np.ones((len(y),1))
from sklearn.feature_selection import f_regression
F1, pval1 = f_regression(S, y, center=True)
    F2, pval2 = f_regression_cov(S, y, C)
    F3, pval3 = f_regression_cov_alt(S, y, C)
# make sure values are the same
np.testing.assert_array_almost_equal(F1, F2)
np.testing.assert_array_almost_equal(F2, F3)
np.testing.assert_array_almost_equal(pval1, pval2)
np.testing.assert_array_almost_equal(pval2, pval3)
def test_cov():
"""
compare different implementations, make sure results are the same
"""
S, y = get_example_data()
C = S[:,0:10]
S = S[:,10:]
    F1, pval1 = f_regression_cov(S, y, C)
    F2, pval2 = f_regression_cov_alt(S, y, C)
np.testing.assert_array_almost_equal(F1, F2)
np.testing.assert_array_almost_equal(pval1, pval2)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
import doctest
doctest.testmod()
#test_cov()
#test_bias()
| 39.667347 | 274 | 0.654113 | ["Apache-2.0"] | HealthML/FaST-LMM | fastlmm/inference/linear_regression.py | 19,437 | Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
# Copyright (C) 2019 Northwestern University.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Fake demo records."""
import datetime
import json
import random
from pathlib import Path
from edtf.parser.grammar import level0Expression
from faker import Faker
from invenio_access.permissions import system_identity
from invenio_rdm_records.fixtures import VocabulariesFixture
class CachedVocabularies:
"""Singleton to store some vocabulary entries.
This is needed because otherwise expensive random picking would have to be
done for every call to create_fake_record().
Even then, we shouldn't load all vocabularies' entries in memory
(at least not big ones).
"""
_resource_type_ids = []
_subject_ids = []
@classmethod
def _read_vocabulary(cls, vocabulary):
dir_ = Path(__file__).parent
return VocabulariesFixture(
system_identity,
[Path("./app_data"), dir_ / "data"],
"vocabularies.yaml",
).get_records_by_vocabulary(vocabulary)
@classmethod
def fake_resource_type(cls):
"""Generate a random resource_type."""
if not cls._resource_type_ids:
cls._resource_type_ids = []
dir_ = Path(__file__).parent
res_types = cls._read_vocabulary("resource_types")
for res in res_types:
cls._resource_type_ids.append(res["id"])
random_id = random.choice(cls._resource_type_ids)
return {"id": random_id}
@classmethod
def fake_subjects(cls):
"""Generate random subjects."""
if not cls._subject_ids:
subjects = cls._read_vocabulary("subjects")
for subj in subjects:
cls._subject_ids.append(subj["id"])
if not cls._subject_ids:
return []
n = random.choice([0, 1, 2])
random_ids = random.sample(cls._subject_ids, n)
return [{"id": i} for i in random_ids]
@classmethod
def fake_language(cls):
"""Generate a random resource_type."""
random_id = random.choice(["eng", "aah", "aag"])
return {"id": random_id}
def fake_edtf_level_0():
"""Generates a fake publication_date string."""
def fake_date(end_date=None):
fake = Faker()
date_pattern = ['%Y', '%m', '%d']
        # randomly drop the day and possibly the month, so some dates are year-month or year-only
if random.choice([True, False]):
date_pattern.pop()
if random.choice([True, False]):
date_pattern.pop()
return fake.date("-".join(date_pattern), end_datetime=end_date)
f_date = fake_date()
# if interval
if random.choice([True, False]):
# get f_date as date object
parser = level0Expression("level0")
parsed_date = parser.parseString(f_date)[0]
date_tuple = parsed_date.lower_strict()[:3]
f_date_object = datetime.date(*date_tuple)
interval_start = fake_date(end_date=f_date_object)
return "/".join([interval_start, f_date])
return f_date
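# Illustrative outputs (random, so not exhaustive): "2014-03-22", "2009-11",
# "1987", or an interval such as "2001-05/2003-09-14".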
def create_fake_record():
"""Create records for demo purposes."""
fake = Faker()
data_to_use = {
"access": {
"record": "public",
"files": "public",
},
"files": {
"enabled": False,
},
"pids": {
},
"metadata": {
"resource_type": CachedVocabularies.fake_resource_type(),
"creators": [{
"person_or_org": {
"family_name": fake.last_name(),
"given_name": fake.first_name(),
"type": "personal",
"identifiers": [{
"scheme": "orcid",
"identifier": "0000-0002-1825-0097",
}],
},
"affiliations": [{
"name": fake.company(),
"identifiers": [{
"scheme": "ror",
"identifier": "03yrm5c26",
}]
}]
} for i in range(4)],
"title": fake.company() + "'s gallery",
"additional_titles": [{
"title": "a research data management platform",
"type": "subtitle",
"lang": "eng"
}, {
"title": fake.company() + "'s gallery",
"type": "alternativetitle",
"lang": "eng"
}],
"publisher": "InvenioRDM",
"publication_date": fake_edtf_level_0(),
"subjects": CachedVocabularies.fake_subjects(),
"contributors": [{
"person_or_org": {
"family_name": fake.last_name(),
"given_name": fake.first_name(),
"type": "personal",
},
"affiliations": [{
"name": fake.company(),
"identifiers": [{
"scheme": "ror",
"identifier": "03yrm5c26",
}]
}],
"role": "rightsholder"
} for i in range(3)],
# "dates": [{
# # No end date to avoid computations based on start
# "date": fake.date(pattern='%Y-%m-%d'),
# "description": "Random test date",
# "type": "other"
# }],
"languages": [CachedVocabularies.fake_language()],
# "related_identifiers": [{
# "identifier": "10.9999/rdm.9999988",
# "scheme": "doi",
# "relation_type": "requires",
# "resource_type": fake_resource_type()
# }],
"sizes": [
"11 pages"
],
"formats": [
"application/pdf"
],
"version": "v0.0.1",
# "rights": [{
# "rights": "Berkeley Software Distribution 3",
# "uri": "https://opensource.org/licenses/BSD-3-Clause",
# "identifier": "03yrm5c26",
# "scheme": "ror",
# }],
"description": fake.text(max_nb_chars=3000),
"additional_descriptions": [{
"description": fake.text(max_nb_chars=200),
"type": "methods",
"lang": "eng"
} for i in range(2)],
"funding": [{
"funder": {
"name": "European Commission",
"identifier": "03yrm5c26",
"scheme": "ror"
},
"award": {
"title": "OpenAIRE",
"number": "246686",
"identifier": "0000-0002-1825-0097",
"scheme": "orcid"
}
}],
# "locations": [{
# 'geometry': {
# 'type': 'Point',
# 'coordinates': [
# float(fake.latitude()), float(fake.longitude())
# ]
# },
# "place": fake.location_on_land()[2],
# "description": "Random place on land...",
# 'identifiers': [{
# 'scheme': 'ror',
# 'identifier': '03yrm5c26',
# }, {
# 'scheme': 'orcid',
# 'identifier': '0000-0002-1825-0097',
# }]
# }, {
# 'geometry': {
# 'type': 'MultiPoint',
# 'coordinates': [
# [float(fake.latitude()), float(fake.longitude())],
# [float(fake.latitude()), float(fake.longitude())]
# ]
# },
# "place": fake.location_on_land()[2],
# }
# ],
"references": [{
"reference": "Reference to something et al.",
"identifier": "0000000114559647",
"scheme": "isni"
}],
"identifiers": [{
"identifier": "ark:/123/456",
"scheme": "ark"
}],
}
}
return json.loads(json.dumps(data_to_use))
| 32.591603 | 78 | 0.469259 | ["MIT"] | Pineirin/invenio-rdm-records | invenio_rdm_records/fixtures/demo.py | 8,539 | Python |
# Generated by Django 3.2.5 on 2021-07-09 16:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0009_auto_20210709_1606'),
]
operations = [
migrations.AlterField(
model_name='account',
name='conf_label',
field=models.SmallIntegerField(choices=[(4, 'Top Secret'), (3, 'Secret'), (2, 'Confidential'), (1, 'Unclassified')], default=1, verbose_name='Confidentiality Label'),
),
migrations.AlterField(
model_name='account',
name='id',
field=models.CharField(default=4044705356, editable=False, max_length=10, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='account',
name='integrity_label',
field=models.SmallIntegerField(choices=[(4, 'Very Trusted'), (3, 'Trusted'), (2, 'Slightly Trusted'), (1, 'Untrusted')], default=1, verbose_name='Integrity Label'),
),
migrations.AlterField(
model_name='historicalaccount',
name='conf_label',
field=models.SmallIntegerField(choices=[(4, 'Top Secret'), (3, 'Secret'), (2, 'Confidential'), (1, 'Unclassified')], default=1, verbose_name='Confidentiality Label'),
),
migrations.AlterField(
model_name='historicalaccount',
name='id',
field=models.CharField(db_index=True, default=4044705356, editable=False, max_length=10),
),
migrations.AlterField(
model_name='historicalaccount',
name='integrity_label',
field=models.SmallIntegerField(choices=[(4, 'Very Trusted'), (3, 'Trusted'), (2, 'Slightly Trusted'), (1, 'Untrusted')], default=1, verbose_name='Integrity Label'),
),
]
| 41.727273 | 178 | 0.607298 | ["MIT"] | mmohajer9/banker | backend/accounts/migrations/0010_auto_20210709_1658.py | 1,836 | Python |
# -*- coding: utf-8 -*-
###############################################################################
#
# ListMembers
# Retrieves the email addresses of members of a MailChimp list.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListMembers(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListMembers Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListMembers, self).__init__(temboo_session, '/Library/MailChimp/ListMembers')
def new_input_set(self):
return ListMembersInputSet()
def _make_result_set(self, result, path):
return ListMembersResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListMembersChoreographyExecution(session, exec_id, path)
class ListMembersInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListMembers
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Mailchimp.)
"""
super(ListMembersInputSet, self)._set_input('APIKey', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, integer) Specifies the number of records in a page to be returned. Must be greater than zero and less than or equal to 15000. Defaults to 100.)
"""
super(ListMembersInputSet, self)._set_input('Limit', value)
def set_ListId(self, value):
"""
Set the value of the ListId input for this Choreo. ((required, string) The id of the Mailchimp list to retrieve members from.)
"""
super(ListMembersInputSet, self)._set_input('ListId', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) Indicates the desired format for the response. Accepted values are "json" or "xml" (the default).)
"""
super(ListMembersInputSet, self)._set_input('ResponseFormat', value)
def set_Since(self, value):
"""
Set the value of the Since input for this Choreo. ((optional, date) Retrieves records that have changed since this date/time. Formatted like 'YYYY-MM-DD HH:MM:SS.)
"""
super(ListMembersInputSet, self)._set_input('Since', value)
def set_Start(self, value):
"""
Set the value of the Start input for this Choreo. ((optional, integer) Specifies the page at which to begin returning records. Page size is defined by the limit argument. Must be zero or greater. Defaults to 0.)
"""
super(ListMembersInputSet, self)._set_input('Start', value)
def set_Status(self, value):
"""
Set the value of the Status input for this Choreo. ((optional, string) Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'updated'. Defaults to 'subscribed'.)
"""
super(ListMembersInputSet, self)._set_input('Status', value)
class ListMembersResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListMembers Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Mailchimp. Corresponds to the format specified in the ResponseFormat parameter. Defaults to "xml".)
"""
return self._output.get('Response', None)
class ListMembersChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListMembersResultSet(response, path)
| 42.823009 | 219 | 0.679686 | ["Apache-2.0"] | jordanemedlock/psychtruths | temboo/Library/MailChimp/ListMembers.py | 4,839 | Python |
#!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
# Copyright (c) 2020
# Author: xiaoweixiang
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from pip._vendor.chardet import __version__
from pip._vendor.chardet.compat import PY2
from pip._vendor.chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
"""
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector()
for line in lines:
line = bytearray(line)
u.feed(line)
# shortcut out of the loop to save reading further - particularly useful if we read a BOM.
if u.done:
break
u.close()
result = u.result
if PY2:
name = name.decode(sys.getfilesystemencoding(), 'ignore')
if result['encoding']:
return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['confidence'])
else:
return '{0}: no result'.format(name)
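# Illustrative call (assumed file name): description_of([b'hello world\n'], name='greeting.txt')
# would typically report something like "greeting.txt: ascii with confidence 1.0".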
def main(argv=None):
"""
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
"""
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings")
parser.add_argument('input',
help='File whose encoding we would like to determine. \
(default: stdin)',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin if PY2 else sys.stdin.buffer])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__':
main()
| 31.696629 | 98 | 0.616094 | ["Apache-2.0"] | fortbox/leetcode-solve | venv/lib/python3.8/site-packages/pip/_vendor/chardet/cli/chardetect.py | 2,821 | Python |
try:
from setuptools import setup
except ImportError:
    from distutils.core import setup
setup(
name="minspan",
version="0.1.0",
py_modules=["minspan"],
install_requires=["cobra"],
author="Ali Ebrahim and Aarash Bordbar",
author_email="[email protected]",
url="https://github.com/SBRG/minspan",
license="MIT",
classifiers=["License :: OSI Approved :: MIT License",
"Intended Audience :: Science/Research",
],
)
| 24.789474 | 58 | 0.613588 | ["MIT"] | SBRG/minspan | setup.py | 471 | Python |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import time
import math
from fairseq import utils
from fairseq import bleu
class Meter(object):
def reset(self):
pass
def update(self, val, n=1):
pass
@property
def avg(self):
pass
@property
def std(self):
return 0.0
class AverageMeter(Meter):
"""Computes and stores the average and current value"""
def __init__(self):
self.sum = 0
self.count = 0
self.sum_square = 0
def reset(self):
self.sum = 0
self.count = 0
self.sum_square = 0
def update(self, val, n=1):
if isinstance(val, AverageMeter):
reduced_meter: AverageMeter = utils.reduce_average_meter(self, val)
self.sum = reduced_meter.sum
self.count = reduced_meter.count
self.sum_square = reduced_meter.sum_square
else:
self.sum += val * n
self.count += n
self.sum_square = self.sum_square + (val * val) * n
@property
def avg(self):
if self.count == 0:
return 0.0
return self.sum / self.count
@property
def std(self):
expected_sum_square = self.sum_square / self.count
expected_sum = self.avg
return math.sqrt(expected_sum_square - expected_sum * expected_sum)
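# Quick illustrative check of the running statistics above:
#   m = AverageMeter(); m.update(2.0); m.update(4.0)
#   => m.avg == 3.0 and m.std == 1.0  (E[x^2] = 10, (E[x])^2 = 9)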
class ConcatentateMeter(Meter):
def __init__(self, lowercase=False):
self.scorer = bleu.SacrebleuScorer(lowercase=lowercase)
self.target_sum = []
self.hypo_sum = []
self.count = 0
def reset(self):
self.target_sum = []
self.hypo_sum = []
self.count = 0
def update(self, val, n=1):
self.target_sum += val[0] * n
self.hypo_sum += val[1] * n
self.count += n
# TODO compute corpus bleu here
@property
def avg(self):
if self.count == 0:
return 0.0
# Compute the corpus level BLEU
self.scorer.sys = self.hypo_sum
self.scorer.ref = self.target_sum
return self.scorer.score()
class BleuMeter(Meter):
def __init__(self):
self.correct, self.total, self.sys_len, self.ref_len = utils.get_zero_bleu_stats()
# TODO handle lowercase
self.scorer = bleu.SacrebleuScorer(lowercase=False)
def reset(self):
self.correct, self.total, self.sys_len, self.ref_len = utils.get_zero_bleu_stats()
def update(self, val, n=1):
# val will be a namedtuple
# We need to reduce
for _ in range(n):
self.correct = utils.reduce_lists(self.correct, val.correct)
self.total = utils.reduce_lists(self.total, val.total)
self.sys_len += val.sys_len
self.ref_len += val.ref_len
@property
def avg(self):
# We have the sufficient statistics, just compute the BLEU score
return self.scorer.compute_bleu(correct=self.correct, total=self.total, sys_len=self.sys_len,
ref_len=self.ref_len).score
class TimeMeter(object):
"""Computes the average occurrence of some event per second"""
def __init__(self, init=0):
self.reset(init)
def reset(self, init=0):
self.init = init
self.start = time.time()
self.n = 0
def update(self, val=1):
self.n += val
@property
def avg(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
return self.init + (time.time() - self.start)
class StopwatchMeter(Meter):
"""Computes the sum/avg duration of some event in seconds"""
def __init__(self):
self.reset()
def start(self):
self.start_time = time.time()
def stop(self, n=1):
if self.start_time is not None:
delta = time.time() - self.start_time
self.sum += delta
self.n += n
self.start_time = None
def reset(self):
self.sum = 0
self.n = 0
self.start_time = None
@property
def avg(self):
return self.sum / self.n
| 26.351515 | 101 | 0.596826 | ["BSD-3-Clause"] | NLP2CT/Meta-Curriculum | fairseq/meters.py | 4,348 | Python |
from flask_wtf import FlaskForm
from wtforms import StringField,SubmitField
from wtforms.validators import DataRequired
class AddTaskForm(FlaskForm):
title = StringField('Title', validators=[DataRequired()])
submit = SubmitField('Submit')
class DeleteTaskForm(FlaskForm):
    submit = SubmitField('Delete')
| 28.818182 | 61 | 0.782334 | ["MIT"] | rkustas/taskmanager | .history/forms_20200723155707.py | 317 | Python |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'QB'
copyright = u'2018, NRSER'
author = u'NRSER'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'QBdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'QB.tex', u'QB Documentation',
u'NRSER', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'qb', u'QB Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'QB', u'QB Documentation',
author, 'QB', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 30.448864 | 79 | 0.639112 | ["MIT"] | nrser/qb | dev/scratch/sphinx-quickstart/conf.py | 5,359 | Python |
import os
import sys
import logging
import io
from xml.sax.saxutils import escape
import template
#===============================================================================
#===============================================================================
class _TemplateHandler(object):
def __init__(self, project):
self.project = project
# Get toolchain build path ex:'/opt/arm-2012.03/bin'
toolchain_path = os.path.dirname(project.get_target_var("CC"))
# on Mac add homebrew path to compiler path
if sys.platform == "darwin":
toolchain_path += ":/usr/local/bin"
# Get toolchain cross prefix
# ex:'arm-none-linux-gnueabi-' or '' for native
toolchain_cross = project.get_target_var("CROSS")
if toolchain_cross:
toolchain_cross = os.path.basename(toolchain_cross)
# Replacement map
self.replacement = {
"NAME": project.name,
"PRODUCT": project.product,
"VARIANT": project.variant,
"TOOLCHAIN_PATH": toolchain_path,
"TOOLCHAIN_CROSS": toolchain_cross,
"BUILD_DIR": project.build_dir,
"BUILD_CMD": "${CWD}/build.sh",
"BUILD_ARGS": project.build_args,
"BUILD_TARGET": project.build_target,
"CLEAN_TARGET": project.clean_target,
"LINKED_RESOURCES": self._gen_linked_resources,
"SOURCE_ENTRIES": self._gen_source_entries,
"C_INCLUDE_DIRS": self._gen_include_dirs,
"C_DEFINES": self._gen_c_defines,
"C_INCLUDE_FILES": self._gen_include_files,
"CXX_INCLUDE_DIRS": self._gen_include_dirs,
"CXX_DEFINES": self._gen_cxx_defines,
"CXX_INCLUDE_FILES": self._gen_include_files,
}
def __call__(self, pattern):
action = self.replacement.get(pattern, None)
if action is None:
logging.error("%s: unknown replacement pattern '%s'",
self.project.name, pattern)
return ""
elif callable(action):
return action()
else:
return action
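    # The template expander (see generate() below) presumably invokes this handler
    # once per placeholder it finds, e.g. handler("BUILD_DIR") yields the build
    # directory string while handler("C_DEFINES") yields generated XML entries.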
def _gen_linked_resources(self):
output = io.StringIO()
for dep in self.project.linked_resources:
dep_path = self.project.linked_resources[dep]
output.write("<link>\n")
output.write("\t<name>%s</name>\n" % dep)
output.write("\t<type>2</type>\n")
output.write("\t<location>%s</location>\n" % dep_path)
output.write("</link>\n")
return output.getvalue()
def _gen_source_entries(self):
output = io.StringIO()
if self.project.linked_resources:
excluding = "|".join(self.project.linked_resources.keys())
output.write("<entry "
"excluding=\"%s\" "
"flags=\"VALUE_WORKSPACE_PATH|RESOLVED\" "
"kind=\"sourcePath\" "
"name=\"\"/>\n" % excluding)
for dep in self.project.linked_resources:
output.write("<entry "
"flags=\"VALUE_WORKSPACE_PATH|RESOLVED\" "
"kind=\"sourcePath\" "
"name=\"%s\"/>\n" % dep)
return output.getvalue()
def _gen_include_dirs(self):
output = io.StringIO()
for include in sorted(self.project.includes):
output.write("<listOptionValue "
"builtIn=\"false\" "
"value=\"%s\"/>\n" % include)
return output.getvalue()
def _gen_include_files(self):
output = io.StringIO()
for autoconf_h_file in sorted(self.project.autoconf_h_files):
output.write("<listOptionValue "
"builtIn=\"false\" "
"value=\"%s\"/>\n" % autoconf_h_file)
return output.getvalue()
def _gen_c_defines(self):
return self._gen_defines(self.project.defines_c)
def _gen_cxx_defines(self):
defines = {}
defines.update(self.project.defines_c)
defines.update(self.project.defines_cxx)
return self._gen_defines(defines)
@staticmethod
def _gen_defines(defines):
output = io.StringIO()
for define in sorted(defines.keys()):
output.write("<listOptionValue "
"builtIn=\"false\" "
"value=\"%s=%s\"/>\n" %
(define, escape(defines[define], {"\"": """})))
return output.getvalue()
#===============================================================================
#===============================================================================
def setup_argparse(parser):
# Nothing to do
pass
#===============================================================================
#===============================================================================
def generate(project):
_entries = [
(".project", "eclipse.project.template"),
(".cproject", "eclipse.cproject.template"),
]
for entry in _entries:
outfilepath = os.path.join(project.outdirpath, entry[0])
infilepath = os.path.join(os.path.dirname(__file__), entry[1])
logging.info("%s: generating '%s'", project.name, outfilepath)
template.expand(infilepath, outfilepath, _TemplateHandler(project))
| 37.797203 | 80 | 0.522109 | ["BSD-3-Clause"] | Parrot-Developers/alchemy | scripts/genproject/eclipse.py | 5,405 | Python |
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometerclient.openstack.common.apiclient import client
from ceilometerclient.openstack.common.apiclient import fake_client
from ceilometerclient.tests import utils
import ceilometerclient.v2.trait_descriptions
fixtures = {
'/v2/event_types/Foo/traits': {
'GET': (
{},
[
{'name': 'trait_1', 'type': 'string'},
{'name': 'trait_2', 'type': 'integer'},
{'name': 'trait_3', 'type': 'datetime'}
]
),
}
}
class TraitDescriptionManagerTest(utils.BaseTestCase):
def setUp(self):
super(TraitDescriptionManagerTest, self).setUp()
self.http_client = fake_client.FakeHTTPClient(fixtures=fixtures)
self.api = client.BaseClient(self.http_client)
self.mgr = (ceilometerclient.v2.trait_descriptions.
TraitDescriptionManager(self.api))
def test_list(self):
trait_descriptions = list(self.mgr.list('Foo'))
expect = [
'GET', '/v2/event_types/Foo/traits'
]
self.http_client.assert_called(*expect)
self.assertEqual(len(trait_descriptions), 3)
for i, vals in enumerate([('trait_1', 'string'),
('trait_2', 'integer'),
('trait_3', 'datetime')]):
name, type = vals
self.assertEqual(trait_descriptions[i].name, name)
self.assertEqual(trait_descriptions[i].type, type)
| 37.210526 | 78 | 0.630363 | ["Apache-2.0"] | zqfan/python-ceilometerclient | ceilometerclient/tests/v2/test_trait_descriptions.py | 2,121 | Python |
import os
import subprocess
import sys
import shutil
import json
import argparse
import git
import getpass
import time
import platform
PKG_ROOT = 'lmctl'
PKG_INFO = 'pkg_info.json'
DIST_DIR = 'dist'
WHL_FORMAT = 'lmctl-{version}-py3-none-any.whl'
DOCS_FORMAT = 'lmctl-{version}-docs'
DOCS_DIR = 'docs'
DOCKER_IMG_TAG = 'accanto/lmctl-jnlp-slave:{version}'
DOCKER_IMG_PATH = os.path.join('docker', 'jenkins-jnlp-slave')
parser=argparse.ArgumentParser()
parser.add_argument('--release', default=False, action='store_true')
parser.add_argument('--version', help='version to set for the release')
parser.add_argument('--post-version', help='version to set after the release')
parser.add_argument('--pypi-user', help='user for uploading to Pypi')
parser.add_argument('--pypi-pass', help='password for user uploading to Pypi')
args = parser.parse_args()
class BuildError(Exception):
pass
class BuildVariables:
def __init__(self):
        self.version = None
        self.post_version = None
        self.pypi_user = None
        self.pypi_pass = None
class Secret:
def __init__(self, value):
self.value = value
class Stage:
def __init__(self, builder, title):
self.builder = builder
self.title = title
self.exit_reason = None
self.exit_code = 0
def __enter__(self):
print('================================================')
print('{0}'.format(self.title))
print('================================================')
return self
def __exit__(self, type, err_value, traceback):
if err_value != None:
# Legit python error thrown
print('ERROR: {0}\n'.format(str(err_value)))
self.exit_code = 1
self.exit_reason = str(err_value)
try:
self.builder.report()
            except Exception:
pass
return
if self.exit_code != 0:
if self.exit_reason != None:
print(self.exit_reason)
self.builder.report()
exit(self.exit_code)
else:
print('')
def _cmd_exit(self, exit_code):
self.exit_code = exit_code
def exit_with_error(self, exit_code, reason):
self.exit_reason = reason
self.exit_code = exit_code
def parse_cmd(self, cmd):
parsed_cmd = []
printout = []
for item in cmd:
if isinstance(item, Secret):
printout.append('***')
parsed_cmd.append(item.value)
else:
printout.append(item)
parsed_cmd.append(item)
print('Executing: {0}'.format(' '.join(printout)))
return parsed_cmd
def run_cmd(self, *cmd):
cmd = self.parse_cmd(cmd)
working_dir = self.builder.project_path if self.builder.project_path != None and self.builder.project_path != '' else None
process = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr, cwd=working_dir)
process.communicate()
if process.returncode != 0:
return self._cmd_exit(process.returncode)
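# Typical use of Stage (mirrored by the Builder methods below): each build step
# runs inside a `with` block so failures are recorded and reported at the end:
#   with self.stage('Run Unit Tests') as s:
#       s.run_cmd('python3', '-m', 'unittest', 'discover', '-s', 'tests.unit')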
class Builder:
def __init__(self):
self.project_path = os.path.dirname(__file__)
self.project_path_is_current_dir = False
if self.project_path == None or self.project_path == '':
self.project_path_is_current_dir = True
self.stages = []
self.args_to_vars()
def args_to_vars(self):
self.vars = BuildVariables()
self.vars.version = args.version
self.vars.post_version = args.post_version
self.vars.pypi_user = args.pypi_user
self.vars.pypi_pass = args.pypi_pass
def report(self):
print('================================================')
print('Build Result')
print('================================================')
for s in self.stages:
if s.exit_code == 0:
print(' {0} - OK'.format(s.title))
else:
print(' {0} - FAILED'.format(s.title))
print(' ')
def stage(self, title):
stage = Stage(self, title)
self.stages.append(stage)
return stage
def _establish_who_we_are(self):
if self.project_path_is_current_dir:
print('Building at: ./')
else:
print('Building at: {0}'.format(self.project_path))
def doIt(self):
self._establish_who_we_are()
if args.release == True:
self.release()
else:
self.build()
self.report()
def build(self):
self.determine_version()
self.run_unit_tests()
self.build_python_wheel()
self.pkg_docs()
def release(self):
if self.vars.version is None:
raise ValueError('Must set --version when releasing')
if self.vars.post_version is None:
raise ValueError('Must set --post-version when releasing')
self.set_version()
self.build()
self.push_whl()
print('Waiting 5 seconds for Pypi to update....')
# Give the whl some time to be indexed on pypi
time.sleep(5)
self.build_jnlp_docker_image() # Requires the whl to have been pushed
self.push_jnlp_docker_image()
self.push_release_git_changes()
self.set_post_version()
self.push_post_release_git_changes()
def get_pypi_details(self):
if self.vars.pypi_user is None:
self.vars.pypi_user = input('Pypi Username: ')
if self.vars.pypi_pass is None:
self.vars.pypi_pass = getpass.getpass('Pypi Password:')
def set_version(self):
with self.stage('Setting Release Version') as s:
pkg_info_path = os.path.join(self.project_path, PKG_ROOT, PKG_INFO)
print('Setting version in {0} to {1}'.format(pkg_info_path, self.vars.version))
with open(pkg_info_path, 'r') as f:
pkg_info_data = json.load(f)
pkg_info_data['version'] = self.vars.version
with open(pkg_info_path, 'w') as f:
json.dump(pkg_info_data, f)
def run_unit_tests(self):
with self.stage('Run Unit Tests') as s:
s.run_cmd('python3', '-m', 'unittest', 'discover', '-s', 'tests.unit')
def build_python_wheel(self):
with self.stage('Build Wheel') as s:
print('Cleaning directory: {0}'.format(DIST_DIR))
dist_path = os.path.join(self.project_path, DIST_DIR)
if os.path.exists(dist_path):
shutil.rmtree(dist_path)
s.run_cmd('python3', 'setup.py', 'bdist_wheel')
def set_post_version(self):
with self.stage('Setting Post Release Version') as s:
pkg_info_path = os.path.join(self.project_path, PKG_ROOT, PKG_INFO)
print('Setting version in {0} to {1}'.format(pkg_info_path, self.vars.post_version))
with open(pkg_info_path, 'r') as f:
pkg_info_data = json.load(f)
pkg_info_data['version'] = self.vars.post_version
with open(pkg_info_path, 'w') as f:
json.dump(pkg_info_data, f)
def push_release_git_changes(self):
with self.stage('Commit Release Changes') as s:
repo = git.Repo(self.project_path)
repo.index.add([os.path.join(PKG_ROOT, PKG_INFO)])
repo.index.commit('Update version for release')
if self.vars.version in repo.tags:
repo.delete_tag(self.vars.version)
repo.create_tag(self.vars.version)
def push_post_release_git_changes(self):
with self.stage('Commit Post Release Changes') as s:
repo = git.Repo(self.project_path)
repo.index.add([os.path.join(PKG_ROOT, PKG_INFO)])
repo.index.commit('Update version for development')
origin = repo.remote('origin')
origin.push()
origin.push(tags=True)
def push_whl(self):
with self.stage('Push Whl to Pypi') as s:
self.get_pypi_details()
whl_path = os.path.join(self.project_path, DIST_DIR, WHL_FORMAT.format(version=self.vars.version))
s.run_cmd('python3', '-m', 'twine', 'upload', whl_path, '-u', self.vars.pypi_user, '-p', Secret(self.vars.pypi_pass))
def determine_version(self):
with self.stage('Gathering Version') as s:
pkg_info_path = os.path.join(self.project_path, PKG_ROOT, PKG_INFO)
print('Reading version from {0}'.format(pkg_info_path))
with open(pkg_info_path, 'r') as f:
pkg_info_data = json.load(f)
if 'version' not in pkg_info_data:
return s.exit_with_error('\'version\' not found in {0}'.format(pkg_info_path))
else:
self.vars.version = pkg_info_data['version']
print('Found version is: {0}'.format(self.vars.version))
def pkg_docs(self):
with self.stage('Package Docs') as s:
print('Packaging docs at {0}'.format(DOCS_DIR))
docs_output = DOCS_FORMAT.format(version=self.vars.version)
docs_output_file = docs_output + '.tgz'
transform_command = 's/{0}/{1}/'.format(DOCS_DIR, docs_output)
# Note that a system running on Mac will return 'Darwin' for platform.system()
if platform.system() == 'Darwin':
transform_command = '/{0}/{1}/'.format(DOCS_DIR, docs_output)
s.run_cmd('tar', '-cvz', '-s', transform_command, '-f', docs_output_file, DOCS_DIR+'/')
else:
s.run_cmd('tar', '-cvzf', docs_output_file, DOCS_DIR+'/', '--transform', transform_command)
def build_jnlp_docker_image(self):
with self.stage('Build JNLP Slave Docker Image') as s:
img_tag = DOCKER_IMG_TAG.format(version=self.vars.version)
s.run_cmd('docker', 'build', '--build-arg', 'LMCTL_VERSION={version}'.format(version=self.vars.version), '-t', img_tag, DOCKER_IMG_PATH)
def push_jnlp_docker_image(self):
with self.stage('Push JNLP Slave Docker Image') as s:
img_tag = DOCKER_IMG_TAG.format(version=self.vars.version)
s.run_cmd('docker', 'push', img_tag)
def main():
builder = Builder()
builder.doIt()
if __name__ == "__main__":
main()
| 36.770318 | 148 | 0.590044 | ["Apache-2.0"] | IBM/lmctl | build.py | 10,406 | Python |
"""Define AWS storage backends for media files."""
from storages.backends.s3boto3 import S3Boto3Storage
def MediaBackend():
"""Media storage backend."""
return S3Boto3Storage(location="media")
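# Usage sketch (assumption, not part of this module): Django imports the dotted
# path in DEFAULT_FILE_STORAGE and calls it with no arguments, so the
# zero-argument callable above can be wired in from settings.py. The dotted
# path below assumes this module is importable as aws.backends.
#
#     DEFAULT_FILE_STORAGE = "aws.backends.MediaBackend"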
| 22.666667 | 52 | 0.740196 | ["MIT"] | florimondmanca/personal-api | aws/backends.py | 204 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-03-04 14:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('talk', '0010_auto_20170304_1500'),
]
operations = [
migrations.AlterModelOptions(
name='room',
options={'ordering': ['priority', 'name'], 'verbose_name': 'Room', 'verbose_name_plural': 'Rooms'},
),
migrations.AlterModelOptions(
name='timeslot',
options={'ordering': ['name'], 'verbose_name': 'Time slot', 'verbose_name_plural': 'Time slots'},
),
migrations.AddField(
model_name='room',
name='priority',
field=models.PositiveSmallIntegerField(default=0, verbose_name='Priority'),
),
migrations.AlterField(
model_name='room',
name='name',
field=models.CharField(max_length=100, unique=True, verbose_name='Name'),
),
]
| 30.588235 | 111 | 0.589423 | ["BSD-3-Clause"] | stefanbethke/devday_website | devday/talk/migrations/0011_auto_20170304_1515.py | 1,040 | Python |
#-*- coding: utf-8 -*-
"""
what : process data, generate batch
"""
import numpy as np
import pickle
import random
from project_config import *
class ProcessDataText:
# store data
train_set = []
dev_set = []
test_set = []
def __init__(self, data_path):
self.data_path = data_path
# load data
self.train_set = self.load_data(DATA_TRAIN_TRANS, DATA_TRAIN_LABEL)
self.dev_set = self.load_data(DATA_DEV_TRANS, DATA_DEV_LABEL)
self.test_set = self.load_data(DATA_TEST_TRANS, DATA_TEST_LABEL)
self.dic_size = 0
with open( data_path + DIC ) as f:
self.dic_size = len( pickle.load(f) )
def load_data(self, text_trans, label):
print 'load data : ' + text_trans + ' ' + label
output_set = []
tmp_text_trans = np.load(self.data_path + text_trans)
tmp_label = np.load(self.data_path + label)
for i in xrange( len(tmp_label) ) :
output_set.append( [tmp_text_trans[i], tmp_label[i]] )
print '[completed] load data'
return output_set
def get_glove(self):
return np.load( self.data_path + GLOVE )
"""
inputs:
data : data to be processed (train/dev/test)
batch_size : mini-batch size
encoder_size : max encoder time step
is_test : True, inference stage (ordered input) ( default : False )
start_index : start index of mini-batch
return:
encoder_input : [batch, time_step(==encoder_size)]
encoder_seq : [batch] - valid word sequence
labels : [batch, category] - category is one-hot vector
"""
def get_batch(self, data, batch_size, encoder_size, is_test=False, start_index=0):
encoder_inputs, encoder_seq, labels = [], [], []
index = start_index
        # Get a batch of encoder inputs from data (random sampling for train,
        # ordered traversal for dev/test), padding them if needed
for _ in xrange(batch_size):
if is_test is False:
# train case - random sampling
trans, label = random.choice(data)
else:
# dev, test case = ordered data
if index >= len(data):
trans, label = data[0] # won't be evaluated
index += 1
else:
trans, label = data[index]
index += 1
tmp_index = np.where( trans == 0 )[0] # find the pad index
if ( len(tmp_index) > 0 ) : # pad exists
seqN = np.min((tmp_index[0],encoder_size))
else : # no-pad
seqN = encoder_size
encoder_inputs.append( trans[:encoder_size] )
encoder_seq.append( seqN )
tmp_label = np.zeros( N_CATEGORY, dtype=np.float )
tmp_label[label] = 1
labels.append( tmp_label )
return encoder_inputs, encoder_seq, np.reshape( labels, (batch_size,N_CATEGORY) )
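# Usage sketch (illustrative only, not part of the original module): how the
# loader and get_batch above fit together. The data path and sizes are made-up
# example values; the file-name and N_CATEGORY constants come from project_config.
def _example_get_batch(data_path='./data/'):
    data = ProcessDataText(data_path)
    # encoder_inputs: [batch, encoder_size] padded word ids
    # encoder_seq:    [batch] valid sequence lengths
    # labels:         [batch, N_CATEGORY] one-hot targets
    return data.get_batch(data.train_set, batch_size=128, encoder_size=128)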
| 31.865385 | 93 | 0.514786 | ["MIT"] | Ruddy261994/multimodal-speech-emotion | model/process_data_text.py | 3,314 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ActionGroup']
class ActionGroup(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action_group_name: Optional[pulumi.Input[str]] = None,
automation_runbook_receivers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AutomationRunbookReceiverArgs']]]]] = None,
azure_app_push_receivers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AzureAppPushReceiverArgs']]]]] = None,
email_receivers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EmailReceiverArgs']]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
group_short_name: Optional[pulumi.Input[str]] = None,
itsm_receivers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ItsmReceiverArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sms_receivers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SmsReceiverArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
webhook_receivers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WebhookReceiverArgs']]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
An action group resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] action_group_name: The name of the action group.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AutomationRunbookReceiverArgs']]]] automation_runbook_receivers: The list of AutomationRunbook receivers that are part of this action group.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AzureAppPushReceiverArgs']]]] azure_app_push_receivers: The list of AzureAppPush receivers that are part of this action group.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EmailReceiverArgs']]]] email_receivers: The list of email receivers that are part of this action group.
:param pulumi.Input[bool] enabled: Indicates whether this action group is enabled. If an action group is not enabled, then none of its receivers will receive communications.
:param pulumi.Input[str] group_short_name: The short name of the action group. This will be used in SMS messages.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ItsmReceiverArgs']]]] itsm_receivers: The list of ITSM receivers that are part of this action group.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SmsReceiverArgs']]]] sms_receivers: The list of SMS receivers that are part of this action group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WebhookReceiverArgs']]]] webhook_receivers: The list of webhook receivers that are part of this action group.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['action_group_name'] = action_group_name
__props__['automation_runbook_receivers'] = automation_runbook_receivers
__props__['azure_app_push_receivers'] = azure_app_push_receivers
__props__['email_receivers'] = email_receivers
if enabled is None:
enabled = True
if enabled is None and not opts.urn:
raise TypeError("Missing required property 'enabled'")
__props__['enabled'] = enabled
if group_short_name is None and not opts.urn:
raise TypeError("Missing required property 'group_short_name'")
__props__['group_short_name'] = group_short_name
__props__['itsm_receivers'] = itsm_receivers
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['sms_receivers'] = sms_receivers
__props__['tags'] = tags
__props__['webhook_receivers'] = webhook_receivers
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:insights/v20170401:ActionGroup"), pulumi.Alias(type_="azure-native:insights:ActionGroup"), pulumi.Alias(type_="azure-nextgen:insights:ActionGroup"), pulumi.Alias(type_="azure-native:insights/latest:ActionGroup"), pulumi.Alias(type_="azure-nextgen:insights/latest:ActionGroup"), pulumi.Alias(type_="azure-native:insights/v20180301:ActionGroup"), pulumi.Alias(type_="azure-nextgen:insights/v20180301:ActionGroup"), pulumi.Alias(type_="azure-native:insights/v20180901:ActionGroup"), pulumi.Alias(type_="azure-nextgen:insights/v20180901:ActionGroup"), pulumi.Alias(type_="azure-native:insights/v20190301:ActionGroup"), pulumi.Alias(type_="azure-nextgen:insights/v20190301:ActionGroup"), pulumi.Alias(type_="azure-native:insights/v20190601:ActionGroup"), pulumi.Alias(type_="azure-nextgen:insights/v20190601:ActionGroup")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ActionGroup, __self__).__init__(
'azure-native:insights/v20170401:ActionGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ActionGroup':
"""
Get an existing ActionGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["automation_runbook_receivers"] = None
__props__["azure_app_push_receivers"] = None
__props__["email_receivers"] = None
__props__["enabled"] = None
__props__["group_short_name"] = None
__props__["itsm_receivers"] = None
__props__["location"] = None
__props__["name"] = None
__props__["sms_receivers"] = None
__props__["tags"] = None
__props__["type"] = None
__props__["webhook_receivers"] = None
return ActionGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="automationRunbookReceivers")
def automation_runbook_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.AutomationRunbookReceiverResponse']]]:
"""
The list of AutomationRunbook receivers that are part of this action group.
"""
return pulumi.get(self, "automation_runbook_receivers")
@property
@pulumi.getter(name="azureAppPushReceivers")
def azure_app_push_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.AzureAppPushReceiverResponse']]]:
"""
The list of AzureAppPush receivers that are part of this action group.
"""
return pulumi.get(self, "azure_app_push_receivers")
@property
@pulumi.getter(name="emailReceivers")
def email_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.EmailReceiverResponse']]]:
"""
The list of email receivers that are part of this action group.
"""
return pulumi.get(self, "email_receivers")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[bool]:
"""
Indicates whether this action group is enabled. If an action group is not enabled, then none of its receivers will receive communications.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="groupShortName")
def group_short_name(self) -> pulumi.Output[str]:
"""
The short name of the action group. This will be used in SMS messages.
"""
return pulumi.get(self, "group_short_name")
@property
@pulumi.getter(name="itsmReceivers")
def itsm_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.ItsmReceiverResponse']]]:
"""
The list of ITSM receivers that are part of this action group.
"""
return pulumi.get(self, "itsm_receivers")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="smsReceivers")
def sms_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.SmsReceiverResponse']]]:
"""
The list of SMS receivers that are part of this action group.
"""
return pulumi.get(self, "sms_receivers")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Azure resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="webhookReceivers")
def webhook_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.WebhookReceiverResponse']]]:
"""
The list of webhook receivers that are part of this action group.
"""
return pulumi.get(self, "webhook_receivers")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
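# Usage sketch (illustrative assumption, not generated code): creating an action
# group with the required arguments documented in __init__ above. The resource,
# group, and resource-group names are placeholders; this helper is never called
# by the module itself.
def _example_action_group():
    return ActionGroup(
        "exampleActionGroup",
        action_group_name="example-action-group",
        resource_group_name="example-rg",
        group_short_name="example",
        enabled=True)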
| 50.452586 | 905 | 0.674071 | ["Apache-2.0"] | pulumi-bot/pulumi-azure-native | sdk/python/pulumi_azure_native/insights/v20170401/action_group.py | 11,705 | Python |